Diffstat (limited to 'include')
677 files changed, 16452 insertions, 6795 deletions
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h index 1538a6853822..1b4c45815695 100644 --- a/include/acpi/acoutput.h +++ b/include/acpi/acoutput.h @@ -362,7 +362,7 @@ * * A less-safe version of the macros is provided for optional use if the * compiler uses excessive CPU stack (for example, this may happen in the - * debug case if code optimzation is disabled.) + * debug case if code optimization is disabled.) */ /* Exit trace helper macro */ diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index f28b097c658f..3a82faac5767 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -78,6 +78,7 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev, bool acpi_dev_found(const char *hid); bool acpi_dev_present(const char *hid, const char *uid, s64 hrv); +bool acpi_reduced_hardware(void); #ifdef CONFIG_ACPI @@ -689,11 +690,30 @@ acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const cha struct acpi_device * acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv); +/** + * for_each_acpi_dev_match - iterate over ACPI devices that matching the criteria + * @adev: pointer to the matching ACPI device, NULL at the end of the loop + * @hid: Hardware ID of the device. + * @uid: Unique ID of the device, pass NULL to not check _UID + * @hrv: Hardware Revision of the device, pass -1 to not check _HRV + * + * The caller is responsible for invoking acpi_dev_put() on the returned device. + * + * FIXME: Due to above requirement there is a window that may invalidate @adev + * and next iteration will use a dangling pointer, e.g. in the case of a + * hotplug event. That said, the caller should ensure that this will never + * happen. + */ #define for_each_acpi_dev_match(adev, hid, uid, hrv) \ for (adev = acpi_dev_get_first_match_dev(hid, uid, hrv); \ adev; \ adev = acpi_dev_get_next_match_dev(adev, hid, uid, hrv)) +static inline struct acpi_device *acpi_dev_get(struct acpi_device *adev) +{ + return adev ? to_acpi_device(get_device(&adev->dev)) : NULL; +} + static inline void acpi_dev_put(struct acpi_device *adev) { put_device(&adev->dev); diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h index 94d356fcc483..8372b0e7fd15 100644 --- a/include/acpi/acpi_drivers.h +++ b/include/acpi/acpi_drivers.h @@ -12,18 +12,6 @@ #define ACPI_MAX_STRING 80 /* - * Please update drivers/acpi/debug.c and Documentation/firmware-guide/acpi/debug.rst - * if you add to this list. 
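As a usage illustration for the for_each_acpi_dev_match() kernel-doc and the new acpi_dev_get() helper added to acpi_bus.h above, here is a minimal, hedged sketch of the intended calling pattern. The "EXMP0001" _HID and the example_* name are invented for illustration and are not part of the patch.

static bool example_acpi_device_present(void)
{
	struct acpi_device *adev;

	for_each_acpi_dev_match(adev, "EXMP0001", NULL, -1) {
		/*
		 * Work with the first match.  Per the kernel-doc above the
		 * caller owns the reference, so drop it with acpi_dev_put()
		 * when done; acpi_dev_get() would be used instead to keep
		 * the device alive beyond this function.
		 */
		dev_info(&adev->dev, "matching device found\n");
		acpi_dev_put(adev);
		return true;
	}

	return false;
}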
- */ -#define ACPI_SBS_COMPONENT 0x00100000 -#define ACPI_FAN_COMPONENT 0x00200000 -#define ACPI_PCI_COMPONENT 0x00400000 -#define ACPI_CONTAINER_COMPONENT 0x01000000 -#define ACPI_SYSTEM_COMPONENT 0x02000000 -#define ACPI_MEMORY_DEVICE_COMPONENT 0x08000000 -#define ACPI_PROCESSOR_COMPONENT 0x20000000 - -/* * _HID definitions * HIDs must conform to ACPI spec(6.1.4) * Linux specific HIDs do not apply to this and begin with LNX: diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 370293ee8399..f8d44b06f3e3 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -12,7 +12,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20210105 +#define ACPI_CA_VERSION 0x20210331 #include <acpi/acconfig.h> #include <acpi/actypes.h> diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h index 9bccac9becd7..8e2319bbd0a2 100644 --- a/include/acpi/acrestyp.h +++ b/include/acpi/acrestyp.h @@ -381,7 +381,7 @@ struct acpi_resource_gpio { #define ACPI_IO_RESTRICT_OUTPUT 2 #define ACPI_IO_RESTRICT_NONE_PRESERVE 3 -/* Common structure for I2C, SPI, and UART serial descriptors */ +/* Common structure for I2C, SPI, UART, CSI2 serial descriptors */ #define ACPI_RESOURCE_SERIAL_COMMON \ u8 revision_id; \ @@ -403,6 +403,7 @@ ACPI_RESOURCE_SERIAL_COMMON}; #define ACPI_RESOURCE_SERIAL_TYPE_I2C 1 #define ACPI_RESOURCE_SERIAL_TYPE_SPI 2 #define ACPI_RESOURCE_SERIAL_TYPE_UART 3 +#define ACPI_RESOURCE_SERIAL_TYPE_CSI2 4 /* Values for slave_mode field above */ @@ -505,6 +506,11 @@ struct acpi_resource_uart_serialbus { #define ACPI_UART_CLEAR_TO_SEND (1<<6) #define ACPI_UART_REQUEST_TO_SEND (1<<7) +struct acpi_resource_csi2_serialbus { + ACPI_RESOURCE_SERIAL_COMMON u8 local_port_instance; + u8 phy_type; +}; + struct acpi_resource_pin_function { u8 revision_id; u8 pin_config; @@ -634,6 +640,7 @@ union acpi_resource_data { struct acpi_resource_i2c_serialbus i2c_serial_bus; struct acpi_resource_spi_serialbus spi_serial_bus; struct acpi_resource_uart_serialbus uart_serial_bus; + struct acpi_resource_csi2_serialbus csi2_serial_bus; struct acpi_resource_common_serialbus common_serial_bus; struct acpi_resource_pin_function pin_function; struct acpi_resource_pin_config pin_config; diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index af0a8c3b87b7..ce59903c2695 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h @@ -28,6 +28,7 @@ #define ACPI_SIG_BERT "BERT" /* Boot Error Record Table */ #define ACPI_SIG_BGRT "BGRT" /* Boot Graphics Resource Table */ #define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */ +#define ACPI_SIG_CEDT "CEDT" /* CXL Early Discovery Table */ #define ACPI_SIG_CPEP "CPEP" /* Corrected Platform Error Polling table */ #define ACPI_SIG_CSRT "CSRT" /* Core System Resource Table */ #define ACPI_SIG_DBG2 "DBG2" /* Debug Port table type 2 */ @@ -303,6 +304,49 @@ struct acpi_table_boot { /******************************************************************************* * + * CEDT - CXL Early Discovery Table + * Version 1 + * + * Conforms to the "CXL Early Discovery Table" (CXL 2.0) + * + ******************************************************************************/ + +struct acpi_table_cedt { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/* CEDT subtable header (Performance Record Structure) */ + +struct acpi_cedt_header { + u8 type; + u8 reserved; + u16 length; +}; + +/* Values for Type field above */ + +enum acpi_cedt_type { + ACPI_CEDT_TYPE_CHBS = 0, + ACPI_CEDT_TYPE_RESERVED = 1 +}; + +/* + * CEDT 
subtables + */ + +/* 0: CXL Host Bridge Structure */ + +struct acpi_cedt_chbs { + struct acpi_cedt_header header; + u32 uid; + u32 cxl_version; + u32 reserved; + u64 base; + u64 length; +}; + +/******************************************************************************* + * * CPEP - Corrected Platform Error Polling table (ACPI 4.0) * Version 1 * @@ -1445,7 +1489,8 @@ struct acpi_hmat_locality { struct acpi_hmat_structure header; u8 flags; u8 data_type; - u16 reserved1; + u8 min_transfer_size; + u8 reserved1; u32 number_of_initiator_Pds; u32 number_of_target_Pds; u32 reserved2; @@ -1454,15 +1499,18 @@ struct acpi_hmat_locality { /* Masks for Flags field above */ -#define ACPI_HMAT_MEMORY_HIERARCHY (0x0F) +#define ACPI_HMAT_MEMORY_HIERARCHY (0x0F) /* Bits 0-3 */ -/* Values for Memory Hierarchy flag */ +/* Values for Memory Hierarchy flags */ #define ACPI_HMAT_MEMORY 0 #define ACPI_HMAT_LAST_LEVEL_CACHE 1 #define ACPI_HMAT_1ST_LEVEL_CACHE 2 #define ACPI_HMAT_2ND_LEVEL_CACHE 3 #define ACPI_HMAT_3RD_LEVEL_CACHE 4 +#define ACPI_HMAT_MINIMUM_XFER_SIZE 0x10 /* Bit 4: ACPI 6.4 */ +#define ACPI_HMAT_NON_SEQUENTIAL_XFERS 0x20 /* Bit 5: ACPI 6.4 */ + /* Values for data_type field above */ diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index d6478c430c99..18cafe3ebddc 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h @@ -36,6 +36,7 @@ #define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */ #define ACPI_SIG_PCCT "PCCT" /* Platform Communications Channel Table */ #define ACPI_SIG_PDTT "PDTT" /* Platform Debug Trigger Table */ +#define ACPI_SIG_PHAT "PHAT" /* Platform Health Assessment Table */ #define ACPI_SIG_PMTT "PMTT" /* Platform Memory Topology Table */ #define ACPI_SIG_PPTT "PPTT" /* Processor Properties Topology Table */ #define ACPI_SIG_RASF "RASF" /* RAS Feature table */ @@ -67,7 +68,7 @@ * IORT - IO Remapping Table * * Conforms to "IO Remapping Table System Software on ARM Platforms", - * Document number: ARM DEN 0049D, March 2018 + * Document number: ARM DEN 0049E.b, Feb 2021 * ******************************************************************************/ @@ -85,7 +86,7 @@ struct acpi_iort_node { u8 type; u16 length; u8 revision; - u32 reserved; + u32 identifier; u32 mapping_count; u32 mapping_offset; char node_data[1]; @@ -99,7 +100,8 @@ enum acpi_iort_node_type { ACPI_IORT_NODE_PCI_ROOT_COMPLEX = 0x02, ACPI_IORT_NODE_SMMU = 0x03, ACPI_IORT_NODE_SMMU_V3 = 0x04, - ACPI_IORT_NODE_PMCG = 0x05 + ACPI_IORT_NODE_PMCG = 0x05, + ACPI_IORT_NODE_RMR = 0x06, }; struct acpi_iort_id_mapping { @@ -166,10 +168,11 @@ struct acpi_iort_root_complex { u8 reserved[3]; /* Reserved, must be zero */ }; -/* Values for ats_attribute field above */ +/* Masks for ats_attribute field above */ -#define ACPI_IORT_ATS_SUPPORTED 0x00000001 /* The root complex supports ATS */ -#define ACPI_IORT_ATS_UNSUPPORTED 0x00000000 /* The root complex doesn't support ATS */ +#define ACPI_IORT_ATS_SUPPORTED (1) /* The root complex ATS support */ +#define ACPI_IORT_PRI_SUPPORTED (1<<1) /* The root complex PRI support */ +#define ACPI_IORT_PASID_FWD_SUPPORTED (1<<2) /* The root complex PASID forward support */ struct acpi_iort_smmu { u64 base_address; /* SMMU base address */ @@ -240,6 +243,18 @@ struct acpi_iort_pmcg { u64 page1_base_address; }; +struct acpi_iort_rmr { + u32 flags; + u32 rmr_count; + u32 rmr_offset; +}; + +struct acpi_iort_rmr_desc { + u64 base_address; + u64 length; + u32 reserved; +}; + /******************************************************************************* * * IVRS - I/O 
Virtualization Reporting Structure @@ -276,6 +291,7 @@ struct acpi_ivrs_header { enum acpi_ivrs_type { ACPI_IVRS_TYPE_HARDWARE1 = 0x10, ACPI_IVRS_TYPE_HARDWARE2 = 0x11, + ACPI_IVRS_TYPE_HARDWARE3 = 0x40, ACPI_IVRS_TYPE_MEMORY1 = 0x20, ACPI_IVRS_TYPE_MEMORY2 = 0x21, ACPI_IVRS_TYPE_MEMORY3 = 0x22 @@ -364,7 +380,11 @@ enum acpi_ivrs_device_entry_type { ACPI_IVRS_TYPE_ALIAS_START = 67, /* Uses struct acpi_ivrs_device8a */ ACPI_IVRS_TYPE_EXT_SELECT = 70, /* Uses struct acpi_ivrs_device8b */ ACPI_IVRS_TYPE_EXT_START = 71, /* Uses struct acpi_ivrs_device8b */ - ACPI_IVRS_TYPE_SPECIAL = 72 /* Uses struct acpi_ivrs_device8c */ + ACPI_IVRS_TYPE_SPECIAL = 72, /* Uses struct acpi_ivrs_device8c */ + + /* Variable-length device entries */ + + ACPI_IVRS_TYPE_HID = 240 /* Uses ACPI_IVRS_DEVICE_HID */ }; /* Values for Data field above */ @@ -416,6 +436,16 @@ struct acpi_ivrs_device8c { #define ACPI_IVHD_IOAPIC 1 #define ACPI_IVHD_HPET 2 +/* Type 240: variable-length device entry */ + +struct acpi_ivrs_device_hid { + struct acpi_ivrs_de_header header; + u64 acpi_hid; + u64 acpi_cid; + u8 uid_type; + u8 uid_length; +}; + /* 0x20, 0x21, 0x22: I/O Virtualization Memory Definition Block (IVMD) */ struct acpi_ivrs_memory { @@ -516,7 +546,8 @@ enum acpi_madt_type { ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13, ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14, ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15, - ACPI_MADT_TYPE_RESERVED = 16 /* 16 and greater are reserved */ + ACPI_MADT_TYPE_MULTIPROC_WAKEUP = 16, + ACPI_MADT_TYPE_RESERVED = 17 /* 17 and greater are reserved */ }; /* @@ -723,6 +754,15 @@ struct acpi_madt_generic_translator { u32 reserved2; }; +/* 16: Multiprocessor wakeup (ACPI 6.4) */ + +struct acpi_madt_multiproc_wakeup { + struct acpi_subtable_header header; + u16 mailbox_version; + u32 reserved; /* reserved - must be zero */ + u64 base_address; +}; + /* * Common flags fields for MADT subtables */ @@ -983,12 +1023,14 @@ struct acpi_nfit_system_address { u64 address; u64 length; u64 memory_mapping; + u64 location_cookie; /* ACPI 6.4 */ }; /* Flags */ #define ACPI_NFIT_ADD_ONLINE_ONLY (1) /* 00: Add/Online Operation Only */ #define ACPI_NFIT_PROXIMITY_VALID (1<<1) /* 01: Proximity Domain Valid */ +#define ACPI_NFIT_LOCATION_COOKIE_VALID (1<<2) /* 02: SPA location cookie valid (ACPI 6.4) */ /* Range Type GUIDs appear in the include/acuuid.h file */ @@ -1184,7 +1226,8 @@ enum acpi_pcct_type { ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2 = 2, /* ACPI 6.1 */ ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE = 3, /* ACPI 6.2 */ ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE = 4, /* ACPI 6.2 */ - ACPI_PCCT_TYPE_RESERVED = 5 /* 5 and greater are reserved */ + ACPI_PCCT_TYPE_HW_REG_COMM_SUBSPACE = 5, /* ACPI 6.4 */ + ACPI_PCCT_TYPE_RESERVED = 6 /* 6 and greater are reserved */ }; /* @@ -1299,6 +1342,24 @@ struct acpi_pcct_ext_pcc_slave { u64 error_status_mask; }; +/* 5: HW Registers based Communications Subspace */ + +struct acpi_pcct_hw_reg { + struct acpi_subtable_header header; + u16 version; + u64 base_address; + u64 length; + struct acpi_generic_address doorbell_register; + u64 doorbell_preserve; + u64 doorbell_write; + struct acpi_generic_address cmd_complete_register; + u64 cmd_complete_mask; + struct acpi_generic_address error_status_register; + u64 error_status_mask; + u32 nominal_latency; + u32 min_turnaround_time; +}; + /* Values for doorbell flags above */ #define ACPI_PCCT_INTERRUPT_POLARITY (1) @@ -1357,6 +1418,66 @@ struct acpi_pdtt_channel { /******************************************************************************* * + * PHAT - 
Platform Health Assessment Table (ACPI 6.4) + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_phat { + struct acpi_table_header header; /* Common ACPI table header */ +}; + +/* Common header for PHAT subtables that follow main table */ + +struct acpi_phat_header { + u16 type; + u16 length; + u8 revision; +}; + +/* Values for Type field above */ + +#define ACPI_PHAT_TYPE_FW_VERSION_DATA 0 +#define ACPI_PHAT_TYPE_FW_HEALTH_DATA 1 +#define ACPI_PHAT_TYPE_RESERVED 2 /* 0x02-0xFFFF are reserved */ + +/* + * PHAT subtables, correspond to Type in struct acpi_phat_header + */ + +/* 0: Firmware Version Data Record */ + +struct acpi_phat_version_data { + struct acpi_phat_header header; + u8 reserved[3]; + u32 element_count; +}; + +struct acpi_phat_version_element { + u8 guid[16]; + u64 version_value; + u32 producer_id; +}; + +/* 1: Firmware Health Data Record */ + +struct acpi_phat_health_data { + struct acpi_phat_header header; + u8 reserved[2]; + u8 health; + u8 device_guid[16]; + u32 device_specific_offset; /* Zero if no Device-specific data */ +}; + +/* Values for Health field above */ + +#define ACPI_PHAT_ERRORS_FOUND 0 +#define ACPI_PHAT_NO_ERRORS 1 +#define ACPI_PHAT_UNKNOWN_ERRORS 2 +#define ACPI_PHAT_ADVISORY 3 + +/******************************************************************************* + * * PMTT - Platform Memory Topology Table (ACPI 5.0) * Version 1 * @@ -1364,7 +1485,11 @@ struct acpi_pdtt_channel { struct acpi_table_pmtt { struct acpi_table_header header; /* Common ACPI table header */ - u32 reserved; + u32 memory_device_count; + /* + * Immediately followed by: + * MEMORY_DEVICE memory_device_struct[memory_device_count]; + */ }; /* Common header for PMTT subtables that follow main table */ @@ -1375,6 +1500,12 @@ struct acpi_pmtt_header { u16 length; u16 flags; u16 reserved2; + u32 memory_device_count; /* Zero means no memory device structs follow */ + /* + * Immediately followed by: + * u8 type_specific_data[] + * MEMORY_DEVICE memory_device_struct[memory_device_count]; + */ }; /* Values for Type field above */ @@ -1382,7 +1513,8 @@ struct acpi_pmtt_header { #define ACPI_PMTT_TYPE_SOCKET 0 #define ACPI_PMTT_TYPE_CONTROLLER 1 #define ACPI_PMTT_TYPE_DIMM 2 -#define ACPI_PMTT_TYPE_RESERVED 3 /* 0x03-0xFF are reserved */ +#define ACPI_PMTT_TYPE_RESERVED 3 /* 0x03-0xFE are reserved */ +#define ACPI_PMTT_TYPE_VENDOR 0xFF /* Values for Flags field above */ @@ -1401,37 +1533,43 @@ struct acpi_pmtt_socket { u16 socket_id; u16 reserved; }; + /* + * Immediately followed by: + * MEMORY_DEVICE memory_device_struct[memory_device_count]; + */ /* 1: Memory Controller subtable */ struct acpi_pmtt_controller { struct acpi_pmtt_header header; - u32 read_latency; - u32 write_latency; - u32 read_bandwidth; - u32 write_bandwidth; - u16 access_width; - u16 alignment; + u16 controller_id; u16 reserved; - u16 domain_count; -}; - -/* 1a: Proximity Domain substructure */ - -struct acpi_pmtt_domain { - u32 proximity_domain; }; + /* + * Immediately followed by: + * MEMORY_DEVICE memory_device_struct[memory_device_count]; + */ /* 2: Physical Component Identifier (DIMM) */ struct acpi_pmtt_physical_component { struct acpi_pmtt_header header; - u16 component_id; - u16 reserved; - u32 memory_size; u32 bios_handle; }; +/* 0xFF: Vendor Specific Data */ + +struct acpi_pmtt_vendor_specific { + struct acpi_pmtt_header header; + u8 type_uuid[16]; + u8 specific[]; + /* + * Immediately followed by: + * u8 vendor_specific_data[]; + * MEMORY_DEVICE 
memory_device_struct[memory_device_count]; + */ +}; + /******************************************************************************* * * PPTT - Processor Properties Topology Table (ACPI 6.2) @@ -1485,6 +1623,12 @@ struct acpi_pptt_cache { u16 line_size; }; +/* 1: Cache Type Structure for PPTT version 3 */ + +struct acpi_pptt_cache_v1 { + u32 cache_id; +}; + /* Flags */ #define ACPI_PPTT_SIZE_PROPERTY_VALID (1) /* Physical property valid */ @@ -1494,6 +1638,7 @@ struct acpi_pptt_cache { #define ACPI_PPTT_CACHE_TYPE_VALID (1<<4) /* Cache type valid */ #define ACPI_PPTT_WRITE_POLICY_VALID (1<<5) /* Write policy valid */ #define ACPI_PPTT_LINE_SIZE_VALID (1<<6) /* Line size valid */ +#define ACPI_PPTT_CACHE_ID_VALID (1<<7) /* Cache ID valid */ /* Masks for Attributes */ @@ -1679,6 +1824,7 @@ enum acpi_sdev_type { /* Values for flags above */ #define ACPI_SDEV_HANDOFF_TO_UNSECURE_OS (1) +#define ACPI_SDEV_SECURE_COMPONENTS_PRESENT (1<<1) /* * SDEV subtables @@ -1694,6 +1840,46 @@ struct acpi_sdev_namespace { u16 vendor_data_length; }; +struct acpi_sdev_secure_component { + u16 secure_component_offset; + u16 secure_component_length; +}; + +/* + * SDEV sub-subtables ("Components") for above + */ +struct acpi_sdev_component { + struct acpi_sdev_header header; +}; + +/* Values for sub-subtable type above */ + +enum acpi_sac_type { + ACPI_SDEV_TYPE_ID_COMPONENT = 0, + ACPI_SDEV_TYPE_MEM_COMPONENT = 1 +}; + +struct acpi_sdev_id_component { + struct acpi_sdev_header header; + u16 hardware_id_offset; + u16 hardware_id_length; + u16 subsystem_id_offset; + u16 subsystem_id_length; + u16 hardware_revision; + u8 hardware_rev_present; + u8 class_code_present; + u8 pci_base_class; + u8 pci_sub_class; + u8 pci_programming_xface; +}; + +struct acpi_sdev_mem_component { + struct acpi_sdev_header header; + u32 reserved; + u64 memory_base_address; + u64 memory_length; +}; + /* 1: PCIe Endpoint Device Based Device Structure */ struct acpi_sdev_pcie { diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h index df5f4b27f3aa..86903ac5bbc5 100644 --- a/include/acpi/actbl3.h +++ b/include/acpi/actbl3.h @@ -33,6 +33,7 @@ #define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */ #define ACPI_SIG_TPM2 "TPM2" /* Trusted Platform Module 2.0 H/W interface table */ #define ACPI_SIG_UEFI "UEFI" /* Uefi Boot Optimization Table */ +#define ACPI_SIG_VIOT "VIOT" /* Virtual I/O Translation Table */ #define ACPI_SIG_WAET "WAET" /* Windows ACPI Emulated devices Table */ #define ACPI_SIG_WDAT "WDAT" /* Watchdog Action Table */ #define ACPI_SIG_WDDT "WDDT" /* Watchdog Timer Description Table */ @@ -285,7 +286,8 @@ struct acpi_srat_generic_affinity { /* Flags for struct acpi_srat_generic_affinity */ -#define ACPI_SRAT_GENERIC_AFFINITY_ENABLED (1) /* 00: Use affinity structure */ +#define ACPI_SRAT_GENERIC_AFFINITY_ENABLED (1) /* 00: Use affinity structure */ +#define ACPI_SRAT_ARCHITECTURAL_TRANSACTIONS (1<<1) /* ACPI 6.4 */ /******************************************************************************* * @@ -485,6 +487,72 @@ struct acpi_table_uefi { /******************************************************************************* * + * VIOT - Virtual I/O Translation Table + * Version 1 + * + ******************************************************************************/ + +struct acpi_table_viot { + struct acpi_table_header header; /* Common ACPI table header */ + u16 node_count; + u16 node_offset; + u8 reserved[8]; +}; + +/* VIOT subtable header */ + +struct acpi_viot_header { + u8 type; + u8 reserved; + u16 
length; +}; + +/* Values for Type field above */ + +enum acpi_viot_node_type { + ACPI_VIOT_NODE_PCI_RANGE = 0x01, + ACPI_VIOT_NODE_MMIO = 0x02, + ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI = 0x03, + ACPI_VIOT_NODE_VIRTIO_IOMMU_MMIO = 0x04, + ACPI_VIOT_RESERVED = 0x05 +}; + +/* VIOT subtables */ + +struct acpi_viot_pci_range { + struct acpi_viot_header header; + u32 endpoint_start; + u16 segment_start; + u16 segment_end; + u16 bdf_start; + u16 bdf_end; + u16 output_node; + u8 reserved[6]; +}; + +struct acpi_viot_mmio { + struct acpi_viot_header header; + u32 endpoint; + u64 base_address; + u16 output_node; + u8 reserved[6]; +}; + +struct acpi_viot_virtio_iommu_pci { + struct acpi_viot_header header; + u16 segment; + u16 bdf; + u8 reserved[8]; +}; + +struct acpi_viot_virtio_iommu_mmio { + struct acpi_viot_header header; + u8 reserved[4]; + u64 base_address; +}; + +/******************************************************************************* + * * WAET - Windows ACPI Emulated devices Table * Version 1 * diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h index a5c2ca019a12..bc24388ce94e 100644 --- a/include/acpi/acuuid.h +++ b/include/acpi/acuuid.h @@ -68,5 +68,6 @@ #define UUID_DEVICE_GRAPHS "ab02a46b-74c7-45a2-bd68-f7d344ef2153" #define UUID_HIERARCHICAL_DATA_EXTENSION "dbb8e3e6-5886-4ba6-8795-1319f52a966b" #define UUID_CORESIGHT_GRAPH "3ecbc8b6-1d0e-4fb3-8107-e627f805c6cd" +#define UUID_USB4_CAPABILITIES "23a0d13a-26ab-486c-9c5f-0ffa525a575a" #endif /* __ACUUID_H__ */ diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h index c7fc4524e151..9f4985b4d64d 100644 --- a/include/acpi/cppc_acpi.h +++ b/include/acpi/cppc_acpi.h @@ -11,6 +11,7 @@ #define _CPPC_ACPI_H #include <linux/acpi.h> +#include <linux/cpufreq.h> #include <linux/types.h> #include <acpi/pcc.h> @@ -132,6 +133,7 @@ struct cppc_cpudata { cpumask_var_t shared_cpu_map; }; +#ifdef CONFIG_ACPI_CPPC_LIB extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf); extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs); extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls); @@ -142,5 +144,43 @@ extern unsigned int cppc_get_transition_latency(int cpu); extern bool cpc_ffh_supported(void); extern int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val); extern int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val); +#else /* !CONFIG_ACPI_CPPC_LIB */ +static inline int cppc_get_desired_perf(int cpunum, u64 *desired_perf) +{ + return -ENOTSUPP; +} +static inline int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs) +{ + return -ENOTSUPP; +} +static inline int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) +{ + return -ENOTSUPP; +} +static inline int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps) +{ + return -ENOTSUPP; +} +static inline bool acpi_cpc_valid(void) +{ + return false; +} +static inline unsigned int cppc_get_transition_latency(int cpu) +{ + return CPUFREQ_ETERNAL; +} +static inline bool cpc_ffh_supported(void) +{ + return false; +} +static inline int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val) +{ + return -ENOTSUPP; +} +static inline int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val) +{ + return -ENOTSUPP; +} +#endif /* !CONFIG_ACPI_CPPC_LIB */ #endif /* _CPPC_ACPI_H*/ diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h index 0cd4f61d4248..f6656be81760 100644 --- a/include/acpi/platform/acgcc.h +++ b/include/acpi/platform/acgcc.h @@ -61,7 +61,7 @@ typedef __builtin_va_list va_list; 
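The CONFIG_ACPI_CPPC_LIB=n stubs added to cppc_acpi.h above let callers compile unconditionally and simply see "not supported" at run time. A rough sketch of such a caller, assuming the usual struct cppc_perf_caps fields; the example_* name is hypothetical:

static int example_report_cppc_caps(unsigned int cpu)
{
	struct cppc_perf_caps caps;
	int ret;

	if (!acpi_cpc_valid())		/* stub returns false when CPPC is off */
		return -ENODEV;

	ret = cppc_get_perf_caps(cpu, &caps);
	if (ret)
		return ret;

	pr_info("CPU%u: highest_perf=%u lowest_perf=%u\n",
		cpu, caps.highest_perf, caps.lowest_perf);
	return 0;
}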
#endif /* - * Explictly mark intentional explicit fallthrough to silence + * Explicitly mark intentional explicit fallthrough to silence * -Wimplicit-fallthrough in GCC 7.1+. */ diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h index 9fdf21302fdf..0d132ee2a291 100644 --- a/include/asm-generic/bitops/find.h +++ b/include/asm-generic/bitops/find.h @@ -2,6 +2,13 @@ #ifndef _ASM_GENERIC_BITOPS_FIND_H_ #define _ASM_GENERIC_BITOPS_FIND_H_ +extern unsigned long _find_next_bit(const unsigned long *addr1, + const unsigned long *addr2, unsigned long nbits, + unsigned long start, unsigned long invert, unsigned long le); +extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size); +extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size); +extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size); + #ifndef find_next_bit /** * find_next_bit - find the next set bit in a memory region @@ -12,8 +19,22 @@ * Returns the bit number for the next set bit * If no bits are set, returns @size. */ -extern unsigned long find_next_bit(const unsigned long *addr, unsigned long - size, unsigned long offset); +static inline +unsigned long find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + if (small_const_nbits(size)) { + unsigned long val; + + if (unlikely(offset >= size)) + return size; + + val = *addr & GENMASK(size - 1, offset); + return val ? __ffs(val) : size; + } + + return _find_next_bit(addr, NULL, size, offset, 0UL, 0); +} #endif #ifndef find_next_and_bit @@ -27,9 +48,23 @@ extern unsigned long find_next_bit(const unsigned long *addr, unsigned long * Returns the bit number for the next set bit * If no bits are set, returns @size. */ -extern unsigned long find_next_and_bit(const unsigned long *addr1, +static inline +unsigned long find_next_and_bit(const unsigned long *addr1, const unsigned long *addr2, unsigned long size, - unsigned long offset); + unsigned long offset) +{ + if (small_const_nbits(size)) { + unsigned long val; + + if (unlikely(offset >= size)) + return size; + + val = *addr1 & *addr2 & GENMASK(size - 1, offset); + return val ? __ffs(val) : size; + } + + return _find_next_bit(addr1, addr2, size, offset, 0UL, 0); +} #endif #ifndef find_next_zero_bit @@ -42,8 +77,22 @@ extern unsigned long find_next_and_bit(const unsigned long *addr1, * Returns the bit number of the next zero bit * If no bits are zero, returns @size. */ -extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned - long size, unsigned long offset); +static inline +unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + if (small_const_nbits(size)) { + unsigned long val; + + if (unlikely(offset >= size)) + return size; + + val = *addr | ~GENMASK(size - 1, offset); + return val == ~0UL ? size : ffz(val); + } + + return _find_next_bit(addr, NULL, size, offset, ~0UL, 0); +} #endif #ifdef CONFIG_GENERIC_FIND_FIRST_BIT @@ -56,8 +105,17 @@ extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned * Returns the bit number of the first set bit. * If no bits are set, returns @size. */ -extern unsigned long find_first_bit(const unsigned long *addr, - unsigned long size); +static inline +unsigned long find_first_bit(const unsigned long *addr, unsigned long size) +{ + if (small_const_nbits(size)) { + unsigned long val = *addr & GENMASK(size - 1, 0); + + return val ? 
__ffs(val) : size; + } + + return _find_first_bit(addr, size); +} /** * find_first_zero_bit - find the first cleared bit in a memory region @@ -67,8 +125,17 @@ extern unsigned long find_first_bit(const unsigned long *addr, * Returns the bit number of the first cleared bit. * If no bits are zero, returns @size. */ -extern unsigned long find_first_zero_bit(const unsigned long *addr, - unsigned long size); +static inline +unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size) +{ + if (small_const_nbits(size)) { + unsigned long val = *addr | ~GENMASK(size - 1, 0); + + return val == ~0UL ? size : ffz(val); + } + + return _find_first_zero_bit(addr, size); +} #else /* CONFIG_GENERIC_FIND_FIRST_BIT */ #ifndef find_first_bit @@ -80,6 +147,27 @@ extern unsigned long find_first_zero_bit(const unsigned long *addr, #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ +#ifndef find_last_bit +/** + * find_last_bit - find the last set bit in a memory region + * @addr: The address to start the search at + * @size: The number of bits to search + * + * Returns the bit number of the last set bit, or size. + */ +static inline +unsigned long find_last_bit(const unsigned long *addr, unsigned long size) +{ + if (small_const_nbits(size)) { + unsigned long val = *addr & GENMASK(size - 1, 0); + + return val ? __fls(val) : size; + } + + return _find_last_bit(addr, size); +} +#endif + /** * find_next_clump8 - find next 8-bit clump with set bits in a memory region * @clump: location to store copy of found clump diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h index 188d3eba3ace..5a28629cbf4d 100644 --- a/include/asm-generic/bitops/le.h +++ b/include/asm-generic/bitops/le.h @@ -2,8 +2,10 @@ #ifndef _ASM_GENERIC_BITOPS_LE_H_ #define _ASM_GENERIC_BITOPS_LE_H_ +#include <asm-generic/bitops/find.h> #include <asm/types.h> #include <asm/byteorder.h> +#include <linux/swab.h> #if defined(__LITTLE_ENDIAN) @@ -32,13 +34,41 @@ static inline unsigned long find_first_zero_bit_le(const void *addr, #define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) #ifndef find_next_zero_bit_le -extern unsigned long find_next_zero_bit_le(const void *addr, - unsigned long size, unsigned long offset); +static inline +unsigned long find_next_zero_bit_le(const void *addr, unsigned + long size, unsigned long offset) +{ + if (small_const_nbits(size)) { + unsigned long val = *(const unsigned long *)addr; + + if (unlikely(offset >= size)) + return size; + + val = swab(val) | ~GENMASK(size - 1, offset); + return val == ~0UL ? size : ffz(val); + } + + return _find_next_bit(addr, NULL, size, offset, ~0UL, 1); +} #endif #ifndef find_next_bit_le -extern unsigned long find_next_bit_le(const void *addr, - unsigned long size, unsigned long offset); +static inline +unsigned long find_next_bit_le(const void *addr, unsigned + long size, unsigned long offset) +{ + if (small_const_nbits(size)) { + unsigned long val = *(const unsigned long *)addr; + + if (unlikely(offset >= size)) + return size; + + val = swab(val) & GENMASK(size - 1, offset); + return val ? 
__ffs(val) : size; + } + + return _find_next_bit(addr, NULL, size, offset, 0UL, 1); +} #endif #ifndef find_first_zero_bit_le diff --git a/include/asm-generic/bitsperlong.h b/include/asm-generic/bitsperlong.h index 3905c1c93dc2..1023e2a4bd37 100644 --- a/include/asm-generic/bitsperlong.h +++ b/include/asm-generic/bitsperlong.h @@ -23,4 +23,16 @@ #define BITS_PER_LONG_LONG 64 #endif +/* + * small_const_nbits(n) is true precisely when it is known at compile-time + * that BITMAP_SIZE(n) is 1, i.e. 1 <= n <= BITS_PER_LONG. This allows + * various bit/bitmap APIs to provide a fast inline implementation. Bitmaps + * of size 0 are very rare, and a compile-time-known-size 0 is most likely + * a sign of error. They will be handled correctly by the bit/bitmap APIs, + * but using the out-of-line functions, so that the inline implementations + * can unconditionally dereference the pointer(s). + */ +#define small_const_nbits(nbits) \ + (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0) + #endif /* __ASM_GENERIC_BITS_PER_LONG */ diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 76a10e0dca9f..b402494883b6 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -241,6 +241,22 @@ void __warn(const char *file, int line, void *caller, unsigned taint, # define WARN_ON_SMP(x) ({0;}) #endif +/* + * WARN_ON_FUNCTION_MISMATCH() warns if a value doesn't match a + * function address, and can be useful for catching issues with + * callback functions, for example. + * + * With CONFIG_CFI_CLANG, the warning is disabled because the + * compiler replaces function addresses taken in C code with + * local jump table addresses, which breaks cross-module function + * address equality. + */ +#if defined(CONFIG_CFI_CLANG) && defined(CONFIG_MODULES) +# define WARN_ON_FUNCTION_MISMATCH(x, fn) ({ 0; }) +#else +# define WARN_ON_FUNCTION_MISMATCH(x, fn) WARN_ON_ONCE((x) != (fn)) +#endif + #endif /* __ASSEMBLY__ */ #endif diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h index a3b98c86f077..cd905b44a630 100644 --- a/include/asm-generic/div64.h +++ b/include/asm-generic/div64.h @@ -8,12 +8,14 @@ * Optimization for constant divisors on 32-bit machines: * Copyright (C) 2006-2015 Nicolas Pitre * - * The semantics of do_div() are: + * The semantics of do_div() is, in C++ notation, observing that the name + * is a function-like macro and the n parameter has the semantics of a C++ + * reference: * - * uint32_t do_div(uint64_t *n, uint32_t base) + * uint32_t do_div(uint64_t &n, uint32_t base) * { - * uint32_t remainder = *n % base; - * *n = *n / base; + * uint32_t remainder = n % base; + * n = n / base; * return remainder; * } * diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h index 83448e837ded..515c3fb06ab3 100644 --- a/include/asm-generic/hyperv-tlfs.h +++ b/include/asm-generic/hyperv-tlfs.h @@ -89,9 +89,9 @@ #define HV_ACCESS_STATS BIT(8) #define HV_DEBUGGING BIT(11) #define HV_CPU_MANAGEMENT BIT(12) +#define HV_ENABLE_EXTENDED_HYPERCALLS BIT(20) #define HV_ISOLATION BIT(22) - /* * TSC page layout. 
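The reworded do_div() comment in the div64.h hunk above spells out the macro's semantics in C++-reference notation. The following is a standalone userspace model of those documented semantics only (not the kernel macro itself), purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Same observable behaviour as documented: divide in place, return remainder. */
static uint32_t do_div_model(uint64_t *n, uint32_t base)
{
	uint32_t remainder = (uint32_t)(*n % base);

	*n /= base;
	return remainder;
}

int main(void)
{
	uint64_t ns = 1000000007ULL;
	uint32_t rem = do_div_model(&ns, 1000);

	/* In kernel code this would read: rem = do_div(ns, 1000); */
	printf("quotient=%llu remainder=%u\n", (unsigned long long)ns, rem);
	return 0;
}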
*/ @@ -159,11 +159,18 @@ struct ms_hyperv_tsc_page { #define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af #define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0 +/* Extended hypercalls */ +#define HV_EXT_CALL_QUERY_CAPABILITIES 0x8001 +#define HV_EXT_CALL_MEMORY_HEAT_HINT 0x8003 + #define HV_FLUSH_ALL_PROCESSORS BIT(0) #define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1) #define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2) #define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3) +/* Extended capability bits */ +#define HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT BIT(8) + enum HV_GENERIC_SET_FORMAT { HV_GENERIC_SET_SPARSE_4K, HV_GENERIC_SET_ALL, @@ -220,6 +227,41 @@ enum HV_GENERIC_SET_FORMAT { #define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240) #define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30) +/* + * Define hypervisor message types. Some of the message types + * are x86/x64 specific, but there's no good way to separate + * them out into the arch-specific version of hyperv-tlfs.h + * because C doesn't provide a way to extend enum types. + * Keeping them all in the arch neutral hyperv-tlfs.h seems + * the least messy compromise. + */ +enum hv_message_type { + HVMSG_NONE = 0x00000000, + + /* Memory access messages. */ + HVMSG_UNMAPPED_GPA = 0x80000000, + HVMSG_GPA_INTERCEPT = 0x80000001, + + /* Timer notification messages. */ + HVMSG_TIMER_EXPIRED = 0x80000010, + + /* Error messages. */ + HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020, + HVMSG_UNRECOVERABLE_EXCEPTION = 0x80000021, + HVMSG_UNSUPPORTED_FEATURE = 0x80000022, + + /* Trace buffer complete messages. */ + HVMSG_EVENTLOG_BUFFERCOMPLETE = 0x80000040, + + /* Platform-specific processor intercept messages. */ + HVMSG_X64_IOPORT_INTERCEPT = 0x80010000, + HVMSG_X64_MSR_INTERCEPT = 0x80010001, + HVMSG_X64_CPUID_INTERCEPT = 0x80010002, + HVMSG_X64_EXCEPTION_INTERCEPT = 0x80010003, + HVMSG_X64_APIC_EOI = 0x80010004, + HVMSG_X64_LEGACY_FP_ERROR = 0x80010005 +}; + /* Define synthetic interrupt controller message flags. */ union hv_message_flags { __u8 asu8; @@ -373,8 +415,10 @@ struct hv_guest_mapping_flush { * by the bitwidth of "additional_pages" in union hv_gpa_page_range. */ #define HV_MAX_FLUSH_PAGES (2048) +#define HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB 0 +#define HV_GPA_PAGE_RANGE_PAGE_SIZE_1GB 1 -/* HvFlushGuestPhysicalAddressList hypercall */ +/* HvFlushGuestPhysicalAddressList, HvExtCallMemoryHeatHint hypercall */ union hv_gpa_page_range { u64 address_space; struct { @@ -382,6 +426,12 @@ union hv_gpa_page_range { u64 largepage:1; u64 basepfn:52; } page; + struct { + u64 reserved:12; + u64 page_size:1; + u64 reserved1:8; + u64 base_large_pfn:43; + }; }; /* @@ -739,4 +789,20 @@ struct hv_input_unmap_device_interrupt { #define HV_SOURCE_SHADOW_NONE 0x0 #define HV_SOURCE_SHADOW_BRIDGE_BUS_RANGE 0x1 +/* + * The whole argument should fit in a page to be able to pass to the hypervisor + * in one hypercall. 
+ */ +#define HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES \ + ((HV_HYP_PAGE_SIZE - sizeof(struct hv_memory_hint)) / \ + sizeof(union hv_gpa_page_range)) + +/* HvExtCallMemoryHeatHint hypercall */ +#define HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD 2 +struct hv_memory_hint { + u64 type:2; + u64 reserved:62; + union hv_gpa_page_range ranges[]; +} __packed; + #endif diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index c6af40ce03be..e93375c710b9 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -942,7 +942,9 @@ static inline void *phys_to_virt(unsigned long address) * * ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes * for specific drivers if the architecture choses to implement them. If they - * are not implemented we fall back to plain ioremap. + * are not implemented we fall back to plain ioremap. Conversely, ioremap_np() + * can provide stricter non-posted write semantics if the architecture + * implements them. */ #ifndef CONFIG_MMU #ifndef ioremap @@ -995,6 +997,23 @@ static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size) } #endif +/* + * ioremap_np needs an explicit architecture implementation, as it + * requests stronger semantics than regular ioremap(). Portable drivers + * should instead use one of the higher-level abstractions, like + * devm_ioremap_resource(), to choose the correct variant for any given + * device and bus. Portable drivers with a good reason to want non-posted + * write semantics should always provide an ioremap() fallback in case + * ioremap_np() is not available. + */ +#ifndef ioremap_np +#define ioremap_np ioremap_np +static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size) +{ + return NULL; +} +#endif + #ifdef CONFIG_HAS_IOPORT_MAP #ifndef CONFIG_GENERIC_IOMAP #ifndef ioport_map @@ -1045,17 +1064,6 @@ static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p) #endif #endif /* CONFIG_GENERIC_IOMAP */ -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#ifndef xlate_dev_kmem_ptr -#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr -static inline void *xlate_dev_kmem_ptr(void *addr) -{ - return addr; -} -#endif - #ifndef xlate_dev_mem_ptr #define xlate_dev_mem_ptr xlate_dev_mem_ptr static inline void *xlate_dev_mem_ptr(phys_addr_t addr) diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h index 649224664969..9b3eb6d86200 100644 --- a/include/asm-generic/iomap.h +++ b/include/asm-generic/iomap.h @@ -101,6 +101,15 @@ extern void ioport_unmap(void __iomem *); #define ioremap_wt ioremap #endif +#ifndef ARCH_HAS_IOREMAP_NP +/* See the comment in asm-generic/io.h about ioremap_np(). */ +#define ioremap_np ioremap_np +static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size) +{ + return NULL; +} +#endif + #ifdef CONFIG_PCI /* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */ struct pci_dev; diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h index dff58a3db5d5..9a000ba2bb75 100644 --- a/include/asm-generic/mshyperv.h +++ b/include/asm-generic/mshyperv.h @@ -27,7 +27,7 @@ struct ms_hyperv_info { u32 features; - u32 features_b; + u32 priv_high; u32 misc_features; u32 hints; u32 nested_features; @@ -41,6 +41,53 @@ extern struct ms_hyperv_info ms_hyperv; extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr); extern u64 hv_do_fast_hypercall8(u16 control, u64 input8); +/* Helper functions that provide a consistent pattern for checking Hyper-V hypercall status. 
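The io.h comment above asks portable drivers that want non-posted write semantics to fall back to plain ioremap() when ioremap_np() is unavailable (the generic stub returns NULL). A minimal sketch of that pattern; example_map_regs() is a hypothetical helper:

static void __iomem *example_map_regs(struct resource *res)
{
	void __iomem *regs;

	/* Prefer non-posted writes where the architecture implements them. */
	regs = ioremap_np(res->start, resource_size(res));
	if (!regs)
		regs = ioremap(res->start, resource_size(res));

	return regs;	/* NULL if neither mapping could be created */
}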
*/ +static inline int hv_result(u64 status) +{ + return status & HV_HYPERCALL_RESULT_MASK; +} + +static inline bool hv_result_success(u64 status) +{ + return hv_result(status) == HV_STATUS_SUCCESS; +} + +static inline unsigned int hv_repcomp(u64 status) +{ + /* Bits [43:32] of status have 'Reps completed' data. */ + return (status & HV_HYPERCALL_REP_COMP_MASK) >> + HV_HYPERCALL_REP_COMP_OFFSET; +} + +/* + * Rep hypercalls. Callers of this functions are supposed to ensure that + * rep_count and varhead_size comply with Hyper-V hypercall definition. + */ +static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size, + void *input, void *output) +{ + u64 control = code; + u64 status; + u16 rep_comp; + + control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET; + control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET; + + do { + status = hv_do_hypercall(control, input, output); + if (!hv_result_success(status)) + return status; + + rep_comp = hv_repcomp(status); + + control &= ~HV_HYPERCALL_REP_START_MASK; + control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET; + + touch_nmi_watchdog(); + } while (rep_comp < rep_count); + + return status; +} /* Generate the guest OS identifier as described in the Hyper-V TLFS */ static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version, @@ -56,7 +103,6 @@ static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version, return guest_id; } - /* Free the message slot and signal end-of-message if required */ static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type) { @@ -88,14 +134,14 @@ static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type) * possibly deliver another msg from the * hypervisor */ - hv_signal_eom(); + hv_set_register(HV_REGISTER_EOM, 0); } } -int hv_setup_vmbus_irq(int irq, void (*handler)(void)); -void hv_remove_vmbus_irq(void); -void hv_enable_vmbus_irq(void); -void hv_disable_vmbus_irq(void); +void hv_setup_vmbus_handler(void (*handler)(void)); +void hv_remove_vmbus_handler(void); +void hv_setup_stimer0_handler(void (*handler)(void)); +void hv_remove_stimer0_handler(void); void hv_setup_kexec_handler(void (*handler)(void)); void hv_remove_kexec_handler(void); @@ -103,6 +149,7 @@ void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs)); void hv_remove_crash_handler(void); extern int vmbus_interrupt; +extern int vmbus_irq; #if IS_ENABLED(CONFIG_HYPERV) /* @@ -117,6 +164,10 @@ extern u32 hv_max_vp_index; /* Sentinel value for an uninitialized entry in hv_vp_index array */ #define VP_INVAL U32_MAX +void *hv_alloc_hyperv_page(void); +void *hv_alloc_hyperv_zeroed_page(void); +void hv_free_hyperv_page(unsigned long addr); + /** * hv_cpu_number_to_vp_number() - Map CPU to VP. 
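A small sketch of how the hv_result()/hv_result_success() helpers introduced above are meant to be used around a plain hypercall; the example_* wrapper and its arguments are hypothetical:

static int example_issue_hypercall(u64 control, void *input, void *output)
{
	u64 status = hv_do_hypercall(control, input, output);

	if (!hv_result_success(status)) {
		pr_err("hypercall 0x%llx failed: status %d\n",
		       control, hv_result(status));
		return -EIO;
	}

	return 0;
}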
* @cpu_number: CPU number in Linux terms @@ -169,21 +220,16 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset, } void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die); -void hyperv_report_panic_msg(phys_addr_t pa, size_t size); bool hv_is_hyperv_initialized(void); bool hv_is_hibernation_supported(void); enum hv_isolation_type hv_get_isolation_type(void); bool hv_is_isolation_supported(void); void hyperv_cleanup(void); +bool hv_query_ext_cap(u64 cap_query); #else /* CONFIG_HYPERV */ static inline bool hv_is_hyperv_initialized(void) { return false; } static inline bool hv_is_hibernation_supported(void) { return false; } static inline void hyperv_cleanup(void) {} #endif /* CONFIG_HYPERV */ -#if IS_ENABLED(CONFIG_HYPERV) -extern int hv_setup_stimer0_irq(int *irq, int *vector, void (*handler)(void)); -extern void hv_remove_stimer0_irq(int irq); -#endif - #endif diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 0331d5d49551..40a9c101565e 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -544,6 +544,22 @@ . = ALIGN((align)); \ __end_rodata = .; + +/* + * .text..L.cfi.jumptable.* contain Control-Flow Integrity (CFI) + * jump table entries. + */ +#ifdef CONFIG_CFI_CLANG +#define TEXT_CFI_JT \ + . = ALIGN(PMD_SIZE); \ + __cfi_jt_start = .; \ + *(.text..L.cfi.jumptable .text..L.cfi.jumptable.*) \ + . = ALIGN(PMD_SIZE); \ + __cfi_jt_end = .; +#else +#define TEXT_CFI_JT +#endif + /* * Non-instrumentable text section */ @@ -570,6 +586,7 @@ NOINSTR_TEXT \ *(.text..refcount) \ *(.ref.text) \ + TEXT_CFI_JT \ MEM_KEEP(init.text*) \ MEM_KEEP(exit.text*) \ @@ -974,7 +991,8 @@ * keep any .init_array.* sections. * https://bugs.llvm.org/show_bug.cgi?id=46478 */ -#if defined(CONFIG_GCOV_KERNEL) || defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN) +#if defined(CONFIG_GCOV_KERNEL) || defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN) || \ + defined(CONFIG_CFI_CLANG) # ifdef CONFIG_CONSTRUCTORS # define SANITIZER_DISCARDS \ *(.eh_frame) diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h index 1d68d5613dae..73c7139c866f 100644 --- a/include/clocksource/arm_arch_timer.h +++ b/include/clocksource/arm_arch_timer.h @@ -32,6 +32,7 @@ enum arch_timer_ppi_nr { ARCH_TIMER_PHYS_NONSECURE_PPI, ARCH_TIMER_VIRT_PPI, ARCH_TIMER_HYP_PPI, + ARCH_TIMER_HYP_VIRT_PPI, ARCH_TIMER_MAX_TIMER_PPI }; diff --git a/include/clocksource/hyperv_timer.h b/include/clocksource/hyperv_timer.h index 34eef083c988..b6774aa5a4b8 100644 --- a/include/clocksource/hyperv_timer.h +++ b/include/clocksource/hyperv_timer.h @@ -21,8 +21,7 @@ #define HV_MIN_DELTA_TICKS 1 /* Routines called by the VMbus driver */ -extern int hv_stimer_alloc(void); -extern void hv_stimer_free(void); +extern int hv_stimer_alloc(bool have_percpu_irqs); extern int hv_stimer_cleanup(unsigned int cpu); extern void hv_stimer_legacy_init(unsigned int cpu, int sint); extern void hv_stimer_legacy_cleanup(unsigned int cpu); diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h index fcde59c65a81..cb3d6b1c655d 100644 --- a/include/crypto/acompress.h +++ b/include/crypto/acompress.h @@ -165,6 +165,8 @@ static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req) * crypto_free_acomp() -- free ACOMPRESS tfm handle * * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() + * + * If @tfm is a NULL or error pointer, this function does nothing. 
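The new crypto_free_acomp() kernel-doc line above simplifies error paths, since freeing a NULL or error-pointer handle is a no-op. A hedged sketch; the "deflate" algorithm name is just an example:

static int example_try_deflate(void)
{
	struct crypto_acomp *tfm = crypto_alloc_acomp("deflate", 0, 0);

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ... allocate requests and compress data here ... */

	crypto_free_acomp(tfm);	/* would also be safe on NULL or an ERR_PTR */
	return 0;
}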
*/ static inline void crypto_free_acomp(struct crypto_acomp *tfm) { diff --git a/include/crypto/aead.h b/include/crypto/aead.h index fcc12c593ef8..e728469c4ccc 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -185,6 +185,8 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) /** * crypto_free_aead() - zeroize and free aead handle * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_aead(struct crypto_aead *tfm) { diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h index 1d3aa252caba..5764b46bd1ec 100644 --- a/include/crypto/akcipher.h +++ b/include/crypto/akcipher.h @@ -174,6 +174,8 @@ static inline struct crypto_akcipher *crypto_akcipher_reqtfm( * crypto_free_akcipher() - free AKCIPHER tfm handle * * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() + * + * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_akcipher(struct crypto_akcipher *tfm) { diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h index 3a1c72fdb7cf..dabaee698718 100644 --- a/include/crypto/chacha.h +++ b/include/crypto/chacha.h @@ -47,13 +47,18 @@ static inline void hchacha_block(const u32 *state, u32 *out, int nrounds) hchacha_block_generic(state, out, nrounds); } -void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv); -static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv) +static inline void chacha_init_consts(u32 *state) { state[0] = 0x61707865; /* "expa" */ state[1] = 0x3320646e; /* "nd 3" */ state[2] = 0x79622d32; /* "2-by" */ state[3] = 0x6b206574; /* "te k" */ +} + +void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv); +static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv) +{ + chacha_init_consts(state); state[4] = key[0]; state[5] = key[1]; state[6] = key[2]; diff --git a/include/crypto/ecc_curve.h b/include/crypto/ecc_curve.h new file mode 100644 index 000000000000..70964781eb68 --- /dev/null +++ b/include/crypto/ecc_curve.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 HiSilicon */ + +#ifndef _CRYTO_ECC_CURVE_H +#define _CRYTO_ECC_CURVE_H + +#include <linux/types.h> + +/** + * struct ecc_point - elliptic curve point in affine coordinates + * + * @x: X coordinate in vli form. + * @y: Y coordinate in vli form. + * @ndigits: Length of vlis in u64 qwords. + */ +struct ecc_point { + u64 *x; + u64 *y; + u8 ndigits; +}; + +/** + * struct ecc_curve - definition of elliptic curve + * + * @name: Short name of the curve. + * @g: Generator point of the curve. + * @p: Prime number, if Barrett's reduction is used for this curve + * pre-calculated value 'mu' is appended to the @p after ndigits. + * Use of Barrett's reduction is heuristically determined in + * vli_mmod_fast(). + * @n: Order of the curve group. + * @a: Curve parameter a. + * @b: Curve parameter b. 
+ */ +struct ecc_curve { + char *name; + struct ecc_point g; + u64 *p; + u64 *n; + u64 *a; + u64 *b; +}; + +/** + * ecc_get_curve() - get elliptic curve; + * @curve_id: Curves IDs: + * defined in 'include/crypto/ecdh.h'; + * + * Returns curve if get curve succssful, NULL otherwise + */ +const struct ecc_curve *ecc_get_curve(unsigned int curve_id); + +/** + * ecc_get_curve25519() - get curve25519 curve; + * + * Returns curve25519 + */ +const struct ecc_curve *ecc_get_curve25519(void); + +#endif diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h index a5b805b5526d..a9f98078d29c 100644 --- a/include/crypto/ecdh.h +++ b/include/crypto/ecdh.h @@ -25,16 +25,15 @@ /* Curves IDs */ #define ECC_CURVE_NIST_P192 0x0001 #define ECC_CURVE_NIST_P256 0x0002 +#define ECC_CURVE_NIST_P384 0x0003 /** * struct ecdh - define an ECDH private key * - * @curve_id: ECC curve the key is based on. * @key: Private ECDH key * @key_size: Size of the private ECDH key */ struct ecdh { - unsigned short curve_id; char *key; unsigned short key_size; }; diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 13f8a6a54ca8..b2bc1e46e86a 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -281,6 +281,8 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm) /** * crypto_free_ahash() - zeroize and free the ahash handle * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_ahash(struct crypto_ahash *tfm) { @@ -724,6 +726,8 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm) /** * crypto_free_shash() - zeroize and free the message digest handle * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_shash(struct crypto_shash *tfm) { diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h index 064e52ca5248..196aa769f296 100644 --- a/include/crypto/internal/poly1305.h +++ b/include/crypto/internal/poly1305.h @@ -18,7 +18,8 @@ * only the ε-almost-∆-universal hash function (not the full MAC) is computed. */ -void poly1305_core_setkey(struct poly1305_core_key *key, const u8 *raw_key); +void poly1305_core_setkey(struct poly1305_core_key *key, + const u8 raw_key[POLY1305_BLOCK_SIZE]); static inline void poly1305_core_init(struct poly1305_state *state) { *state = (struct poly1305_state){}; diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h index 88b591215d5c..cccceadc164b 100644 --- a/include/crypto/kpp.h +++ b/include/crypto/kpp.h @@ -154,6 +154,8 @@ static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags) * crypto_free_kpp() - free KPP tfm handle * * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() + * + * If @tfm is a NULL or error pointer, this function does nothing. 
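A short sketch of the new ecc_get_curve() helper documented earlier in this hunk, combined with the NIST P-384 curve ID added to ecdh.h; example_check_curve() is a hypothetical caller:

static int example_check_curve(unsigned int curve_id)
{
	const struct ecc_curve *curve = ecc_get_curve(curve_id);

	if (!curve)			/* NULL when the curve is unknown */
		return -EINVAL;

	pr_debug("curve %s, %u 64-bit digits\n", curve->name, curve->g.ndigits);
	return 0;
}

/* e.g. example_check_curve(ECC_CURVE_NIST_P384); */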
*/ static inline void crypto_free_kpp(struct crypto_kpp *tfm) { diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h index f1f67fc749cf..090692ec3bc7 100644 --- a/include/crypto/poly1305.h +++ b/include/crypto/poly1305.h @@ -58,8 +58,10 @@ struct poly1305_desc_ctx { }; }; -void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key); -void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key); +void poly1305_init_arch(struct poly1305_desc_ctx *desc, + const u8 key[POLY1305_KEY_SIZE]); +void poly1305_init_generic(struct poly1305_desc_ctx *desc, + const u8 key[POLY1305_KEY_SIZE]); static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key) { diff --git a/include/crypto/rng.h b/include/crypto/rng.h index 8b4b844b4eef..17bb3673d3c1 100644 --- a/include/crypto/rng.h +++ b/include/crypto/rng.h @@ -111,6 +111,8 @@ static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm) /** * crypto_free_rng() - zeroize and free RNG handle * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_rng(struct crypto_rng *tfm) { diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 6a733b171a5d..ef0fc9ed4342 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -196,6 +196,8 @@ static inline struct crypto_tfm *crypto_skcipher_tfm( /** * crypto_free_skcipher() - zeroize and free cipher handle * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_skcipher(struct crypto_skcipher *tfm) { diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h index cde3c8c9f20c..336e36506910 100644 --- a/include/drm/amd_asic_type.h +++ b/include/drm/amd_asic_type.h @@ -51,13 +51,14 @@ enum amd_asic_type { CHIP_RAVEN, /* 22 */ CHIP_ARCTURUS, /* 23 */ CHIP_RENOIR, /* 24 */ - CHIP_NAVI10, /* 25 */ - CHIP_NAVI14, /* 26 */ - CHIP_NAVI12, /* 27 */ - CHIP_SIENNA_CICHLID, /* 28 */ - CHIP_NAVY_FLOUNDER, /* 29 */ - CHIP_VANGOGH, /* 30 */ - CHIP_DIMGREY_CAVEFISH, /* 31 */ + CHIP_ALDEBARAN, /* 25 */ + CHIP_NAVI10, /* 26 */ + CHIP_NAVI14, /* 27 */ + CHIP_NAVI12, /* 28 */ + CHIP_SIENNA_CICHLID, /* 29 */ + CHIP_NAVY_FLOUNDER, /* 30 */ + CHIP_VANGOGH, /* 31 */ + CHIP_DIMGREY_CAVEFISH, /* 32 */ CHIP_LAST, }; diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index ce7023e9115d..ac5a28eff2c8 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -66,6 +66,8 @@ * * For an implementation of how to use this look at * drm_atomic_helper_setup_commit() from the atomic helper library. + * + * See also drm_crtc_commit_wait(). 
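The drm_crtc_commit kernel-doc now points at drm_crtc_commit_wait(), declared further down in this hunk. A hedged sketch of waiting on a commit reference a driver already holds:

static void example_wait_for_commit(struct drm_crtc_commit *commit)
{
	int ret = drm_crtc_commit_wait(commit);

	if (ret)
		pr_err("atomic commit did not complete: %d\n", ret);

	drm_crtc_commit_put(commit);	/* drop the reference we were given */
}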
*/ struct drm_crtc_commit { /** @@ -436,6 +438,8 @@ static inline void drm_crtc_commit_put(struct drm_crtc_commit *commit) kref_put(&commit->ref, __drm_crtc_commit_free); } +int drm_crtc_commit_wait(struct drm_crtc_commit *commit); + struct drm_atomic_state * __must_check drm_atomic_state_alloc(struct drm_device *dev); void drm_atomic_state_clear(struct drm_atomic_state *state); diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h index 77941efb5426..ec64d141f578 100644 --- a/include/drm/drm_displayid.h +++ b/include/drm/drm_displayid.h @@ -22,6 +22,10 @@ #ifndef DRM_DISPLAYID_H #define DRM_DISPLAYID_H +#include <linux/types.h> + +struct edid; + #define DATA_BLOCK_PRODUCT_ID 0x00 #define DATA_BLOCK_DISPLAY_PARAMETERS 0x01 #define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02 @@ -52,7 +56,7 @@ #define PRODUCT_TYPE_REPEATER 5 #define PRODUCT_TYPE_DIRECT_DRIVE 6 -struct displayid_hdr { +struct displayid_header { u8 rev; u8 bytes; u8 prod_id; @@ -92,12 +96,22 @@ struct displayid_detailed_timing_block { struct displayid_detailed_timings_1 timings[]; }; -#define for_each_displayid_db(displayid, block, idx, length) \ - for ((block) = (struct displayid_block *)&(displayid)[idx]; \ - (idx) + sizeof(struct displayid_block) <= (length) && \ - (idx) + sizeof(struct displayid_block) + (block)->num_bytes <= (length) && \ - (block)->num_bytes > 0; \ - (idx) += sizeof(struct displayid_block) + (block)->num_bytes, \ - (block) = (struct displayid_block *)&(displayid)[idx]) +/* DisplayID iteration */ +struct displayid_iter { + const struct edid *edid; + + const u8 *section; + int length; + int idx; + int ext_index; +}; + +void displayid_iter_edid_begin(const struct edid *edid, + struct displayid_iter *iter); +const struct displayid_block * +__displayid_iter_next(struct displayid_iter *iter); +#define displayid_iter_for_each(__block, __iter) \ + while (((__block) = __displayid_iter_next(__iter))) +void displayid_iter_end(struct displayid_iter *iter); #endif diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index edffd1dcca3e..1e85c2021f2f 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -1016,6 +1016,11 @@ struct drm_device; #define DP_EDP_REGIONAL_BACKLIGHT_BASE 0x740 /* eDP 1.4 */ #define DP_EDP_REGIONAL_BACKLIGHT_0 0x741 /* eDP 1.4 */ +#define DP_EDP_MSO_LINK_CAPABILITIES 0x7a4 /* eDP 1.4 */ +# define DP_EDP_MSO_NUMBER_OF_LINKS_MASK (7 << 0) +# define DP_EDP_MSO_NUMBER_OF_LINKS_SHIFT 0 +# define DP_EDP_MSO_INDEPENDENT_LINK_BIT (1 << 3) + /* Sideband MSG Buffers */ #define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */ #define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */ @@ -1171,6 +1176,7 @@ struct drm_device; # define DP_PCON_ENABLE_MAX_BW_48GBPS 6 # define DP_PCON_ENABLE_SOURCE_CTL_MODE (1 << 3) # define DP_PCON_ENABLE_CONCURRENT_LINK (1 << 4) +# define DP_PCON_ENABLE_SEQUENTIAL_LINK (0 << 4) # define DP_PCON_ENABLE_LINK_FRL_MODE (1 << 5) # define DP_PCON_ENABLE_HPD_READY (1 << 6) # define DP_PCON_ENABLE_HDMI_LINK (1 << 7) @@ -1185,6 +1191,7 @@ struct drm_device; # define DP_PCON_FRL_BW_MASK_40GBPS (1 << 4) # define DP_PCON_FRL_BW_MASK_48GBPS (1 << 5) # define DP_PCON_FRL_LINK_TRAIN_EXTENDED (1 << 6) +# define DP_PCON_FRL_LINK_TRAIN_NORMAL (0 << 6) /* PCON HDMI LINK STATUS */ #define DP_PCON_HDMI_TX_LINK_STATUS 0x303B @@ -1839,34 +1846,34 @@ struct drm_dp_aux_cec { * @crc_count: counter of captured frame CRCs * @transfer: transfers a message representing a single AUX transaction * - * The .dev field should be set to a pointer to the 
device that implements - * the AUX channel. + * The @dev field should be set to a pointer to the device that implements the + * AUX channel. * - * The .name field may be used to specify the name of the I2C adapter. If set to - * NULL, dev_name() of .dev will be used. + * The @name field may be used to specify the name of the I2C adapter. If set to + * %NULL, dev_name() of @dev will be used. * - * Drivers provide a hardware-specific implementation of how transactions - * are executed via the .transfer() function. A pointer to a drm_dp_aux_msg + * Drivers provide a hardware-specific implementation of how transactions are + * executed via the @transfer() function. A pointer to a &drm_dp_aux_msg * structure describing the transaction is passed into this function. Upon - * success, the implementation should return the number of payload bytes - * that were transferred, or a negative error-code on failure. Helpers - * propagate errors from the .transfer() function, with the exception of - * the -EBUSY error, which causes a transaction to be retried. On a short, - * helpers will return -EPROTO to make it simpler to check for failure. + * success, the implementation should return the number of payload bytes that + * were transferred, or a negative error-code on failure. Helpers propagate + * errors from the @transfer() function, with the exception of the %-EBUSY + * error, which causes a transaction to be retried. On a short, helpers will + * return %-EPROTO to make it simpler to check for failure. * * An AUX channel can also be used to transport I2C messages to a sink. A - * typical application of that is to access an EDID that's present in the - * sink device. The .transfer() function can also be used to execute such - * transactions. The drm_dp_aux_register() function registers an I2C - * adapter that can be passed to drm_probe_ddc(). Upon removal, drivers - * should call drm_dp_aux_unregister() to remove the I2C adapter. - * The I2C adapter uses long transfers by default; if a partial response is - * received, the adapter will drop down to the size given by the partial - * response for this transaction only. + * typical application of that is to access an EDID that's present in the sink + * device. The @transfer() function can also be used to execute such + * transactions. The drm_dp_aux_register() function registers an I2C adapter + * that can be passed to drm_probe_ddc(). Upon removal, drivers should call + * drm_dp_aux_unregister() to remove the I2C adapter. The I2C adapter uses long + * transfers by default; if a partial response is received, the adapter will + * drop down to the size given by the partial response for this transaction + * only. * - * Note that the aux helper code assumes that the .transfer() function - * only modifies the reply field of the drm_dp_aux_msg structure. The - * retry logic and i2c helpers assume this is the case. + * Note that the aux helper code assumes that the @transfer() function only + * modifies the reply field of the &drm_dp_aux_msg structure. The retry logic + * and i2c helpers assume this is the case. 
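As a rough illustration of the contract described above, a driver's &drm_dp_aux.transfer hook could be structured like the sketch below; the foo_dp names and the foo_dp_xfer() helper are hypothetical stand-ins for the hardware access.

static ssize_t foo_dp_aux_transfer(struct drm_dp_aux *aux,
				   struct drm_dp_aux_msg *msg)
{
	struct foo_dp *dp = container_of(aux, struct foo_dp, aux);
	ssize_t ret;

	/* Hand the request to the (hypothetical) hardware backend. */
	ret = foo_dp_xfer(dp, msg->request, msg->address,
			  msg->buffer, msg->size);
	if (ret < 0)
		return ret;	/* -EBUSY is retried by the helpers */

	/* Per the note above, only the reply field may be modified. */
	msg->reply = DP_AUX_NATIVE_REPLY_ACK;

	/* Number of payload bytes actually transferred. */
	return ret;
}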
*/ struct drm_dp_aux { const char *name; @@ -2149,9 +2156,9 @@ int drm_dp_get_pcon_max_frl_bw(const u8 dpcd[DP_RECEIVER_CAP_SIZE], int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd); bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux); int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps, - bool concurrent_mode); + u8 frl_mode); int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask, - bool extended_train_mode); + u8 frl_type); int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux); int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux); diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 827838e0a97e..b439ae1921b8 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -74,7 +74,7 @@ enum drm_driver_feature { * @DRIVER_ATOMIC: * * Driver supports the full atomic modesetting userspace API. Drivers - * which only use atomic internally, but do not the support the full + * which only use atomic internally, but do not support the full * userspace API (e.g. not all properties converted to atomic, or * multi-plane updates are not guaranteed to be tear-free) should not * set this flag. diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index a158f585f658..759328a5eeb2 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -543,5 +543,8 @@ struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, struct drm_display_mode * drm_display_mode_from_cea_vic(struct drm_device *dev, u8 video_code); +const u8 *drm_find_edid_extension(const struct edid *edid, + int ext_id, int *ext_index); + #endif /* __DRM_EDID_H__ */ diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h index 5bf78b5bcb2b..6e91a0280f31 100644 --- a/include/drm/drm_encoder.h +++ b/include/drm/drm_encoder.h @@ -225,6 +225,24 @@ void *__drmm_encoder_alloc(struct drm_device *dev, encoder_type, name, ##__VA_ARGS__)) /** + * drmm_plain_encoder_alloc - Allocate and initialize an encoder + * @dev: drm device + * @funcs: callbacks for this encoder (optional) + * @encoder_type: user visible type of the encoder + * @name: printf style format string for the encoder name, or NULL for default name + * + * This is a simplified version of drmm_encoder_alloc(), which only allocates + * and returns a struct drm_encoder instance, with no subclassing. + * + * Returns: + * Pointer to the new drm_encoder struct, or ERR_PTR on failure. + */ +#define drmm_plain_encoder_alloc(dev, funcs, encoder_type, name, ...) 
\ + ((struct drm_encoder *) \ + __drmm_encoder_alloc(dev, sizeof(struct drm_encoder), \ + 0, funcs, encoder_type, name, ##__VA_ARGS__)) + +/** * drm_encoder_index - find the index of a registered encoder * @encoder: encoder to find index for * diff --git a/include/drm/drm_gem_atomic_helper.h b/include/drm/drm_gem_atomic_helper.h new file mode 100644 index 000000000000..cfc5adee3d13 --- /dev/null +++ b/include/drm/drm_gem_atomic_helper.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef __DRM_GEM_ATOMIC_HELPER_H__ +#define __DRM_GEM_ATOMIC_HELPER_H__ + +#include <linux/dma-buf-map.h> + +#include <drm/drm_plane.h> + +struct drm_simple_display_pipe; + +/* + * Plane Helpers + */ + +int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state); +int drm_gem_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); + +/* + * Helpers for planes with shadow buffers + */ + +/** + * struct drm_shadow_plane_state - plane state for planes with shadow buffers + * + * For planes that use a shadow buffer, struct drm_shadow_plane_state + * provides the regular plane state plus mappings of the shadow buffer + * into kernel address space. + */ +struct drm_shadow_plane_state { + /** @base: plane state */ + struct drm_plane_state base; + + /* Transitional state - do not export or duplicate */ + + /** + * @map: Mappings of the plane's framebuffer BOs in to kernel address space + * + * The memory mappings stored in map should be established in the plane's + * prepare_fb callback and removed in the cleanup_fb callback. + */ + struct dma_buf_map map[4]; +}; + +/** + * to_drm_shadow_plane_state - upcasts from struct drm_plane_state + * @state: the plane state + */ +static inline struct drm_shadow_plane_state * +to_drm_shadow_plane_state(struct drm_plane_state *state) +{ + return container_of(state, struct drm_shadow_plane_state, base); +} + +void drm_gem_reset_shadow_plane(struct drm_plane *plane); +struct drm_plane_state *drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane); +void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane, + struct drm_plane_state *plane_state); + +/** + * DRM_GEM_SHADOW_PLANE_FUNCS - + * Initializes struct drm_plane_funcs for shadow-buffered planes + * + * Drivers may use GEM BOs as shadow buffers over the framebuffer memory. This + * macro initializes struct drm_plane_funcs to use the rsp helper functions. + */ +#define DRM_GEM_SHADOW_PLANE_FUNCS \ + .reset = drm_gem_reset_shadow_plane, \ + .atomic_duplicate_state = drm_gem_duplicate_shadow_plane_state, \ + .atomic_destroy_state = drm_gem_destroy_shadow_plane_state + +int drm_gem_prepare_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state); +void drm_gem_cleanup_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state); + +/** + * DRM_GEM_SHADOW_PLANE_HELPER_FUNCS - + * Initializes struct drm_plane_helper_funcs for shadow-buffered planes + * + * Drivers may use GEM BOs as shadow buffers over the framebuffer memory. This + * macro initializes struct drm_plane_helper_funcs to use the rsp helper + * functions. 
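A minimal sketch of how a driver could plug the shadow-plane helpers into its plane funcs, assuming hypothetical foo_ names; DRM_GEM_SHADOW_PLANE_FUNCS fills in the reset/duplicate/destroy state hooks declared above.

static const struct drm_plane_funcs foo_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy	= drm_plane_cleanup,
	DRM_GEM_SHADOW_PLANE_FUNCS,
};

The mappings set up by drm_gem_prepare_shadow_fb() are then reachable from the plane state via to_drm_shadow_plane_state()->map.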
+ */ +#define DRM_GEM_SHADOW_PLANE_HELPER_FUNCS \ + .prepare_fb = drm_gem_prepare_shadow_fb, \ + .cleanup_fb = drm_gem_cleanup_shadow_fb + +int drm_gem_simple_kms_prepare_shadow_fb(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); +void drm_gem_simple_kms_cleanup_shadow_fb(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); +void drm_gem_simple_kms_reset_shadow_plane(struct drm_simple_display_pipe *pipe); +struct drm_plane_state * +drm_gem_simple_kms_duplicate_shadow_plane_state(struct drm_simple_display_pipe *pipe); +void drm_gem_simple_kms_destroy_shadow_plane_state(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); + +/** + * DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS - + * Initializes struct drm_simple_display_pipe_funcs for shadow-buffered planes + * + * Drivers may use GEM BOs as shadow buffers over the framebuffer memory. This + * macro initializes struct drm_simple_display_pipe_funcs to use the rsp helper + * functions. + */ +#define DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS \ + .prepare_fb = drm_gem_simple_kms_prepare_shadow_fb, \ + .cleanup_fb = drm_gem_simple_kms_cleanup_shadow_fb, \ + .reset_plane = drm_gem_simple_kms_reset_shadow_plane, \ + .duplicate_plane_state = drm_gem_simple_kms_duplicate_shadow_plane_state, \ + .destroy_plane_state = drm_gem_simple_kms_destroy_shadow_plane_state + +#endif /* __DRM_GEM_ATOMIC_HELPER_H__ */ diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h index 6b013154911d..6bdffc7aa124 100644 --- a/include/drm/drm_gem_framebuffer_helper.h +++ b/include/drm/drm_gem_framebuffer_helper.h @@ -9,9 +9,6 @@ struct drm_framebuffer; struct drm_framebuffer_funcs; struct drm_gem_object; struct drm_mode_fb_cmd2; -struct drm_plane; -struct drm_plane_state; -struct drm_simple_display_pipe; #define AFBC_VENDOR_AND_TYPE_MASK GENMASK_ULL(63, 52) @@ -44,8 +41,4 @@ int drm_gem_fb_afbc_init(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_afbc_framebuffer *afbc_fb); -int drm_gem_fb_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *state); -int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe, - struct drm_plane_state *plane_state); #endif diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h index a4bac02249c2..288055d397d9 100644 --- a/include/drm/drm_gem_vram_helper.h +++ b/include/drm/drm_gem_vram_helper.h @@ -172,19 +172,19 @@ struct drm_vram_mm { uint64_t vram_base; size_t vram_size; - struct ttm_bo_device bdev; + struct ttm_device bdev; }; /** * drm_vram_mm_of_bdev() - \ - Returns the container of type &struct ttm_bo_device for field bdev. + Returns the container of type &struct ttm_device for field bdev. 
* @bdev: the TTM BO device * * Returns: * The containing instance of &struct drm_vram_mm */ static inline struct drm_vram_mm *drm_vram_mm_of_bdev( - struct ttm_bo_device *bdev) + struct ttm_device *bdev) { return container_of(bdev, struct drm_vram_mm, bdev); } diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h index ac22c246542a..0b1111e3228e 100644 --- a/include/drm/drm_hdcp.h +++ b/include/drm/drm_hdcp.h @@ -224,11 +224,14 @@ struct hdcp2_rep_stream_ready { /* HDCP2.2 TIMEOUTs in mSec */ #define HDCP_2_2_CERT_TIMEOUT_MS 100 +#define HDCP_2_2_DP_CERT_READ_TIMEOUT_MS 110 #define HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS 1000 #define HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS 200 +#define HDCP_2_2_DP_HPRIME_READ_TIMEOUT_MS 7 #define HDCP_2_2_PAIRING_TIMEOUT_MS 200 +#define HDCP_2_2_DP_PAIRING_READ_TIMEOUT_MS 5 #define HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS 20 -#define HDCP_2_2_DP_LPRIME_TIMEOUT_MS 7 +#define HDCP_2_2_DP_LPRIME_TIMEOUT_MS 16 #define HDCP_2_2_RECVID_LIST_TIMEOUT_MS 3000 #define HDCP_2_2_STREAM_READY_TIMEOUT_MS 100 diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index eb706342861d..f3a4b47b3986 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -1179,7 +1179,7 @@ struct drm_plane_helper_funcs { * members in the plane structure. * * Drivers which always have their buffers pinned should use - * drm_gem_fb_prepare_fb() for this hook. + * drm_gem_plane_helper_prepare_fb() for this hook. * * The helpers will call @cleanup_fb with matching arguments for every * successful call to this hook. @@ -1233,9 +1233,8 @@ struct drm_plane_helper_funcs { * NOTE: * * This function is called in the check phase of an atomic update. The - * driver is not allowed to change anything outside of the free-standing - * state objects passed-in or assembled in the overall &drm_atomic_state - * update tracking structure. + * driver is not allowed to change anything outside of the + * &drm_atomic_state update tracking structure. * * RETURNS: * @@ -1245,7 +1244,7 @@ struct drm_plane_helper_funcs { * deadlock. */ int (*atomic_check)(struct drm_plane *plane, - struct drm_plane_state *state); + struct drm_atomic_state *state); /** * @atomic_update: @@ -1263,7 +1262,7 @@ struct drm_plane_helper_funcs { * transitional plane helpers, but it is optional. */ void (*atomic_update)(struct drm_plane *plane, - struct drm_plane_state *old_state); + struct drm_atomic_state *state); /** * @atomic_disable: * @@ -1287,14 +1286,14 @@ struct drm_plane_helper_funcs { * transitional plane helpers, but it is optional. */ void (*atomic_disable)(struct drm_plane *plane, - struct drm_plane_state *old_state); + struct drm_atomic_state *state); /** * @atomic_async_check: * - * Drivers should set this function pointer to check if the plane state - * can be updated in a async fashion. Here async means "not vblank - * synchronized". + * Drivers should set this function pointer to check if the plane's + * atomic state can be updated in a async fashion. Here async means + * "not vblank synchronized". * * This hook is called by drm_atomic_async_check() to establish if a * given update can be committed asynchronously, that is, if it can @@ -1306,7 +1305,7 @@ struct drm_plane_helper_funcs { * can not be applied in asynchronous manner. 
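With these hooks now receiving the overall &drm_atomic_state, an implementation looks up its own new state from it. A minimal sketch, assuming a hypothetical foo driver:

static int foo_plane_atomic_check(struct drm_plane *plane,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state =
		drm_atomic_get_new_plane_state(state, plane);

	if (!new_plane_state->crtc)
		return 0;	/* plane is being disabled */

	/* Validate new_plane_state against the hardware limits here. */
	return 0;
}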
*/ int (*atomic_async_check)(struct drm_plane *plane, - struct drm_plane_state *state); + struct drm_atomic_state *state); /** * @atomic_async_update: @@ -1322,11 +1321,9 @@ struct drm_plane_helper_funcs { * update won't happen if there is an outstanding commit modifying * the same plane. * - * Note that unlike &drm_plane_helper_funcs.atomic_update this hook - * takes the new &drm_plane_state as parameter. When doing async_update - * drivers shouldn't replace the &drm_plane_state but update the - * current one with the new plane configurations in the new - * plane_state. + * When doing async_update drivers shouldn't replace the + * &drm_plane_state but update the current one with the new plane + * configurations in the new plane_state. * * Drivers should also swap the framebuffers between current plane * state (&drm_plane.state) and new_state. @@ -1345,7 +1342,7 @@ struct drm_plane_helper_funcs { * for deferring if needed, until a common solution is created. */ void (*atomic_async_update)(struct drm_plane *plane, - struct drm_plane_state *new_state); + struct drm_atomic_state *state); }; /** diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 8ef06ee1c8eb..1294610e84f4 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -79,8 +79,8 @@ struct drm_plane_state { * preserved. * * Drivers should store any implicit fence in this from their - * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_fb_prepare_fb() - * and drm_gem_fb_simple_display_pipe_prepare_fb() for suitable helpers. + * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_plane_helper_prepare_fb() + * and drm_gem_simple_display_pipe_prepare_fb() for suitable helpers. */ struct dma_fence *fence; @@ -538,10 +538,14 @@ struct drm_plane_funcs { * * For compatibility with legacy userspace, only overlay planes are made * available to userspace by default. Userspace clients may set the - * DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that they + * &DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that they * wish to receive a universal plane list containing all plane types. See also * drm_for_each_legacy_plane(). * + * In addition to setting each plane's type, drivers need to setup the + * &drm_crtc.primary and optionally &drm_crtc.cursor pointers for legacy + * IOCTLs. See drm_crtc_init_with_planes(). + * * WARNING: The values of this enum is UABI since they're exposed in the "type" * property. */ @@ -557,19 +561,20 @@ enum drm_plane_type { /** * @DRM_PLANE_TYPE_PRIMARY: * - * Primary planes represent a "main" plane for a CRTC. Primary planes - * are the planes operated upon by CRTC modesetting and flipping - * operations described in the &drm_crtc_funcs.page_flip and - * &drm_crtc_funcs.set_config hooks. + * A primary plane attached to a CRTC is the most likely to be able to + * light up the CRTC when no scaling/cropping is used and the plane + * covers the whole CRTC. */ DRM_PLANE_TYPE_PRIMARY, /** * @DRM_PLANE_TYPE_CURSOR: * - * Cursor planes represent a "cursor" plane for a CRTC. Cursor planes - * are the planes operated upon by the DRM_IOCTL_MODE_CURSOR and - * DRM_IOCTL_MODE_CURSOR2 IOCTLs. + * A cursor plane attached to a CRTC is more likely to be able to be + * enabled when no scaling/cropping is used and the framebuffer has the + * size indicated by &drm_mode_config.cursor_width and + * &drm_mode_config.cursor_height. Additionally, if the driver doesn't + * support modifiers, the framebuffer should have a linear layout. 
*/ DRM_PLANE_TYPE_CURSOR, }; diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h index f32d179e139d..a3c58c941bdc 100644 --- a/include/drm/drm_print.h +++ b/include/drm/drm_print.h @@ -524,16 +524,20 @@ void __drm_err(const char *format, ...); #define DRM_DEBUG_DP(fmt, ...) \ __drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__) - -#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) \ -({ \ - static DEFINE_RATELIMIT_STATE(_rs, \ - DEFAULT_RATELIMIT_INTERVAL, \ - DEFAULT_RATELIMIT_BURST); \ - if (__ratelimit(&_rs)) \ - drm_dev_dbg(NULL, DRM_UT_KMS, fmt, ##__VA_ARGS__); \ +#define __DRM_DEFINE_DBG_RATELIMITED(category, drm, fmt, ...) \ +({ \ + static DEFINE_RATELIMIT_STATE(rs_, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);\ + const struct drm_device *drm_ = (drm); \ + \ + if (drm_debug_enabled(DRM_UT_ ## category) && __ratelimit(&rs_)) \ + drm_dev_printk(drm_ ? drm_->dev : NULL, KERN_DEBUG, fmt, ## __VA_ARGS__); \ }) +#define drm_dbg_kms_ratelimited(drm, fmt, ...) \ + __DRM_DEFINE_DBG_RATELIMITED(KMS, drm, fmt, ## __VA_ARGS__) + +#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) drm_dbg_kms_ratelimited(NULL, fmt, ## __VA_ARGS__) + /* * struct drm_device based WARNs * diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h index e6dbf3161c2f..ef9944e9c5fc 100644 --- a/include/drm/drm_simple_kms_helper.h +++ b/include/drm/drm_simple_kms_helper.h @@ -117,7 +117,7 @@ struct drm_simple_display_pipe_funcs { * more details. * * Drivers which always have their buffers pinned should use - * drm_gem_fb_simple_display_pipe_prepare_fb() for this hook. + * drm_gem_simple_display_pipe_prepare_fb() for this hook. */ int (*prepare_fb)(struct drm_simple_display_pipe *pipe, struct drm_plane_state *plane_state); @@ -149,6 +149,33 @@ struct drm_simple_display_pipe_funcs { * more details. */ void (*disable_vblank)(struct drm_simple_display_pipe *pipe); + + /** + * @reset_plane: + * + * Optional, called by &drm_plane_funcs.reset. Please read the + * documentation for the &drm_plane_funcs.reset hook for more details. + */ + void (*reset_plane)(struct drm_simple_display_pipe *pipe); + + /** + * @duplicate_plane_state: + * + * Optional, called by &drm_plane_funcs.atomic_duplicate_state. Please + * read the documentation for the &drm_plane_funcs.atomic_duplicate_state + * hook for more details. + */ + struct drm_plane_state * (*duplicate_plane_state)(struct drm_simple_display_pipe *pipe); + + /** + * @destroy_plane_state: + * + * Optional, called by &drm_plane_funcs.atomic_destroy_state. Please + * read the documentation for the &drm_plane_funcs.atomic_destroy_state + * hook for more details. 
+ */ + void (*destroy_plane_state)(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); }; /** diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h index dd125f8c766c..733a3e2d1d10 100644 --- a/include/drm/drm_vblank.h +++ b/include/drm/drm_vblank.h @@ -247,7 +247,6 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc); void drm_crtc_vblank_reset(struct drm_crtc *crtc); void drm_crtc_vblank_on(struct drm_crtc *crtc); u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc); -void drm_vblank_restore(struct drm_device *dev, unsigned int pipe); void drm_crtc_vblank_restore(struct drm_crtc *crtc); void drm_calc_timestamping_constants(struct drm_crtc *crtc, diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 975e8a67947f..10225a0a35d0 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -206,6 +206,12 @@ static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job, return s_job && atomic_inc_return(&s_job->karma) > threshold; } +enum drm_gpu_sched_stat { + DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */ + DRM_GPU_SCHED_STAT_NOMINAL, + DRM_GPU_SCHED_STAT_ENODEV, +}; + /** * struct drm_sched_backend_ops * @@ -230,10 +236,16 @@ struct drm_sched_backend_ops { struct dma_fence *(*run_job)(struct drm_sched_job *sched_job); /** - * @timedout_job: Called when a job has taken too long to execute, - * to trigger GPU recovery. + * @timedout_job: Called when a job has taken too long to execute, + * to trigger GPU recovery. + * + * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal, + * and the underlying driver has started or completed recovery. + * + * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer + * available, i.e. has been unplugged. */ - void (*timedout_job)(struct drm_sched_job *sched_job); + enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job); /** * @free_job: Called once the job's finished fence has been signaled @@ -265,6 +277,7 @@ struct drm_sched_backend_ops { * @hang_limit: once the hangs by a job crosses this limit then it is marked * guilty and it will be considered for scheduling further. * @score: score to help loadbalancer pick a idle sched + * @_score: score used when the driver doesn't provide one * @ready: marks if the underlying HW is ready to work * @free_guilty: A hit to time out handler to free the guilty job. 
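To illustrate the new return convention, a &drm_sched_backend_ops.timedout_job handler might look like the sketch below; the foo_ helpers are hypothetical.

static enum drm_gpu_sched_stat foo_timedout_job(struct drm_sched_job *sched_job)
{
	struct foo_job *job = to_foo_job(sched_job);

	/* Device gone (e.g. unplugged): tell the scheduler to bail out. */
	if (foo_device_unplugged(job->fdev))
		return DRM_GPU_SCHED_STAT_ENODEV;

	drm_sched_increase_karma(sched_job);
	foo_gpu_recover(job->fdev);	/* start or complete recovery */

	return DRM_GPU_SCHED_STAT_NOMINAL;
}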
* @@ -285,7 +298,8 @@ struct drm_gpu_scheduler { struct list_head pending_list; spinlock_t job_list_lock; int hang_limit; - atomic_t score; + atomic_t *score; + atomic_t _score; bool ready; bool free_guilty; }; @@ -293,7 +307,7 @@ struct drm_gpu_scheduler { int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_backend_ops *ops, uint32_t hw_submission, unsigned hang_limit, long timeout, - const char *name); + atomic_t *score, const char *name); void drm_sched_fini(struct drm_gpu_scheduler *sched); int drm_sched_job_init(struct drm_sched_job *job, @@ -308,7 +322,10 @@ void drm_sched_wakeup(struct drm_gpu_scheduler *sched); void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad); void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery); void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched); +void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max); void drm_sched_increase_karma(struct drm_sched_job *bad); +void drm_sched_reset_karma(struct drm_sched_job *bad); +void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type); bool drm_sched_dependency_optimized(struct dma_fence* fence, struct drm_sched_entity *entity); void drm_sched_fault(struct drm_gpu_scheduler *sched); diff --git a/include/drm/gud.h b/include/drm/gud.h new file mode 100644 index 000000000000..0b46b54fe56e --- /dev/null +++ b/include/drm/gud.h @@ -0,0 +1,333 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2020 Noralf Trønnes + */ + +#ifndef __LINUX_GUD_H +#define __LINUX_GUD_H + +#include <linux/types.h> + +/* + * struct gud_display_descriptor_req - Display descriptor + * @magic: Magic value GUD_DISPLAY_MAGIC + * @version: Protocol version + * @flags: Flags + * - STATUS_ON_SET: Always do a status request after a SET request. + * This is used by the Linux gadget driver since it has + * no way to control the status stage of a control OUT + * request that has a payload. + * - FULL_UPDATE: Always send the entire framebuffer when flushing changes. + * The GUD_REQ_SET_BUFFER request will not be sent + * before each bulk transfer, it will only be sent if the + * previous bulk transfer had failed. This gives the device + * a chance to reset its state machine if needed. + * This flag can not be used in combination with compression. + * @compression: Supported compression types + * - GUD_COMPRESSION_LZ4: LZ4 lossless compression. + * @max_buffer_size: Maximum buffer size the device can handle (optional). + * This is useful for devices that don't have a big enough + * buffer to decompress the entire framebuffer in one go. + * @min_width: Minimum pixel width the controller can handle + * @max_width: Maximum width + * @min_height: Minimum height + * @max_height: Maximum height + * + * Devices that have only one display mode will have min_width == max_width + * and min_height == max_height. 
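For a device with a single fixed mode the descriptor degenerates to min == max as noted above; a gadget-side sketch filling the struct defined just below (the foo_ name and the 240x320 values are made up):

static const struct gud_display_descriptor_req foo_descriptor = {
	.magic		= cpu_to_le32(GUD_DISPLAY_MAGIC),
	.version	= 1,
	.flags		= cpu_to_le32(GUD_DISPLAY_FLAG_FULL_UPDATE),
	.compression	= 0,	/* FULL_UPDATE cannot be combined with compression */
	.min_width	= cpu_to_le32(240),
	.max_width	= cpu_to_le32(240),	/* single mode: min == max */
	.min_height	= cpu_to_le32(320),
	.max_height	= cpu_to_le32(320),
};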
+ */ +struct gud_display_descriptor_req { + __le32 magic; +#define GUD_DISPLAY_MAGIC 0x1d50614d + __u8 version; + __le32 flags; +#define GUD_DISPLAY_FLAG_STATUS_ON_SET BIT(0) +#define GUD_DISPLAY_FLAG_FULL_UPDATE BIT(1) + __u8 compression; +#define GUD_COMPRESSION_LZ4 BIT(0) + __le32 max_buffer_size; + __le32 min_width; + __le32 max_width; + __le32 min_height; + __le32 max_height; +} __packed; + +/* + * struct gud_property_req - Property + * @prop: Property + * @val: Value + */ +struct gud_property_req { + __le16 prop; + __le64 val; +} __packed; + +/* + * struct gud_display_mode_req - Display mode + * @clock: Pixel clock in kHz + * @hdisplay: Horizontal display size + * @hsync_start: Horizontal sync start + * @hsync_end: Horizontal sync end + * @htotal: Horizontal total size + * @vdisplay: Vertical display size + * @vsync_start: Vertical sync start + * @vsync_end: Vertical sync end + * @vtotal: Vertical total size + * @flags: Bits 0-13 are the same as in the RandR protocol and also what DRM uses. + * The deprecated bits are reused for internal protocol flags leaving us + * free to follow DRM for the other bits in the future. + * - FLAG_PREFERRED: Set on the preferred display mode. + */ +struct gud_display_mode_req { + __le32 clock; + __le16 hdisplay; + __le16 hsync_start; + __le16 hsync_end; + __le16 htotal; + __le16 vdisplay; + __le16 vsync_start; + __le16 vsync_end; + __le16 vtotal; + __le32 flags; +#define GUD_DISPLAY_MODE_FLAG_PHSYNC BIT(0) +#define GUD_DISPLAY_MODE_FLAG_NHSYNC BIT(1) +#define GUD_DISPLAY_MODE_FLAG_PVSYNC BIT(2) +#define GUD_DISPLAY_MODE_FLAG_NVSYNC BIT(3) +#define GUD_DISPLAY_MODE_FLAG_INTERLACE BIT(4) +#define GUD_DISPLAY_MODE_FLAG_DBLSCAN BIT(5) +#define GUD_DISPLAY_MODE_FLAG_CSYNC BIT(6) +#define GUD_DISPLAY_MODE_FLAG_PCSYNC BIT(7) +#define GUD_DISPLAY_MODE_FLAG_NCSYNC BIT(8) +#define GUD_DISPLAY_MODE_FLAG_HSKEW BIT(9) +/* BCast and PixelMultiplex are deprecated */ +#define GUD_DISPLAY_MODE_FLAG_DBLCLK BIT(12) +#define GUD_DISPLAY_MODE_FLAG_CLKDIV2 BIT(13) +#define GUD_DISPLAY_MODE_FLAG_USER_MASK \ + (GUD_DISPLAY_MODE_FLAG_PHSYNC | GUD_DISPLAY_MODE_FLAG_NHSYNC | \ + GUD_DISPLAY_MODE_FLAG_PVSYNC | GUD_DISPLAY_MODE_FLAG_NVSYNC | \ + GUD_DISPLAY_MODE_FLAG_INTERLACE | GUD_DISPLAY_MODE_FLAG_DBLSCAN | \ + GUD_DISPLAY_MODE_FLAG_CSYNC | GUD_DISPLAY_MODE_FLAG_PCSYNC | \ + GUD_DISPLAY_MODE_FLAG_NCSYNC | GUD_DISPLAY_MODE_FLAG_HSKEW | \ + GUD_DISPLAY_MODE_FLAG_DBLCLK | GUD_DISPLAY_MODE_FLAG_CLKDIV2) +/* Internal protocol flags */ +#define GUD_DISPLAY_MODE_FLAG_PREFERRED BIT(10) +} __packed; + +/* + * struct gud_connector_descriptor_req - Connector descriptor + * @connector_type: Connector type (GUD_CONNECTOR_TYPE_*). + * If the host doesn't support the type it should fall back to PANEL. 
+ * @flags: Flags + * - POLL_STATUS: Connector status can change (polled every 10 seconds) + * - INTERLACE: Interlaced modes are supported + * - DOUBLESCAN: Doublescan modes are supported + */ +struct gud_connector_descriptor_req { + __u8 connector_type; +#define GUD_CONNECTOR_TYPE_PANEL 0 +#define GUD_CONNECTOR_TYPE_VGA 1 +#define GUD_CONNECTOR_TYPE_COMPOSITE 2 +#define GUD_CONNECTOR_TYPE_SVIDEO 3 +#define GUD_CONNECTOR_TYPE_COMPONENT 4 +#define GUD_CONNECTOR_TYPE_DVI 5 +#define GUD_CONNECTOR_TYPE_DISPLAYPORT 6 +#define GUD_CONNECTOR_TYPE_HDMI 7 + __le32 flags; +#define GUD_CONNECTOR_FLAGS_POLL_STATUS BIT(0) +#define GUD_CONNECTOR_FLAGS_INTERLACE BIT(1) +#define GUD_CONNECTOR_FLAGS_DOUBLESCAN BIT(2) +} __packed; + +/* + * struct gud_set_buffer_req - Set buffer transfer info + * @x: X position of rectangle + * @y: Y position + * @width: Pixel width of rectangle + * @height: Pixel height + * @length: Buffer length in bytes + * @compression: Transfer compression + * @compressed_length: Compressed buffer length + * + * This request is issued right before the bulk transfer. + * @x, @y, @width and @height specifies the rectangle where the buffer should be + * placed inside the framebuffer. + */ +struct gud_set_buffer_req { + __le32 x; + __le32 y; + __le32 width; + __le32 height; + __le32 length; + __u8 compression; + __le32 compressed_length; +} __packed; + +/* + * struct gud_state_req - Display state + * @mode: Display mode + * @format: Pixel format GUD_PIXEL_FORMAT_* + * @connector: Connector index + * @properties: Array of properties + * + * The entire state is transferred each time there's a change. + */ +struct gud_state_req { + struct gud_display_mode_req mode; + __u8 format; + __u8 connector; + struct gud_property_req properties[]; +} __packed; + +/* List of supported connector properties: */ + +/* Margins in pixels to deal with overscan, range 0-100 */ +#define GUD_PROPERTY_TV_LEFT_MARGIN 1 +#define GUD_PROPERTY_TV_RIGHT_MARGIN 2 +#define GUD_PROPERTY_TV_TOP_MARGIN 3 +#define GUD_PROPERTY_TV_BOTTOM_MARGIN 4 +#define GUD_PROPERTY_TV_MODE 5 +/* Brightness in percent, range 0-100 */ +#define GUD_PROPERTY_TV_BRIGHTNESS 6 +/* Contrast in percent, range 0-100 */ +#define GUD_PROPERTY_TV_CONTRAST 7 +/* Flicker reduction in percent, range 0-100 */ +#define GUD_PROPERTY_TV_FLICKER_REDUCTION 8 +/* Overscan in percent, range 0-100 */ +#define GUD_PROPERTY_TV_OVERSCAN 9 +/* Saturation in percent, range 0-100 */ +#define GUD_PROPERTY_TV_SATURATION 10 +/* Hue in percent, range 0-100 */ +#define GUD_PROPERTY_TV_HUE 11 + +/* + * Backlight brightness is in the range 0-100 inclusive. The value represents the human perceptual + * brightness and not a linear PWM value. 0 is minimum brightness which should not turn the + * backlight completely off. The DPMS connector property should be used to control power which will + * trigger a GUD_REQ_SET_DISPLAY_ENABLE request. + * + * This does not map to a DRM property, it is used with the backlight device. + */ +#define GUD_PROPERTY_BACKLIGHT_BRIGHTNESS 12 + +/* List of supported properties that are not connector propeties: */ + +/* + * Plane rotation. Should return the supported bitmask on + * GUD_REQ_GET_PROPERTIES. GUD_ROTATION_0 is mandatory. + * + * Note: This is not display rotation so 90/270 will need scaling to make it fit (unless squared). 
+ */ +#define GUD_PROPERTY_ROTATION 50 + #define GUD_ROTATION_0 BIT(0) + #define GUD_ROTATION_90 BIT(1) + #define GUD_ROTATION_180 BIT(2) + #define GUD_ROTATION_270 BIT(3) + #define GUD_ROTATION_REFLECT_X BIT(4) + #define GUD_ROTATION_REFLECT_Y BIT(5) + #define GUD_ROTATION_MASK (GUD_ROTATION_0 | GUD_ROTATION_90 | \ + GUD_ROTATION_180 | GUD_ROTATION_270 | \ + GUD_ROTATION_REFLECT_X | GUD_ROTATION_REFLECT_Y) + +/* USB Control requests: */ + +/* Get status from the last GET/SET control request. Value is u8. */ +#define GUD_REQ_GET_STATUS 0x00 + /* Status values: */ + #define GUD_STATUS_OK 0x00 + #define GUD_STATUS_BUSY 0x01 + #define GUD_STATUS_REQUEST_NOT_SUPPORTED 0x02 + #define GUD_STATUS_PROTOCOL_ERROR 0x03 + #define GUD_STATUS_INVALID_PARAMETER 0x04 + #define GUD_STATUS_ERROR 0x05 + +/* Get display descriptor as a &gud_display_descriptor_req */ +#define GUD_REQ_GET_DESCRIPTOR 0x01 + +/* Get supported pixel formats as a byte array of GUD_PIXEL_FORMAT_* */ +#define GUD_REQ_GET_FORMATS 0x40 + #define GUD_FORMATS_MAX_NUM 32 + /* R1 is a 1-bit monochrome transfer format presented to userspace as XRGB8888 */ + #define GUD_PIXEL_FORMAT_R1 0x01 + #define GUD_PIXEL_FORMAT_XRGB1111 0x20 + #define GUD_PIXEL_FORMAT_RGB565 0x40 + #define GUD_PIXEL_FORMAT_XRGB8888 0x80 + #define GUD_PIXEL_FORMAT_ARGB8888 0x81 + +/* + * Get supported properties that are not connector propeties as a &gud_property_req array. + * gud_property_req.val often contains the initial value for the property. + */ +#define GUD_REQ_GET_PROPERTIES 0x41 + #define GUD_PROPERTIES_MAX_NUM 32 + +/* Connector requests have the connector index passed in the wValue field */ + +/* Get connector descriptors as an array of &gud_connector_descriptor_req */ +#define GUD_REQ_GET_CONNECTORS 0x50 + #define GUD_CONNECTORS_MAX_NUM 32 + +/* + * Get properties supported by the connector as a &gud_property_req array. + * gud_property_req.val often contains the initial value for the property. + */ +#define GUD_REQ_GET_CONNECTOR_PROPERTIES 0x51 + #define GUD_CONNECTOR_PROPERTIES_MAX_NUM 32 + +/* + * Issued when there's a TV_MODE property present. + * Gets an array of the supported TV_MODE names each entry of length + * GUD_CONNECTOR_TV_MODE_NAME_LEN. Names must be NUL-terminated. + */ +#define GUD_REQ_GET_CONNECTOR_TV_MODE_VALUES 0x52 + #define GUD_CONNECTOR_TV_MODE_NAME_LEN 16 + #define GUD_CONNECTOR_TV_MODE_MAX_NUM 16 + +/* When userspace checks connector status, this is issued first, not used for poll requests. */ +#define GUD_REQ_SET_CONNECTOR_FORCE_DETECT 0x53 + +/* + * Get connector status. Value is u8. + * + * Userspace will get a HOTPLUG uevent if one of the following is true: + * - Connection status has changed since last + * - CHANGED is set + */ +#define GUD_REQ_GET_CONNECTOR_STATUS 0x54 + #define GUD_CONNECTOR_STATUS_DISCONNECTED 0x00 + #define GUD_CONNECTOR_STATUS_CONNECTED 0x01 + #define GUD_CONNECTOR_STATUS_UNKNOWN 0x02 + #define GUD_CONNECTOR_STATUS_CONNECTED_MASK 0x03 + #define GUD_CONNECTOR_STATUS_CHANGED BIT(7) + +/* + * Display modes can be fetched as either EDID data or an array of &gud_display_mode_req. + * + * If GUD_REQ_GET_CONNECTOR_MODES returns zero, EDID is used to create display modes. + * If both display modes and EDID are returned, EDID is just passed on to userspace + * in the EDID connector property. 
+ */ + +/* Get &gud_display_mode_req array of supported display modes */ +#define GUD_REQ_GET_CONNECTOR_MODES 0x55 + #define GUD_CONNECTOR_MAX_NUM_MODES 128 + +/* Get Extended Display Identification Data */ +#define GUD_REQ_GET_CONNECTOR_EDID 0x56 + #define GUD_CONNECTOR_MAX_EDID_LEN 2048 + +/* Set buffer properties before bulk transfer as &gud_set_buffer_req */ +#define GUD_REQ_SET_BUFFER 0x60 + +/* Check display configuration as &gud_state_req */ +#define GUD_REQ_SET_STATE_CHECK 0x61 + +/* Apply the previous STATE_CHECK configuration */ +#define GUD_REQ_SET_STATE_COMMIT 0x62 + +/* Enable/disable the display controller, value is u8: 0/1 */ +#define GUD_REQ_SET_CONTROLLER_ENABLE 0x63 + +/* Enable/disable display/output (DPMS), value is u8: 0/1 */ +#define GUD_REQ_SET_DISPLAY_ENABLE 0x64 + +#endif diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 931e46191047..ebd0dd1c35b3 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -634,4 +634,15 @@ INTEL_VGA_DEVICE(0x4907, info), \ INTEL_VGA_DEVICE(0x4908, info) +/* ADL-S */ +#define INTEL_ADLS_IDS(info) \ + INTEL_VGA_DEVICE(0x4680, info), \ + INTEL_VGA_DEVICE(0x4681, info), \ + INTEL_VGA_DEVICE(0x4682, info), \ + INTEL_VGA_DEVICE(0x4683, info), \ + INTEL_VGA_DEVICE(0x4690, info), \ + INTEL_VGA_DEVICE(0x4691, info), \ + INTEL_VGA_DEVICE(0x4692, info), \ + INTEL_VGA_DEVICE(0x4693, info) + #endif /* _I915_PCIIDS_H */ diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index b8ca13664fa2..2155e2e38aec 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -44,9 +44,9 @@ #include "ttm_resource.h" -struct ttm_bo_global; +struct ttm_global; -struct ttm_bo_device; +struct ttm_device; struct dma_buf_map; @@ -88,7 +88,6 @@ struct ttm_tt; * @type: The bo type. * @destroy: Destruction function. If NULL, kfree is used. * @num_pages: Actual number of pages. - * @acc_size: Accounted size for this object. * @kref: Reference count of this buffer object. When this refcount reaches * zero, the object is destroyed or put on the delayed delete list. * @mem: structure describing current placement. @@ -122,10 +121,9 @@ struct ttm_buffer_object { * Members constant at init. */ - struct ttm_bo_device *bdev; + struct ttm_device *bdev; enum ttm_bo_type type; void (*destroy) (struct ttm_buffer_object *); - size_t acc_size; /** * Members not needing protection. @@ -146,7 +144,6 @@ struct ttm_buffer_object { struct list_head lru; struct list_head ddestroy; - struct list_head swap; /** * Members protected by a bo reservation. @@ -313,7 +310,7 @@ void ttm_bo_put(struct ttm_buffer_object *bo); * @bulk: optional bulk move structure to remember BO positions * * Move this BO to the tail of all lru lists used to lookup and reserve an - * object. This function must be called with struct ttm_bo_global::lru_lock + * object. This function must be called with struct ttm_global::lru_lock * held, and is used to make a BO less likely to be considered for eviction. */ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, @@ -326,7 +323,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, * @bulk: bulk move structure * * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that - * BO order never changes. Should be called with ttm_bo_global::lru_lock held. + * BO order never changes. Should be called with ttm_global::lru_lock held. 
*/ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk); @@ -337,14 +334,14 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk); * Returns * True if the workqueue was queued at the time */ -int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev); +int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev); /** * ttm_bo_unlock_delayed_workqueue * * Allows the delayed workqueue to run. */ -void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched); +void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched); /** * ttm_bo_eviction_valuable @@ -357,21 +354,16 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched); bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place); -size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, - unsigned long bo_size, - unsigned struct_size); - /** * ttm_bo_init_reserved * - * @bdev: Pointer to a ttm_bo_device struct. + * @bdev: Pointer to a ttm_device struct. * @bo: Pointer to a ttm_buffer_object to be initialized. * @size: Requested size of buffer object. * @type: Requested type of buffer object. * @flags: Initial placement flags. * @page_alignment: Data alignment in pages. * @ctx: TTM operation context for memory allocation. - * @acc_size: Accounted size for this object. * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one. * @destroy: Destroy function. Use NULL for kfree(). * @@ -396,20 +388,19 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources. */ -int ttm_bo_init_reserved(struct ttm_bo_device *bdev, +int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo, size_t size, enum ttm_bo_type type, struct ttm_placement *placement, uint32_t page_alignment, struct ttm_operation_ctx *ctx, - size_t acc_size, struct sg_table *sg, - struct dma_resv *resv, + struct sg_table *sg, struct dma_resv *resv, void (*destroy) (struct ttm_buffer_object *)); /** * ttm_bo_init * - * @bdev: Pointer to a ttm_bo_device struct. + * @bdev: Pointer to a ttm_device struct. * @bo: Pointer to a ttm_buffer_object to be initialized. * @size: Requested size of buffer object. * @type: Requested type of buffer object. @@ -421,7 +412,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, * holds a pointer to a persistent shmem object. Typically, this would * point to the shmem object backing a GEM object if TTM is used to back a * GEM user interface. - * @acc_size: Accounted size for this object. * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one. * @destroy: Destroy function. Use NULL for kfree(). * @@ -443,10 +433,10 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, * -EINVAL: Invalid placement flags. * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources. */ -int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo, +int ttm_bo_init(struct ttm_device *bdev, struct ttm_buffer_object *bo, size_t size, enum ttm_bo_type type, struct ttm_placement *placement, - uint32_t page_alignment, bool interrubtible, size_t acc_size, + uint32_t page_alignment, bool interrubtible, struct sg_table *sg, struct dma_resv *resv, void (*destroy) (struct ttm_buffer_object *)); @@ -537,18 +527,18 @@ int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo); * * @filp: filp as input from the mmap method. * @vma: vma as input from the mmap method. 
- * @bdev: Pointer to the ttm_bo_device with the address space manager. + * @bdev: Pointer to the ttm_device with the address space manager. * * This function is intended to be called by the device mmap method. * if the device address space is to be backed by the bo manager. */ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, - struct ttm_bo_device *bdev); + struct ttm_device *bdev); /** * ttm_bo_io * - * @bdev: Pointer to the struct ttm_bo_device. + * @bdev: Pointer to the struct ttm_device. * @filp: Pointer to the struct file attempting to read / write. * @wbuf: User-space pointer to address of buffer to write. NULL on read. * @rbuf: User-space pointer to address of buffer to read into. @@ -565,11 +555,12 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, * the function may return -ERESTARTSYS if * interrupted by a signal. */ -ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, +ssize_t ttm_bo_io(struct ttm_device *bdev, struct file *filp, const char __user *wbuf, char __user *rbuf, size_t count, loff_t *f_pos, bool write); -int ttm_bo_swapout(struct ttm_operation_ctx *ctx); +int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, + gfp_t gfp_flags); /** * ttm_bo_uses_embedded_gem_object - check if the given bo uses the @@ -619,7 +610,7 @@ static inline void ttm_bo_unpin(struct ttm_buffer_object *bo) WARN_ON_ONCE(true); } -int ttm_mem_evict_first(struct ttm_bo_device *bdev, +int ttm_mem_evict_first(struct ttm_device *bdev, struct ttm_resource_manager *man, const struct ttm_place *place, struct ttm_operation_ctx *ctx, @@ -644,5 +635,6 @@ void ttm_bo_vm_close(struct vm_area_struct *vma); int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write); +bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all); #endif diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 423348414c59..dbccac957f8f 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -37,302 +37,14 @@ #include <linux/spinlock.h> #include <linux/dma-resv.h> +#include <drm/ttm/ttm_device.h> + #include "ttm_bo_api.h" -#include "ttm_memory.h" #include "ttm_placement.h" #include "ttm_tt.h" #include "ttm_pool.h" /** - * struct ttm_bo_driver - * - * @create_ttm_backend_entry: Callback to create a struct ttm_backend. - * @evict_flags: Callback to obtain placement flags when a buffer is evicted. - * @move: Callback for a driver to hook in accelerated functions to - * move a buffer. - * If set to NULL, a potentially slow memcpy() move is used. - */ - -struct ttm_bo_driver { - /** - * ttm_tt_create - * - * @bo: The buffer object to create the ttm for. - * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags. - * - * Create a struct ttm_tt to back data with system memory pages. - * No pages are actually allocated. - * Returns: - * NULL: Out of memory. - */ - struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo, - uint32_t page_flags); - - /** - * ttm_tt_populate - * - * @ttm: The struct ttm_tt to contain the backing pages. - * - * Allocate all backing pages - * Returns: - * -ENOMEM: Out of memory. - */ - int (*ttm_tt_populate)(struct ttm_bo_device *bdev, - struct ttm_tt *ttm, - struct ttm_operation_ctx *ctx); - - /** - * ttm_tt_unpopulate - * - * @ttm: The struct ttm_tt to contain the backing pages. 
- * - * Free all backing page - */ - void (*ttm_tt_unpopulate)(struct ttm_bo_device *bdev, struct ttm_tt *ttm); - - /** - * ttm_tt_destroy - * - * @bdev: Pointer to a ttm device - * @ttm: Pointer to a struct ttm_tt. - * - * Destroy the backend. This will be call back from ttm_tt_destroy so - * don't call ttm_tt_destroy from the callback or infinite loop. - */ - void (*ttm_tt_destroy)(struct ttm_bo_device *bdev, struct ttm_tt *ttm); - - /** - * struct ttm_bo_driver member eviction_valuable - * - * @bo: the buffer object to be evicted - * @place: placement we need room for - * - * Check with the driver if it is valuable to evict a BO to make room - * for a certain placement. - */ - bool (*eviction_valuable)(struct ttm_buffer_object *bo, - const struct ttm_place *place); - /** - * struct ttm_bo_driver member evict_flags: - * - * @bo: the buffer object to be evicted - * - * Return the bo flags for a buffer which is not mapped to the hardware. - * These will be placed in proposed_flags so that when the move is - * finished, they'll end up in bo->mem.flags - * This should not cause multihop evictions, and the core will warn - * if one is proposed. - */ - - void (*evict_flags)(struct ttm_buffer_object *bo, - struct ttm_placement *placement); - - /** - * struct ttm_bo_driver member move: - * - * @bo: the buffer to move - * @evict: whether this motion is evicting the buffer from - * the graphics address space - * @ctx: context for this move with parameters - * @new_mem: the new memory region receiving the buffer - @ @hop: placement for driver directed intermediate hop - * - * Move a buffer between two memory regions. - * Returns errno -EMULTIHOP if driver requests a hop - */ - int (*move)(struct ttm_buffer_object *bo, bool evict, - struct ttm_operation_ctx *ctx, - struct ttm_resource *new_mem, - struct ttm_place *hop); - - /** - * struct ttm_bo_driver_member verify_access - * - * @bo: Pointer to a buffer object. - * @filp: Pointer to a struct file trying to access the object. - * - * Called from the map / write / read methods to verify that the - * caller is permitted to access the buffer object. - * This member may be set to NULL, which will refuse this kind of - * access for all buffer objects. - * This function should return 0 if access is granted, -EPERM otherwise. - */ - int (*verify_access)(struct ttm_buffer_object *bo, - struct file *filp); - - /** - * Hook to notify driver about a resource delete. - */ - void (*delete_mem_notify)(struct ttm_buffer_object *bo); - - /** - * notify the driver that we're about to swap out this bo - */ - void (*swap_notify)(struct ttm_buffer_object *bo); - - /** - * Driver callback on when mapping io memory (for bo_move_memcpy - * for instance). TTM will take care to call io_mem_free whenever - * the mapping is not use anymore. io_mem_reserve & io_mem_free - * are balanced. - */ - int (*io_mem_reserve)(struct ttm_bo_device *bdev, - struct ttm_resource *mem); - void (*io_mem_free)(struct ttm_bo_device *bdev, - struct ttm_resource *mem); - - /** - * Return the pfn for a given page_offset inside the BO. 
- * - * @bo: the BO to look up the pfn for - * @page_offset: the offset to look up - */ - unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo, - unsigned long page_offset); - - /** - * Read/write memory buffers for ptrace access - * - * @bo: the BO to access - * @offset: the offset from the start of the BO - * @buf: pointer to source/destination buffer - * @len: number of bytes to copy - * @write: whether to read (0) from or write (non-0) to BO - * - * If successful, this function should return the number of - * bytes copied, -EIO otherwise. If the number of bytes - * returned is < len, the function may be called again with - * the remainder of the buffer to copy. - */ - int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset, - void *buf, int len, int write); - - /** - * struct ttm_bo_driver member del_from_lru_notify - * - * @bo: the buffer object deleted from lru - * - * notify driver that a BO was deleted from LRU. - */ - void (*del_from_lru_notify)(struct ttm_buffer_object *bo); - - /** - * Notify the driver that we're about to release a BO - * - * @bo: BO that is about to be released - * - * Gives the driver a chance to do any cleanup, including - * adding fences that may force a delayed delete - */ - void (*release_notify)(struct ttm_buffer_object *bo); -}; - -/** - * struct ttm_bo_global - Buffer object driver global data. - * - * @dummy_read_page: Pointer to a dummy page used for mapping requests - * of unpopulated pages. - * @shrink: A shrink callback object used for buffer object swap. - * @device_list_mutex: Mutex protecting the device list. - * This mutex is held while traversing the device list for pm options. - * @lru_lock: Spinlock protecting the bo subsystem lru lists. - * @device_list: List of buffer object devices. - * @swap_lru: Lru list of buffer objects used for swapping. - */ - -extern struct ttm_bo_global { - - /** - * Constant after init. - */ - - struct kobject kobj; - struct page *dummy_read_page; - spinlock_t lru_lock; - - /** - * Protected by ttm_global_mutex. - */ - struct list_head device_list; - - /** - * Protected by the lru_lock. - */ - struct list_head swap_lru[TTM_MAX_BO_PRIORITY]; - - /** - * Internal protection. - */ - atomic_t bo_count; -} ttm_bo_glob; - - -#define TTM_NUM_MEM_TYPES 8 - -/** - * struct ttm_bo_device - Buffer object driver device-specific data. - * - * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver. - * @man: An array of resource_managers. - * @vma_manager: Address space manager (pointer) - * lru_lock: Spinlock that protects the buffer+device lru lists and - * ddestroy lists. - * @dev_mapping: A pointer to the struct address_space representing the - * device address space. - * @wq: Work queue structure for the delayed delete workqueue. - * - */ - -struct ttm_bo_device { - - /* - * Constant after bo device init / atomic. - */ - struct list_head device_list; - struct ttm_bo_driver *driver; - /* - * access via ttm_manager_type. - */ - struct ttm_resource_manager sysman; - struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES]; - /* - * Protected by internal locks. - */ - struct drm_vma_offset_manager *vma_manager; - struct ttm_pool pool; - - /* - * Protected by the global:lru lock. - */ - struct list_head ddestroy; - - /* - * Protected by load / firstopen / lastclose /unload sync. - */ - - struct address_space *dev_mapping; - - /* - * Internal protection. 
- */ - - struct delayed_work wq; -}; - -static inline struct ttm_resource_manager *ttm_manager_type(struct ttm_bo_device *bdev, - int mem_type) -{ - return bdev->man_drv[mem_type]; -} - -static inline void ttm_set_driver_manager(struct ttm_bo_device *bdev, - int type, - struct ttm_resource_manager *manager) -{ - bdev->man_drv[type] = manager; -} - -/** * struct ttm_lru_bulk_move_pos * * @first: first BO in the bulk move range @@ -357,7 +69,6 @@ struct ttm_lru_bulk_move_pos { struct ttm_lru_bulk_move { struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY]; struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY]; - struct ttm_lru_bulk_move_pos swap[TTM_MAX_BO_PRIORITY]; }; /* @@ -388,31 +99,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, struct ttm_resource *mem, struct ttm_operation_ctx *ctx); -int ttm_bo_device_release(struct ttm_bo_device *bdev); - -/** - * ttm_bo_device_init - * - * @bdev: A pointer to a struct ttm_bo_device to initialize. - * @glob: A pointer to an initialized struct ttm_bo_global. - * @driver: A pointer to a struct ttm_bo_driver set up by the caller. - * @dev: The core kernel device pointer for DMA mappings and allocations. - * @mapping: The address space to use for this bo. - * @vma_manager: A pointer to a vma manager. - * @use_dma_alloc: If coherent DMA allocation API should be used. - * @use_dma32: If we should use GFP_DMA32 for device memory allocations. - * - * Initializes a struct ttm_bo_device: - * Returns: - * !0: Failure. - */ -int ttm_bo_device_init(struct ttm_bo_device *bdev, - struct ttm_bo_driver *driver, - struct device *dev, - struct address_space *mapping, - struct drm_vma_offset_manager *vma_manager, - bool use_dma_alloc, bool use_dma32); - /** * ttm_bo_unmap_virtual * @@ -494,9 +180,9 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, static inline void ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo) { - spin_lock(&ttm_bo_glob.lru_lock); + spin_lock(&bo->bdev->lru_lock); ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); - spin_unlock(&ttm_bo_glob.lru_lock); + spin_unlock(&bo->bdev->lru_lock); } static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo, @@ -538,9 +224,9 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) /* * ttm_bo_util.c */ -int ttm_mem_io_reserve(struct ttm_bo_device *bdev, +int ttm_mem_io_reserve(struct ttm_device *bdev, struct ttm_resource *mem); -void ttm_mem_io_free(struct ttm_bo_device *bdev, +void ttm_mem_io_free(struct ttm_device *bdev, struct ttm_resource *mem); /** @@ -631,7 +317,7 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo); * Initialise a generic range manager for the selected memory type. * The range manager is installed for this device in the type slot. */ -int ttm_range_man_init(struct ttm_bo_device *bdev, +int ttm_range_man_init(struct ttm_device *bdev, unsigned type, bool use_tt, unsigned long p_size); @@ -643,7 +329,7 @@ int ttm_range_man_init(struct ttm_bo_device *bdev, * * Remove the generic range manager from a slot and tear it down. */ -int ttm_range_man_fini(struct ttm_bo_device *bdev, +int ttm_range_man_fini(struct ttm_device *bdev, unsigned type); #endif diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h new file mode 100644 index 000000000000..7c8f87bd52d3 --- /dev/null +++ b/include/drm/ttm/ttm_device.h @@ -0,0 +1,317 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Christian König + */ + +#ifndef _TTM_DEVICE_H_ +#define _TTM_DEVICE_H_ + +#include <linux/types.h> +#include <linux/workqueue.h> +#include <drm/ttm/ttm_resource.h> +#include <drm/ttm/ttm_pool.h> + +#define TTM_NUM_MEM_TYPES 8 + +struct ttm_device; +struct ttm_placement; +struct ttm_buffer_object; +struct ttm_operation_ctx; + +/** + * struct ttm_global - Buffer object driver global data. + * + * @dummy_read_page: Pointer to a dummy page used for mapping requests + * of unpopulated pages. + * @shrink: A shrink callback object used for buffer object swap. + * @device_list_mutex: Mutex protecting the device list. + * This mutex is held while traversing the device list for pm options. + * @lru_lock: Spinlock protecting the bo subsystem lru lists. + * @device_list: List of buffer object devices. + * @swap_lru: Lru list of buffer objects used for swapping. + */ +extern struct ttm_global { + + /** + * Constant after init. + */ + + struct page *dummy_read_page; + + /** + * Protected by ttm_global_mutex. + */ + struct list_head device_list; + + /** + * Internal protection. + */ + atomic_t bo_count; +} ttm_glob; + +struct ttm_device_funcs { + /** + * ttm_tt_create + * + * @bo: The buffer object to create the ttm for. + * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags. + * + * Create a struct ttm_tt to back data with system memory pages. + * No pages are actually allocated. + * Returns: + * NULL: Out of memory. + */ + struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo, + uint32_t page_flags); + + /** + * ttm_tt_populate + * + * @ttm: The struct ttm_tt to contain the backing pages. + * + * Allocate all backing pages + * Returns: + * -ENOMEM: Out of memory. + */ + int (*ttm_tt_populate)(struct ttm_device *bdev, + struct ttm_tt *ttm, + struct ttm_operation_ctx *ctx); + + /** + * ttm_tt_unpopulate + * + * @ttm: The struct ttm_tt to contain the backing pages. + * + * Free all backing page + */ + void (*ttm_tt_unpopulate)(struct ttm_device *bdev, + struct ttm_tt *ttm); + + /** + * ttm_tt_destroy + * + * @bdev: Pointer to a ttm device + * @ttm: Pointer to a struct ttm_tt. + * + * Destroy the backend. This will be call back from ttm_tt_destroy so + * don't call ttm_tt_destroy from the callback or infinite loop. 
+ */ + void (*ttm_tt_destroy)(struct ttm_device *bdev, struct ttm_tt *ttm); + + /** + * struct ttm_bo_driver member eviction_valuable + * + * @bo: the buffer object to be evicted + * @place: placement we need room for + * + * Check with the driver if it is valuable to evict a BO to make room + * for a certain placement. + */ + bool (*eviction_valuable)(struct ttm_buffer_object *bo, + const struct ttm_place *place); + /** + * struct ttm_bo_driver member evict_flags: + * + * @bo: the buffer object to be evicted + * + * Return the bo flags for a buffer which is not mapped to the hardware. + * These will be placed in proposed_flags so that when the move is + * finished, they'll end up in bo->mem.flags + * This should not cause multihop evictions, and the core will warn + * if one is proposed. + */ + + void (*evict_flags)(struct ttm_buffer_object *bo, + struct ttm_placement *placement); + + /** + * struct ttm_bo_driver member move: + * + * @bo: the buffer to move + * @evict: whether this motion is evicting the buffer from + * the graphics address space + * @ctx: context for this move with parameters + * @new_mem: the new memory region receiving the buffer + * @hop: placement for driver directed intermediate hop + * + * Move a buffer between two memory regions. + * Returns errno -EMULTIHOP if the driver requests a hop. + */ + int (*move)(struct ttm_buffer_object *bo, bool evict, + struct ttm_operation_ctx *ctx, + struct ttm_resource *new_mem, + struct ttm_place *hop); + + /** + * struct ttm_bo_driver member verify_access + * + * @bo: Pointer to a buffer object. + * @filp: Pointer to a struct file trying to access the object. + * + * Called from the map / write / read methods to verify that the + * caller is permitted to access the buffer object. + * This member may be set to NULL, which will refuse this kind of + * access for all buffer objects. + * This function should return 0 if access is granted, -EPERM otherwise. + */ + int (*verify_access)(struct ttm_buffer_object *bo, + struct file *filp); + + /** + * Hook to notify the driver about a resource delete. + */ + void (*delete_mem_notify)(struct ttm_buffer_object *bo); + + /** + * Notify the driver that we're about to swap out this bo. + */ + void (*swap_notify)(struct ttm_buffer_object *bo); + + /** + * Driver callback for when mapping io memory (for bo_move_memcpy + * for instance). TTM will take care to call io_mem_free whenever + * the mapping is not used anymore. io_mem_reserve & io_mem_free + * are balanced. + */ + int (*io_mem_reserve)(struct ttm_device *bdev, + struct ttm_resource *mem); + void (*io_mem_free)(struct ttm_device *bdev, + struct ttm_resource *mem); + + /** + * Return the pfn for a given page_offset inside the BO. + * + * @bo: the BO to look up the pfn for + * @page_offset: the offset to look up + */ + unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo, + unsigned long page_offset); + + /** + * Read/write memory buffers for ptrace access + * + * @bo: the BO to access + * @offset: the offset from the start of the BO + * @buf: pointer to source/destination buffer + * @len: number of bytes to copy + * @write: whether to read (0) from or write (non-0) to BO + * + * If successful, this function should return the number of + * bytes copied, -EIO otherwise. If the number of bytes + * returned is < len, the function may be called again with + * the remainder of the buffer to copy.
+ */ + int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset, + void *buf, int len, int write); + + /** + * struct ttm_bo_driver member del_from_lru_notify + * + * @bo: the buffer object deleted from lru + * + * notify driver that a BO was deleted from LRU. + */ + void (*del_from_lru_notify)(struct ttm_buffer_object *bo); + + /** + * Notify the driver that we're about to release a BO + * + * @bo: BO that is about to be released + * + * Gives the driver a chance to do any cleanup, including + * adding fences that may force a delayed delete + */ + void (*release_notify)(struct ttm_buffer_object *bo); +}; + +/** + * struct ttm_device - Buffer object driver device-specific data. + * + * @device_list: Our entry in the global device list. + * @funcs: Function table for the device. + * @sysman: Resource manager for the system domain. + * @man_drv: An array of resource_managers. + * @vma_manager: Address space manager. + * @pool: page pool for the device. + * @dev_mapping: A pointer to the struct address_space representing the + * device address space. + * @wq: Work queue structure for the delayed delete workqueue. + */ +struct ttm_device { + /* + * Constant after bo device init + */ + struct list_head device_list; + struct ttm_device_funcs *funcs; + + /* + * Access via ttm_manager_type. + */ + struct ttm_resource_manager sysman; + struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES]; + + /* + * Protected by internal locks. + */ + struct drm_vma_offset_manager *vma_manager; + struct ttm_pool pool; + + /* + * Protection for the per manager LRU and ddestroy lists. + */ + spinlock_t lru_lock; + struct list_head ddestroy; + + /* + * Protected by load / firstopen / lastclose /unload sync. + */ + struct address_space *dev_mapping; + + /* + * Internal protection. + */ + struct delayed_work wq; +}; + +int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags); +int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx, + gfp_t gfp_flags); + +static inline struct ttm_resource_manager * +ttm_manager_type(struct ttm_device *bdev, int mem_type) +{ + return bdev->man_drv[mem_type]; +} + +static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type, + struct ttm_resource_manager *manager) +{ + bdev->man_drv[type] = manager; +} + +int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs, + struct device *dev, struct address_space *mapping, + struct drm_vma_offset_manager *vma_manager, + bool use_dma_alloc, bool use_dma32); +void ttm_device_fini(struct ttm_device *bdev); + +#endif diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h deleted file mode 100644 index c1f167881e33..000000000000 --- a/include/drm/ttm/ttm_memory.h +++ /dev/null @@ -1,95 +0,0 @@ -/************************************************************************** - * - * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - **************************************************************************/ - -#ifndef TTM_MEMORY_H -#define TTM_MEMORY_H - -#include <linux/workqueue.h> -#include <linux/spinlock.h> -#include <linux/bug.h> -#include <linux/wait.h> -#include <linux/errno.h> -#include <linux/kobject.h> -#include <linux/mm.h> -#include "ttm_bo_api.h" - -/** - * struct ttm_mem_global - Global memory accounting structure. - * - * @shrink: A single callback to shrink TTM memory usage. Extend this - * to a linked list to be able to handle multiple callbacks when needed. - * @swap_queue: A workqueue to handle shrinking in low memory situations. We - * need a separate workqueue since it will spend a lot of time waiting - * for the GPU, and this will otherwise block other workqueue tasks(?) - * At this point we use only a single-threaded workqueue. - * @work: The workqueue callback for the shrink queue. - * @lock: Lock to protect the @shrink - and the memory accounting members, - * that is, essentially the whole structure with some exceptions. - * @lower_mem_limit: include lower limit of swap space and lower limit of - * system memory. - * @zones: Array of pointers to accounting zones. - * @num_zones: Number of populated entries in the @zones array. - * @zone_kernel: Pointer to the kernel zone. - * @zone_highmem: Pointer to the highmem zone if there is one. - * @zone_dma32: Pointer to the dma32 zone if there is one. - * - * Note that this structure is not per device. It should be global for all - * graphics devices. 
- */ - -#define TTM_MEM_MAX_ZONES 2 -struct ttm_mem_zone; -extern struct ttm_mem_global { - struct kobject kobj; - struct workqueue_struct *swap_queue; - struct work_struct work; - spinlock_t lock; - uint64_t lower_mem_limit; - struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES]; - unsigned int num_zones; - struct ttm_mem_zone *zone_kernel; -#ifdef CONFIG_HIGHMEM - struct ttm_mem_zone *zone_highmem; -#else - struct ttm_mem_zone *zone_dma32; -#endif -} ttm_mem_glob; - -int ttm_mem_global_init(struct ttm_mem_global *glob); -void ttm_mem_global_release(struct ttm_mem_global *glob); -int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, - struct ttm_operation_ctx *ctx); -void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount); -int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, - struct page *page, uint64_t size, - struct ttm_operation_ctx *ctx); -void ttm_mem_global_free_page(struct ttm_mem_global *glob, - struct page *page, uint64_t size); -size_t ttm_round_pot(size_t size); -bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob, uint64_t num_pages, - struct ttm_operation_ctx *ctx); -#endif diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index da0ed7e8c915..6164ccf4f308 100644 --- a/include/drm/ttm/ttm_resource.h +++ b/include/drm/ttm/ttm_resource.h @@ -33,7 +33,7 @@ #define TTM_MAX_BO_PRIORITY 4U -struct ttm_bo_device; +struct ttm_device; struct ttm_resource_manager; struct ttm_resource; struct ttm_place; @@ -233,7 +233,7 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res); void ttm_resource_manager_init(struct ttm_resource_manager *man, unsigned long p_size); -int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev, +int ttm_resource_manager_evict_all(struct ttm_device *bdev, struct ttm_resource_manager *man); void ttm_resource_manager_debug(struct ttm_resource_manager *man, diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h index 6c8eb9a4de81..134d09ef7766 100644 --- a/include/drm/ttm/ttm_tt.h +++ b/include/drm/ttm/ttm_tt.h @@ -30,6 +30,7 @@ #include <linux/types.h> #include <drm/ttm/ttm_caching.h> +struct ttm_bo_device; struct ttm_tt; struct ttm_resource; struct ttm_buffer_object; @@ -118,14 +119,14 @@ void ttm_tt_fini(struct ttm_tt *ttm); * * Unbind, unpopulate and destroy common struct ttm_tt. */ -void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm); +void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm); /** * ttm_tt_destroy_common: * * Called from driver to destroy common path. */ -void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm); +void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm); /** * ttm_tt_swapin: @@ -135,7 +136,8 @@ void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm); * Swap in a previously swap out ttm_tt. 
*/ int ttm_tt_swapin(struct ttm_tt *ttm); -int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm); +int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm, + gfp_t gfp_flags); /** * ttm_tt_populate - allocate pages for a ttm @@ -144,7 +146,7 @@ int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm); * * Calls the driver method to allocate pages for a ttm */ -int ttm_tt_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); +int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); /** * ttm_tt_unpopulate - free pages from a ttm @@ -153,7 +155,9 @@ int ttm_tt_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_o * * Calls the driver method to free all pages from a ttm */ -void ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm); +void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm); + +void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages); #if IS_ENABLED(CONFIG_AGP) #include <linux/agp_backend.h> diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h index e8d68fbb6e3f..43927a1b9e94 100644 --- a/include/dt-bindings/clock/imx8mp-clock.h +++ b/include/dt-bindings/clock/imx8mp-clock.h @@ -125,7 +125,6 @@ #define IMX8MP_CLK_CAN1 116 #define IMX8MP_CLK_CAN2 117 #define IMX8MP_CLK_MEMREPAIR 118 -#define IMX8MP_CLK_PCIE_PHY 119 #define IMX8MP_CLK_PCIE_AUX 120 #define IMX8MP_CLK_I2C5 121 #define IMX8MP_CLK_I2C6 122 @@ -182,8 +181,6 @@ #define IMX8MP_CLK_MEDIA_CAM2_PIX 173 #define IMX8MP_CLK_MEDIA_LDB 174 #define IMX8MP_CLK_MEDIA_MIPI_CSI2_ESC 175 -#define IMX8MP_CLK_PCIE2_CTRL 176 -#define IMX8MP_CLK_PCIE2_PHY 177 #define IMX8MP_CLK_MEDIA_MIPI_TEST_BYTE 178 #define IMX8MP_CLK_ECSPI3 179 #define IMX8MP_CLK_PDM 180 diff --git a/include/dt-bindings/clock/mt7621-clk.h b/include/dt-bindings/clock/mt7621-clk.h new file mode 100644 index 000000000000..1422badcf9de --- /dev/null +++ b/include/dt-bindings/clock/mt7621-clk.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Author: Sergio Paracuellos <sergio.paracuellos@gmail.com> + */ + +#ifndef _DT_BINDINGS_CLK_MT7621_H +#define _DT_BINDINGS_CLK_MT7621_H + +#define MT7621_CLK_XTAL 0 +#define MT7621_CLK_CPU 1 +#define MT7621_CLK_BUS 2 +#define MT7621_CLK_50M 3 +#define MT7621_CLK_125M 4 +#define MT7621_CLK_150M 5 +#define MT7621_CLK_250M 6 +#define MT7621_CLK_270M 7 + +#define MT7621_CLK_HSDMA 8 +#define MT7621_CLK_FE 9 +#define MT7621_CLK_SP_DIVTX 10 +#define MT7621_CLK_TIMER 11 +#define MT7621_CLK_PCM 12 +#define MT7621_CLK_PIO 13 +#define MT7621_CLK_GDMA 14 +#define MT7621_CLK_NAND 15 +#define MT7621_CLK_I2C 16 +#define MT7621_CLK_I2S 17 +#define MT7621_CLK_SPI 18 +#define MT7621_CLK_UART1 19 +#define MT7621_CLK_UART2 20 +#define MT7621_CLK_UART3 21 +#define MT7621_CLK_ETH 22 +#define MT7621_CLK_PCIE0 23 +#define MT7621_CLK_PCIE1 24 +#define MT7621_CLK_PCIE2 25 +#define MT7621_CLK_CRYPTO 26 +#define MT7621_CLK_SHXC 27 + +#define MT7621_CLK_MAX 28 + +#endif /* _DT_BINDINGS_CLK_MT7621_H */ diff --git a/include/dt-bindings/clock/omap5.h b/include/dt-bindings/clock/omap5.h index 41775272fd27..90e0d4b00127 100644 --- a/include/dt-bindings/clock/omap5.h +++ b/include/dt-bindings/clock/omap5.h @@ -32,6 +32,8 @@ /* l3main2 clocks */ #define OMAP5_L3_MAIN_2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) +#define OMAP5_L3_MAIN_2_GPMC_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28) +#define OMAP5_L3_MAIN_2_OCMC_RAM_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30) /* ipu 
clocks */ #define OMAP5_MMU_IPU_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) diff --git a/include/dt-bindings/clock/rk3568-cru.h b/include/dt-bindings/clock/rk3568-cru.h new file mode 100644 index 000000000000..d29890865150 --- /dev/null +++ b/include/dt-bindings/clock/rk3568-cru.h @@ -0,0 +1,926 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021 Rockchip Electronics Co. Ltd. + * Author: Elaine Zhang <zhangqing@rock-chips.com> + */ + +#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3568_H +#define _DT_BINDINGS_CLK_ROCKCHIP_RK3568_H + +/* pmucru-clocks indices */ + +/* pmucru plls */ +#define PLL_PPLL 1 +#define PLL_HPLL 2 + +/* pmucru clocks */ +#define XIN_OSC0_DIV 4 +#define CLK_RTC_32K 5 +#define CLK_PMU 6 +#define CLK_I2C0 7 +#define CLK_RTC32K_FRAC 8 +#define CLK_UART0_DIV 9 +#define CLK_UART0_FRAC 10 +#define SCLK_UART0 11 +#define DBCLK_GPIO0 12 +#define CLK_PWM0 13 +#define CLK_CAPTURE_PWM0_NDFT 14 +#define CLK_PMUPVTM 15 +#define CLK_CORE_PMUPVTM 16 +#define CLK_REF24M 17 +#define XIN_OSC0_USBPHY0_G 18 +#define CLK_USBPHY0_REF 19 +#define XIN_OSC0_USBPHY1_G 20 +#define CLK_USBPHY1_REF 21 +#define XIN_OSC0_MIPIDSIPHY0_G 22 +#define CLK_MIPIDSIPHY0_REF 23 +#define XIN_OSC0_MIPIDSIPHY1_G 24 +#define CLK_MIPIDSIPHY1_REF 25 +#define CLK_WIFI_DIV 26 +#define CLK_WIFI_OSC0 27 +#define CLK_WIFI 28 +#define CLK_PCIEPHY0_DIV 29 +#define CLK_PCIEPHY0_OSC0 30 +#define CLK_PCIEPHY0_REF 31 +#define CLK_PCIEPHY1_DIV 32 +#define CLK_PCIEPHY1_OSC0 33 +#define CLK_PCIEPHY1_REF 34 +#define CLK_PCIEPHY2_DIV 35 +#define CLK_PCIEPHY2_OSC0 36 +#define CLK_PCIEPHY2_REF 37 +#define CLK_PCIE30PHY_REF_M 38 +#define CLK_PCIE30PHY_REF_N 39 +#define CLK_HDMI_REF 40 +#define XIN_OSC0_EDPPHY_G 41 +#define PCLK_PDPMU 42 +#define PCLK_PMU 43 +#define PCLK_UART0 44 +#define PCLK_I2C0 45 +#define PCLK_GPIO0 46 +#define PCLK_PMUPVTM 47 +#define PCLK_PWM0 48 +#define CLK_PDPMU 49 +#define SCLK_32K_IOE 50 + +#define CLKPMU_NR_CLKS (SCLK_32K_IOE + 1) + +/* cru-clocks indices */ + +/* cru plls */ +#define PLL_APLL 1 +#define PLL_DPLL 2 +#define PLL_CPLL 3 +#define PLL_GPLL 4 +#define PLL_VPLL 5 +#define PLL_NPLL 6 + +/* cru clocks */ +#define CPLL_333M 9 +#define ARMCLK 10 +#define USB480M 11 +#define ACLK_CORE_NIU2BUS 18 +#define CLK_CORE_PVTM 19 +#define CLK_CORE_PVTM_CORE 20 +#define CLK_CORE_PVTPLL 21 +#define CLK_GPU_SRC 22 +#define CLK_GPU_PRE_NDFT 23 +#define CLK_GPU_PRE_MUX 24 +#define ACLK_GPU_PRE 25 +#define PCLK_GPU_PRE 26 +#define CLK_GPU 27 +#define CLK_GPU_NP5 28 +#define PCLK_GPU_PVTM 29 +#define CLK_GPU_PVTM 30 +#define CLK_GPU_PVTM_CORE 31 +#define CLK_GPU_PVTPLL 32 +#define CLK_NPU_SRC 33 +#define CLK_NPU_PRE_NDFT 34 +#define CLK_NPU 35 +#define CLK_NPU_NP5 36 +#define HCLK_NPU_PRE 37 +#define PCLK_NPU_PRE 38 +#define ACLK_NPU_PRE 39 +#define ACLK_NPU 40 +#define HCLK_NPU 41 +#define PCLK_NPU_PVTM 42 +#define CLK_NPU_PVTM 43 +#define CLK_NPU_PVTM_CORE 44 +#define CLK_NPU_PVTPLL 45 +#define CLK_DDRPHY1X_SRC 46 +#define CLK_DDRPHY1X_HWFFC_SRC 47 +#define CLK_DDR1X 48 +#define CLK_MSCH 49 +#define CLK24_DDRMON 50 +#define ACLK_GIC_AUDIO 51 +#define HCLK_GIC_AUDIO 52 +#define HCLK_SDMMC_BUFFER 53 +#define DCLK_SDMMC_BUFFER 54 +#define ACLK_GIC600 55 +#define ACLK_SPINLOCK 56 +#define HCLK_I2S0_8CH 57 +#define HCLK_I2S1_8CH 58 +#define HCLK_I2S2_2CH 59 +#define HCLK_I2S3_2CH 60 +#define CLK_I2S0_8CH_TX_SRC 61 +#define CLK_I2S0_8CH_TX_FRAC 62 +#define MCLK_I2S0_8CH_TX 63 +#define I2S0_MCLKOUT_TX 64 +#define CLK_I2S0_8CH_RX_SRC 65 +#define CLK_I2S0_8CH_RX_FRAC 66 +#define MCLK_I2S0_8CH_RX 67 +#define 
I2S0_MCLKOUT_RX 68 +#define CLK_I2S1_8CH_TX_SRC 69 +#define CLK_I2S1_8CH_TX_FRAC 70 +#define MCLK_I2S1_8CH_TX 71 +#define I2S1_MCLKOUT_TX 72 +#define CLK_I2S1_8CH_RX_SRC 73 +#define CLK_I2S1_8CH_RX_FRAC 74 +#define MCLK_I2S1_8CH_RX 75 +#define I2S1_MCLKOUT_RX 76 +#define CLK_I2S2_2CH_SRC 77 +#define CLK_I2S2_2CH_FRAC 78 +#define MCLK_I2S2_2CH 79 +#define I2S2_MCLKOUT 80 +#define CLK_I2S3_2CH_TX_SRC 81 +#define CLK_I2S3_2CH_TX_FRAC 82 +#define MCLK_I2S3_2CH_TX 83 +#define I2S3_MCLKOUT_TX 84 +#define CLK_I2S3_2CH_RX_SRC 85 +#define CLK_I2S3_2CH_RX_FRAC 86 +#define MCLK_I2S3_2CH_RX 87 +#define I2S3_MCLKOUT_RX 88 +#define HCLK_PDM 89 +#define MCLK_PDM 90 +#define HCLK_VAD 91 +#define HCLK_SPDIF_8CH 92 +#define MCLK_SPDIF_8CH_SRC 93 +#define MCLK_SPDIF_8CH_FRAC 94 +#define MCLK_SPDIF_8CH 95 +#define HCLK_AUDPWM 96 +#define SCLK_AUDPWM_SRC 97 +#define SCLK_AUDPWM_FRAC 98 +#define SCLK_AUDPWM 99 +#define HCLK_ACDCDIG 100 +#define CLK_ACDCDIG_I2C 101 +#define CLK_ACDCDIG_DAC 102 +#define CLK_ACDCDIG_ADC 103 +#define ACLK_SECURE_FLASH 104 +#define HCLK_SECURE_FLASH 105 +#define ACLK_CRYPTO_NS 106 +#define HCLK_CRYPTO_NS 107 +#define CLK_CRYPTO_NS_CORE 108 +#define CLK_CRYPTO_NS_PKA 109 +#define CLK_CRYPTO_NS_RNG 110 +#define HCLK_TRNG_NS 111 +#define CLK_TRNG_NS 112 +#define PCLK_OTPC_NS 113 +#define CLK_OTPC_NS_SBPI 114 +#define CLK_OTPC_NS_USR 115 +#define HCLK_NANDC 116 +#define NCLK_NANDC 117 +#define HCLK_SFC 118 +#define HCLK_SFC_XIP 119 +#define SCLK_SFC 120 +#define ACLK_EMMC 121 +#define HCLK_EMMC 122 +#define BCLK_EMMC 123 +#define CCLK_EMMC 124 +#define TCLK_EMMC 125 +#define ACLK_PIPE 126 +#define PCLK_PIPE 127 +#define PCLK_PIPE_GRF 128 +#define ACLK_PCIE20_MST 129 +#define ACLK_PCIE20_SLV 130 +#define ACLK_PCIE20_DBI 131 +#define PCLK_PCIE20 132 +#define CLK_PCIE20_AUX_NDFT 133 +#define CLK_PCIE20_AUX_DFT 134 +#define CLK_PCIE20_PIPE_DFT 135 +#define ACLK_PCIE30X1_MST 136 +#define ACLK_PCIE30X1_SLV 137 +#define ACLK_PCIE30X1_DBI 138 +#define PCLK_PCIE30X1 139 +#define CLK_PCIE30X1_AUX_NDFT 140 +#define CLK_PCIE30X1_AUX_DFT 141 +#define CLK_PCIE30X1_PIPE_DFT 142 +#define ACLK_PCIE30X2_MST 143 +#define ACLK_PCIE30X2_SLV 144 +#define ACLK_PCIE30X2_DBI 145 +#define PCLK_PCIE30X2 146 +#define CLK_PCIE30X2_AUX_NDFT 147 +#define CLK_PCIE30X2_AUX_DFT 148 +#define CLK_PCIE30X2_PIPE_DFT 149 +#define ACLK_SATA0 150 +#define CLK_SATA0_PMALIVE 151 +#define CLK_SATA0_RXOOB 152 +#define CLK_SATA0_PIPE_NDFT 153 +#define CLK_SATA0_PIPE_DFT 154 +#define ACLK_SATA1 155 +#define CLK_SATA1_PMALIVE 156 +#define CLK_SATA1_RXOOB 157 +#define CLK_SATA1_PIPE_NDFT 158 +#define CLK_SATA1_PIPE_DFT 159 +#define ACLK_SATA2 160 +#define CLK_SATA2_PMALIVE 161 +#define CLK_SATA2_RXOOB 162 +#define CLK_SATA2_PIPE_NDFT 163 +#define CLK_SATA2_PIPE_DFT 164 +#define ACLK_USB3OTG0 165 +#define CLK_USB3OTG0_REF 166 +#define CLK_USB3OTG0_SUSPEND 167 +#define ACLK_USB3OTG1 168 +#define CLK_USB3OTG1_REF 169 +#define CLK_USB3OTG1_SUSPEND 170 +#define CLK_XPCS_EEE 171 +#define PCLK_XPCS 172 +#define ACLK_PHP 173 +#define HCLK_PHP 174 +#define PCLK_PHP 175 +#define HCLK_SDMMC0 176 +#define CLK_SDMMC0 177 +#define HCLK_SDMMC1 178 +#define CLK_SDMMC1 179 +#define ACLK_GMAC0 180 +#define PCLK_GMAC0 181 +#define CLK_MAC0_2TOP 182 +#define CLK_MAC0_OUT 183 +#define CLK_MAC0_REFOUT 184 +#define CLK_GMAC0_PTP_REF 185 +#define ACLK_USB 186 +#define HCLK_USB 187 +#define PCLK_USB 188 +#define HCLK_USB2HOST0 189 +#define HCLK_USB2HOST0_ARB 190 +#define HCLK_USB2HOST1 191 +#define HCLK_USB2HOST1_ARB 192 +#define HCLK_SDMMC2 193 +#define 
CLK_SDMMC2 194 +#define ACLK_GMAC1 195 +#define PCLK_GMAC1 196 +#define CLK_MAC1_2TOP 197 +#define CLK_MAC1_OUT 198 +#define CLK_MAC1_REFOUT 199 +#define CLK_GMAC1_PTP_REF 200 +#define ACLK_PERIMID 201 +#define HCLK_PERIMID 202 +#define ACLK_VI 203 +#define HCLK_VI 204 +#define PCLK_VI 205 +#define ACLK_VICAP 206 +#define HCLK_VICAP 207 +#define DCLK_VICAP 208 +#define ICLK_VICAP_G 209 +#define ACLK_ISP 210 +#define HCLK_ISP 211 +#define CLK_ISP 212 +#define PCLK_CSI2HOST1 213 +#define CLK_CIF_OUT 214 +#define CLK_CAM0_OUT 215 +#define CLK_CAM1_OUT 216 +#define ACLK_VO 217 +#define HCLK_VO 218 +#define PCLK_VO 219 +#define ACLK_VOP_PRE 220 +#define ACLK_VOP 221 +#define HCLK_VOP 222 +#define DCLK_VOP0 223 +#define DCLK_VOP1 224 +#define DCLK_VOP2 225 +#define CLK_VOP_PWM 226 +#define ACLK_HDCP 227 +#define HCLK_HDCP 228 +#define PCLK_HDCP 229 +#define PCLK_HDMI_HOST 230 +#define CLK_HDMI_SFR 231 +#define PCLK_DSITX_0 232 +#define PCLK_DSITX_1 233 +#define PCLK_EDP_CTRL 234 +#define CLK_EDP_200M 235 +#define ACLK_VPU_PRE 236 +#define HCLK_VPU_PRE 237 +#define ACLK_VPU 238 +#define HCLK_VPU 239 +#define ACLK_RGA_PRE 240 +#define HCLK_RGA_PRE 241 +#define PCLK_RGA_PRE 242 +#define ACLK_RGA 243 +#define HCLK_RGA 244 +#define CLK_RGA_CORE 245 +#define ACLK_IEP 246 +#define HCLK_IEP 247 +#define CLK_IEP_CORE 248 +#define HCLK_EBC 249 +#define DCLK_EBC 250 +#define ACLK_JDEC 251 +#define HCLK_JDEC 252 +#define ACLK_JENC 253 +#define HCLK_JENC 254 +#define PCLK_EINK 255 +#define HCLK_EINK 256 +#define ACLK_RKVENC_PRE 257 +#define HCLK_RKVENC_PRE 258 +#define ACLK_RKVENC 259 +#define HCLK_RKVENC 260 +#define CLK_RKVENC_CORE 261 +#define ACLK_RKVDEC_PRE 262 +#define HCLK_RKVDEC_PRE 263 +#define ACLK_RKVDEC 264 +#define HCLK_RKVDEC 265 +#define CLK_RKVDEC_CA 266 +#define CLK_RKVDEC_CORE 267 +#define CLK_RKVDEC_HEVC_CA 268 +#define ACLK_BUS 269 +#define PCLK_BUS 270 +#define PCLK_TSADC 271 +#define CLK_TSADC_TSEN 272 +#define CLK_TSADC 273 +#define PCLK_SARADC 274 +#define CLK_SARADC 275 +#define PCLK_SCR 276 +#define PCLK_WDT_NS 277 +#define TCLK_WDT_NS 278 +#define ACLK_DMAC0 279 +#define ACLK_DMAC1 280 +#define ACLK_MCU 281 +#define PCLK_INTMUX 282 +#define PCLK_MAILBOX 283 +#define PCLK_UART1 284 +#define CLK_UART1_SRC 285 +#define CLK_UART1_FRAC 286 +#define SCLK_UART1 287 +#define PCLK_UART2 288 +#define CLK_UART2_SRC 289 +#define CLK_UART2_FRAC 290 +#define SCLK_UART2 291 +#define PCLK_UART3 292 +#define CLK_UART3_SRC 293 +#define CLK_UART3_FRAC 294 +#define SCLK_UART3 295 +#define PCLK_UART4 296 +#define CLK_UART4_SRC 297 +#define CLK_UART4_FRAC 298 +#define SCLK_UART4 299 +#define PCLK_UART5 300 +#define CLK_UART5_SRC 301 +#define CLK_UART5_FRAC 302 +#define SCLK_UART5 303 +#define PCLK_UART6 304 +#define CLK_UART6_SRC 305 +#define CLK_UART6_FRAC 306 +#define SCLK_UART6 307 +#define PCLK_UART7 308 +#define CLK_UART7_SRC 309 +#define CLK_UART7_FRAC 310 +#define SCLK_UART7 311 +#define PCLK_UART8 312 +#define CLK_UART8_SRC 313 +#define CLK_UART8_FRAC 314 +#define SCLK_UART8 315 +#define PCLK_UART9 316 +#define CLK_UART9_SRC 317 +#define CLK_UART9_FRAC 318 +#define SCLK_UART9 319 +#define PCLK_CAN0 320 +#define CLK_CAN0 321 +#define PCLK_CAN1 322 +#define CLK_CAN1 323 +#define PCLK_CAN2 324 +#define CLK_CAN2 325 +#define CLK_I2C 326 +#define PCLK_I2C1 327 +#define CLK_I2C1 328 +#define PCLK_I2C2 329 +#define CLK_I2C2 330 +#define PCLK_I2C3 331 +#define CLK_I2C3 332 +#define PCLK_I2C4 333 +#define CLK_I2C4 334 +#define PCLK_I2C5 335 +#define CLK_I2C5 336 +#define PCLK_SPI0 337 +#define CLK_SPI0 
338 +#define PCLK_SPI1 339 +#define CLK_SPI1 340 +#define PCLK_SPI2 341 +#define CLK_SPI2 342 +#define PCLK_SPI3 343 +#define CLK_SPI3 344 +#define PCLK_PWM1 345 +#define CLK_PWM1 346 +#define CLK_PWM1_CAPTURE 347 +#define PCLK_PWM2 348 +#define CLK_PWM2 349 +#define CLK_PWM2_CAPTURE 350 +#define PCLK_PWM3 351 +#define CLK_PWM3 352 +#define CLK_PWM3_CAPTURE 353 +#define DBCLK_GPIO 354 +#define PCLK_GPIO1 355 +#define DBCLK_GPIO1 356 +#define PCLK_GPIO2 357 +#define DBCLK_GPIO2 358 +#define PCLK_GPIO3 359 +#define DBCLK_GPIO3 360 +#define PCLK_GPIO4 361 +#define DBCLK_GPIO4 362 +#define OCC_SCAN_CLK_GPIO 363 +#define PCLK_TIMER 364 +#define CLK_TIMER0 365 +#define CLK_TIMER1 366 +#define CLK_TIMER2 367 +#define CLK_TIMER3 368 +#define CLK_TIMER4 369 +#define CLK_TIMER5 370 +#define ACLK_TOP_HIGH 371 +#define ACLK_TOP_LOW 372 +#define HCLK_TOP 373 +#define PCLK_TOP 374 +#define PCLK_PCIE30PHY 375 +#define CLK_OPTC_ARB 376 +#define PCLK_MIPICSIPHY 377 +#define PCLK_MIPIDSIPHY0 378 +#define PCLK_MIPIDSIPHY1 379 +#define PCLK_PIPEPHY0 380 +#define PCLK_PIPEPHY1 381 +#define PCLK_PIPEPHY2 382 +#define PCLK_CPU_BOOST 383 +#define CLK_CPU_BOOST 384 +#define PCLK_OTPPHY 385 +#define SCLK_GMAC0 386 +#define SCLK_GMAC0_RGMII_SPEED 387 +#define SCLK_GMAC0_RMII_SPEED 388 +#define SCLK_GMAC0_RX_TX 389 +#define SCLK_GMAC1 390 +#define SCLK_GMAC1_RGMII_SPEED 391 +#define SCLK_GMAC1_RMII_SPEED 392 +#define SCLK_GMAC1_RX_TX 393 +#define SCLK_SDMMC0_DRV 394 +#define SCLK_SDMMC0_SAMPLE 395 +#define SCLK_SDMMC1_DRV 396 +#define SCLK_SDMMC1_SAMPLE 397 +#define SCLK_SDMMC2_DRV 398 +#define SCLK_SDMMC2_SAMPLE 399 +#define SCLK_EMMC_DRV 400 +#define SCLK_EMMC_SAMPLE 401 +#define PCLK_EDPPHY_GRF 402 +#define CLK_HDMI_CEC 403 +#define CLK_I2S0_8CH_TX 404 +#define CLK_I2S0_8CH_RX 405 +#define CLK_I2S1_8CH_TX 406 +#define CLK_I2S1_8CH_RX 407 +#define CLK_I2S2_2CH 408 +#define CLK_I2S3_2CH_TX 409 +#define CLK_I2S3_2CH_RX 410 +#define CPLL_500M 411 +#define CPLL_250M 412 +#define CPLL_125M 413 +#define CPLL_62P5M 414 +#define CPLL_50M 415 +#define CPLL_25M 416 +#define CPLL_100M 417 +#define SCLK_DDRCLK 418 + +#define PCLK_CORE_PVTM 450 + +#define CLK_NR_CLKS (PCLK_CORE_PVTM + 1) + +/* pmu soft-reset indices */ +/* pmucru_softrst_con0 */ +#define SRST_P_PDPMU_NIU 0 +#define SRST_P_PMUCRU 1 +#define SRST_P_PMUGRF 2 +#define SRST_P_I2C0 3 +#define SRST_I2C0 4 +#define SRST_P_UART0 5 +#define SRST_S_UART0 6 +#define SRST_P_PWM0 7 +#define SRST_PWM0 8 +#define SRST_P_GPIO0 9 +#define SRST_GPIO0 10 +#define SRST_P_PMUPVTM 11 +#define SRST_PMUPVTM 12 + +/* soft-reset indices */ + +/* cru_softrst_con0 */ +#define SRST_NCORERESET0 0 +#define SRST_NCORERESET1 1 +#define SRST_NCORERESET2 2 +#define SRST_NCORERESET3 3 +#define SRST_NCPUPORESET0 4 +#define SRST_NCPUPORESET1 5 +#define SRST_NCPUPORESET2 6 +#define SRST_NCPUPORESET3 7 +#define SRST_NSRESET 8 +#define SRST_NSPORESET 9 +#define SRST_NATRESET 10 +#define SRST_NGICRESET 11 +#define SRST_NPRESET 12 +#define SRST_NPERIPHRESET 13 + +/* cru_softrst_con1 */ +#define SRST_A_CORE_NIU2DDR 16 +#define SRST_A_CORE_NIU2BUS 17 +#define SRST_P_DBG_NIU 18 +#define SRST_P_DBG 19 +#define SRST_P_DBG_DAPLITE 20 +#define SRST_DAP 21 +#define SRST_A_ADB400_CORE2GIC 22 +#define SRST_A_ADB400_GIC2CORE 23 +#define SRST_P_CORE_GRF 24 +#define SRST_P_CORE_PVTM 25 +#define SRST_CORE_PVTM 26 +#define SRST_CORE_PVTPLL 27 + +/* cru_softrst_con2 */ +#define SRST_GPU 32 +#define SRST_A_GPU_NIU 33 +#define SRST_P_GPU_NIU 34 +#define SRST_P_GPU_PVTM 35 +#define SRST_GPU_PVTM 36 +#define SRST_GPU_PVTPLL 
37 +#define SRST_A_NPU_NIU 40 +#define SRST_H_NPU_NIU 41 +#define SRST_P_NPU_NIU 42 +#define SRST_A_NPU 43 +#define SRST_H_NPU 44 +#define SRST_P_NPU_PVTM 45 +#define SRST_NPU_PVTM 46 +#define SRST_NPU_PVTPLL 47 + +/* cru_softrst_con3 */ +#define SRST_A_MSCH 51 +#define SRST_HWFFC_CTRL 52 +#define SRST_DDR_ALWAYSON 53 +#define SRST_A_DDRSPLIT 54 +#define SRST_DDRDFI_CTL 55 +#define SRST_A_DMA2DDR 57 + +/* cru_softrst_con4 */ +#define SRST_A_PERIMID_NIU 64 +#define SRST_H_PERIMID_NIU 65 +#define SRST_A_GIC_AUDIO_NIU 66 +#define SRST_H_GIC_AUDIO_NIU 67 +#define SRST_A_GIC600 68 +#define SRST_A_GIC600_DEBUG 69 +#define SRST_A_GICADB_CORE2GIC 70 +#define SRST_A_GICADB_GIC2CORE 71 +#define SRST_A_SPINLOCK 72 +#define SRST_H_SDMMC_BUFFER 73 +#define SRST_D_SDMMC_BUFFER 74 +#define SRST_H_I2S0_8CH 75 +#define SRST_H_I2S1_8CH 76 +#define SRST_H_I2S2_2CH 77 +#define SRST_H_I2S3_2CH 78 + +/* cru_softrst_con5 */ +#define SRST_M_I2S0_8CH_TX 80 +#define SRST_M_I2S0_8CH_RX 81 +#define SRST_M_I2S1_8CH_TX 82 +#define SRST_M_I2S1_8CH_RX 83 +#define SRST_M_I2S2_2CH 84 +#define SRST_M_I2S3_2CH_TX 85 +#define SRST_M_I2S3_2CH_RX 86 +#define SRST_H_PDM 87 +#define SRST_M_PDM 88 +#define SRST_H_VAD 89 +#define SRST_H_SPDIF_8CH 90 +#define SRST_M_SPDIF_8CH 91 +#define SRST_H_AUDPWM 92 +#define SRST_S_AUDPWM 93 +#define SRST_H_ACDCDIG 94 +#define SRST_ACDCDIG 95 + +/* cru_softrst_con6 */ +#define SRST_A_SECURE_FLASH_NIU 96 +#define SRST_H_SECURE_FLASH_NIU 97 +#define SRST_A_CRYPTO_NS 103 +#define SRST_H_CRYPTO_NS 104 +#define SRST_CRYPTO_NS_CORE 105 +#define SRST_CRYPTO_NS_PKA 106 +#define SRST_CRYPTO_NS_RNG 107 +#define SRST_H_TRNG_NS 108 +#define SRST_TRNG_NS 109 + +/* cru_softrst_con7 */ +#define SRST_H_NANDC 112 +#define SRST_N_NANDC 113 +#define SRST_H_SFC 114 +#define SRST_H_SFC_XIP 115 +#define SRST_S_SFC 116 +#define SRST_A_EMMC 117 +#define SRST_H_EMMC 118 +#define SRST_B_EMMC 119 +#define SRST_C_EMMC 120 +#define SRST_T_EMMC 121 + +/* cru_softrst_con8 */ +#define SRST_A_PIPE_NIU 128 +#define SRST_P_PIPE_NIU 130 +#define SRST_P_PIPE_GRF 133 +#define SRST_A_SATA0 134 +#define SRST_SATA0_PIPE 135 +#define SRST_SATA0_PMALIVE 136 +#define SRST_SATA0_RXOOB 137 +#define SRST_A_SATA1 138 +#define SRST_SATA1_PIPE 139 +#define SRST_SATA1_PMALIVE 140 +#define SRST_SATA1_RXOOB 141 + +/* cru_softrst_con9 */ +#define SRST_A_SATA2 144 +#define SRST_SATA2_PIPE 145 +#define SRST_SATA2_PMALIVE 146 +#define SRST_SATA2_RXOOB 147 +#define SRST_USB3OTG0 148 +#define SRST_USB3OTG1 149 +#define SRST_XPCS 150 +#define SRST_XPCS_TX_DIV10 151 +#define SRST_XPCS_RX_DIV10 152 +#define SRST_XPCS_XGXS_RX 153 + +/* cru_softrst_con10 */ +#define SRST_P_PCIE20 160 +#define SRST_PCIE20_POWERUP 161 +#define SRST_MSTR_ARESET_PCIE20 162 +#define SRST_SLV_ARESET_PCIE20 163 +#define SRST_DBI_ARESET_PCIE20 164 +#define SRST_BRESET_PCIE20 165 +#define SRST_PERST_PCIE20 166 +#define SRST_CORE_RST_PCIE20 167 +#define SRST_NSTICKY_RST_PCIE20 168 +#define SRST_STICKY_RST_PCIE20 169 +#define SRST_PWR_RST_PCIE20 170 + +/* cru_softrst_con11 */ +#define SRST_P_PCIE30X1 176 +#define SRST_PCIE30X1_POWERUP 177 +#define SRST_M_ARESET_PCIE30X1 178 +#define SRST_S_ARESET_PCIE30X1 179 +#define SRST_D_ARESET_PCIE30X1 180 +#define SRST_BRESET_PCIE30X1 181 +#define SRST_PERST_PCIE30X1 182 +#define SRST_CORE_RST_PCIE30X1 183 +#define SRST_NSTC_RST_PCIE30X1 184 +#define SRST_STC_RST_PCIE30X1 185 +#define SRST_PWR_RST_PCIE30X1 186 + +/* cru_softrst_con12 */ +#define SRST_P_PCIE30X2 192 +#define SRST_PCIE30X2_POWERUP 193 +#define SRST_M_ARESET_PCIE30X2 194 +#define 
SRST_S_ARESET_PCIE30X2 195 +#define SRST_D_ARESET_PCIE30X2 196 +#define SRST_BRESET_PCIE30X2 197 +#define SRST_PERST_PCIE30X2 198 +#define SRST_CORE_RST_PCIE30X2 199 +#define SRST_NSTC_RST_PCIE30X2 200 +#define SRST_STC_RST_PCIE30X2 201 +#define SRST_PWR_RST_PCIE30X2 202 + +/* cru_softrst_con13 */ +#define SRST_A_PHP_NIU 208 +#define SRST_H_PHP_NIU 209 +#define SRST_P_PHP_NIU 210 +#define SRST_H_SDMMC0 211 +#define SRST_SDMMC0 212 +#define SRST_H_SDMMC1 213 +#define SRST_SDMMC1 214 +#define SRST_A_GMAC0 215 +#define SRST_GMAC0_TIMESTAMP 216 + +/* cru_softrst_con14 */ +#define SRST_A_USB_NIU 224 +#define SRST_H_USB_NIU 225 +#define SRST_P_USB_NIU 226 +#define SRST_P_USB_GRF 227 +#define SRST_H_USB2HOST0 228 +#define SRST_H_USB2HOST0_ARB 229 +#define SRST_USB2HOST0_UTMI 230 +#define SRST_H_USB2HOST1 231 +#define SRST_H_USB2HOST1_ARB 232 +#define SRST_USB2HOST1_UTMI 233 +#define SRST_H_SDMMC2 234 +#define SRST_SDMMC2 235 +#define SRST_A_GMAC1 236 +#define SRST_GMAC1_TIMESTAMP 237 + +/* cru_softrst_con15 */ +#define SRST_A_VI_NIU 240 +#define SRST_H_VI_NIU 241 +#define SRST_P_VI_NIU 242 +#define SRST_A_VICAP 247 +#define SRST_H_VICAP 248 +#define SRST_D_VICAP 249 +#define SRST_I_VICAP 250 +#define SRST_P_VICAP 251 +#define SRST_H_ISP 252 +#define SRST_ISP 253 +#define SRST_P_CSI2HOST1 255 + +/* cru_softrst_con16 */ +#define SRST_A_VO_NIU 256 +#define SRST_H_VO_NIU 257 +#define SRST_P_VO_NIU 258 +#define SRST_A_VOP_NIU 259 +#define SRST_A_VOP 260 +#define SRST_H_VOP 261 +#define SRST_VOP0 262 +#define SRST_VOP1 263 +#define SRST_VOP2 264 +#define SRST_VOP_PWM 265 +#define SRST_A_HDCP 266 +#define SRST_H_HDCP 267 +#define SRST_P_HDCP 268 +#define SRST_P_HDMI_HOST 270 +#define SRST_HDMI_HOST 271 + +/* cru_softrst_con17 */ +#define SRST_P_DSITX_0 272 +#define SRST_P_DSITX_1 273 +#define SRST_P_EDP_CTRL 274 +#define SRST_EDP_24M 275 +#define SRST_A_VPU_NIU 280 +#define SRST_H_VPU_NIU 281 +#define SRST_A_VPU 282 +#define SRST_H_VPU 283 +#define SRST_H_EINK 286 +#define SRST_P_EINK 287 + +/* cru_softrst_con18 */ +#define SRST_A_RGA_NIU 288 +#define SRST_H_RGA_NIU 289 +#define SRST_P_RGA_NIU 290 +#define SRST_A_RGA 292 +#define SRST_H_RGA 293 +#define SRST_RGA_CORE 294 +#define SRST_A_IEP 295 +#define SRST_H_IEP 296 +#define SRST_IEP_CORE 297 +#define SRST_H_EBC 298 +#define SRST_D_EBC 299 +#define SRST_A_JDEC 300 +#define SRST_H_JDEC 301 +#define SRST_A_JENC 302 +#define SRST_H_JENC 303 + +/* cru_softrst_con19 */ +#define SRST_A_VENC_NIU 304 +#define SRST_H_VENC_NIU 305 +#define SRST_A_RKVENC 307 +#define SRST_H_RKVENC 308 +#define SRST_RKVENC_CORE 309 + +/* cru_softrst_con20 */ +#define SRST_A_RKVDEC_NIU 320 +#define SRST_H_RKVDEC_NIU 321 +#define SRST_A_RKVDEC 322 +#define SRST_H_RKVDEC 323 +#define SRST_RKVDEC_CA 324 +#define SRST_RKVDEC_CORE 325 +#define SRST_RKVDEC_HEVC_CA 326 + +/* cru_softrst_con21 */ +#define SRST_A_BUS_NIU 336 +#define SRST_P_BUS_NIU 338 +#define SRST_P_CAN0 340 +#define SRST_CAN0 341 +#define SRST_P_CAN1 342 +#define SRST_CAN1 343 +#define SRST_P_CAN2 344 +#define SRST_CAN2 345 +#define SRST_P_GPIO1 346 +#define SRST_GPIO1 347 +#define SRST_P_GPIO2 348 +#define SRST_GPIO2 349 +#define SRST_P_GPIO3 350 +#define SRST_GPIO3 351 + +/* cru_softrst_con22 */ +#define SRST_P_GPIO4 352 +#define SRST_GPIO4 353 +#define SRST_P_I2C1 354 +#define SRST_I2C1 355 +#define SRST_P_I2C2 356 +#define SRST_I2C2 357 +#define SRST_P_I2C3 358 +#define SRST_I2C3 359 +#define SRST_P_I2C4 360 +#define SRST_I2C4 361 +#define SRST_P_I2C5 362 +#define SRST_I2C5 363 +#define SRST_P_OTPC_NS 364 +#define 
SRST_OTPC_NS_SBPI 365 +#define SRST_OTPC_NS_USR 366 + +/* cru_softrst_con23 */ +#define SRST_P_PWM1 368 +#define SRST_PWM1 369 +#define SRST_P_PWM2 370 +#define SRST_PWM2 371 +#define SRST_P_PWM3 372 +#define SRST_PWM3 373 +#define SRST_P_SPI0 374 +#define SRST_SPI0 375 +#define SRST_P_SPI1 376 +#define SRST_SPI1 377 +#define SRST_P_SPI2 378 +#define SRST_SPI2 379 +#define SRST_P_SPI3 380 +#define SRST_SPI3 381 + +/* cru_softrst_con24 */ +#define SRST_P_SARADC 384 +#define SRST_P_TSADC 385 +#define SRST_TSADC 386 +#define SRST_P_TIMER 387 +#define SRST_TIMER0 388 +#define SRST_TIMER1 389 +#define SRST_TIMER2 390 +#define SRST_TIMER3 391 +#define SRST_TIMER4 392 +#define SRST_TIMER5 393 +#define SRST_P_UART1 394 +#define SRST_S_UART1 395 + +/* cru_softrst_con25 */ +#define SRST_P_UART2 400 +#define SRST_S_UART2 401 +#define SRST_P_UART3 402 +#define SRST_S_UART3 403 +#define SRST_P_UART4 404 +#define SRST_S_UART4 405 +#define SRST_P_UART5 406 +#define SRST_S_UART5 407 +#define SRST_P_UART6 408 +#define SRST_S_UART6 409 +#define SRST_P_UART7 410 +#define SRST_S_UART7 411 +#define SRST_P_UART8 412 +#define SRST_S_UART8 413 +#define SRST_P_UART9 414 +#define SRST_S_UART9 415 + +/* cru_softrst_con26 */ +#define SRST_P_GRF 416 +#define SRST_P_GRF_VCCIO12 417 +#define SRST_P_GRF_VCCIO34 418 +#define SRST_P_GRF_VCCIO567 419 +#define SRST_P_SCR 420 +#define SRST_P_WDT_NS 421 +#define SRST_T_WDT_NS 422 +#define SRST_P_DFT2APB 423 +#define SRST_A_MCU 426 +#define SRST_P_INTMUX 427 +#define SRST_P_MAILBOX 428 + +/* cru_softrst_con27 */ +#define SRST_A_TOP_HIGH_NIU 432 +#define SRST_A_TOP_LOW_NIU 433 +#define SRST_H_TOP_NIU 434 +#define SRST_P_TOP_NIU 435 +#define SRST_P_TOP_CRU 438 +#define SRST_P_DDRPHY 439 +#define SRST_DDRPHY 440 +#define SRST_P_MIPICSIPHY 442 +#define SRST_P_MIPIDSIPHY0 443 +#define SRST_P_MIPIDSIPHY1 444 +#define SRST_P_PCIE30PHY 445 +#define SRST_PCIE30PHY 446 +#define SRST_P_PCIE30PHY_GRF 447 + +/* cru_softrst_con28 */ +#define SRST_P_APB2ASB_LEFT 448 +#define SRST_P_APB2ASB_BOTTOM 449 +#define SRST_P_ASB2APB_LEFT 450 +#define SRST_P_ASB2APB_BOTTOM 451 +#define SRST_P_PIPEPHY0 452 +#define SRST_PIPEPHY0 453 +#define SRST_P_PIPEPHY1 454 +#define SRST_PIPEPHY1 455 +#define SRST_P_PIPEPHY2 456 +#define SRST_PIPEPHY2 457 +#define SRST_P_USB2PHY0_GRF 458 +#define SRST_P_USB2PHY1_GRF 459 +#define SRST_P_CPU_BOOST 460 +#define SRST_CPU_BOOST 461 +#define SRST_P_OTPPHY 462 +#define SRST_OTPPHY 463 + +/* cru_softrst_con29 */ +#define SRST_USB2PHY0_POR 464 +#define SRST_USB2PHY0_USB3OTG0 465 +#define SRST_USB2PHY0_USB3OTG1 466 +#define SRST_USB2PHY1_POR 467 +#define SRST_USB2PHY1_USB2HOST0 468 +#define SRST_USB2PHY1_USB2HOST1 469 +#define SRST_P_EDPPHY_GRF 470 +#define SRST_TSADCPHY 471 +#define SRST_GMAC0_DELAYLINE 472 +#define SRST_GMAC1_DELAYLINE 473 +#define SRST_OTPC_ARB 474 +#define SRST_P_PIPEPHY0_GRF 475 +#define SRST_P_PIPEPHY1_GRF 476 +#define SRST_P_PIPEPHY2_GRF 477 + +#endif diff --git a/include/dt-bindings/clock/sifive-fu740-prci.h b/include/dt-bindings/clock/sifive-fu740-prci.h index cd7706ea5677..7899b7fee7db 100644 --- a/include/dt-bindings/clock/sifive-fu740-prci.h +++ b/include/dt-bindings/clock/sifive-fu740-prci.h @@ -19,5 +19,6 @@ #define PRCI_CLK_CLTXPLL 5 #define PRCI_CLK_TLCLK 6 #define PRCI_CLK_PCLK 7 +#define PRCI_CLK_PCIE_AUX 8 #endif /* __DT_BINDINGS_CLOCK_SIFIVE_FU740_PRCI_H */ diff --git a/include/dt-bindings/input/atmel-maxtouch.h b/include/dt-bindings/input/atmel-maxtouch.h new file mode 100644 index 000000000000..7345ab32224d --- /dev/null +++ 
b/include/dt-bindings/input/atmel-maxtouch.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _DT_BINDINGS_ATMEL_MAXTOUCH_H +#define _DT_BINDINGS_ATMEL_MAXTOUCH_H + +#define ATMEL_MXT_WAKEUP_NONE 0 +#define ATMEL_MXT_WAKEUP_I2C_SCL 1 +#define ATMEL_MXT_WAKEUP_GPIO 2 + +#endif /* _DT_BINDINGS_ATMEL_MAXTOUCH_H */ diff --git a/include/dt-bindings/interconnect/qcom,sdm660.h b/include/dt-bindings/interconnect/qcom,sdm660.h new file mode 100644 index 000000000000..62e8d8670d5e --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,sdm660.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* SDM660 interconnect IDs */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDM660_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_SDM660_H + +/* A2NOC */ +#define MASTER_IPA 0 +#define MASTER_CNOC_A2NOC 1 +#define MASTER_SDCC_1 2 +#define MASTER_SDCC_2 3 +#define MASTER_BLSP_1 4 +#define MASTER_BLSP_2 5 +#define MASTER_UFS 6 +#define MASTER_USB_HS 7 +#define MASTER_USB3 8 +#define MASTER_CRYPTO_C0 9 +#define SLAVE_A2NOC_SNOC 10 + +/* BIMC */ +#define MASTER_GNOC_BIMC 0 +#define MASTER_OXILI 1 +#define MASTER_MNOC_BIMC 2 +#define MASTER_SNOC_BIMC 3 +#define MASTER_PIMEM 4 +#define SLAVE_EBI 5 +#define SLAVE_HMSS_L3 6 +#define SLAVE_BIMC_SNOC 7 + +/* CNOC */ +#define MASTER_SNOC_CNOC 0 +#define MASTER_QDSS_DAP 1 +#define SLAVE_CNOC_A2NOC 2 +#define SLAVE_MPM 3 +#define SLAVE_PMIC_ARB 4 +#define SLAVE_TLMM_NORTH 5 +#define SLAVE_TCSR 6 +#define SLAVE_PIMEM_CFG 7 +#define SLAVE_IMEM_CFG 8 +#define SLAVE_MESSAGE_RAM 9 +#define SLAVE_GLM 10 +#define SLAVE_BIMC_CFG 11 +#define SLAVE_PRNG 12 +#define SLAVE_SPDM 13 +#define SLAVE_QDSS_CFG 14 +#define SLAVE_CNOC_MNOC_CFG 15 +#define SLAVE_SNOC_CFG 16 +#define SLAVE_QM_CFG 17 +#define SLAVE_CLK_CTL 18 +#define SLAVE_MSS_CFG 19 +#define SLAVE_TLMM_SOUTH 20 +#define SLAVE_UFS_CFG 21 +#define SLAVE_A2NOC_CFG 22 +#define SLAVE_A2NOC_SMMU_CFG 23 +#define SLAVE_GPUSS_CFG 24 +#define SLAVE_AHB2PHY 25 +#define SLAVE_BLSP_1 26 +#define SLAVE_SDCC_1 27 +#define SLAVE_SDCC_2 28 +#define SLAVE_TLMM_CENTER 29 +#define SLAVE_BLSP_2 30 +#define SLAVE_PDM 31 +#define SLAVE_CNOC_MNOC_MMSS_CFG 32 +#define SLAVE_USB_HS 33 +#define SLAVE_USB3_0 34 +#define SLAVE_SRVC_CNOC 35 + +/* GNOC */ +#define MASTER_APSS_PROC 0 +#define SLAVE_GNOC_BIMC 1 +#define SLAVE_GNOC_SNOC 2 + +/* MNOC */ +#define MASTER_CPP 0 +#define MASTER_JPEG 1 +#define MASTER_MDP_P0 2 +#define MASTER_MDP_P1 3 +#define MASTER_VENUS 4 +#define MASTER_VFE 5 +#define SLAVE_MNOC_BIMC 6 +#define MASTER_CNOC_MNOC_MMSS_CFG 7 +#define MASTER_CNOC_MNOC_CFG 8 +#define SLAVE_CAMERA_CFG 9 +#define SLAVE_CAMERA_THROTTLE_CFG 10 +#define SLAVE_MISC_CFG 11 +#define SLAVE_VENUS_THROTTLE_CFG 12 +#define SLAVE_VENUS_CFG 13 +#define SLAVE_MMSS_CLK_XPU_CFG 14 +#define SLAVE_MMSS_CLK_CFG 15 +#define SLAVE_MNOC_MPU_CFG 16 +#define SLAVE_DISPLAY_CFG 17 +#define SLAVE_CSI_PHY_CFG 18 +#define SLAVE_DISPLAY_THROTTLE_CFG 19 +#define SLAVE_SMMU_CFG 20 +#define SLAVE_SRVC_MNOC 21 + +/* SNOC */ +#define MASTER_QDSS_ETR 0 +#define MASTER_QDSS_BAM 1 +#define MASTER_SNOC_CFG 2 +#define MASTER_BIMC_SNOC 3 +#define MASTER_A2NOC_SNOC 4 +#define MASTER_GNOC_SNOC 5 +#define SLAVE_HMSS 6 +#define SLAVE_LPASS 7 +#define SLAVE_WLAN 8 +#define SLAVE_CDSP 9 +#define SLAVE_IPA 10 +#define SLAVE_SNOC_BIMC 11 +#define SLAVE_SNOC_CNOC 12 +#define SLAVE_IMEM 13 +#define SLAVE_PIMEM 14 +#define SLAVE_QDSS_STM 15 +#define SLAVE_SRVC_SNOC 16 + +#endif diff --git a/include/dt-bindings/interconnect/qcom,sm8350.h 
b/include/dt-bindings/interconnect/qcom,sm8350.h new file mode 100644 index 000000000000..c7f7ed315aeb --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,sm8350.h @@ -0,0 +1,172 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Qualcomm SM8350 interconnect IDs + * + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2021, Linaro Limited + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM8350_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_SM8350_H + +#define MASTER_QSPI_0 0 +#define MASTER_QUP_1 1 +#define MASTER_A1NOC_CFG 2 +#define MASTER_SDCC_4 3 +#define MASTER_UFS_MEM 4 +#define MASTER_USB3_0 5 +#define MASTER_USB3_1 6 +#define SLAVE_A1NOC_SNOC 7 +#define SLAVE_SERVICE_A1NOC 8 + +#define MASTER_QDSS_BAM 0 +#define MASTER_QUP_0 1 +#define MASTER_QUP_2 2 +#define MASTER_A2NOC_CFG 3 +#define MASTER_CRYPTO 4 +#define MASTER_IPA 5 +#define MASTER_PCIE_0 6 +#define MASTER_PCIE_1 7 +#define MASTER_QDSS_ETR 8 +#define MASTER_SDCC_2 9 +#define MASTER_UFS_CARD 10 +#define SLAVE_A2NOC_SNOC 11 +#define SLAVE_ANOC_PCIE_GEM_NOC 12 +#define SLAVE_SERVICE_A2NOC 13 + +#define MASTER_GEM_NOC_CNOC 0 +#define MASTER_GEM_NOC_PCIE_SNOC 1 +#define MASTER_QDSS_DAP 2 +#define SLAVE_AHB2PHY_SOUTH 3 +#define SLAVE_AHB2PHY_NORTH 4 +#define SLAVE_AOSS 5 +#define SLAVE_APPSS 6 +#define SLAVE_CAMERA_CFG 7 +#define SLAVE_CLK_CTL 8 +#define SLAVE_CDSP_CFG 9 +#define SLAVE_RBCPR_CX_CFG 10 +#define SLAVE_RBCPR_MMCX_CFG 11 +#define SLAVE_RBCPR_MX_CFG 12 +#define SLAVE_CRYPTO_0_CFG 13 +#define SLAVE_CX_RDPM 14 +#define SLAVE_DCC_CFG 15 +#define SLAVE_DISPLAY_CFG 16 +#define SLAVE_GFX3D_CFG 17 +#define SLAVE_HWKM 18 +#define SLAVE_IMEM_CFG 19 +#define SLAVE_IPA_CFG 20 +#define SLAVE_IPC_ROUTER_CFG 21 +#define SLAVE_LPASS 22 +#define SLAVE_CNOC_MSS 23 +#define SLAVE_MX_RDPM 24 +#define SLAVE_PCIE_0_CFG 25 +#define SLAVE_PCIE_1_CFG 26 +#define SLAVE_PDM 27 +#define SLAVE_PIMEM_CFG 28 +#define SLAVE_PKA_WRAPPER_CFG 29 +#define SLAVE_PMU_WRAPPER_CFG 30 +#define SLAVE_QDSS_CFG 31 +#define SLAVE_QSPI_0 32 +#define SLAVE_QUP_0 33 +#define SLAVE_QUP_1 34 +#define SLAVE_QUP_2 35 +#define SLAVE_SDCC_2 36 +#define SLAVE_SDCC_4 37 +#define SLAVE_SECURITY 38 +#define SLAVE_SPSS_CFG 39 +#define SLAVE_TCSR 40 +#define SLAVE_TLMM 41 +#define SLAVE_UFS_CARD_CFG 42 +#define SLAVE_UFS_MEM_CFG 43 +#define SLAVE_USB3_0 44 +#define SLAVE_USB3_1 45 +#define SLAVE_VENUS_CFG 46 +#define SLAVE_VSENSE_CTRL_CFG 47 +#define SLAVE_A1NOC_CFG 48 +#define SLAVE_A2NOC_CFG 49 +#define SLAVE_DDRSS_CFG 50 +#define SLAVE_CNOC_MNOC_CFG 51 +#define SLAVE_SNOC_CFG 52 +#define SLAVE_BOOT_IMEM 53 +#define SLAVE_IMEM 54 +#define SLAVE_PIMEM 55 +#define SLAVE_SERVICE_CNOC 56 +#define SLAVE_PCIE_0 57 +#define SLAVE_PCIE_1 58 +#define SLAVE_QDSS_STM 59 +#define SLAVE_TCU 60 + +#define MASTER_CNOC_DC_NOC 0 +#define SLAVE_LLCC_CFG 1 +#define SLAVE_GEM_NOC_CFG 2 + +#define MASTER_GPU_TCU 0 +#define MASTER_SYS_TCU 1 +#define MASTER_APPSS_PROC 2 +#define MASTER_COMPUTE_NOC 3 +#define MASTER_GEM_NOC_CFG 4 +#define MASTER_GFX3D 5 +#define MASTER_MNOC_HF_MEM_NOC 6 +#define MASTER_MNOC_SF_MEM_NOC 7 +#define MASTER_ANOC_PCIE_GEM_NOC 8 +#define MASTER_SNOC_GC_MEM_NOC 9 +#define MASTER_SNOC_SF_MEM_NOC 10 +#define SLAVE_MSS_PROC_MS_MPU_CFG 11 +#define SLAVE_MCDMA_MS_MPU_CFG 12 +#define SLAVE_GEM_NOC_CNOC 13 +#define SLAVE_LLCC 14 +#define SLAVE_MEM_NOC_PCIE_SNOC 15 +#define SLAVE_SERVICE_GEM_NOC_1 16 +#define SLAVE_SERVICE_GEM_NOC_2 17 +#define SLAVE_SERVICE_GEM_NOC 18 +#define MASTER_MNOC_HF_MEM_NOC_DISP 19 +#define 
MASTER_MNOC_SF_MEM_NOC_DISP 20 +#define SLAVE_LLCC_DISP 21 + +#define MASTER_CNOC_LPASS_AG_NOC 0 +#define SLAVE_LPASS_CORE_CFG 1 +#define SLAVE_LPASS_LPI_CFG 2 +#define SLAVE_LPASS_MPU_CFG 3 +#define SLAVE_LPASS_TOP_CFG 4 +#define SLAVE_SERVICES_LPASS_AML_NOC 5 +#define SLAVE_SERVICE_LPASS_AG_NOC 6 + +#define MASTER_LLCC 0 +#define SLAVE_EBI1 1 +#define MASTER_LLCC_DISP 2 +#define SLAVE_EBI1_DISP 3 + +#define MASTER_CAMNOC_HF 0 +#define MASTER_CAMNOC_ICP 1 +#define MASTER_CAMNOC_SF 2 +#define MASTER_CNOC_MNOC_CFG 3 +#define MASTER_VIDEO_P0 4 +#define MASTER_VIDEO_P1 5 +#define MASTER_VIDEO_PROC 6 +#define MASTER_MDP0 7 +#define MASTER_MDP1 8 +#define MASTER_ROTATOR 9 +#define SLAVE_MNOC_HF_MEM_NOC 10 +#define SLAVE_MNOC_SF_MEM_NOC 11 +#define SLAVE_SERVICE_MNOC 12 +#define MASTER_MDP0_DISP 13 +#define MASTER_MDP1_DISP 14 +#define MASTER_ROTATOR_DISP 15 +#define SLAVE_MNOC_HF_MEM_NOC_DISP 16 +#define SLAVE_MNOC_SF_MEM_NOC_DISP 17 + +#define MASTER_CDSP_NOC_CFG 0 +#define MASTER_CDSP_PROC 1 +#define SLAVE_CDSP_MEM_NOC 2 +#define SLAVE_SERVICE_NSP_NOC 3 + +#define MASTER_A1NOC_SNOC 0 +#define MASTER_A2NOC_SNOC 1 +#define MASTER_SNOC_CFG 2 +#define MASTER_PIMEM 3 +#define MASTER_GIC 4 +#define SLAVE_SNOC_GEM_NOC_GC 5 +#define SLAVE_SNOC_GEM_NOC_SF 6 +#define SLAVE_SERVICE_SNOC 7 + +#endif diff --git a/include/dt-bindings/interrupt-controller/apple-aic.h b/include/dt-bindings/interrupt-controller/apple-aic.h new file mode 100644 index 000000000000..604f2bb30ac0 --- /dev/null +++ b/include/dt-bindings/interrupt-controller/apple-aic.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0+ OR MIT */ +#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_APPLE_AIC_H +#define _DT_BINDINGS_INTERRUPT_CONTROLLER_APPLE_AIC_H + +#include <dt-bindings/interrupt-controller/irq.h> + +#define AIC_IRQ 0 +#define AIC_FIQ 1 + +#define AIC_TMR_HV_PHYS 0 +#define AIC_TMR_HV_VIRT 1 +#define AIC_TMR_GUEST_PHYS 2 +#define AIC_TMR_GUEST_VIRT 3 + +#endif diff --git a/include/dt-bindings/mux/ti-serdes.h b/include/dt-bindings/mux/ti-serdes.h index 9047ec6bd3cf..d417b9268b16 100644 --- a/include/dt-bindings/mux/ti-serdes.h +++ b/include/dt-bindings/mux/ti-serdes.h @@ -90,4 +90,9 @@ #define J7200_SERDES0_LANE3_USB 0x2 #define J7200_SERDES0_LANE3_IP4_UNUSED 0x3 +/* AM64 */ + +#define AM64_SERDES0_LANE0_PCIE0 0x0 +#define AM64_SERDES0_LANE0_USB 0x1 + #endif /* _DT_BINDINGS_MUX_TI_SERDES */ diff --git a/include/dt-bindings/phy/phy-cadence-torrent.h b/include/dt-bindings/phy/phy-cadence-torrent.h deleted file mode 100644 index e387b6a95741..000000000000 --- a/include/dt-bindings/phy/phy-cadence-torrent.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * This header provides constants for Cadence Torrent SERDES. - */ - -#ifndef _DT_BINDINGS_TORRENT_SERDES_H -#define _DT_BINDINGS_TORRENT_SERDES_H - -#define TORRENT_SERDES_NO_SSC 0 -#define TORRENT_SERDES_EXTERNAL_SSC 1 -#define TORRENT_SERDES_INTERNAL_SSC 2 - -#endif /* _DT_BINDINGS_TORRENT_SERDES_H */ diff --git a/include/dt-bindings/phy/phy-cadence.h b/include/dt-bindings/phy/phy-cadence.h new file mode 100644 index 000000000000..4652bcb86265 --- /dev/null +++ b/include/dt-bindings/phy/phy-cadence.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for Cadence SERDES. 
+ */ + +#ifndef _DT_BINDINGS_CADENCE_SERDES_H +#define _DT_BINDINGS_CADENCE_SERDES_H + +/* Torrent */ +#define TORRENT_SERDES_NO_SSC 0 +#define TORRENT_SERDES_EXTERNAL_SSC 1 +#define TORRENT_SERDES_INTERNAL_SSC 2 + +#define CDNS_TORRENT_REFCLK_DRIVER 0 + +/* Sierra */ +#define CDNS_SIERRA_PLL_CMNLC 0 +#define CDNS_SIERRA_PLL_CMNLC1 1 + +#endif /* _DT_BINDINGS_CADENCE_SERDES_H */ diff --git a/include/dt-bindings/phy/phy-ti.h b/include/dt-bindings/phy/phy-ti.h new file mode 100644 index 000000000000..ad955d3a56b4 --- /dev/null +++ b/include/dt-bindings/phy/phy-ti.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for TI SERDES. + */ + +#ifndef _DT_BINDINGS_TI_SERDES +#define _DT_BINDINGS_TI_SERDES + +/* Clock index for output clocks from WIZ */ + +/* MUX Clocks */ +#define TI_WIZ_PLL0_REFCLK 0 +#define TI_WIZ_PLL1_REFCLK 1 +#define TI_WIZ_REFCLK_DIG 2 + +/* Reserve index here for future additions */ + +/* MISC Clocks */ +#define TI_WIZ_PHY_EN_REFCLK 16 + +#endif /* _DT_BINDINGS_TI_SERDES */ diff --git a/include/dt-bindings/pinctrl/k3.h b/include/dt-bindings/pinctrl/k3.h index b0eea7cc6e23..e085f102b283 100644 --- a/include/dt-bindings/pinctrl/k3.h +++ b/include/dt-bindings/pinctrl/k3.h @@ -3,7 +3,7 @@ * This header provides constants for pinctrl bindings for TI's K3 SoC * family. * - * Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com/ + * Copyright (C) 2018-2021 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef _DT_BINDINGS_PINCTRL_TI_K3_H #define _DT_BINDINGS_PINCTRL_TI_K3_H @@ -35,4 +35,7 @@ #define J721E_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode)) #define J721E_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode)) +#define AM64X_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode)) +#define AM64X_MCU_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode)) + #endif diff --git a/include/dt-bindings/pinctrl/mt8195-pinfunc.h b/include/dt-bindings/pinctrl/mt8195-pinfunc.h new file mode 100644 index 000000000000..666331bb9b40 --- /dev/null +++ b/include/dt-bindings/pinctrl/mt8195-pinfunc.h @@ -0,0 +1,962 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 MediaTek Inc. 
+ * Author: Zhiyong Tao <zhiyong.tao@mediatek.com> + */ + +#ifndef __MT8195_PINFUNC_H +#define __MT8195_PINFUNC_H + +#include "mt65xx.h" + +#define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0) +#define PINMUX_GPIO0__FUNC_TP_GPIO0_AO (MTK_PIN_NO(0) | 1) +#define PINMUX_GPIO0__FUNC_MSDC2_CMD (MTK_PIN_NO(0) | 2) +#define PINMUX_GPIO0__FUNC_TDMIN_MCK (MTK_PIN_NO(0) | 3) +#define PINMUX_GPIO0__FUNC_CLKM0 (MTK_PIN_NO(0) | 4) +#define PINMUX_GPIO0__FUNC_PERSTN_1 (MTK_PIN_NO(0) | 5) +#define PINMUX_GPIO0__FUNC_IDDIG_1P (MTK_PIN_NO(0) | 6) +#define PINMUX_GPIO0__FUNC_DMIC4_CLK (MTK_PIN_NO(0) | 7) + +#define PINMUX_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0) +#define PINMUX_GPIO1__FUNC_TP_GPIO1_AO (MTK_PIN_NO(1) | 1) +#define PINMUX_GPIO1__FUNC_MSDC2_CLK (MTK_PIN_NO(1) | 2) +#define PINMUX_GPIO1__FUNC_TDMIN_DI (MTK_PIN_NO(1) | 3) +#define PINMUX_GPIO1__FUNC_CLKM1 (MTK_PIN_NO(1) | 4) +#define PINMUX_GPIO1__FUNC_CLKREQN_1 (MTK_PIN_NO(1) | 5) +#define PINMUX_GPIO1__FUNC_USB_DRVVBUS_1P (MTK_PIN_NO(1) | 6) +#define PINMUX_GPIO1__FUNC_DMIC4_DAT (MTK_PIN_NO(1) | 7) + +#define PINMUX_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0) +#define PINMUX_GPIO2__FUNC_TP_GPIO2_AO (MTK_PIN_NO(2) | 1) +#define PINMUX_GPIO2__FUNC_MSDC2_DAT3 (MTK_PIN_NO(2) | 2) +#define PINMUX_GPIO2__FUNC_TDMIN_LRCK (MTK_PIN_NO(2) | 3) +#define PINMUX_GPIO2__FUNC_CLKM2 (MTK_PIN_NO(2) | 4) +#define PINMUX_GPIO2__FUNC_WAKEN_1 (MTK_PIN_NO(2) | 5) +#define PINMUX_GPIO2__FUNC_DMIC2_CLK (MTK_PIN_NO(2) | 7) + +#define PINMUX_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0) +#define PINMUX_GPIO3__FUNC_TP_GPIO3_AO (MTK_PIN_NO(3) | 1) +#define PINMUX_GPIO3__FUNC_MSDC2_DAT0 (MTK_PIN_NO(3) | 2) +#define PINMUX_GPIO3__FUNC_TDMIN_BCK (MTK_PIN_NO(3) | 3) +#define PINMUX_GPIO3__FUNC_CLKM3 (MTK_PIN_NO(3) | 4) +#define PINMUX_GPIO3__FUNC_DMIC2_DAT (MTK_PIN_NO(3) | 7) + +#define PINMUX_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0) +#define PINMUX_GPIO4__FUNC_TP_GPIO4_AO (MTK_PIN_NO(4) | 1) +#define PINMUX_GPIO4__FUNC_MSDC2_DAT2 (MTK_PIN_NO(4) | 2) +#define PINMUX_GPIO4__FUNC_SPDIF_IN1 (MTK_PIN_NO(4) | 3) +#define PINMUX_GPIO4__FUNC_UTXD3 (MTK_PIN_NO(4) | 4) +#define PINMUX_GPIO4__FUNC_SDA2 (MTK_PIN_NO(4) | 5) +#define PINMUX_GPIO4__FUNC_IDDIG_2P (MTK_PIN_NO(4) | 7) + +#define PINMUX_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0) +#define PINMUX_GPIO5__FUNC_TP_GPIO5_AO (MTK_PIN_NO(5) | 1) +#define PINMUX_GPIO5__FUNC_MSDC2_DAT1 (MTK_PIN_NO(5) | 2) +#define PINMUX_GPIO5__FUNC_SPDIF_IN0 (MTK_PIN_NO(5) | 3) +#define PINMUX_GPIO5__FUNC_URXD3 (MTK_PIN_NO(5) | 4) +#define PINMUX_GPIO5__FUNC_SCL2 (MTK_PIN_NO(5) | 5) +#define PINMUX_GPIO5__FUNC_USB_DRVVBUS_2P (MTK_PIN_NO(5) | 7) + +#define PINMUX_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0) +#define PINMUX_GPIO6__FUNC_TP_GPIO6_AO (MTK_PIN_NO(6) | 1) +#define PINMUX_GPIO6__FUNC_DP_TX_HPD (MTK_PIN_NO(6) | 2) +#define PINMUX_GPIO6__FUNC_I2SO1_D4 (MTK_PIN_NO(6) | 3) +#define PINMUX_GPIO6__FUNC_UTXD4 (MTK_PIN_NO(6) | 4) +#define PINMUX_GPIO6__FUNC_CMVREF3 (MTK_PIN_NO(6) | 5) +#define PINMUX_GPIO6__FUNC_DMIC3_CLK (MTK_PIN_NO(6) | 7) + +#define PINMUX_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0) +#define PINMUX_GPIO7__FUNC_TP_GPIO7_AO (MTK_PIN_NO(7) | 1) +#define PINMUX_GPIO7__FUNC_EDP_TX_HPD (MTK_PIN_NO(7) | 2) +#define PINMUX_GPIO7__FUNC_I2SO1_D5 (MTK_PIN_NO(7) | 3) +#define PINMUX_GPIO7__FUNC_URXD4 (MTK_PIN_NO(7) | 4) +#define PINMUX_GPIO7__FUNC_CMVREF4 (MTK_PIN_NO(7) | 5) +#define PINMUX_GPIO7__FUNC_DMIC3_DAT (MTK_PIN_NO(7) | 7) + +#define PINMUX_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0) +#define PINMUX_GPIO8__FUNC_SDA0 (MTK_PIN_NO(8) | 1) +#define PINMUX_GPIO8__FUNC_PWM_0 
(MTK_PIN_NO(8) | 2) +#define PINMUX_GPIO8__FUNC_SPDIF_OUT (MTK_PIN_NO(8) | 4) +#define PINMUX_GPIO8__FUNC_LVTS_FOUT (MTK_PIN_NO(8) | 6) +#define PINMUX_GPIO8__FUNC_DBG_MON_A0 (MTK_PIN_NO(8) | 7) + +#define PINMUX_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0) +#define PINMUX_GPIO9__FUNC_SCL0 (MTK_PIN_NO(9) | 1) +#define PINMUX_GPIO9__FUNC_PWM_1 (MTK_PIN_NO(9) | 2) +#define PINMUX_GPIO9__FUNC_IR_IN (MTK_PIN_NO(9) | 4) +#define PINMUX_GPIO9__FUNC_LVTS_SDO (MTK_PIN_NO(9) | 6) +#define PINMUX_GPIO9__FUNC_DBG_MON_A1 (MTK_PIN_NO(9) | 7) + +#define PINMUX_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0) +#define PINMUX_GPIO10__FUNC_SDA1 (MTK_PIN_NO(10) | 1) +#define PINMUX_GPIO10__FUNC_PWM_2 (MTK_PIN_NO(10) | 2) +#define PINMUX_GPIO10__FUNC_ADSP_URXD0 (MTK_PIN_NO(10) | 3) +#define PINMUX_GPIO10__FUNC_SPDIF_IN1 (MTK_PIN_NO(10) | 4) +#define PINMUX_GPIO10__FUNC_LVTS_SCF (MTK_PIN_NO(10) | 6) +#define PINMUX_GPIO10__FUNC_DBG_MON_A2 (MTK_PIN_NO(10) | 7) + +#define PINMUX_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0) +#define PINMUX_GPIO11__FUNC_SCL1 (MTK_PIN_NO(11) | 1) +#define PINMUX_GPIO11__FUNC_PWM_3 (MTK_PIN_NO(11) | 2) +#define PINMUX_GPIO11__FUNC_ADSP_UTXD0 (MTK_PIN_NO(11) | 3) +#define PINMUX_GPIO11__FUNC_SPDIF_IN0 (MTK_PIN_NO(11) | 4) +#define PINMUX_GPIO11__FUNC_LVTS_SCK (MTK_PIN_NO(11) | 6) +#define PINMUX_GPIO11__FUNC_DBG_MON_A3 (MTK_PIN_NO(11) | 7) + +#define PINMUX_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0) +#define PINMUX_GPIO12__FUNC_SDA2 (MTK_PIN_NO(12) | 1) +#define PINMUX_GPIO12__FUNC_DMIC3_DAT_R (MTK_PIN_NO(12) | 2) +#define PINMUX_GPIO12__FUNC_I2SO1_D6 (MTK_PIN_NO(12) | 3) +#define PINMUX_GPIO12__FUNC_LVTS_SDI (MTK_PIN_NO(12) | 6) +#define PINMUX_GPIO12__FUNC_DBG_MON_A4 (MTK_PIN_NO(12) | 7) + +#define PINMUX_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0) +#define PINMUX_GPIO13__FUNC_SCL2 (MTK_PIN_NO(13) | 1) +#define PINMUX_GPIO13__FUNC_DMIC4_DAT_R (MTK_PIN_NO(13) | 2) +#define PINMUX_GPIO13__FUNC_I2SO1_D7 (MTK_PIN_NO(13) | 3) +#define PINMUX_GPIO13__FUNC_DBG_MON_A5 (MTK_PIN_NO(13) | 7) + +#define PINMUX_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0) +#define PINMUX_GPIO14__FUNC_SDA3 (MTK_PIN_NO(14) | 1) +#define PINMUX_GPIO14__FUNC_DMIC3_DAT (MTK_PIN_NO(14) | 2) +#define PINMUX_GPIO14__FUNC_TDMIN_MCK (MTK_PIN_NO(14) | 3) +#define PINMUX_GPIO14__FUNC_DBG_MON_A6 (MTK_PIN_NO(14) | 7) + +#define PINMUX_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0) +#define PINMUX_GPIO15__FUNC_SCL3 (MTK_PIN_NO(15) | 1) +#define PINMUX_GPIO15__FUNC_DMIC3_CLK (MTK_PIN_NO(15) | 2) +#define PINMUX_GPIO15__FUNC_TDMIN_DI (MTK_PIN_NO(15) | 3) +#define PINMUX_GPIO15__FUNC_DBG_MON_A7 (MTK_PIN_NO(15) | 7) + +#define PINMUX_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0) +#define PINMUX_GPIO16__FUNC_SDA4 (MTK_PIN_NO(16) | 1) +#define PINMUX_GPIO16__FUNC_DMIC4_DAT (MTK_PIN_NO(16) | 2) +#define PINMUX_GPIO16__FUNC_TDMIN_LRCK (MTK_PIN_NO(16) | 3) +#define PINMUX_GPIO16__FUNC_DBG_MON_A8 (MTK_PIN_NO(16) | 7) + +#define PINMUX_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0) +#define PINMUX_GPIO17__FUNC_SCL4 (MTK_PIN_NO(17) | 1) +#define PINMUX_GPIO17__FUNC_DMIC4_CLK (MTK_PIN_NO(17) | 2) +#define PINMUX_GPIO17__FUNC_TDMIN_BCK (MTK_PIN_NO(17) | 3) +#define PINMUX_GPIO17__FUNC_DBG_MON_A9 (MTK_PIN_NO(17) | 7) + +#define PINMUX_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0) +#define PINMUX_GPIO18__FUNC_DP_TX_HPD (MTK_PIN_NO(18) | 1) + +#define PINMUX_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0) +#define PINMUX_GPIO19__FUNC_WAKEN (MTK_PIN_NO(19) | 1) +#define PINMUX_GPIO19__FUNC_SCP_SDA1 (MTK_PIN_NO(19) | 2) +#define PINMUX_GPIO19__FUNC_MD32_0_JTAG_TCK (MTK_PIN_NO(19) | 3) +#define 
PINMUX_GPIO19__FUNC_ADSP_JTAG0_TCK (MTK_PIN_NO(19) | 4) +#define PINMUX_GPIO19__FUNC_SDA6 (MTK_PIN_NO(19) | 5) + +#define PINMUX_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0) +#define PINMUX_GPIO20__FUNC_PERSTN (MTK_PIN_NO(20) | 1) +#define PINMUX_GPIO20__FUNC_SCP_SCL1 (MTK_PIN_NO(20) | 2) +#define PINMUX_GPIO20__FUNC_MD32_0_JTAG_TMS (MTK_PIN_NO(20) | 3) +#define PINMUX_GPIO20__FUNC_ADSP_JTAG0_TMS (MTK_PIN_NO(20) | 4) +#define PINMUX_GPIO20__FUNC_SCL6 (MTK_PIN_NO(20) | 5) + +#define PINMUX_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0) +#define PINMUX_GPIO21__FUNC_CLKREQN (MTK_PIN_NO(21) | 1) +#define PINMUX_GPIO21__FUNC_MD32_0_JTAG_TDI (MTK_PIN_NO(21) | 3) +#define PINMUX_GPIO21__FUNC_ADSP_JTAG0_TDI (MTK_PIN_NO(21) | 4) +#define PINMUX_GPIO21__FUNC_SCP_SDA1 (MTK_PIN_NO(21) | 5) + +#define PINMUX_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0) +#define PINMUX_GPIO22__FUNC_CMMCLK0 (MTK_PIN_NO(22) | 1) +#define PINMUX_GPIO22__FUNC_PERSTN_1 (MTK_PIN_NO(22) | 2) +#define PINMUX_GPIO22__FUNC_SCP_SCL1 (MTK_PIN_NO(22) | 5) +#define PINMUX_GPIO22__FUNC_MD32_0_GPIO0 (MTK_PIN_NO(22) | 7) + +#define PINMUX_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0) +#define PINMUX_GPIO23__FUNC_CMMCLK1 (MTK_PIN_NO(23) | 1) +#define PINMUX_GPIO23__FUNC_CLKREQN_1 (MTK_PIN_NO(23) | 2) +#define PINMUX_GPIO23__FUNC_SDA4 (MTK_PIN_NO(23) | 3) +#define PINMUX_GPIO23__FUNC_DMIC1_CLK (MTK_PIN_NO(23) | 4) +#define PINMUX_GPIO23__FUNC_SCP_SDA0 (MTK_PIN_NO(23) | 5) +#define PINMUX_GPIO23__FUNC_MD32_0_GPIO1 (MTK_PIN_NO(23) | 7) + +#define PINMUX_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0) +#define PINMUX_GPIO24__FUNC_CMMCLK2 (MTK_PIN_NO(24) | 1) +#define PINMUX_GPIO24__FUNC_WAKEN_1 (MTK_PIN_NO(24) | 2) +#define PINMUX_GPIO24__FUNC_SCL4 (MTK_PIN_NO(24) | 3) +#define PINMUX_GPIO24__FUNC_DMIC1_DAT (MTK_PIN_NO(24) | 4) +#define PINMUX_GPIO24__FUNC_SCP_SCL0 (MTK_PIN_NO(24) | 5) +#define PINMUX_GPIO24__FUNC_LVTS_26M (MTK_PIN_NO(24) | 6) +#define PINMUX_GPIO24__FUNC_MD32_0_GPIO2 (MTK_PIN_NO(24) | 7) + +#define PINMUX_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0) +#define PINMUX_GPIO25__FUNC_CMMRST (MTK_PIN_NO(25) | 1) +#define PINMUX_GPIO25__FUNC_CMMCLK3 (MTK_PIN_NO(25) | 2) +#define PINMUX_GPIO25__FUNC_SPDIF_OUT (MTK_PIN_NO(25) | 3) +#define PINMUX_GPIO25__FUNC_SDA6 (MTK_PIN_NO(25) | 4) +#define PINMUX_GPIO25__FUNC_ADSP_JTAG0_TRSTN (MTK_PIN_NO(25) | 5) +#define PINMUX_GPIO25__FUNC_MD32_0_JTAG_TRST (MTK_PIN_NO(25) | 6) + +#define PINMUX_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0) +#define PINMUX_GPIO26__FUNC_CMMPDN (MTK_PIN_NO(26) | 1) +#define PINMUX_GPIO26__FUNC_CMMCLK4 (MTK_PIN_NO(26) | 2) +#define PINMUX_GPIO26__FUNC_IR_IN (MTK_PIN_NO(26) | 3) +#define PINMUX_GPIO26__FUNC_SCL6 (MTK_PIN_NO(26) | 4) +#define PINMUX_GPIO26__FUNC_ADSP_JTAG0_TDO (MTK_PIN_NO(26) | 5) +#define PINMUX_GPIO26__FUNC_MD32_0_JTAG_TDO (MTK_PIN_NO(26) | 6) + +#define PINMUX_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0) +#define PINMUX_GPIO27__FUNC_HDMIRX20_HTPLG (MTK_PIN_NO(27) | 1) +#define PINMUX_GPIO27__FUNC_CMFLASH0 (MTK_PIN_NO(27) | 2) +#define PINMUX_GPIO27__FUNC_MD32_0_TXD (MTK_PIN_NO(27) | 3) +#define PINMUX_GPIO27__FUNC_TP_UTXD2_AO (MTK_PIN_NO(27) | 4) +#define PINMUX_GPIO27__FUNC_SCL7 (MTK_PIN_NO(27) | 5) +#define PINMUX_GPIO27__FUNC_UCTS2 (MTK_PIN_NO(27) | 6) +#define PINMUX_GPIO27__FUNC_DBG_MON_A18 (MTK_PIN_NO(27) | 7) + +#define PINMUX_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0) +#define PINMUX_GPIO28__FUNC_HDMIRX20_PWR5V (MTK_PIN_NO(28) | 1) +#define PINMUX_GPIO28__FUNC_CMFLASH1 (MTK_PIN_NO(28) | 2) +#define PINMUX_GPIO28__FUNC_MD32_0_RXD (MTK_PIN_NO(28) | 3) +#define 
PINMUX_GPIO28__FUNC_TP_URXD2_AO (MTK_PIN_NO(28) | 4) +#define PINMUX_GPIO28__FUNC_SDA7 (MTK_PIN_NO(28) | 5) +#define PINMUX_GPIO28__FUNC_URTS2 (MTK_PIN_NO(28) | 6) +#define PINMUX_GPIO28__FUNC_DBG_MON_A19 (MTK_PIN_NO(28) | 7) + +#define PINMUX_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0) +#define PINMUX_GPIO29__FUNC_HDMIRX20_SCL (MTK_PIN_NO(29) | 1) +#define PINMUX_GPIO29__FUNC_CMFLASH2 (MTK_PIN_NO(29) | 2) +#define PINMUX_GPIO29__FUNC_SCL5 (MTK_PIN_NO(29) | 3) +#define PINMUX_GPIO29__FUNC_TP_URTS2_AO (MTK_PIN_NO(29) | 4) +#define PINMUX_GPIO29__FUNC_UTXD2 (MTK_PIN_NO(29) | 6) +#define PINMUX_GPIO29__FUNC_DBG_MON_A20 (MTK_PIN_NO(29) | 7) + +#define PINMUX_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0) +#define PINMUX_GPIO30__FUNC_HDMIRX20_SDA (MTK_PIN_NO(30) | 1) +#define PINMUX_GPIO30__FUNC_CMFLASH3 (MTK_PIN_NO(30) | 2) +#define PINMUX_GPIO30__FUNC_SDA5 (MTK_PIN_NO(30) | 3) +#define PINMUX_GPIO30__FUNC_TP_UCTS2_AO (MTK_PIN_NO(30) | 4) +#define PINMUX_GPIO30__FUNC_URXD2 (MTK_PIN_NO(30) | 6) +#define PINMUX_GPIO30__FUNC_DBG_MON_A21 (MTK_PIN_NO(30) | 7) + +#define PINMUX_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0) +#define PINMUX_GPIO31__FUNC_HDMITX20_PWR5V (MTK_PIN_NO(31) | 1) +#define PINMUX_GPIO31__FUNC_DMIC1_DAT_R (MTK_PIN_NO(31) | 2) +#define PINMUX_GPIO31__FUNC_PERSTN (MTK_PIN_NO(31) | 3) +#define PINMUX_GPIO31__FUNC_DBG_MON_A22 (MTK_PIN_NO(31) | 7) + +#define PINMUX_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0) +#define PINMUX_GPIO32__FUNC_HDMITX20_HTPLG (MTK_PIN_NO(32) | 1) +#define PINMUX_GPIO32__FUNC_CLKREQN (MTK_PIN_NO(32) | 3) +#define PINMUX_GPIO32__FUNC_DBG_MON_A23 (MTK_PIN_NO(32) | 7) + +#define PINMUX_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0) +#define PINMUX_GPIO33__FUNC_HDMITX20_CEC (MTK_PIN_NO(33) | 1) +#define PINMUX_GPIO33__FUNC_CMVREF0 (MTK_PIN_NO(33) | 2) +#define PINMUX_GPIO33__FUNC_WAKEN (MTK_PIN_NO(33) | 3) + +#define PINMUX_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0) +#define PINMUX_GPIO34__FUNC_HDMITX20_SCL (MTK_PIN_NO(34) | 1) +#define PINMUX_GPIO34__FUNC_CMVREF1 (MTK_PIN_NO(34) | 2) +#define PINMUX_GPIO34__FUNC_SCL7 (MTK_PIN_NO(34) | 3) +#define PINMUX_GPIO34__FUNC_SCL6 (MTK_PIN_NO(34) | 4) +#define PINMUX_GPIO34__FUNC_DBG_MON_A24 (MTK_PIN_NO(34) | 7) + +#define PINMUX_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0) +#define PINMUX_GPIO35__FUNC_HDMITX20_SDA (MTK_PIN_NO(35) | 1) +#define PINMUX_GPIO35__FUNC_CMVREF2 (MTK_PIN_NO(35) | 2) +#define PINMUX_GPIO35__FUNC_SDA7 (MTK_PIN_NO(35) | 3) +#define PINMUX_GPIO35__FUNC_SDA6 (MTK_PIN_NO(35) | 4) +#define PINMUX_GPIO35__FUNC_DBG_MON_A25 (MTK_PIN_NO(35) | 7) + +#define PINMUX_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0) +#define PINMUX_GPIO36__FUNC_RTC32K_CK (MTK_PIN_NO(36) | 1) +#define PINMUX_GPIO36__FUNC_DBG_MON_A27 (MTK_PIN_NO(36) | 7) + +#define PINMUX_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0) +#define PINMUX_GPIO37__FUNC_WATCHDOG (MTK_PIN_NO(37) | 1) +#define PINMUX_GPIO37__FUNC_DBG_MON_A28 (MTK_PIN_NO(37) | 7) + +#define PINMUX_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0) +#define PINMUX_GPIO38__FUNC_SRCLKENA0 (MTK_PIN_NO(38) | 1) +#define PINMUX_GPIO38__FUNC_DBG_MON_A29 (MTK_PIN_NO(38) | 7) + +#define PINMUX_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0) +#define PINMUX_GPIO39__FUNC_SRCLKENA1 (MTK_PIN_NO(39) | 1) +#define PINMUX_GPIO39__FUNC_DMIC2_DAT_R (MTK_PIN_NO(39) | 2) +#define PINMUX_GPIO39__FUNC_DBG_MON_A30 (MTK_PIN_NO(39) | 7) + +#define PINMUX_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0) +#define PINMUX_GPIO40__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(40) | 1) +#define PINMUX_GPIO40__FUNC_SPIM3_CSB (MTK_PIN_NO(40) | 3) +#define 
PINMUX_GPIO40__FUNC_DBG_MON_A31 (MTK_PIN_NO(40) | 7) + +#define PINMUX_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0) +#define PINMUX_GPIO41__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(41) | 1) +#define PINMUX_GPIO41__FUNC_SPIM3_CLK (MTK_PIN_NO(41) | 3) +#define PINMUX_GPIO41__FUNC_DBG_MON_A32 (MTK_PIN_NO(41) | 7) + +#define PINMUX_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0) +#define PINMUX_GPIO42__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(42) | 1) +#define PINMUX_GPIO42__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(42) | 2) +#define PINMUX_GPIO42__FUNC_SPIM3_MO (MTK_PIN_NO(42) | 3) +#define PINMUX_GPIO42__FUNC_DBG_MON_B0 (MTK_PIN_NO(42) | 7) + +#define PINMUX_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0) +#define PINMUX_GPIO43__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(43) | 1) +#define PINMUX_GPIO43__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(43) | 2) +#define PINMUX_GPIO43__FUNC_SPIM3_MI (MTK_PIN_NO(43) | 3) +#define PINMUX_GPIO43__FUNC_DBG_MON_B1 (MTK_PIN_NO(43) | 7) + +#define PINMUX_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0) +#define PINMUX_GPIO44__FUNC_SPMI_M_SCL (MTK_PIN_NO(44) | 1) +#define PINMUX_GPIO44__FUNC_I2SI00_DATA1 (MTK_PIN_NO(44) | 2) +#define PINMUX_GPIO44__FUNC_SCL5 (MTK_PIN_NO(44) | 3) +#define PINMUX_GPIO44__FUNC_UTXD5 (MTK_PIN_NO(44) | 4) +#define PINMUX_GPIO44__FUNC_DBG_MON_B2 (MTK_PIN_NO(44) | 7) + +#define PINMUX_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0) +#define PINMUX_GPIO45__FUNC_SPMI_M_SDA (MTK_PIN_NO(45) | 1) +#define PINMUX_GPIO45__FUNC_I2SI00_DATA2 (MTK_PIN_NO(45) | 2) +#define PINMUX_GPIO45__FUNC_SDA5 (MTK_PIN_NO(45) | 3) +#define PINMUX_GPIO45__FUNC_URXD5 (MTK_PIN_NO(45) | 4) +#define PINMUX_GPIO45__FUNC_DBG_MON_B3 (MTK_PIN_NO(45) | 7) + +#define PINMUX_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0) +#define PINMUX_GPIO46__FUNC_I2SIN_MCK (MTK_PIN_NO(46) | 1) +#define PINMUX_GPIO46__FUNC_I2SI00_DATA3 (MTK_PIN_NO(46) | 2) +#define PINMUX_GPIO46__FUNC_SPLIN_MCK (MTK_PIN_NO(46) | 3) +#define PINMUX_GPIO46__FUNC_DBG_MON_B4 (MTK_PIN_NO(46) | 7) + +#define PINMUX_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0) +#define PINMUX_GPIO47__FUNC_I2SIN_BCK (MTK_PIN_NO(47) | 1) +#define PINMUX_GPIO47__FUNC_I2SIN0_BCK (MTK_PIN_NO(47) | 2) +#define PINMUX_GPIO47__FUNC_SPLIN_LRCK (MTK_PIN_NO(47) | 3) +#define PINMUX_GPIO47__FUNC_DBG_MON_B5 (MTK_PIN_NO(47) | 7) + +#define PINMUX_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0) +#define PINMUX_GPIO48__FUNC_I2SIN_WS (MTK_PIN_NO(48) | 1) +#define PINMUX_GPIO48__FUNC_I2SIN0_LRCK (MTK_PIN_NO(48) | 2) +#define PINMUX_GPIO48__FUNC_SPLIN_BCK (MTK_PIN_NO(48) | 3) +#define PINMUX_GPIO48__FUNC_DBG_MON_B6 (MTK_PIN_NO(48) | 7) + +#define PINMUX_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0) +#define PINMUX_GPIO49__FUNC_I2SIN_D0 (MTK_PIN_NO(49) | 1) +#define PINMUX_GPIO49__FUNC_I2SI00_DATA0 (MTK_PIN_NO(49) | 2) +#define PINMUX_GPIO49__FUNC_SPLIN_D0 (MTK_PIN_NO(49) | 3) +#define PINMUX_GPIO49__FUNC_DBG_MON_B7 (MTK_PIN_NO(49) | 7) + +#define PINMUX_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0) +#define PINMUX_GPIO50__FUNC_I2SO1_MCK (MTK_PIN_NO(50) | 1) +#define PINMUX_GPIO50__FUNC_I2SI5_D0 (MTK_PIN_NO(50) | 2) +#define PINMUX_GPIO50__FUNC_I2SO4_MCK (MTK_PIN_NO(50) | 4) +#define PINMUX_GPIO50__FUNC_DBG_MON_B8 (MTK_PIN_NO(50) | 7) + +#define PINMUX_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0) +#define PINMUX_GPIO51__FUNC_I2SO1_BCK (MTK_PIN_NO(51) | 1) +#define PINMUX_GPIO51__FUNC_I2SI5_BCK (MTK_PIN_NO(51) | 2) +#define PINMUX_GPIO51__FUNC_DBG_MON_B9 (MTK_PIN_NO(51) | 7) + +#define PINMUX_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0) +#define PINMUX_GPIO52__FUNC_I2SO1_WS (MTK_PIN_NO(52) | 1) +#define PINMUX_GPIO52__FUNC_I2SI5_WS (MTK_PIN_NO(52) | 2) +#define 
PINMUX_GPIO52__FUNC_DBG_MON_B10 (MTK_PIN_NO(52) | 7) + +#define PINMUX_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0) +#define PINMUX_GPIO53__FUNC_I2SO1_D0 (MTK_PIN_NO(53) | 1) +#define PINMUX_GPIO53__FUNC_I2SI5_MCK (MTK_PIN_NO(53) | 2) +#define PINMUX_GPIO53__FUNC_DBG_MON_B11 (MTK_PIN_NO(53) | 7) + +#define PINMUX_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0) +#define PINMUX_GPIO54__FUNC_I2SO1_D1 (MTK_PIN_NO(54) | 1) +#define PINMUX_GPIO54__FUNC_I2SI01_DATA1 (MTK_PIN_NO(54) | 2) +#define PINMUX_GPIO54__FUNC_SPLIN_D1 (MTK_PIN_NO(54) | 3) +#define PINMUX_GPIO54__FUNC_I2SO4_BCK (MTK_PIN_NO(54) | 4) +#define PINMUX_GPIO54__FUNC_DBG_MON_B12 (MTK_PIN_NO(54) | 7) + +#define PINMUX_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0) +#define PINMUX_GPIO55__FUNC_I2SO1_D2 (MTK_PIN_NO(55) | 1) +#define PINMUX_GPIO55__FUNC_I2SI01_DATA2 (MTK_PIN_NO(55) | 2) +#define PINMUX_GPIO55__FUNC_SPLIN_D2 (MTK_PIN_NO(55) | 3) +#define PINMUX_GPIO55__FUNC_I2SO4_WS (MTK_PIN_NO(55) | 4) +#define PINMUX_GPIO55__FUNC_DBG_MON_B13 (MTK_PIN_NO(55) | 7) + +#define PINMUX_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0) +#define PINMUX_GPIO56__FUNC_I2SO1_D3 (MTK_PIN_NO(56) | 1) +#define PINMUX_GPIO56__FUNC_I2SI01_DATA3 (MTK_PIN_NO(56) | 2) +#define PINMUX_GPIO56__FUNC_SPLIN_D3 (MTK_PIN_NO(56) | 3) +#define PINMUX_GPIO56__FUNC_I2SO4_D0 (MTK_PIN_NO(56) | 4) +#define PINMUX_GPIO56__FUNC_DBG_MON_B14 (MTK_PIN_NO(56) | 7) + +#define PINMUX_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0) +#define PINMUX_GPIO57__FUNC_I2SO2_MCK (MTK_PIN_NO(57) | 1) +#define PINMUX_GPIO57__FUNC_I2SO1_D12 (MTK_PIN_NO(57) | 2) +#define PINMUX_GPIO57__FUNC_LCM1_RST (MTK_PIN_NO(57) | 3) +#define PINMUX_GPIO57__FUNC_DBG_MON_B15 (MTK_PIN_NO(57) | 7) + +#define PINMUX_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0) +#define PINMUX_GPIO58__FUNC_I2SO2_BCK (MTK_PIN_NO(58) | 1) +#define PINMUX_GPIO58__FUNC_I2SO1_D13 (MTK_PIN_NO(58) | 2) +#define PINMUX_GPIO58__FUNC_I2SIN1_BCK (MTK_PIN_NO(58) | 3) +#define PINMUX_GPIO58__FUNC_DBG_MON_B16 (MTK_PIN_NO(58) | 7) + +#define PINMUX_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0) +#define PINMUX_GPIO59__FUNC_I2SO2_WS (MTK_PIN_NO(59) | 1) +#define PINMUX_GPIO59__FUNC_I2SO1_D14 (MTK_PIN_NO(59) | 2) +#define PINMUX_GPIO59__FUNC_I2SIN1_LRCK (MTK_PIN_NO(59) | 3) +#define PINMUX_GPIO59__FUNC_DBG_MON_B17 (MTK_PIN_NO(59) | 7) + +#define PINMUX_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0) +#define PINMUX_GPIO60__FUNC_I2SO2_D0 (MTK_PIN_NO(60) | 1) +#define PINMUX_GPIO60__FUNC_I2SO1_D15 (MTK_PIN_NO(60) | 2) +#define PINMUX_GPIO60__FUNC_I2SI01_DATA0 (MTK_PIN_NO(60) | 3) +#define PINMUX_GPIO60__FUNC_DBG_MON_B18 (MTK_PIN_NO(60) | 7) + +#define PINMUX_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0) +#define PINMUX_GPIO61__FUNC_DMIC1_CLK (MTK_PIN_NO(61) | 1) +#define PINMUX_GPIO61__FUNC_I2SO2_BCK (MTK_PIN_NO(61) | 2) +#define PINMUX_GPIO61__FUNC_SCP_SPI2_CK (MTK_PIN_NO(61) | 3) +#define PINMUX_GPIO61__FUNC_DBG_MON_B19 (MTK_PIN_NO(61) | 7) + +#define PINMUX_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0) +#define PINMUX_GPIO62__FUNC_DMIC1_DAT (MTK_PIN_NO(62) | 1) +#define PINMUX_GPIO62__FUNC_I2SO2_WS (MTK_PIN_NO(62) | 2) +#define PINMUX_GPIO62__FUNC_SCP_SPI2_MI (MTK_PIN_NO(62) | 3) +#define PINMUX_GPIO62__FUNC_DBG_MON_B20 (MTK_PIN_NO(62) | 7) + +#define PINMUX_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0) +#define PINMUX_GPIO63__FUNC_DMIC2_CLK (MTK_PIN_NO(63) | 1) +#define PINMUX_GPIO63__FUNC_VBUSVALID (MTK_PIN_NO(63) | 2) +#define PINMUX_GPIO63__FUNC_SCP_SPI2_MO (MTK_PIN_NO(63) | 3) +#define PINMUX_GPIO63__FUNC_SCP_SCL2 (MTK_PIN_NO(63) | 4) +#define PINMUX_GPIO63__FUNC_SCP_JTAG1_TDO (MTK_PIN_NO(63) | 5) 
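The PINMUX_GPIO<n>__FUNC_<signal> constants above pack the GPIO number and the mux function index (0-7) into a single value, so a board device tree can select a pad function with one cell. A small standalone sketch of how such a constant decodes, assuming MTK_PIN_NO(x) expands to ((x) << 8) as in the shared mt65xx.h header included at the top of this file (that definition is not shown in this hunk):

#include <stdio.h>

/* Assumed to mirror dt-bindings/pinctrl/mt65xx.h: pin number kept in bits 8 and up. */
#define MTK_PIN_NO(x) ((x) << 8)

/* One of the constants added above: GPIO63 routed to SCP_JTAG1_TDO (function 5). */
#define PINMUX_GPIO63__FUNC_SCP_JTAG1_TDO (MTK_PIN_NO(63) | 5)

int main(void)
{
	unsigned int v = PINMUX_GPIO63__FUNC_SCP_JTAG1_TDO;

	/* Low nibble carries the function index, the remaining bits the pin number. */
	printf("pin %u, function %u\n", v >> 8, v & 0xf);	/* prints: pin 63, function 5 */
	return 0;
}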
+#define PINMUX_GPIO63__FUNC_JTDO_SEL1 (MTK_PIN_NO(63) | 6) +#define PINMUX_GPIO63__FUNC_DBG_MON_B21 (MTK_PIN_NO(63) | 7) + +#define PINMUX_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0) +#define PINMUX_GPIO64__FUNC_DMIC2_DAT (MTK_PIN_NO(64) | 1) +#define PINMUX_GPIO64__FUNC_VBUSVALID_1P (MTK_PIN_NO(64) | 2) +#define PINMUX_GPIO64__FUNC_SCP_SPI2_CS (MTK_PIN_NO(64) | 3) +#define PINMUX_GPIO64__FUNC_SCP_SDA2 (MTK_PIN_NO(64) | 4) +#define PINMUX_GPIO64__FUNC_DBG_MON_B22 (MTK_PIN_NO(64) | 7) + +#define PINMUX_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0) +#define PINMUX_GPIO65__FUNC_PCM_DO (MTK_PIN_NO(65) | 1) +#define PINMUX_GPIO65__FUNC_AUXIF_ST0 (MTK_PIN_NO(65) | 2) +#define PINMUX_GPIO65__FUNC_UCTS2 (MTK_PIN_NO(65) | 3) +#define PINMUX_GPIO65__FUNC_SCP_JTAG1_TMS (MTK_PIN_NO(65) | 5) +#define PINMUX_GPIO65__FUNC_JTMS_SEL1 (MTK_PIN_NO(65) | 6) +#define PINMUX_GPIO65__FUNC_DBG_MON_B23 (MTK_PIN_NO(65) | 7) + +#define PINMUX_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0) +#define PINMUX_GPIO66__FUNC_PCM_CLK (MTK_PIN_NO(66) | 1) +#define PINMUX_GPIO66__FUNC_AUXIF_CLK0 (MTK_PIN_NO(66) | 2) +#define PINMUX_GPIO66__FUNC_URTS2 (MTK_PIN_NO(66) | 3) +#define PINMUX_GPIO66__FUNC_SCP_JTAG1_TCK (MTK_PIN_NO(66) | 5) +#define PINMUX_GPIO66__FUNC_JTCK_SEL1 (MTK_PIN_NO(66) | 6) +#define PINMUX_GPIO66__FUNC_DBG_MON_B24 (MTK_PIN_NO(66) | 7) + +#define PINMUX_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0) +#define PINMUX_GPIO67__FUNC_PCM_DI (MTK_PIN_NO(67) | 1) +#define PINMUX_GPIO67__FUNC_AUXIF_ST1 (MTK_PIN_NO(67) | 2) +#define PINMUX_GPIO67__FUNC_UTXD2 (MTK_PIN_NO(67) | 3) +#define PINMUX_GPIO67__FUNC_SCP_JTAG1_TRSTN (MTK_PIN_NO(67) | 5) +#define PINMUX_GPIO67__FUNC_JTRSTn_SEL1 (MTK_PIN_NO(67) | 6) +#define PINMUX_GPIO67__FUNC_DBG_MON_B25 (MTK_PIN_NO(67) | 7) + +#define PINMUX_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0) +#define PINMUX_GPIO68__FUNC_PCM_SYNC (MTK_PIN_NO(68) | 1) +#define PINMUX_GPIO68__FUNC_AUXIF_CLK1 (MTK_PIN_NO(68) | 2) +#define PINMUX_GPIO68__FUNC_URXD2 (MTK_PIN_NO(68) | 3) +#define PINMUX_GPIO68__FUNC_SCP_JTAG1_TDI (MTK_PIN_NO(68) | 5) +#define PINMUX_GPIO68__FUNC_JTDI_SEL1 (MTK_PIN_NO(68) | 6) +#define PINMUX_GPIO68__FUNC_DBG_MON_B26 (MTK_PIN_NO(68) | 7) + +#define PINMUX_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0) +#define PINMUX_GPIO69__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(69) | 1) +#define PINMUX_GPIO69__FUNC_I2SIN2_BCK (MTK_PIN_NO(69) | 2) +#define PINMUX_GPIO69__FUNC_PWM_0 (MTK_PIN_NO(69) | 3) +#define PINMUX_GPIO69__FUNC_WAKEN (MTK_PIN_NO(69) | 4) +#define PINMUX_GPIO69__FUNC_DBG_MON_B27 (MTK_PIN_NO(69) | 7) + +#define PINMUX_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0) +#define PINMUX_GPIO70__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(70) | 1) +#define PINMUX_GPIO70__FUNC_I2SIN2_LRCK (MTK_PIN_NO(70) | 2) +#define PINMUX_GPIO70__FUNC_PWM_1 (MTK_PIN_NO(70) | 3) +#define PINMUX_GPIO70__FUNC_PERSTN (MTK_PIN_NO(70) | 4) +#define PINMUX_GPIO70__FUNC_DBG_MON_B28 (MTK_PIN_NO(70) | 7) + +#define PINMUX_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0) +#define PINMUX_GPIO71__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(71) | 1) +#define PINMUX_GPIO71__FUNC_IDDIG_2P (MTK_PIN_NO(71) | 2) +#define PINMUX_GPIO71__FUNC_PWM_2 (MTK_PIN_NO(71) | 3) +#define PINMUX_GPIO71__FUNC_CLKREQN (MTK_PIN_NO(71) | 4) +#define PINMUX_GPIO71__FUNC_DBG_MON_B29 (MTK_PIN_NO(71) | 7) + +#define PINMUX_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0) +#define PINMUX_GPIO72__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(72) | 1) +#define PINMUX_GPIO72__FUNC_USB_DRVVBUS_2P (MTK_PIN_NO(72) | 2) +#define PINMUX_GPIO72__FUNC_PWM_3 (MTK_PIN_NO(72) | 3) +#define PINMUX_GPIO72__FUNC_PERSTN_1 (MTK_PIN_NO(72) | 4) +#define 
PINMUX_GPIO72__FUNC_DBG_MON_B30 (MTK_PIN_NO(72) | 7) + +#define PINMUX_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0) +#define PINMUX_GPIO73__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(73) | 1) +#define PINMUX_GPIO73__FUNC_I2SI02_DATA0 (MTK_PIN_NO(73) | 2) +#define PINMUX_GPIO73__FUNC_CLKREQN_1 (MTK_PIN_NO(73) | 4) +#define PINMUX_GPIO73__FUNC_VOW_DAT_MISO (MTK_PIN_NO(73) | 5) +#define PINMUX_GPIO73__FUNC_DBG_MON_B31 (MTK_PIN_NO(73) | 7) + +#define PINMUX_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0) +#define PINMUX_GPIO74__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(74) | 1) +#define PINMUX_GPIO74__FUNC_I2SI02_DATA1 (MTK_PIN_NO(74) | 2) +#define PINMUX_GPIO74__FUNC_WAKEN_1 (MTK_PIN_NO(74) | 4) +#define PINMUX_GPIO74__FUNC_VOW_CLK_MISO (MTK_PIN_NO(74) | 5) +#define PINMUX_GPIO74__FUNC_DBG_MON_B32 (MTK_PIN_NO(74) | 7) + +#define PINMUX_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0) +#define PINMUX_GPIO75__FUNC_AUD_DAT_MISO2 (MTK_PIN_NO(75) | 1) +#define PINMUX_GPIO75__FUNC_I2SI02_DATA2 (MTK_PIN_NO(75) | 2) + +#define PINMUX_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0) +#define PINMUX_GPIO76__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(76) | 1) +#define PINMUX_GPIO76__FUNC_I2SI02_DATA3 (MTK_PIN_NO(76) | 2) +#define PINMUX_GPIO76__FUNC_DBG_MON_A26 (MTK_PIN_NO(76) | 7) + +#define PINMUX_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0) +#define PINMUX_GPIO77__FUNC_DGI_D0 (MTK_PIN_NO(77) | 1) +#define PINMUX_GPIO77__FUNC_DPI_D0 (MTK_PIN_NO(77) | 2) +#define PINMUX_GPIO77__FUNC_I2SI4_MCK (MTK_PIN_NO(77) | 3) +#define PINMUX_GPIO77__FUNC_SPIM4_CLK (MTK_PIN_NO(77) | 4) +#define PINMUX_GPIO77__FUNC_GBE_TXD3 (MTK_PIN_NO(77) | 5) +#define PINMUX_GPIO77__FUNC_SPM_JTAG_TCK (MTK_PIN_NO(77) | 6) + +#define PINMUX_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0) +#define PINMUX_GPIO78__FUNC_DGI_D1 (MTK_PIN_NO(78) | 1) +#define PINMUX_GPIO78__FUNC_DPI_D1 (MTK_PIN_NO(78) | 2) +#define PINMUX_GPIO78__FUNC_I2SI4_BCK (MTK_PIN_NO(78) | 3) +#define PINMUX_GPIO78__FUNC_SPIM4_MO (MTK_PIN_NO(78) | 4) +#define PINMUX_GPIO78__FUNC_GBE_TXD2 (MTK_PIN_NO(78) | 5) +#define PINMUX_GPIO78__FUNC_SPM_JTAG_TMS (MTK_PIN_NO(78) | 6) + +#define PINMUX_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0) +#define PINMUX_GPIO79__FUNC_DGI_D2 (MTK_PIN_NO(79) | 1) +#define PINMUX_GPIO79__FUNC_DPI_D2 (MTK_PIN_NO(79) | 2) +#define PINMUX_GPIO79__FUNC_I2SI4_WS (MTK_PIN_NO(79) | 3) +#define PINMUX_GPIO79__FUNC_SPIM4_CSB (MTK_PIN_NO(79) | 4) +#define PINMUX_GPIO79__FUNC_GBE_TXD1 (MTK_PIN_NO(79) | 5) +#define PINMUX_GPIO79__FUNC_SPM_JTAG_TDI (MTK_PIN_NO(79) | 6) + +#define PINMUX_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0) +#define PINMUX_GPIO80__FUNC_DGI_D3 (MTK_PIN_NO(80) | 1) +#define PINMUX_GPIO80__FUNC_DPI_D3 (MTK_PIN_NO(80) | 2) +#define PINMUX_GPIO80__FUNC_I2SI4_D0 (MTK_PIN_NO(80) | 3) +#define PINMUX_GPIO80__FUNC_SPIM4_MI (MTK_PIN_NO(80) | 4) +#define PINMUX_GPIO80__FUNC_GBE_TXD0 (MTK_PIN_NO(80) | 5) +#define PINMUX_GPIO80__FUNC_SPM_JTAG_TDO (MTK_PIN_NO(80) | 6) + +#define PINMUX_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0) +#define PINMUX_GPIO81__FUNC_DGI_D4 (MTK_PIN_NO(81) | 1) +#define PINMUX_GPIO81__FUNC_DPI_D4 (MTK_PIN_NO(81) | 2) +#define PINMUX_GPIO81__FUNC_I2SI5_MCK (MTK_PIN_NO(81) | 3) +#define PINMUX_GPIO81__FUNC_SPIM5_CLK (MTK_PIN_NO(81) | 4) +#define PINMUX_GPIO81__FUNC_GBE_RXD3 (MTK_PIN_NO(81) | 5) +#define PINMUX_GPIO81__FUNC_SPM_JTAG_TRSTN (MTK_PIN_NO(81) | 6) + +#define PINMUX_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0) +#define PINMUX_GPIO82__FUNC_DGI_D5 (MTK_PIN_NO(82) | 1) +#define PINMUX_GPIO82__FUNC_DPI_D5 (MTK_PIN_NO(82) | 2) +#define PINMUX_GPIO82__FUNC_I2SI5_BCK (MTK_PIN_NO(82) | 3) +#define 
PINMUX_GPIO82__FUNC_SPIM5_MO (MTK_PIN_NO(82) | 4) +#define PINMUX_GPIO82__FUNC_GBE_RXD2 (MTK_PIN_NO(82) | 5) +#define PINMUX_GPIO82__FUNC_MCUPM_JTAG_TDO (MTK_PIN_NO(82) | 6) + +#define PINMUX_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0) +#define PINMUX_GPIO83__FUNC_DGI_D6 (MTK_PIN_NO(83) | 1) +#define PINMUX_GPIO83__FUNC_DPI_D6 (MTK_PIN_NO(83) | 2) +#define PINMUX_GPIO83__FUNC_I2SI5_WS (MTK_PIN_NO(83) | 3) +#define PINMUX_GPIO83__FUNC_SPIM5_CSB (MTK_PIN_NO(83) | 4) +#define PINMUX_GPIO83__FUNC_GBE_RXD1 (MTK_PIN_NO(83) | 5) +#define PINMUX_GPIO83__FUNC_MCUPM_JTAG_TMS (MTK_PIN_NO(83) | 6) + +#define PINMUX_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0) +#define PINMUX_GPIO84__FUNC_DGI_D7 (MTK_PIN_NO(84) | 1) +#define PINMUX_GPIO84__FUNC_DPI_D7 (MTK_PIN_NO(84) | 2) +#define PINMUX_GPIO84__FUNC_I2SI5_D0 (MTK_PIN_NO(84) | 3) +#define PINMUX_GPIO84__FUNC_SPIM5_MI (MTK_PIN_NO(84) | 4) +#define PINMUX_GPIO84__FUNC_GBE_RXD0 (MTK_PIN_NO(84) | 5) +#define PINMUX_GPIO84__FUNC_MCUPM_JTAG_TCK (MTK_PIN_NO(84) | 6) + +#define PINMUX_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0) +#define PINMUX_GPIO85__FUNC_DGI_D8 (MTK_PIN_NO(85) | 1) +#define PINMUX_GPIO85__FUNC_DPI_D8 (MTK_PIN_NO(85) | 2) +#define PINMUX_GPIO85__FUNC_I2SO4_MCK (MTK_PIN_NO(85) | 3) +#define PINMUX_GPIO85__FUNC_SCP_SPI1_B_CK (MTK_PIN_NO(85) | 4) +#define PINMUX_GPIO85__FUNC_GBE_TXC (MTK_PIN_NO(85) | 5) +#define PINMUX_GPIO85__FUNC_MCUPM_JTAG_TDI (MTK_PIN_NO(85) | 6) + +#define PINMUX_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0) +#define PINMUX_GPIO86__FUNC_DGI_D9 (MTK_PIN_NO(86) | 1) +#define PINMUX_GPIO86__FUNC_DPI_D9 (MTK_PIN_NO(86) | 2) +#define PINMUX_GPIO86__FUNC_I2SO4_BCK (MTK_PIN_NO(86) | 3) +#define PINMUX_GPIO86__FUNC_SCP_SPI1_B_MI (MTK_PIN_NO(86) | 4) +#define PINMUX_GPIO86__FUNC_GBE_RXC (MTK_PIN_NO(86) | 5) +#define PINMUX_GPIO86__FUNC_MCUPM_JTAG_TRSTN (MTK_PIN_NO(86) | 6) + +#define PINMUX_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0) +#define PINMUX_GPIO87__FUNC_DGI_D10 (MTK_PIN_NO(87) | 1) +#define PINMUX_GPIO87__FUNC_DPI_D10 (MTK_PIN_NO(87) | 2) +#define PINMUX_GPIO87__FUNC_I2SO4_WS (MTK_PIN_NO(87) | 3) +#define PINMUX_GPIO87__FUNC_SCP_SPI1_B_CS (MTK_PIN_NO(87) | 4) +#define PINMUX_GPIO87__FUNC_GBE_RXDV (MTK_PIN_NO(87) | 5) +#define PINMUX_GPIO87__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(87) | 6) + +#define PINMUX_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0) +#define PINMUX_GPIO88__FUNC_DGI_D11 (MTK_PIN_NO(88) | 1) +#define PINMUX_GPIO88__FUNC_DPI_D11 (MTK_PIN_NO(88) | 2) +#define PINMUX_GPIO88__FUNC_I2SO4_D0 (MTK_PIN_NO(88) | 3) +#define PINMUX_GPIO88__FUNC_SCP_SPI1_B_MO (MTK_PIN_NO(88) | 4) +#define PINMUX_GPIO88__FUNC_GBE_TXEN (MTK_PIN_NO(88) | 5) +#define PINMUX_GPIO88__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(88) | 6) + +#define PINMUX_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0) +#define PINMUX_GPIO89__FUNC_DGI_D12 (MTK_PIN_NO(89) | 1) +#define PINMUX_GPIO89__FUNC_DPI_D12 (MTK_PIN_NO(89) | 2) +#define PINMUX_GPIO89__FUNC_MSDC2_CMD_A (MTK_PIN_NO(89) | 3) +#define PINMUX_GPIO89__FUNC_I2SO5_BCK (MTK_PIN_NO(89) | 4) +#define PINMUX_GPIO89__FUNC_GBE_MDC (MTK_PIN_NO(89) | 5) +#define PINMUX_GPIO89__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(89) | 6) + +#define PINMUX_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0) +#define PINMUX_GPIO90__FUNC_DGI_D13 (MTK_PIN_NO(90) | 1) +#define PINMUX_GPIO90__FUNC_DPI_D13 (MTK_PIN_NO(90) | 2) +#define PINMUX_GPIO90__FUNC_MSDC2_CLK_A (MTK_PIN_NO(90) | 3) +#define PINMUX_GPIO90__FUNC_I2SO5_WS (MTK_PIN_NO(90) | 4) +#define PINMUX_GPIO90__FUNC_GBE_MDIO (MTK_PIN_NO(90) | 5) +#define PINMUX_GPIO90__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(90) | 6) + +#define 
PINMUX_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0) +#define PINMUX_GPIO91__FUNC_DGI_D14 (MTK_PIN_NO(91) | 1) +#define PINMUX_GPIO91__FUNC_DPI_D14 (MTK_PIN_NO(91) | 2) +#define PINMUX_GPIO91__FUNC_MSDC2_DAT3_A (MTK_PIN_NO(91) | 3) +#define PINMUX_GPIO91__FUNC_I2SO5_D0 (MTK_PIN_NO(91) | 4) +#define PINMUX_GPIO91__FUNC_GBE_TXER (MTK_PIN_NO(91) | 5) +#define PINMUX_GPIO91__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(91) | 6) + +#define PINMUX_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0) +#define PINMUX_GPIO92__FUNC_DGI_D15 (MTK_PIN_NO(92) | 1) +#define PINMUX_GPIO92__FUNC_DPI_D15 (MTK_PIN_NO(92) | 2) +#define PINMUX_GPIO92__FUNC_MSDC2_DAT0_A (MTK_PIN_NO(92) | 3) +#define PINMUX_GPIO92__FUNC_I2SO2_D1 (MTK_PIN_NO(92) | 4) +#define PINMUX_GPIO92__FUNC_GBE_RXER (MTK_PIN_NO(92) | 5) +#define PINMUX_GPIO92__FUNC_CCU0_JTAG_TDO (MTK_PIN_NO(92) | 6) + +#define PINMUX_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0) +#define PINMUX_GPIO93__FUNC_DGI_HSYNC (MTK_PIN_NO(93) | 1) +#define PINMUX_GPIO93__FUNC_DPI_HSYNC (MTK_PIN_NO(93) | 2) +#define PINMUX_GPIO93__FUNC_MSDC2_DAT2_A (MTK_PIN_NO(93) | 3) +#define PINMUX_GPIO93__FUNC_I2SO2_D2 (MTK_PIN_NO(93) | 4) +#define PINMUX_GPIO93__FUNC_GBE_COL (MTK_PIN_NO(93) | 5) +#define PINMUX_GPIO93__FUNC_CCU0_JTAG_TMS (MTK_PIN_NO(93) | 6) + +#define PINMUX_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0) +#define PINMUX_GPIO94__FUNC_DGI_VSYNC (MTK_PIN_NO(94) | 1) +#define PINMUX_GPIO94__FUNC_DPI_VSYNC (MTK_PIN_NO(94) | 2) +#define PINMUX_GPIO94__FUNC_MSDC2_DAT1_A (MTK_PIN_NO(94) | 3) +#define PINMUX_GPIO94__FUNC_I2SO2_D3 (MTK_PIN_NO(94) | 4) +#define PINMUX_GPIO94__FUNC_GBE_INTR (MTK_PIN_NO(94) | 5) +#define PINMUX_GPIO94__FUNC_CCU0_JTAG_TDI (MTK_PIN_NO(94) | 6) + +#define PINMUX_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0) +#define PINMUX_GPIO95__FUNC_DGI_DE (MTK_PIN_NO(95) | 1) +#define PINMUX_GPIO95__FUNC_DPI_DE (MTK_PIN_NO(95) | 2) +#define PINMUX_GPIO95__FUNC_UTXD2 (MTK_PIN_NO(95) | 3) +#define PINMUX_GPIO95__FUNC_I2SIN_D1 (MTK_PIN_NO(95) | 5) +#define PINMUX_GPIO95__FUNC_CCU0_JTAG_TCK (MTK_PIN_NO(95) | 6) + +#define PINMUX_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0) +#define PINMUX_GPIO96__FUNC_DGI_CK (MTK_PIN_NO(96) | 1) +#define PINMUX_GPIO96__FUNC_DPI_CK (MTK_PIN_NO(96) | 2) +#define PINMUX_GPIO96__FUNC_URXD2 (MTK_PIN_NO(96) | 3) +#define PINMUX_GPIO96__FUNC_I2SO5_MCK (MTK_PIN_NO(96) | 4) +#define PINMUX_GPIO96__FUNC_I2SIN_D2 (MTK_PIN_NO(96) | 5) +#define PINMUX_GPIO96__FUNC_CCU0_JTAG_TRST (MTK_PIN_NO(96) | 6) + +#define PINMUX_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0) +#define PINMUX_GPIO97__FUNC_DISP_PWM0 (MTK_PIN_NO(97) | 1) +#define PINMUX_GPIO97__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(97) | 2) + +#define PINMUX_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0) +#define PINMUX_GPIO98__FUNC_UTXD0 (MTK_PIN_NO(98) | 1) + +#define PINMUX_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0) +#define PINMUX_GPIO99__FUNC_URXD0 (MTK_PIN_NO(99) | 1) + +#define PINMUX_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0) +#define PINMUX_GPIO100__FUNC_URTS1 (MTK_PIN_NO(100) | 1) +#define PINMUX_GPIO100__FUNC_DSI_TE (MTK_PIN_NO(100) | 2) +#define PINMUX_GPIO100__FUNC_I2SO1_D8 (MTK_PIN_NO(100) | 3) +#define PINMUX_GPIO100__FUNC_KPROW2 (MTK_PIN_NO(100) | 4) +#define PINMUX_GPIO100__FUNC_PWM_0 (MTK_PIN_NO(100) | 5) +#define PINMUX_GPIO100__FUNC_TP_URTS1_AO (MTK_PIN_NO(100) | 6) +#define PINMUX_GPIO100__FUNC_I2SIN_D0 (MTK_PIN_NO(100) | 7) + +#define PINMUX_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0) +#define PINMUX_GPIO101__FUNC_UCTS1 (MTK_PIN_NO(101) | 1) +#define PINMUX_GPIO101__FUNC_DSI1_TE (MTK_PIN_NO(101) | 2) +#define 
PINMUX_GPIO101__FUNC_I2SO1_D9 (MTK_PIN_NO(101) | 3) +#define PINMUX_GPIO101__FUNC_KPCOL2 (MTK_PIN_NO(101) | 4) +#define PINMUX_GPIO101__FUNC_PWM_1 (MTK_PIN_NO(101) | 5) +#define PINMUX_GPIO101__FUNC_TP_UCTS1_AO (MTK_PIN_NO(101) | 6) +#define PINMUX_GPIO101__FUNC_I2SIN_D1 (MTK_PIN_NO(101) | 7) + +#define PINMUX_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0) +#define PINMUX_GPIO102__FUNC_UTXD1 (MTK_PIN_NO(102) | 1) +#define PINMUX_GPIO102__FUNC_VBUSVALID_2P (MTK_PIN_NO(102) | 2) +#define PINMUX_GPIO102__FUNC_I2SO1_D10 (MTK_PIN_NO(102) | 3) +#define PINMUX_GPIO102__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(102) | 4) +#define PINMUX_GPIO102__FUNC_TP_UTXD1_AO (MTK_PIN_NO(102) | 5) +#define PINMUX_GPIO102__FUNC_MD32_1_TXD (MTK_PIN_NO(102) | 6) +#define PINMUX_GPIO102__FUNC_I2SIN_D2 (MTK_PIN_NO(102) | 7) + +#define PINMUX_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0) +#define PINMUX_GPIO103__FUNC_URXD1 (MTK_PIN_NO(103) | 1) +#define PINMUX_GPIO103__FUNC_VBUSVALID_3P (MTK_PIN_NO(103) | 2) +#define PINMUX_GPIO103__FUNC_I2SO1_D11 (MTK_PIN_NO(103) | 3) +#define PINMUX_GPIO103__FUNC_SSPM_URXD_AO (MTK_PIN_NO(103) | 4) +#define PINMUX_GPIO103__FUNC_TP_URXD1_AO (MTK_PIN_NO(103) | 5) +#define PINMUX_GPIO103__FUNC_MD32_1_RXD (MTK_PIN_NO(103) | 6) +#define PINMUX_GPIO103__FUNC_I2SIN_D3 (MTK_PIN_NO(103) | 7) + +#define PINMUX_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0) +#define PINMUX_GPIO104__FUNC_KPROW0 (MTK_PIN_NO(104) | 1) +#define PINMUX_GPIO104__FUNC_DISP_PWM1 (MTK_PIN_NO(104) | 2) + +#define PINMUX_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0) +#define PINMUX_GPIO105__FUNC_KPROW1 (MTK_PIN_NO(105) | 1) +#define PINMUX_GPIO105__FUNC_EDP_TX_HPD (MTK_PIN_NO(105) | 2) +#define PINMUX_GPIO105__FUNC_PWM_2 (MTK_PIN_NO(105) | 3) + +#define PINMUX_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0) +#define PINMUX_GPIO106__FUNC_KPCOL0 (MTK_PIN_NO(106) | 1) + +#define PINMUX_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0) +#define PINMUX_GPIO107__FUNC_KPCOL1 (MTK_PIN_NO(107) | 1) +#define PINMUX_GPIO107__FUNC_DSI1_TE (MTK_PIN_NO(107) | 2) +#define PINMUX_GPIO107__FUNC_PWM_3 (MTK_PIN_NO(107) | 3) +#define PINMUX_GPIO107__FUNC_SCP_SCL3 (MTK_PIN_NO(107) | 4) +#define PINMUX_GPIO107__FUNC_I2SIN_MCK (MTK_PIN_NO(107) | 5) + +#define PINMUX_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0) +#define PINMUX_GPIO108__FUNC_LCM_RST (MTK_PIN_NO(108) | 1) +#define PINMUX_GPIO108__FUNC_KPCOL1 (MTK_PIN_NO(108) | 2) +#define PINMUX_GPIO108__FUNC_SCP_SDA3 (MTK_PIN_NO(108) | 4) +#define PINMUX_GPIO108__FUNC_I2SIN_BCK (MTK_PIN_NO(108) | 5) + +#define PINMUX_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0) +#define PINMUX_GPIO109__FUNC_DSI_TE (MTK_PIN_NO(109) | 1) +#define PINMUX_GPIO109__FUNC_I2SIN_D3 (MTK_PIN_NO(109) | 2) +#define PINMUX_GPIO109__FUNC_I2SIN_WS (MTK_PIN_NO(109) | 5) + +#define PINMUX_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0) +#define PINMUX_GPIO110__FUNC_MSDC1_CMD (MTK_PIN_NO(110) | 1) +#define PINMUX_GPIO110__FUNC_JTMS_SEL3 (MTK_PIN_NO(110) | 2) +#define PINMUX_GPIO110__FUNC_UDI_TMS (MTK_PIN_NO(110) | 3) +#define PINMUX_GPIO110__FUNC_CCU1_JTAG_TMS (MTK_PIN_NO(110) | 5) +#define PINMUX_GPIO110__FUNC_IPU_JTAG_TMS (MTK_PIN_NO(110) | 6) + +#define PINMUX_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0) +#define PINMUX_GPIO111__FUNC_MSDC1_CLK (MTK_PIN_NO(111) | 1) +#define PINMUX_GPIO111__FUNC_JTCK_SEL3 (MTK_PIN_NO(111) | 2) +#define PINMUX_GPIO111__FUNC_UDI_TCK (MTK_PIN_NO(111) | 3) +#define PINMUX_GPIO111__FUNC_CCU1_JTAG_TCK (MTK_PIN_NO(111) | 5) +#define PINMUX_GPIO111__FUNC_IPU_JTAG_TCK (MTK_PIN_NO(111) | 6) + +#define PINMUX_GPIO112__FUNC_GPIO112 
(MTK_PIN_NO(112) | 0) +#define PINMUX_GPIO112__FUNC_MSDC1_DAT0 (MTK_PIN_NO(112) | 1) +#define PINMUX_GPIO112__FUNC_JTDI_SEL3 (MTK_PIN_NO(112) | 2) +#define PINMUX_GPIO112__FUNC_UDI_TDI (MTK_PIN_NO(112) | 3) +#define PINMUX_GPIO112__FUNC_I2SO2_D0 (MTK_PIN_NO(112) | 4) +#define PINMUX_GPIO112__FUNC_CCU1_JTAG_TDI (MTK_PIN_NO(112) | 5) +#define PINMUX_GPIO112__FUNC_IPU_JTAG_TDI (MTK_PIN_NO(112) | 6) + +#define PINMUX_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0) +#define PINMUX_GPIO113__FUNC_MSDC1_DAT1 (MTK_PIN_NO(113) | 1) +#define PINMUX_GPIO113__FUNC_JTDO_SEL3 (MTK_PIN_NO(113) | 2) +#define PINMUX_GPIO113__FUNC_UDI_TDO (MTK_PIN_NO(113) | 3) +#define PINMUX_GPIO113__FUNC_I2SO2_D1 (MTK_PIN_NO(113) | 4) +#define PINMUX_GPIO113__FUNC_CCU1_JTAG_TDO (MTK_PIN_NO(113) | 5) +#define PINMUX_GPIO113__FUNC_IPU_JTAG_TDO (MTK_PIN_NO(113) | 6) + +#define PINMUX_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0) +#define PINMUX_GPIO114__FUNC_MSDC1_DAT2 (MTK_PIN_NO(114) | 1) +#define PINMUX_GPIO114__FUNC_JTRSTn_SEL3 (MTK_PIN_NO(114) | 2) +#define PINMUX_GPIO114__FUNC_UDI_NTRST (MTK_PIN_NO(114) | 3) +#define PINMUX_GPIO114__FUNC_I2SO2_D2 (MTK_PIN_NO(114) | 4) +#define PINMUX_GPIO114__FUNC_CCU1_JTAG_TRST (MTK_PIN_NO(114) | 5) +#define PINMUX_GPIO114__FUNC_IPU_JTAG_TRST (MTK_PIN_NO(114) | 6) + +#define PINMUX_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0) +#define PINMUX_GPIO115__FUNC_MSDC1_DAT3 (MTK_PIN_NO(115) | 1) +#define PINMUX_GPIO115__FUNC_I2SO2_D3 (MTK_PIN_NO(115) | 4) +#define PINMUX_GPIO115__FUNC_MD32_1_GPIO2 (MTK_PIN_NO(115) | 6) + +#define PINMUX_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0) +#define PINMUX_GPIO116__FUNC_MSDC0_DAT7 (MTK_PIN_NO(116) | 1) + +#define PINMUX_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0) +#define PINMUX_GPIO117__FUNC_MSDC0_DAT6 (MTK_PIN_NO(117) | 1) + +#define PINMUX_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0) +#define PINMUX_GPIO118__FUNC_MSDC0_DAT5 (MTK_PIN_NO(118) | 1) + +#define PINMUX_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0) +#define PINMUX_GPIO119__FUNC_MSDC0_DAT4 (MTK_PIN_NO(119) | 1) + +#define PINMUX_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0) +#define PINMUX_GPIO120__FUNC_MSDC0_RSTB (MTK_PIN_NO(120) | 1) + +#define PINMUX_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0) +#define PINMUX_GPIO121__FUNC_MSDC0_CMD (MTK_PIN_NO(121) | 1) + +#define PINMUX_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0) +#define PINMUX_GPIO122__FUNC_MSDC0_CLK (MTK_PIN_NO(122) | 1) + +#define PINMUX_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0) +#define PINMUX_GPIO123__FUNC_MSDC0_DAT3 (MTK_PIN_NO(123) | 1) + +#define PINMUX_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0) +#define PINMUX_GPIO124__FUNC_MSDC0_DAT2 (MTK_PIN_NO(124) | 1) + +#define PINMUX_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0) +#define PINMUX_GPIO125__FUNC_MSDC0_DAT1 (MTK_PIN_NO(125) | 1) + +#define PINMUX_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0) +#define PINMUX_GPIO126__FUNC_MSDC0_DAT0 (MTK_PIN_NO(126) | 1) + +#define PINMUX_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0) +#define PINMUX_GPIO127__FUNC_MSDC0_DSL (MTK_PIN_NO(127) | 1) + +#define PINMUX_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0) +#define PINMUX_GPIO128__FUNC_IDDIG (MTK_PIN_NO(128) | 1) +#define PINMUX_GPIO128__FUNC_UCTS2 (MTK_PIN_NO(128) | 2) +#define PINMUX_GPIO128__FUNC_UTXD5 (MTK_PIN_NO(128) | 3) +#define PINMUX_GPIO128__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(128) | 4) +#define PINMUX_GPIO128__FUNC_mbistreaden_trigger (MTK_PIN_NO(128) | 5) +#define PINMUX_GPIO128__FUNC_MD32_1_GPIO0 (MTK_PIN_NO(128) | 6) +#define PINMUX_GPIO128__FUNC_SCP_SCL2 (MTK_PIN_NO(128) | 7) + +#define 
PINMUX_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0) +#define PINMUX_GPIO129__FUNC_USB_DRVVBUS (MTK_PIN_NO(129) | 1) +#define PINMUX_GPIO129__FUNC_URTS2 (MTK_PIN_NO(129) | 2) +#define PINMUX_GPIO129__FUNC_URXD5 (MTK_PIN_NO(129) | 3) +#define PINMUX_GPIO129__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(129) | 4) +#define PINMUX_GPIO129__FUNC_mbistwriteen_trigger (MTK_PIN_NO(129) | 5) +#define PINMUX_GPIO129__FUNC_MD32_1_GPIO1 (MTK_PIN_NO(129) | 6) +#define PINMUX_GPIO129__FUNC_SCP_SDA2 (MTK_PIN_NO(129) | 7) + +#define PINMUX_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0) +#define PINMUX_GPIO130__FUNC_IDDIG_1P (MTK_PIN_NO(130) | 1) +#define PINMUX_GPIO130__FUNC_SPINOR_IO2 (MTK_PIN_NO(130) | 2) +#define PINMUX_GPIO130__FUNC_SNFI_WP (MTK_PIN_NO(130) | 3) +#define PINMUX_GPIO130__FUNC_VPU_UDI_NTRST (MTK_PIN_NO(130) | 4) + +#define PINMUX_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0) +#define PINMUX_GPIO131__FUNC_USB_DRVVBUS_1P (MTK_PIN_NO(131) | 1) +#define PINMUX_GPIO131__FUNC_SPINOR_IO3 (MTK_PIN_NO(131) | 2) +#define PINMUX_GPIO131__FUNC_SNFI_HOLD (MTK_PIN_NO(131) | 3) +#define PINMUX_GPIO131__FUNC_MD32_1_JTAG_TRST (MTK_PIN_NO(131) | 4) +#define PINMUX_GPIO131__FUNC_SCP_JTAG0_TRSTN (MTK_PIN_NO(131) | 5) +#define PINMUX_GPIO131__FUNC_APU_JTAG_TRST (MTK_PIN_NO(131) | 6) + +#define PINMUX_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0) +#define PINMUX_GPIO132__FUNC_SPIM0_CSB (MTK_PIN_NO(132) | 1) +#define PINMUX_GPIO132__FUNC_SCP_SPI0_CS (MTK_PIN_NO(132) | 2) +#define PINMUX_GPIO132__FUNC_SPIS0_CSB (MTK_PIN_NO(132) | 3) +#define PINMUX_GPIO132__FUNC_VPU_UDI_TMS (MTK_PIN_NO(132) | 4) +#define PINMUX_GPIO132__FUNC_I2SO5_D0 (MTK_PIN_NO(132) | 6) + +#define PINMUX_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0) +#define PINMUX_GPIO133__FUNC_SPIM0_CLK (MTK_PIN_NO(133) | 1) +#define PINMUX_GPIO133__FUNC_SCP_SPI0_CK (MTK_PIN_NO(133) | 2) +#define PINMUX_GPIO133__FUNC_SPIS0_CLK (MTK_PIN_NO(133) | 3) +#define PINMUX_GPIO133__FUNC_VPU_UDI_TCK (MTK_PIN_NO(133) | 4) +#define PINMUX_GPIO133__FUNC_I2SO5_BCK (MTK_PIN_NO(133) | 6) + +#define PINMUX_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0) +#define PINMUX_GPIO134__FUNC_SPIM0_MO (MTK_PIN_NO(134) | 1) +#define PINMUX_GPIO134__FUNC_SCP_SPI0_MO (MTK_PIN_NO(134) | 2) +#define PINMUX_GPIO134__FUNC_SPIS0_SI (MTK_PIN_NO(134) | 3) +#define PINMUX_GPIO134__FUNC_VPU_UDI_TDO (MTK_PIN_NO(134) | 4) +#define PINMUX_GPIO134__FUNC_I2SO5_WS (MTK_PIN_NO(134) | 6) + +#define PINMUX_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0) +#define PINMUX_GPIO135__FUNC_SPIM0_MI (MTK_PIN_NO(135) | 1) +#define PINMUX_GPIO135__FUNC_SCP_SPI0_MI (MTK_PIN_NO(135) | 2) +#define PINMUX_GPIO135__FUNC_SPIS0_SO (MTK_PIN_NO(135) | 3) +#define PINMUX_GPIO135__FUNC_VPU_UDI_TDI (MTK_PIN_NO(135) | 4) +#define PINMUX_GPIO135__FUNC_I2SO5_MCK (MTK_PIN_NO(135) | 6) + +#define PINMUX_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0) +#define PINMUX_GPIO136__FUNC_SPIM1_CSB (MTK_PIN_NO(136) | 1) +#define PINMUX_GPIO136__FUNC_SCP_SPI1_A_CS (MTK_PIN_NO(136) | 2) +#define PINMUX_GPIO136__FUNC_SPIS1_CSB (MTK_PIN_NO(136) | 3) +#define PINMUX_GPIO136__FUNC_MD32_1_JTAG_TMS (MTK_PIN_NO(136) | 4) +#define PINMUX_GPIO136__FUNC_SCP_JTAG0_TMS (MTK_PIN_NO(136) | 5) +#define PINMUX_GPIO136__FUNC_APU_JTAG_TMS (MTK_PIN_NO(136) | 6) +#define PINMUX_GPIO136__FUNC_DBG_MON_A15 (MTK_PIN_NO(136) | 7) + +#define PINMUX_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0) +#define PINMUX_GPIO137__FUNC_SPIM1_CLK (MTK_PIN_NO(137) | 1) +#define PINMUX_GPIO137__FUNC_SCP_SPI1_A_CK (MTK_PIN_NO(137) | 2) +#define PINMUX_GPIO137__FUNC_SPIS1_CLK (MTK_PIN_NO(137) | 3) +#define 
PINMUX_GPIO137__FUNC_MD32_1_JTAG_TCK (MTK_PIN_NO(137) | 4) +#define PINMUX_GPIO137__FUNC_SCP_JTAG0_TCK (MTK_PIN_NO(137) | 5) +#define PINMUX_GPIO137__FUNC_APU_JTAG_TCK (MTK_PIN_NO(137) | 6) +#define PINMUX_GPIO137__FUNC_DBG_MON_A14 (MTK_PIN_NO(137) | 7) + +#define PINMUX_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0) +#define PINMUX_GPIO138__FUNC_SPIM1_MO (MTK_PIN_NO(138) | 1) +#define PINMUX_GPIO138__FUNC_SCP_SPI1_A_MO (MTK_PIN_NO(138) | 2) +#define PINMUX_GPIO138__FUNC_SPIS1_SI (MTK_PIN_NO(138) | 3) +#define PINMUX_GPIO138__FUNC_MD32_1_JTAG_TDO (MTK_PIN_NO(138) | 4) +#define PINMUX_GPIO138__FUNC_SCP_JTAG0_TDO (MTK_PIN_NO(138) | 5) +#define PINMUX_GPIO138__FUNC_APU_JTAG_TDO (MTK_PIN_NO(138) | 6) +#define PINMUX_GPIO138__FUNC_DBG_MON_A16 (MTK_PIN_NO(138) | 7) + +#define PINMUX_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0) +#define PINMUX_GPIO139__FUNC_SPIM1_MI (MTK_PIN_NO(139) | 1) +#define PINMUX_GPIO139__FUNC_SCP_SPI1_A_MI (MTK_PIN_NO(139) | 2) +#define PINMUX_GPIO139__FUNC_SPIS1_SO (MTK_PIN_NO(139) | 3) +#define PINMUX_GPIO139__FUNC_MD32_1_JTAG_TDI (MTK_PIN_NO(139) | 4) +#define PINMUX_GPIO139__FUNC_SCP_JTAG0_TDI (MTK_PIN_NO(139) | 5) +#define PINMUX_GPIO139__FUNC_APU_JTAG_TDI (MTK_PIN_NO(139) | 6) +#define PINMUX_GPIO139__FUNC_DBG_MON_A17 (MTK_PIN_NO(139) | 7) + +#define PINMUX_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0) +#define PINMUX_GPIO140__FUNC_SPIM2_CSB (MTK_PIN_NO(140) | 1) +#define PINMUX_GPIO140__FUNC_SPINOR_CS (MTK_PIN_NO(140) | 2) +#define PINMUX_GPIO140__FUNC_SNFI_CS (MTK_PIN_NO(140) | 3) +#define PINMUX_GPIO140__FUNC_DMIC3_DAT (MTK_PIN_NO(140) | 4) +#define PINMUX_GPIO140__FUNC_DBG_MON_A11 (MTK_PIN_NO(140) | 7) + +#define PINMUX_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0) +#define PINMUX_GPIO141__FUNC_SPIM2_CLK (MTK_PIN_NO(141) | 1) +#define PINMUX_GPIO141__FUNC_SPINOR_CK (MTK_PIN_NO(141) | 2) +#define PINMUX_GPIO141__FUNC_SNFI_CLK (MTK_PIN_NO(141) | 3) +#define PINMUX_GPIO141__FUNC_DMIC3_CLK (MTK_PIN_NO(141) | 4) +#define PINMUX_GPIO141__FUNC_DBG_MON_A10 (MTK_PIN_NO(141) | 7) + +#define PINMUX_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0) +#define PINMUX_GPIO142__FUNC_SPIM2_MO (MTK_PIN_NO(142) | 1) +#define PINMUX_GPIO142__FUNC_SPINOR_IO0 (MTK_PIN_NO(142) | 2) +#define PINMUX_GPIO142__FUNC_SNFI_MOSI (MTK_PIN_NO(142) | 3) +#define PINMUX_GPIO142__FUNC_DMIC4_DAT (MTK_PIN_NO(142) | 4) +#define PINMUX_GPIO142__FUNC_DBG_MON_A12 (MTK_PIN_NO(142) | 7) + +#define PINMUX_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0) +#define PINMUX_GPIO143__FUNC_SPIM2_MI (MTK_PIN_NO(143) | 1) +#define PINMUX_GPIO143__FUNC_SPINOR_IO1 (MTK_PIN_NO(143) | 2) +#define PINMUX_GPIO143__FUNC_SNFI_MISO (MTK_PIN_NO(143) | 3) +#define PINMUX_GPIO143__FUNC_DMIC4_CLK (MTK_PIN_NO(143) | 4) +#define PINMUX_GPIO143__FUNC_DBG_MON_A13 (MTK_PIN_NO(143) | 7) + +#endif /* __MT8195_PINFUNC_H */ diff --git a/include/dt-bindings/pinctrl/pinctrl-zynqmp.h b/include/dt-bindings/pinctrl/pinctrl-zynqmp.h new file mode 100644 index 000000000000..cdb215734bdf --- /dev/null +++ b/include/dt-bindings/pinctrl/pinctrl-zynqmp.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * MIO pin configuration defines for Xilinx ZynqMP + * + * Copyright (C) 2020 Xilinx, Inc. 
+ */ + +#ifndef _DT_BINDINGS_PINCTRL_ZYNQMP_H +#define _DT_BINDINGS_PINCTRL_ZYNQMP_H + +/* Bit value for different voltage levels */ +#define IO_STANDARD_LVCMOS33 0 +#define IO_STANDARD_LVCMOS18 1 + +/* Bit values for Slew Rates */ +#define SLEW_RATE_FAST 0 +#define SLEW_RATE_SLOW 1 + +#endif /* _DT_BINDINGS_PINCTRL_ZYNQMP_H */ diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h index d711e250cf2c..eedb5d94c020 100644 --- a/include/dt-bindings/power/qcom-rpmpd.h +++ b/include/dt-bindings/power/qcom-rpmpd.h @@ -45,6 +45,21 @@ #define SM8250_MX 8 #define SM8250_MX_AO 9 +/* SM8350 Power Domain Indexes */ +#define SM8350_CX 0 +#define SM8350_CX_AO 1 +#define SM8350_EBI 2 +#define SM8350_GFX 3 +#define SM8350_LCX 4 +#define SM8350_LMX 5 +#define SM8350_MMCX 6 +#define SM8350_MMCX_AO 7 +#define SM8350_MX 8 +#define SM8350_MX_AO 9 +#define SM8350_MXC 10 +#define SM8350_MXC_AO 11 +#define SM8350_MSS 12 + /* SC7180 Power Domain Indexes */ #define SC7180_CX 0 #define SC7180_CX_AO 1 @@ -55,6 +70,17 @@ #define SC7180_LCX 6 #define SC7180_MSS 7 +/* SC7280 Power Domain Indexes */ +#define SC7280_CX 0 +#define SC7280_CX_AO 1 +#define SC7280_EBI 2 +#define SC7280_GFX 3 +#define SC7280_MX 4 +#define SC7280_MX_AO 5 +#define SC7280_LMX 6 +#define SC7280_LCX 7 +#define SC7280_MSS 8 + /* SDM845 Power Domain performance levels */ #define RPMH_REGULATOR_LEVEL_RETENTION 16 #define RPMH_REGULATOR_LEVEL_MIN_SVS 48 diff --git a/include/dt-bindings/pwm/raspberrypi,firmware-poe-pwm.h b/include/dt-bindings/pwm/raspberrypi,firmware-poe-pwm.h new file mode 100644 index 000000000000..27c5ce68847b --- /dev/null +++ b/include/dt-bindings/pwm/raspberrypi,firmware-poe-pwm.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020 Nicolas Saenz Julienne + * Author: Nicolas Saenz Julienne <nsaenzjulienne@suse.de> + */ + +#ifndef _DT_BINDINGS_RASPBERRYPI_FIRMWARE_PWM_H +#define _DT_BINDINGS_RASPBERRYPI_FIRMWARE_PWM_H + +#define RASPBERRYPI_FIRMWARE_PWM_POE 0 +#define RASPBERRYPI_FIRMWARE_PWM_NUM 1 + +#endif diff --git a/include/dt-bindings/soc/bcm-pmb.h b/include/dt-bindings/soc/bcm-pmb.h index 744dc3af4d41..385884468007 100644 --- a/include/dt-bindings/soc/bcm-pmb.h +++ b/include/dt-bindings/soc/bcm-pmb.h @@ -7,5 +7,6 @@ #define BCM_PMB_PCIE1 0x02 #define BCM_PMB_PCIE2 0x03 #define BCM_PMB_HOST_USB 0x04 +#define BCM_PMB_SATA 0x05 #endif diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h index a29d3ff2e7e8..c432fdb8547f 100644 --- a/include/keys/asymmetric-type.h +++ b/include/keys/asymmetric-type.h @@ -72,6 +72,12 @@ const struct asymmetric_key_ids *asymmetric_key_ids(const struct key *key) return key->payload.data[asym_key_ids]; } +static inline +const struct public_key *asymmetric_key_public_key(const struct key *key) +{ + return key->payload.data[asym_crypto]; +} + extern struct key *find_asymmetric_key(struct key *keyring, const struct asymmetric_key_id *id_0, const struct asymmetric_key_id *id_1, diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h index fb8b07daa9d1..6acd3cf13a18 100644 --- a/include/keys/system_keyring.h +++ b/include/keys/system_keyring.h @@ -16,9 +16,16 @@ extern int restrict_link_by_builtin_trusted(struct key *keyring, const struct key_type *type, const union key_payload *payload, struct key *restriction_key); +extern __init int load_module_cert(struct key *keyring); #else #define restrict_link_by_builtin_trusted restrict_link_reject + +static inline __init int load_module_cert(struct 
key *keyring) +{ + return 0; +} + #endif #ifdef CONFIG_SECONDARY_TRUSTED_KEYRING @@ -31,6 +38,7 @@ extern int restrict_link_by_builtin_and_secondary_trusted( #define restrict_link_by_builtin_and_secondary_trusted restrict_link_by_builtin_trusted #endif +extern struct pkcs7_message *pkcs7; #ifdef CONFIG_SYSTEM_BLACKLIST_KEYRING extern int mark_hash_blacklisted(const char *hash); extern int is_hash_blacklisted(const u8 *hash, size_t hash_len, @@ -49,6 +57,20 @@ static inline int is_binary_blacklisted(const u8 *hash, size_t hash_len) } #endif +#ifdef CONFIG_SYSTEM_REVOCATION_LIST +extern int add_key_to_revocation_list(const char *data, size_t size); +extern int is_key_on_revocation_list(struct pkcs7_message *pkcs7); +#else +static inline int add_key_to_revocation_list(const char *data, size_t size) +{ + return 0; +} +static inline int is_key_on_revocation_list(struct pkcs7_message *pkcs7) +{ + return -ENOKEY; +} +#endif + #ifdef CONFIG_IMA_BLACKLIST_KEYRING extern struct key *ima_blacklist_keyring; diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h index a94c03a61d8f..d89fa2579ac0 100644 --- a/include/keys/trusted-type.h +++ b/include/keys/trusted-type.h @@ -11,6 +11,12 @@ #include <linux/rcupdate.h> #include <linux/tpm.h> +#ifdef pr_fmt +#undef pr_fmt +#endif + +#define pr_fmt(fmt) "trusted_key: " fmt + #define MIN_KEY_SIZE 32 #define MAX_KEY_SIZE 128 #define MAX_BLOB_SIZE 512 @@ -22,6 +28,7 @@ struct trusted_key_payload { unsigned int key_len; unsigned int blob_len; unsigned char migratable; + unsigned char old_format; unsigned char key[MAX_KEY_SIZE + 1]; unsigned char blob[MAX_BLOB_SIZE]; }; @@ -30,6 +37,7 @@ struct trusted_key_options { uint16_t keytype; uint32_t keyhandle; unsigned char keyauth[TPM_DIGEST_SIZE]; + uint32_t blobauth_len; unsigned char blobauth[TPM_DIGEST_SIZE]; uint32_t pcrinfo_len; unsigned char pcrinfo[MAX_PCRINFO_SIZE]; @@ -40,6 +48,53 @@ struct trusted_key_options { uint32_t policyhandle; }; +struct trusted_key_ops { + /* + * flag to indicate if trusted key implementation supports migration + * or not. + */ + unsigned char migratable; + + /* Initialize key interface. */ + int (*init)(void); + + /* Seal a key. */ + int (*seal)(struct trusted_key_payload *p, char *datablob); + + /* Unseal a key. */ + int (*unseal)(struct trusted_key_payload *p, char *datablob); + + /* Get a randomized key. */ + int (*get_random)(unsigned char *key, size_t key_len); + + /* Exit key interface. */ + void (*exit)(void); +}; + +struct trusted_key_source { + char *name; + struct trusted_key_ops *ops; +}; + extern struct key_type key_type_trusted; +#define TRUSTED_DEBUG 0 + +#if TRUSTED_DEBUG +static inline void dump_payload(struct trusted_key_payload *p) +{ + pr_info("key_len %d\n", p->key_len); + print_hex_dump(KERN_INFO, "key ", DUMP_PREFIX_NONE, + 16, 1, p->key, p->key_len, 0); + pr_info("bloblen %d\n", p->blob_len); + print_hex_dump(KERN_INFO, "blob ", DUMP_PREFIX_NONE, + 16, 1, p->blob, p->blob_len, 0); + pr_info("migratable %d\n", p->migratable); +} +#else +static inline void dump_payload(struct trusted_key_payload *p) +{ +} +#endif + #endif /* _KEYS_TRUSTED_TYPE_H */ diff --git a/include/keys/trusted_tee.h b/include/keys/trusted_tee.h new file mode 100644 index 000000000000..151be25a979e --- /dev/null +++ b/include/keys/trusted_tee.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019-2021 Linaro Ltd. 
+ * + * Author: + * Sumit Garg <sumit.garg@linaro.org> + */ + +#ifndef __TEE_TRUSTED_KEY_H +#define __TEE_TRUSTED_KEY_H + +#include <keys/trusted-type.h> + +extern struct trusted_key_ops trusted_key_tee_ops; + +#endif diff --git a/include/keys/trusted_tpm.h b/include/keys/trusted_tpm.h index a56d8e1298f2..7769b726863a 100644 --- a/include/keys/trusted_tpm.h +++ b/include/keys/trusted_tpm.h @@ -16,6 +16,8 @@ #define LOAD32N(buffer, offset) (*(uint32_t *)&buffer[offset]) #define LOAD16(buffer, offset) (ntohs(*(uint16_t *)&buffer[offset])) +extern struct trusted_key_ops trusted_key_tpm_ops; + struct osapsess { uint32_t handle; unsigned char secret[SHA1_DIGEST_SIZE]; @@ -52,30 +54,19 @@ int tpm2_unseal_trusted(struct tpm_chip *chip, #if TPM_DEBUG static inline void dump_options(struct trusted_key_options *o) { - pr_info("trusted_key: sealing key type %d\n", o->keytype); - pr_info("trusted_key: sealing key handle %0X\n", o->keyhandle); - pr_info("trusted_key: pcrlock %d\n", o->pcrlock); - pr_info("trusted_key: pcrinfo %d\n", o->pcrinfo_len); + pr_info("sealing key type %d\n", o->keytype); + pr_info("sealing key handle %0X\n", o->keyhandle); + pr_info("pcrlock %d\n", o->pcrlock); + pr_info("pcrinfo %d\n", o->pcrinfo_len); print_hex_dump(KERN_INFO, "pcrinfo ", DUMP_PREFIX_NONE, 16, 1, o->pcrinfo, o->pcrinfo_len, 0); } -static inline void dump_payload(struct trusted_key_payload *p) -{ - pr_info("trusted_key: key_len %d\n", p->key_len); - print_hex_dump(KERN_INFO, "key ", DUMP_PREFIX_NONE, - 16, 1, p->key, p->key_len, 0); - pr_info("trusted_key: bloblen %d\n", p->blob_len); - print_hex_dump(KERN_INFO, "blob ", DUMP_PREFIX_NONE, - 16, 1, p->blob, p->blob_len, 0); - pr_info("trusted_key: migratable %d\n", p->migratable); -} - static inline void dump_sess(struct osapsess *s) { print_hex_dump(KERN_INFO, "trusted-key: handle ", DUMP_PREFIX_NONE, 16, 1, &s->handle, 4, 0); - pr_info("trusted-key: secret:\n"); + pr_info("secret:\n"); print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, &s->secret, SHA1_DIGEST_SIZE, 0); pr_info("trusted-key: enonce:\n"); @@ -87,7 +78,7 @@ static inline void dump_tpm_buf(unsigned char *buf) { int len; - pr_info("\ntrusted-key: tpm buffer\n"); + pr_info("\ntpm buffer\n"); len = LOAD32(buf, TPM_SIZE_OFFSET); print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, buf, len, 0); } @@ -96,10 +87,6 @@ static inline void dump_options(struct trusted_key_options *o) { } -static inline void dump_payload(struct trusted_key_payload *p) -{ -} - static inline void dump_sess(struct osapsess *s) { } diff --git a/include/kunit/test-bug.h b/include/kunit/test-bug.h new file mode 100644 index 000000000000..5fc58081d511 --- /dev/null +++ b/include/kunit/test-bug.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * KUnit API allowing dynamic analysis tools to interact with KUnit tests + * + * Copyright (C) 2020, Google LLC. + * Author: Uriel Guajardo <urielguajardo@google.com> + */ + +#ifndef _KUNIT_TEST_BUG_H +#define _KUNIT_TEST_BUG_H + +#define kunit_fail_current_test(fmt, ...) \ + __kunit_fail_current_test(__FILE__, __LINE__, fmt, ##__VA_ARGS__) + +#if IS_BUILTIN(CONFIG_KUNIT) + +extern __printf(3, 4) void __kunit_fail_current_test(const char *file, int line, + const char *fmt, ...); + +#else + +static inline __printf(3, 4) void __kunit_fail_current_test(const char *file, int line, + const char *fmt, ...) 
+{ +} + +#endif + +#endif /* _KUNIT_TEST_BUG_H */ diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 6fd3cda608e4..864b9997efb2 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -61,6 +61,7 @@ int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu); +int kvm_pmu_probe_pmuver(void); #else struct kvm_pmu { }; @@ -116,6 +117,9 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) { return 0; } + +static inline int kvm_pmu_probe_pmuver(void) { return 0xf; } + #endif #endif diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 3d74f1060bd1..ec621180ef09 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -322,6 +322,7 @@ struct vgic_cpu { */ struct vgic_io_device rd_iodev; struct vgic_redist_region *rdreg; + u32 rdreg_index; /* Contains the attributes and gpa of the LPI pending tables. */ u64 pendbaser; diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 3bdcfc4401b7..c60745f657e9 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -748,6 +748,11 @@ acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv) return NULL; } +static inline bool acpi_reduced_hardware(void) +{ + return false; +} + static inline void acpi_dev_put(struct acpi_device *adev) {} static inline bool is_acpi_node(const struct fwnode_handle *fwnode) @@ -1034,9 +1039,14 @@ static inline void acpi_ec_set_gpe_wake_mask(u8 action) {} __printf(3, 4) void acpi_handle_printk(const char *level, acpi_handle handle, const char *fmt, ...); +void acpi_evaluation_failure_warn(acpi_handle handle, const char *name, + acpi_status status); #else /* !CONFIG_ACPI */ static inline __printf(3, 4) void acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {} +static inline void acpi_evaluation_failure_warn(acpi_handle handle, + const char *name, + acpi_status status) {} #endif /* !CONFIG_ACPI */ #if defined(CONFIG_ACPI) && defined(CONFIG_DYNAMIC_DEBUG) diff --git a/include/linux/align.h b/include/linux/align.h new file mode 100644 index 000000000000..2b4acec7b95a --- /dev/null +++ b/include/linux/align.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ALIGN_H +#define _LINUX_ALIGN_H + +#include <linux/const.h> + +/* @a is a power of 2 value */ +#define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) +#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a)) +#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask)) +#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) +#define PTR_ALIGN_DOWN(p, a) ((typeof(p))ALIGN_DOWN((unsigned long)(p), (a))) +#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) + +#endif /* _LINUX_ALIGN_H */ diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h index 131b27c97209..9bf58aac0df2 100644 --- a/include/linux/amba/pl022.h +++ b/include/linux/amba/pl022.h @@ -223,10 +223,6 @@ struct dma_chan; /** * struct pl022_ssp_master - device.platform_data for SPI controller devices. * @bus_id: identifier for this bus - * @num_chipselect: chipselects are used to distinguish individual - * SPI slaves, and are numbered from zero to num_chipselects - 1. - * each slave has a chipselect signal, but it's common that not - * every chipselect is connected to a slave. * @enable_dma: if true enables DMA driven transfers. * @dma_rx_param: parameter to locate an RX DMA channel. 
* @dma_tx_param: parameter to locate a TX DMA channel. @@ -235,18 +231,15 @@ struct dma_chan; * indicates no delay and the device will be suspended immediately. * @rt: indicates the controller should run the message pump with realtime * priority to minimise the transfer latency on the bus. - * @chipselects: list of <num_chipselects> chip select gpios */ struct pl022_ssp_controller { u16 bus_id; - u8 num_chipselect; u8 enable_dma:1; bool (*dma_filter)(struct dma_chan *chan, void *filter_param); void *dma_rx_param; void *dma_tx_param; int autosuspend_delay; bool rt; - int *chipselects; }; /** @@ -265,8 +258,6 @@ struct pl022_ssp_controller { * @duplex: Microwire interface: Full/Half duplex * @clkdelay: on the PL023 variant, the delay in feeback clock cycles * before sampling the incoming line - * @cs_control: function pointer to board-specific function to - * assert/deassert I/O port to control HW generation of devices chip-select. */ struct pl022_config_chip { enum ssp_interface iface; @@ -280,7 +271,6 @@ struct pl022_config_chip { enum ssp_microwire_wait_state wait_state; enum ssp_duplex duplex; enum ssp_clkdelay clkdelay; - void (*cs_control) (u32 control); }; #endif /* _SSP_PL022_H */ diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h index 450717299928..58e6c3806c09 100644 --- a/include/linux/amd-iommu.h +++ b/include/linux/amd-iommu.h @@ -10,6 +10,8 @@ #include <linux/types.h> +struct amd_iommu; + /* * This is mainly used to communicate information back-and-forth * between SVM and IOMMU for setting up and tearing down posted @@ -33,24 +35,6 @@ extern int amd_iommu_detect(void); extern int amd_iommu_init_hardware(void); /** - * amd_iommu_enable_device_erratum() - Enable erratum workaround for device - * in the IOMMUv2 driver - * @pdev: The PCI device the workaround is necessary for - * @erratum: The erratum workaround to enable - * - * The function needs to be called before amd_iommu_init_device(). 
- * Possible values for the erratum number are for now: - * - AMD_PRI_DEV_ERRATUM_ENABLE_RESET - Reset PRI capability when PRI - * is enabled - * - AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE - Limit number of outstanding PRI - * requests to one - */ -#define AMD_PRI_DEV_ERRATUM_ENABLE_RESET 0 -#define AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE 1 - -extern void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum); - -/** * amd_iommu_init_device() - Init device for use with IOMMUv2 driver * @pdev: The PCI device to initialize * @pasids: Number of PASIDs to support for this device @@ -212,4 +196,14 @@ static inline int amd_iommu_deactivate_guest_mode(void *data) } #endif /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */ +int amd_iommu_get_num_iommus(void); +bool amd_iommu_pc_supported(void); +u8 amd_iommu_pc_get_max_banks(unsigned int idx); +u8 amd_iommu_pc_get_max_counters(unsigned int idx); +int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, + u64 *value); +int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, + u64 *value); +struct amd_iommu *get_amd_iommu(unsigned int idx); + #endif /* _ASM_X86_AMD_IOMMU_H */ diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index 0f6cd6b73a61..f180240dc95f 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -23,18 +23,31 @@ static inline unsigned long topology_get_cpu_scale(int cpu) void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity); -DECLARE_PER_CPU(unsigned long, freq_scale); +DECLARE_PER_CPU(unsigned long, arch_freq_scale); static inline unsigned long topology_get_freq_scale(int cpu) { - return per_cpu(freq_scale, cpu); + return per_cpu(arch_freq_scale, cpu); } void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq, unsigned long max_freq); bool topology_scale_freq_invariant(void); -bool arch_freq_counters_available(const struct cpumask *cpus); +enum scale_freq_source { + SCALE_FREQ_SOURCE_CPUFREQ = 0, + SCALE_FREQ_SOURCE_ARCH, + SCALE_FREQ_SOURCE_CPPC, +}; + +struct scale_freq_data { + enum scale_freq_source source; + void (*set_freq_scale)(void); +}; + +void topology_scale_freq_tick(void); +void topology_set_scale_freq_source(struct scale_freq_data *data, const struct cpumask *cpus); +void topology_clear_scale_freq_source(enum scale_freq_source source, const struct cpumask *cpus); DECLARE_PER_CPU(unsigned long, thermal_pressure); diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 62c54234576c..6861489a1890 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -55,6 +55,8 @@ #define ARM_SMCCC_OWNER_TRUSTED_OS 50 #define ARM_SMCCC_OWNER_TRUSTED_OS_END 63 +#define ARM_SMCCC_FUNC_QUERY_CALL_UID 0xff01 + #define ARM_SMCCC_QUIRK_NONE 0 #define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */ @@ -87,8 +89,47 @@ ARM_SMCCC_SMC_32, \ 0, 0x7fff) +#define ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_VENDOR_HYP, \ + ARM_SMCCC_FUNC_QUERY_CALL_UID) + +/* KVM UID value: 28b46fb6-2ec5-11e9-a9ca-4b564d003a74 */ +#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 0xb66fb428U +#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 0xe911c52eU +#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 0x564bcaa9U +#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3 0x743a004dU + +/* KVM "vendor specific" services */ +#define ARM_SMCCC_KVM_FUNC_FEATURES 0 +#define ARM_SMCCC_KVM_FUNC_PTP 1 +#define ARM_SMCCC_KVM_FUNC_FEATURES_2 127 
+#define ARM_SMCCC_KVM_NUM_FUNCS 128 + +#define ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_VENDOR_HYP, \ + ARM_SMCCC_KVM_FUNC_FEATURES) + #define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED 1 +/* + * ptp_kvm is a feature used for time sync between vm and host. + * ptp_kvm module in guest kernel will get service from host using + * this hypercall ID. + */ +#define ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_VENDOR_HYP, \ + ARM_SMCCC_KVM_FUNC_PTP) + +/* ptp_kvm counter type ID */ +#define KVM_PTP_VIRT_COUNTER 0 +#define KVM_PTP_PHYS_COUNTER 1 + /* Paravirtualised time calls (defined by ARM DEN0057A) */ #define ARM_SMCCC_HV_PV_TIME_FEATURES \ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ diff --git a/include/linux/asn1_encoder.h b/include/linux/asn1_encoder.h new file mode 100644 index 000000000000..08cd0c2ad34f --- /dev/null +++ b/include/linux/asn1_encoder.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _LINUX_ASN1_ENCODER_H +#define _LINUX_ASN1_ENCODER_H + +#include <linux/types.h> +#include <linux/asn1.h> +#include <linux/asn1_ber_bytecode.h> +#include <linux/bug.h> + +#define asn1_oid_len(oid) (sizeof(oid)/sizeof(u32)) +unsigned char * +asn1_encode_integer(unsigned char *data, const unsigned char *end_data, + s64 integer); +unsigned char * +asn1_encode_oid(unsigned char *data, const unsigned char *end_data, + u32 oid[], int oid_len); +unsigned char * +asn1_encode_tag(unsigned char *data, const unsigned char *end_data, + u32 tag, const unsigned char *string, int len); +unsigned char * +asn1_encode_octet_string(unsigned char *data, + const unsigned char *end_data, + const unsigned char *string, u32 len); +unsigned char * +asn1_encode_sequence(unsigned char *data, const unsigned char *end_data, + const unsigned char *seq, int len); +unsigned char * +asn1_encode_boolean(unsigned char *data, const unsigned char *end_data, + bool val); + +#endif diff --git a/include/linux/async.h b/include/linux/async.h index 0a17cd27f348..cce4ad31e8fc 100644 --- a/include/linux/async.h +++ b/include/linux/async.h @@ -112,7 +112,6 @@ async_schedule_dev_domain(async_func_t func, struct device *dev, return async_schedule_node_domain(func, dev, dev_to_node(dev), domain); } -void async_unregister_domain(struct async_domain *domain); extern void async_synchronize_full(void); extern void async_synchronize_full_domain(struct async_domain *domain); extern void async_synchronize_cookie(async_cookie_t cookie); diff --git a/include/linux/atm_suni.h b/include/linux/atm_suni.h deleted file mode 100644 index 84f3aab54468..000000000000 --- a/include/linux/atm_suni.h +++ /dev/null @@ -1,12 +0,0 @@ -/* atm_suni.h - Driver-specific declarations of the SUNI driver (for use by - driver-specific utilities) */ - -/* Written 1998,2000 by Werner Almesberger, EPFL ICA */ - - -#ifndef LINUX_ATM_SUNI_H -#define LINUX_ATM_SUNI_H - -/* everything obsoleted */ - -#endif diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index 532bcbfc4716..565deea6ffe8 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -136,6 +136,12 @@ enum virtchnl_ops { VIRTCHNL_OP_DISABLE_CHANNELS = 31, VIRTCHNL_OP_ADD_CLOUD_FILTER = 32, VIRTCHNL_OP_DEL_CLOUD_FILTER = 33, + /* opcode 34 - 44 are reserved */ + VIRTCHNL_OP_ADD_RSS_CFG = 45, + VIRTCHNL_OP_DEL_RSS_CFG = 46, + VIRTCHNL_OP_ADD_FDIR_FILTER = 47, + VIRTCHNL_OP_DEL_FDIR_FILTER = 48, + 
VIRTCHNL_OP_MAX, }; /* These macros are used to generate compilation errors if a structure/union @@ -247,6 +253,9 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); #define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000 #define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000 #define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000 +#define VIRTCHNL_VF_OFFLOAD_USO 0X02000000 +#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF 0X08000000 +#define VIRTCHNL_VF_OFFLOAD_FDIR_PF 0X10000000 /* Define below the capability flags that are not offloads */ #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080 @@ -557,6 +566,11 @@ enum virtchnl_action { /* action types */ VIRTCHNL_ACTION_DROP = 0, VIRTCHNL_ACTION_TC_REDIRECT, + VIRTCHNL_ACTION_PASSTHRU, + VIRTCHNL_ACTION_QUEUE, + VIRTCHNL_ACTION_Q_REGION, + VIRTCHNL_ACTION_MARK, + VIRTCHNL_ACTION_COUNT, }; enum virtchnl_flow_type { @@ -666,6 +680,285 @@ enum virtchnl_vfr_states { VIRTCHNL_VFR_VFACTIVE, }; +/* Type of RSS algorithm */ +enum virtchnl_rss_algorithm { + VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0, + VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1, + VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2, + VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3, +}; + +#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32 +#define PROTO_HDR_SHIFT 5 +#define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT) +#define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1) + +/* VF use these macros to configure each protocol header. + * Specify which protocol headers and protocol header fields base on + * virtchnl_proto_hdr_type and virtchnl_proto_hdr_field. + * @param hdr: a struct of virtchnl_proto_hdr + * @param hdr_type: ETH/IPV4/TCP, etc + * @param field: SRC/DST/TEID/SPI, etc + */ +#define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \ + ((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK)) +#define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \ + ((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK)) +#define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \ + ((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK)) +#define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr) ((hdr)->field_selector) + +#define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \ + (VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \ + VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field)) +#define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \ + (VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \ + VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field)) + +#define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \ + ((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type) +#define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \ + (((hdr)->type) >> PROTO_HDR_SHIFT) +#define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \ + ((hdr)->type == ((val) >> PROTO_HDR_SHIFT)) +#define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \ + (VIRTCHNL_TEST_PROTO_HDR_TYPE((hdr), (val)) && \ + VIRTCHNL_TEST_PROTO_HDR_FIELD((hdr), (val))) + +/* Protocol header type within a packet segment. A segment consists of one or + * more protocol headers that make up a logical group of protocol headers. Each + * logical group of protocol headers encapsulates or is encapsulated using/by + * tunneling or encapsulation protocols for network virtualization. 
+ */ +enum virtchnl_proto_hdr_type { + VIRTCHNL_PROTO_HDR_NONE, + VIRTCHNL_PROTO_HDR_ETH, + VIRTCHNL_PROTO_HDR_S_VLAN, + VIRTCHNL_PROTO_HDR_C_VLAN, + VIRTCHNL_PROTO_HDR_IPV4, + VIRTCHNL_PROTO_HDR_IPV6, + VIRTCHNL_PROTO_HDR_TCP, + VIRTCHNL_PROTO_HDR_UDP, + VIRTCHNL_PROTO_HDR_SCTP, + VIRTCHNL_PROTO_HDR_GTPU_IP, + VIRTCHNL_PROTO_HDR_GTPU_EH, + VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN, + VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP, + VIRTCHNL_PROTO_HDR_PPPOE, + VIRTCHNL_PROTO_HDR_L2TPV3, + VIRTCHNL_PROTO_HDR_ESP, + VIRTCHNL_PROTO_HDR_AH, + VIRTCHNL_PROTO_HDR_PFCP, +}; + +/* Protocol header field within a protocol header. */ +enum virtchnl_proto_hdr_field { + /* ETHER */ + VIRTCHNL_PROTO_HDR_ETH_SRC = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH), + VIRTCHNL_PROTO_HDR_ETH_DST, + VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, + /* S-VLAN */ + VIRTCHNL_PROTO_HDR_S_VLAN_ID = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN), + /* C-VLAN */ + VIRTCHNL_PROTO_HDR_C_VLAN_ID = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN), + /* IPV4 */ + VIRTCHNL_PROTO_HDR_IPV4_SRC = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4), + VIRTCHNL_PROTO_HDR_IPV4_DST, + VIRTCHNL_PROTO_HDR_IPV4_DSCP, + VIRTCHNL_PROTO_HDR_IPV4_TTL, + VIRTCHNL_PROTO_HDR_IPV4_PROT, + /* IPV6 */ + VIRTCHNL_PROTO_HDR_IPV6_SRC = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6), + VIRTCHNL_PROTO_HDR_IPV6_DST, + VIRTCHNL_PROTO_HDR_IPV6_TC, + VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, + VIRTCHNL_PROTO_HDR_IPV6_PROT, + /* TCP */ + VIRTCHNL_PROTO_HDR_TCP_SRC_PORT = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP), + VIRTCHNL_PROTO_HDR_TCP_DST_PORT, + /* UDP */ + VIRTCHNL_PROTO_HDR_UDP_SRC_PORT = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP), + VIRTCHNL_PROTO_HDR_UDP_DST_PORT, + /* SCTP */ + VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP), + VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, + /* GTPU_IP */ + VIRTCHNL_PROTO_HDR_GTPU_IP_TEID = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP), + /* GTPU_EH */ + VIRTCHNL_PROTO_HDR_GTPU_EH_PDU = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH), + VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, + /* PPPOE */ + VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE), + /* L2TPV3 */ + VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3), + /* ESP */ + VIRTCHNL_PROTO_HDR_ESP_SPI = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP), + /* AH */ + VIRTCHNL_PROTO_HDR_AH_SPI = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH), + /* PFCP */ + VIRTCHNL_PROTO_HDR_PFCP_S_FIELD = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP), + VIRTCHNL_PROTO_HDR_PFCP_SEID, +}; + +struct virtchnl_proto_hdr { + enum virtchnl_proto_hdr_type type; + u32 field_selector; /* a bit mask to select field for header type */ + u8 buffer[64]; + /** + * binary buffer in network order for specific header type. + * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, a IPv4 + * header is expected to be copied into the buffer. + */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr); + +struct virtchnl_proto_hdrs { + u8 tunnel_level; + /** + * specify where protocol header start from. + * 0 - from the outer layer + * 1 - from the first inner layer + * 2 - from the second inner layer + * .... 
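The proto-header macros above rely on a simple packing scheme: each enumerator in virtchnl_proto_hdr_field carries its parent header type in the bits above PROTO_HDR_SHIFT, and only the low five bits select the bit position inside field_selector. A short sketch using the definitions exactly as added above:

#include <linux/avf/virtchnl.h>

static void example_select_ipv4_dst(struct virtchnl_proto_hdr *hdr)
{
	/* hdr->type = VIRTCHNL_PROTO_HDR_IPV4 (== 4) */
	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);

	/*
	 * VIRTCHNL_PROTO_HDR_IPV4_DST == (4 << PROTO_HDR_SHIFT) + 1 == 129,
	 * so this sets BIT(129 & PROTO_HDR_FIELD_MASK) == BIT(1) in
	 * hdr->field_selector.
	 */
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
}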
+ **/ + int count; /* the proto layers must < VIRTCHNL_MAX_NUM_PROTO_HDRS */ + struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs); + +struct virtchnl_rss_cfg { + struct virtchnl_proto_hdrs proto_hdrs; /* protocol headers */ + enum virtchnl_rss_algorithm rss_algorithm; /* RSS algorithm type */ + u8 reserved[128]; /* reserve for future */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg); + +/* action configuration for FDIR */ +struct virtchnl_filter_action { + enum virtchnl_action type; + union { + /* used for queue and qgroup action */ + struct { + u16 index; + u8 region; + } queue; + /* used for count action */ + struct { + /* share counter ID with other flow rules */ + u8 shared; + u32 id; /* counter ID */ + } count; + /* used for mark action */ + u32 mark_id; + u8 reserve[32]; + } act_conf; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action); + +#define VIRTCHNL_MAX_NUM_ACTIONS 8 + +struct virtchnl_filter_action_set { + /* action number must be less then VIRTCHNL_MAX_NUM_ACTIONS */ + int count; + struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set); + +/* pattern and action for FDIR rule */ +struct virtchnl_fdir_rule { + struct virtchnl_proto_hdrs proto_hdrs; + struct virtchnl_filter_action_set action_set; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule); + +/* Status returned to VF after VF requests FDIR commands + * VIRTCHNL_FDIR_SUCCESS + * VF FDIR related request is successfully done by PF + * The request can be OP_ADD/DEL. + * + * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE + * OP_ADD_FDIR_FILTER request is failed due to no Hardware resource. + * + * VIRTCHNL_FDIR_FAILURE_RULE_EXIST + * OP_ADD_FDIR_FILTER request is failed due to the rule is already existed. + * + * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT + * OP_ADD_FDIR_FILTER request is failed due to conflict with existing rule. + * + * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST + * OP_DEL_FDIR_FILTER request is failed due to this rule doesn't exist. + * + * VIRTCHNL_FDIR_FAILURE_RULE_INVALID + * OP_ADD_FDIR_FILTER request is failed due to parameters validation + * or HW doesn't support. + * + * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT + * OP_ADD/DEL_FDIR_FILTER request is failed due to timing out + * for programming. + */ +enum virtchnl_fdir_prgm_status { + VIRTCHNL_FDIR_SUCCESS = 0, + VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE, + VIRTCHNL_FDIR_FAILURE_RULE_EXIST, + VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT, + VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST, + VIRTCHNL_FDIR_FAILURE_RULE_INVALID, + VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT, +}; + +/* VIRTCHNL_OP_ADD_FDIR_FILTER + * VF sends this request to PF by filling out vsi_id, + * validate_only and rule_cfg. PF will return flow_id + * if the request is successfully done and return add_status to VF. + */ +struct virtchnl_fdir_add { + u16 vsi_id; /* INPUT */ + /* + * 1 for validating a fdir rule, 0 for creating a fdir rule. + * Validate and create share one ops: VIRTCHNL_OP_ADD_FDIR_FILTER. + */ + u16 validate_only; /* INPUT */ + u32 flow_id; /* OUTPUT */ + struct virtchnl_fdir_rule rule_cfg; /* INPUT */ + enum virtchnl_fdir_prgm_status status; /* OUTPUT */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add); + +/* VIRTCHNL_OP_DEL_FDIR_FILTER + * VF sends this request to PF by filling out vsi_id + * and flow_id. PF will return del_status to VF. 
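Putting the new flow director structures together, here is a rough sketch of how a VF driver could build a VIRTCHNL_OP_ADD_FDIR_FILTER request that steers TCP/IPv4 traffic to one RX queue. The example_send_to_pf() transport helper is hypothetical (real drivers go through their own virtchnl mailbox code), and a complete rule would also copy the header values to match into each proto_hdr buffer:

#include <linux/avf/virtchnl.h>

/* Hypothetical stand-in for the driver's PF mailbox transport. */
int example_send_to_pf(enum virtchnl_ops op, void *msg, u16 len);

static int example_add_tcp4_fdir_rule(u16 vsi_id, u16 rx_queue)
{
	struct virtchnl_fdir_add req = {};
	struct virtchnl_proto_hdrs *hdrs = &req.rule_cfg.proto_hdrs;
	struct virtchnl_filter_action_set *acts = &req.rule_cfg.action_set;
	struct virtchnl_proto_hdr *hdr;

	req.vsi_id = vsi_id;
	req.validate_only = 0;			/* 1 would only validate the rule */

	hdrs->tunnel_level = 0;			/* match from the outer headers */
	hdrs->count = 2;			/* IPv4 followed by TCP */

	hdr = &hdrs->proto_hdr[0];
	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);

	hdr = &hdrs->proto_hdr[1];
	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);

	acts->count = 1;
	acts->actions[0].type = VIRTCHNL_ACTION_QUEUE;
	acts->actions[0].act_conf.queue.index = rx_queue;

	return example_send_to_pf(VIRTCHNL_OP_ADD_FDIR_FILTER, &req, sizeof(req));
}

On success the PF fills in flow_id, which the VF has to keep so it can later populate a virtchnl_fdir_del request for VIRTCHNL_OP_DEL_FDIR_FILTER.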
+ */ +struct virtchnl_fdir_del { + u16 vsi_id; /* INPUT */ + u16 pad; + u32 flow_id; /* INPUT */ + enum virtchnl_fdir_prgm_status status; /* OUTPUT */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del); + /** * virtchnl_vc_validate_vf_msg * @ver: Virtchnl version info @@ -826,6 +1119,16 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, case VIRTCHNL_OP_DEL_CLOUD_FILTER: valid_len = sizeof(struct virtchnl_filter); break; + case VIRTCHNL_OP_ADD_RSS_CFG: + case VIRTCHNL_OP_DEL_RSS_CFG: + valid_len = sizeof(struct virtchnl_rss_cfg); + break; + case VIRTCHNL_OP_ADD_FDIR_FILTER: + valid_len = sizeof(struct virtchnl_fdir_add); + break; + case VIRTCHNL_OP_DEL_FDIR_FILTER: + valid_len = sizeof(struct virtchnl_fdir_del); + break; /* These are always errors coming from the VF. */ case VIRTCHNL_OP_EVENT: case VIRTCHNL_OP_UNKNOWN: diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 0abd93efc181..049cf9421d83 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -86,6 +86,7 @@ struct coredump_params { unsigned long mm_flags; loff_t written; loff_t pos; + loff_t to_skip; }; /* diff --git a/include/linux/bio.h b/include/linux/bio.h index d0246c92a6e8..a0b4cfdf62a4 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -483,16 +483,10 @@ extern void bio_check_pages_dirty(struct bio *bio); extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, struct bio *src, struct bvec_iter *src_iter); extern void bio_copy_data(struct bio *dst, struct bio *src); -extern void bio_list_copy_data(struct bio *dst, struct bio *src); extern void bio_free_pages(struct bio *bio); -void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter); void bio_truncate(struct bio *bio, unsigned new_size); void guard_bio_eod(struct bio *bio); - -static inline void zero_fill_bio(struct bio *bio) -{ - zero_fill_bio_iter(bio, bio->bi_iter); -} +void zero_fill_bio(struct bio *bio); extern const char *bio_devname(struct bio *bio, char *buffer); diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 70a932470b2d..a36cfcec4e77 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -4,10 +4,13 @@ #ifndef __ASSEMBLY__ -#include <linux/types.h> +#include <linux/align.h> #include <linux/bitops.h> +#include <linux/limits.h> #include <linux/string.h> -#include <linux/kernel.h> +#include <linux/types.h> + +struct device; /* * bitmaps provide bit arrays that consume one or more unsigned @@ -118,54 +121,59 @@ * Allocation and deallocation of bitmap. * Provided in lib/bitmap.c to avoid circular dependency. */ -extern unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags); -extern unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags); -extern void bitmap_free(const unsigned long *bitmap); +unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags); +unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags); +void bitmap_free(const unsigned long *bitmap); + +/* Managed variants of the above. 
*/ +unsigned long *devm_bitmap_alloc(struct device *dev, + unsigned int nbits, gfp_t flags); +unsigned long *devm_bitmap_zalloc(struct device *dev, + unsigned int nbits, gfp_t flags); /* * lib/bitmap.c provides these functions: */ -extern int __bitmap_equal(const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -extern bool __pure __bitmap_or_equal(const unsigned long *src1, - const unsigned long *src2, - const unsigned long *src3, - unsigned int nbits); -extern void __bitmap_complement(unsigned long *dst, const unsigned long *src, - unsigned int nbits); -extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src, - unsigned int shift, unsigned int nbits); -extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src, - unsigned int shift, unsigned int nbits); -extern void bitmap_cut(unsigned long *dst, const unsigned long *src, - unsigned int first, unsigned int cut, - unsigned int nbits); -extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, +int __bitmap_equal(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +bool __pure __bitmap_or_equal(const unsigned long *src1, + const unsigned long *src2, + const unsigned long *src3, + unsigned int nbits); +void __bitmap_complement(unsigned long *dst, const unsigned long *src, + unsigned int nbits); +void __bitmap_shift_right(unsigned long *dst, const unsigned long *src, + unsigned int shift, unsigned int nbits); +void __bitmap_shift_left(unsigned long *dst, const unsigned long *src, + unsigned int shift, unsigned int nbits); +void bitmap_cut(unsigned long *dst, const unsigned long *src, + unsigned int first, unsigned int cut, unsigned int nbits); +int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +void __bitmap_replace(unsigned long *dst, + const unsigned long *old, const unsigned long *new, + const unsigned long *mask, unsigned int nbits); +int __bitmap_intersects(const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int nbits); -extern void __bitmap_replace(unsigned long *dst, - const unsigned long *old, const unsigned long *new, - const unsigned long *mask, unsigned int nbits); -extern int __bitmap_intersects(const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -extern int __bitmap_subset(const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int nbits); -extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits); -extern void __bitmap_set(unsigned long *map, unsigned int start, int len); -extern void __bitmap_clear(unsigned long *map, unsigned int start, int len); - -extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map, - 
unsigned long size, - unsigned long start, - unsigned int nr, - unsigned long align_mask, - unsigned long align_offset); +int __bitmap_subset(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int nbits); +int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits); +void __bitmap_set(unsigned long *map, unsigned int start, int len); +void __bitmap_clear(unsigned long *map, unsigned int start, int len); + +unsigned long bitmap_find_next_zero_area_off(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned int nr, + unsigned long align_mask, + unsigned long align_offset); /** * bitmap_find_next_zero_area - find a contiguous aligned zero area @@ -190,46 +198,38 @@ bitmap_find_next_zero_area(unsigned long *map, align_mask, 0); } -extern int bitmap_parse(const char *buf, unsigned int buflen, +int bitmap_parse(const char *buf, unsigned int buflen, unsigned long *dst, int nbits); -extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, +int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, unsigned long *dst, int nbits); -extern int bitmap_parselist(const char *buf, unsigned long *maskp, +int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits); -extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen, +int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen, unsigned long *dst, int nbits); -extern void bitmap_remap(unsigned long *dst, const unsigned long *src, +void bitmap_remap(unsigned long *dst, const unsigned long *src, const unsigned long *old, const unsigned long *new, unsigned int nbits); -extern int bitmap_bitremap(int oldbit, +int bitmap_bitremap(int oldbit, const unsigned long *old, const unsigned long *new, int bits); -extern void bitmap_onto(unsigned long *dst, const unsigned long *orig, +void bitmap_onto(unsigned long *dst, const unsigned long *orig, const unsigned long *relmap, unsigned int bits); -extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, +void bitmap_fold(unsigned long *dst, const unsigned long *orig, unsigned int sz, unsigned int nbits); -extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); -extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); -extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); +int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); +void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); +int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); #ifdef __BIG_ENDIAN -extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); +void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); #else #define bitmap_copy_le bitmap_copy #endif -extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits); -extern int bitmap_print_to_pagebuf(bool list, char *buf, +unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits); +int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, int nmaskbits); #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1))) -/* - * The static inlines below do not handle constant nbits==0 correctly, - * so make such users (should any ever turn up) call 
the out-of-line - * versions. - */ -#define small_const_nbits(nbits) \ - (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0) - static inline void bitmap_zero(unsigned long *dst, unsigned int nbits) { unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); @@ -265,9 +265,9 @@ static inline void bitmap_copy_clear_tail(unsigned long *dst, * therefore conversion is not needed when copying data from/to arrays of u32. */ #if BITS_PER_LONG == 64 -extern void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, +void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits); -extern void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, +void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits); #else #define bitmap_from_arr32(bitmap, buf, nbits) \ diff --git a/include/linux/bitops.h b/include/linux/bitops.h index a5a48303b0f1..26bf15e6cd35 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -286,17 +286,5 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr, }) #endif -#ifndef find_last_bit -/** - * find_last_bit - find the last set bit in a memory region - * @addr: The address to start the search at - * @size: The number of bits to search - * - * Returns the bit number of the last set bit, or size. - */ -extern unsigned long find_last_bit(const unsigned long *addr, - unsigned long size); -#endif - #endif /* __KERNEL__ */ #endif diff --git a/include/linux/bits.h b/include/linux/bits.h index 7f475d59a097..87d112650dfb 100644 --- a/include/linux/bits.h +++ b/include/linux/bits.h @@ -22,7 +22,7 @@ #include <linux/build_bug.h> #define GENMASK_INPUT_CHECK(h, l) \ (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \ - __builtin_constant_p((l) > (h)), (l) > (h), 0))) + __is_constexpr((l) > (h)), (l) > (h), 0))) #else /* * BUILD_BUG_ON_ZERO is not available in h files included from asm files, diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 2c473c9b8990..359486940fa0 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -306,12 +306,21 @@ struct blk_mq_ops { * reserved budget. Also we have to handle failure case * of .get_budget for avoiding I/O deadlock. */ - bool (*get_budget)(struct request_queue *); + int (*get_budget)(struct request_queue *); /** * @put_budget: Release the reserved budget. */ - void (*put_budget)(struct request_queue *); + void (*put_budget)(struct request_queue *, int); + + /** + * @set_rq_budget_token: store rq's budget token + */ + void (*set_rq_budget_token)(struct request *, int); + /** + * @get_rq_budget_token: retrieve rq's budget token + */ + int (*get_rq_budget_token)(struct request *); /** * @timeout: Called on request timeout. 
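The blk-mq hunk above makes dispatch budgets token based: .get_budget now returns an int (negative meaning no budget is available), .put_budget receives that token back, and the two new callbacks let the core stash the token in the request between dispatch and completion. A sketch of how a driver might wire this up; the my_* names and the trivial atomic counter are made up for illustration (SCSI, for example, hands out tokens from a per-device bitmap instead):

#include <linux/blk-mq.h>
#include <linux/atomic.h>

struct my_queue {
	atomic_t budget;		/* remaining device queue slots */
};

struct my_cmd {
	int budget_token;
};

static int my_get_budget(struct request_queue *q)
{
	struct my_queue *mq = q->queuedata;

	/* A real driver would return a unique token per in-flight command. */
	return atomic_add_unless(&mq->budget, -1, 0) ? 0 : -1;
}

static void my_put_budget(struct request_queue *q, int budget_token)
{
	struct my_queue *mq = q->queuedata;

	atomic_inc(&mq->budget);
}

static void my_set_rq_budget_token(struct request *rq, int token)
{
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->budget_token = token;
}

static int my_get_rq_budget_token(struct request *rq)
{
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

	return cmd->budget_token;
}

static const struct blk_mq_ops my_mq_ops = {
	.get_budget		= my_get_budget,
	.put_budget		= my_put_budget,
	.set_rq_budget_token	= my_set_rq_budget_token,
	.get_rq_budget_token	= my_get_rq_budget_token,
	/* .queue_rq and friends omitted */
};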
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 158aefae1030..f69c75bd6d27 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -11,7 +11,6 @@ #include <linux/minmax.h> #include <linux/timer.h> #include <linux/workqueue.h> -#include <linux/pagemap.h> #include <linux/backing-dev-defs.h> #include <linux/wait.h> #include <linux/mempool.h> @@ -272,6 +271,12 @@ static inline bool bio_is_passthrough(struct bio *bio) return blk_op_is_scsi(op) || blk_op_is_private(op); } +static inline bool blk_op_is_passthrough(unsigned int op) +{ + return (blk_op_is_scsi(op & REQ_OP_MASK) || + blk_op_is_private(op & REQ_OP_MASK)); +} + static inline unsigned short req_get_ioprio(struct request *req) { return req->ioprio; @@ -311,8 +316,17 @@ enum blk_zoned_model { BLK_ZONED_HM, /* Host-managed zoned block device */ }; +/* + * BLK_BOUNCE_NONE: never bounce (default) + * BLK_BOUNCE_HIGH: bounce all highmem pages + */ +enum blk_bounce { + BLK_BOUNCE_NONE, + BLK_BOUNCE_HIGH, +}; + struct queue_limits { - unsigned long bounce_pfn; + enum blk_bounce bounce; unsigned long seg_boundary_mask; unsigned long virt_boundary_mask; @@ -434,11 +448,6 @@ struct request_queue { */ int id; - /* - * queue needs bounce pages for pages above this limit - */ - gfp_t bounce_gfp; - spinlock_t queue_lock; /* @@ -667,11 +676,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); extern void blk_set_pm_only(struct request_queue *q); extern void blk_clear_pm_only(struct request_queue *q); -static inline bool blk_account_rq(struct request *rq) -{ - return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq); -} - #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? 
WRITE : READ) @@ -683,6 +687,8 @@ static inline bool blk_account_rq(struct request *rq) dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \ (dir), (attrs)) +#define queue_to_disk(q) (dev_to_disk(kobj_to_dev((q)->kobj.parent))) + static inline bool queue_is_mq(struct request_queue *q) { return q->mq_ops; @@ -838,24 +844,6 @@ static inline unsigned int blk_queue_depth(struct request_queue *q) return q->nr_requests; } -extern unsigned long blk_max_low_pfn, blk_max_pfn; - -/* - * standard bounce addresses: - * - * BLK_BOUNCE_HIGH : bounce all highmem pages - * BLK_BOUNCE_ANY : don't bounce anything - * BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary - */ - -#if BITS_PER_LONG == 32 -#define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT) -#else -#define BLK_BOUNCE_HIGH -1ULL -#endif -#define BLK_BOUNCE_ANY (-1ULL) -#define BLK_BOUNCE_ISA (DMA_BIT_MASK(24)) - /* * default timeout for SG_IO if none specified */ @@ -921,7 +909,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, extern void blk_rq_unprep_clone(struct request *rq); extern blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq); -extern int blk_rq_append_bio(struct request *rq, struct bio **bio); +int blk_rq_append_bio(struct request *rq, struct bio *bio); extern void blk_queue_split(struct bio **); extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int); extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t, @@ -1139,7 +1127,7 @@ extern void blk_abort_request(struct request *); * Access functions for manipulating queue properties */ extern void blk_cleanup_queue(struct request_queue *); -extern void blk_queue_bounce_limit(struct request_queue *, u64); +void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit); extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); extern void blk_queue_max_segments(struct request_queue *, unsigned short); @@ -1868,7 +1856,6 @@ struct block_device_operations { unsigned int (*check_events) (struct gendisk *disk, unsigned int clearing); void (*unlock_native_capacity) (struct gendisk *); - int (*revalidate_disk) (struct gendisk *); int (*getgeo)(struct block_device *, struct hd_geometry *); int (*set_read_only)(struct block_device *bdev, bool ro); /* this callback is with swap_lock and sometimes page table lock held */ diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h index a19519f4241d..eed86eb0a1de 100644 --- a/include/linux/bottom_half.h +++ b/include/linux/bottom_half.h @@ -4,7 +4,7 @@ #include <linux/preempt.h> -#ifdef CONFIG_TRACE_IRQFLAGS +#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_TRACE_IRQFLAGS) extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); #else static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) @@ -32,4 +32,10 @@ static inline void local_bh_enable(void) __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); } +#ifdef CONFIG_PREEMPT_RT +extern bool local_bh_blocked(void); +#else +static inline bool local_bh_blocked(void) { return false; } +#endif + #endif /* _LINUX_BH_H */ diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index c42e02b4d84b..8b77d08d4b47 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -20,14 +20,25 @@ struct bpf_sock_ops_kern; struct bpf_cgroup_storage; struct ctl_table; struct ctl_table_header; +struct task_struct; #ifdef 
CONFIG_CGROUP_BPF extern struct static_key_false cgroup_bpf_enabled_key[MAX_BPF_ATTACH_TYPE]; #define cgroup_bpf_enabled(type) static_branch_unlikely(&cgroup_bpf_enabled_key[type]) -DECLARE_PER_CPU(struct bpf_cgroup_storage*, - bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]); +#define BPF_CGROUP_STORAGE_NEST_MAX 8 + +struct bpf_cgroup_storage_info { + struct task_struct *task; + struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]; +}; + +/* For each cpu, permit maximum BPF_CGROUP_STORAGE_NEST_MAX number of tasks + * to use bpf cgroup storage simultaneously. + */ +DECLARE_PER_CPU(struct bpf_cgroup_storage_info, + bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]); #define for_each_cgroup_storage_type(stype) \ for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++) @@ -161,13 +172,42 @@ static inline enum bpf_cgroup_storage_type cgroup_storage_type( return BPF_CGROUP_STORAGE_SHARED; } -static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage - *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) +static inline int bpf_cgroup_storage_set(struct bpf_cgroup_storage + *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) { enum bpf_cgroup_storage_type stype; + int i, err = 0; + + preempt_disable(); + for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) { + if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != NULL)) + continue; + + this_cpu_write(bpf_cgroup_storage_info[i].task, current); + for_each_cgroup_storage_type(stype) + this_cpu_write(bpf_cgroup_storage_info[i].storage[stype], + storage[stype]); + goto out; + } + err = -EBUSY; + WARN_ON_ONCE(1); + +out: + preempt_enable(); + return err; +} + +static inline void bpf_cgroup_storage_unset(void) +{ + int i; - for_each_cgroup_storage_type(stype) - this_cpu_write(bpf_cgroup_storage[stype], storage[stype]); + for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) { + if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current)) + continue; + + this_cpu_write(bpf_cgroup_storage_info[i].task, NULL); + return; + } } struct bpf_cgroup_storage * @@ -418,7 +458,6 @@ int cgroup_bpf_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); #else -struct bpf_prog; struct cgroup_bpf {}; static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; } static inline void cgroup_bpf_offline(struct cgroup *cgrp) {} @@ -448,8 +487,9 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr, return -EINVAL; } -static inline void bpf_cgroup_storage_set( - struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {} +static inline int bpf_cgroup_storage_set( + struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) { return 0; } +static inline void bpf_cgroup_storage_unset(void) {} static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map) { return 0; } static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( diff --git a/include/linux/bpf.h b/include/linux/bpf.h index fdac0534ce79..02b02cb29ce2 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -41,6 +41,7 @@ struct bpf_local_storage_map; struct kobject; struct mem_cgroup; struct module; +struct bpf_func_state; extern struct idr btf_idr; extern spinlock_t btf_idr_lock; @@ -56,7 +57,7 @@ struct bpf_iter_seq_info { u32 seq_priv_size; }; -/* map is generic key/value storage optionally accesible by eBPF programs */ +/* map is generic key/value storage optionally accessible by eBPF programs */ struct bpf_map_ops { /* funcs callable from userspace (via syscall) */ int (*map_alloc_check)(union bpf_attr 
*attr); @@ -119,6 +120,9 @@ struct bpf_map_ops { void *owner, u32 size); struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner); + /* Misc helpers.*/ + int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags); + /* map_meta_equal must be implemented for maps that can be * used as an inner map. It is a runtime check to ensure * an inner map can be inserted to an outer map. @@ -131,6 +135,13 @@ struct bpf_map_ops { bool (*map_meta_equal)(const struct bpf_map *meta0, const struct bpf_map *meta1); + + int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env, + struct bpf_func_state *caller, + struct bpf_func_state *callee); + int (*map_for_each_callback)(struct bpf_map *map, void *callback_fn, + void *callback_ctx, u64 flags); + /* BTF name and id of struct allocated by map_alloc */ const char * const map_btf_name; int *map_btf_id; @@ -297,6 +308,9 @@ enum bpf_arg_type { ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */ ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */ ARG_PTR_TO_PERCPU_BTF_ID, /* pointer to in-kernel percpu type */ + ARG_PTR_TO_FUNC, /* pointer to a bpf program function */ + ARG_PTR_TO_STACK_OR_NULL, /* pointer to stack or NULL */ + ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */ __BPF_ARG_TYPE_MAX, }; @@ -413,6 +427,9 @@ enum bpf_reg_type { PTR_TO_RDWR_BUF, /* reg points to a read/write buffer */ PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */ PTR_TO_PERCPU_BTF_ID, /* reg points to a percpu kernel variable */ + PTR_TO_FUNC, /* reg points to a bpf program function */ + PTR_TO_MAP_KEY, /* reg points to a map element key */ + __BPF_REG_TYPE_MAX, }; /* The information passed from prog-specific *_is_valid_access @@ -466,6 +483,7 @@ struct bpf_verifier_ops { const struct btf_type *t, int off, int size, enum bpf_access_type atype, u32 *next_btf_id); + bool (*check_kfunc_call)(u32 kfunc_btf_id); }; struct bpf_prog_offload_ops { @@ -508,6 +526,11 @@ enum bpf_cgroup_storage_type { */ #define MAX_BPF_FUNC_ARGS 12 +/* The maximum number of arguments passed through registers + * a single function may have. + */ +#define MAX_BPF_FUNC_REG_ARGS 5 + struct btf_func_model { u8 ret_size; u8 nr_args; @@ -652,7 +675,7 @@ struct bpf_dispatcher { struct bpf_ksym ksym; }; -static __always_inline unsigned int bpf_dispatcher_nop_func( +static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func( const void *ctx, const struct bpf_insn *insnsi, unsigned int (*bpf_func)(const void *, @@ -680,7 +703,7 @@ void bpf_trampoline_put(struct bpf_trampoline *tr); } #define DEFINE_BPF_DISPATCHER(name) \ - noinline unsigned int bpf_dispatcher_##name##_func( \ + noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \ const void *ctx, \ const struct bpf_insn *insnsi, \ unsigned int (*bpf_func)(const void *, \ @@ -778,6 +801,8 @@ struct btf_mod_pair { struct module *module; }; +struct bpf_kfunc_desc_tab; + struct bpf_prog_aux { atomic64_t refcnt; u32 used_map_cnt; @@ -814,6 +839,7 @@ struct bpf_prog_aux { struct bpf_prog **func; void *jit_data; /* JIT specific data. 
arch dependent */ struct bpf_jit_poke_descriptor *poke_tab; + struct bpf_kfunc_desc_tab *kfunc_tab; u32 size_poke_tab; struct bpf_ksym ksym; const struct bpf_prog_ops *ops; @@ -905,7 +931,6 @@ struct bpf_link_primer { }; struct bpf_struct_ops_value; -struct btf_type; struct btf_member; #define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64 @@ -1088,6 +1113,13 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array, /* BPF program asks to set CN on the packet. */ #define BPF_RET_SET_CN (1 << 0) +/* For BPF_PROG_RUN_ARRAY_FLAGS and __BPF_PROG_RUN_ARRAY, + * if bpf_cgroup_storage_set() failed, the rest of programs + * will not execute. This should be a really rare scenario + * as it requires BPF_CGROUP_STORAGE_NEST_MAX number of + * preemptions all between bpf_cgroup_storage_set() and + * bpf_cgroup_storage_unset() on the same cpu. + */ #define BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, ret_flags) \ ({ \ struct bpf_prog_array_item *_item; \ @@ -1100,10 +1132,12 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array, _array = rcu_dereference(array); \ _item = &_array->items[0]; \ while ((_prog = READ_ONCE(_item->prog))) { \ - bpf_cgroup_storage_set(_item->cgroup_storage); \ + if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage))) \ + break; \ func_ret = func(_prog, ctx); \ _ret &= (func_ret & 1); \ *(ret_flags) |= (func_ret >> 1); \ + bpf_cgroup_storage_unset(); \ _item++; \ } \ rcu_read_unlock(); \ @@ -1124,9 +1158,14 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array, goto _out; \ _item = &_array->items[0]; \ while ((_prog = READ_ONCE(_item->prog))) { \ - if (set_cg_storage) \ - bpf_cgroup_storage_set(_item->cgroup_storage); \ - _ret &= func(_prog, ctx); \ + if (!set_cg_storage) { \ + _ret &= func(_prog, ctx); \ + } else { \ + if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage))) \ + break; \ + _ret &= func(_prog, ctx); \ + bpf_cgroup_storage_unset(); \ + } \ _item++; \ } \ _out: \ @@ -1399,6 +1438,10 @@ void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux, int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux, struct bpf_link_info *info); +int map_set_for_each_callback_args(struct bpf_verifier_env *env, + struct bpf_func_state *caller, + struct bpf_func_state *callee); + int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, @@ -1448,9 +1491,9 @@ struct btf *bpf_get_btf_vmlinux(void); /* Map specifics */ struct xdp_buff; struct sk_buff; +struct bpf_dtab_netdev; +struct bpf_cpu_map_entry; -struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key); -struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key); void __dev_flush(void); int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, struct net_device *dev_rx); @@ -1460,7 +1503,6 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, struct bpf_prog *xdp_prog); bool dev_map_can_have_prog(struct bpf_map *map); -struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key); void __cpu_map_flush(void); int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, struct net_device *dev_rx); @@ -1489,6 +1531,10 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); +int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, + 
const union bpf_attr *kattr, + union bpf_attr __user *uattr); +bool bpf_prog_test_check_kfunc_call(u32 kfunc_id); bool btf_ctx_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info); @@ -1507,8 +1553,11 @@ int btf_distill_func_proto(struct bpf_verifier_log *log, struct btf_func_model *m); struct bpf_reg_state; -int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog, - struct bpf_reg_state *regs); +int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog, + struct bpf_reg_state *regs); +int btf_check_kfunc_arg_match(struct bpf_verifier_env *env, + const struct btf *btf, u32 func_id, + struct bpf_reg_state *regs); int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog, struct bpf_reg_state *reg); int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog, @@ -1518,6 +1567,11 @@ struct bpf_prog *bpf_prog_by_id(u32 id); struct bpf_link *bpf_link_by_id(u32 id); const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id); +void bpf_task_storage_free(struct task_struct *task); +bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog); +const struct btf_func_model * +bpf_jit_find_kfunc_model(const struct bpf_prog *prog, + const struct bpf_insn *insn); #else /* !CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get(u32 ufd) { @@ -1587,17 +1641,6 @@ static inline int bpf_obj_get_user(const char __user *pathname, int flags) return -EOPNOTSUPP; } -static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map, - u32 key) -{ - return NULL; -} - -static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map, - u32 key) -{ - return NULL; -} static inline bool dev_map_can_have_prog(struct bpf_map *map) { return false; @@ -1609,6 +1652,7 @@ static inline void __dev_flush(void) struct xdp_buff; struct bpf_dtab_netdev; +struct bpf_cpu_map_entry; static inline int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, @@ -1633,12 +1677,6 @@ static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, return 0; } -static inline -struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key) -{ - return NULL; -} - static inline void __cpu_map_flush(void) { } @@ -1689,6 +1727,18 @@ static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, return -ENOTSUPP; } +static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, + const union bpf_attr *kattr, + union bpf_attr __user *uattr) +{ + return -ENOTSUPP; +} + +static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id) +{ + return false; +} + static inline void bpf_map_put(struct bpf_map *map) { } @@ -1703,6 +1753,22 @@ bpf_base_func_proto(enum bpf_func_id func_id) { return NULL; } + +static inline void bpf_task_storage_free(struct task_struct *task) +{ +} + +static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog) +{ + return false; +} + +static inline const struct btf_func_model * +bpf_jit_find_kfunc_model(const struct bpf_prog *prog, + const struct bpf_insn *insn) +{ + return NULL; +} #endif /* CONFIG_BPF_SYSCALL */ void __bpf_free_used_btfs(struct bpf_prog_aux *aux, @@ -1787,22 +1853,24 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map) } #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ -#if defined(CONFIG_BPF_STREAM_PARSER) -int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, - struct bpf_prog *old, u32 which); +#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) 
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags); void sock_map_unhash(struct sock *sk); void sock_map_close(struct sock *sk, long timeout); + +void bpf_sk_reuseport_detach(struct sock *sk); +int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, + void *value); +int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, + void *value, u64 map_flags); #else -static inline int sock_map_prog_update(struct bpf_map *map, - struct bpf_prog *prog, - struct bpf_prog *old, u32 which) +static inline void bpf_sk_reuseport_detach(struct sock *sk) { - return -EOPNOTSUPP; } +#ifdef CONFIG_BPF_SYSCALL static inline int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog) { @@ -1820,20 +1888,7 @@ static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void { return -EOPNOTSUPP; } -#endif /* CONFIG_BPF_STREAM_PARSER */ - -#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) -void bpf_sk_reuseport_detach(struct sock *sk); -int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, - void *value); -int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, - void *value, u64 map_flags); -#else -static inline void bpf_sk_reuseport_detach(struct sock *sk) -{ -} -#ifdef CONFIG_BPF_SYSCALL static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, void *value) { @@ -1900,11 +1955,15 @@ extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto; extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto; extern const struct bpf_func_proto bpf_copy_from_user_proto; extern const struct bpf_func_proto bpf_snprintf_btf_proto; +extern const struct bpf_func_proto bpf_snprintf_proto; extern const struct bpf_func_proto bpf_per_cpu_ptr_proto; extern const struct bpf_func_proto bpf_this_cpu_ptr_proto; extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto; extern const struct bpf_func_proto bpf_sock_from_file_proto; extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto; +extern const struct bpf_func_proto bpf_task_storage_get_proto; +extern const struct bpf_func_proto bpf_task_storage_delete_proto; +extern const struct bpf_func_proto bpf_for_each_map_elem_proto; const struct bpf_func_proto *bpf_tracing_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); @@ -2022,4 +2081,8 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, struct btf_id_set; bool btf_id_set_contains(const struct btf_id_set *set, u32 id); +int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, + u32 **bin_buf, u32 num_args); +void bpf_bprintf_cleanup(void); + #endif /* _LINUX_BPF_H */ diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h index b2c9463f36a1..b902c580c48d 100644 --- a/include/linux/bpf_local_storage.h +++ b/include/linux/bpf_local_storage.h @@ -126,7 +126,8 @@ bpf_local_storage_lookup(struct bpf_local_storage *local_storage, struct bpf_local_storage_map *smap, bool cacheit_lockit); -void bpf_local_storage_map_free(struct bpf_local_storage_map *smap); +void bpf_local_storage_map_free(struct bpf_local_storage_map *smap, + int __percpu *busy_counter); int bpf_local_storage_map_check_btf(const struct bpf_map *map, const struct btf *btf, diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h index 0d1c33ace398..479c101546ad 
100644 --- a/include/linux/bpf_lsm.h +++ b/include/linux/bpf_lsm.h @@ -38,21 +38,9 @@ static inline struct bpf_storage_blob *bpf_inode( return inode->i_security + bpf_lsm_blob_sizes.lbs_inode; } -static inline struct bpf_storage_blob *bpf_task( - const struct task_struct *task) -{ - if (unlikely(!task->security)) - return NULL; - - return task->security + bpf_lsm_blob_sizes.lbs_task; -} - extern const struct bpf_func_proto bpf_inode_storage_get_proto; extern const struct bpf_func_proto bpf_inode_storage_delete_proto; -extern const struct bpf_func_proto bpf_task_storage_get_proto; -extern const struct bpf_func_proto bpf_task_storage_delete_proto; void bpf_inode_storage_free(struct inode *inode); -void bpf_task_storage_free(struct task_struct *task); #else /* !CONFIG_BPF_LSM */ @@ -73,20 +61,10 @@ static inline struct bpf_storage_blob *bpf_inode( return NULL; } -static inline struct bpf_storage_blob *bpf_task( - const struct task_struct *task) -{ - return NULL; -} - static inline void bpf_inode_storage_free(struct inode *inode) { } -static inline void bpf_task_storage_free(struct task_struct *task) -{ -} - #endif /* CONFIG_BPF_LSM */ #endif /* _LINUX_BPF_LSM_H */ diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 99f7fd657d87..f883f01a5061 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -103,19 +103,17 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops) -#if defined(CONFIG_BPF_STREAM_PARSER) -BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops) -BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops) -#endif #ifdef CONFIG_BPF_LSM BPF_MAP_TYPE(BPF_MAP_TYPE_INODE_STORAGE, inode_storage_map_ops) -BPF_MAP_TYPE(BPF_MAP_TYPE_TASK_STORAGE, task_storage_map_ops) #endif +BPF_MAP_TYPE(BPF_MAP_TYPE_TASK_STORAGE, task_storage_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops) #if defined(CONFIG_XDP_SOCKETS) BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops) #endif #ifdef CONFIG_INET +BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops) #endif #endif diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 971b33aca13d..06841517ab1e 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -68,6 +68,8 @@ struct bpf_reg_state { unsigned long raw1; unsigned long raw2; } raw; + + u32 subprogno; /* for PTR_TO_FUNC */ }; /* For PTR_TO_PACKET, used to find other pointers with the same variable * offset, so they can share range knowledge. @@ -204,6 +206,7 @@ struct bpf_func_state { int acquired_refs; struct bpf_reference_state *refs; int allocated_stack; + bool in_callback_fn; struct bpf_stack_state *stack; }; @@ -299,10 +302,11 @@ struct bpf_verifier_state_list { }; /* Possible states for alu_state member. 
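With BPF_MAP_TYPE_TASK_STORAGE moved out from under CONFIG_BPF_LSM in the bpf_types.h hunk above, and the task-storage helper prototypes relocated from bpf_lsm.h to bpf.h, tracing programs can keep per-task state as well. A rough sketch of the BPF program side, assuming a clang/libbpf build with a bpftool-generated vmlinux.h; the map, section and struct names are illustrative:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct task_stats {
	__u64 wakeups;
};

struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);	/* required for local storage maps */
	__type(key, int);
	__type(value, struct task_stats);
} per_task SEC(".maps");

SEC("tp_btf/sched_wakeup")
int BPF_PROG(count_wakeups, struct task_struct *p)
{
	struct task_stats *s;

	s = bpf_task_storage_get(&per_task, p, NULL,
				 BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (s)
		s->wakeups++;
	return 0;
}

char LICENSE[] SEC("license") = "GPL";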
*/ -#define BPF_ALU_SANITIZE_SRC 1U -#define BPF_ALU_SANITIZE_DST 2U +#define BPF_ALU_SANITIZE_SRC (1U << 0) +#define BPF_ALU_SANITIZE_DST (1U << 1) #define BPF_ALU_NEG_VALUE (1U << 2) #define BPF_ALU_NON_POINTER (1U << 3) +#define BPF_ALU_IMMEDIATE (1U << 4) #define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \ BPF_ALU_SANITIZE_DST) @@ -484,6 +488,15 @@ static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog, return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id; } +/* unpack the IDs from the key as constructed above */ +static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id) +{ + if (obj_id) + *obj_id = key >> 32; + if (btf_id) + *btf_id = key & 0x7FFFFFFF; +} + int bpf_check_attach_target(struct bpf_verifier_log *log, const struct bpf_prog *prog, const struct bpf_prog *tgt_prog, diff --git a/include/linux/btf.h b/include/linux/btf.h index 7fabf1428093..3bac66e0183a 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -9,6 +9,7 @@ #include <uapi/linux/bpf.h> #define BTF_TYPE_EMIT(type) ((void)(type *)0) +#define BTF_TYPE_EMIT_ENUM(enum_val) ((void)enum_val) struct btf; struct btf_member; @@ -109,6 +110,7 @@ const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf, const struct btf_type * btf_resolve_size(const struct btf *btf, const struct btf_type *type, u32 *type_size); +const char *btf_type_str(const struct btf_type *t); #define for_each_member(i, struct_type, member) \ for (i = 0, member = btf_type_member(struct_type); \ @@ -140,6 +142,11 @@ static inline bool btf_type_is_enum(const struct btf_type *t) return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM; } +static inline bool btf_type_is_scalar(const struct btf_type *t) +{ + return btf_type_is_int(t) || btf_type_is_enum(t); +} + static inline bool btf_type_is_typedef(const struct btf_type *t) { return BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF; diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 6b47f94378c5..e7e99da31349 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -194,6 +194,8 @@ void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size, struct buffer_head *__bread_gfp(struct block_device *, sector_t block, unsigned size, gfp_t gfp); void invalidate_bh_lrus(void); +void invalidate_bh_lrus_cpu(int cpu); +bool has_bh_in_lru(int cpu, void *dummy); struct buffer_head *alloc_buffer_head(gfp_t gfp_flags); void free_buffer_head(struct buffer_head * bh); void unlock_buffer(struct buffer_head *bh); @@ -406,6 +408,8 @@ static inline int inode_has_buffers(struct inode *inode) { return 0; } static inline void invalidate_inode_buffers(struct inode *inode) {} static inline int remove_inode_buffers(struct inode *inode) { return 1; } static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; } +static inline void invalidate_bh_lrus_cpu(int cpu) {} +static inline bool has_bh_in_lru(int cpu, void *dummy) { return 0; } #define buffer_heads_over_limit 0 #endif /* CONFIG_BLOCK */ diff --git a/include/linux/bug.h b/include/linux/bug.h index f639bd0122f3..348acf2558f3 100644 --- a/include/linux/bug.h +++ b/include/linux/bug.h @@ -36,6 +36,9 @@ static inline int is_warning_bug(const struct bug_entry *bug) return bug->flags & BUGFLAG_WARNING; } +void bug_get_file_line(struct bug_entry *bug, const char **file, + unsigned int *line); + struct bug_entry *find_bug(unsigned long bugaddr); enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs); @@ -58,6 +61,13 @@ static inline 
enum bug_trap_type report_bug(unsigned long bug_addr, return BUG_TRAP_TYPE_BUG; } +struct bug_entry; +static inline void bug_get_file_line(struct bug_entry *bug, const char **file, + unsigned int *line) +{ + *file = NULL; + *line = 0; +} static inline void generic_bug_clear_once(void) {} diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h index 707575c668f4..ae7a3411167c 100644 --- a/include/linux/can/bittiming.h +++ b/include/linux/can/bittiming.h @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* Copyright (c) 2020 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de> + * Copyright (c) 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr> */ #ifndef _CAN_BITTIMING_H @@ -10,9 +11,83 @@ #define CAN_SYNC_SEG 1 + +/* Kilobits and Megabits per second */ +#define CAN_KBPS 1000UL +#define CAN_MBPS 1000000UL + +/* Megahertz */ +#define CAN_MHZ 1000000UL + +/* + * struct can_tdc - CAN FD Transmission Delay Compensation parameters + * + * At high bit rates, the propagation delay from the TX pin to the RX + * pin of the transceiver causes measurement errors: the sample point + * on the RX pin might occur on the previous bit. + * + * To solve this issue, ISO 11898-1 introduces in section 11.3.3 + * "Transmitter delay compensation" a SSP (Secondary Sample Point) + * equal to the distance, in time quanta, from the start of the bit + * time on the TX pin to the actual measurement on the RX pin. + * + * This structure contains the parameters to calculate that SSP. + * + * @tdcv: Transmitter Delay Compensation Value. Distance, in time + * quanta, from when the bit is sent on the TX pin to when it is + * received on the RX pin of the transmitter. Possible options: + * + * O: automatic mode. The controller dynamically measure @tdcv + * for each transmitted CAN FD frame. + * + * Other values: manual mode. Use the fixed provided value. + * + * @tdco: Transmitter Delay Compensation Offset. Offset value, in time + * quanta, defining the distance between the start of the bit + * reception on the RX pin of the transceiver and the SSP + * position such as SSP = @tdcv + @tdco. + * + * If @tdco is zero, then TDC is disabled and both @tdcv and + * @tdcf should be ignored. + * + * @tdcf: Transmitter Delay Compensation Filter window. Defines the + * minimum value for the SSP position in time quanta. If SSP is + * less than @tdcf, then no delay compensations occur and the + * normal sampling point is used instead. The feature is enabled + * if and only if @tdcv is set to zero (automatic mode) and @tdcf + * is configured to a value greater than @tdco. + */ +struct can_tdc { + u32 tdcv; + u32 tdco; + u32 tdcf; +}; + +/* + * struct can_tdc_const - CAN hardware-dependent constant for + * Transmission Delay Compensation + * + * @tdcv_max: Transmitter Delay Compensation Value maximum value. + * Should be set to zero if the controller does not support + * manual mode for tdcv. + * @tdco_max: Transmitter Delay Compensation Offset maximum value. + * Should not be zero. If the controller does not support TDC, + * then the pointer to this structure should be NULL. + * @tdcf_max: Transmitter Delay Compensation Filter window maximum + * value. Should be set to zero if the controller does not + * support this feature. 
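[Editor's note, not part of the patch: a small sketch to make the TDC documentation above concrete. The controller limits and values below are invented; only the can_tdc/can_tdc_const layouts and the SSP = tdcv + tdco relation come from this bittiming.h hunk.]

/* Hypothetical controller limits: manual TDCV up to 63 time quanta,
 * TDCO up to 127, no support for the TDCF filter window. */
static const struct can_tdc_const foo_tdc_const = {
        .tdcv_max = 63,
        .tdco_max = 127,
        .tdcf_max = 0,
};

/* Automatic mode: tdcv == 0, the controller measures the TX-to-RX delay
 * itself; the SSP then sits at (measured tdcv) + tdco time quanta after
 * the start of the bit on the TX pin. */
static const struct can_tdc foo_tdc = {
        .tdcv = 0,
        .tdco = 10,
        .tdcf = 0,
};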
+ */ +struct can_tdc_const { + u32 tdcv_max; + u32 tdco_max; + u32 tdcf_max; +}; + #ifdef CONFIG_CAN_CALC_BITTIMING int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, const struct can_bittiming_const *btc); + +void can_calc_tdco(struct net_device *dev); #else /* !CONFIG_CAN_CALC_BITTIMING */ static inline int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, @@ -21,6 +96,10 @@ can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, netdev_err(dev, "bit-timing calculation not available\n"); return -EINVAL; } + +static inline void can_calc_tdco(struct net_device *dev) +{ +} #endif /* CONFIG_CAN_CALC_BITTIMING */ int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt, diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index ac4d83a1ab81..27b275e463da 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -39,19 +39,23 @@ struct can_priv { struct net_device *dev; struct can_device_stats can_stats; - struct can_bittiming bittiming, data_bittiming; const struct can_bittiming_const *bittiming_const, *data_bittiming_const; - const u16 *termination_const; - unsigned int termination_const_cnt; - u16 termination; - const u32 *bitrate_const; + struct can_bittiming bittiming, data_bittiming; + const struct can_tdc_const *tdc_const; + struct can_tdc tdc; + unsigned int bitrate_const_cnt; + const u32 *bitrate_const; const u32 *data_bitrate_const; unsigned int data_bitrate_const_cnt; u32 bitrate_max; struct can_clock clock; + unsigned int termination_const_cnt; + const u16 *termination_const; + u16 termination; + enum can_state state; /* CAN controller features - see include/uapi/linux/can/netlink.h */ diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h index d438eb058069..d311bc369a39 100644 --- a/include/linux/can/skb.h +++ b/include/linux/can/skb.h @@ -23,7 +23,8 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr, unsigned int *frame_len_ptr); unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx, unsigned int *frame_len_ptr); -void can_free_echo_skb(struct net_device *dev, unsigned int idx); +void can_free_echo_skb(struct net_device *dev, unsigned int idx, + unsigned int *frame_len_ptr); struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf); struct sk_buff *alloc_canfd_skb(struct net_device *dev, struct canfd_frame **cfd); diff --git a/include/linux/cfi.h b/include/linux/cfi.h new file mode 100644 index 000000000000..879744aaa6e0 --- /dev/null +++ b/include/linux/cfi.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Clang Control Flow Integrity (CFI) support. + * + * Copyright (C) 2021 Google LLC + */ +#ifndef _LINUX_CFI_H +#define _LINUX_CFI_H + +#ifdef CONFIG_CFI_CLANG +typedef void (*cfi_check_fn)(uint64_t id, void *ptr, void *diag); + +/* Compiler-generated function in each module, and the kernel */ +extern void __cfi_check(uint64_t id, void *ptr, void *diag); + +/* + * Force the compiler to generate a CFI jump table entry for a function + * and store the jump table address to __cfi_jt_<function>. 
+ */ +#define __CFI_ADDRESSABLE(fn, __attr) \ + const void *__cfi_jt_ ## fn __visible __attr = (void *)&fn + +#ifdef CONFIG_CFI_CLANG_SHADOW + +extern void cfi_module_add(struct module *mod, unsigned long base_addr); +extern void cfi_module_remove(struct module *mod, unsigned long base_addr); + +#else + +static inline void cfi_module_add(struct module *mod, unsigned long base_addr) {} +static inline void cfi_module_remove(struct module *mod, unsigned long base_addr) {} + +#endif /* CONFIG_CFI_CLANG_SHADOW */ + +#else /* !CONFIG_CFI_CLANG */ + +#define __CFI_ADDRESSABLE(fn, __attr) + +#endif /* CONFIG_CFI_CLANG */ + +#endif /* _LINUX_CFI_H */ diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 559ee05f86b2..fb8f6d2cd104 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -232,7 +232,7 @@ struct css_set { struct list_head task_iters; /* - * On the default hierarhcy, ->subsys[ssid] may point to a css + * On the default hierarchy, ->subsys[ssid] may point to a css * attached to an ancestor instead of the cgroup this css_set is * associated with. The following node is anchored at * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to @@ -668,7 +668,7 @@ struct cgroup_subsys { */ bool threaded:1; - /* the following two fields are initialized automtically during boot */ + /* the following two fields are initialized automatically during boot */ int id; const char *name; @@ -757,7 +757,7 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {} * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer. * On boot, sock_cgroup_data records the cgroup that the sock was created * in so that cgroup2 matches can be made; however, once either net_prio or - * net_cls starts being used, the area is overriden to carry prioidx and/or + * net_cls starts being used, the area is overridden to carry prioidx and/or * classid. The two modes are distinguished by whether the lowest bit is * set. Clear bit indicates cgroup pointer while set bit prioidx and * classid. diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 4f2f79de083e..6bc9c76680b2 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -32,7 +32,7 @@ struct kernel_clone_args; #ifdef CONFIG_CGROUPS /* - * All weight knobs on the default hierarhcy should use the following min, + * All weight knobs on the default hierarchy should use the following min, * default and max values. The default value is the logarithmic center of * MIN and MAX and allows 100x to be expressed in both directions. */ diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index acb77dcff3b4..445235487230 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h @@ -61,6 +61,10 @@ SUBSYS(pids) SUBSYS(rdma) #endif +#if IS_ENABLED(CONFIG_CGROUP_MISC) +SUBSYS(misc) +#endif + /* * The following subsystems are not supported on the default hierarchy. 
*/ diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 58f6fe866ae9..162a2e5546a3 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -786,6 +786,23 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name, (width), (clk_divider_flags), (table), \ (lock)) /** + * devm_clk_hw_register_divider - register a divider clock with the clock framework + * @dev: device registering this clock + * @name: name of this clock + * @parent_name: name of clock's parent + * @flags: framework-specific flags + * @reg: register address to adjust divider + * @shift: number of bits to shift the bitfield + * @width: width of the bitfield + * @clk_divider_flags: divider-specific flags for this clock + * @lock: shared register lock for this clock + */ +#define devm_clk_hw_register_divider(dev, name, parent_name, flags, reg, shift, \ + width, clk_divider_flags, lock) \ + __devm_clk_hw_register_divider((dev), NULL, (name), (parent_name), NULL, \ + NULL, (flags), (reg), (shift), (width), \ + (clk_divider_flags), NULL, (lock)) +/** * devm_clk_hw_register_divider_table - register a table based divider clock * with the clock framework (devres variant) * @dev: device registering this clock @@ -868,6 +885,13 @@ struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np, const struct clk_parent_data *parent_data, unsigned long flags, void __iomem *reg, u8 shift, u32 mask, u8 clk_mux_flags, u32 *table, spinlock_t *lock); +struct clk_hw *__devm_clk_hw_register_mux(struct device *dev, struct device_node *np, + const char *name, u8 num_parents, + const char * const *parent_names, + const struct clk_hw **parent_hws, + const struct clk_parent_data *parent_data, + unsigned long flags, void __iomem *reg, u8 shift, u32 mask, + u8 clk_mux_flags, u32 *table, spinlock_t *lock); struct clk *clk_register_mux_table(struct device *dev, const char *name, const char * const *parent_names, u8 num_parents, unsigned long flags, void __iomem *reg, u8 shift, u32 mask, @@ -902,6 +926,12 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name, __clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, NULL, \ (parent_data), (flags), (reg), (shift), \ BIT((width)) - 1, (clk_mux_flags), NULL, (lock)) +#define devm_clk_hw_register_mux(dev, name, parent_names, num_parents, flags, reg, \ + shift, width, clk_mux_flags, lock) \ + __devm_clk_hw_register_mux((dev), NULL, (name), (num_parents), \ + (parent_names), NULL, NULL, (flags), (reg), \ + (shift), BIT((width)) - 1, (clk_mux_flags), \ + NULL, (lock)) int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags, unsigned int val); diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h index eb016fc9cc0b..f7ff722a03dd 100644 --- a/include/linux/clk/tegra.h +++ b/include/linux/clk/tegra.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2012-2020, NVIDIA CORPORATION. All rights reserved. 
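[Editor's note, not part of the patch: a hedged usage sketch for the devres clock helpers added to clk-provider.h above. The device, register offsets, bit fields and parent names are invented; the devm_clk_hw_register_mux()/devm_clk_hw_register_divider() parameter order is the one defined by the macros in that hunk.]

static DEFINE_SPINLOCK(foo_clk_lock);
static const char * const foo_sel_parents[] = { "pll_a", "pll_b" };

static int foo_clk_probe(struct platform_device *pdev)
{
        void __iomem *base;
        struct clk_hw *mux, *div;

        base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base))
                return PTR_ERR(base);

        /* 2-bit mux at offset 0x10; unregistered automatically on detach. */
        mux = devm_clk_hw_register_mux(&pdev->dev, "foo_sel", foo_sel_parents,
                                       ARRAY_SIZE(foo_sel_parents), 0,
                                       base + 0x10, 0, 2, 0, &foo_clk_lock);
        if (IS_ERR(mux))
                return PTR_ERR(mux);

        /* 4-bit divider fed by the mux above. */
        div = devm_clk_hw_register_divider(&pdev->dev, "foo_div", "foo_sel", 0,
                                           base + 0x14, 0, 4, 0, &foo_clk_lock);
        return PTR_ERR_OR_ZERO(div);
}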
*/ #ifndef __LINUX_CLK_TEGRA_H_ @@ -123,6 +123,8 @@ static inline void tegra_cpu_clock_resume(void) } #endif +extern int tegra210_plle_hw_sequence_start(void); +extern bool tegra210_plle_hw_sequence_is_enabled(void); extern void tegra210_xusb_pll_hw_control_enable(void); extern void tegra210_xusb_pll_hw_sequence_start(void); extern void tegra210_sata_pll_hw_control_enable(void); diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 86d143db6523..d6ab416ee2d2 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -17,6 +17,7 @@ #include <linux/timer.h> #include <linux/init.h> #include <linux/of.h> +#include <linux/clocksource_ids.h> #include <asm/div64.h> #include <asm/io.h> @@ -62,6 +63,10 @@ struct module; * 400-499: Perfect * The ideal clocksource. A must-use where * available. + * @id: Defaults to CSID_GENERIC. The id value is captured + * in certain snapshot functions to allow callers to + * validate the clocksource from which the snapshot was + * taken. * @flags: Flags describing special properties * @enable: Optional function to enable the clocksource * @disable: Optional function to disable the clocksource @@ -70,7 +75,7 @@ struct module; * @mark_unstable: Optional function to inform the clocksource driver that * the watchdog marked the clocksource unstable * @tick_stable: Optional function called periodically from the watchdog - * code to provide stable syncrhonization points + * code to provide stable synchronization points * @wd_list: List head to enqueue into the watchdog list (internal) * @cs_last: Last clocksource value for clocksource watchdog * @wd_last: Last watchdog value corresponding to @cs_last @@ -100,6 +105,7 @@ struct clocksource { const char *name; struct list_head list; int rating; + enum clocksource_ids id; enum vdso_clock_mode vdso_clock_mode; unsigned long flags; diff --git a/include/linux/clocksource_ids.h b/include/linux/clocksource_ids.h new file mode 100644 index 000000000000..16775d7d8f8d --- /dev/null +++ b/include/linux/clocksource_ids.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CLOCKSOURCE_IDS_H +#define _LINUX_CLOCKSOURCE_IDS_H + +/* Enum to give clocksources a unique identifier */ +enum clocksource_ids { + CSID_GENERIC = 0, + CSID_ARM_ARCH_COUNTER, + CSID_MAX, +}; + +#endif diff --git a/include/linux/cma.h b/include/linux/cma.h index 217999c8a762..53fd8c3cdbd0 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -44,9 +44,9 @@ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, unsigned int order_per_bit, const char *name, struct cma **res_cma); -extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, +extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align, bool no_warn); -extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count); +extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count); extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data); #endif diff --git a/include/linux/compaction.h b/include/linux/compaction.h index ed4070ed41ef..4221888bdcd6 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -81,7 +81,6 @@ static inline unsigned long compact_gap(unsigned int order) } #ifdef CONFIG_COMPACTION -extern int sysctl_compact_memory; extern unsigned int sysctl_compaction_proactiveness; extern int sysctl_compaction_handler(struct ctl_table *table, int write, void *buffer, size_t 
*length, loff_t *ppos); diff --git a/include/linux/compat.h b/include/linux/compat.h index 6e65be753603..8855b1b702b2 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -75,7 +75,6 @@ __diag_push(); \ __diag_ignore(GCC, 8, "-Wattribute-alias", \ "Type aliasing is used to sanitize syscall arguments");\ - asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \ __attribute__((alias(__stringify(__se_compat_sys##name)))); \ ALLOW_ERROR_INJECTION(compat_sys##name, ERRNO); \ @@ -214,12 +213,11 @@ typedef struct compat_siginfo { /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */ struct { compat_uptr_t _addr; /* faulting insn/memory ref. */ -#ifdef __ARCH_SI_TRAPNO - int _trapno; /* TRAP # which caused the signal */ -#endif #define __COMPAT_ADDR_BND_PKEY_PAD (__alignof__(compat_uptr_t) < sizeof(short) ? \ sizeof(short) : __alignof__(compat_uptr_t)) union { + /* used on alpha and sparc */ + int _trapno; /* TRAP # which caused the signal */ /* * used when si_code=BUS_MCEERR_AR or * used when si_code=BUS_MCEERR_AO @@ -236,6 +234,11 @@ typedef struct compat_siginfo { char _dummy_pkey[__COMPAT_ADDR_BND_PKEY_PAD]; u32 _pkey; } _addr_pkey; + /* used when si_code=TRAP_PERF */ + struct { + compat_ulong_t _data; + u32 _type; + } _perf; }; } _sigfault; @@ -465,6 +468,34 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set, unsafe_put_user(__s->sig[0], &__c->sig[0], label); \ } \ } while (0) + +#define unsafe_get_compat_sigset(set, compat, label) do { \ + const compat_sigset_t __user *__c = compat; \ + compat_sigset_word hi, lo; \ + sigset_t *__s = set; \ + \ + switch (_NSIG_WORDS) { \ + case 4: \ + unsafe_get_user(lo, &__c->sig[7], label); \ + unsafe_get_user(hi, &__c->sig[6], label); \ + __s->sig[3] = hi | (((long)lo) << 32); \ + fallthrough; \ + case 3: \ + unsafe_get_user(lo, &__c->sig[5], label); \ + unsafe_get_user(hi, &__c->sig[4], label); \ + __s->sig[2] = hi | (((long)lo) << 32); \ + fallthrough; \ + case 2: \ + unsafe_get_user(lo, &__c->sig[3], label); \ + unsafe_get_user(hi, &__c->sig[2], label); \ + __s->sig[1] = hi | (((long)lo) << 32); \ + fallthrough; \ + case 1: \ + unsafe_get_user(lo, &__c->sig[1], label); \ + unsafe_get_user(hi, &__c->sig[0], label); \ + __s->sig[0] = hi | (((long)lo) << 32); \ + } \ +} while (0) #else #define unsafe_put_compat_sigset(compat, set, label) do { \ compat_sigset_t __user *__c = compat; \ @@ -472,6 +503,13 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set, \ unsafe_copy_to_user(__c, __s, sizeof(*__c), label); \ } while (0) + +#define unsafe_get_compat_sigset(set, compat, label) do { \ + const compat_sigset_t __user *__c = compat; \ + sigset_t *__s = set; \ + \ + unsafe_copy_from_user(__s, __c, sizeof(*__c), label); \ +} while (0) #endif extern int compat_ptrace_request(struct task_struct *child, diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index d217c382b02d..adbe76b203e2 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -61,3 +61,6 @@ #if __has_feature(shadow_call_stack) # define __noscs __attribute__((__no_sanitize__("shadow-call-stack"))) #endif + +#define __nocfi __attribute__((__no_sanitize__("cfi"))) +#define __cficanonical __attribute__((__cfi_canonical_jump_table__)) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 48750243db4c..5d97ef738a57 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ 
-90,15 +90,11 @@ */ #define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0) -/* - * sparse (__CHECKER__) pretends to be gcc, but can't do constant - * folding in __builtin_bswap*() (yet), so don't set these for it. - */ -#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__) +#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) #define __HAVE_BUILTIN_BSWAP32__ #define __HAVE_BUILTIN_BSWAP64__ #define __HAVE_BUILTIN_BSWAP16__ -#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */ +#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ #if GCC_VERSION >= 70000 #define KASAN_ABI_VERSION 5 diff --git a/include/linux/compiler-version.h b/include/linux/compiler-version.h index 2b2972c77c62..573fa85b6c0c 100644 --- a/include/linux/compiler-version.h +++ b/include/linux/compiler-version.h @@ -9,6 +9,6 @@ * This header exists to force full rebuild when the compiler is upgraded. * * When fixdep scans this, it will find this string "CONFIG_CC_VERSION_TEXT" - * and add dependency on include/config/cc/version/text.h, which is touched + * and add dependency on include/config/CC_VERSION_TEXT, which is touched * by Kconfig when the version string from the compiler changes. */ diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index e5dd5a4ae946..d29bda7f6ebd 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -242,6 +242,14 @@ struct ftrace_likely_data { # define __noscs #endif +#ifndef __nocfi +# define __nocfi +#endif + +#ifndef __cficanonical +# define __cficanonical +#endif + #ifndef asm_volatile_goto #define asm_volatile_goto(x...) asm goto(x) #endif diff --git a/include/linux/configfs.h b/include/linux/configfs.h index 2e8c69b43c64..97cfd13bae51 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/* -*- mode: c; c-basic-offset: 8; -*- - * vim: noexpandtab sw=8 ts=8 sts=0: - * +/* * configfs.h - definitions for the device driver filesystem * * Based on sysfs: diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index 153734816b49..d5b9c8d40c18 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -101,6 +101,7 @@ struct vc_data { unsigned int vc_rows; unsigned int vc_size_row; /* Bytes per row */ unsigned int vc_scan_lines; /* # of scan lines */ + unsigned int vc_cell_height; /* CRTC character cell height */ unsigned long vc_origin; /* [!] Start of real screen */ unsigned long vc_scr_end; /* [!] End of real screen */ unsigned long vc_visible_origin; /* [!] Top of visible window */ diff --git a/include/linux/const.h b/include/linux/const.h index 81b8aae5a855..435ddd72d2c4 100644 --- a/include/linux/const.h +++ b/include/linux/const.h @@ -3,4 +3,12 @@ #include <vdso/const.h> +/* + * This returns a constant expression while determining if an argument is + * a constant expression, most importantly without evaluating the argument. + * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de> + */ +#define __is_constexpr(x) \ + (sizeof(int) == sizeof(*(8 ? 
((void *)((long)(x) * 0l)) : (int *)8))) + #endif /* _LINUX_CONST_H */ diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index bceb06498521..4d7fced3a39f 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h @@ -71,6 +71,19 @@ static inline void exception_exit(enum ctx_state prev_ctx) } } +static __always_inline bool context_tracking_guest_enter(void) +{ + if (context_tracking_enabled()) + __context_tracking_enter(CONTEXT_GUEST); + + return context_tracking_enabled_this_cpu(); +} + +static __always_inline void context_tracking_guest_exit(void) +{ + if (context_tracking_enabled()) + __context_tracking_exit(CONTEXT_GUEST); +} /** * ct_state() - return the current context tracking state if known @@ -92,6 +105,9 @@ static inline void user_exit_irqoff(void) { } static inline enum ctx_state exception_enter(void) { return 0; } static inline void exception_exit(enum ctx_state prev_ctx) { } static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; } +static inline bool context_tracking_guest_enter(void) { return false; } +static inline void context_tracking_guest_exit(void) { } + #endif /* !CONFIG_CONTEXT_TRACKING */ #define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond)) @@ -102,80 +118,4 @@ extern void context_tracking_init(void); static inline void context_tracking_init(void) { } #endif /* CONFIG_CONTEXT_TRACKING_FORCE */ - -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN -/* must be called with irqs disabled */ -static __always_inline void guest_enter_irqoff(void) -{ - instrumentation_begin(); - if (vtime_accounting_enabled_this_cpu()) - vtime_guest_enter(current); - else - current->flags |= PF_VCPU; - instrumentation_end(); - - if (context_tracking_enabled()) - __context_tracking_enter(CONTEXT_GUEST); - - /* KVM does not hold any references to rcu protected data when it - * switches CPU into a guest mode. In fact switching to a guest mode - * is very similar to exiting to userspace from rcu point of view. In - * addition CPU may stay in a guest mode for quite a long time (up to - * one time slice). Lets treat guest mode as quiescent state, just like - * we do with user-mode execution. - */ - if (!context_tracking_enabled_this_cpu()) { - instrumentation_begin(); - rcu_virt_note_context_switch(smp_processor_id()); - instrumentation_end(); - } -} - -static __always_inline void guest_exit_irqoff(void) -{ - if (context_tracking_enabled()) - __context_tracking_exit(CONTEXT_GUEST); - - instrumentation_begin(); - if (vtime_accounting_enabled_this_cpu()) - vtime_guest_exit(current); - else - current->flags &= ~PF_VCPU; - instrumentation_end(); -} - -#else -static __always_inline void guest_enter_irqoff(void) -{ - /* - * This is running in ioctl context so its safe - * to assume that it's the stime pending cputime - * to flush. 
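[Editor's note, not part of the patch: stepping back to the __is_constexpr() macro completed at the start of the const.h hunk above. If x is an integer constant expression, (long)(x) * 0l is a null pointer constant, the ?: therefore has type int * and sizeof(*...) equals sizeof(int); otherwise the ?: has type void * and, as a GNU extension, sizeof(*(void *)...) is 1. A minimal, hypothetical compile-time check, assuming GCC/Clang semantics:]

extern int foo_runtime_value;   /* deliberately not a constant expression */

/* Nothing is evaluated at run time; both asserts hold at build time. */
static_assert(__is_constexpr(42));
static_assert(!__is_constexpr(foo_runtime_value));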
- */ - instrumentation_begin(); - vtime_account_kernel(current); - current->flags |= PF_VCPU; - rcu_virt_note_context_switch(smp_processor_id()); - instrumentation_end(); -} - -static __always_inline void guest_exit_irqoff(void) -{ - instrumentation_begin(); - /* Flush the guest cputime we spent on the guest */ - vtime_account_kernel(current); - current->flags &= ~PF_VCPU; - instrumentation_end(); -} -#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */ - -static inline void guest_exit(void) -{ - unsigned long flags; - - local_irq_save(flags); - guest_exit_irqoff(); - local_irq_restore(flags); -} - #endif diff --git a/include/linux/coredump.h b/include/linux/coredump.h index e58e8c207782..78fcd776b185 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h @@ -7,34 +7,34 @@ #include <linux/fs.h> #include <asm/siginfo.h> +#ifdef CONFIG_COREDUMP struct core_vma_metadata { unsigned long start, end; unsigned long flags; unsigned long dump_size; }; +extern int core_uses_pid; +extern char core_pattern[]; +extern unsigned int core_pipe_limit; + /* * These are the only things you should do on a core-file: use only these * functions to write out all the necessary info. */ struct coredump_params; -extern int dump_skip(struct coredump_params *cprm, size_t nr); +extern void dump_skip_to(struct coredump_params *cprm, unsigned long to); +extern void dump_skip(struct coredump_params *cprm, size_t nr); extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr); extern int dump_align(struct coredump_params *cprm, int align); -extern void dump_truncate(struct coredump_params *cprm); int dump_user_range(struct coredump_params *cprm, unsigned long start, unsigned long len); int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count, struct core_vma_metadata **vma_meta, size_t *vma_data_size_ptr); -#ifdef CONFIG_COREDUMP extern void do_coredump(const kernel_siginfo_t *siginfo); #else static inline void do_coredump(const kernel_siginfo_t *siginfo) {} #endif -extern int core_uses_pid; -extern char core_pattern[]; -extern unsigned int core_pipe_limit; - #endif /* _LINUX_COREDUMP_H */ diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 976ec2697610..85008a65e21f 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -50,6 +50,7 @@ enum coresight_dev_subtype_sink { CORESIGHT_DEV_SUBTYPE_SINK_PORT, CORESIGHT_DEV_SUBTYPE_SINK_BUFFER, CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM, + CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM, }; enum coresight_dev_subtype_link { @@ -455,6 +456,18 @@ static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 o } #endif /* CONFIG_64BIT */ +static inline bool coresight_is_percpu_source(struct coresight_device *csdev) +{ + return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SOURCE) && + (csdev->subtype.source_subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_PROC); +} + +static inline bool coresight_is_percpu_sink(struct coresight_device *csdev) +{ + return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SINK) && + (csdev->subtype.sink_subtype == CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM); +} + extern struct coresight_device * coresight_register(struct coresight_desc *desc); extern void coresight_unregister(struct coresight_device *csdev); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index f14adb882338..4a62b3980642 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -57,7 +57,7 @@ enum cpuhp_state { CPUHP_PAGE_ALLOC_DEAD, CPUHP_NET_DEV_DEAD, CPUHP_PCI_XGENE_DEAD, - 
CPUHP_IOMMU_INTEL_DEAD, + CPUHP_IOMMU_IOVA_DEAD, CPUHP_LUSTRE_CFS_DEAD, CPUHP_AP_ARM_CACHE_B15_RAC_DEAD, CPUHP_PADATA_DEAD, @@ -100,6 +100,7 @@ enum cpuhp_state { CPUHP_AP_CPU_PM_STARTING, CPUHP_AP_IRQ_GIC_STARTING, CPUHP_AP_IRQ_HIP04_STARTING, + CPUHP_AP_IRQ_APPLE_AIC_STARTING, CPUHP_AP_IRQ_ARMADA_XP_STARTING, CPUHP_AP_IRQ_BCM2836_STARTING, CPUHP_AP_IRQ_MIPS_GIC_STARTING, @@ -135,6 +136,7 @@ enum cpuhp_state { CPUHP_AP_RISCV_TIMER_STARTING, CPUHP_AP_CLINT_TIMER_STARTING, CPUHP_AP_CSKY_TIMER_STARTING, + CPUHP_AP_TI_GP_TIMER_STARTING, CPUHP_AP_HYPERV_TIMER_STARTING, CPUHP_AP_KVM_STARTING, CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING, @@ -167,6 +169,7 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_RAPL_ONLINE, CPUHP_AP_PERF_X86_CQM_ONLINE, CPUHP_AP_PERF_X86_CSTATE_ONLINE, + CPUHP_AP_PERF_X86_IDXD_ONLINE, CPUHP_AP_PERF_S390_CF_ONLINE, CPUHP_AP_PERF_S390_CFD_ONLINE, CPUHP_AP_PERF_S390_SF_ONLINE, @@ -175,6 +178,8 @@ enum cpuhp_state { CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, + CPUHP_AP_PERF_ARM_HISI_PA_ONLINE, + CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE, CPUHP_AP_PERF_ARM_L2X0_ONLINE, CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE, CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index bd605b5585cf..fce476275e16 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -49,8 +49,8 @@ struct cpuidle_state { char name[CPUIDLE_NAME_LEN]; char desc[CPUIDLE_DESC_LEN]; - u64 exit_latency_ns; - u64 target_residency_ns; + s64 exit_latency_ns; + s64 target_residency_ns; unsigned int flags; unsigned int exit_latency; /* in US */ int power_usage; /* in mW */ diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 383684e30f12..bfc4690de4f4 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -91,44 +91,15 @@ extern struct cpumask __cpu_possible_mask; extern struct cpumask __cpu_online_mask; extern struct cpumask __cpu_present_mask; extern struct cpumask __cpu_active_mask; +extern struct cpumask __cpu_dying_mask; #define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask) #define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask) #define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask) #define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask) +#define cpu_dying_mask ((const struct cpumask *)&__cpu_dying_mask) extern atomic_t __num_online_cpus; -#if NR_CPUS > 1 -/** - * num_online_cpus() - Read the number of online CPUs - * - * Despite the fact that __num_online_cpus is of type atomic_t, this - * interface gives only a momentary snapshot and is not protected against - * concurrent CPU hotplug operations unless invoked from a cpuhp_lock held - * region. 
- */ -static inline unsigned int num_online_cpus(void) -{ - return atomic_read(&__num_online_cpus); -} -#define num_possible_cpus() cpumask_weight(cpu_possible_mask) -#define num_present_cpus() cpumask_weight(cpu_present_mask) -#define num_active_cpus() cpumask_weight(cpu_active_mask) -#define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask) -#define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask) -#define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask) -#define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask) -#else -#define num_online_cpus() 1U -#define num_possible_cpus() 1U -#define num_present_cpus() 1U -#define num_active_cpus() 1U -#define cpu_online(cpu) ((cpu) == 0) -#define cpu_possible(cpu) ((cpu) == 0) -#define cpu_present(cpu) ((cpu) == 0) -#define cpu_active(cpu) ((cpu) == 0) -#endif - extern cpumask_t cpus_booted_once_mask; static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits) @@ -235,7 +206,7 @@ static inline unsigned int cpumask_last(const struct cpumask *srcp) return find_last_bit(cpumask_bits(srcp), nr_cpumask_bits); } -unsigned int cpumask_next(int n, const struct cpumask *srcp); +unsigned int __pure cpumask_next(int n, const struct cpumask *srcp); /** * cpumask_next_zero - get the next unset cpu in a cpumask @@ -252,8 +223,8 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); } -int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); -int cpumask_any_but(const struct cpumask *mask, unsigned int cpu); +int __pure cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); +int __pure cpumask_any_but(const struct cpumask *mask, unsigned int cpu); unsigned int cpumask_local_spread(unsigned int i, int node); int cpumask_any_and_distribute(const struct cpumask *src1p, const struct cpumask *src2p); @@ -857,6 +828,14 @@ set_cpu_active(unsigned int cpu, bool active) cpumask_clear_cpu(cpu, &__cpu_active_mask); } +static inline void +set_cpu_dying(unsigned int cpu, bool dying) +{ + if (dying) + cpumask_set_cpu(cpu, &__cpu_dying_mask); + else + cpumask_clear_cpu(cpu, &__cpu_dying_mask); +} /** * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * @@ -894,6 +873,82 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu) return to_cpumask(p); } +#if NR_CPUS > 1 +/** + * num_online_cpus() - Read the number of online CPUs + * + * Despite the fact that __num_online_cpus is of type atomic_t, this + * interface gives only a momentary snapshot and is not protected against + * concurrent CPU hotplug operations unless invoked from a cpuhp_lock held + * region. 
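[Editor's note, not part of the patch: a hedged sketch of how the new cpu_dying_mask / cpu_dying() additions in this cpumask.h hunk might be consumed. The helper below is invented; only cpu_dying() and for_each_cpu_and() are existing interfaces.]

/* Pick the first candidate CPU that is online and not being unplugged. */
static int foo_pick_cpu(const struct cpumask *candidates)
{
        int cpu;

        for_each_cpu_and(cpu, candidates, cpu_online_mask) {
                if (!cpu_dying(cpu))
                        return cpu;
        }
        return -ENODEV;
}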
+ */ +static inline unsigned int num_online_cpus(void) +{ + return atomic_read(&__num_online_cpus); +} +#define num_possible_cpus() cpumask_weight(cpu_possible_mask) +#define num_present_cpus() cpumask_weight(cpu_present_mask) +#define num_active_cpus() cpumask_weight(cpu_active_mask) + +static inline bool cpu_online(unsigned int cpu) +{ + return cpumask_test_cpu(cpu, cpu_online_mask); +} + +static inline bool cpu_possible(unsigned int cpu) +{ + return cpumask_test_cpu(cpu, cpu_possible_mask); +} + +static inline bool cpu_present(unsigned int cpu) +{ + return cpumask_test_cpu(cpu, cpu_present_mask); +} + +static inline bool cpu_active(unsigned int cpu) +{ + return cpumask_test_cpu(cpu, cpu_active_mask); +} + +static inline bool cpu_dying(unsigned int cpu) +{ + return cpumask_test_cpu(cpu, cpu_dying_mask); +} + +#else + +#define num_online_cpus() 1U +#define num_possible_cpus() 1U +#define num_present_cpus() 1U +#define num_active_cpus() 1U + +static inline bool cpu_online(unsigned int cpu) +{ + return cpu == 0; +} + +static inline bool cpu_possible(unsigned int cpu) +{ + return cpu == 0; +} + +static inline bool cpu_present(unsigned int cpu) +{ + return cpu == 0; +} + +static inline bool cpu_active(unsigned int cpu) +{ + return cpu == 0; +} + +static inline bool cpu_dying(unsigned int cpu) +{ + return false; +} + +#endif /* NR_CPUS > 1 */ + #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) #if NR_CPUS <= BITS_PER_LONG diff --git a/include/linux/crc8.h b/include/linux/crc8.h index 13c8dabb0441..674045c59a04 100644 --- a/include/linux/crc8.h +++ b/include/linux/crc8.h @@ -96,6 +96,6 @@ void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial); * Williams, Ross N., ross<at>ross.net * (see URL http://www.ross.net/crc/download/crc_v3.txt). */ -u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc); +u8 crc8(const u8 table[CRC8_TABLE_SIZE], const u8 *pdata, size_t nbytes, u8 crc); #endif /* __CRC8_H_ */ diff --git a/include/linux/cred.h b/include/linux/cred.h index 4c6350503697..14971322e1a0 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -53,7 +53,6 @@ do { \ groups_free(group_info); \ } while (0) -extern struct group_info init_groups; #ifdef CONFIG_MULTIUSER extern struct group_info *groups_alloc(int); extern void groups_free(struct group_info *); @@ -140,7 +139,7 @@ struct cred { struct key *request_key_auth; /* assumed request_key authority */ #endif #ifdef CONFIG_SECURITY - void *security; /* subjective LSM security */ + void *security; /* LSM security */ #endif struct user_struct *user; /* real user ID subscription */ struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */ diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h deleted file mode 100644 index 05ee0f19448a..000000000000 --- a/include/linux/cyclades.h +++ /dev/null @@ -1,364 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* $Revision: 3.0 $$Date: 1998/11/02 14:20:59 $ - * linux/include/linux/cyclades.h - * - * This file was initially written by - * Randolph Bentson <bentson@grieg.seaslug.org> and is maintained by - * Ivan Passos <ivan@cyclades.com>. 
- * - * This file contains the general definitions for the cyclades.c driver - *$Log: cyclades.h,v $ - *Revision 3.1 2002/01/29 11:36:16 henrique - *added throttle field on struct cyclades_port to indicate whether the - *port is throttled or not - * - *Revision 3.1 2000/04/19 18:52:52 ivan - *converted address fields to unsigned long and added fields for physical - *addresses on cyclades_card structure; - * - *Revision 3.0 1998/11/02 14:20:59 ivan - *added nports field on cyclades_card structure; - * - *Revision 2.5 1998/08/03 16:57:01 ivan - *added cyclades_idle_stats structure; - * - *Revision 2.4 1998/06/01 12:09:53 ivan - *removed closing_wait2 from cyclades_port structure; - * - *Revision 2.3 1998/03/16 18:01:12 ivan - *changes in the cyclades_port structure to get it closer to the - *standard serial port structure; - *added constants for new ioctls; - * - *Revision 2.2 1998/02/17 16:50:00 ivan - *changes in the cyclades_port structure (addition of shutdown_wait and - *chip_rev variables); - *added constants for new ioctls and for CD1400 rev. numbers. - * - *Revision 2.1 1997/10/24 16:03:00 ivan - *added rflow (which allows enabling the CD1400 special flow control - *feature) and rtsdtr_inv (which allows DTR/RTS pin inversion) to - *cyclades_port structure; - *added Alpha support - * - *Revision 2.0 1997/06/30 10:30:00 ivan - *added some new doorbell command constants related to IOCTLW and - *UART error signaling - * - *Revision 1.8 1997/06/03 15:30:00 ivan - *added constant ZFIRM_HLT - *added constant CyPCI_Ze_win ( = 2 * Cy_PCI_Zwin) - * - *Revision 1.7 1997/03/26 10:30:00 daniel - *new entries at the end of cyclades_port struct to reallocate - *variables illegally allocated within card memory. - * - *Revision 1.6 1996/09/09 18:35:30 bentson - *fold in changes for Cyclom-Z -- including structures for - *communicating with board as well modest changes to original - *structures to support new features. - * - *Revision 1.5 1995/11/13 21:13:31 bentson - *changes suggested by Michael Chastain <mec@duracef.shout.net> - *to support use of this file in non-kernel applications - * - * - */ -#ifndef _LINUX_CYCLADES_H -#define _LINUX_CYCLADES_H - -#include <uapi/linux/cyclades.h> - - -/* Per card data structure */ -struct cyclades_card { - void __iomem *base_addr; - union { - void __iomem *p9050; - struct RUNTIME_9060 __iomem *p9060; - } ctl_addr; - struct BOARD_CTRL __iomem *board_ctrl; /* cyz specific */ - int irq; - unsigned int num_chips; /* 0 if card absent, -1 if Z/PCI, else Y */ - unsigned int first_line; /* minor number of first channel on card */ - unsigned int nports; /* Number of ports in the card */ - int bus_index; /* address shift - 0 for ISA, 1 for PCI */ - int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */ - u32 hw_ver; - spinlock_t card_lock; - struct cyclades_port *ports; -}; - -/*************************************** - * Memory access functions/macros * - * (required to support Alpha systems) * - ***************************************/ - -#define cy_writeb(port,val) do { writeb((val), (port)); mb(); } while (0) -#define cy_writew(port,val) do { writew((val), (port)); mb(); } while (0) -#define cy_writel(port,val) do { writel((val), (port)); mb(); } while (0) - -/* - * Statistics counters - */ -struct cyclades_icount { - __u32 cts, dsr, rng, dcd, tx, rx; - __u32 frame, parity, overrun, brk; - __u32 buf_overrun; -}; - -/* - * This is our internal structure for each serial port's state. 
- * - * Many fields are paralleled by the structure used by the serial_struct - * structure. - * - * For definitions of the flags field, see tty.h - */ - -struct cyclades_port { - int magic; - struct tty_port port; - struct cyclades_card *card; - union { - struct { - void __iomem *base_addr; - } cyy; - struct { - struct CH_CTRL __iomem *ch_ctrl; - struct BUF_CTRL __iomem *buf_ctrl; - } cyz; - } u; - int line; - int flags; /* defined in tty.h */ - int type; /* UART type */ - int read_status_mask; - int ignore_status_mask; - int timeout; - int xmit_fifo_size; - int cor1,cor2,cor3,cor4,cor5; - int tbpr,tco,rbpr,rco; - int baud; - int rflow; - int rtsdtr_inv; - int chip_rev; - int custom_divisor; - u8 x_char; /* to be pushed out ASAP */ - int breakon; - int breakoff; - int xmit_head; - int xmit_tail; - int xmit_cnt; - int default_threshold; - int default_timeout; - unsigned long rflush_count; - struct cyclades_monitor mon; - struct cyclades_idle_stats idle_stats; - struct cyclades_icount icount; - struct completion shutdown_wait; - int throttle; -#ifdef CONFIG_CYZ_INTR - struct timer_list rx_full_timer; -#endif -}; - -#define CLOSING_WAIT_DELAY 30*HZ -#define CY_CLOSING_WAIT_NONE ASYNC_CLOSING_WAIT_NONE -#define CY_CLOSING_WAIT_INF ASYNC_CLOSING_WAIT_INF - - -#define CyMAX_CHIPS_PER_CARD 8 -#define CyMAX_CHAR_FIFO 12 -#define CyPORTS_PER_CHIP 4 -#define CD1400_MAX_SPEED 115200 - -#define CyISA_Ywin 0x2000 - -#define CyPCI_Ywin 0x4000 -#define CyPCI_Yctl 0x80 -#define CyPCI_Zctl CTRL_WINDOW_SIZE -#define CyPCI_Zwin 0x80000 -#define CyPCI_Ze_win (2 * CyPCI_Zwin) - -#define PCI_DEVICE_ID_MASK 0x06 - -/**** CD1400 registers ****/ - -#define CD1400_REV_G 0x46 -#define CD1400_REV_J 0x48 - -#define CyRegSize 0x0400 -#define Cy_HwReset 0x1400 -#define Cy_ClrIntr 0x1800 -#define Cy_EpldRev 0x1e00 - -/* Global Registers */ - -#define CyGFRCR (0x40*2) -#define CyRevE (44) -#define CyCAR (0x68*2) -#define CyCHAN_0 (0x00) -#define CyCHAN_1 (0x01) -#define CyCHAN_2 (0x02) -#define CyCHAN_3 (0x03) -#define CyGCR (0x4B*2) -#define CyCH0_SERIAL (0x00) -#define CyCH0_PARALLEL (0x80) -#define CySVRR (0x67*2) -#define CySRModem (0x04) -#define CySRTransmit (0x02) -#define CySRReceive (0x01) -#define CyRICR (0x44*2) -#define CyTICR (0x45*2) -#define CyMICR (0x46*2) -#define CyICR0 (0x00) -#define CyICR1 (0x01) -#define CyICR2 (0x02) -#define CyICR3 (0x03) -#define CyRIR (0x6B*2) -#define CyTIR (0x6A*2) -#define CyMIR (0x69*2) -#define CyIRDirEq (0x80) -#define CyIRBusy (0x40) -#define CyIRUnfair (0x20) -#define CyIRContext (0x1C) -#define CyIRChannel (0x03) -#define CyPPR (0x7E*2) -#define CyCLOCK_20_1MS (0x27) -#define CyCLOCK_25_1MS (0x31) -#define CyCLOCK_25_5MS (0xf4) -#define CyCLOCK_60_1MS (0x75) -#define CyCLOCK_60_2MS (0xea) - -/* Virtual Registers */ - -#define CyRIVR (0x43*2) -#define CyTIVR (0x42*2) -#define CyMIVR (0x41*2) -#define CyIVRMask (0x07) -#define CyIVRRxEx (0x07) -#define CyIVRRxOK (0x03) -#define CyIVRTxOK (0x02) -#define CyIVRMdmOK (0x01) -#define CyTDR (0x63*2) -#define CyRDSR (0x62*2) -#define CyTIMEOUT (0x80) -#define CySPECHAR (0x70) -#define CyBREAK (0x08) -#define CyPARITY (0x04) -#define CyFRAME (0x02) -#define CyOVERRUN (0x01) -#define CyMISR (0x4C*2) -/* see CyMCOR_ and CyMSVR_ for bits*/ -#define CyEOSRR (0x60*2) - -/* Channel Registers */ - -#define CyLIVR (0x18*2) -#define CyMscsr (0x01) -#define CyTdsr (0x02) -#define CyRgdsr (0x03) -#define CyRedsr (0x07) -#define CyCCR (0x05*2) -/* Format 1 */ -#define CyCHAN_RESET (0x80) -#define CyCHIP_RESET (0x81) -#define 
CyFlushTransFIFO (0x82) -/* Format 2 */ -#define CyCOR_CHANGE (0x40) -#define CyCOR1ch (0x02) -#define CyCOR2ch (0x04) -#define CyCOR3ch (0x08) -/* Format 3 */ -#define CySEND_SPEC_1 (0x21) -#define CySEND_SPEC_2 (0x22) -#define CySEND_SPEC_3 (0x23) -#define CySEND_SPEC_4 (0x24) -/* Format 4 */ -#define CyCHAN_CTL (0x10) -#define CyDIS_RCVR (0x01) -#define CyENB_RCVR (0x02) -#define CyDIS_XMTR (0x04) -#define CyENB_XMTR (0x08) -#define CySRER (0x06*2) -#define CyMdmCh (0x80) -#define CyRxData (0x10) -#define CyTxRdy (0x04) -#define CyTxMpty (0x02) -#define CyNNDT (0x01) -#define CyCOR1 (0x08*2) -#define CyPARITY_NONE (0x00) -#define CyPARITY_0 (0x20) -#define CyPARITY_1 (0xA0) -#define CyPARITY_E (0x40) -#define CyPARITY_O (0xC0) -#define Cy_1_STOP (0x00) -#define Cy_1_5_STOP (0x04) -#define Cy_2_STOP (0x08) -#define Cy_5_BITS (0x00) -#define Cy_6_BITS (0x01) -#define Cy_7_BITS (0x02) -#define Cy_8_BITS (0x03) -#define CyCOR2 (0x09*2) -#define CyIXM (0x80) -#define CyTxIBE (0x40) -#define CyETC (0x20) -#define CyAUTO_TXFL (0x60) -#define CyLLM (0x10) -#define CyRLM (0x08) -#define CyRtsAO (0x04) -#define CyCtsAE (0x02) -#define CyDsrAE (0x01) -#define CyCOR3 (0x0A*2) -#define CySPL_CH_DRANGE (0x80) /* special character detect range */ -#define CySPL_CH_DET1 (0x40) /* enable special character detection - on SCHR4-SCHR3 */ -#define CyFL_CTRL_TRNSP (0x20) /* Flow Control Transparency */ -#define CySPL_CH_DET2 (0x10) /* Enable special character detection - on SCHR2-SCHR1 */ -#define CyREC_FIFO (0x0F) /* Receive FIFO threshold */ -#define CyCOR4 (0x1E*2) -#define CyCOR5 (0x1F*2) -#define CyCCSR (0x0B*2) -#define CyRxEN (0x80) -#define CyRxFloff (0x40) -#define CyRxFlon (0x20) -#define CyTxEN (0x08) -#define CyTxFloff (0x04) -#define CyTxFlon (0x02) -#define CyRDCR (0x0E*2) -#define CySCHR1 (0x1A*2) -#define CySCHR2 (0x1B*2) -#define CySCHR3 (0x1C*2) -#define CySCHR4 (0x1D*2) -#define CySCRL (0x22*2) -#define CySCRH (0x23*2) -#define CyLNC (0x24*2) -#define CyMCOR1 (0x15*2) -#define CyMCOR2 (0x16*2) -#define CyRTPR (0x21*2) -#define CyMSVR1 (0x6C*2) -#define CyMSVR2 (0x6D*2) -#define CyANY_DELTA (0xF0) -#define CyDSR (0x80) -#define CyCTS (0x40) -#define CyRI (0x20) -#define CyDCD (0x10) -#define CyDTR (0x02) -#define CyRTS (0x01) -#define CyPVSR (0x6F*2) -#define CyRBPR (0x78*2) -#define CyRCOR (0x7C*2) -#define CyTBPR (0x72*2) -#define CyTCOR (0x76*2) - -/* Custom Registers */ - -#define CyPLX_VER (0x3400) -#define PLX_9050 0x0b -#define PLX_9060 0x0c -#define PLX_9080 0x0d - -/***************************************************************************/ - -#endif /* _LINUX_CYCLADES_H */ diff --git a/include/linux/dcache.h b/include/linux/dcache.h index c1e48014106f..9e23d33bb6f1 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -59,6 +59,7 @@ struct qstr { extern const struct qstr empty_name; extern const struct qstr slash_name; +extern const struct qstr dotdot_name; struct dentry_stat_t { long nr_dentry; @@ -300,8 +301,8 @@ char *dynamic_dname(struct dentry *, char *, int, const char *, ...); extern char *__d_path(const struct path *, const struct path *, char *, int); extern char *d_absolute_path(const struct path *, char *, int); extern char *d_path(const struct path *, char *, int); -extern char *dentry_path_raw(struct dentry *, char *, int); -extern char *dentry_path(struct dentry *, char *, int); +extern char *dentry_path_raw(const struct dentry *, char *, int); +extern char *dentry_path(const struct dentry *, char *, int); /* Allocation counts.. 
*/ diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index d6c4cc9ecc77..1fdb4343af9c 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -128,6 +128,8 @@ void debugfs_create_atomic_t(const char *name, umode_t mode, struct dentry *parent, atomic_t *value); struct dentry *debugfs_create_bool(const char *name, umode_t mode, struct dentry *parent, bool *value); +void debugfs_create_str(const char *name, umode_t mode, + struct dentry *parent, char **value); struct dentry *debugfs_create_blob(const char *name, umode_t mode, struct dentry *parent, @@ -156,6 +158,9 @@ ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf, ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos); +ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); + #else #include <linux/err.h> @@ -297,6 +302,11 @@ static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode, return ERR_PTR(-ENODEV); } +static inline void debugfs_create_str(const char *name, umode_t mode, + struct dentry *parent, + char **value) +{ } + static inline struct dentry *debugfs_create_blob(const char *name, umode_t mode, struct dentry *parent, struct debugfs_blob_wrapper *blob) @@ -348,6 +358,13 @@ static inline ssize_t debugfs_write_file_bool(struct file *file, return -ENODEV; } +static inline ssize_t debugfs_read_file_str(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + return -ENODEV; +} + #endif /** diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index 2d3bdcccf5eb..21651f946751 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -82,16 +82,16 @@ static inline int delayacct_is_task_waiting_on_io(struct task_struct *p) return 0; } -static inline void delayacct_set_flag(int flag) +static inline void delayacct_set_flag(struct task_struct *p, int flag) { - if (current->delays) - current->delays->flags |= flag; + if (p->delays) + p->delays->flags |= flag; } -static inline void delayacct_clear_flag(int flag) +static inline void delayacct_clear_flag(struct task_struct *p, int flag) { - if (current->delays) - current->delays->flags &= ~flag; + if (p->delays) + p->delays->flags &= ~flag; } static inline void delayacct_tsk_init(struct task_struct *tsk) @@ -114,7 +114,7 @@ static inline void delayacct_tsk_free(struct task_struct *tsk) static inline void delayacct_blkio_start(void) { - delayacct_set_flag(DELAYACCT_PF_BLKIO); + delayacct_set_flag(current, DELAYACCT_PF_BLKIO); if (current->delays) __delayacct_blkio_start(); } @@ -123,7 +123,7 @@ static inline void delayacct_blkio_end(struct task_struct *p) { if (p->delays) __delayacct_blkio_end(p); - delayacct_clear_flag(DELAYACCT_PF_BLKIO); + delayacct_clear_flag(p, DELAYACCT_PF_BLKIO); } static inline int delayacct_add_tsk(struct taskstats *d, @@ -166,9 +166,9 @@ static inline void delayacct_thrashing_end(void) } #else -static inline void delayacct_set_flag(int flag) +static inline void delayacct_set_flag(struct task_struct *p, int flag) {} -static inline void delayacct_clear_flag(int flag) +static inline void delayacct_clear_flag(struct task_struct *p, int flag) {} static inline void delayacct_init(void) {} diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index 26ea0850be9b..142474b4af96 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -38,6 +38,7 @@ enum devfreq_timer { struct devfreq; struct devfreq_governor; +struct 
thermal_cooling_device; /** * struct devfreq_dev_status - Data given from devfreq user device to @@ -98,11 +99,15 @@ struct devfreq_dev_status { * @freq_table: Optional list of frequencies to support statistics * and freq_table must be generated in ascending order. * @max_state: The size of freq_table. + * + * @is_cooling_device: A self-explanatory boolean giving the device a + * cooling effect property. */ struct devfreq_dev_profile { unsigned long initial_freq; unsigned int polling_ms; enum devfreq_timer timer; + bool is_cooling_device; int (*target)(struct device *dev, unsigned long *freq, u32 flags); int (*get_dev_status)(struct device *dev, @@ -156,6 +161,7 @@ struct devfreq_stats { * @suspend_count: suspend requests counter for a device. * @stats: Statistics of devfreq device behavior * @transition_notifier_list: list head of DEVFREQ_TRANSITION_NOTIFIER notifier + * @cdev: Cooling device pointer if the devfreq has cooling property * @nb_min: Notifier block for DEV_PM_QOS_MIN_FREQUENCY * @nb_max: Notifier block for DEV_PM_QOS_MAX_FREQUENCY * @@ -198,6 +204,9 @@ struct devfreq { struct srcu_notifier_head transition_notifier_list; + /* Pointer to the cooling device if used for thermal mitigation */ + struct thermal_cooling_device *cdev; + struct notifier_block nb_min; struct notifier_block nb_max; }; diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 5c641f930caf..ff700fb6ce1d 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -574,11 +574,6 @@ struct dm_table *dm_swap_table(struct mapped_device *md, */ void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm); -/* - * A wrapper around vmalloc. - */ -void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size); - /*----------------------------------------------------------------- * Macros. *---------------------------------------------------------------*/ diff --git a/include/linux/device.h b/include/linux/device.h index ba660731bd25..38a2071cf776 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -49,7 +49,7 @@ struct dev_iommu; /** * struct subsys_interface - interfaces to device functions * @name: name of the device function - * @subsys: subsytem of the devices to attach to + * @subsys: subsystem of the devices to attach to * @node: the list of functions registered at the subsystem * @add_dev: device hookup to device function handler * @remove_dev: device hookup to device function handler @@ -439,6 +439,9 @@ struct dev_links_info { * @state_synced: The hardware state of this device has been synced to match * the software state of this device by calling the driver/bus * sync_state() callback. + * @can_match: The device has matched with a driver at least once or it is in + * a bus (like AMBA) which can't check for matching drivers until + * other devices probe successfully. * @dma_coherent: this particular device is dma coherent, even if the * architecture supports non-coherent devices. 
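[Editor's note, not part of the patch: a hedged sketch of the debugfs_create_str() helper declared in the debugfs.h hunk above. The file name, mode and backing string are invented; the signature is the one added there.]

static char *foo_fw_version;    /* e.g. assigned with kasprintf() at probe */

static void foo_debugfs_init(struct dentry *parent)
{
        /* Read-only string file under the driver's debugfs directory. */
        debugfs_create_str("fw_version", 0444, parent, &foo_fw_version);
}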
* @dma_ops_bypass: If set to %true then the dma_ops are bypassed for the @@ -545,6 +548,7 @@ struct device { bool offline:1; bool of_node_reused:1; bool state_synced:1; + bool can_match:1; #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) diff --git a/include/linux/devm-helpers.h b/include/linux/devm-helpers.h new file mode 100644 index 000000000000..f40f77717a24 --- /dev/null +++ b/include/linux/devm-helpers.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __LINUX_DEVM_HELPERS_H +#define __LINUX_DEVM_HELPERS_H + +/* + * Functions which do automatically cancel operations or release resources upon + * driver detach. + * + * These should be helpful to avoid mixing the manual and devm-based resource + * management which can be source of annoying, rarely occurring, + * hard-to-reproduce bugs. + * + * Please take into account that devm based cancellation may be performed some + * time after the remove() is ran. + * + * Thus mixing devm and manual resource management can easily cause problems + * when unwinding operations with dependencies. IRQ scheduling a work in a queue + * is typical example where IRQs are often devm-managed and WQs are manually + * cleaned at remove(). If IRQs are not manually freed at remove() (and this is + * often the case when we use devm for IRQs) we have a period of time after + * remove() - and before devm managed IRQs are freed - where new IRQ may fire + * and schedule a work item which won't be cancelled because remove() was + * already ran. + */ + +#include <linux/device.h> +#include <linux/workqueue.h> + +static inline void devm_delayed_work_drop(void *res) +{ + cancel_delayed_work_sync(res); +} + +/** + * devm_delayed_work_autocancel - Resource-managed delayed work allocation + * @dev: Device which lifetime work is bound to + * @w: Work item to be queued + * @worker: Worker function + * + * Initialize delayed work which is automatically cancelled when driver is + * detached. A few drivers need delayed work which must be cancelled before + * driver is detached to avoid accessing removed resources. + * devm_delayed_work_autocancel() can be used to omit the explicit + * cancelleation when driver is detached. + */ +static inline int devm_delayed_work_autocancel(struct device *dev, + struct delayed_work *w, + work_func_t worker) +{ + INIT_DELAYED_WORK(w, worker); + return devm_add_action(dev, devm_delayed_work_drop, w); +} + +#endif diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 9f12efaaa93a..6ffb4b2c6371 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -587,6 +587,7 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr) } struct dma_fence *dma_fence_get_stub(void); +struct dma_fence *dma_fence_allocate_private_stub(void); u64 dma_fence_context_alloc(unsigned num); #define DMA_FENCE_TRACE(f, fmt, args...) \ diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h index 5bc5c946af58..0c05561cad6e 100644 --- a/include/linux/dma-heap.h +++ b/include/linux/dma-heap.h @@ -51,6 +51,15 @@ struct dma_heap_export_info { void *dma_heap_get_drvdata(struct dma_heap *heap); /** + * dma_heap_get_name() - get heap name + * @heap: DMA-Heap to retrieve private data for + * + * Returns: + * The char* for the heap name. 
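[Editor's note, not part of the patch: the devm-helpers.h comment above spells out the devm/manual unwinding hazard; here is a hedged probe() sketch of the intended use of devm_delayed_work_autocancel(). The driver and its data are invented; the helper's signature comes from that hunk.]

struct foo_data {
        struct delayed_work dwork;
};

static void foo_worker(struct work_struct *work)
{
        struct foo_data *data = container_of(to_delayed_work(work),
                                             struct foo_data, dwork);
        /* poll the hardware, reschedule if needed ... */
}

static int foo_probe(struct platform_device *pdev)
{
        struct foo_data *data;
        int ret;

        data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        /* Work is cancelled automatically when the driver detaches. */
        ret = devm_delayed_work_autocancel(&pdev->dev, &data->dwork,
                                           foo_worker);
        if (ret)
                return ret;

        schedule_delayed_work(&data->dwork, msecs_to_jiffies(100));
        return 0;
}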
+ */ +const char *dma_heap_get_name(struct dma_heap *heap); + +/** * dma_heap_add - adds a heap to dmabuf heaps * @exp_info: information needed to register this heap */ diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index 706b68d1359b..6e75a2d689b4 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -40,6 +40,8 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list); void iommu_dma_free_cpu_cached_iovas(unsigned int cpu, struct iommu_domain *domain); +extern bool iommu_dma_forcedac; + #else /* CONFIG_IOMMU_DMA */ struct iommu_domain; @@ -81,10 +83,5 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he { } -static inline void iommu_dma_free_cpu_cached_iovas(unsigned int cpu, - struct iommu_domain *domain) -{ -} - #endif /* CONFIG_IOMMU_DMA */ #endif /* __DMA_IOMMU_H */ diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index 51872e736e7b..0d53a96a3d64 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -22,6 +22,11 @@ struct dma_map_ops { gfp_t gfp); void (*free_pages)(struct device *dev, size_t size, struct page *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir); + struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size, + enum dma_data_direction dir, gfp_t gfp, + unsigned long attrs); + void (*free_noncontiguous)(struct device *dev, size_t size, + struct sg_table *sgt, enum dma_data_direction dir); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t, size_t, unsigned long attrs); @@ -198,6 +203,20 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma, } #endif /* CONFIG_DMA_DECLARE_COHERENT */ +/* + * This is the actual return value from the ->alloc_noncontiguous method. + * The users of the DMA API should only care about the sg_table, but to make + * the DMA-API internal vmaping and freeing easier we stash away the page + * array as well (except for the fallback case). This can go away any time, + * e.g. when a vmap-variant that takes a scatterlist comes along. 
+ */ +struct dma_sgt_handle { + struct sg_table sgt; + struct page **pages; +}; +#define sgt_handle(sgt) \ + container_of((sgt), struct dma_sgt_handle, sgt) + int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 2a984cb4d1e0..183e7103a66d 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -95,7 +95,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { debug_dma_mapping_error(dev, dma_addr); - if (dma_addr == DMA_MAPPING_ERROR) + if (unlikely(dma_addr == DMA_MAPPING_ERROR)) return -ENOMEM; return 0; } @@ -144,6 +144,15 @@ u64 dma_get_required_mask(struct device *dev); size_t dma_max_mapping_size(struct device *dev); bool dma_need_sync(struct device *dev, dma_addr_t dma_addr); unsigned long dma_get_merge_boundary(struct device *dev); +struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size, + enum dma_data_direction dir, gfp_t gfp, unsigned long attrs); +void dma_free_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt, enum dma_data_direction dir); +void *dma_vmap_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt); +void dma_vunmap_noncontiguous(struct device *dev, void *vaddr); +int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma, + size_t size, struct sg_table *sgt); #else /* CONFIG_HAS_DMA */ static inline dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, size_t offset, size_t size, @@ -257,12 +266,37 @@ static inline unsigned long dma_get_merge_boundary(struct device *dev) { return 0; } +static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev, + size_t size, enum dma_data_direction dir, gfp_t gfp, + unsigned long attrs) +{ + return NULL; +} +static inline void dma_free_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt, enum dma_data_direction dir) +{ +} +static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt) +{ + return NULL; +} +static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr) +{ +} +static inline int dma_mmap_noncontiguous(struct device *dev, + struct vm_area_struct *vma, size_t size, struct sg_table *sgt) +{ + return -EINVAL; +} #endif /* CONFIG_HAS_DMA */ struct page *dma_alloc_pages(struct device *dev, size_t size, dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp); void dma_free_pages(struct device *dev, size_t size, struct page *page, dma_addr_t dma_handle, enum dma_data_direction dir); +int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma, + size_t size, struct page *page); static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp) @@ -401,7 +435,6 @@ static inline void dma_sync_sgtable_for_device(struct device *dev, static inline void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) { - return dma_alloc_attrs(dev, size, dma_handle, gfp, (gfp & __GFP_NOWARN) ? 
DMA_ATTR_NO_WARN : 0); } diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h index 4265f328681a..c6bc45ae5e03 100644 --- a/include/linux/dsa/ocelot.h +++ b/include/linux/dsa/ocelot.h @@ -160,11 +160,6 @@ static inline void ocelot_xfh_get_src_port(void *extraction, u64 *src_port) packing(extraction, src_port, 46, 43, OCELOT_TAG_LEN, UNPACK, 0); } -static inline void ocelot_xfh_get_cpuq(void *extraction, u64 *cpuq) -{ - packing(extraction, cpuq, 28, 20, OCELOT_TAG_LEN, UNPACK, 0); -} - static inline void ocelot_xfh_get_qos_class(void *extraction, u64 *qos_class) { packing(extraction, qos_class, 19, 17, OCELOT_TAG_LEN, UNPACK, 0); diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h index dd93735ae228..1eb84562b311 100644 --- a/include/linux/dsa/sja1105.h +++ b/include/linux/dsa/sja1105.h @@ -47,11 +47,12 @@ struct sja1105_tagger_data { }; struct sja1105_skb_cb { + struct sk_buff *clone; u32 meta_tstamp; }; #define SJA1105_SKB_CB(skb) \ - ((struct sja1105_skb_cb *)DSA_SKB_CB_PRIV(skb)) + ((struct sja1105_skb_cb *)((skb)->cb)) struct sja1105_port { u16 subvlan_map[DSA_8021Q_N_SUBVLAN]; diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index a57ee75342cf..dce631e678dd 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h @@ -32,6 +32,11 @@ struct _ddebug { #define _DPRINTK_FLAGS_INCL_FUNCNAME (1<<2) #define _DPRINTK_FLAGS_INCL_LINENO (1<<3) #define _DPRINTK_FLAGS_INCL_TID (1<<4) + +#define _DPRINTK_FLAGS_INCL_ANY \ + (_DPRINTK_FLAGS_INCL_MODNAME | _DPRINTK_FLAGS_INCL_FUNCNAME |\ + _DPRINTK_FLAGS_INCL_LINENO | _DPRINTK_FLAGS_INCL_TID) + #if defined DEBUG #define _DPRINTK_FLAGS_DEFAULT _DPRINTK_FLAGS_PRINT #else diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 1fe8e105b83b..dcb2f9022c1d 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -34,7 +34,7 @@ struct elevator_mq_ops { void (*depth_updated)(struct blk_mq_hw_ctx *); bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); - bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int); + bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int); int (*request_merge)(struct request_queue *q, struct request **, struct bio *); void (*request_merged)(struct request_queue *, struct request *, enum elv_merge); void (*requests_merged)(struct request_queue *, struct request *, struct request *); diff --git a/include/linux/elfnote-lto.h b/include/linux/elfnote-lto.h new file mode 100644 index 000000000000..d4635a3ecc4f --- /dev/null +++ b/include/linux/elfnote-lto.h @@ -0,0 +1,14 @@ +#ifndef __ELFNOTE_LTO_H +#define __ELFNOTE_LTO_H + +#include <linux/elfnote.h> + +#define LINUX_ELFNOTE_LTO_INFO 0x101 + +#ifdef CONFIG_LTO +#define BUILD_LTO_INFO ELFNOTE32("Linux", LINUX_ELFNOTE_LTO_INFO, 1) +#else +#define BUILD_LTO_INFO ELFNOTE32("Linux", LINUX_ELFNOTE_LTO_INFO, 0) +#endif + +#endif /* __ELFNOTE_LTO_H */ diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index 883acef895bc..2e2b8d6140ed 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -360,7 +360,7 @@ void syscall_exit_to_user_mode_work(struct pt_regs *regs); * * This is a combination of syscall_exit_to_user_mode_work() (1,2) and * exit_to_user_mode(). This function is preferred unless there is a - * compelling architectural reason to use the seperate functions. + * compelling architectural reason to use the separate functions. 
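/*
 * Illustrative sketch, not part of the diff: typical use of the new
 * non-contiguous DMA allocator declared in the dma-mapping.h hunk above. A
 * driver allocates device-visible memory that may be scattered, optionally
 * maps it for CPU access, and tears everything down in reverse order. The
 * "foo" function, the direction and the immediate free are assumptions of
 * the example.
 */
#include <linux/dma-mapping.h>
#include <linux/string.h>

static int foo_alloc_buffer(struct device *dev, size_t size)
{
	struct sg_table *sgt;
	void *vaddr;

	sgt = dma_alloc_noncontiguous(dev, size, DMA_TO_DEVICE,
				      GFP_KERNEL, 0);
	if (!sgt)
		return -ENOMEM;

	/* CPU mapping is only needed if the CPU touches the buffer. */
	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
	if (!vaddr) {
		dma_free_noncontiguous(dev, size, sgt, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	memset(vaddr, 0, size);
	/* Make the CPU writes visible to the device before starting DMA. */
	dma_sync_sgtable_for_device(dev, sgt, DMA_TO_DEVICE);

	/* ... program the device using sgt->sgl ... */

	dma_vunmap_noncontiguous(dev, vaddr);
	dma_free_noncontiguous(dev, size, sgt, DMA_TO_DEVICE);
	return 0;
}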
*/ void syscall_exit_to_user_mode(struct pt_regs *regs); @@ -381,7 +381,7 @@ void irqentry_enter_from_user_mode(struct pt_regs *regs); * irqentry_exit_to_user_mode - Interrupt exit work * @regs: Pointer to current's pt_regs * - * Invoked with interrupts disbled and fully valid regs. Returns with all + * Invoked with interrupts disabled and fully valid regs. Returns with all * work handled, interrupts disabled such that the caller can immediately * switch to user mode. Called from architecture specific interrupt * handling code. diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 2e5debc0373c..330345b1be54 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -11,7 +11,7 @@ * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * - * Relocated to include/linux where it belongs by Alan Cox + * Relocated to include/linux where it belongs by Alan Cox * <gw4pts@gw4pts.ampr.org> */ #ifndef _LINUX_ETHERDEVICE_H @@ -29,7 +29,7 @@ struct device; int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr); unsigned char *arch_get_platform_mac_address(void); int nvmem_get_mac_address(struct device *dev, void *addrbuf); -u32 eth_get_headlen(const struct net_device *dev, void *data, unsigned int len); +u32 eth_get_headlen(const struct net_device *dev, const void *data, u32 len); __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); extern const struct header_ops eth_header_ops; diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index cdca84e6dd6b..e030f7510cd3 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -81,6 +81,7 @@ enum { #define ETH_RSS_HASH_NO_CHANGE 0 struct net_device; +struct netlink_ext_ack; /* Some generic methods drivers may use in their ethtool_ops */ u32 ethtool_op_get_link(struct net_device *dev); @@ -243,6 +244,56 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, #define ETHTOOL_STAT_NOT_SET (~0ULL) +static inline void ethtool_stats_init(u64 *stats, unsigned int n) +{ + while (n--) + stats[n] = ETHTOOL_STAT_NOT_SET; +} + +/* Basic IEEE 802.3 MAC statistics (30.3.1.1.*), not otherwise exposed + * via a more targeted API. + */ +struct ethtool_eth_mac_stats { + u64 FramesTransmittedOK; + u64 SingleCollisionFrames; + u64 MultipleCollisionFrames; + u64 FramesReceivedOK; + u64 FrameCheckSequenceErrors; + u64 AlignmentErrors; + u64 OctetsTransmittedOK; + u64 FramesWithDeferredXmissions; + u64 LateCollisions; + u64 FramesAbortedDueToXSColls; + u64 FramesLostDueToIntMACXmitError; + u64 CarrierSenseErrors; + u64 OctetsReceivedOK; + u64 FramesLostDueToIntMACRcvError; + u64 MulticastFramesXmittedOK; + u64 BroadcastFramesXmittedOK; + u64 FramesWithExcessiveDeferral; + u64 MulticastFramesReceivedOK; + u64 BroadcastFramesReceivedOK; + u64 InRangeLengthErrors; + u64 OutOfRangeLengthField; + u64 FrameTooLongErrors; +}; + +/* Basic IEEE 802.3 PHY statistics (30.3.2.1.*), not otherwise exposed + * via a more targeted API. + */ +struct ethtool_eth_phy_stats { + u64 SymbolErrorDuringCarrier; +}; + +/* Basic IEEE 802.3 MAC Ctrl statistics (30.3.3.*), not otherwise exposed + * via a more targeted API. + */ +struct ethtool_eth_ctrl_stats { + u64 MACControlFramesTransmitted; + u64 MACControlFramesReceived; + u64 UnsupportedOpcodesReceived; +}; + /** * struct ethtool_pause_stats - statistics for IEEE 802.3x pause frames * @tx_pause_frames: transmitted pause frame count. 
Reported to user space @@ -262,6 +313,102 @@ struct ethtool_pause_stats { u64 rx_pause_frames; }; +#define ETHTOOL_MAX_LANES 8 + +/** + * struct ethtool_fec_stats - statistics for IEEE 802.3 FEC + * @corrected_blocks: number of received blocks corrected by FEC + * Reported to user space as %ETHTOOL_A_FEC_STAT_CORRECTED. + * + * Equivalent to `30.5.1.1.17 aFECCorrectedBlocks` from the standard. + * + * @uncorrectable_blocks: number of received blocks FEC was not able to correct + * Reported to user space as %ETHTOOL_A_FEC_STAT_UNCORR. + * + * Equivalent to `30.5.1.1.18 aFECUncorrectableBlocks` from the standard. + * + * @corrected_bits: number of bits corrected by FEC + * Similar to @corrected_blocks but counts individual bit changes, + * not entire FEC data blocks. This is a non-standard statistic. + * Reported to user space as %ETHTOOL_A_FEC_STAT_CORR_BITS. + * + * @lane: per-lane/PCS-instance counts as defined by the standard + * @total: error counts for the entire port, for drivers incapable of reporting + * per-lane stats + * + * Drivers should fill in either only total or per-lane statistics, core + * will take care of adding lane values up to produce the total. + */ +struct ethtool_fec_stats { + struct ethtool_fec_stat { + u64 total; + u64 lanes[ETHTOOL_MAX_LANES]; + } corrected_blocks, uncorrectable_blocks, corrected_bits; +}; + +/** + * struct ethtool_rmon_hist_range - byte range for histogram statistics + * @low: low bound of the bucket (inclusive) + * @high: high bound of the bucket (inclusive) + */ +struct ethtool_rmon_hist_range { + u16 low; + u16 high; +}; + +#define ETHTOOL_RMON_HIST_MAX 10 + +/** + * struct ethtool_rmon_stats - selected RMON (RFC 2819) statistics + * @undersize_pkts: Equivalent to `etherStatsUndersizePkts` from the RFC. + * @oversize_pkts: Equivalent to `etherStatsOversizePkts` from the RFC. + * @fragments: Equivalent to `etherStatsFragments` from the RFC. + * @jabbers: Equivalent to `etherStatsJabbers` from the RFC. + * @hist: Packet counter for packet length buckets (e.g. + * `etherStatsPkts128to255Octets` from the RFC). + * @hist_tx: Tx counters in similar form to @hist, not defined in the RFC. + * + * Selection of RMON (RFC 2819) statistics which are not exposed via different + * APIs, primarily the packet-length-based counters. + * Unfortunately different designs choose different buckets beyond + * the 1024B mark (jumbo frame teritory), so the definition of the bucket + * ranges is left to the driver. + */ +struct ethtool_rmon_stats { + u64 undersize_pkts; + u64 oversize_pkts; + u64 fragments; + u64 jabbers; + + u64 hist[ETHTOOL_RMON_HIST_MAX]; + u64 hist_tx[ETHTOOL_RMON_HIST_MAX]; +}; + +#define ETH_MODULE_EEPROM_PAGE_LEN 128 +#define ETH_MODULE_MAX_I2C_ADDRESS 0x7f + +/** + * struct ethtool_module_eeprom - EEPROM dump from specified page + * @offset: Offset within the specified EEPROM page to begin read, in bytes. + * @length: Number of bytes to read. + * @page: Page number to read from. + * @bank: Page bank number to read from, if applicable by EEPROM spec. + * @i2c_address: I2C address of a page. Value less than 0x7f expected. Most + * EEPROMs use 0x50 or 0x51. + * @data: Pointer to buffer with EEPROM data of @length size. + * + * This can be used to manage pages during EEPROM dump in ethtool and pass + * required information to the driver. 
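/*
 * Illustrative sketch, not part of the diff: a driver-side .get_fec_stats
 * implementation for the FEC statistics structures added above. Only the
 * counters the hardware provides are written; everything else keeps the
 * ETHTOOL_STAT_NOT_SET initialisation done by the core. The foo_*() register
 * accessors and foo_priv layout are hypothetical stand-ins.
 */
#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct foo_priv {
	int num_lanes;
};

/* Hypothetical accessors standing in for real hardware counter reads. */
static u64 foo_read_fec_corrected(struct foo_priv *priv, int lane)
{
	return 0;
}

static u64 foo_read_fec_uncorrectable(struct foo_priv *priv, int lane)
{
	return 0;
}

static void foo_get_fec_stats(struct net_device *dev,
			      struct ethtool_fec_stats *fec_stats)
{
	struct foo_priv *priv = netdev_priv(dev);
	int i;

	/* Per-lane counters; the ethtool core sums them into the total. */
	for (i = 0; i < priv->num_lanes && i < ETHTOOL_MAX_LANES; i++) {
		fec_stats->corrected_blocks.lanes[i] =
			foo_read_fec_corrected(priv, i);
		fec_stats->uncorrectable_blocks.lanes[i] =
			foo_read_fec_uncorrectable(priv, i);
	}
	/* corrected_bits is not supported here, so it is left untouched. */
}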
+ */ +struct ethtool_module_eeprom { + __u32 offset; + __u32 length; + __u8 page; + __u8 bank; + __u8 i2c_address; + __u8 *data; +}; + /** * struct ethtool_ops - optional netdev operations * @cap_link_lanes_supported: indicates if the driver supports lanes @@ -407,11 +554,26 @@ struct ethtool_pause_stats { * ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter), * any change to them will be overwritten by kernel. Returns a negative * error code or zero. + * @get_fec_stats: Report FEC statistics. + * Core will sum up per-lane stats to get the total. + * Drivers must not zero statistics which they don't report. The stats + * structure is initialized to ETHTOOL_STAT_NOT_SET indicating driver does + * not report statistics. * @get_fecparam: Get the network device Forward Error Correction parameters. * @set_fecparam: Set the network device Forward Error Correction parameters. * @get_ethtool_phy_stats: Return extended statistics about the PHY device. * This is only useful if the device maintains PHY statistics and * cannot use the standard PHY library helpers. + * @get_phy_tunable: Read the value of a PHY tunable. + * @set_phy_tunable: Set the value of a PHY tunable. + * @get_module_eeprom_by_page: Get a region of plug-in module EEPROM data from + * specified page. Returns a negative error code or the amount of bytes + * read. + * @get_eth_phy_stats: Query some of the IEEE 802.3 PHY statistics. + * @get_eth_mac_stats: Query some of the IEEE 802.3 MAC statistics. + * @get_eth_ctrl_stats: Query some of the IEEE 802.3 MAC Ctrl statistics. + * @get_rmon_stats: Query some of the RMON (RFC 2819) statistics. + * Set %ranges to a pointer to zero-terminated array of byte ranges. * * All operations are optional (i.e. the function pointer may be set * to %NULL) and callers must take this into account. 
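/*
 * Illustrative sketch, not part of the diff: a .get_rmon_stats callback for
 * the RMON interface added above. The histogram bucket boundaries are device
 * specific, so the driver exports them through a zero-terminated
 * ethtool_rmon_hist_range array. The bucket split and the zeroed counter
 * values are placeholders for this example.
 */
#include <linux/ethtool.h>
#include <linux/netdevice.h>

static const struct ethtool_rmon_hist_range foo_rmon_ranges[] = {
	{    0,    64 },
	{   65,   127 },
	{  128,   255 },
	{  256,   511 },
	{  512,  1023 },
	{ 1024, 10239 },
	{}, /* zero entry terminates the array */
};

static void foo_get_rmon_stats(struct net_device *dev,
			       struct ethtool_rmon_stats *rmon_stats,
			       const struct ethtool_rmon_hist_range **ranges)
{
	*ranges = foo_rmon_ranges;

	rmon_stats->undersize_pkts = 0;	/* read from hardware in practice */
	rmon_stats->fragments = 0;
	rmon_stats->hist[0] = 0;	/* packets in the 0-64 byte bucket */
	/* ... remaining buckets follow the order of foo_rmon_ranges ... */
}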
Callers must @@ -507,6 +669,8 @@ struct ethtool_ops { struct ethtool_link_ksettings *); int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); + void (*get_fec_stats)(struct net_device *dev, + struct ethtool_fec_stats *fec_stats); int (*get_fecparam)(struct net_device *, struct ethtool_fecparam *); int (*set_fecparam)(struct net_device *, @@ -517,6 +681,18 @@ struct ethtool_ops { const struct ethtool_tunable *, void *); int (*set_phy_tunable)(struct net_device *, const struct ethtool_tunable *, const void *); + int (*get_module_eeprom_by_page)(struct net_device *dev, + const struct ethtool_module_eeprom *page, + struct netlink_ext_ack *extack); + void (*get_eth_phy_stats)(struct net_device *dev, + struct ethtool_eth_phy_stats *phy_stats); + void (*get_eth_mac_stats)(struct net_device *dev, + struct ethtool_eth_mac_stats *mac_stats); + void (*get_eth_ctrl_stats)(struct net_device *dev, + struct ethtool_eth_ctrl_stats *ctrl_stats); + void (*get_rmon_stats)(struct net_device *dev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges); }; int ethtool_check_ops(const struct ethtool_ops *ops); @@ -540,7 +716,6 @@ int ethtool_virtdev_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd, u32 *dev_speed, u8 *dev_duplex); -struct netlink_ext_ack; struct phy_device; struct phy_tdr_config; @@ -573,7 +748,7 @@ struct ethtool_phy_ops { */ void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops); -/* +/** * ethtool_params_from_link_mode - Derive link parameters from a given link mode * @link_ksettings: Link parameters to be derived from the link mode * @link_mode: Link mode @@ -581,4 +756,14 @@ void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops); void ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings, enum ethtool_link_mode_bit_indices link_mode); + +/** + * ethtool_sprintf - Write formatted string to ethtool string data + * @data: Pointer to start of string to update + * @fmt: Format of string to write + * + * Write formatted string to data. Update data to point at start of + * next string. + */ +extern __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...); #endif /* _LINUX_ETHTOOL_H */ diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index c6cc0a566ef5..5487a80617a3 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -168,7 +168,7 @@ struct f2fs_checkpoint { unsigned char alloc_type[MAX_ACTIVE_LOGS]; /* SIT and NAT version bitmap */ - unsigned char sit_nat_version_bitmap[1]; + unsigned char sit_nat_version_bitmap[]; } __packed; #define CP_CHKSUM_OFFSET 4092 /* default chksum offset in checkpoint */ diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index 3e9c56ee651f..bad41bcb25df 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h @@ -2,8 +2,11 @@ #ifndef _LINUX_FANOTIFY_H #define _LINUX_FANOTIFY_H +#include <linux/sysctl.h> #include <uapi/linux/fanotify.h> +extern struct ctl_table fanotify_table[]; /* for sysctl */ + #define FAN_GROUP_FLAG(group, flag) \ ((group)->fanotify_data.flags & (flag)) @@ -15,15 +18,38 @@ * these constant, the programs may break if re-compiled with new uapi headers * and then run on an old kernel. 
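/*
 * Illustrative sketch, not part of the diff: the ethtool_sprintf() helper
 * declared above removes the usual "copy ETH_GSTRING_LEN bytes, advance the
 * pointer" boilerplate from .get_strings implementations. The queue count
 * and string names are example values.
 */
#include <linux/ethtool.h>
#include <linux/netdevice.h>

static void foo_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < 4; i++) {
		/* ethtool_sprintf() advances @data past the written string */
		ethtool_sprintf(&data, "tx_queue_%d_packets", i);
		ethtool_sprintf(&data, "tx_queue_%d_bytes", i);
	}
}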
*/ -#define FANOTIFY_CLASS_BITS (FAN_CLASS_NOTIF | FAN_CLASS_CONTENT | \ + +/* Group classes where permission events are allowed */ +#define FANOTIFY_PERM_CLASSES (FAN_CLASS_CONTENT | \ FAN_CLASS_PRE_CONTENT) +#define FANOTIFY_CLASS_BITS (FAN_CLASS_NOTIF | FANOTIFY_PERM_CLASSES) + #define FANOTIFY_FID_BITS (FAN_REPORT_FID | FAN_REPORT_DFID_NAME) -#define FANOTIFY_INIT_FLAGS (FANOTIFY_CLASS_BITS | FANOTIFY_FID_BITS | \ - FAN_REPORT_TID | \ - FAN_CLOEXEC | FAN_NONBLOCK | \ - FAN_UNLIMITED_QUEUE | FAN_UNLIMITED_MARKS) +/* + * fanotify_init() flags that require CAP_SYS_ADMIN. + * We do not allow unprivileged groups to request permission events. + * We do not allow unprivileged groups to get other process pid in events. + * We do not allow unprivileged groups to use unlimited resources. + */ +#define FANOTIFY_ADMIN_INIT_FLAGS (FANOTIFY_PERM_CLASSES | \ + FAN_REPORT_TID | \ + FAN_UNLIMITED_QUEUE | \ + FAN_UNLIMITED_MARKS) + +/* + * fanotify_init() flags that are allowed for user without CAP_SYS_ADMIN. + * FAN_CLASS_NOTIF is the only class we allow for unprivileged group. + * We do not allow unprivileged groups to get file descriptors in events, + * so one of the flags for reporting file handles is required. + */ +#define FANOTIFY_USER_INIT_FLAGS (FAN_CLASS_NOTIF | \ + FANOTIFY_FID_BITS | \ + FAN_CLOEXEC | FAN_NONBLOCK) + +#define FANOTIFY_INIT_FLAGS (FANOTIFY_ADMIN_INIT_FLAGS | \ + FANOTIFY_USER_INIT_FLAGS) #define FANOTIFY_MARK_TYPE_BITS (FAN_MARK_INODE | FAN_MARK_MOUNT | \ FAN_MARK_FILESYSTEM) diff --git a/include/linux/fb.h b/include/linux/fb.h index ecfbcc0553a5..a8dccd23c249 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -659,9 +659,6 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, /* drivers/video/fb_defio.c */ int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma); extern void fb_deferred_io_init(struct fb_info *info); -extern void fb_deferred_io_open(struct fb_info *info, - struct inode *inode, - struct file *file); extern void fb_deferred_io_cleanup(struct fb_info *info); extern int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync); diff --git a/include/linux/file.h b/include/linux/file.h index 225982792fa2..2de2e4613d7b 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -92,23 +92,20 @@ extern void put_unused_fd(unsigned int fd); extern void fd_install(unsigned int fd, struct file *file); -extern int __receive_fd(int fd, struct file *file, int __user *ufd, +extern int __receive_fd(struct file *file, int __user *ufd, unsigned int o_flags); static inline int receive_fd_user(struct file *file, int __user *ufd, unsigned int o_flags) { if (ufd == NULL) return -EFAULT; - return __receive_fd(-1, file, ufd, o_flags); + return __receive_fd(file, ufd, o_flags); } static inline int receive_fd(struct file *file, unsigned int o_flags) { - return __receive_fd(-1, file, NULL, o_flags); -} -static inline int receive_fd_replace(int fd, struct file *file, unsigned int o_flags) -{ - return __receive_fd(fd, file, NULL, o_flags); + return __receive_fd(file, NULL, o_flags); } +int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags); extern void flush_delayed_fput(void); extern void __fput_sync(struct file *); diff --git a/include/linux/fileattr.h b/include/linux/fileattr.h new file mode 100644 index 000000000000..9e37e063ac69 --- /dev/null +++ b/include/linux/fileattr.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_FILEATTR_H +#define _LINUX_FILEATTR_H + +/* Flags 
shared betwen flags/xflags */ +#define FS_COMMON_FL \ + (FS_SYNC_FL | FS_IMMUTABLE_FL | FS_APPEND_FL | \ + FS_NODUMP_FL | FS_NOATIME_FL | FS_DAX_FL | \ + FS_PROJINHERIT_FL) + +#define FS_XFLAG_COMMON \ + (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | FS_XFLAG_APPEND | \ + FS_XFLAG_NODUMP | FS_XFLAG_NOATIME | FS_XFLAG_DAX | \ + FS_XFLAG_PROJINHERIT) + +/* + * Merged interface for miscellaneous file attributes. 'flags' originates from + * ext* and 'fsx_flags' from xfs. There's some overlap between the two, which + * is handled by the VFS helpers, so filesystems are free to implement just one + * or both of these sub-interfaces. + */ +struct fileattr { + u32 flags; /* flags (FS_IOC_GETFLAGS/FS_IOC_SETFLAGS) */ + /* struct fsxattr: */ + u32 fsx_xflags; /* xflags field value (get/set) */ + u32 fsx_extsize; /* extsize field value (get/set)*/ + u32 fsx_nextents; /* nextents field value (get) */ + u32 fsx_projid; /* project identifier (get/set) */ + u32 fsx_cowextsize; /* CoW extsize field value (get/set)*/ + /* selectors: */ + bool flags_valid:1; + bool fsx_valid:1; +}; + +int copy_fsxattr_to_user(const struct fileattr *fa, struct fsxattr __user *ufa); + +void fileattr_fill_xflags(struct fileattr *fa, u32 xflags); +void fileattr_fill_flags(struct fileattr *fa, u32 flags); + +/** + * fileattr_has_fsx - check for extended flags/attributes + * @fa: fileattr pointer + * + * Return: true if any attributes are present that are not represented in + * ->flags. + */ +static inline bool fileattr_has_fsx(const struct fileattr *fa) +{ + return fa->fsx_valid && + ((fa->fsx_xflags & ~FS_XFLAG_COMMON) || fa->fsx_extsize != 0 || + fa->fsx_projid != 0 || fa->fsx_cowextsize != 0); +} + +int vfs_fileattr_get(struct dentry *dentry, struct fileattr *fa); +int vfs_fileattr_set(struct user_namespace *mnt_userns, struct dentry *dentry, + struct fileattr *fa); + +#endif /* _LINUX_FILEATTR_H */ diff --git a/include/linux/filter.h b/include/linux/filter.h index 3b00fc906ccd..9a09547bc7ba 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -646,7 +646,8 @@ struct bpf_redirect_info { u32 flags; u32 tgt_index; void *tgt_value; - struct bpf_map *map; + u32 map_id; + enum bpf_map_type map_type; u32 kern_flags; struct bpf_nh_params nh; }; @@ -876,8 +877,7 @@ void bpf_prog_free_linfo(struct bpf_prog *prog); void bpf_prog_fill_jited_linfo(struct bpf_prog *prog, const u32 *insn_to_jit_off); int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog); -void bpf_prog_free_jited_linfo(struct bpf_prog *prog); -void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog); +void bpf_prog_jit_attempt_done(struct bpf_prog *prog); struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags); @@ -918,6 +918,7 @@ u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); void bpf_jit_compile(struct bpf_prog *prog); bool bpf_jit_needs_zext(void); +bool bpf_jit_supports_kfunc_call(void); bool bpf_helper_changes_pkt_data(void *func); static inline bool bpf_dump_raw_ok(const struct cred *cred) @@ -1245,15 +1246,6 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest) void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size); -static inline void *bpf_load_pointer(const struct sk_buff *skb, int k, - unsigned int size, void *buffer) -{ - if (k >= 0) - return skb_header_pointer(skb, k, size, buffer); - - return 
bpf_internal_load_pointer_neg_helper(skb, k, size); -} - static inline int bpf_tell_extensions(void) { return SKF_AD_MAX; @@ -1472,4 +1464,32 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol, } #endif /* IS_ENABLED(CONFIG_IPV6) */ +static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u32 ifindex, u64 flags, + void *lookup_elem(struct bpf_map *map, u32 key)) +{ + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + + /* Lower bits of the flags are used as return code on lookup failure */ + if (unlikely(flags > XDP_TX)) + return XDP_ABORTED; + + ri->tgt_value = lookup_elem(map, ifindex); + if (unlikely(!ri->tgt_value)) { + /* If the lookup fails we want to clear out the state in the + * redirect_info struct completely, so that if an eBPF program + * performs multiple lookups, the last one always takes + * precedence. + */ + ri->map_id = INT_MAX; /* Valid map id idr range: [1,INT_MAX[ */ + ri->map_type = BPF_MAP_TYPE_UNSPEC; + return flags; + } + + ri->tgt_index = ifindex; + ri->map_id = map->id; + ri->map_type = map->map_type; + + return XDP_REDIRECT; +} + #endif /* __LINUX_FILTER_H__ */ diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 71177b17eee5..9d1a5c175065 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -72,6 +72,12 @@ enum pm_api_id { PM_FPGA_LOAD = 22, PM_FPGA_GET_STATUS = 23, PM_GET_CHIPID = 24, + PM_PINCTRL_REQUEST = 28, + PM_PINCTRL_RELEASE = 29, + PM_PINCTRL_GET_FUNCTION = 30, + PM_PINCTRL_SET_FUNCTION = 31, + PM_PINCTRL_CONFIG_PARAM_GET = 32, + PM_PINCTRL_CONFIG_PARAM_SET = 33, PM_IOCTL = 34, PM_QUERY_DATA = 35, PM_CLOCK_ENABLE = 36, @@ -122,6 +128,12 @@ enum pm_query_id { PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS = 3, PM_QID_CLOCK_GET_PARENTS = 4, PM_QID_CLOCK_GET_ATTRIBUTES = 5, + PM_QID_PINCTRL_GET_NUM_PINS = 6, + PM_QID_PINCTRL_GET_NUM_FUNCTIONS = 7, + PM_QID_PINCTRL_GET_NUM_FUNCTION_GROUPS = 8, + PM_QID_PINCTRL_GET_FUNCTION_NAME = 9, + PM_QID_PINCTRL_GET_FUNCTION_GROUPS = 10, + PM_QID_PINCTRL_GET_PIN_GROUPS = 11, PM_QID_CLOCK_GET_NUM_CLOCKS = 12, PM_QID_CLOCK_GET_MAX_DIVISOR = 13, }; @@ -285,6 +297,44 @@ enum dll_reset_type { PM_DLL_RESET_PULSE = 2, }; +enum pm_pinctrl_config_param { + PM_PINCTRL_CONFIG_SLEW_RATE = 0, + PM_PINCTRL_CONFIG_BIAS_STATUS = 1, + PM_PINCTRL_CONFIG_PULL_CTRL = 2, + PM_PINCTRL_CONFIG_SCHMITT_CMOS = 3, + PM_PINCTRL_CONFIG_DRIVE_STRENGTH = 4, + PM_PINCTRL_CONFIG_VOLTAGE_STATUS = 5, + PM_PINCTRL_CONFIG_TRI_STATE = 6, + PM_PINCTRL_CONFIG_MAX = 7, +}; + +enum pm_pinctrl_slew_rate { + PM_PINCTRL_SLEW_RATE_FAST = 0, + PM_PINCTRL_SLEW_RATE_SLOW = 1, +}; + +enum pm_pinctrl_bias_status { + PM_PINCTRL_BIAS_DISABLE = 0, + PM_PINCTRL_BIAS_ENABLE = 1, +}; + +enum pm_pinctrl_pull_ctrl { + PM_PINCTRL_BIAS_PULL_DOWN = 0, + PM_PINCTRL_BIAS_PULL_UP = 1, +}; + +enum pm_pinctrl_schmitt_cmos { + PM_PINCTRL_INPUT_TYPE_CMOS = 0, + PM_PINCTRL_INPUT_TYPE_SCHMITT = 1, +}; + +enum pm_pinctrl_drive_strength { + PM_PINCTRL_DRIVE_STRENGTH_2MA = 0, + PM_PINCTRL_DRIVE_STRENGTH_4MA = 1, + PM_PINCTRL_DRIVE_STRENGTH_8MA = 2, + PM_PINCTRL_DRIVE_STRENGTH_12MA = 3, +}; + enum zynqmp_pm_shutdown_type { ZYNQMP_PM_SHUTDOWN_TYPE_SHUTDOWN = 0, ZYNQMP_PM_SHUTDOWN_TYPE_RESET = 1, @@ -353,12 +403,15 @@ int zynqmp_pm_write_pggs(u32 index, u32 value); int zynqmp_pm_read_pggs(u32 index, u32 *value); int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype); int zynqmp_pm_set_boot_health_status(u32 value); +int zynqmp_pm_pinctrl_request(const u32 
pin); +int zynqmp_pm_pinctrl_release(const u32 pin); +int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id); +int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id); +int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param, + u32 *value); +int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param, + u32 value); #else -static inline struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void) -{ - return ERR_PTR(-ENODEV); -} - static inline int zynqmp_pm_get_api_version(u32 *version) { return -ENODEV; @@ -537,6 +590,38 @@ static inline int zynqmp_pm_set_boot_health_status(u32 value) { return -ENODEV; } + +static inline int zynqmp_pm_pinctrl_request(const u32 pin) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_pinctrl_release(const u32 pin) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param, + u32 *value) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param, + u32 value) +{ + return -ENODEV; +} #endif #endif /* __FIRMWARE_ZYNQMP_H__ */ diff --git a/include/linux/freezer.h b/include/linux/freezer.h index 27828145ca09..0621c5f86c39 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h @@ -279,7 +279,6 @@ static inline int freeze_kernel_threads(void) { return -ENOSYS; } static inline void thaw_processes(void) {} static inline void thaw_kernel_threads(void) {} -static inline bool try_to_freeze_nowarn(void) { return false; } static inline bool try_to_freeze(void) { return false; } static inline void freezer_do_not_count(void) {} diff --git a/include/linux/fs.h b/include/linux/fs.h index ec8f3ddf4a6a..c3c88fdb9b2a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -70,6 +70,7 @@ struct fsverity_info; struct fsverity_operations; struct fs_context; struct fs_parameter_spec; +struct fileattr; extern void __init inode_init(void); extern void __init inode_init_early(void); @@ -144,7 +145,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, /* Expect random access pattern */ #define FMODE_RANDOM ((__force fmode_t)0x1000) -/* File is huge (eg. /dev/kmem): treat loff_t as unsigned */ +/* File is huge (eg. /dev/mem): treat loff_t as unsigned */ #define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000) /* File is opened with O_PATH; almost nothing can be done with it */ @@ -441,7 +442,6 @@ int pagecache_write_end(struct file *, struct address_space *mapping, * @i_mmap: Tree of private and shared mappings. * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable. * @nrpages: Number of page entries, protected by the i_pages lock. - * @nrexceptional: Shadow or DAX entries, protected by the i_pages lock. * @writeback_index: Writeback starts here. * @a_ops: Methods. * @flags: Error bits and flags (AS_*). @@ -462,7 +462,6 @@ struct address_space { struct rb_root_cached i_mmap; struct rw_semaphore i_mmap_rwsem; unsigned long nrpages; - unsigned long nrexceptional; pgoff_t writeback_index; const struct address_space_operations *a_ops; unsigned long flags; @@ -891,18 +890,22 @@ struct fown_struct { int signum; /* posix.1b rt signal to be delivered on IO */ }; -/* - * Track a single file's readahead state +/** + * struct file_ra_state - Track a file's readahead state. + * @start: Where the most recent readahead started. 
+ * @size: Number of pages read in the most recent readahead. + * @async_size: Start next readahead when this many pages are left. + * @ra_pages: Maximum size of a readahead request. + * @mmap_miss: How many mmap accesses missed in the page cache. + * @prev_pos: The last byte in the most recent read request. */ struct file_ra_state { - pgoff_t start; /* where readahead started */ - unsigned int size; /* # of readahead pages */ - unsigned int async_size; /* do asynchronous readahead when - there are only # of pages ahead */ - - unsigned int ra_pages; /* Maximum readahead window */ - unsigned int mmap_miss; /* Cache miss stat for mmap accesses */ - loff_t prev_pos; /* Cache last read() position */ + pgoff_t start; + unsigned int size; + unsigned int async_size; + unsigned int ra_pages; + unsigned int mmap_miss; + loff_t prev_pos; }; /* @@ -1574,52 +1577,172 @@ static inline void i_gid_write(struct inode *inode, gid_t gid) inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid); } +/** + * kuid_into_mnt - map a kuid down into a mnt_userns + * @mnt_userns: user namespace of the relevant mount + * @kuid: kuid to be mapped + * + * Return: @kuid mapped according to @mnt_userns. + * If @kuid has no mapping INVALID_UID is returned. + */ static inline kuid_t kuid_into_mnt(struct user_namespace *mnt_userns, kuid_t kuid) { return make_kuid(mnt_userns, __kuid_val(kuid)); } +/** + * kgid_into_mnt - map a kgid down into a mnt_userns + * @mnt_userns: user namespace of the relevant mount + * @kgid: kgid to be mapped + * + * Return: @kgid mapped according to @mnt_userns. + * If @kgid has no mapping INVALID_GID is returned. + */ static inline kgid_t kgid_into_mnt(struct user_namespace *mnt_userns, kgid_t kgid) { return make_kgid(mnt_userns, __kgid_val(kgid)); } +/** + * i_uid_into_mnt - map an inode's i_uid down into a mnt_userns + * @mnt_userns: user namespace of the mount the inode was found from + * @inode: inode to map + * + * Return: the inode's i_uid mapped down according to @mnt_userns. + * If the inode's i_uid has no mapping INVALID_UID is returned. + */ static inline kuid_t i_uid_into_mnt(struct user_namespace *mnt_userns, const struct inode *inode) { return kuid_into_mnt(mnt_userns, inode->i_uid); } +/** + * i_gid_into_mnt - map an inode's i_gid down into a mnt_userns + * @mnt_userns: user namespace of the mount the inode was found from + * @inode: inode to map + * + * Return: the inode's i_gid mapped down according to @mnt_userns. + * If the inode's i_gid has no mapping INVALID_GID is returned. + */ static inline kgid_t i_gid_into_mnt(struct user_namespace *mnt_userns, const struct inode *inode) { return kgid_into_mnt(mnt_userns, inode->i_gid); } +/** + * kuid_from_mnt - map a kuid up into a mnt_userns + * @mnt_userns: user namespace of the relevant mount + * @kuid: kuid to be mapped + * + * Return: @kuid mapped up according to @mnt_userns. + * If @kuid has no mapping INVALID_UID is returned. + */ static inline kuid_t kuid_from_mnt(struct user_namespace *mnt_userns, kuid_t kuid) { return KUIDT_INIT(from_kuid(mnt_userns, kuid)); } +/** + * kgid_from_mnt - map a kgid up into a mnt_userns + * @mnt_userns: user namespace of the relevant mount + * @kgid: kgid to be mapped + * + * Return: @kgid mapped up according to @mnt_userns. + * If @kgid has no mapping INVALID_GID is returned. 
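/*
 * Illustrative sketch, not part of the diff: reporting ownership to user
 * space with the mapping helpers documented above. A ->getattr()
 * implementation maps the raw inode ids down through the mount's user
 * namespace so that an idmapped mount shows the mapped owner. "foo" is a
 * made-up filesystem and the prototype is the 5.12-era ->getattr() form.
 */
#include <linux/fs.h>
#include <linux/stat.h>

static int foo_getattr(struct user_namespace *mnt_userns,
		       const struct path *path, struct kstat *stat,
		       u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);

	stat->mode = inode->i_mode;
	stat->uid = i_uid_into_mnt(mnt_userns, inode);
	stat->gid = i_gid_into_mnt(mnt_userns, inode);
	stat->size = i_size_read(inode);
	return 0;
}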
+ */ static inline kgid_t kgid_from_mnt(struct user_namespace *mnt_userns, kgid_t kgid) { return KGIDT_INIT(from_kgid(mnt_userns, kgid)); } -static inline kuid_t fsuid_into_mnt(struct user_namespace *mnt_userns) +/** + * mapped_fsuid - return caller's fsuid mapped up into a mnt_userns + * @mnt_userns: user namespace of the relevant mount + * + * Use this helper to initialize a new vfs or filesystem object based on + * the caller's fsuid. A common example is initializing the i_uid field of + * a newly allocated inode triggered by a creation event such as mkdir or + * O_CREAT. Other examples include the allocation of quotas for a specific + * user. + * + * Return: the caller's current fsuid mapped up according to @mnt_userns. + */ +static inline kuid_t mapped_fsuid(struct user_namespace *mnt_userns) { return kuid_from_mnt(mnt_userns, current_fsuid()); } -static inline kgid_t fsgid_into_mnt(struct user_namespace *mnt_userns) +/** + * mapped_fsgid - return caller's fsgid mapped up into a mnt_userns + * @mnt_userns: user namespace of the relevant mount + * + * Use this helper to initialize a new vfs or filesystem object based on + * the caller's fsgid. A common example is initializing the i_gid field of + * a newly allocated inode triggered by a creation event such as mkdir or + * O_CREAT. Other examples include the allocation of quotas for a specific + * user. + * + * Return: the caller's current fsgid mapped up according to @mnt_userns. + */ +static inline kgid_t mapped_fsgid(struct user_namespace *mnt_userns) { return kgid_from_mnt(mnt_userns, current_fsgid()); } +/** + * inode_fsuid_set - initialize inode's i_uid field with callers fsuid + * @inode: inode to initialize + * @mnt_userns: user namespace of the mount the inode was found from + * + * Initialize the i_uid field of @inode. If the inode was found/created via + * an idmapped mount map the caller's fsuid according to @mnt_users. + */ +static inline void inode_fsuid_set(struct inode *inode, + struct user_namespace *mnt_userns) +{ + inode->i_uid = mapped_fsuid(mnt_userns); +} + +/** + * inode_fsgid_set - initialize inode's i_gid field with callers fsgid + * @inode: inode to initialize + * @mnt_userns: user namespace of the mount the inode was found from + * + * Initialize the i_gid field of @inode. If the inode was found/created via + * an idmapped mount map the caller's fsgid according to @mnt_users. + */ +static inline void inode_fsgid_set(struct inode *inode, + struct user_namespace *mnt_userns) +{ + inode->i_gid = mapped_fsgid(mnt_userns); +} + +/** + * fsuidgid_has_mapping() - check whether caller's fsuid/fsgid is mapped + * @sb: the superblock we want a mapping in + * @mnt_userns: user namespace of the relevant mount + * + * Check whether the caller's fsuid and fsgid have a valid mapping in the + * s_user_ns of the superblock @sb. If the caller is on an idmapped mount map + * the caller's fsuid and fsgid according to the @mnt_userns first. + * + * Return: true if fsuid and fsgid is mapped, false if not. 
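/*
 * Illustrative sketch, not part of the diff: a filesystem's inode setup path
 * for a creation event (mkdir, O_CREAT, ...) initialising the new inode's
 * ownership from the caller's fsuid/fsgid, mapped through the mount's user
 * namespace by the helpers above. The "foo" helper and its setgid handling
 * are assumptions of the example.
 */
#include <linux/fs.h>
#include <linux/stat.h>

static void foo_fill_new_inode(struct user_namespace *mnt_userns,
			       struct inode *dir, struct inode *inode,
			       umode_t mode)
{
	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode_fsuid_set(inode, mnt_userns);
	if (dir->i_mode & S_ISGID)
		inode->i_gid = dir->i_gid;	/* inherit group from parent */
	else
		inode_fsgid_set(inode, mnt_userns);
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
}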
+ */ +static inline bool fsuidgid_has_mapping(struct super_block *sb, + struct user_namespace *mnt_userns) +{ + struct user_namespace *s_user_ns = sb->s_user_ns; + + return kuid_has_mapping(s_user_ns, mapped_fsuid(mnt_userns)) && + kgid_has_mapping(s_user_ns, mapped_fsgid(mnt_userns)); +} + extern struct timespec64 current_time(struct inode *inode); /* @@ -1739,7 +1862,7 @@ static inline void sb_start_pagefault(struct super_block *sb) __sb_start_write(sb, SB_FREEZE_PAGEFAULT); } -/* +/** * sb_start_intwrite - get write access to a superblock for internal fs purposes * @sb: the super we write to * @@ -1782,6 +1905,17 @@ int vfs_rmdir(struct user_namespace *, struct inode *, struct dentry *); int vfs_unlink(struct user_namespace *, struct inode *, struct dentry *, struct inode **); +/** + * struct renamedata - contains all information required for renaming + * @old_mnt_userns: old user namespace of the mount the inode was found from + * @old_dir: parent of source + * @old_dentry: source + * @new_mnt_userns: new user namespace of the mount the inode was found from + * @new_dir: parent of destination + * @new_dentry: destination + * @delegated_inode: returns an inode needing a delegation break + * @flags: rename flags + */ struct renamedata { struct user_namespace *old_mnt_userns; struct inode *old_dir; @@ -1963,6 +2097,9 @@ struct inode_operations { struct dentry *, umode_t); int (*set_acl)(struct user_namespace *, struct inode *, struct posix_acl *, int); + int (*fileattr_set)(struct user_namespace *mnt_userns, + struct dentry *dentry, struct fileattr *fa); + int (*fileattr_get)(struct dentry *dentry, struct fileattr *fa); } ____cacheline_aligned; static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio, @@ -2739,6 +2876,8 @@ static inline int filemap_fdatawait(struct address_space *mapping) extern bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend); +extern bool filemap_range_needs_writeback(struct address_space *, + loff_t lstart, loff_t lend); extern int filemap_write_and_wait_range(struct address_space *mapping, loff_t lstart, loff_t lend); extern int __filemap_fdatawrite_range(struct address_space *mapping, @@ -2884,6 +3023,11 @@ static inline bool execute_ok(struct inode *inode) return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode); } +static inline bool inode_wrong_type(const struct inode *inode, umode_t mode) +{ + return (inode->i_mode ^ mode) & S_IFMT; +} + static inline void file_start_write(struct file *file) { if (!S_ISREG(file_inode(file)->i_mode)) @@ -3161,7 +3305,7 @@ static inline ssize_t blockdev_direct_IO(struct kiocb *iocb, void inode_dio_wait(struct inode *inode); -/* +/** * inode_dio_begin - signal start of a direct I/O requests * @inode: inode the direct I/O happens on * @@ -3173,7 +3317,7 @@ static inline void inode_dio_begin(struct inode *inode) atomic_inc(&inode->i_dio_count); } -/* +/** * inode_dio_end - signal finish of a direct I/O requests * @inode: inode the direct I/O happens on * @@ -3567,18 +3711,6 @@ extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len, extern int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice); -int vfs_ioc_setflags_prepare(struct inode *inode, unsigned int oldflags, - unsigned int flags); - -int vfs_ioc_fssetxattr_check(struct inode *inode, const struct fsxattr *old_fa, - struct fsxattr *fa); - -static inline void simple_fill_fsxattr(struct fsxattr *fa, __u32 xflags) -{ - memset(fa, 0, sizeof(*fa)); - fa->fsx_xflags = xflags; -} - /* * Flush file 
data before changing attributes. Caller must hold any locks * required to prevent further writes to this file until we're done setting diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 3f0b19dcfae7..3235ddbdcc09 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -304,6 +304,10 @@ struct fscache_cache_ops { /* dissociate a cache from all the pages it was backing */ void (*dissociate_pages)(struct fscache_cache *cache); + + /* Begin a read operation for the netfs lib */ + int (*begin_read_operation)(struct netfs_read_request *rreq, + struct fscache_retrieval *op); }; extern struct fscache_cookie fscache_fsdef_index; diff --git a/include/linux/fscache.h b/include/linux/fscache.h index a1c928fe98e7..abc1c4737fb8 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -19,6 +19,7 @@ #include <linux/pagemap.h> #include <linux/pagevec.h> #include <linux/list_bl.h> +#include <linux/netfs.h> #if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE) #define fscache_available() (1) @@ -29,16 +30,6 @@ #endif -/* - * overload PG_private_2 to give us PG_fscache - this is used to indicate that - * a page is currently backed by a local disk cache - */ -#define PageFsCache(page) PagePrivate2((page)) -#define SetPageFsCache(page) SetPagePrivate2((page)) -#define ClearPageFsCache(page) ClearPagePrivate2((page)) -#define TestSetPageFsCache(page) TestSetPagePrivate2((page)) -#define TestClearPageFsCache(page) TestClearPagePrivate2((page)) - /* pattern used to fill dead space in an index entry */ #define FSCACHE_INDEX_DEADFILL_PATTERN 0x79 @@ -46,6 +37,7 @@ struct pagevec; struct fscache_cache_tag; struct fscache_cookie; struct fscache_netfs; +struct netfs_read_request; typedef void (*fscache_rw_complete_t)(struct page *page, void *context, @@ -200,6 +192,10 @@ extern void __fscache_update_cookie(struct fscache_cookie *, const void *); extern int __fscache_attr_changed(struct fscache_cookie *); extern void __fscache_invalidate(struct fscache_cookie *); extern void __fscache_wait_on_invalidate(struct fscache_cookie *); + +#ifdef FSCACHE_USE_NEW_IO_API +extern int __fscache_begin_read_operation(struct netfs_read_request *, struct fscache_cookie *); +#else extern int __fscache_read_or_alloc_page(struct fscache_cookie *, struct page *, fscache_rw_complete_t, @@ -223,6 +219,8 @@ extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *, struct inode *); extern void __fscache_readpages_cancel(struct fscache_cookie *cookie, struct list_head *pages); +#endif /* FSCACHE_USE_NEW_IO_API */ + extern void __fscache_disable_cookie(struct fscache_cookie *, const void *, bool); extern void __fscache_enable_cookie(struct fscache_cookie *, const void *, loff_t, bool (*)(void *), void *); @@ -507,6 +505,36 @@ int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size) return -ENOBUFS; } +#ifdef FSCACHE_USE_NEW_IO_API + +/** + * fscache_begin_read_operation - Begin a read operation for the netfs lib + * @rreq: The read request being undertaken + * @cookie: The cookie representing the cache object + * + * Begin a read operation on behalf of the netfs helper library. @rreq + * indicates the read request to which the operation state should be attached; + * @cookie indicates the cache object that will be accessed. + * + * This is intended to be called from the ->begin_cache_operation() netfs lib + * operation as implemented by the network filesystem. 
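/*
 * Illustrative sketch, not part of the diff: how a network filesystem could
 * hook the helper documented above into the netfs library. It assumes the
 * filesystem defines FSCACHE_USE_NEW_IO_API before including fscache.h, that
 * the read request carries the inode being read in rreq->inode, and that
 * foo_inode_cookie() (hypothetical) returns the inode's fscache cookie.
 */
#define FSCACHE_USE_NEW_IO_API
#include <linux/netfs.h>
#include <linux/fscache.h>

/* Placeholder; a real filesystem returns the cookie kept in its inode info. */
static struct fscache_cookie *foo_inode_cookie(struct inode *inode)
{
	return NULL;
}

static int foo_begin_cache_operation(struct netfs_read_request *rreq)
{
	return fscache_begin_read_operation(rreq, foo_inode_cookie(rreq->inode));
}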
+ * + * Returns: + * * 0 - Success + * * -ENOBUFS - No caching available + * * Other error code from the cache, such as -ENOMEM. + */ +static inline +int fscache_begin_read_operation(struct netfs_read_request *rreq, + struct fscache_cookie *cookie) +{ + if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie)) + return __fscache_begin_read_operation(rreq, cookie); + return -ENOBUFS; +} + +#else /* FSCACHE_USE_NEW_IO_API */ + /** * fscache_read_or_alloc_page - Read a page from the cache or allocate a block * in which to store it @@ -786,6 +814,8 @@ void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie, __fscache_uncache_all_inode_pages(cookie, inode); } +#endif /* FSCACHE_USE_NEW_IO_API */ + /** * fscache_disable_cookie - Disable a cookie * @cookie: The cookie representing the cache object diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h index 0ac27b233f12..fdb55ca47a4f 100644 --- a/include/linux/fsl/guts.h +++ b/include/linux/fsl/guts.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/** +/* * Freecale 85xx and 86xx Global Utilties register set * * Authors: Jeff Brown @@ -14,7 +14,7 @@ #include <linux/types.h> #include <linux/io.h> -/** +/* * Global Utility Registers. * * Not all registers defined in this structure are available on all chips, so diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index e5409b83e731..1ce66748a2d2 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -167,7 +167,6 @@ struct fsnotify_ops { */ struct fsnotify_event { struct list_head list; - unsigned long objectid; /* identifier for queue merges */ }; /* @@ -207,9 +206,6 @@ struct fsnotify_group { /* stores all fastpath marks assoc with this group so they can be cleaned on unregister */ struct mutex mark_mutex; /* protect marks_list */ - atomic_t num_marks; /* 1 for each mark and 1 for not being - * past the point of no return when freeing - * a group */ atomic_t user_waits; /* Number of tasks waiting for user * response */ struct list_head marks_list; /* all inode marks for this group */ @@ -234,13 +230,14 @@ struct fsnotify_group { #endif #ifdef CONFIG_FANOTIFY struct fanotify_group_private_data { + /* Hash table of events for merge */ + struct hlist_head *merge_hash; /* allows a group to block waiting for a userspace response */ struct list_head access_list; wait_queue_head_t access_waitq; int flags; /* flags from fanotify_init() */ int f_flags; /* event_f_flags from fanotify_init() */ - unsigned int max_marks; - struct user_struct *user; + struct ucounts *ucounts; } fanotify_data; #endif /* CONFIG_FANOTIFY */ }; @@ -487,15 +484,23 @@ extern void fsnotify_destroy_event(struct fsnotify_group *group, /* attach the event to the group notification queue */ extern int fsnotify_add_event(struct fsnotify_group *group, struct fsnotify_event *event, - int (*merge)(struct list_head *, - struct fsnotify_event *)); + int (*merge)(struct fsnotify_group *, + struct fsnotify_event *), + void (*insert)(struct fsnotify_group *, + struct fsnotify_event *)); /* Queue overflow event to a notification group */ static inline void fsnotify_queue_overflow(struct fsnotify_group *group) { - fsnotify_add_event(group, group->overflow_event, NULL); + fsnotify_add_event(group, group->overflow_event, NULL, NULL); +} + +static inline bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group) +{ + assert_spin_locked(&group->notification_lock); + + return list_empty(&group->notification_list); } -/* true if the group 
notification queue is empty */ extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group); /* return, but do not dequeue the first event on the notification queue */ extern struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group); @@ -576,11 +581,9 @@ extern void fsnotify_put_mark(struct fsnotify_mark *mark); extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info); extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info); -static inline void fsnotify_init_event(struct fsnotify_event *event, - unsigned long objectid) +static inline void fsnotify_init_event(struct fsnotify_event *event) { INIT_LIST_HEAD(&event->list); - event->objectid = objectid; } #else diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 86e5028bfa20..a69f363b61bf 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -33,7 +33,7 @@ /* * If the arch's mcount caller does not support all of ftrace's * features, then it must call an indirect function that - * does. Or at least does enough to prevent any unwelcomed side effects. + * does. Or at least does enough to prevent any unwelcome side effects. */ #if !ARCH_SUPPORTS_FTRACE_OPS # define FTRACE_FORCE_LIST_FUNC 1 @@ -389,7 +389,7 @@ DECLARE_PER_CPU(int, disable_stack_tracer); */ static inline void stack_tracer_disable(void) { - /* Preemption or interupts must be disabled */ + /* Preemption or interrupts must be disabled */ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) WARN_ON_ONCE(!preempt_count() || !irqs_disabled()); this_cpu_inc(disable_stack_tracer); diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index ed4e67a7ff1c..59828516ebaf 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -187,5 +187,6 @@ extern u32 fw_devlink_get_flags(void); extern bool fw_devlink_is_strict(void); int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup); void fwnode_links_purge(struct fwnode_handle *fwnode); +void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode); #endif diff --git a/include/linux/genhd.h b/include/linux/genhd.h index f364619092cc..6fc26f7bdf71 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -204,25 +204,6 @@ static inline dev_t disk_devt(struct gendisk *disk) void disk_uevent(struct gendisk *disk, enum kobject_action action); -/* - * Smarter partition iterator without context limits. 
- */ -#define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */ -#define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */ -#define DISK_PITER_INCL_EMPTY_PART0 (1 << 3) /* include empty partition 0 */ - -struct disk_part_iter { - struct gendisk *disk; - struct block_device *part; - unsigned long idx; - unsigned int flags; -}; - -extern void disk_part_iter_init(struct disk_part_iter *piter, - struct gendisk *disk, unsigned int flags); -struct block_device *disk_part_iter_next(struct disk_part_iter *piter); -extern void disk_part_iter_exit(struct disk_part_iter *piter); - /* block/genhd.c */ extern void device_add_disk(struct device *parent, struct gendisk *disk, const struct attribute_group **groups); @@ -273,7 +254,7 @@ static inline sector_t get_capacity(struct gendisk *disk) int bdev_disk_changed(struct block_device *bdev, bool invalidate); int blk_add_partitions(struct gendisk *disk, struct block_device *bdev); -int blk_drop_partitions(struct block_device *bdev); +void blk_drop_partitions(struct gendisk *disk); extern struct gendisk *__alloc_disk_node(int minors, int node_id); extern void put_disk(struct gendisk *disk); @@ -325,8 +306,6 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev, } #endif /* CONFIG_SYSFS */ -extern struct rw_semaphore bdev_lookup_sem; - dev_t blk_lookup_devt(const char *name, int partno); void blk_request_module(dev_t devt); #ifdef CONFIG_BLOCK diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h index 6cb82301d8e9..939b1a8f571b 100644 --- a/include/linux/genl_magic_func.h +++ b/include/linux/genl_magic_func.h @@ -404,4 +404,3 @@ s_fields \ /* }}}1 */ #endif /* GENL_MAGIC_FUNC_H */ -/* vim: set foldmethod=marker foldlevel=1 nofoldenable : */ diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h index 35d21fddaf2d..f81d48987528 100644 --- a/include/linux/genl_magic_struct.h +++ b/include/linux/genl_magic_struct.h @@ -283,4 +283,3 @@ enum { \ /* }}}1 */ #endif /* GENL_MAGIC_STRUCT_H */ -/* vim: set foldmethod=marker nofoldenable : */ diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 8572a1474e16..11da8af06704 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -490,7 +490,7 @@ static inline int gfp_zonelist(gfp_t flags) /* * We get the zone list from the current node and the gfp_mask. - * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones. + * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones. * There are two zonelists per node, one for all zones with memory and * one containing just zones from the node the zonelist belongs to. 
* @@ -515,14 +515,25 @@ static inline int arch_make_page_accessible(struct page *page) } #endif -struct page * -__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid, - nodemask_t *nodemask); +struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, + nodemask_t *nodemask); -static inline struct page * -__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid) +unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, + nodemask_t *nodemask, int nr_pages, + struct list_head *page_list, + struct page **page_array); + +/* Bulk allocate order-0 pages */ +static inline unsigned long +alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list) +{ + return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, list, NULL); +} + +static inline unsigned long +alloc_pages_bulk_array(gfp_t gfp, unsigned long nr_pages, struct page **page_array) { - return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL); + return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, NULL, page_array); } /* @@ -535,7 +546,7 @@ __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES); VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid)); - return __alloc_pages(gfp_mask, order, nid); + return __alloc_pages(gfp_mask, order, nid, NULL); } /* @@ -553,13 +564,7 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, } #ifdef CONFIG_NUMA -extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order); - -static inline struct page * -alloc_pages(gfp_t gfp_mask, unsigned int order) -{ - return alloc_pages_current(gfp_mask, order); -} +struct page *alloc_pages(gfp_t gfp, unsigned int order); extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, struct vm_area_struct *vma, unsigned long addr, int node, bool hugepage); @@ -652,7 +657,7 @@ extern int alloc_contig_range(unsigned long start, unsigned long end, extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, int nid, nodemask_t *nodemask); #endif -void free_contig_range(unsigned long pfn, unsigned int nr_pages); +void free_contig_range(unsigned long pfn, unsigned long nr_pages); #ifdef CONFIG_CMA /* CMA stuff */ diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 286de0520574..3a268781fcec 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -227,7 +227,7 @@ struct gpio_irq_chip { /** * @valid_mask: * - * If not %NULL holds bitmask of GPIOs which are valid to be included + * If not %NULL, holds bitmask of GPIOs which are valid to be included * in IRQ domain of the chip. */ unsigned long *valid_mask; @@ -346,7 +346,7 @@ struct gpio_irq_chip { * output. * * A gpio_chip can help platforms abstract various sources of GPIOs so - * they can all be accessed through a common programing interface. + * they can all be accessed through a common programming interface. * Example sources would be SOC controllers, FPGAs, multifunction * chips, dedicated GPIO expanders, and so on. * @@ -435,15 +435,15 @@ struct gpio_chip { /** * @valid_mask: * - * If not %NULL holds bitmask of GPIOs which are valid to be used + * If not %NULL, holds bitmask of GPIOs which are valid to be used * from the chip. 
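/*
 * Illustrative sketch, not part of the diff: the bulk page allocator added
 * in gfp.h above. alloc_pages_bulk_array() fills the NULL slots of the page
 * array with order-0 pages and returns how many entries are populated
 * afterwards, which may be fewer than requested; this example falls back to
 * single-page allocation for the remainder. The "foo" helper is made up.
 */
#include <linux/gfp.h>

static int foo_fill_page_array(struct page **pages, unsigned long nr)
{
	unsigned long filled;

	filled = alloc_pages_bulk_array(GFP_KERNEL, nr, pages);
	while (filled < nr) {
		pages[filled] = alloc_page(GFP_KERNEL);
		if (!pages[filled])
			return -ENOMEM; /* caller frees pages[0..filled) */
		filled++;
	}
	return 0;
}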
*/ unsigned long *valid_mask; #if defined(CONFIG_OF_GPIO) /* - * If CONFIG_OF is enabled, then all GPIO controllers described in the - * device tree automatically may have an OF translation + * If CONFIG_OF_GPIO is enabled, then all GPIO controllers described in + * the device tree automatically may have an OF translation */ /** @@ -508,7 +508,7 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, * for GPIOs will fail rudely. * * gpiochip_add_data() must only be called after gpiolib initialization, - * ie after core_initcall(). + * i.e. after core_initcall(). * * If gc->base is negative, this requests dynamic assignment of * a range of valid GPIOs. @@ -624,8 +624,17 @@ void gpiochip_irq_domain_deactivate(struct irq_domain *domain, bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc, unsigned int offset); +#ifdef CONFIG_GPIOLIB_IRQCHIP int gpiochip_irqchip_add_domain(struct gpio_chip *gc, struct irq_domain *domain); +#else +static inline int gpiochip_irqchip_add_domain(struct gpio_chip *gc, + struct irq_domain *domain) +{ + WARN_ON(1); + return -EINVAL; +} +#endif int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset); void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset); diff --git a/include/linux/gpio/regmap.h b/include/linux/gpio/regmap.h index ad76f3d0a6ba..334dd928042b 100644 --- a/include/linux/gpio/regmap.h +++ b/include/linux/gpio/regmap.h @@ -4,6 +4,7 @@ #define _LINUX_GPIO_REGMAP_H struct device; +struct fwnode_handle; struct gpio_regmap; struct irq_domain; struct regmap; @@ -16,6 +17,8 @@ struct regmap; * @parent: The parent device * @regmap: The regmap used to access the registers * given, the name of the device is used + * @fwnode: (Optional) The firmware node. + * If not given, the fwnode of the parent is used. * @label: (Optional) Descriptive name for GPIO controller. * If not given, the name of the device is used. 
* @ngpio: Number of GPIOs @@ -57,6 +60,7 @@ struct regmap; struct gpio_regmap_config { struct device *parent; struct regmap *regmap; + struct fwnode_handle *fwnode; const char *label; int ngpio; diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 7c9d6a2d7e90..69bc86ea382c 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -6,6 +6,7 @@ #include <linux/preempt.h> #include <linux/lockdep.h> #include <linux/ftrace_irq.h> +#include <linux/sched.h> #include <linux/vtime.h> #include <asm/hardirq.h> diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index 9850d59d6f1c..c8ec982ff498 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -156,7 +156,7 @@ enum hdmi_content_type { }; enum hdmi_metadata_type { - HDMI_STATIC_METADATA_TYPE1 = 1, + HDMI_STATIC_METADATA_TYPE1 = 0, }; enum hdmi_eotf { diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h index 763802b2b8f9..c27329e2a5ad 100644 --- a/include/linux/hid-sensor-hub.h +++ b/include/linux/hid-sensor-hub.h @@ -231,6 +231,7 @@ struct hid_sensor_common { struct hid_sensor_hub_attribute_info report_state; struct hid_sensor_hub_attribute_info power_state; struct hid_sensor_hub_attribute_info sensitivity; + struct hid_sensor_hub_attribute_info sensitivity_rel; struct hid_sensor_hub_attribute_info report_latency; struct work_struct work; }; @@ -248,11 +249,17 @@ static inline int hid_sensor_convert_exponent(int unit_expo) int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev, u32 usage_id, - struct hid_sensor_common *st); + struct hid_sensor_common *st, + const u32 *sensitivity_addresses, + u32 sensitivity_addresses_len); int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st, int val1, int val2); +int hid_sensor_write_raw_hyst_rel_value(struct hid_sensor_common *st, int val1, + int val2); int hid_sensor_read_raw_hyst_value(struct hid_sensor_common *st, int *val1, int *val2); +int hid_sensor_read_raw_hyst_rel_value(struct hid_sensor_common *st, + int *val1, int *val2); int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st, int val1, int val2); int hid_sensor_read_samp_freq_value(struct hid_sensor_common *st, diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h index 3bbdbccc5805..ac631159403a 100644 --- a/include/linux/hid-sensor-ids.h +++ b/include/linux/hid-sensor-ids.h @@ -149,6 +149,7 @@ /* Per data field properties */ #define HID_USAGE_SENSOR_DATA_MOD_NONE 0x00 #define HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS 0x1000 +#define HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_REL_PCT 0xE000 /* Power state enumerations */ #define HID_USAGE_SENSOR_PROP_POWER_STATE_UNDEFINED_ENUM 0x200850 diff --git a/include/linux/hid.h b/include/linux/hid.h index ef702b3f56e3..271021e20a3f 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -153,6 +153,7 @@ struct hid_item { #define HID_UP_CONSUMER 0x000c0000 #define HID_UP_DIGITIZER 0x000d0000 #define HID_UP_PID 0x000f0000 +#define HID_UP_BATTERY 0x00850000 #define HID_UP_HPVENDOR 0xff7f0000 #define HID_UP_HPVENDOR2 0xff010000 #define HID_UP_MSVENDOR 0xff000000 @@ -262,6 +263,8 @@ struct hid_item { #define HID_CP_SELECTION 0x000c0080 #define HID_CP_MEDIASELECTION 0x000c0087 #define HID_CP_SELECTDISC 0x000c00ba +#define HID_CP_VOLUMEUP 0x000c00e9 +#define HID_CP_VOLUMEDOWN 0x000c00ea #define HID_CP_PLAYBACKSPEED 0x000c00f1 #define HID_CP_PROXIMITY 0x000c0109 #define HID_CP_SPEAKERSYSTEM 0x000c0160 @@ -297,6 +300,8 @@ struct hid_item { #define HID_DG_TOOLSERIALNUMBER 
0x000d005b #define HID_DG_LATENCYMODE 0x000d0060 +#define HID_BAT_ABSOLUTESTATEOFCHARGE 0x00850065 + #define HID_VD_ASUS_CUSTOM_MEDIA_KEYS 0xff310076 /* * HID report types --- Ouch! HID spec says 1 2 3! diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 44170f312ae7..832b49b50c7b 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -332,4 +332,11 @@ static inline void memcpy_to_page(struct page *page, size_t offset, kunmap_local(to); } +static inline void memzero_page(struct page *page, size_t offset, size_t len) +{ + char *addr = kmap_atomic(page); + memset(addr + offset, 0, len); + kunmap_atomic(addr); +} + #endif /* _LINUX_HIGHMEM_H */ diff --git a/include/linux/host1x.h b/include/linux/host1x.h index 9eb77c87a83b..232e1bd507a7 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -25,14 +25,18 @@ u64 host1x_get_dma_mask(struct host1x *host1x); /** * struct host1x_client_ops - host1x client operations + * @early_init: host1x client early initialization code * @init: host1x client initialization code * @exit: host1x client tear down code + * @late_exit: host1x client late tear down code * @suspend: host1x client suspend code * @resume: host1x client resume code */ struct host1x_client_ops { + int (*early_init)(struct host1x_client *client); int (*init)(struct host1x_client *client); int (*exit)(struct host1x_client *client); + int (*late_exit)(struct host1x_client *client); int (*suspend)(struct host1x_client *client); int (*resume)(struct host1x_client *client); }; @@ -142,7 +146,9 @@ struct host1x_syncpt_base; struct host1x_syncpt; struct host1x; -struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id); +struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host, u32 id); +struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host, u32 id); +struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp); u32 host1x_syncpt_id(struct host1x_syncpt *sp); u32 host1x_syncpt_read_min(struct host1x_syncpt *sp); u32 host1x_syncpt_read_max(struct host1x_syncpt *sp); @@ -153,11 +159,17 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout, u32 *value); struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client, unsigned long flags); -void host1x_syncpt_free(struct host1x_syncpt *sp); +void host1x_syncpt_put(struct host1x_syncpt *sp); +struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host, + unsigned long flags, + const char *name); struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp); u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base); +void host1x_syncpt_release_vblank_reservation(struct host1x_client *client, + u32 syncpt_id); + /* * host1x channel */ @@ -218,7 +230,7 @@ struct host1x_job { dma_addr_t *reloc_addr_phys; /* Sync point id, number of increments and end related to the submit */ - u32 syncpt_id; + struct host1x_syncpt *syncpt; u32 syncpt_incrs; u32 syncpt_end; diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index ba973efcd369..9626fda5efce 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -87,9 +87,6 @@ enum transparent_hugepage_flag { TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG, TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG, -#ifdef CONFIG_DEBUG_VM - TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG, -#endif }; struct kobject; diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index cccd1aab69dd..b92f25ccef58 100644 --- a/include/linux/hugetlb.h +++ 
b/include/linux/hugetlb.h @@ -11,6 +11,7 @@ #include <linux/kref.h> #include <linux/pgtable.h> #include <linux/gfp.h> +#include <linux/userfaultfd_k.h> struct ctl_table; struct user_struct; @@ -134,11 +135,14 @@ void hugetlb_show_meminfo(void); unsigned long hugetlb_total_pages(void); vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags); +#ifdef CONFIG_USERFAULTFD int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, + enum mcopy_atomic_mode mode, struct page **pagep); +#endif /* CONFIG_USERFAULTFD */ bool hugetlb_reserve_pages(struct inode *inode, long from, long to, struct vm_area_struct *vma, vm_flags_t vm_flags); @@ -152,7 +156,8 @@ void hugetlb_fix_reserve_counts(struct inode *inode); extern struct mutex *hugetlb_fault_mutex_table; u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx); -pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); +pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, pud_t *pud); struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage); @@ -161,7 +166,7 @@ extern struct list_head huge_boot_pages; /* arch callbacks */ -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz); pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz); @@ -187,6 +192,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot); bool is_hugetlb_entry_migration(pte_t pte); +void hugetlb_unshare_all_pmds(struct vm_area_struct *vma); #else /* !CONFIG_HUGETLB_PAGE */ @@ -308,16 +314,19 @@ static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, BUG(); } +#ifdef CONFIG_USERFAULTFD static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, + enum mcopy_atomic_mode mode, struct page **pagep) { BUG(); return 0; } +#endif /* CONFIG_USERFAULTFD */ static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz) @@ -368,6 +377,8 @@ static inline vm_fault_t hugetlb_fault(struct mm_struct *mm, return 0; } +static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { } + #endif /* !CONFIG_HUGETLB_PAGE */ /* * hugepages at page global directory. 
If arch support @@ -555,6 +566,7 @@ HPAGEFLAG(Freed, freed) #define HSTATE_NAME_LEN 32 /* Defines one hugetlb page size */ struct hstate { + struct mutex resize_lock; int next_nid_to_alloc; int next_nid_to_free; unsigned int order; @@ -583,6 +595,7 @@ struct huge_bootmem_page { struct hstate *hstate; }; +int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list); struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve); struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, @@ -865,6 +878,12 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, #else /* CONFIG_HUGETLB_PAGE */ struct hstate {}; +static inline int isolate_or_dissolve_huge_page(struct page *page, + struct list_head *list) +{ + return -ENOMEM; +} + static inline struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve) @@ -1039,4 +1058,14 @@ static inline __init void hugetlb_cma_check(void) } #endif +bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr); + +#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE +/* + * ARCHes with special requirements for evicting HUGETLB backing TLB entries can + * implement this. + */ +#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) +#endif + #endif /* _LINUX_HUGETLB_H */ diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index f1d74dcf0353..d1e59dbef1dd 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -234,6 +234,7 @@ static inline u32 hv_get_avail_to_write_percent( * 5 . 0 (Newer Windows 10) * 5 . 1 (Windows 10 RS4) * 5 . 2 (Windows Server 2019, RS5) + * 5 . 3 (Windows Server 2022) */ #define VERSION_WS2008 ((0 << 16) | (13)) @@ -245,6 +246,7 @@ static inline u32 hv_get_avail_to_write_percent( #define VERSION_WIN10_V5 ((5 << 16) | (0)) #define VERSION_WIN10_V5_1 ((5 << 16) | (1)) #define VERSION_WIN10_V5_2 ((5 << 16) | (2)) +#define VERSION_WIN10_V5_3 ((5 << 16) | (3)) /* Make maximum size of pipe payload of 16K */ #define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384) @@ -284,7 +286,7 @@ struct vmbus_channel_offer { /* * Pipes: - * The following sructure is an integrated pipe protocol, which + * The following structure is an integrated pipe protocol, which * is implemented on top of standard user-defined data. Pipe * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own * use. @@ -475,6 +477,7 @@ enum vmbus_channel_message_type { CHANNELMSG_TL_CONNECT_REQUEST = 21, CHANNELMSG_MODIFYCHANNEL = 22, CHANNELMSG_TL_CONNECT_RESULT = 23, + CHANNELMSG_MODIFYCHANNEL_RESPONSE = 24, CHANNELMSG_COUNT }; @@ -588,6 +591,13 @@ struct vmbus_channel_open_result { u32 status; } __packed; +/* Modify Channel Result parameters */ +struct vmbus_channel_modifychannel_response { + struct vmbus_channel_message_header header; + u32 child_relid; + u32 status; +} __packed; + /* Close channel parameters; */ struct vmbus_channel_close_channel { struct vmbus_channel_message_header header; @@ -720,6 +730,7 @@ struct vmbus_channel_msginfo { struct vmbus_channel_gpadl_torndown gpadl_torndown; struct vmbus_channel_gpadl_created gpadl_created; struct vmbus_channel_version_response version_response; + struct vmbus_channel_modifychannel_response modify_response; } response; u32 msgsize; @@ -883,11 +894,11 @@ struct vmbus_channel { * Support for sub-channels. For high performance devices, * it will be useful to have multiple sub-channels to support * a scalable communication infrastructure with the host. 
- * The support for sub-channels is implemented as an extention + * The support for sub-channels is implemented as an extension * to the current infrastructure. * The initial offer is considered the primary channel and this * offer message will indicate if the host supports sub-channels. - * The guest is free to ask for sub-channels to be offerred and can + * The guest is free to ask for sub-channels to be offered and can * open these sub-channels as a normal "primary" channel. However, * all sub-channels will have the same type and instance guids as the * primary channel. Requests sent on a given channel will result in a @@ -951,7 +962,7 @@ struct vmbus_channel { * Clearly, these optimizations improve throughput at the expense of * latency. Furthermore, since the channel is shared for both * control and data messages, control messages currently suffer - * unnecessary latency adversley impacting performance and boot + * unnecessary latency adversely impacting performance and boot * time. To fix this issue, permit tagging the channel as being * in "low latency" mode. In this mode, we will bypass the monitor * mechanism. @@ -1594,7 +1605,7 @@ extern __u32 vmbus_proto_version; int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id, const guid_t *shv_host_servie_id); -int vmbus_send_modifychannel(u32 child_relid, u32 target_vp); +int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp); void vmbus_set_event(struct vmbus_channel *channel); /* Get the start of the ring buffer. */ @@ -1726,6 +1737,7 @@ static inline unsigned long virt_to_hvpfn(void *addr) #define NR_HV_HYP_PAGES_IN_PAGE (PAGE_SIZE / HV_HYP_PAGE_SIZE) #define offset_in_hvpage(ptr) ((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK) #define HVPFN_UP(x) (((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT) +#define HVPFN_DOWN(x) ((x) >> HV_HYP_PAGE_SHIFT) #define page_to_hvpfn(page) (page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE) #endif /* _HYPERV_H */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 56622658b215..e8f2ac8c9c3d 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -51,6 +51,9 @@ struct module; struct property_entry; #if IS_ENABLED(CONFIG_I2C) +/* Return the Frequency mode string based on the bus frequency */ +const char *i2c_freq_mode_string(u32 bus_freq_hz); + /* * The master routines are the ones normally used to transmit data to devices * on a bus (or read from them). Apart from two basic transfer functions to @@ -306,6 +309,8 @@ struct i2c_driver { * userspace_devices list * @slave_cb: Callback when I2C slave mode of an adapter is used. The adapter * calls it to pass on slave events to the slave driver. + * @devres_group_id: id of the devres group that will be created for resources + * acquired when probing this device. * * An i2c_client identifies a single device (i.e. chip) connected to an * i2c bus. 
The behaviour exposed to Linux is defined by the driver @@ -334,6 +339,7 @@ struct i2c_client { #if IS_ENABLED(CONFIG_I2C_SLAVE) i2c_slave_cb_t slave_cb; /* callback for slave mode */ #endif + void *devres_group_id; /* ID of probe devres group */ }; #define to_i2c_client(d) container_of(d, struct i2c_client, dev) @@ -391,7 +397,7 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; } * @platform_data: stored in i2c_client.dev.platform_data * @of_node: pointer to OpenFirmware device node * @fwnode: device node supplied by the platform firmware - * @properties: additional device properties for the device + * @swnode: software node for the device * @resources: resources associated with the device * @num_resources: number of resources in the @resources array * @irq: stored in i2c_client.irq @@ -415,7 +421,7 @@ struct i2c_board_info { void *platform_data; struct device_node *of_node; struct fwnode_handle *fwnode; - const struct property_entry *properties; + const struct software_node *swnode; const struct resource *resources; unsigned int num_resources; int irq; @@ -687,6 +693,8 @@ struct i2c_adapter_quirks { #define I2C_AQ_NO_ZERO_LEN_READ BIT(5) #define I2C_AQ_NO_ZERO_LEN_WRITE BIT(6) #define I2C_AQ_NO_ZERO_LEN (I2C_AQ_NO_ZERO_LEN_READ | I2C_AQ_NO_ZERO_LEN_WRITE) +/* adapter cannot do repeated START */ +#define I2C_AQ_NO_REP_START BIT(7) /* * i2c_adapter is the structure used to identify a physical i2c bus along @@ -844,6 +852,7 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap) */ #if IS_ENABLED(CONFIG_I2C) int i2c_add_adapter(struct i2c_adapter *adap); +int devm_i2c_add_adapter(struct device *dev, struct i2c_adapter *adapter); void i2c_del_adapter(struct i2c_adapter *adap); int i2c_add_numbered_adapter(struct i2c_adapter *adap); diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 72ff75fb1971..2967437f1b11 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -2006,24 +2006,22 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, * A-MDPU Length Exponent field in the HT capabilities, VHT capabilities and the * same field in the HE capabilities. 
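A rough sketch of a bus driver using the devres-managed adapter registration and the bus-frequency string helper added in the i2c.h hunks above; the foo_* names and the fixed standard-mode rate are illustrative assumptions, not part of this diff.

#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Assumed to be implemented elsewhere in this hypothetical driver. */
extern const struct i2c_algorithm foo_i2c_algo;

static int foo_i2c_probe(struct platform_device *pdev)
{
	struct i2c_adapter *adap;

	adap = devm_kzalloc(&pdev->dev, sizeof(*adap), GFP_KERNEL);
	if (!adap)
		return -ENOMEM;

	adap->owner = THIS_MODULE;
	adap->algo = &foo_i2c_algo;
	adap->dev.parent = &pdev->dev;
	strscpy(adap->name, "foo-i2c", sizeof(adap->name));

	dev_info(&pdev->dev, "bus mode: %s\n",
		 i2c_freq_mode_string(I2C_MAX_STANDARD_MODE_FREQ));

	/* The adapter is deleted automatically when the device is unbound. */
	return devm_i2c_add_adapter(&pdev->dev, adap);
}
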
*/ -#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_USE_VHT 0x00 -#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_1 0x08 -#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2 0x10 -#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED 0x18 +#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_0 0x00 +#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_1 0x08 +#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2 0x10 +#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3 0x18 #define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK 0x18 #define IEEE80211_HE_MAC_CAP3_AMSDU_FRAG 0x20 #define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40 #define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80 -#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_SHIFT 3 - #define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01 #define IEEE80211_HE_MAC_CAP4_QTP 0x02 #define IEEE80211_HE_MAC_CAP4_BQR 0x04 -#define IEEE80211_HE_MAC_CAP4_SRP_RESP 0x08 +#define IEEE80211_HE_MAC_CAP4_PSR_RESP 0x08 #define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10 #define IEEE80211_HE_MAC_CAP4_OPS 0x20 -#define IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU 0x40 +#define IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU 0x40 /* Multi TID agg TX is split between byte #4 and #5 * The value is a combination of B39,B40,B41 */ @@ -2031,7 +2029,7 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, #define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 0x01 #define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 0x02 -#define IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECVITE_TRANSMISSION 0x04 +#define IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECTIVE_TRANSMISSION 0x04 #define IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU 0x08 #define IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX 0x10 #define IEEE80211_HE_MAC_CAP5_HE_DYNAMIC_SM_PS 0x20 @@ -2089,7 +2087,7 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, #define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK 0x18 #define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 0x00 #define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_2 0x20 -#define IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA 0x40 +#define IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU 0x40 #define IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER 0x80 #define IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE 0x01 @@ -2136,15 +2134,15 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, #define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU 0x01 #define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU 0x02 -#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB 0x04 -#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB 0x08 +#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB 0x04 +#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB 0x08 #define IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB 0x10 #define IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE 0x20 #define IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO 0x40 #define IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT 0x80 -#define IEEE80211_HE_PHY_CAP7_SRP_BASED_SR 0x01 -#define IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR 0x02 +#define IEEE80211_HE_PHY_CAP7_PSR_BASED_SR 0x01 +#define IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP 0x02 #define IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI 0x04 #define IEEE80211_HE_PHY_CAP7_MAX_NC_1 0x08 #define IEEE80211_HE_PHY_CAP7_MAX_NC_2 0x10 @@ -3861,4 +3859,11 @@ struct ieee80211_neighbor_ap_info { u8 channel; } __packed; +enum ieee80211_range_params_max_total_ltf { + IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_4 = 0, + IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_8, + IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_16, + 
IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_UNSPECIFIED, +}; + #endif /* LINUX_IEEE80211_H */ diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index b979005ea39c..2cc35038a8ca 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -69,6 +69,8 @@ bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto); bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto); bool br_multicast_enabled(const struct net_device *dev); bool br_multicast_router(const struct net_device *dev); +int br_mdb_replay(struct net_device *br_dev, struct net_device *dev, + struct notifier_block *nb, struct netlink_ext_ack *extack); #else static inline int br_multicast_list_adjacent(struct net_device *dev, struct list_head *br_ip_list) @@ -93,6 +95,13 @@ static inline bool br_multicast_router(const struct net_device *dev) { return false; } +static inline int br_mdb_replay(struct net_device *br_dev, + struct net_device *dev, + struct notifier_block *nb, + struct netlink_ext_ack *extack) +{ + return -EOPNOTSUPP; +} #endif #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING) @@ -102,6 +111,8 @@ int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid); int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto); int br_vlan_get_info(const struct net_device *dev, u16 vid, struct bridge_vlan_info *p_vinfo); +int br_vlan_replay(struct net_device *br_dev, struct net_device *dev, + struct notifier_block *nb, struct netlink_ext_ack *extack); #else static inline bool br_vlan_enabled(const struct net_device *dev) { @@ -128,6 +139,14 @@ static inline int br_vlan_get_info(const struct net_device *dev, u16 vid, { return -EINVAL; } + +static inline int br_vlan_replay(struct net_device *br_dev, + struct net_device *dev, + struct notifier_block *nb, + struct netlink_ext_ack *extack) +{ + return -EOPNOTSUPP; +} #endif #if IS_ENABLED(CONFIG_BRIDGE) @@ -136,6 +155,10 @@ struct net_device *br_fdb_find_port(const struct net_device *br_dev, __u16 vid); void br_fdb_clear_offload(const struct net_device *dev, u16 vid); bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag); +u8 br_port_get_stp_state(const struct net_device *dev); +clock_t br_get_ageing_time(struct net_device *br_dev); +int br_fdb_replay(struct net_device *br_dev, struct net_device *dev, + struct notifier_block *nb); #else static inline struct net_device * br_fdb_find_port(const struct net_device *br_dev, @@ -154,6 +177,23 @@ br_port_flag_is_set(const struct net_device *dev, unsigned long flag) { return false; } + +static inline u8 br_port_get_stp_state(const struct net_device *dev) +{ + return BR_STATE_DISABLED; +} + +static inline clock_t br_get_ageing_time(struct net_device *br_dev) +{ + return 0; +} + +static inline int br_fdb_replay(struct net_device *br_dev, + struct net_device *dev, + struct notifier_block *nb) +{ + return -EOPNOTSUPP; +} #endif #endif diff --git a/include/linux/if_rmnet.h b/include/linux/if_rmnet.h index 9661416a9bb4..4efb537f57f3 100644 --- a/include/linux/if_rmnet.h +++ b/include/linux/if_rmnet.h @@ -6,50 +6,43 @@ #define _LINUX_IF_RMNET_H_ struct rmnet_map_header { -#if defined(__LITTLE_ENDIAN_BITFIELD) - u8 pad_len:6; - u8 reserved_bit:1; - u8 cd_bit:1; -#elif defined (__BIG_ENDIAN_BITFIELD) - u8 cd_bit:1; - u8 reserved_bit:1; - u8 pad_len:6; -#else -#error "Please fix <asm/byteorder.h>" -#endif - u8 mux_id; - __be16 pkt_len; + u8 flags; /* MAP_CMD_FLAG, MAP_PAD_LEN_MASK */ + u8 mux_id; + __be16 pkt_len; /* Length of packet, including 
pad */ } __aligned(1); +/* rmnet_map_header flags field: + * PAD_LEN: number of pad bytes following packet data + * CMD: 1 = packet contains a MAP command; 0 = packet contains data + */ +#define MAP_PAD_LEN_MASK GENMASK(5, 0) +#define MAP_CMD_FLAG BIT(7) + struct rmnet_map_dl_csum_trailer { - u8 reserved1; -#if defined(__LITTLE_ENDIAN_BITFIELD) - u8 valid:1; - u8 reserved2:7; -#elif defined (__BIG_ENDIAN_BITFIELD) - u8 reserved2:7; - u8 valid:1; -#else -#error "Please fix <asm/byteorder.h>" -#endif - u16 csum_start_offset; - u16 csum_length; + u8 reserved1; + u8 flags; /* MAP_CSUM_DL_VALID_FLAG */ + __be16 csum_start_offset; + __be16 csum_length; __be16 csum_value; } __aligned(1); +/* rmnet_map_dl_csum_trailer flags field: + * VALID: 1 = checksum and length valid; 0 = ignore them + */ +#define MAP_CSUM_DL_VALID_FLAG BIT(0) + struct rmnet_map_ul_csum_header { __be16 csum_start_offset; -#if defined(__LITTLE_ENDIAN_BITFIELD) - u16 csum_insert_offset:14; - u16 udp_ind:1; - u16 csum_enabled:1; -#elif defined (__BIG_ENDIAN_BITFIELD) - u16 csum_enabled:1; - u16 udp_ind:1; - u16 csum_insert_offset:14; -#else -#error "Please fix <asm/byteorder.h>" -#endif + __be16 csum_info; /* MAP_CSUM_UL_* */ } __aligned(1); +/* csum_info field: + * OFFSET: where (offset in bytes) to insert computed checksum + * UDP: 1 = UDP checksum (zero checksum means no checksum) + * ENABLED: 1 = checksum computation requested + */ +#define MAP_CSUM_UL_OFFSET_MASK GENMASK(13, 0) +#define MAP_CSUM_UL_UDP_FLAG BIT(14) +#define MAP_CSUM_UL_ENABLED_FLAG BIT(15) + #endif /* !(_LINUX_IF_RMNET_H_) */ diff --git a/include/linux/iio/adc/adi-axi-adc.h b/include/linux/iio/adc/adi-axi-adc.h index c5d48e1c2d36..52620e5b8052 100644 --- a/include/linux/iio/adc/adi-axi-adc.h +++ b/include/linux/iio/adc/adi-axi-adc.h @@ -15,7 +15,7 @@ struct iio_chan_spec; * struct adi_axi_adc_chip_info - Chip specific information * @name Chip name * @id Chip ID (usually product ID) - * @channels Channel specifications of type @struct axi_adc_chan_spec + * @channels Channel specifications of type @struct iio_chan_spec * @num_channels Number of @channels * @scale_table Supported scales by the chip; tuples of 2 ints * @num_scales Number of scales in the table diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h index 5b502291d6a4..5c355be89814 100644 --- a/include/linux/iio/buffer-dmaengine.h +++ b/include/linux/iio/buffer-dmaengine.h @@ -7,10 +7,11 @@ #ifndef __IIO_DMAENGINE_H__ #define __IIO_DMAENGINE_H__ -struct iio_buffer; +struct iio_dev; struct device; -struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev, - const char *channel); +int devm_iio_dmaengine_buffer_setup(struct device *dev, + struct iio_dev *indio_dev, + const char *channel); #endif diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h index 8febc23f5f26..b6928ac5c63d 100644 --- a/include/linux/iio/buffer.h +++ b/include/linux/iio/buffer.h @@ -41,7 +41,7 @@ static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev, bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev, const unsigned long *mask); -void iio_device_attach_buffer(struct iio_dev *indio_dev, - struct iio_buffer *buffer); +int iio_device_attach_buffer(struct iio_dev *indio_dev, + struct iio_buffer *buffer); #endif /* _IIO_BUFFER_GENERIC_H_ */ diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h index a63dc07b7350..245b32918ae1 100644 --- a/include/linux/iio/buffer_impl.h +++ b/include/linux/iio/buffer_impl.h @@
-6,6 +6,8 @@ #ifdef CONFIG_IIO_BUFFER +#include <uapi/linux/iio/buffer.h> + struct iio_dev; struct iio_buffer; @@ -72,6 +74,9 @@ struct iio_buffer { /** @length: Number of datums in buffer. */ unsigned int length; + /** @flags: File ops flags including busy flag. */ + unsigned long flags; + /** @bytes_per_datum: Size of individual datum including timestamp. */ size_t bytes_per_datum; @@ -97,17 +102,14 @@ struct iio_buffer { /* @scan_timestamp: Does the scan mode include a timestamp. */ bool scan_timestamp; - /* @scan_el_dev_attr_list: List of scan element related attributes. */ - struct list_head scan_el_dev_attr_list; - - /* @buffer_group: Attributes of the buffer group. */ - struct attribute_group buffer_group; + /* @buffer_attr_list: List of buffer attributes. */ + struct list_head buffer_attr_list; /* - * @scan_el_group: Attribute group for those attributes not - * created from the iio_chan_info array. + * @buffer_group: Attributes of the new buffer group. + * Includes scan elements attributes. */ - struct attribute_group scan_el_group; + struct attribute_group buffer_group; /* @attrs: Standard attributes of the buffer. */ const struct attribute **attrs; @@ -115,6 +117,9 @@ struct iio_buffer { /* @demux_bounce: Buffer for doing gather from incoming scan. */ void *demux_bounce; + /* @attached_entry: Entry in the devices list of buffers attached by the driver. */ + struct list_head attached_entry; + /* @buffer_list: Entry in the devices list of current buffers. */ struct list_head buffer_list; diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h index c9b80be82440..7ce8a8adad58 100644 --- a/include/linux/iio/common/cros_ec_sensors_core.h +++ b/include/linux/iio/common/cros_ec_sensors_core.h @@ -96,8 +96,7 @@ struct platform_device; int cros_ec_sensors_core_init(struct platform_device *pdev, struct iio_dev *indio_dev, bool physical_device, cros_ec_sensors_capture_t trigger_capture, - cros_ec_sensorhub_push_data_cb_t push_data, - bool has_hw_fifo); + cros_ec_sensorhub_push_data_cb_t push_data); irqreturn_t cros_ec_sensors_capture(int irq, void *p); int cros_ec_sensors_push_data(struct iio_dev *indio_dev, diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h index 0a90ba8fa1bb..5fa5957586cf 100644 --- a/include/linux/iio/consumer.h +++ b/include/linux/iio/consumer.h @@ -242,6 +242,21 @@ int iio_read_channel_average_raw(struct iio_channel *chan, int *val); int iio_read_channel_processed(struct iio_channel *chan, int *val); /** + * iio_read_channel_processed_scale() - read and scale a processed value + * @chan: The channel being queried. + * @val: Value read back. + * @scale: Scale factor to apply during the conversion + * + * Returns an error code or 0. + * + * This function will read a processed value from a channel. This will work + * like @iio_read_channel_processed() but also scale with an additional + * scale factor while attempting to minimize any precision loss. + */ +int iio_read_channel_processed_scale(struct iio_channel *chan, int *val, + unsigned int scale); + +/** * iio_write_channel_attribute() - Write values to the device attribute. * @chan: The channel being queried. * @val: Value being written. diff --git a/include/linux/iio/dac/mcp4725.h b/include/linux/iio/dac/mcp4725.h index e9801c8d49c0..1f7e53c506b6 100644 --- a/include/linux/iio/dac/mcp4725.h +++ b/include/linux/iio/dac/mcp4725.h @@ -15,7 +15,7 @@ * @vref_buffered: Controls buffering of the external reference voltage. 
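A minimal consumer-side sketch of the scaled processed read documented in the consumer.h hunk above; the "temp" channel name and the millidegree scaling are assumptions for illustration.

#include <linux/err.h>
#include <linux/iio/consumer.h>

static int foo_read_millidegrees(struct device *dev, int *val)
{
	struct iio_channel *chan;
	int ret;

	chan = iio_channel_get(dev, "temp");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Read and scale by 1000 in one call to limit precision loss. */
	ret = iio_read_channel_processed_scale(chan, val, 1000);
	iio_channel_release(chan);

	return ret < 0 ? ret : 0;
}
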
* * Vref related settings are available only on MCP4756. See - * Documentation/devicetree/bindings/iio/dac/mcp4725.txt for more information. + * Documentation/devicetree/bindings/iio/dac/microchip,mcp4725.yaml for more information. */ struct mcp4725_platform_data { bool use_vref; diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h index 07c5a8e52ca8..32addd5e790e 100644 --- a/include/linux/iio/iio-opaque.h +++ b/include/linux/iio/iio-opaque.h @@ -7,11 +7,18 @@ * struct iio_dev_opaque - industrial I/O device opaque information * @indio_dev: public industrial I/O device information * @event_interface: event chrdevs associated with interrupt lines + * @attached_buffers: array of buffers statically attached by the driver + * @attached_buffers_cnt: number of buffers in the array of statically attached buffers + * @buffer_ioctl_handler: ioctl() handler for this IIO device's buffer interface * @buffer_list: list of all buffers currently attached * @channel_attr_list: keep track of automatically created channel * attributes * @chan_attr_group: group for all attrs in base directory * @ioctl_handlers: ioctl handlers registered with the core handler + * @groups: attribute groups + * @groupcounter: index of next attribute group + * @legacy_scan_el_group: attribute group for legacy scan elements attribute group + * @legacy_buffer_group: attribute group for legacy buffer attributes group * @debugfs_dentry: device specific debugfs dentry * @cached_reg_addr: cached register address for debugfs reads * @read_buf: read buffer to be used for the initial reg read @@ -20,10 +27,17 @@ struct iio_dev_opaque { struct iio_dev indio_dev; struct iio_event_interface *event_interface; + struct iio_buffer **attached_buffers; + unsigned int attached_buffers_cnt; + struct iio_ioctl_handler *buffer_ioctl_handler; struct list_head buffer_list; struct list_head channel_attr_list; struct attribute_group chan_attr_group; struct list_head ioctl_handlers; + const struct attribute_group **groups; + int groupcounter; + struct attribute_group legacy_scan_el_group; + struct attribute_group legacy_buffer_group; #if defined(CONFIG_DEBUG_FS) struct dentry *debugfs_dentry; unsigned cached_reg_addr; diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index e4a9822e6495..f2d65e2e88b6 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -518,8 +518,6 @@ struct iio_buffer_setup_ops { * @setup_ops: [DRIVER] callbacks to call before and after buffer * enable/disable * @chrdev: [INTERN] associated character device - * @groups: [INTERN] attribute groups - * @groupcounter: [INTERN] index of next attribute group * @flags: [INTERN] file ops related flags including busy flag. 
* @priv: [DRIVER] reference to driver's private information * **MUST** be accessed **ONLY** via iio_priv() helper @@ -556,9 +554,6 @@ struct iio_dev { struct mutex info_exist_lock; const struct iio_buffer_setup_ops *setup_ops; struct cdev chrdev; -#define IIO_MAX_GROUPS 6 - const struct attribute_group *groups[IIO_MAX_GROUPS + 1]; - int groupcounter; unsigned long flags; void *priv; @@ -698,7 +693,7 @@ static inline void *iio_priv(const struct iio_dev *indio_dev) void iio_device_free(struct iio_dev *indio_dev); struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv); __printf(2, 3) -struct iio_trigger *devm_iio_trigger_alloc(struct device *dev, +struct iio_trigger *devm_iio_trigger_alloc(struct device *parent, const char *fmt, ...); /** * iio_buffer_enabled() - helper function to test if the buffer is enabled diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index 04e96d688ba9..f9b728d490b1 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -428,6 +428,16 @@ static inline int adis_initial_startup(struct adis *adis) return ret; } +static inline void adis_dev_lock(struct adis *adis) +{ + mutex_lock(&adis->state_lock); +} + +static inline void adis_dev_unlock(struct adis *adis) +{ + mutex_unlock(&adis->state_lock); +} + int adis_single_conversion(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, unsigned int error_mask, int *val); diff --git a/include/linux/iio/kfifo_buf.h b/include/linux/iio/kfifo_buf.h index 1fc1efa7799d..ccd2ceae7b25 100644 --- a/include/linux/iio/kfifo_buf.h +++ b/include/linux/iio/kfifo_buf.h @@ -3,11 +3,20 @@ #define __LINUX_IIO_KFIFO_BUF_H__ struct iio_buffer; +struct iio_buffer_setup_ops; +struct iio_dev; struct device; struct iio_buffer *iio_kfifo_allocate(void); void iio_kfifo_free(struct iio_buffer *r); -struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev); +int devm_iio_kfifo_buffer_setup_ext(struct device *dev, + struct iio_dev *indio_dev, + int mode_flags, + const struct iio_buffer_setup_ops *setup_ops, + const struct attribute **buffer_attrs); + +#define devm_iio_kfifo_buffer_setup(dev, indio_dev, mode_flags, setup_ops) \ + devm_iio_kfifo_buffer_setup_ext((dev), (indio_dev), (mode_flags), (setup_ops), NULL) #endif diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h index b532c875bc24..e51fba66de4b 100644 --- a/include/linux/iio/sysfs.h +++ b/include/linux/iio/sysfs.h @@ -9,6 +9,7 @@ #ifndef _INDUSTRIAL_IO_SYSFS_H_ #define _INDUSTRIAL_IO_SYSFS_H_ +struct iio_buffer; struct iio_chan_spec; /** @@ -17,12 +18,14 @@ struct iio_chan_spec; * @address: associated register address * @l: list head for maintaining list of dynamically created attrs * @c: specification for the underlying channel + * @buffer: the IIO buffer to which this attribute belongs to (if any) */ struct iio_dev_attr { struct device_attribute dev_attr; u64 address; struct list_head l; struct iio_chan_spec const *c; + struct iio_buffer *buffer; }; #define to_iio_dev_attr(_dev_attr) \ diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h index 055890b6ffcf..096f68dd2e0c 100644 --- a/include/linux/iio/trigger.h +++ b/include/linux/iio/trigger.h @@ -161,7 +161,8 @@ void iio_trigger_poll_chained(struct iio_trigger *trig); irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private); -__printf(1, 2) struct iio_trigger *iio_trigger_alloc(const char *fmt, ...); +__printf(2, 3) +struct iio_trigger *iio_trigger_alloc(struct device *parent, const char *fmt, ...); void 
iio_trigger_free(struct iio_trigger *trig); /** diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h index 1e3ed6f55bca..84b3f8175cc6 100644 --- a/include/linux/iio/types.h +++ b/include/linux/iio/types.h @@ -16,6 +16,7 @@ enum iio_event_info { IIO_EV_INFO_PERIOD, IIO_EV_INFO_HIGH_PASS_FILTER_3DB, IIO_EV_INFO_LOW_PASS_FILTER_3DB, + IIO_EV_INFO_TIMEOUT, }; #define IIO_VAL_INT 1 @@ -50,6 +51,7 @@ enum iio_chan_info_enum { IIO_CHAN_INFO_PHASE, IIO_CHAN_INFO_HARDWAREGAIN, IIO_CHAN_INFO_HYSTERESIS, + IIO_CHAN_INFO_HYSTERESIS_RELATIVE, IIO_CHAN_INFO_INT_TIME, IIO_CHAN_INFO_ENABLE, IIO_CHAN_INFO_CALIBHEIGHT, diff --git a/include/linux/init.h b/include/linux/init.h index 31f54de58429..d82b4b2e1d25 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -47,7 +47,7 @@ /* These are for everybody (although not all archs will actually discard it in modules) */ -#define __init __section(".init.text") __cold __latent_entropy __noinitretpoline +#define __init __section(".init.text") __cold __latent_entropy __noinitretpoline __nocfi #define __initdata __section(".init.data") #define __initconst __section(".init.rodata") #define __exitdata __section(".exit.data") @@ -220,8 +220,8 @@ extern bool initcall_debug; __initcall_name(initstub, __iid, id) #define __define_initcall_stub(__stub, fn) \ - int __init __stub(void); \ - int __init __stub(void) \ + int __init __cficanonical __stub(void); \ + int __init __cficanonical __stub(void) \ { \ return fn(); \ } \ @@ -242,7 +242,8 @@ extern bool initcall_debug; asm(".section \"" __sec "\", \"a\" \n" \ __stringify(__name) ": \n" \ ".long " __stringify(__stub) " - . \n" \ - ".previous \n"); + ".previous \n"); \ + static_assert(__same_type(initcall_t, &fn)); #else #define ____define_initcall(fn, __unused, __name, __sec) \ static initcall_t __name __used \ diff --git a/include/linux/init_task.h b/include/linux/init_task.h index b2412b4d4c20..40fc5813cf93 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -25,7 +25,6 @@ extern struct files_struct init_files; extern struct fs_struct init_fs; extern struct nsproxy init_nsproxy; -extern struct group_info init_groups; extern struct cred init_cred; #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE diff --git a/include/linux/initrd.h b/include/linux/initrd.h index 85c15717af34..1bbe9af48dc3 100644 --- a/include/linux/initrd.h +++ b/include/linux/initrd.h @@ -20,8 +20,10 @@ extern void free_initrd_mem(unsigned long, unsigned long); #ifdef CONFIG_BLK_DEV_INITRD extern void __init reserve_initrd_mem(void); +extern void wait_for_initramfs(void); #else static inline void __init reserve_initrd_mem(void) {} +static inline void wait_for_initramfs(void) {} #endif extern phys_addr_t phys_initrd_start; diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 1bc46b88711a..03faf20a6817 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -20,6 +20,7 @@ #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/dmar.h> #include <linux/ioasid.h> +#include <linux/bitfield.h> #include <asm/cacheflush.h> #include <asm/iommu.h> @@ -80,6 +81,7 @@ #define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */ #define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */ #define DMAR_ICS_REG 0x9c /* Invalidation complete status register */ +#define DMAR_IQER_REG 0xb0 /* Invalidation queue error record register */ #define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */ #define DMAR_PQH_REG 0xc0 /* Page request queue head register */ #define 
DMAR_PQT_REG 0xc8 /* Page request queue tail register */ @@ -126,6 +128,10 @@ #define DMAR_VCMD_REG 0xe10 /* Virtual command register */ #define DMAR_VCRSP_REG 0xe20 /* Virtual command response register */ +#define DMAR_IQER_REG_IQEI(reg) FIELD_GET(GENMASK_ULL(3, 0), reg) +#define DMAR_IQER_REG_ITESID(reg) FIELD_GET(GENMASK_ULL(47, 32), reg) +#define DMAR_IQER_REG_ICESID(reg) FIELD_GET(GENMASK_ULL(63, 48), reg) + #define OFFSET_STRIDE (9) #define dmar_readq(a) readq(a) @@ -372,6 +378,7 @@ enum { /* PASID cache invalidation granu */ #define QI_PC_ALL_PASIDS 0 #define QI_PC_PASID_SEL 1 +#define QI_PC_GLOBAL 3 #define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) #define QI_EIOTLB_IH(ih) (((u64)ih) << 6) @@ -763,14 +770,11 @@ u32 intel_svm_get_pasid(struct iommu_sva *handle); int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt, struct iommu_page_response *msg); -struct svm_dev_ops; - struct intel_svm_dev { struct list_head list; struct rcu_head rcu; struct device *dev; struct intel_iommu *iommu; - struct svm_dev_ops *ops; struct iommu_sva sva; u32 pasid; int users; diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h index 39d368a810b8..10fa80eef13a 100644 --- a/include/linux/intel-svm.h +++ b/include/linux/intel-svm.h @@ -8,13 +8,6 @@ #ifndef __INTEL_SVM_H__ #define __INTEL_SVM_H__ -struct device; - -struct svm_dev_ops { - void (*fault_cb)(struct device *dev, u32 pasid, u64 address, - void *private, int rwxp, int response); -}; - /* Values for rxwp in fault_cb callback */ #define SVM_REQ_READ (1<<3) #define SVM_REQ_WRITE (1<<2) @@ -22,16 +15,6 @@ struct svm_dev_ops { #define SVM_REQ_PRIV (1<<0) /* - * The SVM_FLAG_PRIVATE_PASID flag requests a PASID which is *not* the "main" - * PASID for the current process. Even if a PASID already exists, a new one - * will be allocated. And the PASID allocated with SVM_FLAG_PRIVATE_PASID - * will not be given to subsequent callers. This facility allows a driver to - * disambiguate between multiple device contexts which access the same MM, - * if there is no other way to do so. It should be used sparingly, if at all. - */ -#define SVM_FLAG_PRIVATE_PASID (1<<0) - -/* * The SVM_FLAG_SUPERVISOR_MODE flag requests a PASID which can be used only * for access to kernel addresses. No IOTLB flushes are automatically done * for kernel mappings; it is valid only for access to the kernel's static @@ -42,18 +25,18 @@ struct svm_dev_ops { * It is unlikely that we will ever hook into flush_tlb_kernel_range() to * do such IOTLB flushes automatically. */ -#define SVM_FLAG_SUPERVISOR_MODE (1<<1) +#define SVM_FLAG_SUPERVISOR_MODE BIT(0) /* * The SVM_FLAG_GUEST_MODE flag is used when a PASID bind is for guest * processes. Compared to the host bind, the primary differences are: * 1. mm life cycle management * 2. fault reporting */ -#define SVM_FLAG_GUEST_MODE (1<<2) +#define SVM_FLAG_GUEST_MODE BIT(1) /* * The SVM_FLAG_GUEST_PASID flag is used when a guest has its own PASID space, * which requires guest and host PASID translation at both directions. 
*/ -#define SVM_FLAG_GUEST_PASID (1<<3) +#define SVM_FLAG_GUEST_PASID BIT(2) #endif /* __INTEL_SVM_H__ */ diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h index 50b8398ffd21..93780834fc8f 100644 --- a/include/linux/intel_rapl.h +++ b/include/linux/intel_rapl.h @@ -33,7 +33,7 @@ enum rapl_domain_reg_id { RAPL_DOMAIN_REG_MAX, }; -struct rapl_package; +struct rapl_domain; enum rapl_primitives { ENERGY_COUNTER, diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 967e25767153..4777850a6dc7 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -61,6 +61,9 @@ * interrupt handler after suspending interrupts. For system * wakeup devices users need to implement wakeup detection in * their interrupt handlers. + * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it. + * Users will enable it explicitly by enable_irq() or enable_nmi() + * later. */ #define IRQF_SHARED 0x00000080 #define IRQF_PROBE_SHARED 0x00000100 @@ -74,6 +77,7 @@ #define IRQF_NO_THREAD 0x00010000 #define IRQF_EARLY_RESUME 0x00020000 #define IRQF_COND_SUSPEND 0x00040000 +#define IRQF_NO_AUTOEN 0x00080000 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) @@ -654,26 +658,21 @@ enum TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ }; -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) static inline int tasklet_trylock(struct tasklet_struct *t) { return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); } -static inline void tasklet_unlock(struct tasklet_struct *t) -{ - smp_mb__before_atomic(); - clear_bit(TASKLET_STATE_RUN, &(t)->state); -} +void tasklet_unlock(struct tasklet_struct *t); +void tasklet_unlock_wait(struct tasklet_struct *t); +void tasklet_unlock_spin_wait(struct tasklet_struct *t); -static inline void tasklet_unlock_wait(struct tasklet_struct *t) -{ - while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } -} #else -#define tasklet_trylock(t) 1 -#define tasklet_unlock_wait(t) do { } while (0) -#define tasklet_unlock(t) do { } while (0) +static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; } +static inline void tasklet_unlock(struct tasklet_struct *t) { } +static inline void tasklet_unlock_wait(struct tasklet_struct *t) { } +static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { } #endif extern void __tasklet_schedule(struct tasklet_struct *t); @@ -698,6 +697,17 @@ static inline void tasklet_disable_nosync(struct tasklet_struct *t) smp_mb__after_atomic(); } +/* + * Do not use in new code. Disabling tasklets from atomic contexts is + * error prone and should be avoided. 
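A short sketch of the new IRQF_NO_AUTOEN flag described in the interrupt.h hunk above: the line is requested while still disabled and only turned on once the hypothetical foo_* driver has finished setting up its hardware.

#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t foo_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct device *dev, int irq, void *priv)
{
	int ret;

	/* The IRQ stays disabled after the request until enable_irq(). */
	ret = devm_request_irq(dev, irq, foo_irq_handler, IRQF_NO_AUTOEN,
			       "foo", priv);
	if (ret)
		return ret;

	/* ... finish device setup ... */

	enable_irq(irq);
	return 0;
}
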
+ */ +static inline void tasklet_disable_in_atomic(struct tasklet_struct *t) +{ + tasklet_disable_nosync(t); + tasklet_unlock_spin_wait(t); + smp_mb(); +} + static inline void tasklet_disable(struct tasklet_struct *t) { tasklet_disable_nosync(t); @@ -712,7 +722,6 @@ static inline void tasklet_enable(struct tasklet_struct *t) } extern void tasklet_kill(struct tasklet_struct *t); -extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); extern void tasklet_init(struct tasklet_struct *t, void (*func)(unsigned long), unsigned long data); extern void tasklet_setup(struct tasklet_struct *t, diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index c093e81310a9..e9743cfd8585 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -220,3 +220,6 @@ io_mapping_free(struct io_mapping *iomap) } #endif /* _LINUX_IO_MAPPING_H */ + +int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn, unsigned long size); diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index a4c9ca2c31f1..4d40dfa75b55 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -204,10 +204,6 @@ struct io_pgtable { #define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops) -struct io_pgtable_domain_attr { - unsigned long quirks; -}; - static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop) { if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_all) diff --git a/include/linux/io.h b/include/linux/io.h index 8394c56babc2..9595151d800d 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -31,15 +31,6 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end, } #endif -#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP -void __init ioremap_huge_init(void); -int arch_ioremap_p4d_supported(void); -int arch_ioremap_pud_supported(void); -int arch_ioremap_pmd_supported(void); -#else -static inline void ioremap_huge_init(void) { } -#endif - /* * Managed iomap interface */ @@ -68,6 +59,8 @@ void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset, resource_size_t size); void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset, resource_size_t size); +void __iomem *devm_ioremap_np(struct device *dev, resource_size_t offset, + resource_size_t size); void devm_iounmap(struct device *dev, void __iomem *addr); int check_signature(const volatile void __iomem *io_addr, const unsigned char *signature, int length); @@ -80,20 +73,20 @@ void devm_memunmap(struct device *dev, void *addr); #ifdef CONFIG_PCI /* * The PCI specifications (Rev 3.0, 3.2.5 "Transaction Ordering and - * Posting") mandate non-posted configuration transactions. There is - * no ioremap API in the kernel that can guarantee non-posted write - * semantics across arches so provide a default implementation for - * mapping PCI config space that defaults to ioremap(); arches - * should override it if they have memory mapping implementations that - * guarantee non-posted writes semantics to make the memory mapping - * compliant with the PCI specification. + * Posting") mandate non-posted configuration transactions. This default + * implementation attempts to use the ioremap_np() API to provide this + * on arches that support it, and falls back to ioremap() on those that + * don't. 
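A brief sketch of the managed non-posted mapping helper declared in the io.h hunk above, as a platform driver might use it; foo_map_regs and the single MEM resource are assumptions for illustration.

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int foo_map_regs(struct platform_device *pdev, void __iomem **regs)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* Non-posted MMIO mapping, released automatically on driver detach. */
	*regs = devm_ioremap_np(&pdev->dev, res->start, resource_size(res));
	if (!*regs)
		return -ENOMEM;

	return 0;
}
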
Overriding this function is deprecated; arches that properly + * support non-posted accesses should implement ioremap_np() instead, which + * this default implementation can then use to return mappings compliant with + * the PCI specification. */ #ifndef pci_remap_cfgspace #define pci_remap_cfgspace pci_remap_cfgspace static inline void __iomem *pci_remap_cfgspace(phys_addr_t offset, size_t size) { - return ioremap(offset, size); + return ioremap_np(offset, size) ?: ioremap(offset, size); } #endif #endif diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h index 79cde9906be0..04b650bcbbe5 100644 --- a/include/linux/io_uring.h +++ b/include/linux/io_uring.h @@ -7,19 +7,17 @@ #if defined(CONFIG_IO_URING) struct sock *io_uring_get_socket(struct file *file); -void __io_uring_task_cancel(void); -void __io_uring_files_cancel(struct files_struct *files); +void __io_uring_cancel(struct files_struct *files); void __io_uring_free(struct task_struct *tsk); -static inline void io_uring_task_cancel(void) +static inline void io_uring_files_cancel(struct files_struct *files) { if (current->io_uring) - __io_uring_task_cancel(); + __io_uring_cancel(files); } -static inline void io_uring_files_cancel(struct files_struct *files) +static inline void io_uring_task_cancel(void) { - if (current->io_uring) - __io_uring_files_cancel(files); + return io_uring_files_cancel(NULL); } static inline void io_uring_free(struct task_struct *tsk) { diff --git a/include/linux/iomap.h b/include/linux/iomap.h index d202fd2d0f91..c87d0cb0de6d 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -198,7 +198,6 @@ struct iomap_ioend { struct inode *io_inode; /* file being written to */ size_t io_size; /* size of the extent */ loff_t io_offset; /* offset in the file */ - void *io_private; /* file system private data */ struct bio *io_bio; /* bio being built */ struct bio io_inline_bio; /* MUST BE LAST! */ }; @@ -234,9 +233,7 @@ struct iomap_writepage_ctx { void iomap_finish_ioends(struct iomap_ioend *ioend, int error); void iomap_ioend_try_merge(struct iomap_ioend *ioend, - struct list_head *more_ioends, - void (*merge_private)(struct iomap_ioend *ioend, - struct iomap_ioend *next)); + struct list_head *more_ioends); void iomap_sort_ioends(struct list_head *ioend_list); int iomap_writepage(struct page *page, struct writeback_control *wbc, struct iomap_writepage_ctx *wpc, diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 5e7fe519430a..32d448050bf7 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -96,32 +96,6 @@ enum iommu_cap { IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */ }; -/* - * Following constraints are specifc to FSL_PAMUV1: - * -aperture must be power of 2, and naturally aligned - * -number of windows must be power of 2, and address space size - * of each window is determined by aperture size / # of windows - * -the actual size of the mapped region of a window must be power - * of 2 starting with 4KB and physical address must be naturally - * aligned. - * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned contraints. - * The caller can invoke iommu_domain_get_attr to check if the underlying - * iommu implementation supports these constraints. 
- */ - -enum iommu_attr { - DOMAIN_ATTR_GEOMETRY, - DOMAIN_ATTR_PAGING, - DOMAIN_ATTR_WINDOWS, - DOMAIN_ATTR_FSL_PAMU_STASH, - DOMAIN_ATTR_FSL_PAMU_ENABLE, - DOMAIN_ATTR_FSL_PAMUV1, - DOMAIN_ATTR_NESTING, /* two stages of translation */ - DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, - DOMAIN_ATTR_IO_PGTABLE_CFG, - DOMAIN_ATTR_MAX, -}; - /* These are the possible reserved region types */ enum iommu_resv_type { /* Memory regions which must be mapped 1:1 at all times */ @@ -156,10 +130,24 @@ struct iommu_resv_region { enum iommu_resv_type type; }; -/* Per device IOMMU features */ +/** + * enum iommu_dev_features - Per device IOMMU features + * @IOMMU_DEV_FEAT_AUX: Auxiliary domain feature + * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses + * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally + * enabling %IOMMU_DEV_FEAT_SVA requires + * %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page + * Faults themselves instead of relying on the IOMMU. When + * supported, this feature must be enabled before and + * disabled after %IOMMU_DEV_FEAT_SVA. + * + * Device drivers query whether a feature is supported using + * iommu_dev_has_feature(), and enable it using iommu_dev_enable_feature(). + */ enum iommu_dev_features { - IOMMU_DEV_FEAT_AUX, /* Aux-domain feature */ - IOMMU_DEV_FEAT_SVA, /* Shared Virtual Addresses */ + IOMMU_DEV_FEAT_AUX, + IOMMU_DEV_FEAT_SVA, + IOMMU_DEV_FEAT_IOPF, }; #define IOMMU_PASID_INVALID (-1U) @@ -203,13 +191,11 @@ struct iommu_iotlb_gather { * @probe_finalize: Do final setup work after the device is added to an IOMMU * group and attached to the groups domain * @device_group: find iommu group for a particular device - * @domain_get_attr: Query domain attributes - * @domain_set_attr: Change domain attributes + * @enable_nesting: Enable nesting + * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*) * @get_resv_regions: Request list of reserved regions for a device * @put_resv_regions: Free list of reserved regions for a device * @apply_resv_region: Temporary helper call-back for iova reserved ranges - * @domain_window_enable: Configure and enable a particular window for a domain - * @domain_window_disable: Disable a particular window for a domain * @of_xlate: add OF master IDs to iommu grouping * @is_attach_deferred: Check if domain attach should be deferred from iommu * driver init to device driver init (default no) @@ -255,10 +241,9 @@ struct iommu_ops { void (*release_device)(struct device *dev); void (*probe_finalize)(struct device *dev); struct iommu_group *(*device_group)(struct device *dev); - int (*domain_get_attr)(struct iommu_domain *domain, - enum iommu_attr attr, void *data); - int (*domain_set_attr)(struct iommu_domain *domain, - enum iommu_attr attr, void *data); + int (*enable_nesting)(struct iommu_domain *domain); + int (*set_pgtable_quirks)(struct iommu_domain *domain, + unsigned long quirks); /* Request/Free a list of reserved regions for a device */ void (*get_resv_regions)(struct device *dev, struct list_head *list); @@ -267,11 +252,6 @@ struct iommu_ops { struct iommu_domain *domain, struct iommu_resv_region *region); - /* Window handling functions */ - int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, - phys_addr_t paddr, u64 size, int prot); - void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr); - int (*of_xlate)(struct device *dev, struct of_phandle_args *args); bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev); @@ -353,6 +333,7 @@ struct iommu_fault_param { * 
struct dev_iommu - Collection of per-device IOMMU data * * @fault_param: IOMMU detected device fault reporting data + * @iopf_param: I/O Page Fault queue and data * @fwspec: IOMMU fwspec data * @iommu_dev: IOMMU device this device is linked to * @priv: IOMMU Driver private data @@ -363,12 +344,15 @@ struct iommu_fault_param { struct dev_iommu { struct mutex lock; struct iommu_fault_param *fault_param; + struct iopf_device_param *iopf_param; struct iommu_fwspec *fwspec; struct iommu_device *iommu_dev; void *priv; }; -int iommu_device_register(struct iommu_device *iommu); +int iommu_device_register(struct iommu_device *iommu, + const struct iommu_ops *ops, + struct device *hwdev); void iommu_device_unregister(struct iommu_device *iommu); int iommu_device_sysfs_add(struct iommu_device *iommu, struct device *parent, @@ -379,25 +363,6 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link); void iommu_device_unlink(struct iommu_device *iommu, struct device *link); int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain); -static inline void __iommu_device_set_ops(struct iommu_device *iommu, - const struct iommu_ops *ops) -{ - iommu->ops = ops; -} - -#define iommu_device_set_ops(iommu, ops) \ -do { \ - struct iommu_ops *__ops = (struct iommu_ops *)(ops); \ - __ops->owner = THIS_MODULE; \ - __iommu_device_set_ops(iommu, __ops); \ -} while (0) - -static inline void iommu_device_set_fwnode(struct iommu_device *iommu, - struct fwnode_handle *fwnode) -{ - iommu->fwnode = fwnode; -} - static inline struct iommu_device *dev_to_iommu_device(struct device *dev) { return (struct iommu_device *)dev_get_drvdata(dev); @@ -507,15 +472,12 @@ extern int iommu_page_response(struct device *dev, extern int iommu_group_id(struct iommu_group *group); extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *); -extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr, - void *data); -extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, - void *data); +int iommu_enable_nesting(struct iommu_domain *domain); +int iommu_set_pgtable_quirks(struct iommu_domain *domain, + unsigned long quirks); -/* Window handling function prototypes */ -extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, - phys_addr_t offset, u64 size, - int prot); +void iommu_set_dma_strict(bool val); +bool iommu_get_dma_strict(struct iommu_domain *domain); extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags); @@ -547,7 +509,7 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain, * structure can be rewritten. 
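Note: with iommu_device_register() now taking the ops and the backing hardware device directly, the separate iommu_device_set_ops()/iommu_device_set_fwnode() step disappears. A hedged sketch of how an IOMMU driver's probe path might look under the new signature; struct my_iommu and my_iommu_ops are placeholders, not a real driver.

#include <linux/iommu.h>
#include <linux/platform_device.h>

/* Placeholder per-instance state for a fictional IOMMU driver. */
struct my_iommu {
	struct iommu_device iommu;
};

static const struct iommu_ops my_iommu_ops = { /* driver callbacks */ };

static int my_iommu_register(struct my_iommu *smmu, struct platform_device *pdev)
{
	/*
	 * Previously: iommu_device_set_ops() and iommu_device_set_fwnode()
	 * followed by iommu_device_register(&smmu->iommu).
	 */
	return iommu_device_register(&smmu->iommu, &my_iommu_ops, &pdev->dev);
}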
*/ if (gather->pgsize != size || - end < gather->start || start > gather->end) { + end + 1 < gather->start || start > gather->end + 1) { if (gather->pgsize) iommu_iotlb_sync(domain, gather); gather->pgsize = size; @@ -571,8 +533,7 @@ struct iommu_group *fsl_mc_device_group(struct device *dev); * struct iommu_fwspec - per-device IOMMU instance data * @ops: ops for this device's IOMMU * @iommu_fwnode: firmware handle for this device's IOMMU - * @iommu_priv: IOMMU driver private data for this device - * @num_pasid_bits: number of PASID bits supported by this device + * @flags: IOMMU_FWSPEC_* flags * @num_ids: number of associated device IDs * @ids: IDs which this device may present to the IOMMU */ @@ -580,7 +541,6 @@ struct iommu_fwspec { const struct iommu_ops *ops; struct fwnode_handle *iommu_fwnode; u32 flags; - u32 num_pasid_bits; unsigned int num_ids; u32 ids[]; }; @@ -742,13 +702,6 @@ static inline void iommu_iotlb_sync(struct iommu_domain *domain, { } -static inline int iommu_domain_window_enable(struct iommu_domain *domain, - u32 wnd_nr, phys_addr_t paddr, - u64 size, int prot) -{ - return -ENODEV; -} - static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { return 0; @@ -889,33 +842,19 @@ static inline int iommu_group_id(struct iommu_group *group) return -ENODEV; } -static inline int iommu_domain_get_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) -{ - return -EINVAL; -} - -static inline int iommu_domain_set_attr(struct iommu_domain *domain, - enum iommu_attr attr, void *data) +static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain, + unsigned long quirks) { - return -EINVAL; + return 0; } -static inline int iommu_device_register(struct iommu_device *iommu) +static inline int iommu_device_register(struct iommu_device *iommu, + const struct iommu_ops *ops, + struct device *hwdev) { return -ENODEV; } -static inline void iommu_device_set_ops(struct iommu_device *iommu, - const struct iommu_ops *ops) -{ -} - -static inline void iommu_device_set_fwnode(struct iommu_device *iommu, - struct fwnode_handle *fwnode) -{ -} - static inline struct iommu_device *dev_to_iommu_device(struct device *dev) { return NULL; diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 55de385c839c..8359c50f9988 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -108,6 +108,7 @@ struct resource { #define IORESOURCE_MEM_32BIT (3<<3) #define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */ #define IORESOURCE_MEM_EXPANSIONROM (1<<6) +#define IORESOURCE_MEM_NONPOSTED (1<<7) /* PnP I/O specific bits (IORESOURCE_BITS) */ #define IORESOURCE_IO_16BIT_ADDR (1<<0) @@ -331,7 +332,7 @@ static inline void irqresource_disabled(struct resource *res, u32 irq) { res->start = irq; res->end = irq; - res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET; + res->flags |= IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET; } extern struct address_space *iomem_get_mapping(void); diff --git a/include/linux/iova.h b/include/linux/iova.h index c834c01c0a5b..71d8a2de6635 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -95,6 +95,7 @@ struct iova_domain { flush-queues */ atomic_t fq_timer_on; /* 1 when timer is active, 0 when not */ + struct hlist_node cpuhp_dead; }; static inline unsigned long iova_size(struct iova *iova) @@ -156,7 +157,6 @@ int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb, iova_entry_dtor entry_dtor); struct iova *find_iova(struct iova_domain 
*iovad, unsigned long pfn); void put_iova_domain(struct iova_domain *iovad); -void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad); #else static inline int iova_cache_get(void) { @@ -233,10 +233,6 @@ static inline void put_iova_domain(struct iova_domain *iovad) { } -static inline void free_cpu_cached_iovas(unsigned int cpu, - struct iova_domain *iovad) -{ -} #endif #endif diff --git a/include/linux/irq.h b/include/linux/irq.h index 2efde6a79b7e..31b347c9f8dd 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -116,7 +116,7 @@ enum { * IRQ_SET_MASK_NOCPY - OK, chip did update irq_common_data.affinity * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to * support stacked irqchips, which indicates skipping - * all descendent irqchips. + * all descendant irqchips. */ enum { IRQ_SET_MASK_OK = 0, @@ -302,7 +302,7 @@ static inline bool irqd_is_level_type(struct irq_data *d) /* * Must only be called of irqchip.irq_set_affinity() or low level - * hieararchy domain allocation functions. + * hierarchy domain allocation functions. */ static inline void irqd_set_single_target(struct irq_data *d) { @@ -1258,11 +1258,13 @@ int __init set_handle_irq(void (*handle_irq)(struct pt_regs *)); */ extern void (*handle_arch_irq)(struct pt_regs *) __ro_after_init; #else +#ifndef set_handle_irq #define set_handle_irq(handle_irq) \ do { \ (void)handle_irq; \ WARN_ON(1); \ } while (0) #endif +#endif #endif /* _LINUX_IRQ_H */ diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index f6d092fdb93d..81cbf85f73de 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -575,67 +575,11 @@ #define ICC_SRE_EL1_DFB (1U << 1) #define ICC_SRE_EL1_SRE (1U << 0) -/* - * Hypervisor interface registers (SRE only) - */ -#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1) - -#define ICH_LR_EOI (1ULL << 41) -#define ICH_LR_GROUP (1ULL << 60) -#define ICH_LR_HW (1ULL << 61) -#define ICH_LR_STATE (3ULL << 62) -#define ICH_LR_PENDING_BIT (1ULL << 62) -#define ICH_LR_ACTIVE_BIT (1ULL << 63) -#define ICH_LR_PHYS_ID_SHIFT 32 -#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT) -#define ICH_LR_PRIORITY_SHIFT 48 -#define ICH_LR_PRIORITY_MASK (0xffULL << ICH_LR_PRIORITY_SHIFT) - /* These are for GICv2 emulation only */ #define GICH_LR_VIRTUALID (0x3ffUL << 0) #define GICH_LR_PHYSID_CPUID_SHIFT (10) #define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT) -#define ICH_MISR_EOI (1 << 0) -#define ICH_MISR_U (1 << 1) - -#define ICH_HCR_EN (1 << 0) -#define ICH_HCR_UIE (1 << 1) -#define ICH_HCR_NPIE (1 << 3) -#define ICH_HCR_TC (1 << 10) -#define ICH_HCR_TALL0 (1 << 11) -#define ICH_HCR_TALL1 (1 << 12) -#define ICH_HCR_EOIcount_SHIFT 27 -#define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT) - -#define ICH_VMCR_ACK_CTL_SHIFT 2 -#define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT) -#define ICH_VMCR_FIQ_EN_SHIFT 3 -#define ICH_VMCR_FIQ_EN_MASK (1 << ICH_VMCR_FIQ_EN_SHIFT) -#define ICH_VMCR_CBPR_SHIFT 4 -#define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT) -#define ICH_VMCR_EOIM_SHIFT 9 -#define ICH_VMCR_EOIM_MASK (1 << ICH_VMCR_EOIM_SHIFT) -#define ICH_VMCR_BPR1_SHIFT 18 -#define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT) -#define ICH_VMCR_BPR0_SHIFT 21 -#define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT) -#define ICH_VMCR_PMR_SHIFT 24 -#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) -#define ICH_VMCR_ENG0_SHIFT 0 -#define ICH_VMCR_ENG0_MASK (1 << ICH_VMCR_ENG0_SHIFT) -#define 
ICH_VMCR_ENG1_SHIFT 1 -#define ICH_VMCR_ENG1_MASK (1 << ICH_VMCR_ENG1_SHIFT) - -#define ICH_VTR_PRI_BITS_SHIFT 29 -#define ICH_VTR_PRI_BITS_MASK (7 << ICH_VTR_PRI_BITS_SHIFT) -#define ICH_VTR_ID_BITS_SHIFT 23 -#define ICH_VTR_ID_BITS_MASK (7 << ICH_VTR_ID_BITS_SHIFT) -#define ICH_VTR_SEIS_SHIFT 22 -#define ICH_VTR_SEIS_MASK (1 << ICH_VTR_SEIS_SHIFT) -#define ICH_VTR_A3V_SHIFT 21 -#define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT) - #define ICC_IAR1_EL1_SPURIOUS 0x3ff #define ICC_SRE_EL2_SRE (1 << 0) diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index 943c3411ca10..2c63375bbd43 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -145,4 +145,6 @@ int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *vpe_ops, const struct irq_domain_ops *sgi_ops); +bool gic_cpuif_has_vsgi(void); + #endif diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 891b323266df..df4651250785 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -32,7 +32,7 @@ struct pt_regs; * @last_unhandled: aging timer for unhandled count * @irqs_unhandled: stats field for spurious unhandled interrupts * @threads_handled: stats field for deferred spurious detection of threaded handlers - * @threads_handled_last: comparator field for deferred spurious detection of theraded handlers + * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers * @lock: locking for SMP * @affinity_hint: hint to user space for preferred irq affinity * @affinity_notify: context for notification of affinity changes diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 33cacc8af26d..62a8e3d23829 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -256,11 +256,11 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size, irq_hw_number_t hwirq_max, int direct_max, const struct irq_domain_ops *ops, void *host_data); -struct irq_domain *irq_domain_add_simple(struct device_node *of_node, - unsigned int size, - unsigned int first_irq, - const struct irq_domain_ops *ops, - void *host_data); +struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode, + unsigned int size, + unsigned int first_irq, + const struct irq_domain_ops *ops, + void *host_data); struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, unsigned int size, unsigned int first_irq, @@ -325,6 +325,15 @@ static inline struct irq_domain *irq_find_host(struct device_node *node) return d; } +static inline struct irq_domain *irq_domain_add_simple(struct device_node *of_node, + unsigned int size, + unsigned int first_irq, + const struct irq_domain_ops *ops, + void *host_data) +{ + return irq_domain_create_simple(of_node_to_fwnode(of_node), size, first_irq, ops, host_data); +} + /** * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain. * @of_node: pointer to interrupt controller's device tree node. 
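Note: irq_domain_add_simple() is now a thin inline wrapper around the fwnode-based irq_domain_create_simple(), so callers that already hold a struct fwnode_handle can skip the of_node detour. A small sketch under that assumption; my_domain_ops and my_create_domain() are made-up names.

#include <linux/irqdomain.h>

/* Placeholder ops for a fictional interrupt controller. */
static const struct irq_domain_ops my_domain_ops = { /* .map, .xlate, ... */ };

static struct irq_domain *my_create_domain(struct fwnode_handle *fwnode,
					   void *priv)
{
	/* 32 hwirqs; first_irq == 0 means Linux IRQs are assigned on the fly. */
	return irq_domain_create_simple(fwnode, 32, 0, &my_domain_ops, priv);
}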
@@ -415,15 +424,6 @@ static inline unsigned int irq_linear_revmap(struct irq_domain *domain, extern unsigned int irq_find_mapping(struct irq_domain *host, irq_hw_number_t hwirq); extern unsigned int irq_create_direct_mapping(struct irq_domain *host); -extern int irq_create_strict_mappings(struct irq_domain *domain, - unsigned int irq_base, - irq_hw_number_t hwirq_base, int count); - -static inline int irq_create_identity_mapping(struct irq_domain *host, - irq_hw_number_t hwirq) -{ - return irq_create_strict_mappings(host, hwirq, hwirq, 1); -} extern const struct irq_domain_ops irq_domain_simple_ops; diff --git a/include/linux/isicom.h b/include/linux/isicom.h deleted file mode 100644 index 7de6822d7b1a..000000000000 --- a/include/linux/isicom.h +++ /dev/null @@ -1,85 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_ISICOM_H -#define _LINUX_ISICOM_H - -#define YES 1 -#define NO 0 - -/* - * ISICOM Driver definitions ... - * - */ - -#define ISICOM_NAME "ISICom" - -/* - * PCI definitions - */ - -#define DEVID_COUNT 9 -#define VENDOR_ID 0x10b5 - -/* - * These are now officially allocated numbers - */ - -#define ISICOM_NMAJOR 112 /* normal */ -#define ISICOM_CMAJOR 113 /* callout */ -#define ISICOM_MAGIC (('M' << 8) | 'T') - -#define WAKEUP_CHARS 256 /* hard coded for now */ -#define TX_SIZE 254 - -#define BOARD_COUNT 4 -#define PORT_COUNT (BOARD_COUNT*16) - -/* character sizes */ - -#define ISICOM_CS5 0x0000 -#define ISICOM_CS6 0x0001 -#define ISICOM_CS7 0x0002 -#define ISICOM_CS8 0x0003 - -/* stop bits */ - -#define ISICOM_1SB 0x0000 -#define ISICOM_2SB 0x0004 - -/* parity */ - -#define ISICOM_NOPAR 0x0000 -#define ISICOM_ODPAR 0x0008 -#define ISICOM_EVPAR 0x0018 - -/* flow control */ - -#define ISICOM_CTSRTS 0x03 -#define ISICOM_INITIATE_XONXOFF 0x04 -#define ISICOM_RESPOND_XONXOFF 0x08 - -#define BOARD(line) (((line) >> 4) & 0x3) - - /* isi kill queue bitmap */ - -#define ISICOM_KILLTX 0x01 -#define ISICOM_KILLRX 0x02 - - /* isi_board status bitmap */ - -#define FIRMWARE_LOADED 0x0001 -#define BOARD_ACTIVE 0x0002 -#define BOARD_INIT 0x0004 - - /* isi_port status bitmap */ - -#define ISI_CTS 0x1000 -#define ISI_DSR 0x2000 -#define ISI_RI 0x4000 -#define ISI_DCD 0x8000 -#define ISI_DTR 0x0100 -#define ISI_RTS 0x0200 - - -#define ISI_TXOK 0x0001 - -#endif /* ISICOM_H */ diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 99d3cd051ac3..db0e1920cb12 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -61,7 +61,7 @@ void __jbd2_debug(int level, const char *file, const char *func, #define jbd_debug(n, fmt, a...) \ __jbd2_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a) #else -#define jbd_debug(n, fmt, a...) /**/ +#define jbd_debug(n, fmt, a...) 
no_printk(fmt, ##a) #endif extern void *jbd2_alloc(size_t size, gfp_t flags); @@ -594,18 +594,22 @@ struct transaction_s */ unsigned long t_log_start; - /* Number of buffers on the t_buffers list [j_list_lock] */ + /* + * Number of buffers on the t_buffers list [j_list_lock, no locks + * needed for jbd2 thread] + */ int t_nr_buffers; /* * Doubly-linked circular list of all buffers reserved but not yet - * modified by this transaction [j_list_lock] + * modified by this transaction [j_list_lock, no locks needed fo + * jbd2 thread] */ struct journal_head *t_reserved_list; /* * Doubly-linked circular list of all metadata buffers owned by this - * transaction [j_list_lock] + * transaction [j_list_lock, no locks needed for jbd2 thread] */ struct journal_head *t_buffers; @@ -629,9 +633,11 @@ struct transaction_s struct journal_head *t_checkpoint_io_list; /* - * Doubly-linked circular list of metadata buffers being shadowed by log - * IO. The IO buffers on the iobuf list and the shadow buffers on this - * list match each other one for one at all times. [j_list_lock] + * Doubly-linked circular list of metadata buffers being + * shadowed by log IO. The IO buffers on the iobuf list and + * the shadow buffers on this list match each other one for + * one at all times. [j_list_lock, no locks needed for jbd2 + * thread] */ struct journal_head *t_shadow_list; @@ -768,7 +774,8 @@ enum passtype {PASS_SCAN, PASS_REVOKE, PASS_REPLAY}; struct journal_s { /** - * @j_flags: General journaling state flags [j_state_lock] + * @j_flags: General journaling state flags [j_state_lock, + * no lock for quick racy checks] */ unsigned long j_flags; @@ -808,7 +815,8 @@ struct journal_s /** * @j_barrier_count: * - * Number of processes waiting to create a barrier lock [j_state_lock] + * Number of processes waiting to create a barrier lock [j_state_lock, + * no lock for quick racy checks] */ int j_barrier_count; @@ -821,7 +829,8 @@ struct journal_s * @j_running_transaction: * * Transactions: The current running transaction... - * [j_state_lock] [caller holding open handle] + * [j_state_lock, no lock for quick racy checks] [caller holding + * open handle] */ transaction_t *j_running_transaction; @@ -1033,7 +1042,7 @@ struct journal_s * @j_commit_sequence: * * Sequence number of the most recently committed transaction - * [j_state_lock]. + * [j_state_lock, no lock for quick racy checks] */ tid_t j_commit_sequence; @@ -1041,7 +1050,7 @@ struct journal_s * @j_commit_request: * * Sequence number of the most recent transaction wanting commit - * [j_state_lock] + * [j_state_lock, no lock for quick racy checks] */ tid_t j_commit_request; diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index d92691262f51..05f5554d860f 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -382,6 +382,21 @@ struct static_key_false { [0 ... 
(count) - 1] = STATIC_KEY_FALSE_INIT, \ } +#define _DEFINE_STATIC_KEY_1(name) DEFINE_STATIC_KEY_TRUE(name) +#define _DEFINE_STATIC_KEY_0(name) DEFINE_STATIC_KEY_FALSE(name) +#define DEFINE_STATIC_KEY_MAYBE(cfg, name) \ + __PASTE(_DEFINE_STATIC_KEY_, IS_ENABLED(cfg))(name) + +#define _DEFINE_STATIC_KEY_RO_1(name) DEFINE_STATIC_KEY_TRUE_RO(name) +#define _DEFINE_STATIC_KEY_RO_0(name) DEFINE_STATIC_KEY_FALSE_RO(name) +#define DEFINE_STATIC_KEY_MAYBE_RO(cfg, name) \ + __PASTE(_DEFINE_STATIC_KEY_RO_, IS_ENABLED(cfg))(name) + +#define _DECLARE_STATIC_KEY_1(name) DECLARE_STATIC_KEY_TRUE(name) +#define _DECLARE_STATIC_KEY_0(name) DECLARE_STATIC_KEY_FALSE(name) +#define DECLARE_STATIC_KEY_MAYBE(cfg, name) \ + __PASTE(_DECLARE_STATIC_KEY_, IS_ENABLED(cfg))(name) + extern bool ____wrong_branch_error(void); #define static_key_enabled(x) \ @@ -482,6 +497,10 @@ extern bool ____wrong_branch_error(void); #endif /* CONFIG_JUMP_LABEL */ +#define static_branch_maybe(config, x) \ + (IS_ENABLED(config) ? static_branch_likely(x) \ + : static_branch_unlikely(x)) + /* * Advanced usage; refcount, branch is enabled when: count != 0 */ diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 14f72ec96492..b1678a61e6a7 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -30,7 +30,8 @@ struct kunit_kasan_expectation { /* Software KASAN implementations use shadow memory. */ #ifdef CONFIG_KASAN_SW_TAGS -#define KASAN_SHADOW_INIT 0xFF +/* This matches KASAN_TAG_INVALID. */ +#define KASAN_SHADOW_INIT 0xFE #else #define KASAN_SHADOW_INIT 0 #endif @@ -95,6 +96,11 @@ static __always_inline bool kasan_enabled(void) return static_branch_likely(&kasan_flag_enabled); } +static inline bool kasan_has_integrated_init(void) +{ + return kasan_enabled(); +} + #else /* CONFIG_KASAN_HW_TAGS */ static inline bool kasan_enabled(void) @@ -102,6 +108,11 @@ static inline bool kasan_enabled(void) return true; } +static inline bool kasan_has_integrated_init(void) +{ + return false; +} + #endif /* CONFIG_KASAN_HW_TAGS */ slab_flags_t __kasan_never_merge(void); @@ -119,20 +130,20 @@ static __always_inline void kasan_unpoison_range(const void *addr, size_t size) __kasan_unpoison_range(addr, size); } -void __kasan_alloc_pages(struct page *page, unsigned int order); +void __kasan_alloc_pages(struct page *page, unsigned int order, bool init); static __always_inline void kasan_alloc_pages(struct page *page, - unsigned int order) + unsigned int order, bool init) { if (kasan_enabled()) - __kasan_alloc_pages(page, order); + __kasan_alloc_pages(page, order, init); } -void __kasan_free_pages(struct page *page, unsigned int order); +void __kasan_free_pages(struct page *page, unsigned int order, bool init); static __always_inline void kasan_free_pages(struct page *page, - unsigned int order) + unsigned int order, bool init) { if (kasan_enabled()) - __kasan_free_pages(page, order); + __kasan_free_pages(page, order, init); } void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size, @@ -192,11 +203,13 @@ static __always_inline void * __must_check kasan_init_slab_obj( return (void *)object; } -bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip); -static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object) +bool __kasan_slab_free(struct kmem_cache *s, void *object, + unsigned long ip, bool init); +static __always_inline bool kasan_slab_free(struct kmem_cache *s, + void *object, bool init) { if (kasan_enabled()) - return __kasan_slab_free(s, object, _RET_IP_); + return 
__kasan_slab_free(s, object, _RET_IP_, init); return false; } @@ -215,12 +228,12 @@ static __always_inline void kasan_slab_free_mempool(void *ptr) } void * __must_check __kasan_slab_alloc(struct kmem_cache *s, - void *object, gfp_t flags); + void *object, gfp_t flags, bool init); static __always_inline void * __must_check kasan_slab_alloc( - struct kmem_cache *s, void *object, gfp_t flags) + struct kmem_cache *s, void *object, gfp_t flags, bool init) { if (kasan_enabled()) - return __kasan_slab_alloc(s, object, flags); + return __kasan_slab_alloc(s, object, flags, init); return object; } @@ -276,13 +289,17 @@ static inline bool kasan_enabled(void) { return false; } +static inline bool kasan_has_integrated_init(void) +{ + return false; +} static inline slab_flags_t kasan_never_merge(void) { return 0; } static inline void kasan_unpoison_range(const void *address, size_t size) {} -static inline void kasan_alloc_pages(struct page *page, unsigned int order) {} -static inline void kasan_free_pages(struct page *page, unsigned int order) {} +static inline void kasan_alloc_pages(struct page *page, unsigned int order, bool init) {} +static inline void kasan_free_pages(struct page *page, unsigned int order, bool init) {} static inline void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, slab_flags_t *flags) {} @@ -298,14 +315,14 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache, { return (void *)object; } -static inline bool kasan_slab_free(struct kmem_cache *s, void *object) +static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init) { return false; } static inline void kasan_kfree_large(void *ptr) {} static inline void kasan_slab_free_mempool(void *ptr) {} static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object, - gfp_t flags) + gfp_t flags, bool init) { return object; } @@ -376,6 +393,12 @@ static inline void *kasan_reset_tag(const void *addr) #endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS*/ +#ifdef CONFIG_KASAN_HW_TAGS + +void kasan_report_async(void); + +#endif /* CONFIG_KASAN_HW_TAGS */ + #ifdef CONFIG_KASAN_SW_TAGS void __init kasan_init_sw_tags(void); #else diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h index 24a59cb06963..cc8fa109cfa3 100644 --- a/include/linux/kconfig.h +++ b/include/linux/kconfig.h @@ -70,10 +70,4 @@ */ #define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option)) -/* - * IF_ENABLED(CONFIG_FOO, ptr) evaluates to (ptr) if CONFIG_FOO is set to 'y' - * or 'm', NULL otherwise. - */ -#define IF_ENABLED(option, ptr) (IS_ENABLED(option) ? (ptr) : NULL) - #endif /* __LINUX_KCONFIG_H */ diff --git a/include/linux/kcov.h b/include/linux/kcov.h index 4e3037dc1204..55dc338f6bcd 100644 --- a/include/linux/kcov.h +++ b/include/linux/kcov.h @@ -2,6 +2,7 @@ #ifndef _LINUX_KCOV_H #define _LINUX_KCOV_H +#include <linux/sched.h> #include <uapi/linux/kcov.h> struct task_struct; diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h index cf14840609ce..9fd0ad80fef6 100644 --- a/include/linux/kcsan-checks.h +++ b/include/linux/kcsan-checks.h @@ -1,4 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0 */ +/* + * KCSAN access checks and modifiers. These can be used to explicitly check + * uninstrumented accesses, or change KCSAN checking behaviour of accesses. + * + * Copyright (C) 2019, Google LLC. 
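Note: the DEFINE_STATIC_KEY_MAYBE()/static_branch_maybe() helpers added to jump_label.h above let a Kconfig option choose the default-enabled flavour of a static key without duplicating declarations. A sketch of the intended pattern; CONFIG_MY_FEATURE_DEFAULT_ON, my_feature_key and do_feature_work() are invented names for illustration.

#include <linux/jump_label.h>

static void do_feature_work(void)
{
	/* placeholder work */
}

/* True-key if the config default is 'y', false-key otherwise. */
DEFINE_STATIC_KEY_MAYBE(CONFIG_MY_FEATURE_DEFAULT_ON, my_feature_key);

static void my_hot_path(void)
{
	/* Expands to static_branch_likely() or _unlikely() to match the default. */
	if (static_branch_maybe(CONFIG_MY_FEATURE_DEFAULT_ON, &my_feature_key))
		do_feature_work();
}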
+ */ #ifndef _LINUX_KCSAN_CHECKS_H #define _LINUX_KCSAN_CHECKS_H diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h index 53340d8789f9..fc266ecb2a4d 100644 --- a/include/linux/kcsan.h +++ b/include/linux/kcsan.h @@ -1,4 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ +/* + * The Kernel Concurrency Sanitizer (KCSAN) infrastructure. Public interface and + * data structures to set up runtime. See kcsan-checks.h for explicit checks and + * modifiers. For more info please see Documentation/dev-tools/kcsan.rst. + * + * Copyright (C) 2019, Google LLC. + */ #ifndef _LINUX_KCSAN_H #define _LINUX_KCSAN_H diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 5b7ed6dc99ac..15d8bad3d2f2 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -3,6 +3,7 @@ #define _LINUX_KERNEL_H #include <stdarg.h> +#include <linux/align.h> #include <linux/limits.h> #include <linux/linkage.h> #include <linux/stddef.h> @@ -30,14 +31,6 @@ */ #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) -/* @a is a power of 2 value */ -#define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) -#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a)) -#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask)) -#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) -#define PTR_ALIGN_DOWN(p, a) ((typeof(p))ALIGN_DOWN((unsigned long)(p), (a))) -#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) - /* generic data direction definitions */ #define READ 0 #define WRITE 1 @@ -48,6 +41,8 @@ */ #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) +#define PTR_IF(cond, ptr) ((cond) ? (ptr) : NULL) + #define u64_to_user_ptr(x) ( \ { \ typecheck(u64, (x)); \ diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 8a7aa1d7e0e3..0c994ae37729 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -304,7 +304,15 @@ struct kimage { #ifdef CONFIG_IMA_KEXEC /* Virtual address of IMA measurement buffer for kexec syscall */ void *ima_buffer; + + phys_addr_t ima_buffer_addr; + size_t ima_buffer_size; #endif + + /* Core ELF header buffer */ + void *elf_headers; + unsigned long elf_headers_sz; + unsigned long elf_load_addr; }; /* kexec interface functions */ diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h index 3378bcbe585e..906521c2329c 100644 --- a/include/linux/kmsg_dump.h +++ b/include/linux/kmsg_dump.h @@ -30,6 +30,16 @@ enum kmsg_dump_reason { }; /** + * struct kmsg_dump_iter - iterator for retrieving kernel messages + * @cur_seq: Points to the oldest message to dump + * @next_seq: Points after the newest message to dump + */ +struct kmsg_dump_iter { + u64 cur_seq; + u64 next_seq; +}; + +/** * struct kmsg_dumper - kernel crash message dumper structure * @list: Entry in the dumper list (private) * @dump: Call into dumping code which will retrieve the data with @@ -41,31 +51,19 @@ struct kmsg_dumper { struct list_head list; void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason); enum kmsg_dump_reason max_reason; - bool active; bool registered; - - /* private state of the kmsg iterator */ - u32 cur_idx; - u32 next_idx; - u64 cur_seq; - u64 next_seq; }; #ifdef CONFIG_PRINTK void kmsg_dump(enum kmsg_dump_reason reason); -bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog, - char *line, size_t size, size_t *len); - -bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog, +bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog, char *line, size_t size, size_t *len); -bool 
kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, - char *buf, size_t size, size_t *len); - -void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper); +bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog, + char *buf, size_t size, size_t *len_out); -void kmsg_dump_rewind(struct kmsg_dumper *dumper); +void kmsg_dump_rewind(struct kmsg_dump_iter *iter); int kmsg_dump_register(struct kmsg_dumper *dumper); @@ -77,30 +75,19 @@ static inline void kmsg_dump(enum kmsg_dump_reason reason) { } -static inline bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, - bool syslog, const char *line, - size_t size, size_t *len) -{ - return false; -} - -static inline bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog, +static inline bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog, const char *line, size_t size, size_t *len) { return false; } -static inline bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, +static inline bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog, char *buf, size_t size, size_t *len) { return false; } -static inline void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper) -{ -} - -static inline void kmsg_dump_rewind(struct kmsg_dumper *dumper) +static inline void kmsg_dump_rewind(struct kmsg_dump_iter *iter) { } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 1b65e7204344..2f34487e21f2 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -192,8 +192,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, int len, void *val); int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, struct kvm_io_device *dev); -void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, - struct kvm_io_device *dev); +int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, + struct kvm_io_device *dev); struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr); @@ -218,6 +218,20 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); #endif +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER +struct kvm_gfn_range { + struct kvm_memory_slot *slot; + gfn_t start; + gfn_t end; + pte_t pte; + bool may_block; +}; +bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); +bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); +bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); +bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range); +#endif + enum { OUTSIDE_GUEST_MODE, IN_GUEST_MODE, @@ -324,6 +338,51 @@ struct kvm_vcpu { struct kvm_dirty_ring dirty_ring; }; +/* must be called with irqs disabled */ +static __always_inline void guest_enter_irqoff(void) +{ + /* + * This is running in ioctl context so its safe to assume that it's the + * stime pending cputime to flush. + */ + instrumentation_begin(); + vtime_account_guest_enter(); + instrumentation_end(); + + /* + * KVM does not hold any references to rcu protected data when it + * switches CPU into a guest mode. In fact switching to a guest mode + * is very similar to exiting to userspace from rcu point of view. In + * addition CPU may stay in a guest mode for quite a long time (up to + * one time slice). Lets treat guest mode as quiescent state, just like + * we do with user-mode execution. 
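Note: kmsg_dump consumers now drive a stack-allocated struct kmsg_dump_iter instead of iterator state hidden inside struct kmsg_dumper. A minimal sketch of a dump callback using the reworked helpers; my_dump()/my_write_record() are placeholders for a real driver's output path.

#include <linux/kmsg_dump.h>

static void my_write_record(const char *line, size_t len)
{
	/* placeholder: push the record to the driver's output path */
}

static void my_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
{
	struct kmsg_dump_iter iter;
	char line[256];
	size_t len;

	kmsg_dump_rewind(&iter);
	while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len))
		my_write_record(line, len);
}

static struct kmsg_dumper my_dumper = {
	.dump		= my_dump,
	.max_reason	= KMSG_DUMP_PANIC,
};
/* Registered elsewhere with kmsg_dump_register(&my_dumper). */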
+ */ + if (!context_tracking_guest_enter()) { + instrumentation_begin(); + rcu_virt_note_context_switch(smp_processor_id()); + instrumentation_end(); + } +} + +static __always_inline void guest_exit_irqoff(void) +{ + context_tracking_guest_exit(); + + instrumentation_begin(); + /* Flush the guest cputime we spent on the guest */ + vtime_account_guest_exit(); + instrumentation_end(); +} + +static inline void guest_exit(void) +{ + unsigned long flags; + + local_irq_save(flags); + guest_exit_irqoff(); + local_irq_restore(flags); +} + static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) { /* @@ -640,6 +699,7 @@ void kvm_exit(void); void kvm_get_kvm(struct kvm *kvm); void kvm_put_kvm(struct kvm *kvm); +bool file_is_kvm(struct file *file); void kvm_put_kvm_no_destroy(struct kvm *kvm); static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id) @@ -886,7 +946,7 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot); #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, - struct kvm_memory_slot *memslot); + const struct kvm_memory_slot *memslot); #else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log); int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, @@ -945,6 +1005,7 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu); int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu); bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu); +bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu); int kvm_arch_post_init_vm(struct kvm *kvm); void kvm_arch_pre_destroy_vm(struct kvm *kvm); @@ -1116,7 +1177,7 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) } static inline unsigned long -__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn) +__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn) { return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE; } diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 01f251b6e36c..89b69e645ac7 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -141,7 +141,6 @@ static inline void __iomem *devm_nvdimm_ioremap(struct device *dev, struct nvdimm_bus; struct module; -struct device; struct nd_blk_region; struct nd_blk_region_desc { int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev); diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 1db223710b28..0908abda9c1b 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -112,10 +112,8 @@ struct nvm_dev_ops { #ifdef CONFIG_NVM -#include <linux/blkdev.h> #include <linux/file.h> #include <linux/dmapool.h> -#include <uapi/linux/lightnvm.h> enum { /* HW Responsibilities */ diff --git a/include/linux/list_sort.h b/include/linux/list_sort.h index 20f178c24e9d..453105f74e05 100644 --- a/include/linux/list_sort.h +++ b/include/linux/list_sort.h @@ -6,8 +6,9 @@ struct list_head; +typedef int __attribute__((nonnull(2,3))) (*list_cmp_func_t)(void *, + const struct list_head *, const struct list_head *); + __attribute__((nonnull(2,3))) -void list_sort(void *priv, struct list_head *head, - int (*cmp)(void *priv, struct list_head *a, - struct list_head *b)); +void list_sort(void *priv, struct list_head *head, list_cmp_func_t cmp); #endif diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 7b7ebf2e28ec..5cf387813754 100644 --- a/include/linux/lockdep.h 
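Note: list_sort() comparison callbacks are now typed as list_cmp_func_t and take const struct list_head pointers. A sketch of a conforming callback, assuming a made-up struct item carrying an integer key.

#include <linux/list.h>
#include <linux/list_sort.h>

struct item {
	struct list_head node;
	int key;
};

/* Matches list_cmp_func_t: <0, 0 or >0, like memcmp(). */
static int item_cmp(void *priv, const struct list_head *a,
		    const struct list_head *b)
{
	const struct item *ia = list_entry(a, struct item, node);
	const struct item *ib = list_entry(b, struct item, node);

	return (ia->key > ib->key) - (ia->key < ib->key);
}

/* Usage: list_sort(NULL, &my_list, item_cmp); */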
+++ b/include/linux/lockdep.h @@ -155,7 +155,7 @@ extern void lockdep_set_selftest_task(struct task_struct *task); extern void lockdep_init_task(struct task_struct *task); /* - * Split the recrursion counter in two to readily detect 'off' vs recursion. + * Split the recursion counter in two to readily detect 'off' vs recursion. */ #define LOCKDEP_RECURSION_BITS 16 #define LOCKDEP_OFF (1U << LOCKDEP_RECURSION_BITS) @@ -268,6 +268,11 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, extern void lock_release(struct lockdep_map *lock, unsigned long ip); +/* lock_is_held_type() returns */ +#define LOCK_STATE_UNKNOWN -1 +#define LOCK_STATE_NOT_HELD 0 +#define LOCK_STATE_HELD 1 + /* * Same "read" as for lock_acquire(), except -1 means any. */ @@ -301,8 +306,14 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie); #define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) -#define lockdep_assert_held(l) do { \ - WARN_ON(debug_locks && !lockdep_is_held(l)); \ +#define lockdep_assert_held(l) do { \ + WARN_ON(debug_locks && \ + lockdep_is_held(l) == LOCK_STATE_NOT_HELD); \ + } while (0) + +#define lockdep_assert_not_held(l) do { \ + WARN_ON(debug_locks && \ + lockdep_is_held(l) == LOCK_STATE_HELD); \ } while (0) #define lockdep_assert_held_write(l) do { \ @@ -317,6 +328,10 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie); WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \ } while (0) +#define lockdep_assert_none_held_once() do { \ + WARN_ON_ONCE(debug_locks && current->lockdep_depth); \ + } while (0) + #define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) #define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map) @@ -393,9 +408,11 @@ extern int lockdep_is_held(const void *); #define lockdep_is_held_type(l, r) (1) #define lockdep_assert_held(l) do { (void)(l); } while (0) -#define lockdep_assert_held_write(l) do { (void)(l); } while (0) +#define lockdep_assert_not_held(l) do { (void)(l); } while (0) +#define lockdep_assert_held_write(l) do { (void)(l); } while (0) #define lockdep_assert_held_read(l) do { (void)(l); } while (0) #define lockdep_assert_held_once(l) do { (void)(l); } while (0) +#define lockdep_assert_none_held_once() do { } while (0) #define lockdep_recursing(tsk) (0) diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 477a597db013..04c01794de83 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -59,9 +59,11 @@ LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc, LSM_HOOK(int, -ENOPARAM, fs_context_parse_param, struct fs_context *fc, struct fs_parameter *param) LSM_HOOK(int, 0, sb_alloc_security, struct super_block *sb) +LSM_HOOK(void, LSM_RET_VOID, sb_delete, struct super_block *sb) LSM_HOOK(void, LSM_RET_VOID, sb_free_security, struct super_block *sb) LSM_HOOK(void, LSM_RET_VOID, sb_free_mnt_opts, void *mnt_opts) LSM_HOOK(int, 0, sb_eat_lsm_opts, char *orig, void **mnt_opts) +LSM_HOOK(int, 0, sb_mnt_opts_compat, struct super_block *sb, void *mnt_opts) LSM_HOOK(int, 0, sb_remount, struct super_block *sb, void *mnt_opts) LSM_HOOK(int, 0, sb_kern_mount, struct super_block *sb) LSM_HOOK(int, 0, sb_show_options, struct seq_file *m, struct super_block *sb) @@ -203,7 +205,10 @@ LSM_HOOK(int, 0, task_fix_setgid, struct cred *new, const struct cred * old, LSM_HOOK(int, 0, task_setpgid, struct task_struct *p, pid_t pgid) LSM_HOOK(int, 0, task_getpgid, struct task_struct *p) LSM_HOOK(int, 0, task_getsid, struct task_struct *p) -LSM_HOOK(void, 
LSM_RET_VOID, task_getsecid, struct task_struct *p, u32 *secid) +LSM_HOOK(void, LSM_RET_VOID, task_getsecid_subj, + struct task_struct *p, u32 *secid) +LSM_HOOK(void, LSM_RET_VOID, task_getsecid_obj, + struct task_struct *p, u32 *secid) LSM_HOOK(int, 0, task_setnice, struct task_struct *p, int nice) LSM_HOOK(int, 0, task_setioprio, struct task_struct *p, int ioprio) LSM_HOOK(int, 0, task_getioprio, struct task_struct *p) diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index fb7f3193753d..5c4c5c0602cb 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -108,6 +108,9 @@ * allocated. * @sb contains the super_block structure to be modified. * Return 0 if operation was successful. + * @sb_delete: + * Release objects tied to a superblock (e.g. inodes). + * @sb contains the super_block structure being released. * @sb_free_security: * Deallocate and clear the sb->s_security field. * @sb contains the super_block structure to be modified. @@ -142,6 +145,12 @@ * @orig the original mount data copied from userspace. * @copy copied data which will be passed to the security module. * Returns 0 if the copy was successful. + * @sb_mnt_opts_compat: + * Determine if the new mount options in @mnt_opts are allowed given + * the existing mounted filesystem at @sb. + * @sb superblock being compared + * @mnt_opts new mount options + * Return 0 if options are compatible. * @sb_remount: * Extracts security system specific mount options and verifies no changes * are being made to those options. @@ -707,9 +716,15 @@ * @p. * @p contains the task_struct for the process. * Return 0 if permission is granted. - * @task_getsecid: - * Retrieve the security identifier of the process @p. - * @p contains the task_struct for the process and place is into @secid. + * @task_getsecid_subj: + * Retrieve the subjective security identifier of the task_struct in @p + * and return it in @secid. Special care must be taken to ensure that @p + * is the either the "current" task, or the caller has exclusive access + * to @p. + * In case of failure, @secid will be set to zero. + * @task_getsecid_obj: + * Retrieve the objective security identifier of the task_struct in @p + * and return it in @secid. * In case of failure, @secid will be set to zero. 
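Note: with task_getsecid split into subjective and objective variants, callers pick the hook according to whether the task is acting or being acted upon. A hedged sketch assuming the matching security_task_getsecid_subj()/security_task_getsecid_obj() wrappers; my_task_secid() is an invented helper.

#include <linux/sched.h>
#include <linux/security.h>

static u32 my_task_secid(struct task_struct *p)
{
	u32 secid;

	/*
	 * Subjective ID when @p is the acting task (here: current),
	 * objective ID when @p is merely the target of the operation.
	 */
	if (p == current)
		security_task_getsecid_subj(p, &secid);
	else
		security_task_getsecid_obj(p, &secid);

	return secid;
}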
* * @task_setnice: @@ -1573,6 +1588,7 @@ struct lsm_blob_sizes { int lbs_cred; int lbs_file; int lbs_inode; + int lbs_superblock; int lbs_ipc; int lbs_msg_msg; int lbs_task; diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index c544b70dfbd2..acee44b9db26 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -22,8 +22,13 @@ #define MARVELL_PHY_ID_88E1545 0x01410ea0 #define MARVELL_PHY_ID_88E1548P 0x01410ec0 #define MARVELL_PHY_ID_88E3016 0x01410e60 -#define MARVELL_PHY_ID_88X3310 0x002b09a0 #define MARVELL_PHY_ID_88E2110 0x002b09b0 +#define MARVELL_PHY_ID_88X2222 0x01410f10 + +/* PHY IDs and mask for Alaska 10G PHYs */ +#define MARVELL_PHY_ID_88X33X0_MASK 0xfffffff8 +#define MARVELL_PHY_ID_88X3310 0x002b09a0 +#define MARVELL_PHY_ID_88X3340 0x002b09a8 /* Marvel 88E1111 in Finisar SFP module with modified PHY ID */ #define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0 @@ -34,6 +39,7 @@ */ #define MARVELL_PHY_ID_88E6341_FAMILY 0x01410f41 #define MARVELL_PHY_ID_88E6390_FAMILY 0x01410f90 +#define MARVELL_PHY_ID_88E6393_FAMILY 0x002b0b9b #define MARVELL_PHY_FAMILY_ID(id) ((id) >> 4) diff --git a/include/linux/mdev.h b/include/linux/mdev.h index 27eb383cb95d..1fb34ea394ad 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -10,7 +10,22 @@ #ifndef MDEV_H #define MDEV_H -struct mdev_device; +struct mdev_type; + +struct mdev_device { + struct device dev; + guid_t uuid; + void *driver_data; + struct list_head next; + struct mdev_type *type; + struct device *iommu_device; + bool active; +}; + +static inline struct mdev_device *to_mdev_device(struct device *dev) +{ + return container_of(dev, struct mdev_device, dev); +} /* * Called by the parent device driver to set the device which represents @@ -19,12 +34,21 @@ struct mdev_device; * * @dev: the mediated device that iommu will isolate. * @iommu_device: a pci device which represents the iommu for @dev. - * - * Return 0 for success, otherwise negative error value. */ -int mdev_set_iommu_device(struct device *dev, struct device *iommu_device); +static inline void mdev_set_iommu_device(struct mdev_device *mdev, + struct device *iommu_device) +{ + mdev->iommu_device = iommu_device; +} -struct device *mdev_get_iommu_device(struct device *dev); +static inline struct device *mdev_get_iommu_device(struct mdev_device *mdev) +{ + return mdev->iommu_device; +} + +unsigned int mdev_get_type_group_id(struct mdev_device *mdev); +unsigned int mtype_get_type_group_id(struct mdev_type *mtype); +struct device *mtype_get_parent_dev(struct mdev_type *mtype); /** * struct mdev_parent_ops - Structure to be registered for each parent device to @@ -38,7 +62,6 @@ struct device *mdev_get_iommu_device(struct device *dev); * @create: Called to allocate basic resources in parent device's * driver for a particular mediated device. It is * mandatory to provide create ops. - * @kobj: kobject of type for which 'create' is called. 
* @mdev: mdev_device structure on of mediated device * that is being created * Returns integer: success (0) or error (< 0) @@ -84,7 +107,7 @@ struct mdev_parent_ops { const struct attribute_group **mdev_attr_groups; struct attribute_group **supported_type_groups; - int (*create)(struct kobject *kobj, struct mdev_device *mdev); + int (*create)(struct mdev_device *mdev); int (*remove)(struct mdev_device *mdev); int (*open)(struct mdev_device *mdev); void (*release)(struct mdev_device *mdev); @@ -101,9 +124,11 @@ struct mdev_parent_ops { /* interface for exporting mdev supported type attributes */ struct mdev_type_attribute { struct attribute attr; - ssize_t (*show)(struct kobject *kobj, struct device *dev, char *buf); - ssize_t (*store)(struct kobject *kobj, struct device *dev, - const char *buf, size_t count); + ssize_t (*show)(struct mdev_type *mtype, + struct mdev_type_attribute *attr, char *buf); + ssize_t (*store)(struct mdev_type *mtype, + struct mdev_type_attribute *attr, const char *buf, + size_t count); }; #define MDEV_TYPE_ATTR(_name, _mode, _show, _store) \ @@ -118,35 +143,46 @@ struct mdev_type_attribute mdev_type_attr_##_name = \ /** * struct mdev_driver - Mediated device driver - * @name: driver name * @probe: called when new device created * @remove: called when device removed * @driver: device driver structure * **/ struct mdev_driver { - const char *name; - int (*probe)(struct device *dev); - void (*remove)(struct device *dev); + int (*probe)(struct mdev_device *dev); + void (*remove)(struct mdev_device *dev); struct device_driver driver; }; -#define to_mdev_driver(drv) container_of(drv, struct mdev_driver, driver) - -void *mdev_get_drvdata(struct mdev_device *mdev); -void mdev_set_drvdata(struct mdev_device *mdev, void *data); -const guid_t *mdev_uuid(struct mdev_device *mdev); +static inline void *mdev_get_drvdata(struct mdev_device *mdev) +{ + return mdev->driver_data; +} +static inline void mdev_set_drvdata(struct mdev_device *mdev, void *data) +{ + mdev->driver_data = data; +} +static inline const guid_t *mdev_uuid(struct mdev_device *mdev) +{ + return &mdev->uuid; +} extern struct bus_type mdev_bus_type; int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops); void mdev_unregister_device(struct device *dev); -int mdev_register_driver(struct mdev_driver *drv, struct module *owner); +int mdev_register_driver(struct mdev_driver *drv); void mdev_unregister_driver(struct mdev_driver *drv); struct device *mdev_parent_dev(struct mdev_device *mdev); -struct device *mdev_dev(struct mdev_device *mdev); -struct mdev_device *mdev_from_dev(struct device *dev); +static inline struct device *mdev_dev(struct mdev_device *mdev) +{ + return &mdev->dev; +} +static inline struct mdev_device *mdev_from_dev(struct device *dev) +{ + return dev->bus == &mdev_bus_type ? 
to_mdev_device(dev) : NULL; +} #endif /* MDEV_H */ diff --git a/include/linux/mdio-bitbang.h b/include/linux/mdio-bitbang.h index aca4dc037b70..373630fe5c28 100644 --- a/include/linux/mdio-bitbang.h +++ b/include/linux/mdio-bitbang.h @@ -33,6 +33,9 @@ struct mdiobb_ops { struct mdiobb_ctrl { const struct mdiobb_ops *ops; + unsigned int override_op_c22; + u8 op_c22_read; + u8 op_c22_write; }; int mdiobb_read(struct mii_bus *bus, int phy, int reg); diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 0c04d39a7967..c193be760709 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -76,10 +76,27 @@ enum mem_cgroup_events_target { }; struct memcg_vmstats_percpu { - long stat[MEMCG_NR_STAT]; - unsigned long events[NR_VM_EVENT_ITEMS]; - unsigned long nr_page_events; - unsigned long targets[MEM_CGROUP_NTARGETS]; + /* Local (CPU and cgroup) page state & events */ + long state[MEMCG_NR_STAT]; + unsigned long events[NR_VM_EVENT_ITEMS]; + + /* Delta calculation for lockless upward propagation */ + long state_prev[MEMCG_NR_STAT]; + unsigned long events_prev[NR_VM_EVENT_ITEMS]; + + /* Cgroup1: threshold notifications & softlimit tree updates */ + unsigned long nr_page_events; + unsigned long targets[MEM_CGROUP_NTARGETS]; +}; + +struct memcg_vmstats { + /* Aggregated (CPU and subtree) page state & events */ + long state[MEMCG_NR_STAT]; + unsigned long events[NR_VM_EVENT_ITEMS]; + + /* Pending child counts during tree propagation */ + long state_pending[MEMCG_NR_STAT]; + unsigned long events_pending[NR_VM_EVENT_ITEMS]; }; struct mem_cgroup_reclaim_iter { @@ -97,12 +114,13 @@ struct batched_lruvec_stat { }; /* - * Bitmap of shrinker::id corresponding to memcg-aware shrinkers, - * which have elements charged to this memcg. + * Bitmap and deferred work of shrinker::id corresponding to memcg-aware + * shrinkers, which have elements charged to this memcg. */ -struct memcg_shrinker_map { +struct shrinker_info { struct rcu_head rcu; - unsigned long map[]; + atomic_long_t *nr_deferred; + unsigned long *map; }; /* @@ -128,7 +146,7 @@ struct mem_cgroup_per_node { struct mem_cgroup_reclaim_iter iter; - struct memcg_shrinker_map __rcu *shrinker_map; + struct shrinker_info __rcu *shrinker_info; struct rb_node tree_node; /* RB tree node */ unsigned long usage_in_excess;/* Set to the value by which */ @@ -287,8 +305,8 @@ struct mem_cgroup { MEMCG_PADDING(_pad1_); - atomic_long_t vmstats[MEMCG_NR_STAT]; - atomic_long_t vmevents[NR_VM_EVENT_ITEMS]; + /* memory.stat */ + struct memcg_vmstats vmstats; /* memory.events */ atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS]; @@ -315,10 +333,6 @@ struct mem_cgroup { atomic_t moving_account; struct task_struct *move_lock_task; - /* Legacy local VM stats and events */ - struct memcg_vmstats_percpu __percpu *vmstats_local; - - /* Subtree VM stats and events (batched updates) */ struct memcg_vmstats_percpu __percpu *vmstats_percpu; #ifdef CONFIG_CGROUP_WRITEBACK @@ -358,6 +372,62 @@ enum page_memcg_data_flags { #define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1) +static inline bool PageMemcgKmem(struct page *page); + +/* + * After the initialization objcg->memcg is always pointing at + * a valid memcg, but can be atomically swapped to the parent memcg. + * + * The caller must ensure that the returned memcg won't be released: + * e.g. acquire the rcu_read_lock or css_set_lock. 
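Note: for kmem pages, page_memcg() now resolves through the page's obj_cgroup, so (per the updated comments) the caller should hold an RCU read lock to keep the returned memcg from being released. A minimal sketch; the pr_debug() is only a stand-in for whatever the caller does with the memcg.

#include <linux/memcontrol.h>
#include <linux/rcupdate.h>
#include <linux/printk.h>

static void my_inspect_page_memcg(struct page *page)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = page_memcg(page);	/* may chase page->memcg_data to an objcg */
	if (memcg)
		pr_debug("page %p belongs to memcg %p\n", page, memcg);
	rcu_read_unlock();
}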
+ */ +static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg) +{ + return READ_ONCE(objcg->memcg); +} + +/* + * __page_memcg - get the memory cgroup associated with a non-kmem page + * @page: a pointer to the page struct + * + * Returns a pointer to the memory cgroup associated with the page, + * or NULL. This function assumes that the page is known to have a + * proper memory cgroup pointer. It's not safe to call this function + * against some type of pages, e.g. slab pages or ex-slab pages or + * kmem pages. + */ +static inline struct mem_cgroup *__page_memcg(struct page *page) +{ + unsigned long memcg_data = page->memcg_data; + + VM_BUG_ON_PAGE(PageSlab(page), page); + VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page); + VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page); + + return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); +} + +/* + * __page_objcg - get the object cgroup associated with a kmem page + * @page: a pointer to the page struct + * + * Returns a pointer to the object cgroup associated with the page, + * or NULL. This function assumes that the page is known to have a + * proper object cgroup pointer. It's not safe to call this function + * against some type of pages, e.g. slab pages or ex-slab pages or + * LRU pages. + */ +static inline struct obj_cgroup *__page_objcg(struct page *page) +{ + unsigned long memcg_data = page->memcg_data; + + VM_BUG_ON_PAGE(PageSlab(page), page); + VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page); + VM_BUG_ON_PAGE(!(memcg_data & MEMCG_DATA_KMEM), page); + + return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); +} + /* * page_memcg - get the memory cgroup associated with a page * @page: a pointer to the page struct @@ -367,20 +437,23 @@ enum page_memcg_data_flags { * proper memory cgroup pointer. It's not safe to call this function * against some type of pages, e.g. slab pages or ex-slab pages. * - * Any of the following ensures page and memcg binding stability: + * For a non-kmem page any of the following ensures page and memcg binding + * stability: + * * - the page lock * - LRU isolation * - lock_page_memcg() * - exclusive reference + * + * For a kmem page a caller should hold an rcu read lock to protect memcg + * associated with a kmem page from being released. */ static inline struct mem_cgroup *page_memcg(struct page *page) { - unsigned long memcg_data = page->memcg_data; - - VM_BUG_ON_PAGE(PageSlab(page), page); - VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page); - - return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); + if (PageMemcgKmem(page)) + return obj_cgroup_memcg(__page_objcg(page)); + else + return __page_memcg(page); } /* @@ -394,11 +467,19 @@ static inline struct mem_cgroup *page_memcg(struct page *page) */ static inline struct mem_cgroup *page_memcg_rcu(struct page *page) { + unsigned long memcg_data = READ_ONCE(page->memcg_data); + VM_BUG_ON_PAGE(PageSlab(page), page); WARN_ON_ONCE(!rcu_read_lock_held()); - return (struct mem_cgroup *)(READ_ONCE(page->memcg_data) & - ~MEMCG_DATA_FLAGS_MASK); + if (memcg_data & MEMCG_DATA_KMEM) { + struct obj_cgroup *objcg; + + objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); + return obj_cgroup_memcg(objcg); + } + + return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); } /* @@ -406,15 +487,21 @@ static inline struct mem_cgroup *page_memcg_rcu(struct page *page) * @page: a pointer to the page struct * * Returns a pointer to the memory cgroup associated with the page, - * or NULL. 
This function unlike page_memcg() can take any page + * or NULL. This function unlike page_memcg() can take any page * as an argument. It has to be used in cases when it's not known if a page - * has an associated memory cgroup pointer or an object cgroups vector. + * has an associated memory cgroup pointer or an object cgroups vector or + * an object cgroup. + * + * For a non-kmem page any of the following ensures page and memcg binding + * stability: * - * Any of the following ensures page and memcg binding stability: * - the page lock * - LRU isolation * - lock_page_memcg() * - exclusive reference + * + * For a kmem page a caller should hold an rcu read lock to protect memcg + * associated with a kmem page from being released. */ static inline struct mem_cgroup *page_memcg_check(struct page *page) { @@ -427,9 +514,17 @@ static inline struct mem_cgroup *page_memcg_check(struct page *page) if (memcg_data & MEMCG_DATA_OBJCGS) return NULL; + if (memcg_data & MEMCG_DATA_KMEM) { + struct obj_cgroup *objcg; + + objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); + return obj_cgroup_memcg(objcg); + } + return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); } +#ifdef CONFIG_MEMCG_KMEM /* * PageMemcgKmem - check if the page has MemcgKmem flag set * @page: a pointer to the page struct @@ -444,7 +539,6 @@ static inline bool PageMemcgKmem(struct page *page) return page->memcg_data & MEMCG_DATA_KMEM; } -#ifdef CONFIG_MEMCG_KMEM /* * page_objcgs - get the object cgroups vector associated with a page * @page: a pointer to the page struct @@ -486,6 +580,11 @@ static inline struct obj_cgroup **page_objcgs_check(struct page *page) } #else +static inline bool PageMemcgKmem(struct page *page) +{ + return false; +} + static inline struct obj_cgroup **page_objcgs(struct page *page) { return NULL; @@ -596,18 +695,15 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg) } int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask); +int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm, + gfp_t gfp, swp_entry_t entry); +void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry); void mem_cgroup_uncharge(struct page *page); void mem_cgroup_uncharge_list(struct list_head *page_list); void mem_cgroup_migrate(struct page *oldpage, struct page *newpage); -static struct mem_cgroup_per_node * -mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid) -{ - return memcg->nodeinfo[nid]; -} - /** * mem_cgroup_lruvec - get the lru list vector for a memcg & node * @memcg: memcg of the wanted lruvec @@ -631,7 +727,7 @@ static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg, if (!memcg) memcg = root_mem_cgroup; - mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); + mz = memcg->nodeinfo[pgdat->node_id]; lruvec = &mz->lruvec; out: /* @@ -708,21 +804,15 @@ static inline void obj_cgroup_get(struct obj_cgroup *objcg) percpu_ref_get(&objcg->refcnt); } -static inline void obj_cgroup_put(struct obj_cgroup *objcg) +static inline void obj_cgroup_get_many(struct obj_cgroup *objcg, + unsigned long nr) { - percpu_ref_put(&objcg->refcnt); + percpu_ref_get_many(&objcg->refcnt, nr); } -/* - * After the initialization objcg->memcg is always pointing at - * a valid memcg, but can be atomically swapped to the parent memcg. - * - * The caller must ensure that the returned memcg won't be released: - * e.g. acquire the rcu_read_lock or css_set_lock. 
- */ -static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg) +static inline void obj_cgroup_put(struct obj_cgroup *objcg) { - return READ_ONCE(objcg->memcg); + percpu_ref_put(&objcg->refcnt); } static inline void mem_cgroup_put(struct mem_cgroup *memcg) @@ -867,43 +957,9 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg); extern bool cgroup_memory_noswap; #endif -struct mem_cgroup *lock_page_memcg(struct page *page); -void __unlock_page_memcg(struct mem_cgroup *memcg); +void lock_page_memcg(struct page *page); void unlock_page_memcg(struct page *page); -/* - * idx can be of type enum memcg_stat_item or node_stat_item. - * Keep in sync with memcg_exact_page_state(). - */ -static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) -{ - long x = atomic_long_read(&memcg->vmstats[idx]); -#ifdef CONFIG_SMP - if (x < 0) - x = 0; -#endif - return x; -} - -/* - * idx can be of type enum memcg_stat_item or node_stat_item. - * Keep in sync with memcg_exact_page_state(). - */ -static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg, - int idx) -{ - long x = 0; - int cpu; - - for_each_possible_cpu(cpu) - x += per_cpu(memcg->vmstats_local->stat[idx], cpu); -#ifdef CONFIG_SMP - if (x < 0) - x = 0; -#endif - return x; -} - void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val); /* idx can be of type enum memcg_stat_item or node_stat_item */ @@ -979,10 +1035,6 @@ static inline void mod_memcg_lruvec_state(struct lruvec *lruvec, local_irq_restore(flags); } -unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, - gfp_t gfp_mask, - unsigned long *total_scanned); - void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, unsigned long count); @@ -1063,13 +1115,15 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm, void split_page_memcg(struct page *head, unsigned int nr); +unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, + gfp_t gfp_mask, + unsigned long *total_scanned); + #else /* CONFIG_MEMCG */ #define MEM_CGROUP_ID_SHIFT 0 #define MEM_CGROUP_ID_MAX 0 -struct mem_cgroup; - static inline struct mem_cgroup *page_memcg(struct page *page) { return NULL; @@ -1139,6 +1193,16 @@ static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm, return 0; } +static inline int mem_cgroup_swapin_charge_page(struct page *page, + struct mm_struct *mm, gfp_t gfp, swp_entry_t entry) +{ + return 0; +} + +static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry) +{ +} + static inline void mem_cgroup_uncharge(struct page *page) { } @@ -1171,6 +1235,10 @@ static inline bool lruvec_holds_page_lru_lock(struct page *page, return lruvec == &pgdat->__lruvec; } +static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page) +{ +} + static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) { return NULL; @@ -1289,12 +1357,7 @@ mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) { } -static inline struct mem_cgroup *lock_page_memcg(struct page *page) -{ - return NULL; -} - -static inline void __unlock_page_memcg(struct mem_cgroup *memcg) +static inline void lock_page_memcg(struct page *page) { } @@ -1334,17 +1397,6 @@ static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) { } -static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) -{ - return 0; -} - -static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg, - int idx) -{ - return 0; -} - static inline void 
__mod_memcg_state(struct mem_cgroup *memcg, int idx, int nr) @@ -1390,18 +1442,6 @@ static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, mod_node_page_state(page_pgdat(page), idx, val); } -static inline -unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, - gfp_t gfp_mask, - unsigned long *total_scanned) -{ - return 0; -} - -static inline void split_page_memcg(struct page *head, unsigned int nr) -{ -} - static inline void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, unsigned long count) @@ -1424,8 +1464,16 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx) { } -static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page) +static inline void split_page_memcg(struct page *head, unsigned int nr) +{ +} + +static inline +unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, + gfp_t gfp_mask, + unsigned long *total_scanned) { + return 0; } #endif /* CONFIG_MEMCG */ @@ -1563,10 +1611,10 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) return false; } -extern int memcg_expand_shrinker_maps(int new_id); - -extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg, - int nid, int shrinker_id); +int alloc_shrinker_info(struct mem_cgroup *memcg); +void free_shrinker_info(struct mem_cgroup *memcg); +void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id); +void reparent_shrinker_deferred(struct mem_cgroup *memcg); #else #define mem_cgroup_sockets_enabled 0 static inline void mem_cgroup_sk_alloc(struct sock *sk) { }; @@ -1576,8 +1624,8 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) return false; } -static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg, - int nid, int shrinker_id) +static inline void set_shrinker_bit(struct mem_cgroup *memcg, + int nid, int shrinker_id) { } #endif diff --git a/include/linux/memory.h b/include/linux/memory.h index 4da95e684e20..97e92e8b556a 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -29,6 +29,11 @@ struct memory_block { int online_type; /* for passing data to online routine */ int nid; /* NID for this memory block */ struct device dev; + /* + * Number of vmemmap pages. These pages + * lay at the beginning of the memory block. + */ + unsigned long nr_vmemmap_pages; }; int arch_get_memory_phys_device(unsigned long start_pfn); @@ -80,7 +85,8 @@ static inline int memory_notify(unsigned long val, void *v) #else extern int register_memory_notifier(struct notifier_block *nb); extern void unregister_memory_notifier(struct notifier_block *nb); -int create_memory_block_devices(unsigned long start, unsigned long size); +int create_memory_block_devices(unsigned long start, unsigned long size, + unsigned long vmemmap_pages); void remove_memory_block_devices(unsigned long start, unsigned long size); extern void memory_dev_init(void); extern int memory_notify(unsigned long val, void *v); diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 7288aa5ef73b..28f32fd00fe9 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -56,6 +56,14 @@ typedef int __bitwise mhp_t; #define MHP_MERGE_RESOURCE ((__force mhp_t)BIT(0)) /* + * We want memmap (struct page array) to be self contained. + * To do so, we will use the beginning of the hot-added range to build + * the page tables for the memmap array that describes the entire range. 
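The comment above describes the intent of the MHP_MEMMAP_ON_MEMORY flag defined just below. As a hypothetical sketch only (the function name and call site are invented for illustration, not taken from this series), a hotplug caller might combine the flag with the new mhp_supports_memmap_on_memory() helper declared further down:

	#include <linux/memory_hotplug.h>

	/* Illustrative only: request a self-hosted memmap when the range allows it. */
	static int example_hot_add_range(int nid, u64 start, u64 size)
	{
		mhp_t flags = MHP_MERGE_RESOURCE;

		if (mhp_supports_memmap_on_memory(size))
			flags |= MHP_MEMMAP_ON_MEMORY;

		return add_memory(nid, start, size, flags);
	}

Whether the request is honoured still depends on architecture support, so callers should treat the flag as a hint rather than a guarantee.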
+ * Only selected architectures support it with SPARSE_VMEMMAP. + */ +#define MHP_MEMMAP_ON_MEMORY ((__force mhp_t)BIT(1)) + +/* * Extended parameters for memory hotplug: * altmap: alternative allocator for memmap array (optional) * pgprot: page protection flags to apply to newly created page tables @@ -99,9 +107,13 @@ static inline void zone_seqlock_init(struct zone *zone) extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages); extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages); extern int add_one_highpage(struct page *page, int pfn, int bad_ppro); +extern void adjust_present_page_count(struct zone *zone, long nr_pages); /* VM interface that may be used by firmware interface */ +extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages, + struct zone *zone); +extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages); extern int online_pages(unsigned long pfn, unsigned long nr_pages, - int online_type, int nid); + struct zone *zone); extern struct zone *test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn); extern void __offline_isolated_pages(unsigned long start_pfn, @@ -359,6 +371,7 @@ extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_ extern int arch_create_linear_mapping(int nid, u64 start, u64 size, struct mhp_params *params); void arch_remove_linear_mapping(u64 start, u64 size); +extern bool mhp_supports_memmap_on_memory(unsigned long size); #endif /* CONFIG_MEMORY_HOTPLUG */ #endif /* __LINUX_MEMORY_HOTPLUG_H */ diff --git a/include/linux/memremap.h b/include/linux/memremap.h index f5b464daeeca..45a79da89c5f 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -17,7 +17,7 @@ struct device; * @alloc: track pages consumed, private to vmemmap_populate() */ struct vmem_altmap { - const unsigned long base_pfn; + unsigned long base_pfn; const unsigned long end_pfn; const unsigned long reserve; unsigned long free; diff --git a/include/linux/mfd/ab3100.h b/include/linux/mfd/ab3100.h deleted file mode 100644 index a881d8495186..000000000000 --- a/include/linux/mfd/ab3100.h +++ /dev/null @@ -1,128 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2007-2009 ST-Ericsson AB - * AB3100 core access functions - * Author: Linus Walleij <linus.walleij@stericsson.com> - */ - -#include <linux/regulator/machine.h> - -struct device; - -#ifndef MFD_AB3100_H -#define MFD_AB3100_H - - -#define AB3100_P1A 0xc0 -#define AB3100_P1B 0xc1 -#define AB3100_P1C 0xc2 -#define AB3100_P1D 0xc3 -#define AB3100_P1E 0xc4 -#define AB3100_P1F 0xc5 -#define AB3100_P1G 0xc6 -#define AB3100_R2A 0xc7 -#define AB3100_R2B 0xc8 - -/* - * AB3100, EVENTA1, A2 and A3 event register flags - * these are catenated into a single 32-bit flag in the code - * for event notification broadcasts. 
- */ -#define AB3100_EVENTA1_ONSWA (0x01<<16) -#define AB3100_EVENTA1_ONSWB (0x02<<16) -#define AB3100_EVENTA1_ONSWC (0x04<<16) -#define AB3100_EVENTA1_DCIO (0x08<<16) -#define AB3100_EVENTA1_OVER_TEMP (0x10<<16) -#define AB3100_EVENTA1_SIM_OFF (0x20<<16) -#define AB3100_EVENTA1_VBUS (0x40<<16) -#define AB3100_EVENTA1_VSET_USB (0x80<<16) - -#define AB3100_EVENTA2_READY_TX (0x01<<8) -#define AB3100_EVENTA2_READY_RX (0x02<<8) -#define AB3100_EVENTA2_OVERRUN_ERROR (0x04<<8) -#define AB3100_EVENTA2_FRAMING_ERROR (0x08<<8) -#define AB3100_EVENTA2_CHARG_OVERCURRENT (0x10<<8) -#define AB3100_EVENTA2_MIDR (0x20<<8) -#define AB3100_EVENTA2_BATTERY_REM (0x40<<8) -#define AB3100_EVENTA2_ALARM (0x80<<8) - -#define AB3100_EVENTA3_ADC_TRIG5 (0x01) -#define AB3100_EVENTA3_ADC_TRIG4 (0x02) -#define AB3100_EVENTA3_ADC_TRIG3 (0x04) -#define AB3100_EVENTA3_ADC_TRIG2 (0x08) -#define AB3100_EVENTA3_ADC_TRIGVBAT (0x10) -#define AB3100_EVENTA3_ADC_TRIGVTX (0x20) -#define AB3100_EVENTA3_ADC_TRIG1 (0x40) -#define AB3100_EVENTA3_ADC_TRIG0 (0x80) - -/* AB3100, STR register flags */ -#define AB3100_STR_ONSWA (0x01) -#define AB3100_STR_ONSWB (0x02) -#define AB3100_STR_ONSWC (0x04) -#define AB3100_STR_DCIO (0x08) -#define AB3100_STR_BOOT_MODE (0x10) -#define AB3100_STR_SIM_OFF (0x20) -#define AB3100_STR_BATT_REMOVAL (0x40) -#define AB3100_STR_VBUS (0x80) - -/* - * AB3100 contains 8 regulators, one external regulator controller - * and a buck converter, further the LDO E and buck converter can - * have separate settings if they are in sleep mode, this is - * modeled as a separate regulator. - */ -#define AB3100_NUM_REGULATORS 10 - -/** - * struct ab3100 - * @access_mutex: lock out concurrent accesses to the AB3100 registers - * @dev: pointer to the containing device - * @i2c_client: I2C client for this chip - * @testreg_client: secondary client for test registers - * @chip_name: name of this chip variant - * @chip_id: 8 bit chip ID for this chip variant - * @event_subscribers: event subscribers are listed here - * @startup_events: a copy of the first reading of the event registers - * @startup_events_read: whether the first events have been read - * - * This struct is PRIVATE and devices using it should NOT - * access ANY fields. It is used as a token for calling the - * AB3100 functions. - */ -struct ab3100 { - struct mutex access_mutex; - struct device *dev; - struct i2c_client *i2c_client; - struct i2c_client *testreg_client; - char chip_name[32]; - u8 chip_id; - struct blocking_notifier_head event_subscribers; - u8 startup_events[3]; - bool startup_events_read; -}; - -/** - * struct ab3100_platform_data - * Data supplied to initialize board connections to the AB3100 - * @reg_constraints: regulator constraints for target board - * the order of these constraints are: LDO A, C, D, E, - * F, G, H, K, EXT and BUCK. - * @reg_initvals: initial values for the regulator registers - * plus two sleep settings for LDO E and the BUCK converter. - * exactly AB3100_NUM_REGULATORS+2 values must be sent in. - * Order: LDO A, C, E, E sleep, F, G, H, K, EXT, BUCK, - * BUCK sleep, LDO D. (LDO D need to be initialized last.) - * @external_voltage: voltage level of the external regulator. 
- */ -struct ab3100_platform_data { - struct regulator_init_data reg_constraints[AB3100_NUM_REGULATORS]; - u8 reg_initvals[AB3100_NUM_REGULATORS+2]; - int external_voltage; -}; - -int ab3100_event_register(struct ab3100 *ab3100, - struct notifier_block *nb); -int ab3100_event_unregister(struct ab3100 *ab3100, - struct notifier_block *nb); - -#endif /* MFD_AB3100_H */ diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h index 23040b6f1615..7f07cfe44753 100644 --- a/include/linux/mfd/abx500.h +++ b/include/linux/mfd/abx500.h @@ -28,282 +28,6 @@ struct abx500_init_settings { u8 setting; }; -/* Battery driver related data */ -/* - * ADC for the battery thermistor. - * When using the ABx500_ADC_THERM_BATCTRL the battery ID resistor is combined - * with a NTC resistor to both identify the battery and to measure its - * temperature. Different phone manufactures uses different techniques to both - * identify the battery and to read its temperature. - */ -enum abx500_adc_therm { - ABx500_ADC_THERM_BATCTRL, - ABx500_ADC_THERM_BATTEMP, -}; - -/** - * struct abx500_res_to_temp - defines one point in a temp to res curve. To - * be used in battery packs that combines the identification resistor with a - * NTC resistor. - * @temp: battery pack temperature in Celsius - * @resist: NTC resistor net total resistance - */ -struct abx500_res_to_temp { - int temp; - int resist; -}; - -/** - * struct abx500_v_to_cap - Table for translating voltage to capacity - * @voltage: Voltage in mV - * @capacity: Capacity in percent - */ -struct abx500_v_to_cap { - int voltage; - int capacity; -}; - -/* Forward declaration */ -struct abx500_fg; - -/** - * struct abx500_fg_parameters - Fuel gauge algorithm parameters, in seconds - * if not specified - * @recovery_sleep_timer: Time between measurements while recovering - * @recovery_total_time: Total recovery time - * @init_timer: Measurement interval during startup - * @init_discard_time: Time we discard voltage measurement at startup - * @init_total_time: Total init time during startup - * @high_curr_time: Time current has to be high to go to recovery - * @accu_charging: FG accumulation time while charging - * @accu_high_curr: FG accumulation time in high current mode - * @high_curr_threshold: High current threshold, in mA - * @lowbat_threshold: Low battery threshold, in mV - * @overbat_threshold: Over battery threshold, in mV - * @battok_falling_th_sel0 Threshold in mV for battOk signal sel0 - * Resolution in 50 mV step. - * @battok_raising_th_sel1 Threshold in mV for battOk signal sel1 - * Resolution in 50 mV step. - * @user_cap_limit Capacity reported from user must be within this - * limit to be considered as sane, in percentage - * points. 
- * @maint_thres This is the threshold where we stop reporting - * battery full while in maintenance, in per cent - * @pcut_enable: Enable power cut feature in ab8505 - * @pcut_max_time: Max time threshold - * @pcut_flag_time: Flagtime threshold - * @pcut_max_restart: Max number of restarts - * @pcut_debounce_time: Sets battery debounce time - */ -struct abx500_fg_parameters { - int recovery_sleep_timer; - int recovery_total_time; - int init_timer; - int init_discard_time; - int init_total_time; - int high_curr_time; - int accu_charging; - int accu_high_curr; - int high_curr_threshold; - int lowbat_threshold; - int overbat_threshold; - int battok_falling_th_sel0; - int battok_raising_th_sel1; - int user_cap_limit; - int maint_thres; - bool pcut_enable; - u8 pcut_max_time; - u8 pcut_flag_time; - u8 pcut_max_restart; - u8 pcut_debounce_time; -}; - -/** - * struct abx500_charger_maximization - struct used by the board config. - * @use_maxi: Enable maximization for this battery type - * @maxi_chg_curr: Maximum charger current allowed - * @maxi_wait_cycles: cycles to wait before setting charger current - * @charger_curr_step delta between two charger current settings (mA) - */ -struct abx500_maxim_parameters { - bool ena_maxi; - int chg_curr; - int wait_cycles; - int charger_curr_step; -}; - -/** - * struct abx500_battery_type - different batteries supported - * @name: battery technology - * @resis_high: battery upper resistance limit - * @resis_low: battery lower resistance limit - * @charge_full_design: Maximum battery capacity in mAh - * @nominal_voltage: Nominal voltage of the battery in mV - * @termination_vol: max voltage upto which battery can be charged - * @termination_curr battery charging termination current in mA - * @recharge_cap battery capacity limit that will trigger a new - * full charging cycle in the case where maintenan- - * -ce charging has been disabled - * @normal_cur_lvl: charger current in normal state in mA - * @normal_vol_lvl: charger voltage in normal state in mV - * @maint_a_cur_lvl: charger current in maintenance A state in mA - * @maint_a_vol_lvl: charger voltage in maintenance A state in mV - * @maint_a_chg_timer_h: charge time in maintenance A state - * @maint_b_cur_lvl: charger current in maintenance B state in mA - * @maint_b_vol_lvl: charger voltage in maintenance B state in mV - * @maint_b_chg_timer_h: charge time in maintenance B state - * @low_high_cur_lvl: charger current in temp low/high state in mA - * @low_high_vol_lvl: charger voltage in temp low/high state in mV' - * @battery_resistance: battery inner resistance in mOhm. 
- * @n_r_t_tbl_elements: number of elements in r_to_t_tbl - * @r_to_t_tbl: table containing resistance to temp points - * @n_v_cap_tbl_elements: number of elements in v_to_cap_tbl - * @v_to_cap_tbl: Voltage to capacity (in %) table - * @n_batres_tbl_elements number of elements in the batres_tbl - * @batres_tbl battery internal resistance vs temperature table - */ -struct abx500_battery_type { - int name; - int resis_high; - int resis_low; - int charge_full_design; - int nominal_voltage; - int termination_vol; - int termination_curr; - int recharge_cap; - int normal_cur_lvl; - int normal_vol_lvl; - int maint_a_cur_lvl; - int maint_a_vol_lvl; - int maint_a_chg_timer_h; - int maint_b_cur_lvl; - int maint_b_vol_lvl; - int maint_b_chg_timer_h; - int low_high_cur_lvl; - int low_high_vol_lvl; - int battery_resistance; - int n_temp_tbl_elements; - const struct abx500_res_to_temp *r_to_t_tbl; - int n_v_cap_tbl_elements; - const struct abx500_v_to_cap *v_to_cap_tbl; - int n_batres_tbl_elements; - const struct batres_vs_temp *batres_tbl; -}; - -/** - * struct abx500_bm_capacity_levels - abx500 capacity level data - * @critical: critical capacity level in percent - * @low: low capacity level in percent - * @normal: normal capacity level in percent - * @high: high capacity level in percent - * @full: full capacity level in percent - */ -struct abx500_bm_capacity_levels { - int critical; - int low; - int normal; - int high; - int full; -}; - -/** - * struct abx500_bm_charger_parameters - Charger specific parameters - * @usb_volt_max: maximum allowed USB charger voltage in mV - * @usb_curr_max: maximum allowed USB charger current in mA - * @ac_volt_max: maximum allowed AC charger voltage in mV - * @ac_curr_max: maximum allowed AC charger current in mA - */ -struct abx500_bm_charger_parameters { - int usb_volt_max; - int usb_curr_max; - int ac_volt_max; - int ac_curr_max; -}; - -/** - * struct abx500_bm_data - abx500 battery management data - * @temp_under under this temp, charging is stopped - * @temp_low between this temp and temp_under charging is reduced - * @temp_high between this temp and temp_over charging is reduced - * @temp_over over this temp, charging is stopped - * @temp_now present battery temperature - * @temp_interval_chg temperature measurement interval in s when charging - * @temp_interval_nochg temperature measurement interval in s when not charging - * @main_safety_tmr_h safety timer for main charger - * @usb_safety_tmr_h safety timer for usb charger - * @bkup_bat_v voltage which we charge the backup battery with - * @bkup_bat_i current which we charge the backup battery with - * @no_maintenance indicates that maintenance charging is disabled - * @capacity_scaling indicates whether capacity scaling is to be used - * @abx500_adc_therm placement of thermistor, batctrl or battemp adc - * @chg_unknown_bat flag to enable charging of unknown batteries - * @enable_overshoot flag to enable VBAT overshoot control - * @auto_trig flag to enable auto adc trigger - * @fg_res resistance of FG resistor in 0.1mOhm - * @n_btypes number of elements in array bat_type - * @batt_id index of the identified battery in array bat_type - * @interval_charging charge alg cycle period time when charging (sec) - * @interval_not_charging charge alg cycle period time when not charging (sec) - * @temp_hysteresis temperature hysteresis - * @gnd_lift_resistance Battery ground to phone ground resistance (mOhm) - * @n_chg_out_curr number of elements in array chg_output_curr - * @n_chg_in_curr number of elements in array 
chg_input_curr - * @chg_output_curr charger output current level map - * @chg_input_curr charger input current level map - * @maxi maximization parameters - * @cap_levels capacity in percent for the different capacity levels - * @bat_type table of supported battery types - * @chg_params charger parameters - * @fg_params fuel gauge parameters - */ -struct abx500_bm_data { - int temp_under; - int temp_low; - int temp_high; - int temp_over; - int temp_now; - int temp_interval_chg; - int temp_interval_nochg; - int main_safety_tmr_h; - int usb_safety_tmr_h; - int bkup_bat_v; - int bkup_bat_i; - bool autopower_cfg; - bool ac_enabled; - bool usb_enabled; - bool no_maintenance; - bool capacity_scaling; - bool chg_unknown_bat; - bool enable_overshoot; - bool auto_trig; - enum abx500_adc_therm adc_therm; - int fg_res; - int n_btypes; - int batt_id; - int interval_charging; - int interval_not_charging; - int temp_hysteresis; - int gnd_lift_resistance; - int n_chg_out_curr; - int n_chg_in_curr; - int *chg_output_curr; - int *chg_input_curr; - const struct abx500_maxim_parameters *maxi; - const struct abx500_bm_capacity_levels *cap_levels; - struct abx500_battery_type *bat_type; - const struct abx500_bm_charger_parameters *chg_params; - const struct abx500_fg_parameters *fg_params; -}; - -enum { - NTC_EXTERNAL = 0, - NTC_INTERNAL, -}; - -int ab8500_bm_of_probe(struct device *dev, - struct device_node *np, - struct abx500_bm_data *bm); - int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg, u8 value); int abx500_get_register_interruptible(struct device *dev, u8 bank, u8 reg, diff --git a/include/linux/mfd/abx500/ab8500-bm.h b/include/linux/mfd/abx500/ab8500-bm.h deleted file mode 100644 index 903e94c189d8..000000000000 --- a/include/linux/mfd/abx500/ab8500-bm.h +++ /dev/null @@ -1,476 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright ST-Ericsson 2012. - * - * Author: Arun Murthy <arun.murthy@stericsson.com> - */ - -#ifndef _AB8500_BM_H -#define _AB8500_BM_H - -#include <linux/kernel.h> -#include <linux/mfd/abx500.h> - -/* - * System control 2 register offsets. 
- * bank = 0x02 - */ -#define AB8500_MAIN_WDOG_CTRL_REG 0x01 -#define AB8500_LOW_BAT_REG 0x03 -#define AB8500_BATT_OK_REG 0x04 -/* - * USB/ULPI register offsets - * Bank : 0x5 - */ -#define AB8500_USB_LINE_STAT_REG 0x80 -#define AB8500_USB_LINE_CTRL2_REG 0x82 -#define AB8500_USB_LINK1_STAT_REG 0x94 - -/* - * Charger / status register offfsets - * Bank : 0x0B - */ -#define AB8500_CH_STATUS1_REG 0x00 -#define AB8500_CH_STATUS2_REG 0x01 -#define AB8500_CH_USBCH_STAT1_REG 0x02 -#define AB8500_CH_USBCH_STAT2_REG 0x03 -#define AB8540_CH_USBCH_STAT3_REG 0x04 -#define AB8500_CH_STAT_REG 0x05 - -/* - * Charger / control register offfsets - * Bank : 0x0B - */ -#define AB8500_CH_VOLT_LVL_REG 0x40 -#define AB8500_CH_VOLT_LVL_MAX_REG 0x41 /*Only in Cut2.0*/ -#define AB8500_CH_OPT_CRNTLVL_REG 0x42 -#define AB8500_CH_OPT_CRNTLVL_MAX_REG 0x43 /*Only in Cut2.0*/ -#define AB8500_CH_WD_TIMER_REG 0x50 -#define AB8500_CHARG_WD_CTRL 0x51 -#define AB8500_BTEMP_HIGH_TH 0x52 -#define AB8500_LED_INDICATOR_PWM_CTRL 0x53 -#define AB8500_LED_INDICATOR_PWM_DUTY 0x54 -#define AB8500_BATT_OVV 0x55 -#define AB8500_CHARGER_CTRL 0x56 -#define AB8500_BAT_CTRL_CURRENT_SOURCE 0x60 /*Only in Cut2.0*/ - -/* - * Charger / main control register offsets - * Bank : 0x0B - */ -#define AB8500_MCH_CTRL1 0x80 -#define AB8500_MCH_CTRL2 0x81 -#define AB8500_MCH_IPT_CURLVL_REG 0x82 -#define AB8500_CH_WD_REG 0x83 - -/* - * Charger / USB control register offsets - * Bank : 0x0B - */ -#define AB8500_USBCH_CTRL1_REG 0xC0 -#define AB8500_USBCH_CTRL2_REG 0xC1 -#define AB8500_USBCH_IPT_CRNTLVL_REG 0xC2 -#define AB8540_USB_PP_MODE_REG 0xC5 -#define AB8540_USB_PP_CHR_REG 0xC6 - -/* - * Gas Gauge register offsets - * Bank : 0x0C - */ -#define AB8500_GASG_CC_CTRL_REG 0x00 -#define AB8500_GASG_CC_ACCU1_REG 0x01 -#define AB8500_GASG_CC_ACCU2_REG 0x02 -#define AB8500_GASG_CC_ACCU3_REG 0x03 -#define AB8500_GASG_CC_ACCU4_REG 0x04 -#define AB8500_GASG_CC_SMPL_CNTRL_REG 0x05 -#define AB8500_GASG_CC_SMPL_CNTRH_REG 0x06 -#define AB8500_GASG_CC_SMPL_CNVL_REG 0x07 -#define AB8500_GASG_CC_SMPL_CNVH_REG 0x08 -#define AB8500_GASG_CC_CNTR_AVGOFF_REG 0x09 -#define AB8500_GASG_CC_OFFSET_REG 0x0A -#define AB8500_GASG_CC_NCOV_ACCU 0x10 -#define AB8500_GASG_CC_NCOV_ACCU_CTRL 0x11 -#define AB8500_GASG_CC_NCOV_ACCU_LOW 0x12 -#define AB8500_GASG_CC_NCOV_ACCU_MED 0x13 -#define AB8500_GASG_CC_NCOV_ACCU_HIGH 0x14 - -/* - * Interrupt register offsets - * Bank : 0x0E - */ -#define AB8500_IT_SOURCE2_REG 0x01 -#define AB8500_IT_SOURCE21_REG 0x14 - -/* - * RTC register offsets - * Bank: 0x0F - */ -#define AB8500_RTC_BACKUP_CHG_REG 0x0C -#define AB8500_RTC_CC_CONF_REG 0x01 -#define AB8500_RTC_CTRL_REG 0x0B -#define AB8500_RTC_CTRL1_REG 0x11 - -/* - * OTP register offsets - * Bank : 0x15 - */ -#define AB8500_OTP_CONF_15 0x0E - -/* GPADC constants from AB8500 spec, UM0836 */ -#define ADC_RESOLUTION 1024 -#define ADC_CH_MAIN_MIN 0 -#define ADC_CH_MAIN_MAX 20030 -#define ADC_CH_VBUS_MIN 0 -#define ADC_CH_VBUS_MAX 20030 -#define ADC_CH_VBAT_MIN 2300 -#define ADC_CH_VBAT_MAX 4800 -#define ADC_CH_BKBAT_MIN 0 -#define ADC_CH_BKBAT_MAX 3200 - -/* Main charge i/p current */ -#define MAIN_CH_IP_CUR_0P9A 0x80 -#define MAIN_CH_IP_CUR_1P0A 0x90 -#define MAIN_CH_IP_CUR_1P1A 0xA0 -#define MAIN_CH_IP_CUR_1P2A 0xB0 -#define MAIN_CH_IP_CUR_1P3A 0xC0 -#define MAIN_CH_IP_CUR_1P4A 0xD0 -#define MAIN_CH_IP_CUR_1P5A 0xE0 - -/* ChVoltLevel */ -#define CH_VOL_LVL_3P5 0x00 -#define CH_VOL_LVL_4P0 0x14 -#define CH_VOL_LVL_4P05 0x16 -#define CH_VOL_LVL_4P1 0x1B -#define CH_VOL_LVL_4P15 0x20 -#define 
CH_VOL_LVL_4P2 0x25 -#define CH_VOL_LVL_4P6 0x4D - -/* ChOutputCurrentLevel */ -#define CH_OP_CUR_LVL_0P1 0x00 -#define CH_OP_CUR_LVL_0P2 0x01 -#define CH_OP_CUR_LVL_0P3 0x02 -#define CH_OP_CUR_LVL_0P4 0x03 -#define CH_OP_CUR_LVL_0P5 0x04 -#define CH_OP_CUR_LVL_0P6 0x05 -#define CH_OP_CUR_LVL_0P7 0x06 -#define CH_OP_CUR_LVL_0P8 0x07 -#define CH_OP_CUR_LVL_0P9 0x08 -#define CH_OP_CUR_LVL_1P4 0x0D -#define CH_OP_CUR_LVL_1P5 0x0E -#define CH_OP_CUR_LVL_1P6 0x0F -#define CH_OP_CUR_LVL_2P 0x3F - -/* BTEMP High thermal limits */ -#define BTEMP_HIGH_TH_57_0 0x00 -#define BTEMP_HIGH_TH_52 0x01 -#define BTEMP_HIGH_TH_57_1 0x02 -#define BTEMP_HIGH_TH_62 0x03 - -/* current is mA */ -#define USB_0P1A 100 -#define USB_0P2A 200 -#define USB_0P3A 300 -#define USB_0P4A 400 -#define USB_0P5A 500 - -#define LOW_BAT_3P1V 0x20 -#define LOW_BAT_2P3V 0x00 -#define LOW_BAT_RESET 0x01 -#define LOW_BAT_ENABLE 0x01 - -/* Backup battery constants */ -#define BUP_ICH_SEL_50UA 0x00 -#define BUP_ICH_SEL_150UA 0x04 -#define BUP_ICH_SEL_300UA 0x08 -#define BUP_ICH_SEL_700UA 0x0C - -enum bup_vch_sel { - BUP_VCH_SEL_2P5V, - BUP_VCH_SEL_2P6V, - BUP_VCH_SEL_2P8V, - BUP_VCH_SEL_3P1V, - /* - * Note that the following 5 values 2.7v, 2.9v, 3.0v, 3.2v, 3.3v - * are only available on ab8540. You can't choose these 5 - * voltage on ab8500/ab8505/ab9540. - */ - BUP_VCH_SEL_2P7V, - BUP_VCH_SEL_2P9V, - BUP_VCH_SEL_3P0V, - BUP_VCH_SEL_3P2V, - BUP_VCH_SEL_3P3V, -}; - -#define BUP_VCH_RANGE 0x02 -#define VBUP33_VRTCN 0x01 - -/* Battery OVV constants */ -#define BATT_OVV_ENA 0x02 -#define BATT_OVV_TH_3P7 0x00 -#define BATT_OVV_TH_4P75 0x01 - -/* A value to indicate over voltage */ -#define BATT_OVV_VALUE 4750 - -/* VBUS OVV constants */ -#define VBUS_OVV_SELECT_MASK 0x78 -#define VBUS_OVV_SELECT_5P6V 0x00 -#define VBUS_OVV_SELECT_5P7V 0x08 -#define VBUS_OVV_SELECT_5P8V 0x10 -#define VBUS_OVV_SELECT_5P9V 0x18 -#define VBUS_OVV_SELECT_6P0V 0x20 -#define VBUS_OVV_SELECT_6P1V 0x28 -#define VBUS_OVV_SELECT_6P2V 0x30 -#define VBUS_OVV_SELECT_6P3V 0x38 - -#define VBUS_AUTO_IN_CURR_LIM_ENA 0x04 - -/* Fuel Gauge constants */ -#define RESET_ACCU 0x02 -#define READ_REQ 0x01 -#define CC_DEEP_SLEEP_ENA 0x02 -#define CC_PWR_UP_ENA 0x01 -#define CC_SAMPLES_40 0x28 -#define RD_NCONV_ACCU_REQ 0x01 -#define CC_CALIB 0x08 -#define CC_INTAVGOFFSET_ENA 0x10 -#define CC_MUXOFFSET 0x80 -#define CC_INT_CAL_N_AVG_MASK 0x60 -#define CC_INT_CAL_SAMPLES_16 0x40 -#define CC_INT_CAL_SAMPLES_8 0x20 -#define CC_INT_CAL_SAMPLES_4 0x00 - -/* RTC constants */ -#define RTC_BUP_CH_ENA 0x10 - -/* BatCtrl Current Source Constants */ -#define BAT_CTRL_7U_ENA 0x01 -#define BAT_CTRL_20U_ENA 0x02 -#define BAT_CTRL_18U_ENA 0x01 -#define BAT_CTRL_16U_ENA 0x02 -#define BAT_CTRL_CMP_ENA 0x04 -#define FORCE_BAT_CTRL_CMP_HIGH 0x08 -#define BAT_CTRL_PULL_UP_ENA 0x10 - -/* Battery type */ -#define BATTERY_UNKNOWN 00 - -/* Registers for pcut feature in ab8505 and ab9540 */ -#define AB8505_RTC_PCUT_CTL_STATUS_REG 0x12 -#define AB8505_RTC_PCUT_TIME_REG 0x13 -#define AB8505_RTC_PCUT_MAX_TIME_REG 0x14 -#define AB8505_RTC_PCUT_FLAG_TIME_REG 0x15 -#define AB8505_RTC_PCUT_RESTART_REG 0x16 -#define AB8505_RTC_PCUT_DEBOUNCE_REG 0x17 - -/* USB Power Path constants for ab8540 */ -#define BUS_VSYS_VOL_SELECT_MASK 0x06 -#define BUS_VSYS_VOL_SELECT_3P6V 0x00 -#define BUS_VSYS_VOL_SELECT_3P325V 0x02 -#define BUS_VSYS_VOL_SELECT_3P9V 0x04 -#define BUS_VSYS_VOL_SELECT_4P3V 0x06 -#define BUS_POWER_PATH_MODE_ENA 0x01 -#define BUS_PP_PRECHG_CURRENT_MASK 0x0E -#define BUS_POWER_PATH_PRECHG_ENA 0x01 - -/** - * 
struct res_to_temp - defines one point in a temp to res curve. To - * be used in battery packs that combines the identification resistor with a - * NTC resistor. - * @temp: battery pack temperature in Celsius - * @resist: NTC resistor net total resistance - */ -struct res_to_temp { - int temp; - int resist; -}; - -/** - * struct batres_vs_temp - defines one point in a temp vs battery internal - * resistance curve. - * @temp: battery pack temperature in Celsius - * @resist: battery internal reistance in mOhm - */ -struct batres_vs_temp { - int temp; - int resist; -}; - -/* Forward declaration */ -struct ab8500_fg; - -/** - * struct ab8500_fg_parameters - Fuel gauge algorithm parameters, in seconds - * if not specified - * @recovery_sleep_timer: Time between measurements while recovering - * @recovery_total_time: Total recovery time - * @init_timer: Measurement interval during startup - * @init_discard_time: Time we discard voltage measurement at startup - * @init_total_time: Total init time during startup - * @high_curr_time: Time current has to be high to go to recovery - * @accu_charging: FG accumulation time while charging - * @accu_high_curr: FG accumulation time in high current mode - * @high_curr_threshold: High current threshold, in mA - * @lowbat_threshold: Low battery threshold, in mV - * @battok_falling_th_sel0 Threshold in mV for battOk signal sel0 - * Resolution in 50 mV step. - * @battok_raising_th_sel1 Threshold in mV for battOk signal sel1 - * Resolution in 50 mV step. - * @user_cap_limit Capacity reported from user must be within this - * limit to be considered as sane, in percentage - * points. - * @maint_thres This is the threshold where we stop reporting - * battery full while in maintenance, in per cent - * @pcut_enable: Enable power cut feature in ab8505 - * @pcut_max_time: Max time threshold - * @pcut_flag_time: Flagtime threshold - * @pcut_max_restart: Max number of restarts - * @pcut_debunce_time: Sets battery debounce time - */ -struct ab8500_fg_parameters { - int recovery_sleep_timer; - int recovery_total_time; - int init_timer; - int init_discard_time; - int init_total_time; - int high_curr_time; - int accu_charging; - int accu_high_curr; - int high_curr_threshold; - int lowbat_threshold; - int battok_falling_th_sel0; - int battok_raising_th_sel1; - int user_cap_limit; - int maint_thres; - bool pcut_enable; - u8 pcut_max_time; - u8 pcut_flag_time; - u8 pcut_max_restart; - u8 pcut_debunce_time; -}; - -/** - * struct ab8500_charger_maximization - struct used by the board config. 
- * @use_maxi: Enable maximization for this battery type - * @maxi_chg_curr: Maximum charger current allowed - * @maxi_wait_cycles: cycles to wait before setting charger current - * @charger_curr_step delta between two charger current settings (mA) - */ -struct ab8500_maxim_parameters { - bool ena_maxi; - int chg_curr; - int wait_cycles; - int charger_curr_step; -}; - -/** - * struct ab8500_bm_capacity_levels - ab8500 capacity level data - * @critical: critical capacity level in percent - * @low: low capacity level in percent - * @normal: normal capacity level in percent - * @high: high capacity level in percent - * @full: full capacity level in percent - */ -struct ab8500_bm_capacity_levels { - int critical; - int low; - int normal; - int high; - int full; -}; - -/** - * struct ab8500_bm_charger_parameters - Charger specific parameters - * @usb_volt_max: maximum allowed USB charger voltage in mV - * @usb_curr_max: maximum allowed USB charger current in mA - * @ac_volt_max: maximum allowed AC charger voltage in mV - * @ac_curr_max: maximum allowed AC charger current in mA - */ -struct ab8500_bm_charger_parameters { - int usb_volt_max; - int usb_curr_max; - int ac_volt_max; - int ac_curr_max; -}; - -/** - * struct ab8500_bm_data - ab8500 battery management data - * @temp_under under this temp, charging is stopped - * @temp_low between this temp and temp_under charging is reduced - * @temp_high between this temp and temp_over charging is reduced - * @temp_over over this temp, charging is stopped - * @temp_interval_chg temperature measurement interval in s when charging - * @temp_interval_nochg temperature measurement interval in s when not charging - * @main_safety_tmr_h safety timer for main charger - * @usb_safety_tmr_h safety timer for usb charger - * @bkup_bat_v voltage which we charge the backup battery with - * @bkup_bat_i current which we charge the backup battery with - * @no_maintenance indicates that maintenance charging is disabled - * @capacity_scaling indicates whether capacity scaling is to be used - * @adc_therm placement of thermistor, batctrl or battemp adc - * @chg_unknown_bat flag to enable charging of unknown batteries - * @enable_overshoot flag to enable VBAT overshoot control - * @fg_res resistance of FG resistor in 0.1mOhm - * @n_btypes number of elements in array bat_type - * @batt_id index of the identified battery in array bat_type - * @interval_charging charge alg cycle period time when charging (sec) - * @interval_not_charging charge alg cycle period time when not charging (sec) - * @temp_hysteresis temperature hysteresis - * @gnd_lift_resistance Battery ground to phone ground resistance (mOhm) - * @maxi: maximization parameters - * @cap_levels capacity in percent for the different capacity levels - * @bat_type table of supported battery types - * @chg_params charger parameters - * @fg_params fuel gauge parameters - */ -struct ab8500_bm_data { - int temp_under; - int temp_low; - int temp_high; - int temp_over; - int temp_interval_chg; - int temp_interval_nochg; - int main_safety_tmr_h; - int usb_safety_tmr_h; - int bkup_bat_v; - int bkup_bat_i; - bool no_maintenance; - bool capacity_scaling; - bool chg_unknown_bat; - bool enable_overshoot; - enum abx500_adc_therm adc_therm; - int fg_res; - int n_btypes; - int batt_id; - int interval_charging; - int interval_not_charging; - int temp_hysteresis; - int gnd_lift_resistance; - const struct ab8500_maxim_parameters *maxi; - const struct ab8500_bm_capacity_levels *cap_levels; - const struct ab8500_bm_charger_parameters 
*chg_params; - const struct ab8500_fg_parameters *fg_params; -}; - -struct ab8500_btemp; -struct ab8500_gpadc; -struct ab8500_fg; - -#ifdef CONFIG_AB8500_BM -extern struct abx500_bm_data ab8500_bm_data; - -void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA); -struct ab8500_btemp *ab8500_btemp_get(void); -int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp); -int ab8500_btemp_get_temp(struct ab8500_btemp *btemp); -struct ab8500_fg *ab8500_fg_get(void); -int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev); -int ab8500_fg_inst_curr_start(struct ab8500_fg *di); -int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res); -int ab8500_fg_inst_curr_started(struct ab8500_fg *di); -int ab8500_fg_inst_curr_done(struct ab8500_fg *di); - -#else -static struct abx500_bm_data ab8500_bm_data; -#endif -#endif /* _AB8500_BM_H */ diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h deleted file mode 100644 index 9b97d284d0ce..000000000000 --- a/include/linux/mfd/abx500/ux500_chargalg.h +++ /dev/null @@ -1,51 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) ST-Ericsson SA 2012 - * Author: Johan Gardsmark <johan.gardsmark@stericsson.com> for ST-Ericsson. - */ - -#ifndef _UX500_CHARGALG_H -#define _UX500_CHARGALG_H - -#include <linux/power_supply.h> - -/* - * Valid only for supplies of type: - * - POWER_SUPPLY_TYPE_MAINS, - * - POWER_SUPPLY_TYPE_USB, - * because only them store as drv_data pointer to struct ux500_charger. - */ -#define psy_to_ux500_charger(x) power_supply_get_drvdata(psy) - -/* Forward declaration */ -struct ux500_charger; - -struct ux500_charger_ops { - int (*enable) (struct ux500_charger *, int, int, int); - int (*check_enable) (struct ux500_charger *, int, int); - int (*kick_wd) (struct ux500_charger *); - int (*update_curr) (struct ux500_charger *, int); -}; - -/** - * struct ux500_charger - power supply ux500 charger sub class - * @psy power supply base class - * @ops ux500 charger operations - * @max_out_volt maximum output charger voltage in mV - * @max_out_curr maximum output charger current in mA - * @enabled indicates if this charger is used or not - * @external external charger unit (pm2xxx) - */ -struct ux500_charger { - struct power_supply *psy; - struct ux500_charger_ops ops; - int max_out_volt; - int max_out_curr; - int wdt_refresh; - bool enabled; - bool external; -}; - -extern struct blocking_notifier_head charger_notifier_list; - -#endif diff --git a/include/linux/mfd/atc260x/atc2603c.h b/include/linux/mfd/atc260x/atc2603c.h new file mode 100644 index 000000000000..07ac640ef3e1 --- /dev/null +++ b/include/linux/mfd/atc260x/atc2603c.h @@ -0,0 +1,281 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * ATC2603C PMIC register definitions + * + * Copyright (C) 2020 Cristian Ciocaltea <cristian.ciocaltea@gmail.com> + */ + +#ifndef __LINUX_MFD_ATC260X_ATC2603C_H +#define __LINUX_MFD_ATC260X_ATC2603C_H + +enum atc2603c_irq_def { + ATC2603C_IRQ_AUDIO = 0, + ATC2603C_IRQ_OV, + ATC2603C_IRQ_OC, + ATC2603C_IRQ_OT, + ATC2603C_IRQ_UV, + ATC2603C_IRQ_ALARM, + ATC2603C_IRQ_ONOFF, + ATC2603C_IRQ_SGPIO, + ATC2603C_IRQ_IR, + ATC2603C_IRQ_REMCON, + ATC2603C_IRQ_POWER_IN, +}; + +/* PMU Registers */ +#define ATC2603C_PMU_SYS_CTL0 0x00 +#define ATC2603C_PMU_SYS_CTL1 0x01 +#define ATC2603C_PMU_SYS_CTL2 0x02 +#define ATC2603C_PMU_SYS_CTL3 0x03 +#define ATC2603C_PMU_SYS_CTL4 0x04 +#define ATC2603C_PMU_SYS_CTL5 0x05 +#define ATC2603C_PMU_SYS_CTL6 0x06 +#define ATC2603C_PMU_SYS_CTL7 0x07 +#define 
ATC2603C_PMU_SYS_CTL8 0x08 +#define ATC2603C_PMU_SYS_CTL9 0x09 +#define ATC2603C_PMU_BAT_CTL0 0x0A +#define ATC2603C_PMU_BAT_CTL1 0x0B +#define ATC2603C_PMU_VBUS_CTL0 0x0C +#define ATC2603C_PMU_VBUS_CTL1 0x0D +#define ATC2603C_PMU_WALL_CTL0 0x0E +#define ATC2603C_PMU_WALL_CTL1 0x0F +#define ATC2603C_PMU_SYS_PENDING 0x10 +#define ATC2603C_PMU_DC1_CTL0 0x11 +#define ATC2603C_PMU_DC1_CTL1 0x12 // Undocumented +#define ATC2603C_PMU_DC1_CTL2 0x13 // Undocumented +#define ATC2603C_PMU_DC2_CTL0 0x14 +#define ATC2603C_PMU_DC2_CTL1 0x15 // Undocumented +#define ATC2603C_PMU_DC2_CTL2 0x16 // Undocumented +#define ATC2603C_PMU_DC3_CTL0 0x17 +#define ATC2603C_PMU_DC3_CTL1 0x18 // Undocumented +#define ATC2603C_PMU_DC3_CTL2 0x19 // Undocumented +#define ATC2603C_PMU_DC4_CTL0 0x1A // Undocumented +#define ATC2603C_PMU_DC4_CTL1 0x1B // Undocumented +#define ATC2603C_PMU_DC5_CTL0 0x1C // Undocumented +#define ATC2603C_PMU_DC5_CTL1 0x1D // Undocumented +#define ATC2603C_PMU_LDO1_CTL 0x1E +#define ATC2603C_PMU_LDO2_CTL 0x1F +#define ATC2603C_PMU_LDO3_CTL 0x20 +#define ATC2603C_PMU_LDO4_CTL 0x21 // Undocumented +#define ATC2603C_PMU_LDO5_CTL 0x22 +#define ATC2603C_PMU_LDO6_CTL 0x23 +#define ATC2603C_PMU_LDO7_CTL 0x24 +#define ATC2603C_PMU_LDO8_CTL 0x25 // Undocumented +#define ATC2603C_PMU_LDO9_CTL 0x26 // Undocumented +#define ATC2603C_PMU_LDO10_CTL 0x27 // Undocumented +#define ATC2603C_PMU_LDO11_CTL 0x28 +#define ATC2603C_PMU_SWITCH_CTL 0x29 +#define ATC2603C_PMU_OV_CTL0 0x2A +#define ATC2603C_PMU_OV_CTL1 0x2B +#define ATC2603C_PMU_OV_STATUS 0x2C +#define ATC2603C_PMU_OV_EN 0x2D +#define ATC2603C_PMU_OV_INT_EN 0x2E +#define ATC2603C_PMU_OC_CTL 0x2F +#define ATC2603C_PMU_OC_STATUS 0x30 +#define ATC2603C_PMU_OC_EN 0x31 +#define ATC2603C_PMU_OC_INT_EN 0x32 +#define ATC2603C_PMU_UV_CTL0 0x33 +#define ATC2603C_PMU_UV_CTL1 0x34 +#define ATC2603C_PMU_UV_STATUS 0x35 +#define ATC2603C_PMU_UV_EN 0x36 +#define ATC2603C_PMU_UV_INT_EN 0x37 +#define ATC2603C_PMU_OT_CTL 0x38 +#define ATC2603C_PMU_CHARGER_CTL0 0x39 +#define ATC2603C_PMU_CHARGER_CTL1 0x3A +#define ATC2603C_PMU_CHARGER_CTL2 0x3B +#define ATC2603C_PMU_BAKCHARGER_CTL 0x3C // Undocumented +#define ATC2603C_PMU_APDS_CTL 0x3D +#define ATC2603C_PMU_AUXADC_CTL0 0x3E +#define ATC2603C_PMU_AUXADC_CTL1 0x3F +#define ATC2603C_PMU_BATVADC 0x40 +#define ATC2603C_PMU_BATIADC 0x41 +#define ATC2603C_PMU_WALLVADC 0x42 +#define ATC2603C_PMU_WALLIADC 0x43 +#define ATC2603C_PMU_VBUSVADC 0x44 +#define ATC2603C_PMU_VBUSIADC 0x45 +#define ATC2603C_PMU_SYSPWRADC 0x46 +#define ATC2603C_PMU_REMCONADC 0x47 +#define ATC2603C_PMU_SVCCADC 0x48 +#define ATC2603C_PMU_CHGIADC 0x49 +#define ATC2603C_PMU_IREFADC 0x4A +#define ATC2603C_PMU_BAKBATADC 0x4B +#define ATC2603C_PMU_ICTEMPADC 0x4C +#define ATC2603C_PMU_AUXADC0 0x4D +#define ATC2603C_PMU_AUXADC1 0x4E +#define ATC2603C_PMU_AUXADC2 0x4F +#define ATC2603C_PMU_ICMADC 0x50 +#define ATC2603C_PMU_BDG_CTL 0x51 // Undocumented +#define ATC2603C_RTC_CTL 0x52 +#define ATC2603C_RTC_MSALM 0x53 +#define ATC2603C_RTC_HALM 0x54 +#define ATC2603C_RTC_YMDALM 0x55 +#define ATC2603C_RTC_MS 0x56 +#define ATC2603C_RTC_H 0x57 +#define ATC2603C_RTC_DC 0x58 +#define ATC2603C_RTC_YMD 0x59 +#define ATC2603C_EFUSE_DAT 0x5A // Undocumented +#define ATC2603C_EFUSECRTL1 0x5B // Undocumented +#define ATC2603C_EFUSECRTL2 0x5C // Undocumented +#define ATC2603C_PMU_FW_USE0 0x5D // Undocumented +#define ATC2603C_PMU_FW_USE1 0x5E // Undocumented +#define ATC2603C_PMU_FW_USE2 0x5F // Undocumented +#define ATC2603C_PMU_FW_USE3 0x60 // Undocumented +#define 
ATC2603C_PMU_FW_USE4 0x61 // Undocumented +#define ATC2603C_PMU_ABNORMAL_STATUS 0x62 +#define ATC2603C_PMU_WALL_APDS_CTL 0x63 +#define ATC2603C_PMU_REMCON_CTL0 0x64 +#define ATC2603C_PMU_REMCON_CTL1 0x65 +#define ATC2603C_PMU_MUX_CTL0 0x66 +#define ATC2603C_PMU_SGPIO_CTL0 0x67 +#define ATC2603C_PMU_SGPIO_CTL1 0x68 +#define ATC2603C_PMU_SGPIO_CTL2 0x69 +#define ATC2603C_PMU_SGPIO_CTL3 0x6A +#define ATC2603C_PMU_SGPIO_CTL4 0x6B +#define ATC2603C_PWMCLK_CTL 0x6C +#define ATC2603C_PWM0_CTL 0x6D +#define ATC2603C_PWM1_CTL 0x6E +#define ATC2603C_PMU_ADC_DBG0 0x70 +#define ATC2603C_PMU_ADC_DBG1 0x71 +#define ATC2603C_PMU_ADC_DBG2 0x72 +#define ATC2603C_PMU_ADC_DBG3 0x73 +#define ATC2603C_PMU_ADC_DBG4 0x74 +#define ATC2603C_IRC_CTL 0x80 +#define ATC2603C_IRC_STAT 0x81 +#define ATC2603C_IRC_CC 0x82 +#define ATC2603C_IRC_KDC 0x83 +#define ATC2603C_IRC_WK 0x84 +#define ATC2603C_IRC_RCC 0x85 +#define ATC2603C_IRC_FILTER 0x86 + +/* AUDIO_OUT Registers */ +#define ATC2603C_AUDIOINOUT_CTL 0xA0 +#define ATC2603C_AUDIO_DEBUGOUTCTL 0xA1 +#define ATC2603C_DAC_DIGITALCTL 0xA2 +#define ATC2603C_DAC_VOLUMECTL0 0xA3 +#define ATC2603C_DAC_ANALOG0 0xA4 +#define ATC2603C_DAC_ANALOG1 0xA5 +#define ATC2603C_DAC_ANALOG2 0xA6 +#define ATC2603C_DAC_ANALOG3 0xA7 + +/* AUDIO_IN Registers */ +#define ATC2603C_ADC_DIGITALCTL 0xA8 +#define ATC2603C_ADC_HPFCTL 0xA9 +#define ATC2603C_ADC_CTL 0xAA +#define ATC2603C_AGC_CTL0 0xAB +#define ATC2603C_AGC_CTL1 0xAC // Undocumented +#define ATC2603C_AGC_CTL2 0xAD +#define ATC2603C_ADC_ANALOG0 0xAE +#define ATC2603C_ADC_ANALOG1 0xAF + +/* PCM_IF Registers */ +#define ATC2603C_PCM0_CTL 0xB0 // Undocumented +#define ATC2603C_PCM1_CTL 0xB1 // Undocumented +#define ATC2603C_PCM2_CTL 0xB2 // Undocumented +#define ATC2603C_PCMIF_CTL 0xB3 // Undocumented + +/* CMU_CONTROL Registers */ +#define ATC2603C_CMU_DEVRST 0xC1 // Undocumented + +/* INTS Registers */ +#define ATC2603C_INTS_PD 0xC8 +#define ATC2603C_INTS_MSK 0xC9 + +/* MFP Registers */ +#define ATC2603C_MFP_CTL 0xD0 +#define ATC2603C_PAD_VSEL 0xD1 // Undocumented +#define ATC2603C_GPIO_OUTEN 0xD2 +#define ATC2603C_GPIO_INEN 0xD3 +#define ATC2603C_GPIO_DAT 0xD4 +#define ATC2603C_PAD_DRV 0xD5 +#define ATC2603C_PAD_EN 0xD6 +#define ATC2603C_DEBUG_SEL 0xD7 // Undocumented +#define ATC2603C_DEBUG_IE 0xD8 // Undocumented +#define ATC2603C_DEBUG_OE 0xD9 // Undocumented +#define ATC2603C_BIST_START 0x0A // Undocumented +#define ATC2603C_BIST_RESULT 0x0B // Undocumented +#define ATC2603C_CHIP_VER 0xDC + +/* TWSI Registers */ +#define ATC2603C_SADDR 0xFF + +/* PMU_SYS_CTL0 Register Mask Bits */ +#define ATC2603C_PMU_SYS_CTL0_IR_WK_EN BIT(5) +#define ATC2603C_PMU_SYS_CTL0_RESET_WK_EN BIT(6) +#define ATC2603C_PMU_SYS_CTL0_HDSW_WK_EN BIT(7) +#define ATC2603C_PMU_SYS_CTL0_ALARM_WK_EN BIT(8) +#define ATC2603C_PMU_SYS_CTL0_REM_CON_WK_EN BIT(9) +#define ATC2603C_PMU_SYS_CTL0_RESTART_EN BIT(10) +#define ATC2603C_PMU_SYS_CTL0_SGPIOIRQ_WK_EN BIT(11) +#define ATC2603C_PMU_SYS_CTL0_ONOFF_SHORT_WK_EN BIT(12) +#define ATC2603C_PMU_SYS_CTL0_ONOFF_LONG_WK_EN BIT(13) +#define ATC2603C_PMU_SYS_CTL0_WALL_WK_EN BIT(14) +#define ATC2603C_PMU_SYS_CTL0_USB_WK_EN BIT(15) +#define ATC2603C_PMU_SYS_CTL0_WK_ALL (GENMASK(15, 5) & (~BIT(10))) + +/* PMU_SYS_CTL1 Register Mask Bits */ +#define ATC2603C_PMU_SYS_CTL1_EN_S1 BIT(0) +#define ATC2603C_PMU_SYS_CTL1_LB_S4_EN BIT(2) +#define ATC2603C_PMU_SYS_CTL1_LB_S4 GENMASK(4, 3) +#define ATC2603C_PMU_SYS_CTL1_LB_S4_3_1V BIT(4) +#define ATC2603C_PMU_SYS_CTL1_IR_WK_FLAG BIT(5) +#define ATC2603C_PMU_SYS_CTL1_RESET_WK_FLAG BIT(6) 
+#define ATC2603C_PMU_SYS_CTL1_HDSW_WK_FLAG BIT(7) +#define ATC2603C_PMU_SYS_CTL1_ALARM_WK_FLAG BIT(8) +#define ATC2603C_PMU_SYS_CTL1_REM_CON_WK_FLAG BIT(9) +#define ATC2603C_PMU_SYS_CTL1_ONOFF_PRESS_RESET_IRQ_PD BIT(10) +#define ATC2603C_PMU_SYS_CTL1_SGPIOIRQ_WK_FLAG BIT(11) +#define ATC2603C_PMU_SYS_CTL1_ONOFF_SHORT_WK_FLAG BIT(12) +#define ATC2603C_PMU_SYS_CTL1_ONOFF_LONG_WK_FLAG BIT(13) +#define ATC2603C_PMU_SYS_CTL1_WALL_WK_FLAG BIT(14) +#define ATC2603C_PMU_SYS_CTL1_USB_WK_FLAG BIT(15) + +/* PMU_SYS_CTL2 Register Mask Bits */ +#define ATC2603C_PMU_SYS_CTL2_PMU_A_EN BIT(0) +#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_INT_EN BIT(1) +#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_PD BIT(2) +#define ATC2603C_PMU_SYS_CTL2_S2TIMER GENMASK(5, 3) +#define ATC2603C_PMU_SYS_CTL2_S2_TIMER_EN BIT(6) +#define ATC2603C_PMU_SYS_CTL2_ONOFF_RESET_TIME_SEL GENMASK(8, 7) +#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_RESET_EN BIT(9) +#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS_TIME GENMASK(11, 10) +#define ATC2603C_PMU_SYS_CTL2_ONOFF_INT_EN BIT(12) +#define ATC2603C_PMU_SYS_CTL2_ONOFF_LONG_PRESS BIT(13) +#define ATC2603C_PMU_SYS_CTL2_ONOFF_SHORT_PRESS BIT(14) +#define ATC2603C_PMU_SYS_CTL2_ONOFF_PRESS BIT(15) + +/* PMU_SYS_CTL3 Register Mask Bits */ +#define ATC2603C_PMU_SYS_CTL3_S2S3TOS1_TIMER GENMASK(8, 7) +#define ATC2603C_PMU_SYS_CTL3_S2S3TOS1_TIMER_EN BIT(9) +#define ATC2603C_PMU_SYS_CTL3_S3_TIMER GENMASK(12, 10) +#define ATC2603C_PMU_SYS_CTL3_S3_TIMER_EN BIT(13) +#define ATC2603C_PMU_SYS_CTL3_EN_S3 BIT(14) +#define ATC2603C_PMU_SYS_CTL3_EN_S2 BIT(15) + +/* PMU_SYS_CTL5 Register Mask Bits */ +#define ATC2603C_PMU_SYS_CTL5_WALLWKDTEN BIT(7) +#define ATC2603C_PMU_SYS_CTL5_VBUSWKDTEN BIT(8) +#define ATC2603C_PMU_SYS_CTL5_REMCON_DECT_EN BIT(9) +#define ATC2603C_PMU_SYS_CTL5_ONOFF_8S_SEL BIT(10) + +/* INTS_MSK Register Mask Bits */ +#define ATC2603C_INTS_MSK_AUDIO BIT(0) +#define ATC2603C_INTS_MSK_OV BIT(1) +#define ATC2603C_INTS_MSK_OC BIT(2) +#define ATC2603C_INTS_MSK_OT BIT(3) +#define ATC2603C_INTS_MSK_UV BIT(4) +#define ATC2603C_INTS_MSK_ALARM BIT(5) +#define ATC2603C_INTS_MSK_ONOFF BIT(6) +#define ATC2603C_INTS_MSK_SGPIO BIT(7) +#define ATC2603C_INTS_MSK_IR BIT(8) +#define ATC2603C_INTS_MSK_REMCON BIT(9) +#define ATC2603C_INTS_MSK_POWERIN BIT(10) + +/* CMU_DEVRST Register Mask Bits */ +#define ATC2603C_CMU_DEVRST_MFP BIT(1) +#define ATC2603C_CMU_DEVRST_INTS BIT(2) +#define ATC2603C_CMU_DEVRST_AUDIO BIT(4) + +/* PAD_EN Register Mask Bits */ +#define ATC2603C_PAD_EN_EXTIRQ BIT(0) + +#endif /* __LINUX_MFD_ATC260X_ATC2603C_H */ diff --git a/include/linux/mfd/atc260x/atc2609a.h b/include/linux/mfd/atc260x/atc2609a.h new file mode 100644 index 000000000000..b957d7bd73e9 --- /dev/null +++ b/include/linux/mfd/atc260x/atc2609a.h @@ -0,0 +1,308 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * ATC2609A PMIC register definitions + * + * Copyright (C) 2019 Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> + */ + +#ifndef __LINUX_MFD_ATC260X_ATC2609A_H +#define __LINUX_MFD_ATC260X_ATC2609A_H + +enum atc2609a_irq_def { + ATC2609A_IRQ_AUDIO = 0, + ATC2609A_IRQ_OV, + ATC2609A_IRQ_OC, + ATC2609A_IRQ_OT, + ATC2609A_IRQ_UV, + ATC2609A_IRQ_ALARM, + ATC2609A_IRQ_ONOFF, + ATC2609A_IRQ_WKUP, + ATC2609A_IRQ_IR, + ATC2609A_IRQ_REMCON, + ATC2609A_IRQ_POWER_IN, +}; + +/* PMU Registers */ +#define ATC2609A_PMU_SYS_CTL0 0x00 +#define ATC2609A_PMU_SYS_CTL1 0x01 +#define ATC2609A_PMU_SYS_CTL2 0x02 +#define ATC2609A_PMU_SYS_CTL3 0x03 +#define ATC2609A_PMU_SYS_CTL4 0x04 +#define ATC2609A_PMU_SYS_CTL5 0x05 +#define 
ATC2609A_PMU_SYS_CTL6 0x06 +#define ATC2609A_PMU_SYS_CTL7 0x07 +#define ATC2609A_PMU_SYS_CTL8 0x08 +#define ATC2609A_PMU_SYS_CTL9 0x09 +#define ATC2609A_PMU_BAT_CTL0 0x0A +#define ATC2609A_PMU_BAT_CTL1 0x0B +#define ATC2609A_PMU_VBUS_CTL0 0x0C +#define ATC2609A_PMU_VBUS_CTL1 0x0D +#define ATC2609A_PMU_WALL_CTL0 0x0E +#define ATC2609A_PMU_WALL_CTL1 0x0F +#define ATC2609A_PMU_SYS_PENDING 0x10 +#define ATC2609A_PMU_APDS_CTL0 0x11 +#define ATC2609A_PMU_APDS_CTL1 0x12 +#define ATC2609A_PMU_APDS_CTL2 0x13 +#define ATC2609A_PMU_CHARGER_CTL 0x14 +#define ATC2609A_PMU_BAKCHARGER_CTL 0x15 +#define ATC2609A_PMU_SWCHG_CTL0 0x16 +#define ATC2609A_PMU_SWCHG_CTL1 0x17 +#define ATC2609A_PMU_SWCHG_CTL2 0x18 +#define ATC2609A_PMU_SWCHG_CTL3 0x19 +#define ATC2609A_PMU_SWCHG_CTL4 0x1A +#define ATC2609A_PMU_DC_OSC 0x1B +#define ATC2609A_PMU_DC0_CTL0 0x1C +#define ATC2609A_PMU_DC0_CTL1 0x1D +#define ATC2609A_PMU_DC0_CTL2 0x1E +#define ATC2609A_PMU_DC0_CTL3 0x1F +#define ATC2609A_PMU_DC0_CTL4 0x20 +#define ATC2609A_PMU_DC0_CTL5 0x21 +#define ATC2609A_PMU_DC0_CTL6 0x22 +#define ATC2609A_PMU_DC1_CTL0 0x23 +#define ATC2609A_PMU_DC1_CTL1 0x24 +#define ATC2609A_PMU_DC1_CTL2 0x25 +#define ATC2609A_PMU_DC1_CTL3 0x26 +#define ATC2609A_PMU_DC1_CTL4 0x27 +#define ATC2609A_PMU_DC1_CTL5 0x28 +#define ATC2609A_PMU_DC1_CTL6 0x29 +#define ATC2609A_PMU_DC2_CTL0 0x2A +#define ATC2609A_PMU_DC2_CTL1 0x2B +#define ATC2609A_PMU_DC2_CTL2 0x2C +#define ATC2609A_PMU_DC2_CTL3 0x2D +#define ATC2609A_PMU_DC2_CTL4 0x2E +#define ATC2609A_PMU_DC2_CTL5 0x2F +#define ATC2609A_PMU_DC2_CTL6 0x30 +#define ATC2609A_PMU_DC3_CTL0 0x31 +#define ATC2609A_PMU_DC3_CTL1 0x32 +#define ATC2609A_PMU_DC3_CTL2 0x33 +#define ATC2609A_PMU_DC3_CTL3 0x34 +#define ATC2609A_PMU_DC3_CTL4 0x35 +#define ATC2609A_PMU_DC3_CTL5 0x36 +#define ATC2609A_PMU_DC3_CTL6 0x37 +#define ATC2609A_PMU_DC_ZR 0x38 +#define ATC2609A_PMU_LDO0_CTL0 0x39 +#define ATC2609A_PMU_LDO0_CTL1 0x3A +#define ATC2609A_PMU_LDO1_CTL0 0x3B +#define ATC2609A_PMU_LDO1_CTL1 0x3C +#define ATC2609A_PMU_LDO2_CTL0 0x3D +#define ATC2609A_PMU_LDO2_CTL1 0x3E +#define ATC2609A_PMU_LDO3_CTL0 0x3F +#define ATC2609A_PMU_LDO3_CTL1 0x40 +#define ATC2609A_PMU_LDO4_CTL0 0x41 +#define ATC2609A_PMU_LDO4_CTL1 0x42 +#define ATC2609A_PMU_LDO5_CTL0 0x43 +#define ATC2609A_PMU_LDO5_CTL1 0x44 +#define ATC2609A_PMU_LDO6_CTL0 0x45 +#define ATC2609A_PMU_LDO6_CTL1 0x46 +#define ATC2609A_PMU_LDO7_CTL0 0x47 +#define ATC2609A_PMU_LDO7_CTL1 0x48 +#define ATC2609A_PMU_LDO8_CTL0 0x49 +#define ATC2609A_PMU_LDO8_CTL1 0x4A +#define ATC2609A_PMU_LDO9_CTL 0x4B +#define ATC2609A_PMU_OV_INT_EN 0x4C +#define ATC2609A_PMU_OV_STATUS 0x4D +#define ATC2609A_PMU_UV_INT_EN 0x4E +#define ATC2609A_PMU_UV_STATUS 0x4F +#define ATC2609A_PMU_OC_INT_EN 0x50 +#define ATC2609A_PMU_OC_STATUS 0x51 +#define ATC2609A_PMU_OT_CTL 0x52 +#define ATC2609A_PMU_CM_CTL0 0x53 +#define ATC2609A_PMU_FW_USE0 0x54 +#define ATC2609A_PMU_FW_USE1 0x55 +#define ATC2609A_PMU_ADC12B_I 0x56 +#define ATC2609A_PMU_ADC12B_V 0x57 +#define ATC2609A_PMU_ADC12B_DUMMY 0x58 +#define ATC2609A_PMU_AUXADC_CTL0 0x59 +#define ATC2609A_PMU_AUXADC_CTL1 0x5A +#define ATC2609A_PMU_BATVADC 0x5B +#define ATC2609A_PMU_BATIADC 0x5C +#define ATC2609A_PMU_WALLVADC 0x5D +#define ATC2609A_PMU_WALLIADC 0x5E +#define ATC2609A_PMU_VBUSVADC 0x5F +#define ATC2609A_PMU_VBUSIADC 0x60 +#define ATC2609A_PMU_SYSPWRADC 0x61 +#define ATC2609A_PMU_REMCONADC 0x62 +#define ATC2609A_PMU_SVCCADC 0x63 +#define ATC2609A_PMU_CHGIADC 0x64 +#define ATC2609A_PMU_IREFADC 0x65 +#define ATC2609A_PMU_BAKBATADC 0x66 +#define 
ATC2609A_PMU_ICTEMPADC 0x67 +#define ATC2609A_PMU_AUXADC0 0x68 +#define ATC2609A_PMU_AUXADC1 0x69 +#define ATC2609A_PMU_AUXADC2 0x6A +#define ATC2609A_PMU_AUXADC3 0x6B +#define ATC2609A_PMU_ICTEMPADC_ADJ 0x6C +#define ATC2609A_PMU_BDG_CTL 0x6D +#define ATC2609A_RTC_CTL 0x6E +#define ATC2609A_RTC_MSALM 0x6F +#define ATC2609A_RTC_HALM 0x70 +#define ATC2609A_RTC_YMDALM 0x71 +#define ATC2609A_RTC_MS 0x72 +#define ATC2609A_RTC_H 0x73 +#define ATC2609A_RTC_DC 0x74 +#define ATC2609A_RTC_YMD 0x75 +#define ATC2609A_EFUSE_DAT 0x76 +#define ATC2609A_EFUSECRTL1 0x77 +#define ATC2609A_EFUSECRTL2 0x78 +#define ATC2609A_PMU_DC4_CTL0 0x79 +#define ATC2609A_PMU_DC4_CTL1 0x7A +#define ATC2609A_PMU_DC4_CTL2 0x7B +#define ATC2609A_PMU_DC4_CTL3 0x7C +#define ATC2609A_PMU_DC4_CTL4 0x7D +#define ATC2609A_PMU_DC4_CTL5 0x7E +#define ATC2609A_PMU_DC4_CTL6 0x7F +#define ATC2609A_PMU_PWR_STATUS 0x80 +#define ATC2609A_PMU_S2_PWR 0x81 +#define ATC2609A_CLMT_CTL0 0x82 +#define ATC2609A_CLMT_DATA0 0x83 +#define ATC2609A_CLMT_DATA1 0x84 +#define ATC2609A_CLMT_DATA2 0x85 +#define ATC2609A_CLMT_DATA3 0x86 +#define ATC2609A_CLMT_ADD0 0x87 +#define ATC2609A_CLMT_ADD1 0x88 +#define ATC2609A_CLMT_OCV_TABLE 0x89 +#define ATC2609A_CLMT_R_TABLE 0x8A +#define ATC2609A_PMU_PWRON_CTL0 0x8D +#define ATC2609A_PMU_PWRON_CTL1 0x8E +#define ATC2609A_PMU_PWRON_CTL2 0x8F +#define ATC2609A_IRC_CTL 0x90 +#define ATC2609A_IRC_STAT 0x91 +#define ATC2609A_IRC_CC 0x92 +#define ATC2609A_IRC_KDC 0x93 +#define ATC2609A_IRC_WK 0x94 +#define ATC2609A_IRC_RCC 0x95 + +/* AUDIO_OUT Registers */ +#define ATC2609A_AUDIOINOUT_CTL 0xA0 +#define ATC2609A_AUDIO_DEBUGOUTCTL 0xA1 +#define ATC2609A_DAC_DIGITALCTL 0xA2 +#define ATC2609A_DAC_VOLUMECTL0 0xA3 +#define ATC2609A_DAC_ANALOG0 0xA4 +#define ATC2609A_DAC_ANALOG1 0xA5 +#define ATC2609A_DAC_ANALOG2 0xA6 +#define ATC2609A_DAC_ANALOG3 0xA7 + +/* AUDIO_IN Registers */ +#define ATC2609A_ADC_DIGITALCTL 0xA8 +#define ATC2609A_ADC_HPFCTL 0xA9 +#define ATC2609A_ADC_CTL 0xAA +#define ATC2609A_AGC_CTL0 0xAB +#define ATC2609A_AGC_CTL1 0xAC +#define ATC2609A_AGC_CTL2 0xAD +#define ATC2609A_ADC_ANALOG0 0xAE +#define ATC2609A_ADC_ANALOG1 0xAF + +/* PCM_IF Registers */ +#define ATC2609A_PCM0_CTL 0xB0 +#define ATC2609A_PCM1_CTL 0xB1 +#define ATC2609A_PCM2_CTL 0xB2 +#define ATC2609A_PCMIF_CTL 0xB3 + +/* CMU_CONTROL Registers */ +#define ATC2609A_CMU_DEVRST 0xC1 + +/* INTS Registers */ +#define ATC2609A_INTS_PD 0xC8 +#define ATC2609A_INTS_MSK 0xC9 + +/* MFP Registers */ +#define ATC2609A_MFP_CTL 0xD0 +#define ATC2609A_PAD_VSEL 0xD1 +#define ATC2609A_GPIO_OUTEN 0xD2 +#define ATC2609A_GPIO_INEN 0xD3 +#define ATC2609A_GPIO_DAT 0xD4 +#define ATC2609A_PAD_DRV 0xD5 +#define ATC2609A_PAD_EN 0xD6 +#define ATC2609A_DEBUG_SEL 0xD7 +#define ATC2609A_DEBUG_IE 0xD8 +#define ATC2609A_DEBUG_OE 0xD9 +#define ATC2609A_CHIP_VER 0xDC + +/* PWSI Registers */ +#define ATC2609A_PWSI_CTL 0xF0 +#define ATC2609A_PWSI_STATUS 0xF1 + +/* TWSI Registers */ +#define ATC2609A_SADDR 0xFF + +/* PMU_SYS_CTL0 Register Mask Bits */ +#define ATC2609A_PMU_SYS_CTL0_IR_WK_EN BIT(5) +#define ATC2609A_PMU_SYS_CTL0_RESET_WK_EN BIT(6) +#define ATC2609A_PMU_SYS_CTL0_HDSW_WK_EN BIT(7) +#define ATC2609A_PMU_SYS_CTL0_ALARM_WK_EN BIT(8) +#define ATC2609A_PMU_SYS_CTL0_REM_CON_WK_EN BIT(9) +#define ATC2609A_PMU_SYS_CTL0_RESTART_EN BIT(10) +#define ATC2609A_PMU_SYS_CTL0_WKIRQ_WK_EN BIT(11) +#define ATC2609A_PMU_SYS_CTL0_ONOFF_SHORT_WK_EN BIT(12) +#define ATC2609A_PMU_SYS_CTL0_ONOFF_LONG_WK_EN BIT(13) +#define ATC2609A_PMU_SYS_CTL0_WALL_WK_EN BIT(14) +#define 
ATC2609A_PMU_SYS_CTL0_USB_WK_EN BIT(15) +#define ATC2609A_PMU_SYS_CTL0_WK_ALL (GENMASK(15, 5) & (~BIT(10))) + +/* PMU_SYS_CTL1 Register Mask Bits */ +#define ATC2609A_PMU_SYS_CTL1_EN_S1 BIT(0) +#define ATC2609A_PMU_SYS_CTL1_LB_S4_EN BIT(2) +#define ATC2609A_PMU_SYS_CTL1_LB_S4 GENMASK(4, 3) +#define ATC2609A_PMU_SYS_CTL1_LB_S4_3_1V BIT(4) +#define ATC2609A_PMU_SYS_CTL1_IR_WK_FLAG BIT(5) +#define ATC2609A_PMU_SYS_CTL1_RESET_WK_FLAG BIT(6) +#define ATC2609A_PMU_SYS_CTL1_HDSW_WK_FLAG BIT(7) +#define ATC2609A_PMU_SYS_CTL1_ALARM_WK_FLAG BIT(8) +#define ATC2609A_PMU_SYS_CTL1_REM_CON_WK_FLAG BIT(9) +#define ATC2609A_PMU_SYS_CTL1_RESTART_WK_FLAG BIT(10) +#define ATC2609A_PMU_SYS_CTL1_WKIRQ_WK_FLAG BIT(11) +#define ATC2609A_PMU_SYS_CTL1_ONOFF_SHORT_WK_FLAG BIT(12) +#define ATC2609A_PMU_SYS_CTL1_ONOFF_LONG_WK_FLAG BIT(13) +#define ATC2609A_PMU_SYS_CTL1_WALL_WK_FLAG BIT(14) +#define ATC2609A_PMU_SYS_CTL1_USB_WK_FLAG BIT(15) + +/* PMU_SYS_CTL2 Register Mask Bits */ +#define ATC2609A_PMU_SYS_CTL2_PMU_A_EN BIT(0) +#define ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS_INT_EN BIT(1) +#define ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS_PD BIT(2) +#define ATC2609A_PMU_SYS_CTL2_S2TIMER GENMASK(5, 3) +#define ATC2609A_PMU_SYS_CTL2_S2_TIMER_EN BIT(6) +#define ATC2609A_PMU_SYS_CTL2_ONOFF_RESET_TIME_SEL GENMASK(8, 7) +#define ATC2609A_PMU_SYS_CTL2_ONOFF_RESET_EN BIT(9) +#define ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS_TIME GENMASK(11, 10) +#define ATC2609A_PMU_SYS_CTL2_ONOFF_LSP_INT_EN BIT(12) +#define ATC2609A_PMU_SYS_CTL2_ONOFF_LONG_PRESS BIT(13) +#define ATC2609A_PMU_SYS_CTL2_ONOFF_SHORT_PRESS BIT(14) +#define ATC2609A_PMU_SYS_CTL2_ONOFF_PRESS BIT(15) + +/* PMU_SYS_CTL3 Register Mask Bits */ +#define ATC2609A_PMU_SYS_CTL3_S2S3TOS1_TIMER GENMASK(8, 7) +#define ATC2609A_PMU_SYS_CTL3_S2S3TOS1_TIMER_EN BIT(9) +#define ATC2609A_PMU_SYS_CTL3_S3_TIMER GENMASK(12, 10) +#define ATC2609A_PMU_SYS_CTL3_S3_TIMER_EN BIT(13) +#define ATC2609A_PMU_SYS_CTL3_EN_S3 BIT(14) +#define ATC2609A_PMU_SYS_CTL3_EN_S2 BIT(15) + +/* PMU_SYS_CTL5 Register Mask Bits */ +#define ATC2609A_PMU_SYS_CTL5_WALLWKDTEN BIT(7) +#define ATC2609A_PMU_SYS_CTL5_VBUSWKDTEN BIT(8) +#define ATC2609A_PMU_SYS_CTL5_REMCON_DECT_EN BIT(9) +#define ATC2609A_PMU_SYS_CTL5_ONOFF_8S_SEL BIT(10) + +/* INTS_MSK Register Mask Bits */ +#define ATC2609A_INTS_MSK_AUDIO BIT(0) +#define ATC2609A_INTS_MSK_OV BIT(1) +#define ATC2609A_INTS_MSK_OC BIT(2) +#define ATC2609A_INTS_MSK_OT BIT(3) +#define ATC2609A_INTS_MSK_UV BIT(4) +#define ATC2609A_INTS_MSK_ALARM BIT(5) +#define ATC2609A_INTS_MSK_ONOFF BIT(6) +#define ATC2609A_INTS_MSK_WKUP BIT(7) +#define ATC2609A_INTS_MSK_IR BIT(8) +#define ATC2609A_INTS_MSK_REMCON BIT(9) +#define ATC2609A_INTS_MSK_POWERIN BIT(10) + +/* CMU_DEVRST Register Mask Bits */ +#define ATC2609A_CMU_DEVRST_AUDIO BIT(0) +#define ATC2609A_CMU_DEVRST_MFP BIT(1) +#define ATC2609A_CMU_DEVRST_INTS BIT(2) + +/* PAD_EN Register Mask Bits */ +#define ATC2609A_PAD_EN_EXTIRQ BIT(0) + +#endif /* __LINUX_MFD_ATC260X_ATC2609A_H */ diff --git a/include/linux/mfd/atc260x/core.h b/include/linux/mfd/atc260x/core.h new file mode 100644 index 000000000000..777b6c345d44 --- /dev/null +++ b/include/linux/mfd/atc260x/core.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Core MFD defines for ATC260x PMICs + * + * Copyright (C) 2019 Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> + * Copyright (C) 2020 Cristian Ciocaltea <cristian.ciocaltea@gmail.com> + */ + +#ifndef __LINUX_MFD_ATC260X_CORE_H +#define __LINUX_MFD_ATC260X_CORE_H + +#include <linux/mfd/atc260x/atc2603c.h> +#include 
<linux/mfd/atc260x/atc2609a.h> + +enum atc260x_type { + ATC2603A = 0, + ATC2603C, + ATC2609A, +}; + +enum atc260x_ver { + ATC260X_A = 0, + ATC260X_B, + ATC260X_C, + ATC260X_D, + ATC260X_E, + ATC260X_F, + ATC260X_G, + ATC260X_H, +}; + +struct atc260x { + struct device *dev; + + struct regmap *regmap; + const struct regmap_irq_chip *regmap_irq_chip; + struct regmap_irq_chip_data *irq_data; + + struct mutex *regmap_mutex; /* mutex for custom regmap locking */ + + const struct mfd_cell *cells; + int nr_cells; + int irq; + + enum atc260x_type ic_type; + enum atc260x_ver ic_ver; + const char *type_name; + unsigned int rev_reg; + + const struct atc260x_init_regs *init_regs; /* regs for device init */ +}; + +struct regmap_config; + +int atc260x_match_device(struct atc260x *atc260x, struct regmap_config *regmap_cfg); +int atc260x_device_probe(struct atc260x *atc260x); + +#endif /* __LINUX_MFD_ATC260X_CORE_H */ diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index 2009c4b936d9..0bc7cba798a3 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -50,7 +50,7 @@ #define MFD_DEP_LEVEL_HIGH 1 struct irq_domain; -struct property_entry; +struct software_node; /* Matches ACPI PNP id, either _HID or _CID, or ACPI _ADR */ struct mfd_cell_acpi_match { @@ -78,8 +78,8 @@ struct mfd_cell { void *platform_data; size_t pdata_size; - /* device properties passed to the sub devices drivers */ - const struct property_entry *properties; + /* Software node for the device. */ + const struct software_node *swnode; /* * Device Tree compatible string diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h index 1dbabf1b3cb8..6e0f66a2e727 100644 --- a/include/linux/mfd/da9063/registers.h +++ b/include/linux/mfd/da9063/registers.h @@ -1037,6 +1037,9 @@ #define DA9063_NONKEY_PIN_AUTODOWN 0x02 #define DA9063_NONKEY_PIN_AUTOFLPRT 0x03 +/* DA9063_REG_CONFIG_J (addr=0x10F) */ +#define DA9063_TWOWIRE_TO 0x40 + /* DA9063_REG_MON_REG_5 (addr=0x116) */ #define DA9063_MON_A8_IDX_MASK 0x07 #define DA9063_MON_A8_IDX_NONE 0x00 diff --git a/include/linux/mfd/db8500-prcmu.h b/include/linux/mfd/db8500-prcmu.h index 4b63d3ecdcff..a62de3d155ed 100644 --- a/include/linux/mfd/db8500-prcmu.h +++ b/include/linux/mfd/db8500-prcmu.h @@ -720,7 +720,7 @@ static inline int db8500_prcmu_load_a9wdog(u8 id, u32 val) static inline bool db8500_prcmu_is_ac_wake_requested(void) { - return 0; + return false; } static inline int db8500_prcmu_set_arm_opp(u8 opp) diff --git a/include/linux/mfd/intel-m10-bmc.h b/include/linux/mfd/intel-m10-bmc.h index 74d4e193966a..f0044b14136e 100644 --- a/include/linux/mfd/intel-m10-bmc.h +++ b/include/linux/mfd/intel-m10-bmc.h @@ -9,9 +9,15 @@ #include <linux/regmap.h> -#define M10BMC_LEGACY_SYS_BASE 0x300400 +#define M10BMC_LEGACY_BUILD_VER 0x300468 #define M10BMC_SYS_BASE 0x300800 -#define M10BMC_MEM_END 0x200000fc +#define M10BMC_SYS_END 0x300fff +#define M10BMC_FLASH_BASE 0x10000000 +#define M10BMC_FLASH_END 0x1fffffff +#define M10BMC_MEM_END M10BMC_FLASH_END + +#define M10BMC_STAGING_BASE 0x18000000 +#define M10BMC_STAGING_SIZE 0x3800000 /* Register offset of system registers */ #define NIOS2_FW_VERSION 0x0 @@ -30,6 +36,88 @@ #define M10BMC_VER_PCB_INFO_MSK GENMASK(31, 24) #define M10BMC_VER_LEGACY_INVALID 0xffffffff +/* Secure update doorbell register, in system register region */ +#define M10BMC_DOORBELL 0x400 + +/* Authorization Result register, in system register region */ +#define M10BMC_AUTH_RESULT 0x404 + +/* Doorbell register fields */ +#define 
DRBL_RSU_REQUEST BIT(0) +#define DRBL_RSU_PROGRESS GENMASK(7, 4) +#define DRBL_HOST_STATUS GENMASK(11, 8) +#define DRBL_RSU_STATUS GENMASK(23, 16) +#define DRBL_PKVL_EEPROM_LOAD_SEC BIT(24) +#define DRBL_PKVL1_POLL_EN BIT(25) +#define DRBL_PKVL2_POLL_EN BIT(26) +#define DRBL_CONFIG_SEL BIT(28) +#define DRBL_REBOOT_REQ BIT(29) +#define DRBL_REBOOT_DISABLED BIT(30) + +/* Progress states */ +#define RSU_PROG_IDLE 0x0 +#define RSU_PROG_PREPARE 0x1 +#define RSU_PROG_READY 0x3 +#define RSU_PROG_AUTHENTICATING 0x4 +#define RSU_PROG_COPYING 0x5 +#define RSU_PROG_UPDATE_CANCEL 0x6 +#define RSU_PROG_PROGRAM_KEY_HASH 0x7 +#define RSU_PROG_RSU_DONE 0x8 +#define RSU_PROG_PKVL_PROM_DONE 0x9 + +/* Device and error states */ +#define RSU_STAT_NORMAL 0x0 +#define RSU_STAT_TIMEOUT 0x1 +#define RSU_STAT_AUTH_FAIL 0x2 +#define RSU_STAT_COPY_FAIL 0x3 +#define RSU_STAT_FATAL 0x4 +#define RSU_STAT_PKVL_REJECT 0x5 +#define RSU_STAT_NON_INC 0x6 +#define RSU_STAT_ERASE_FAIL 0x7 +#define RSU_STAT_WEAROUT 0x8 +#define RSU_STAT_NIOS_OK 0x80 +#define RSU_STAT_USER_OK 0x81 +#define RSU_STAT_FACTORY_OK 0x82 +#define RSU_STAT_USER_FAIL 0x83 +#define RSU_STAT_FACTORY_FAIL 0x84 +#define RSU_STAT_NIOS_FLASH_ERR 0x85 +#define RSU_STAT_FPGA_FLASH_ERR 0x86 + +#define HOST_STATUS_IDLE 0x0 +#define HOST_STATUS_WRITE_DONE 0x1 +#define HOST_STATUS_ABORT_RSU 0x2 + +#define rsu_prog(doorbell) FIELD_GET(DRBL_RSU_PROGRESS, doorbell) +#define rsu_stat(doorbell) FIELD_GET(DRBL_RSU_STATUS, doorbell) + +/* interval 100ms and timeout 5s */ +#define NIOS_HANDSHAKE_INTERVAL_US (100 * 1000) +#define NIOS_HANDSHAKE_TIMEOUT_US (5 * 1000 * 1000) + +/* RSU PREP Timeout (2 minutes) to erase flash staging area */ +#define RSU_PREP_INTERVAL_MS 100 +#define RSU_PREP_TIMEOUT_MS (2 * 60 * 1000) + +/* RSU Complete Timeout (40 minutes) for full flash update */ +#define RSU_COMPLETE_INTERVAL_MS 1000 +#define RSU_COMPLETE_TIMEOUT_MS (40 * 60 * 1000) + +/* Addresses for security related data in FLASH */ +#define BMC_REH_ADDR 0x17ffc004 +#define BMC_PROG_ADDR 0x17ffc000 +#define BMC_PROG_MAGIC 0x5746 + +#define SR_REH_ADDR 0x17ffd004 +#define SR_PROG_ADDR 0x17ffd000 +#define SR_PROG_MAGIC 0x5253 + +#define PR_REH_ADDR 0x17ffe004 +#define PR_PROG_ADDR 0x17ffe000 +#define PR_PROG_MAGIC 0x5250 + +/* Address of 4KB inverted bit vector containing staging area FLASH count */ +#define STAGING_FLASH_COUNT 0x17ffb000 + /** * struct intel_m10bmc - Intel MAX 10 BMC parent driver data structure * @dev: this device diff --git a/include/linux/mfd/lp87565.h b/include/linux/mfd/lp87565.h index d44ddfb6bb63..5640e6088fe6 100644 --- a/include/linux/mfd/lp87565.h +++ b/include/linux/mfd/lp87565.h @@ -237,9 +237,6 @@ enum lp87565_device_type { #define LP87565_GOIO2_OUT BIT(1) #define LP87565_GOIO1_OUT BIT(0) -/* Number of step-down converters available */ -#define LP87565_NUM_BUCK 6 - enum LP87565_regulator_id { /* BUCK's */ LP87565_BUCK_0, diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h index 6ddca2bbb3a8..39967a5eca6d 100644 --- a/include/linux/mfd/lpc_ich.h +++ b/include/linux/mfd/lpc_ich.h @@ -8,7 +8,7 @@ #ifndef LPC_ICH_H #define LPC_ICH_H -#include <linux/platform_data/intel-spi.h> +#include <linux/platform_data/x86/intel-spi.h> /* GPIO resources */ #define ICH_RES_GPIO 0 diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h index e955e2f0a2cc..6c98edcf4b0b 100644 --- a/include/linux/mfd/max8997.h +++ b/include/linux/mfd/max8997.h @@ -14,13 +14,13 @@ * others and b) it can be enabled simply by using MAX17042 driver. 
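The secure update doorbell, progress and status fields added to intel-m10-bmc.h above are meant to be polled by the host while the Nios firmware walks through the RSU state machine. Below is a minimal sketch of such a poll, not taken from the driver: the helper name m10bmc_wait_rsu_idle() is hypothetical, and reusing the Nios handshake interval/timeout constants here is an illustrative assumption only.

#include <linux/bitfield.h>
#include <linux/regmap.h>
#include <linux/mfd/intel-m10-bmc.h>

/* Illustrative only: poll the doorbell until the RSU state machine is idle. */
static int m10bmc_wait_rsu_idle(struct regmap *map)
{
	u32 doorbell;

	/* rsu_prog() extracts the DRBL_RSU_PROGRESS field from the doorbell. */
	return regmap_read_poll_timeout(map,
					M10BMC_SYS_BASE + M10BMC_DOORBELL,
					doorbell,
					rsu_prog(doorbell) == RSU_PROG_IDLE,
					NIOS_HANDSHAKE_INTERVAL_US,
					NIOS_HANDSHAKE_TIMEOUT_US);
}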
*/ -#ifndef __LINUX_MFD_MAX8998_H -#define __LINUX_MFD_MAX8998_H +#ifndef __LINUX_MFD_MAX8997_H +#define __LINUX_MFD_MAX8997_H #include <linux/regulator/consumer.h> /* MAX8997/8966 regulator IDs */ -enum max8998_regulators { +enum max8997_regulators { MAX8997_LDO1 = 0, MAX8997_LDO2, MAX8997_LDO3, @@ -207,4 +207,4 @@ struct max8997_platform_data { struct max8997_led_platform_data *led_pdata; }; -#endif /* __LINUX_MFD_MAX8998_H */ +#endif /* __LINUX_MFD_MAX8997_H */ diff --git a/include/linux/mfd/ntxec.h b/include/linux/mfd/ntxec.h new file mode 100644 index 000000000000..26ab3b8eb612 --- /dev/null +++ b/include/linux/mfd/ntxec.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2020 Jonathan Neuschäfer + * + * Register access and version information for the Netronix embedded + * controller. + */ + +#ifndef NTXEC_H +#define NTXEC_H + +#include <linux/types.h> + +struct device; +struct regmap; + +struct ntxec { + struct device *dev; + struct regmap *regmap; +}; + +/* + * Some registers, such as the battery status register (0x41), are in + * big-endian, but others only have eight significant bits, which are in the + * first byte transmitted over I2C (the MSB of the big-endian value). + * This convenience function converts an 8-bit value to 16-bit for use in the + * second kind of register. + */ +static inline __be16 ntxec_reg8(u8 value) +{ + return value << 8; +} + +/* Known firmware versions */ +#define NTXEC_VERSION_KOBO_AURA 0xd726 /* found in Kobo Aura */ +#define NTXEC_VERSION_TOLINO_SHINE2 0xf110 /* found in Tolino Shine 2 HD */ + +#endif diff --git a/include/linux/mfd/rn5t618.h b/include/linux/mfd/rn5t618.h index fba0df13d9a8..8aa0bda1af4f 100644 --- a/include/linux/mfd/rn5t618.h +++ b/include/linux/mfd/rn5t618.h @@ -188,6 +188,7 @@ #define RN5T618_CHGOSCSCORESET3 0xd7 #define RN5T618_CHGOSCFREQSET1 0xd8 #define RN5T618_CHGOSCFREQSET2 0xd9 +#define RN5T618_GCHGDET 0xda #define RN5T618_CONTROL 0xe0 #define RN5T618_SOC 0xe1 #define RN5T618_RE_CAP_H 0xe2 diff --git a/include/linux/mfd/rohm-bd71815.h b/include/linux/mfd/rohm-bd71815.h new file mode 100644 index 000000000000..ec6d9612bebe --- /dev/null +++ b/include/linux/mfd/rohm-bd71815.h @@ -0,0 +1,562 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2021 ROHM Semiconductors. + * + * Author: Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com> + * + * Copyright 2014 Embest Technology Co. Ltd. Inc. 
+ * + * Author: yanglsh@embest-tech.com + */ + +#ifndef _MFD_BD71815_H +#define _MFD_BD71815_H + +#include <linux/regmap.h> + +enum { + BD71815_BUCK1 = 0, + BD71815_BUCK2, + BD71815_BUCK3, + BD71815_BUCK4, + BD71815_BUCK5, + /* General Purpose */ + BD71815_LDO1, + BD71815_LDO2, + BD71815_LDO3, + /* LDOs for SD Card and SD Card Interface */ + BD71815_LDO4, + BD71815_LDO5, + /* LDO for DDR Reference Voltage */ + BD71815_LDODVREF, + /* LDO for Low-Power State Retention */ + BD71815_LDOLPSR, + BD71815_WLED, + BD71815_REGULATOR_CNT, +}; + +#define BD71815_SUPPLY_STATE_ENABLED 0x1 + +enum { + BD71815_REG_DEVICE = 0, + BD71815_REG_PWRCTRL, + BD71815_REG_BUCK1_MODE, + BD71815_REG_BUCK2_MODE, + BD71815_REG_BUCK3_MODE, + BD71815_REG_BUCK4_MODE, + BD71815_REG_BUCK5_MODE, + BD71815_REG_BUCK1_VOLT_H, + BD71815_REG_BUCK1_VOLT_L, + BD71815_REG_BUCK2_VOLT_H, + BD71815_REG_BUCK2_VOLT_L, + BD71815_REG_BUCK3_VOLT, + BD71815_REG_BUCK4_VOLT, + BD71815_REG_BUCK5_VOLT, + BD71815_REG_LED_CTRL, + BD71815_REG_LED_DIMM, + BD71815_REG_LDO_MODE1, + BD71815_REG_LDO_MODE2, + BD71815_REG_LDO_MODE3, + BD71815_REG_LDO_MODE4, + BD71815_REG_LDO1_VOLT, + BD71815_REG_LDO2_VOLT, + BD71815_REG_LDO3_VOLT, + BD71815_REG_LDO4_VOLT, + BD71815_REG_LDO5_VOLT_H, + BD71815_REG_LDO5_VOLT_L, + BD71815_REG_BUCK_PD_DIS, + BD71815_REG_LDO_PD_DIS, + BD71815_REG_GPO, + BD71815_REG_OUT32K, + BD71815_REG_SEC, + BD71815_REG_MIN, + BD71815_REG_HOUR, + BD71815_REG_WEEK, + BD71815_REG_DAY, + BD71815_REG_MONTH, + BD71815_REG_YEAR, + BD71815_REG_ALM0_SEC, + + BD71815_REG_ALM1_SEC = 0x2C, + + BD71815_REG_ALM0_MASK = 0x33, + BD71815_REG_ALM1_MASK, + BD71815_REG_ALM2, + BD71815_REG_TRIM, + BD71815_REG_CONF, + BD71815_REG_SYS_INIT, + BD71815_REG_CHG_STATE, + BD71815_REG_CHG_LAST_STATE, + BD71815_REG_BAT_STAT, + BD71815_REG_DCIN_STAT, + BD71815_REG_VSYS_STAT, + BD71815_REG_CHG_STAT, + BD71815_REG_CHG_WDT_STAT, + BD71815_REG_BAT_TEMP, + BD71815_REG_IGNORE_0, + BD71815_REG_INHIBIT_0, + BD71815_REG_DCIN_CLPS, + BD71815_REG_VSYS_REG, + BD71815_REG_VSYS_MAX, + BD71815_REG_VSYS_MIN, + BD71815_REG_CHG_SET1, + BD71815_REG_CHG_SET2, + BD71815_REG_CHG_WDT_PRE, + BD71815_REG_CHG_WDT_FST, + BD71815_REG_CHG_IPRE, + BD71815_REG_CHG_IFST, + BD71815_REG_CHG_IFST_TERM, + BD71815_REG_CHG_VPRE, + BD71815_REG_CHG_VBAT_1, + BD71815_REG_CHG_VBAT_2, + BD71815_REG_CHG_VBAT_3, + BD71815_REG_CHG_LED_1, + BD71815_REG_VF_TH, + BD71815_REG_BAT_SET_1, + BD71815_REG_BAT_SET_2, + BD71815_REG_BAT_SET_3, + BD71815_REG_ALM_VBAT_TH_U, + BD71815_REG_ALM_VBAT_TH_L, + BD71815_REG_ALM_DCIN_TH, + BD71815_REG_ALM_VSYS_TH, + BD71815_REG_VM_IBAT_U, + BD71815_REG_VM_IBAT_L, + BD71815_REG_VM_VBAT_U, + BD71815_REG_VM_VBAT_L, + BD71815_REG_VM_BTMP, + BD71815_REG_VM_VTH, + BD71815_REG_VM_DCIN_U, + BD71815_REG_VM_DCIN_L, + BD71815_REG_VM_VSYS, + BD71815_REG_VM_VF, + BD71815_REG_VM_OCI_PRE_U, + BD71815_REG_VM_OCI_PRE_L, + BD71815_REG_VM_OCV_PRE_U, + BD71815_REG_VM_OCV_PRE_L, + BD71815_REG_VM_OCI_PST_U, + BD71815_REG_VM_OCI_PST_L, + BD71815_REG_VM_OCV_PST_U, + BD71815_REG_VM_OCV_PST_L, + BD71815_REG_VM_SA_VBAT_U, + BD71815_REG_VM_SA_VBAT_L, + BD71815_REG_VM_SA_IBAT_U, + BD71815_REG_VM_SA_IBAT_L, + BD71815_REG_CC_CTRL, + BD71815_REG_CC_BATCAP1_TH_U, + BD71815_REG_CC_BATCAP1_TH_L, + BD71815_REG_CC_BATCAP2_TH_U, + BD71815_REG_CC_BATCAP2_TH_L, + BD71815_REG_CC_BATCAP3_TH_U, + BD71815_REG_CC_BATCAP3_TH_L, + BD71815_REG_CC_STAT, + BD71815_REG_CC_CCNTD_3, + BD71815_REG_CC_CCNTD_2, + BD71815_REG_CC_CCNTD_1, + BD71815_REG_CC_CCNTD_0, + BD71815_REG_CC_CURCD_U, + BD71815_REG_CC_CURCD_L, + BD71815_REG_VM_OCUR_THR_1, + 
BD71815_REG_VM_OCUR_DUR_1, + BD71815_REG_VM_OCUR_THR_2, + BD71815_REG_VM_OCUR_DUR_2, + BD71815_REG_VM_OCUR_THR_3, + BD71815_REG_VM_OCUR_DUR_3, + BD71815_REG_VM_OCUR_MON, + BD71815_REG_VM_BTMP_OV_THR, + BD71815_REG_VM_BTMP_OV_DUR, + BD71815_REG_VM_BTMP_LO_THR, + BD71815_REG_VM_BTMP_LO_DUR, + BD71815_REG_VM_BTMP_MON, + BD71815_REG_INT_EN_01, + + BD71815_REG_INT_EN_11 = 0x95, + BD71815_REG_INT_EN_12, + BD71815_REG_INT_STAT, + BD71815_REG_INT_STAT_01, + BD71815_REG_INT_STAT_02, + BD71815_REG_INT_STAT_03, + BD71815_REG_INT_STAT_04, + BD71815_REG_INT_STAT_05, + BD71815_REG_INT_STAT_06, + BD71815_REG_INT_STAT_07, + BD71815_REG_INT_STAT_08, + BD71815_REG_INT_STAT_09, + BD71815_REG_INT_STAT_10, + BD71815_REG_INT_STAT_11, + BD71815_REG_INT_STAT_12, + BD71815_REG_INT_UPDATE, + + BD71815_REG_VM_VSYS_U = 0xC0, + BD71815_REG_VM_VSYS_L, + BD71815_REG_VM_SA_VSYS_U, + BD71815_REG_VM_SA_VSYS_L, + + BD71815_REG_VM_SA_IBAT_MIN_U = 0xD0, + BD71815_REG_VM_SA_IBAT_MIN_L, + BD71815_REG_VM_SA_IBAT_MAX_U, + BD71815_REG_VM_SA_IBAT_MAX_L, + BD71815_REG_VM_SA_VBAT_MIN_U, + BD71815_REG_VM_SA_VBAT_MIN_L, + BD71815_REG_VM_SA_VBAT_MAX_U, + BD71815_REG_VM_SA_VBAT_MAX_L, + BD71815_REG_VM_SA_VSYS_MIN_U, + BD71815_REG_VM_SA_VSYS_MIN_L, + BD71815_REG_VM_SA_VSYS_MAX_U, + BD71815_REG_VM_SA_VSYS_MAX_L, + BD71815_REG_VM_SA_MINMAX_CLR, + + BD71815_REG_REX_CCNTD_3 = 0xE0, + BD71815_REG_REX_CCNTD_2, + BD71815_REG_REX_CCNTD_1, + BD71815_REG_REX_CCNTD_0, + BD71815_REG_REX_SA_VBAT_U, + BD71815_REG_REX_SA_VBAT_L, + BD71815_REG_REX_CTRL_1, + BD71815_REG_REX_CTRL_2, + BD71815_REG_FULL_CCNTD_3, + BD71815_REG_FULL_CCNTD_2, + BD71815_REG_FULL_CCNTD_1, + BD71815_REG_FULL_CCNTD_0, + BD71815_REG_FULL_CTRL, + + BD71815_REG_CCNTD_CHG_3 = 0xF0, + BD71815_REG_CCNTD_CHG_2, + + BD71815_REG_TEST_MODE = 0xFE, + BD71815_MAX_REGISTER, +}; + +/* BD71815_REG_BUCK1_MODE bits */ +#define BD71815_BUCK_RAMPRATE_MASK 0xC0 +#define BD71815_BUCK_RAMPRATE_10P00MV 0x0 +#define BD71815_BUCK_RAMPRATE_5P00MV 0x01 +#define BD71815_BUCK_RAMPRATE_2P50MV 0x02 +#define BD71815_BUCK_RAMPRATE_1P25MV 0x03 + +#define BD71815_BUCK_PWM_FIXED BIT(4) +#define BD71815_BUCK_SNVS_ON BIT(3) +#define BD71815_BUCK_RUN_ON BIT(2) +#define BD71815_BUCK_LPSR_ON BIT(1) +#define BD71815_BUCK_SUSP_ON BIT(0) + +/* BD71815_REG_BUCK1_VOLT_H bits */ +#define BD71815_BUCK_DVSSEL BIT(7) +#define BD71815_BUCK_STBY_DVS BIT(6) +#define BD71815_VOLT_MASK 0x3F +#define BD71815_BUCK1_H_DEFAULT 0x14 +#define BD71815_BUCK1_L_DEFAULT 0x14 + +/* BD71815_REG_BUCK2_VOLT_H bits */ +#define BD71815_BUCK2_H_DEFAULT 0x14 +#define BD71815_BUCK2_L_DEFAULT 0x14 + +/* WLED output */ +/* current register mask */ +#define LED_DIMM_MASK 0x3f +/* LED enable bits at LED_CTRL reg */ +#define LED_CHGDONE_EN BIT(4) +#define LED_RUN_ON BIT(2) +#define LED_LPSR_ON BIT(1) +#define LED_SUSP_ON BIT(0) + +/* BD71815_REG_LDO1_CTRL bits */ +#define LDO1_EN BIT(0) +#define LDO2_EN BIT(1) +#define LDO3_EN BIT(2) +#define DVREF_EN BIT(3) +#define VOSNVS_SW_EN BIT(4) + +/* LDO_MODE1_register */ +#define LDO1_SNVS_ON BIT(7) +#define LDO1_RUN_ON BIT(6) +#define LDO1_LPSR_ON BIT(5) +#define LDO1_SUSP_ON BIT(4) +/* set => register control, unset => GPIO control */ +#define LDO4_MODE_MASK BIT(3) +#define LDO4_MODE_I2C BIT(3) +#define LDO4_MODE_GPIO 0 +/* set => register control, unset => start when DCIN connected */ +#define LDO3_MODE_MASK BIT(2) +#define LDO3_MODE_I2C BIT(2) +#define LDO3_MODE_DCIN 0 + +/* LDO_MODE2 register */ +#define LDO3_SNVS_ON BIT(7) +#define LDO3_RUN_ON BIT(6) +#define LDO3_LPSR_ON BIT(5) +#define LDO3_SUSP_ON BIT(4) 
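The LDO3/LDO4 mode bits above choose between register (I2C) control and hardware control (a GPIO for LDO4, DCIN detection for LDO3). A minimal sketch of flipping LDO4 to register control through regmap follows; whether a given board wants this is a policy decision, and the regmap handle is assumed to come from the BD71815 core MFD driver.

#include <linux/regmap.h>
#include <linux/mfd/rohm-bd71815.h>

/* Illustrative only: put LDO4 under register (I2C) control instead of GPIO. */
static int bd71815_ldo4_use_i2c_control(struct regmap *regmap)
{
	return regmap_update_bits(regmap, BD71815_REG_LDO_MODE1,
				  LDO4_MODE_MASK, LDO4_MODE_I2C);
}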
+#define LDO2_SNVS_ON BIT(3) +#define LDO2_RUN_ON BIT(2) +#define LDO2_LPSR_ON BIT(1) +#define LDO2_SUSP_ON BIT(0) + + +/* LDO_MODE3 register */ +#define LDO5_SNVS_ON BIT(7) +#define LDO5_RUN_ON BIT(6) +#define LDO5_LPSR_ON BIT(5) +#define LDO5_SUSP_ON BIT(4) +#define LDO4_SNVS_ON BIT(3) +#define LDO4_RUN_ON BIT(2) +#define LDO4_LPSR_ON BIT(1) +#define LDO4_SUSP_ON BIT(0) + +/* LDO_MODE4 register */ +#define DVREF_SNVS_ON BIT(7) +#define DVREF_RUN_ON BIT(6) +#define DVREF_LPSR_ON BIT(5) +#define DVREF_SUSP_ON BIT(4) +#define LDO_LPSR_SNVS_ON BIT(3) +#define LDO_LPSR_RUN_ON BIT(2) +#define LDO_LPSR_LPSR_ON BIT(1) +#define LDO_LPSR_SUSP_ON BIT(0) + +/* BD71815_REG_OUT32K bits */ +#define OUT32K_EN BIT(0) +#define OUT32K_MODE BIT(1) +#define OUT32K_MODE_CMOS BIT(1) +#define OUT32K_MODE_OPEN_DRAIN 0 + +/* BD71815_REG_BAT_STAT bits */ +#define BAT_DET BIT(5) +#define BAT_DET_OFFSET 5 +#define BAT_DET_DONE BIT(4) +#define VBAT_OV BIT(3) +#define DBAT_DET BIT(0) + +/* BD71815_REG_VBUS_STAT bits */ +#define VBUS_DET BIT(0) + +#define BD71815_REG_RTC_START BD71815_REG_SEC +#define BD71815_REG_RTC_ALM_START BD71815_REG_ALM0_SEC + +/* BD71815_REG_ALM0_MASK bits */ +#define A0_ONESEC BIT(7) + +/* BD71815_REG_INT_EN_00 bits */ +#define ALMALE BIT(0) + +/* BD71815_REG_INT_STAT_03 bits */ +#define DCIN_MON_DET BIT(1) +#define DCIN_MON_RES BIT(0) +#define POWERON_LONG BIT(2) +#define POWERON_MID BIT(3) +#define POWERON_SHORT BIT(4) +#define POWERON_PRESS BIT(5) + +/* BD71805_REG_INT_STAT_08 bits */ +#define VBAT_MON_DET BIT(1) +#define VBAT_MON_RES BIT(0) + +/* BD71805_REG_INT_STAT_11 bits */ +#define INT_STAT_11_VF_DET BIT(7) +#define INT_STAT_11_VF_RES BIT(6) +#define INT_STAT_11_VF125_DET BIT(5) +#define INT_STAT_11_VF125_RES BIT(4) +#define INT_STAT_11_OVTMP_DET BIT(3) +#define INT_STAT_11_OVTMP_RES BIT(2) +#define INT_STAT_11_LOTMP_DET BIT(1) +#define INT_STAT_11_LOTMP_RES BIT(0) + +#define VBAT_MON_DET BIT(1) +#define VBAT_MON_RES BIT(0) + +/* BD71815_REG_PWRCTRL bits */ +#define RESTARTEN BIT(0) + +/* BD71815_REG_GPO bits */ +#define READY_FORCE_LOW BIT(2) +#define BD71815_GPIO_DRIVE_MASK BIT(4) +#define BD71815_GPIO_OPEN_DRAIN 0 +#define BD71815_GPIO_CMOS BIT(4) + +/* BD71815 interrupt masks */ +enum { + BD71815_INT_EN_01_BUCKAST_MASK = 0x0F, + BD71815_INT_EN_02_DCINAST_MASK = 0x3E, + BD71815_INT_EN_03_DCINAST_MASK = 0x3F, + BD71815_INT_EN_04_VSYSAST_MASK = 0xCF, + BD71815_INT_EN_05_CHGAST_MASK = 0xFC, + BD71815_INT_EN_06_BATAST_MASK = 0xF3, + BD71815_INT_EN_07_BMONAST_MASK = 0xFE, + BD71815_INT_EN_08_BMONAST_MASK = 0x03, + BD71815_INT_EN_09_BMONAST_MASK = 0x07, + BD71815_INT_EN_10_BMONAST_MASK = 0x3F, + BD71815_INT_EN_11_TMPAST_MASK = 0xFF, + BD71815_INT_EN_12_ALMAST_MASK = 0x07, +}; +/* BD71815 interrupt irqs */ +enum { + /* BUCK reg interrupts */ + BD71815_INT_BUCK1_OCP, + BD71815_INT_BUCK2_OCP, + BD71815_INT_BUCK3_OCP, + BD71815_INT_BUCK4_OCP, + BD71815_INT_BUCK5_OCP, + BD71815_INT_LED_OVP, + BD71815_INT_LED_OCP, + BD71815_INT_LED_SCP, + /* DCIN1 interrupts */ + BD71815_INT_DCIN_RMV, + BD71815_INT_CLPS_OUT, + BD71815_INT_CLPS_IN, + BD71815_INT_DCIN_OVP_RES, + BD71815_INT_DCIN_OVP_DET, + /* DCIN2 interrupts */ + BD71815_INT_DCIN_MON_RES, + BD71815_INT_DCIN_MON_DET, + BD71815_INT_WDOG, + /* Vsys INT_STAT_04 */ + BD71815_INT_VSYS_UV_RES, + BD71815_INT_VSYS_UV_DET, + BD71815_INT_VSYS_LOW_RES, + BD71815_INT_VSYS_LOW_DET, + BD71815_INT_VSYS_MON_RES, + BD71815_INT_VSYS_MON_DET, + /* Charger INT_STAT_05 */ + BD71815_INT_CHG_WDG_TEMP, + BD71815_INT_CHG_WDG_TIME, + BD71815_INT_CHG_RECHARGE_RES, + 
BD71815_INT_CHG_RECHARGE_DET, + BD71815_INT_CHG_RANGED_TEMP_TRANSITION, + BD71815_INT_CHG_STATE_TRANSITION, + /* Battery INT_STAT_06 */ + BD71815_INT_BAT_TEMP_NORMAL, + BD71815_INT_BAT_TEMP_ERANGE, + BD71815_INT_BAT_REMOVED, + BD71815_INT_BAT_DETECTED, + BD71815_INT_THERM_REMOVED, + BD71815_INT_THERM_DETECTED, + /* Battery Mon 1 INT_STAT_07 */ + BD71815_INT_BAT_DEAD, + BD71815_INT_BAT_SHORTC_RES, + BD71815_INT_BAT_SHORTC_DET, + BD71815_INT_BAT_LOW_VOLT_RES, + BD71815_INT_BAT_LOW_VOLT_DET, + BD71815_INT_BAT_OVER_VOLT_RES, + BD71815_INT_BAT_OVER_VOLT_DET, + /* Battery Mon 2 INT_STAT_08 */ + BD71815_INT_BAT_MON_RES, + BD71815_INT_BAT_MON_DET, + /* Battery Mon 3 (Coulomb counter) INT_STAT_09 */ + BD71815_INT_BAT_CC_MON1, + BD71815_INT_BAT_CC_MON2, + BD71815_INT_BAT_CC_MON3, + /* Battery Mon 4 INT_STAT_10 */ + BD71815_INT_BAT_OVER_CURR_1_RES, + BD71815_INT_BAT_OVER_CURR_1_DET, + BD71815_INT_BAT_OVER_CURR_2_RES, + BD71815_INT_BAT_OVER_CURR_2_DET, + BD71815_INT_BAT_OVER_CURR_3_RES, + BD71815_INT_BAT_OVER_CURR_3_DET, + /* Temperature INT_STAT_11 */ + BD71815_INT_TEMP_BAT_LOW_RES, + BD71815_INT_TEMP_BAT_LOW_DET, + BD71815_INT_TEMP_BAT_HI_RES, + BD71815_INT_TEMP_BAT_HI_DET, + BD71815_INT_TEMP_CHIP_OVER_125_RES, + BD71815_INT_TEMP_CHIP_OVER_125_DET, + BD71815_INT_TEMP_CHIP_OVER_VF_RES, + BD71815_INT_TEMP_CHIP_OVER_VF_DET, + /* RTC Alarm INT_STAT_12 */ + BD71815_INT_RTC0, + BD71815_INT_RTC1, + BD71815_INT_RTC2, +}; + +#define BD71815_INT_BUCK1_OCP_MASK BIT(0) +#define BD71815_INT_BUCK2_OCP_MASK BIT(1) +#define BD71815_INT_BUCK3_OCP_MASK BIT(2) +#define BD71815_INT_BUCK4_OCP_MASK BIT(3) +#define BD71815_INT_BUCK5_OCP_MASK BIT(4) +#define BD71815_INT_LED_OVP_MASK BIT(5) +#define BD71815_INT_LED_OCP_MASK BIT(6) +#define BD71815_INT_LED_SCP_MASK BIT(7) + +#define BD71815_INT_DCIN_RMV_MASK BIT(1) +#define BD71815_INT_CLPS_OUT_MASK BIT(2) +#define BD71815_INT_CLPS_IN_MASK BIT(3) +#define BD71815_INT_DCIN_OVP_RES_MASK BIT(4) +#define BD71815_INT_DCIN_OVP_DET_MASK BIT(5) + +#define BD71815_INT_DCIN_MON_RES_MASK BIT(0) +#define BD71815_INT_DCIN_MON_DET_MASK BIT(1) +#define BD71815_INT_WDOG_MASK BIT(6) + +#define BD71815_INT_VSYS_UV_RES_MASK BIT(0) +#define BD71815_INT_VSYS_UV_DET_MASK BIT(1) +#define BD71815_INT_VSYS_LOW_RES_MASK BIT(2) +#define BD71815_INT_VSYS_LOW_DET_MASK BIT(3) +#define BD71815_INT_VSYS_MON_RES_MASK BIT(6) +#define BD71815_INT_VSYS_MON_DET_MASK BIT(7) + +#define BD71815_INT_CHG_WDG_TEMP_MASK BIT(2) +#define BD71815_INT_CHG_WDG_TIME_MASK BIT(3) +#define BD71815_INT_CHG_RECHARGE_RES_MASK BIT(4) +#define BD71815_INT_CHG_RECHARGE_DET_MASK BIT(5) +#define BD71815_INT_CHG_RANGED_TEMP_TRANSITION_MASK BIT(6) +#define BD71815_INT_CHG_STATE_TRANSITION_MASK BIT(7) + +#define BD71815_INT_BAT_TEMP_NORMAL_MASK BIT(0) +#define BD71815_INT_BAT_TEMP_ERANGE_MASK BIT(1) +#define BD71815_INT_BAT_REMOVED_MASK BIT(4) +#define BD71815_INT_BAT_DETECTED_MASK BIT(5) +#define BD71815_INT_THERM_REMOVED_MASK BIT(6) +#define BD71815_INT_THERM_DETECTED_MASK BIT(7) + +#define BD71815_INT_BAT_DEAD_MASK BIT(1) +#define BD71815_INT_BAT_SHORTC_RES_MASK BIT(2) +#define BD71815_INT_BAT_SHORTC_DET_MASK BIT(3) +#define BD71815_INT_BAT_LOW_VOLT_RES_MASK BIT(4) +#define BD71815_INT_BAT_LOW_VOLT_DET_MASK BIT(5) +#define BD71815_INT_BAT_OVER_VOLT_RES_MASK BIT(6) +#define BD71815_INT_BAT_OVER_VOLT_DET_MASK BIT(7) + +#define BD71815_INT_BAT_MON_RES_MASK BIT(0) +#define BD71815_INT_BAT_MON_DET_MASK BIT(1) + +#define BD71815_INT_BAT_CC_MON1_MASK BIT(0) +#define BD71815_INT_BAT_CC_MON2_MASK BIT(1) +#define BD71815_INT_BAT_CC_MON3_MASK 
BIT(2) + +#define BD71815_INT_BAT_OVER_CURR_1_RES_MASK BIT(0) +#define BD71815_INT_BAT_OVER_CURR_1_DET_MASK BIT(1) +#define BD71815_INT_BAT_OVER_CURR_2_RES_MASK BIT(2) +#define BD71815_INT_BAT_OVER_CURR_2_DET_MASK BIT(3) +#define BD71815_INT_BAT_OVER_CURR_3_RES_MASK BIT(4) +#define BD71815_INT_BAT_OVER_CURR_3_DET_MASK BIT(5) + +#define BD71815_INT_TEMP_BAT_LOW_RES_MASK BIT(0) +#define BD71815_INT_TEMP_BAT_LOW_DET_MASK BIT(1) +#define BD71815_INT_TEMP_BAT_HI_RES_MASK BIT(2) +#define BD71815_INT_TEMP_BAT_HI_DET_MASK BIT(3) +#define BD71815_INT_TEMP_CHIP_OVER_125_RES_MASK BIT(4) +#define BD71815_INT_TEMP_CHIP_OVER_125_DET_MASK BIT(5) +#define BD71815_INT_TEMP_CHIP_OVER_VF_RES_MASK BIT(6) +#define BD71815_INT_TEMP_CHIP_OVER_VF_DET_MASK BIT(7) + +#define BD71815_INT_RTC0_MASK BIT(0) +#define BD71815_INT_RTC1_MASK BIT(1) +#define BD71815_INT_RTC2_MASK BIT(2) + +/* BD71815_REG_CC_CTRL bits */ +#define CCNTRST 0x80 +#define CCNTENB 0x40 +#define CCCALIB 0x20 + +/* BD71815_REG_CC_CURCD */ +#define CURDIR_Discharging 0x8000 + +/* BD71815_REG_VM_SA_IBAT */ +#define IBAT_SA_DIR_Discharging 0x8000 + +/* BD71815_REG_REX_CTRL_1 bits */ +#define REX_CLR BIT(4) + +/* BD71815_REG_REX_CTRL_1 bits */ +#define REX_PMU_STATE_MASK BIT(2) + +/* BD71815_REG_LED_CTRL bits */ +#define CHGDONE_LED_EN BIT(4) + +#endif /* __LINUX_MFD_BD71815_H */ diff --git a/include/linux/mfd/rohm-bd71828.h b/include/linux/mfd/rohm-bd71828.h index 017a4c01cb31..c7ab69c87ee8 100644 --- a/include/linux/mfd/rohm-bd71828.h +++ b/include/linux/mfd/rohm-bd71828.h @@ -151,6 +151,9 @@ enum { #define BD71828_REG_GPIO_CTRL3 0x49 #define BD71828_REG_IO_STAT 0xed +/* clk */ +#define BD71828_REG_OUT32K 0x4b + /* RTC */ #define BD71828_REG_RTC_SEC 0x4c #define BD71828_REG_RTC_MINUTE 0x4d diff --git a/include/linux/mfd/rohm-bd718x7.h b/include/linux/mfd/rohm-bd718x7.h index bee2474a8f9f..df2918198d37 100644 --- a/include/linux/mfd/rohm-bd718x7.h +++ b/include/linux/mfd/rohm-bd718x7.h @@ -310,17 +310,4 @@ enum { BD718XX_PWRBTN_LONG_PRESS_15S }; -struct bd718xx { - /* - * Please keep this as the first member here as some - * drivers (clk) supporting more than one chip may only know this - * generic struct 'struct rohm_regmap_dev' and assume it is - * the first chunk of parent device's private data. - */ - struct rohm_regmap_dev chip; - - int chip_irq; - struct regmap_irq_chip_data *irq_data; -}; - #endif /* __LINUX_MFD_BD718XX_H__ */ diff --git a/include/linux/mfd/rohm-bd957x.h b/include/linux/mfd/rohm-bd957x.h new file mode 100644 index 000000000000..acc920b64f75 --- /dev/null +++ b/include/linux/mfd/rohm-bd957x.h @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Copyright (C) 2021 ROHM Semiconductors */ + +#ifndef __LINUX_MFD_BD957X_H__ +#define __LINUX_MFD_BD957X_H__ + +enum { + BD957X_VD50, + BD957X_VD18, + BD957X_VDDDR, + BD957X_VD10, + BD957X_VOUTL1, + BD957X_VOUTS1, +}; + +/* + * The BD9576 has own IRQ 'blocks' for: + * - I2C/thermal, + * - Over voltage protection + * - Short-circuit protection + * - Over current protection + * - Over voltage detection + * - Under voltage detection + * - Under voltage protection + * - 'system interrupt'. + * + * Each of the blocks have a status register giving more accurate IRQ source + * information - for example which of the regulators have over-voltage. + * + * On top of this, there is "main IRQ" status register where each bit indicates + * which of sub-blocks have active IRQs. Fine. That would fit regmap-irq main + * status handling. Except that: + * - Only some sub-IRQs can be masked. 
+ * - The IRQ informs us about a fault condition, not when the fault state changes. + * The IRQ line is kept asserted until the detected condition is acked + * AND cleared in HW. This is annoying for IRQs like the one informing high + * temperature because if the IRQ is not disabled it keeps the CPU in an IRQ + * handling loop. + * + * For now we just use the main-IRQ register as the source for our IRQ + * information and bind the regmap-irq to this. We leave fine-grained sub-IRQ + * register handling to handlers in sub-devices. The regulator driver shall + * read which regulators are the source of the problem - or whether the detected error is + * a regulator temperature error. The sub-drivers also handle masking of "sub- + * IRQs" if this is supported/needed. + * + * To overcome the problem of HW keeping the IRQ asserted, we call + * disable_irq_nosync() from the sub-device handler and add a delayed work to + * re-enable the IRQ roughly 1 second later. This should keep our CPU out of + * a busy-loop. + */ +#define IRQS_SILENT_MS 1000 + +enum { + BD9576_INT_THERM, + BD9576_INT_OVP, + BD9576_INT_SCP, + BD9576_INT_OCP, + BD9576_INT_OVD, + BD9576_INT_UVD, + BD9576_INT_UVP, + BD9576_INT_SYS, +}; + +#define BD957X_REG_SMRB_ASSERT 0x15 +#define BD957X_REG_PMIC_INTERNAL_STAT 0x20 +#define BD957X_REG_INT_THERM_STAT 0x23 +#define BD957X_REG_INT_THERM_MASK 0x24 +#define BD957X_REG_INT_OVP_STAT 0x25 +#define BD957X_REG_INT_SCP_STAT 0x26 +#define BD957X_REG_INT_OCP_STAT 0x27 +#define BD957X_REG_INT_OVD_STAT 0x28 +#define BD957X_REG_INT_UVD_STAT 0x29 +#define BD957X_REG_INT_UVP_STAT 0x2a +#define BD957X_REG_INT_SYS_STAT 0x2b +#define BD957X_REG_INT_SYS_MASK 0x2c +#define BD957X_REG_INT_MAIN_STAT 0x30 +#define BD957X_REG_INT_MAIN_MASK 0x31 + +#define UVD_IRQ_VALID_MASK 0x6F +#define OVD_IRQ_VALID_MASK 0x2F + +#define BD957X_MASK_INT_MAIN_THERM BIT(0) +#define BD957X_MASK_INT_MAIN_OVP BIT(1) +#define BD957X_MASK_INT_MAIN_SCP BIT(2) +#define BD957X_MASK_INT_MAIN_OCP BIT(3) +#define BD957X_MASK_INT_MAIN_OVD BIT(4) +#define BD957X_MASK_INT_MAIN_UVD BIT(5) +#define BD957X_MASK_INT_MAIN_UVP BIT(6) +#define BD957X_MASK_INT_MAIN_SYS BIT(7) +#define BD957X_MASK_INT_ALL 0xff + +#define BD957X_REG_WDT_CONF 0x16 + +#define BD957X_REG_POW_TRIGGER1 0x41 +#define BD957X_REG_POW_TRIGGER2 0x42 +#define BD957X_REG_POW_TRIGGER3 0x43 +#define BD957X_REG_POW_TRIGGER4 0x44 +#define BD957X_REG_POW_TRIGGERL1 0x45 +#define BD957X_REG_POW_TRIGGERS1 0x46 + +#define BD957X_REGULATOR_EN_MASK 0xff +#define BD957X_REGULATOR_DIS_VAL 0xff + +#define BD957X_VSEL_REG_MASK 0xff + +#define BD957X_MASK_VOUT1_TUNE 0x87 +#define BD957X_MASK_VOUT2_TUNE 0x87 +#define BD957X_MASK_VOUT3_TUNE 0x1f +#define BD957X_MASK_VOUT4_TUNE 0x1f +#define BD957X_MASK_VOUTL1_TUNE 0x87 + +#define BD957X_REG_VOUT1_TUNE 0x50 +#define BD957X_REG_VOUT2_TUNE 0x53 +#define BD957X_REG_VOUT3_TUNE 0x56 +#define BD957X_REG_VOUT4_TUNE 0x59 +#define BD957X_REG_VOUTL1_TUNE 0x5c + +#define BD9576_REG_VOUT1_OVD 0x51 +#define BD9576_REG_VOUT1_UVD 0x52 +#define BD9576_REG_VOUT2_OVD 0x54 +#define BD9576_REG_VOUT2_UVD 0x55 +#define BD9576_REG_VOUT3_OVD 0x57 +#define BD9576_REG_VOUT3_UVD 0x58 +#define BD9576_REG_VOUT4_OVD 0x5a +#define BD9576_REG_VOUT4_UVD 0x5b +#define BD9576_REG_VOUTL1_OVD 0x5d +#define BD9576_REG_VOUTL1_UVD 0x5e + +#define BD9576_MASK_XVD 0x7f + +#define BD9576_REG_VOUT1S_OCW 0x5f +#define BD9576_REG_VOUT1S_OCP 0x60 + +#define BD9576_MASK_VOUT1S_OCW 0x3f +#define BD9576_MASK_VOUT1S_OCP 0x3f + +#define BD957X_MAX_REGISTER 0x61 + +#endif diff --git a/include/linux/mfd/rohm-generic.h
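The disable-and-delayed-re-enable scheme described in the BD9576 comment above is small in code. A hedged sketch of the pattern, not taken from the actual sub-drivers: the structure, function names and the omitted setup (request_threaded_irq() and INIT_DELAYED_WORK()) are all illustrative assumptions.

#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <linux/mfd/rohm-bd957x.h>

/* Hypothetical per-sub-device state; the real drivers keep their own. */
struct bd9576_subdev {
	int irq;
	struct delayed_work silence_work;
};

static void bd9576_reenable_irq(struct work_struct *work)
{
	struct bd9576_subdev *sd = container_of(work, struct bd9576_subdev,
						silence_work.work);

	enable_irq(sd->irq);
}

static irqreturn_t bd9576_sub_irq(int irq, void *data)
{
	struct bd9576_subdev *sd = data;

	/* ... read the relevant sub-IRQ status register, report the fault ... */

	/* HW keeps the line asserted, so silence the IRQ for a while. */
	disable_irq_nosync(irq);
	schedule_delayed_work(&sd->silence_work,
			      msecs_to_jiffies(IRQS_SILENT_MS));

	return IRQ_HANDLED;
}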
b/include/linux/mfd/rohm-generic.h index 66f673c35303..35b392a0d73a 100644 --- a/include/linux/mfd/rohm-generic.h +++ b/include/linux/mfd/rohm-generic.h @@ -8,12 +8,15 @@ #include <linux/regulator/driver.h> enum rohm_chip_type { - ROHM_CHIP_TYPE_BD71837 = 0, - ROHM_CHIP_TYPE_BD71847, - ROHM_CHIP_TYPE_BD70528, - ROHM_CHIP_TYPE_BD71828, ROHM_CHIP_TYPE_BD9571, + ROHM_CHIP_TYPE_BD9573, ROHM_CHIP_TYPE_BD9574, + ROHM_CHIP_TYPE_BD9576, + ROHM_CHIP_TYPE_BD70528, + ROHM_CHIP_TYPE_BD71815, + ROHM_CHIP_TYPE_BD71828, + ROHM_CHIP_TYPE_BD71837, + ROHM_CHIP_TYPE_BD71847, ROHM_CHIP_TYPE_AMOUNT }; @@ -26,7 +29,8 @@ struct rohm_regmap_dev { #define ROHM_DVS_LEVEL_IDLE BIT(1) #define ROHM_DVS_LEVEL_SUSPEND BIT(2) #define ROHM_DVS_LEVEL_LPSR BIT(3) -#define ROHM_DVS_LEVEL_VALID_AMOUNT 4 +#define ROHM_DVS_LEVEL_SNVS BIT(4) +#define ROHM_DVS_LEVEL_VALID_AMOUNT 5 #define ROHM_DVS_LEVEL_UNKNOWN 0 /** @@ -65,6 +69,9 @@ struct rohm_dvs_config { unsigned int lpsr_reg; unsigned int lpsr_mask; unsigned int lpsr_on_mask; + unsigned int snvs_reg; + unsigned int snvs_mask; + unsigned int snvs_on_mask; }; #if IS_ENABLED(CONFIG_REGULATOR_ROHM) diff --git a/include/linux/mfd/twl.h b/include/linux/mfd/twl.h index 089e8942223a..8871cc5188a0 100644 --- a/include/linux/mfd/twl.h +++ b/include/linux/mfd/twl.h @@ -781,8 +781,6 @@ int twl4030_sih_setup(struct device *dev, int module, int irq_base); #define TWL4030_VAUX3_DEV_GRP 0x1F #define TWL4030_VAUX3_DEDICATED 0x22 -static inline int twl4030charger_usb_en(int enable) { return 0; } - /*----------------------------------------------------------------------*/ /* Linux-specific regulator identifiers ... for now, we only support diff --git a/include/linux/mhi.h b/include/linux/mhi.h index d26acc8b21cd..944aa3aa3035 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -117,6 +117,7 @@ struct mhi_link_info { * @MHI_EE_WFW: WLAN firmware mode * @MHI_EE_PTHRU: Passthrough * @MHI_EE_EDL: Embedded downloader + * @MHI_EE_FP: Flash Programmer Environment */ enum mhi_ee_type { MHI_EE_PBL, @@ -126,7 +127,8 @@ enum mhi_ee_type { MHI_EE_WFW, MHI_EE_PTHRU, MHI_EE_EDL, - MHI_EE_MAX_SUPPORTED = MHI_EE_EDL, + MHI_EE_FP, + MHI_EE_MAX_SUPPORTED = MHI_EE_FP, MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */ MHI_EE_NOT_SUPPORTED, MHI_EE_MAX, @@ -203,7 +205,7 @@ enum mhi_db_brst_mode { * @num: The number assigned to this channel * @num_elements: The number of elements that can be queued to this channel * @local_elements: The local ring length of the channel - * @event_ring: The event rung index that services this channel + * @event_ring: The event ring index that services this channel * @dir: Direction that data may flow on this channel * @type: Channel type * @ee_mask: Execution Environment mask for this channel @@ -296,7 +298,7 @@ struct mhi_controller_config { * @wake_db: MHI WAKE doorbell register address * @iova_start: IOMMU starting address for data (required) * @iova_stop: IOMMU stop address for data (required) - * @fw_image: Firmware image name for normal booting (required) + * @fw_image: Firmware image name for normal booting (optional) * @edl_image: Firmware image name for emergency download mode (optional) * @rddm_size: RAM dump size that host should allocate for debugging purpose * @sbl_size: SBL image size downloaded through BHIe (optional) @@ -352,7 +354,6 @@ struct mhi_controller_config { * @index: Index of the MHI controller instance * @bounce_buf: Use of bounce buffer * @fbc_download: MHI host needs to do complete image transfer (optional) - * @pre_init: MHI 
host needs to do pre-initialization before power up * @wake_set: Device wakeup set flag * @irq_flags: irq flags passed to request_irq (optional) * @@ -445,7 +446,6 @@ struct mhi_controller { int index; bool bounce_buf; bool fbc_download; - bool pre_init; bool wake_set; unsigned long irq_flags; }; @@ -712,13 +712,27 @@ int mhi_device_get_sync(struct mhi_device *mhi_dev); void mhi_device_put(struct mhi_device *mhi_dev); /** - * mhi_prepare_for_transfer - Setup channel for data transfer + * mhi_prepare_for_transfer - Setup UL and DL channels for data transfer. + * Allocate and initialize the channel context and + * also issue the START channel command to both + * channels. Channels can be started only if both + * host and device execution environments match and + * channels are in a DISABLED state. * @mhi_dev: Device associated with the channels */ int mhi_prepare_for_transfer(struct mhi_device *mhi_dev); /** - * mhi_unprepare_from_transfer - Unprepare the channels + * mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer. + * Issue the RESET channel command and let the + * device clean-up the context so no incoming + * transfers are seen on the host. Free memory + * associated with the context on host. If device + * is unresponsive, only perform a host side + * clean-up. Channels can be reset only if both + * host and device execution environments match + * and channels are in an ENABLED, STOPPED or + * SUSPENDED state. * @mhi_dev: Device associated with the channels */ void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev); diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 3a389633b68f..4bb4e519e3f5 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -27,6 +27,7 @@ enum migrate_reason { MR_MEMPOLICY_MBIND, MR_NUMA_MISPLACED, MR_CONTIG_RANGE, + MR_LONGTERM_PIN, MR_TYPES }; @@ -43,10 +44,7 @@ extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, int reason); extern struct page *alloc_migration_target(struct page *page, unsigned long private); extern int isolate_movable_page(struct page *page, isolate_mode_t mode); -extern void putback_movable_page(struct page *page); -extern void migrate_prep(void); -extern void migrate_prep_local(void); extern void migrate_page_states(struct page *newpage, struct page *page); extern void migrate_page_copy(struct page *newpage, struct page *page); extern int migrate_huge_page_move_mapping(struct address_space *mapping, @@ -66,9 +64,6 @@ static inline struct page *alloc_migration_target(struct page *page, static inline int isolate_movable_page(struct page *page, isolate_mode_t mode) { return -EBUSY; } -static inline int migrate_prep(void) { return -ENOSYS; } -static inline int migrate_prep_local(void) { return -ENOSYS; } - static inline void migrate_page_states(struct page *newpage, struct page *page) { } diff --git a/include/linux/minmax.h b/include/linux/minmax.h index c0f57b0c64d9..5433c08fcc68 100644 --- a/include/linux/minmax.h +++ b/include/linux/minmax.h @@ -2,6 +2,8 @@ #ifndef _LINUX_MINMAX_H #define _LINUX_MINMAX_H +#include <linux/const.h> + /* * min()/max()/clamp() macros must accomplish three things: * @@ -17,14 +19,6 @@ #define __typecheck(x, y) \ (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1))) -/* - * This returns a constant expression while determining if an argument is - * a constant expression, most importantly without evaluating the argument. 
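The expanded kernel-doc for mhi_prepare_for_transfer() and mhi_unprepare_from_transfer() above spells out the usual MHI client pattern: start the UL/DL channels when the client binds and reset them when it unbinds. A minimal sketch using only those two calls; the transfer callbacks and mhi_driver registration are omitted, and the function names are hypothetical.

#include <linux/mhi.h>

static int example_mhi_probe(struct mhi_device *mhi_dev,
			     const struct mhi_device_id *id)
{
	/* Allocates the channel contexts and issues START to both channels. */
	return mhi_prepare_for_transfer(mhi_dev);
}

static void example_mhi_remove(struct mhi_device *mhi_dev)
{
	/* Issues RESET and frees the host-side channel context. */
	mhi_unprepare_from_transfer(mhi_dev);
}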
- * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de> - */ -#define __is_constexpr(x) \ - (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8))) - #define __no_side_effects(x, y) \ (__is_constexpr(x) && __is_constexpr(y)) diff --git a/include/linux/misc_cgroup.h b/include/linux/misc_cgroup.h new file mode 100644 index 000000000000..da2367e2ac1e --- /dev/null +++ b/include/linux/misc_cgroup.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Miscellaneous cgroup controller. + * + * Copyright 2020 Google LLC + * Author: Vipin Sharma <vipinsh@google.com> + */ +#ifndef _MISC_CGROUP_H_ +#define _MISC_CGROUP_H_ + +/** + * Types of misc cgroup entries supported by the host. + */ +enum misc_res_type { +#ifdef CONFIG_KVM_AMD_SEV + /* AMD SEV ASIDs resource */ + MISC_CG_RES_SEV, + /* AMD SEV-ES ASIDs resource */ + MISC_CG_RES_SEV_ES, +#endif + MISC_CG_RES_TYPES +}; + +struct misc_cg; + +#ifdef CONFIG_CGROUP_MISC + +#include <linux/cgroup.h> + +/** + * struct misc_res: Per cgroup per misc type resource + * @max: Maximum limit on the resource. + * @usage: Current usage of the resource. + * @failed: True if charged failed for the resource in a cgroup. + */ +struct misc_res { + unsigned long max; + atomic_long_t usage; + bool failed; +}; + +/** + * struct misc_cg - Miscellaneous controller's cgroup structure. + * @css: cgroup subsys state object. + * @res: Array of misc resources usage in the cgroup. + */ +struct misc_cg { + struct cgroup_subsys_state css; + struct misc_res res[MISC_CG_RES_TYPES]; +}; + +unsigned long misc_cg_res_total_usage(enum misc_res_type type); +int misc_cg_set_capacity(enum misc_res_type type, unsigned long capacity); +int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg, + unsigned long amount); +void misc_cg_uncharge(enum misc_res_type type, struct misc_cg *cg, + unsigned long amount); + +/** + * css_misc() - Get misc cgroup from the css. + * @css: cgroup subsys state object. + * + * Context: Any context. + * Return: + * * %NULL - If @css is null. + * * struct misc_cg* - misc cgroup pointer of the passed css. + */ +static inline struct misc_cg *css_misc(struct cgroup_subsys_state *css) +{ + return css ? container_of(css, struct misc_cg, css) : NULL; +} + +/* + * get_current_misc_cg() - Find and get the misc cgroup of the current task. + * + * Returned cgroup has its ref count increased by 1. Caller must call + * put_misc_cg() to return the reference. + * + * Return: Misc cgroup to which the current task belongs to. + */ +static inline struct misc_cg *get_current_misc_cg(void) +{ + return css_misc(task_get_css(current, misc_cgrp_id)); +} + +/* + * put_misc_cg() - Put the misc cgroup and reduce its ref count. + * @cg - cgroup to put. 
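The misc controller's charge/uncharge calls above are meant to bracket allocation of whatever scarce resource is being tracked. A minimal sketch of the intended calling pattern, assuming a resource type that is actually compiled in (MISC_CG_RES_SEV exists only under CONFIG_KVM_AMD_SEV); the wrapper names are illustrative.

#include <linux/misc_cgroup.h>

/* Illustrative only: charge one SEV ASID to the caller's cgroup. */
static int example_claim_asid(struct misc_cg **cg_ret)
{
	struct misc_cg *cg = get_current_misc_cg();
	int ret;

	ret = misc_cg_try_charge(MISC_CG_RES_SEV, cg, 1);
	if (ret) {
		put_misc_cg(cg);
		return ret;
	}

	/* Keep the reference: the same cgroup must be uncharged later. */
	*cg_ret = cg;
	return 0;
}

static void example_release_asid(struct misc_cg *cg)
{
	misc_cg_uncharge(MISC_CG_RES_SEV, cg, 1);
	put_misc_cg(cg);
}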
+ */ +static inline void put_misc_cg(struct misc_cg *cg) +{ + if (cg) + css_put(&cg->css); +} + +#else /* !CONFIG_CGROUP_MISC */ + +static inline unsigned long misc_cg_res_total_usage(enum misc_res_type type) +{ + return 0; +} + +static inline int misc_cg_set_capacity(enum misc_res_type type, + unsigned long capacity) +{ + return 0; +} + +static inline int misc_cg_try_charge(enum misc_res_type type, + struct misc_cg *cg, + unsigned long amount) +{ + return 0; +} + +static inline void misc_cg_uncharge(enum misc_res_type type, + struct misc_cg *cg, + unsigned long amount) +{ +} + +static inline struct misc_cg *get_current_misc_cg(void) +{ + return NULL; +} + +static inline void put_misc_cg(struct misc_cg *cg) +{ +} + +#endif /* CONFIG_CGROUP_MISC */ +#endif /* _MISC_CGROUP_H_ */ diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index dc3d2508f5c6..578c4ccae91c 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -911,8 +911,11 @@ static inline u16 get_cqe_flow_tag(struct mlx5_cqe64 *cqe) return be32_to_cpu(cqe->sop_drop_qpn) & 0xFFF; } -#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE (9) -#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE (6) +#define MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE 3 +#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE 9 +#define MLX5_MPWQE_LOG_NUM_STRIDES_MAX 16 +#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE 6 +#define MLX5_MPWQE_LOG_STRIDE_SZ_MAX 13 struct mpwrq_cqe_bc { __be16 filler_consumed_strides; @@ -1142,6 +1145,8 @@ enum mlx5_flex_parser_protos { MLX5_FLEX_PROTO_GENEVE = 1 << 3, MLX5_FLEX_PROTO_CW_MPLS_GRE = 1 << 4, MLX5_FLEX_PROTO_CW_MPLS_UDP = 1 << 5, + MLX5_FLEX_PROTO_ICMP = 1 << 8, + MLX5_FLEX_PROTO_ICMPV6 = 1 << 9, }; /* MLX5 DEV CAPs */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 53b89631a1d9..020a8f7fdbdd 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -127,6 +127,7 @@ enum { MLX5_REG_PELC = 0x500e, MLX5_REG_PVLC = 0x500f, MLX5_REG_PCMR = 0x5041, + MLX5_REG_PDDR = 0x5031, MLX5_REG_PMLP = 0x5002, MLX5_REG_PPLM = 0x5023, MLX5_REG_PCAM = 0x507f, @@ -438,7 +439,6 @@ struct mlx5_core_health { unsigned long flags; struct work_struct fatal_report_work; struct work_struct report_work; - struct delayed_work recover_work; struct devlink_health_reporter *fw_reporter; struct devlink_health_reporter *fw_fatal_reporter; }; @@ -517,8 +517,8 @@ struct mlx5_rate_limit { struct mlx5_rl_entry { u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)]; - u16 index; u64 refcount; + u16 index; u16 uid; u8 dedicated : 1; }; @@ -530,6 +530,7 @@ struct mlx5_rl_table { u32 max_rate; u32 min_rate; struct mlx5_rl_entry *rl_entry; + u64 refcount; }; struct mlx5_core_roce { @@ -644,10 +645,14 @@ struct mlx5_td { }; struct mlx5e_resources { - u32 pdn; - struct mlx5_td td; - struct mlx5_core_mkey mkey; - struct mlx5_sq_bfreg bfreg; + struct mlx5e_hw_objs { + u32 pdn; + struct mlx5_td td; + struct mlx5_core_mkey mkey; + struct mlx5_sq_bfreg bfreg; + } hw_objs; + struct devlink_port dl_port; + struct net_device *uplink_netdev; }; enum mlx5_sw_icm_type { @@ -698,6 +703,27 @@ struct mlx5_hv_vhca; #define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity)) #define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)) +enum { + MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0, + MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1, +}; + +enum { + MR_CACHE_LAST_STD_ENTRY = 20, + MLX5_IMR_MTT_CACHE_ENTRY, + MLX5_IMR_KSM_CACHE_ENTRY, + MAX_MR_CACHE_ENTRIES +}; + +struct mlx5_profile { + u64 mask; + u8 
log_max_qp; + struct { + int size; + int limit; + } mr_cache[MAX_MR_CACHE_ENTRIES]; +}; + struct mlx5_core_dev { struct device *device; enum mlx5_coredev_type coredev_type; @@ -726,7 +752,7 @@ struct mlx5_core_dev { struct mutex intf_state_mutex; unsigned long intf_state; struct mlx5_priv priv; - struct mlx5_profile *profile; + struct mlx5_profile profile; u32 issi; struct mlx5e_resources mlx5e_res; struct mlx5_dm *dm; @@ -874,6 +900,11 @@ static inline u32 mlx5_base_mkey(const u32 key) return key & 0xffffff00u; } +static inline u32 wq_get_byte_sz(u8 log_sz, u8 log_stride) +{ + return ((u32)1 << log_sz) << log_stride; +} + static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags, u8 log_stride, u8 log_sz, u16 strides_offset, @@ -1073,18 +1104,6 @@ static inline u8 mlx5_mkey_variant(u32 mkey) return mkey & 0xff; } -enum { - MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0, - MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1, -}; - -enum { - MR_CACHE_LAST_STD_ENTRY = 20, - MLX5_IMR_MTT_CACHE_ENTRY, - MLX5_IMR_KSM_CACHE_ENTRY, - MAX_MR_CACHE_ENTRIES -}; - /* Async-atomic event notifier used by mlx5 core to forward FW * evetns recived from event queue to mlx5 consumers. * Optimise event queue dipatching. @@ -1138,15 +1157,6 @@ int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev, struct ib_device *device, struct rdma_netdev_alloc_params *params); -struct mlx5_profile { - u64 mask; - u8 log_max_qp; - struct { - int size; - int limit; - } mr_cache[MAX_MR_CACHE_ENTRIES]; -}; - enum { MLX5_PCI_DEV_IS_VF = 1 << 0, }; @@ -1226,7 +1236,7 @@ enum { MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, }; -static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev) +static inline bool mlx5_is_roce_init_enabled(struct mlx5_core_dev *dev) { struct devlink *devlink = priv_to_devlink(dev); union devlink_param_value val; diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index 994c2c8cb4fd..17109b65c1ac 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h @@ -48,6 +48,7 @@ struct mlx5_eswitch_rep { /* Only IB rep is using vport_index */ u16 vport_index; u32 vlan_refcount; + struct mlx5_eswitch *esw; }; void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw, @@ -61,10 +62,8 @@ struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, u16 vport_num); void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type); struct mlx5_flow_handle * -mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, - u16 vport_num, u32 sqn); - -u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev); +mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw, + struct mlx5_eswitch_rep *rep, u32 sqn); #ifdef CONFIG_MLX5_ESWITCH enum devlink_eswitch_encap_mode @@ -74,20 +73,19 @@ bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw); bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw); /* Reg C0 usage: - * Reg C0 = < ESW_PFNUM_BITS(4) | ESW_VPORT BITS(12) | ESW_CHAIN_TAG(16) > + * Reg C0 = < ESW_PFNUM_BITS(4) | ESW_VPORT BITS(12) | ESW_REG_C0_OBJ(16) > * * Highest 4 bits of the reg c0 is the PF_NUM (range 0-15), 12 bits of * unique non-zero vport id (range 1-4095). The rest (lowest 16 bits) is left - * for tc chain tag restoration. + * for user data objects managed by a common mapping context. * PFNUM + VPORT comprise the SOURCE_PORT matching. 
*/ #define ESW_VPORT_BITS 12 #define ESW_PFNUM_BITS 4 #define ESW_SOURCE_PORT_METADATA_BITS (ESW_PFNUM_BITS + ESW_VPORT_BITS) #define ESW_SOURCE_PORT_METADATA_OFFSET (32 - ESW_SOURCE_PORT_METADATA_BITS) -#define ESW_CHAIN_TAG_METADATA_BITS (32 - ESW_SOURCE_PORT_METADATA_BITS) -#define ESW_CHAIN_TAG_METADATA_MASK GENMASK(ESW_CHAIN_TAG_METADATA_BITS - 1,\ - 0) +#define ESW_REG_C0_USER_DATA_METADATA_BITS (32 - ESW_SOURCE_PORT_METADATA_BITS) +#define ESW_REG_C0_USER_DATA_METADATA_MASK GENMASK(ESW_REG_C0_USER_DATA_METADATA_BITS - 1, 0) static inline u32 mlx5_eswitch_get_vport_metadata_mask(void) { @@ -126,6 +124,8 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw, #define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev); +u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev); + #else /* CONFIG_MLX5_ESWITCH */ static inline u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev) @@ -152,8 +152,7 @@ mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw) }; static inline u32 -mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, - int vport_num) +mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, u16 vport_num) { return 0; }; @@ -163,10 +162,17 @@ mlx5_eswitch_get_vport_metadata_mask(void) { return 0; } + +static inline u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev) +{ + return 0; +} + #endif /* CONFIG_MLX5_ESWITCH */ static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev) { return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS; } + #endif diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 9c68b2da14c6..6d16eed6850e 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -133,6 +133,7 @@ enum { MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204, MLX5_CMD_OP_ALLOC_MEMIC = 0x205, MLX5_CMD_OP_DEALLOC_MEMIC = 0x206, + MLX5_CMD_OP_MODIFY_MEMIC = 0x207, MLX5_CMD_OP_CREATE_EQ = 0x301, MLX5_CMD_OP_DESTROY_EQ = 0x302, MLX5_CMD_OP_QUERY_EQ = 0x303, @@ -622,7 +623,19 @@ struct mlx5_ifc_fte_match_set_misc3_bits { u8 geneve_tlv_option_0_data[0x20]; - u8 reserved_at_140[0xc0]; + u8 gtpu_teid[0x20]; + + u8 gtpu_msg_type[0x8]; + u8 gtpu_msg_flags[0x8]; + u8 reserved_at_170[0x10]; + + u8 gtpu_dw_2[0x20]; + + u8 gtpu_first_ext_dw_0[0x20]; + + u8 gtpu_dw_0[0x20]; + + u8 reserved_at_1e0[0x20]; }; struct mlx5_ifc_fte_match_set_misc4_bits { @@ -806,9 +819,11 @@ struct mlx5_ifc_e_switch_cap_bits { u8 vport_svlan_insert[0x1]; u8 vport_cvlan_insert_if_not_exist[0x1]; u8 vport_cvlan_insert_overwrite[0x1]; - u8 reserved_at_5[0x3]; + u8 reserved_at_5[0x2]; + u8 esw_shared_ingress_acl[0x1]; u8 esw_uplink_ingress_acl[0x1]; - u8 reserved_at_9[0x10]; + u8 root_ft_on_other_esw[0x1]; + u8 reserved_at_a[0xf]; u8 esw_functions_changed[0x1]; u8 reserved_at_1a[0x1]; u8 ecpf_vport_exists[0x1]; @@ -947,7 +962,9 @@ struct mlx5_ifc_roce_cap_bits { u8 roce_apm[0x1]; u8 reserved_at_1[0x3]; u8 sw_r_roce_src_udp_port[0x1]; - u8 reserved_at_5[0x19]; + u8 fl_rc_qp_when_roce_disabled[0x1]; + u8 fl_rc_qp_when_roce_enabled[0x1]; + u8 reserved_at_7[0x17]; u8 qp_ts_format[0x2]; u8 reserved_at_20[0x60]; @@ -1015,7 +1032,11 @@ struct mlx5_ifc_device_mem_cap_bits { u8 header_modify_sw_icm_start_address[0x40]; - u8 reserved_at_180[0x680]; + u8 reserved_at_180[0x80]; + + u8 memic_operations[0x20]; + + u8 reserved_at_220[0x5e0]; }; struct mlx5_ifc_device_event_cap_bits { @@ -1235,9 +1256,17 @@ enum { enum { MLX5_FLEX_PARSER_GENEVE_ENABLED = 1 << 3, + 
MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED = 1 << 4, + mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED = 1 << 5, MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7, MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8, MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9, + MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED = 1 << 10, + MLX5_FLEX_PARSER_GTPU_ENABLED = 1 << 11, + MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED = 1 << 16, + MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED = 1 << 17, + MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED = 1 << 18, + MLX5_FLEX_PARSER_GTPU_TEID_ENABLED = 1 << 19, }; enum { @@ -1295,7 +1324,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 log_max_srq_sz[0x8]; u8 log_max_qp_sz[0x8]; u8 event_cap[0x1]; - u8 reserved_at_91[0x7]; + u8 reserved_at_91[0x2]; + u8 isolate_vl_tc_new[0x1]; + u8 reserved_at_94[0x4]; u8 prio_tag_required[0x1]; u8 reserved_at_99[0x2]; u8 log_max_qp[0x5]; @@ -1502,7 +1533,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_270[0x6]; u8 lag_dct[0x2]; u8 lag_tx_port_affinity[0x1]; - u8 reserved_at_279[0x2]; + u8 lag_native_fdb_selection[0x1]; + u8 reserved_at_27a[0x1]; u8 lag_master[0x1]; u8 num_lag_ports[0x4]; @@ -1634,7 +1666,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 cqe_compression_timeout[0x10]; u8 cqe_compression_max_num[0x10]; - u8 reserved_at_5e0[0x10]; + u8 reserved_at_5e0[0x8]; + u8 flex_parser_id_gtpu_dw_0[0x4]; + u8 reserved_at_5ec[0x4]; u8 tag_matching[0x1]; u8 rndv_offload_rc[0x1]; u8 rndv_offload_dc[0x1]; @@ -1645,7 +1679,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 affiliate_nic_vport_criteria[0x8]; u8 native_port_num[0x8]; u8 num_vhca_ports[0x8]; - u8 reserved_at_618[0x6]; + u8 flex_parser_id_gtpu_teid[0x4]; + u8 reserved_at_61c[0x2]; u8 sw_owner_id[0x1]; u8 reserved_at_61f[0x1]; @@ -1680,7 +1715,17 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_6e0[0x10]; u8 sf_base_id[0x10]; - u8 reserved_at_700[0x80]; + u8 flex_parser_id_gtpu_dw_2[0x4]; + u8 flex_parser_id_gtpu_first_ext_dw_0[0x4]; + u8 num_total_dynamic_vf_msix[0x18]; + u8 reserved_at_720[0x14]; + u8 dynamic_msix_table_size[0xc]; + u8 reserved_at_740[0xc]; + u8 min_dynamic_vf_msix_table_size[0x4]; + u8 reserved_at_750[0x4]; + u8 max_dynamic_vf_msix_table_size[0xc]; + + u8 reserved_at_760[0x20]; u8 vhca_tunnel_commands[0x40]; u8 reserved_at_7c0[0x40]; }; @@ -2906,7 +2951,8 @@ struct mlx5_ifc_qpc_bits { u8 state[0x4]; u8 lag_tx_port_affinity[0x4]; u8 st[0x8]; - u8 reserved_at_10[0x3]; + u8 reserved_at_10[0x2]; + u8 isolate_vl_tc[0x1]; u8 pm_state[0x2]; u8 reserved_at_15[0x1]; u8 req_e2e_credit_mode[0x2]; @@ -9944,6 +9990,53 @@ struct mlx5_ifc_mirc_reg_bits { u8 reserved_at_20[0x20]; }; +struct mlx5_ifc_pddr_monitor_opcode_bits { + u8 reserved_at_0[0x10]; + u8 monitor_opcode[0x10]; +}; + +union mlx5_ifc_pddr_troubleshooting_page_status_opcode_auto_bits { + struct mlx5_ifc_pddr_monitor_opcode_bits pddr_monitor_opcode; + u8 reserved_at_0[0x20]; +}; + +enum { + /* Monitor opcodes */ + MLX5_PDDR_REG_TRBLSH_GROUP_OPCODE_MONITOR = 0x0, +}; + +struct mlx5_ifc_pddr_troubleshooting_page_bits { + u8 reserved_at_0[0x10]; + u8 group_opcode[0x10]; + + union mlx5_ifc_pddr_troubleshooting_page_status_opcode_auto_bits status_opcode; + + u8 reserved_at_40[0x20]; + + u8 status_message[59][0x20]; +}; + +union mlx5_ifc_pddr_reg_page_data_auto_bits { + struct mlx5_ifc_pddr_troubleshooting_page_bits pddr_troubleshooting_page; + u8 reserved_at_0[0x7c0]; +}; + +enum { + MLX5_PDDR_REG_PAGE_SELECT_TROUBLESHOOTING_INFO_PAGE = 0x1, +}; + +struct mlx5_ifc_pddr_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 pnat[0x2]; + u8 reserved_at_12[0xe]; + + u8 
reserved_at_20[0x18]; + u8 page_select[0x8]; + + union mlx5_ifc_pddr_reg_page_data_auto_bits page_data; +}; + union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_bufferx_reg_bits bufferx_reg; struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout; @@ -9958,6 +10051,9 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_pamp_reg_bits pamp_reg; struct mlx5_ifc_paos_reg_bits paos_reg; struct mlx5_ifc_pcap_reg_bits pcap_reg; + struct mlx5_ifc_pddr_monitor_opcode_bits pddr_monitor_opcode; + struct mlx5_ifc_pddr_reg_bits pddr_reg; + struct mlx5_ifc_pddr_troubleshooting_page_bits pddr_troubleshooting_page; struct mlx5_ifc_peir_reg_bits peir_reg; struct mlx5_ifc_pelc_reg_bits pelc_reg; struct mlx5_ifc_pfcc_reg_bits pfcc_reg; @@ -10038,14 +10134,19 @@ struct mlx5_ifc_set_flow_table_root_in_bits { u8 reserved_at_60[0x20]; u8 table_type[0x8]; - u8 reserved_at_88[0x18]; + u8 reserved_at_88[0x7]; + u8 table_of_other_vport[0x1]; + u8 table_vport_number[0x10]; u8 reserved_at_a0[0x8]; u8 table_id[0x18]; u8 reserved_at_c0[0x8]; u8 underlay_qpn[0x18]; - u8 reserved_at_e0[0x120]; + u8 table_eswitch_owner_vhca_id_valid[0x1]; + u8 reserved_at_e1[0xf]; + u8 table_eswitch_owner_vhca_id[0x10]; + u8 reserved_at_100[0x100]; }; enum { @@ -10275,7 +10376,8 @@ struct mlx5_ifc_dcbx_param_bits { }; struct mlx5_ifc_lagc_bits { - u8 reserved_at_0[0x1d]; + u8 fdb_selection_mode[0x1]; + u8 reserved_at_1[0x1c]; u8 lag_state[0x3]; u8 reserved_at_20[0x14]; @@ -10401,6 +10503,41 @@ struct mlx5_ifc_destroy_vport_lag_in_bits { u8 reserved_at_40[0x40]; }; +enum { + MLX5_MODIFY_MEMIC_OP_MOD_ALLOC, + MLX5_MODIFY_MEMIC_OP_MOD_DEALLOC, +}; + +struct mlx5_ifc_modify_memic_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x20]; + + u8 reserved_at_60[0x18]; + u8 memic_operation_type[0x8]; + + u8 memic_start_addr[0x40]; + + u8 reserved_at_c0[0x140]; +}; + +struct mlx5_ifc_modify_memic_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + u8 memic_operation_addr[0x40]; + + u8 reserved_at_c0[0x140]; +}; + struct mlx5_ifc_alloc_memic_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; diff --git a/include/linux/mlx5/mpfs.h b/include/linux/mlx5/mpfs.h new file mode 100644 index 000000000000..bf700c8d5516 --- /dev/null +++ b/include/linux/mlx5/mpfs.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB + * Copyright (c) 2021 Mellanox Technologies Ltd. 
+ */ + +#ifndef _MLX5_MPFS_ +#define _MLX5_MPFS_ + +struct mlx5_core_dev; + +#ifdef CONFIG_MLX5_MPFS +int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac); +int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac); +#else /* #ifndef CONFIG_MLX5_MPFS */ +static inline int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; } +static inline int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; } +#endif + +#endif diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index 23edd2db4803..77ea4f9c5265 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -45,6 +45,7 @@ enum mlx5_module_id { MLX5_MODULE_ID_QSFP = 0xC, MLX5_MODULE_ID_QSFP_PLUS = 0xD, MLX5_MODULE_ID_QSFP28 = 0x11, + MLX5_MODULE_ID_DSFP = 0x1B, }; enum mlx5_an_status { @@ -62,6 +63,15 @@ enum mlx5_an_status { #define MLX5_EEPROM_PAGE_LENGTH 256 #define MLX5_EEPROM_HIGH_PAGE_LENGTH 128 +struct mlx5_module_eeprom_query_params { + u16 size; + u16 offset; + u16 i2c_address; + u32 page; + u32 bank; + u32 module_number; +}; + enum mlx5e_link_mode { MLX5E_1000BASE_CX_SGMII = 0, MLX5E_1000BASE_KX = 1, @@ -200,6 +210,8 @@ void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported, bool *enabled); int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, u16 offset, u16 size, u8 *data); +int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev, + struct mlx5_module_eeprom_query_params *params, u8 *data); int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out); int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in); diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 4db87bcfce7b..aad53cb72f17 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -36,14 +36,6 @@ #include <linux/mlx5/driver.h> #include <linux/mlx5/device.h> -#define MLX5_VPORT_PF_PLACEHOLDER (1u) -#define MLX5_VPORT_UPLINK_PLACEHOLDER (1u) -#define MLX5_VPORT_ECPF_PLACEHOLDER(mdev) (mlx5_ecpf_vport_exists(mdev)) - -#define MLX5_SPECIAL_VPORTS(mdev) (MLX5_VPORT_PF_PLACEHOLDER + \ - MLX5_VPORT_UPLINK_PLACEHOLDER + \ - MLX5_VPORT_ECPF_PLACEHOLDER(mdev)) - #define MLX5_VPORT_MANAGER(mdev) \ (MLX5_CAP_GEN(mdev, vport_group_manager) && \ (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \ diff --git a/include/linux/mm.h b/include/linux/mm.h index 8ba434287387..c274f75efcf9 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -106,7 +106,7 @@ extern int mmap_rnd_compat_bits __read_mostly; * embedding these tags into addresses that point to these memory regions, and * checking that the memory and the pointer tags match on memory accesses) * redefine this macro to strip tags from pointers. - * It's defined as noop for arcitectures that don't support memory tagging. + * It's defined as noop for architectures that don't support memory tagging. */ #ifndef untagged_addr #define untagged_addr(addr) (addr) @@ -125,6 +125,16 @@ extern int mmap_rnd_compat_bits __read_mostly; #endif /* + * With CONFIG_CFI_CLANG, the compiler replaces function addresses in + * instrumented C code with jump table addresses. Architectures that + * support CFI can define this macro to return the actual function address + * when needed. + */ +#ifndef function_nocfi +#define function_nocfi(x) (x) +#endif + +/* * To prevent common memory management code establishing * a zero page mapping on a read fault. * This macro should be defined within <asm/pgtable.h>. 
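As an illustration of the function_nocfi() hook added above: on CONFIG_CFI_CLANG kernels it is meant to return the real entry point of a function rather than its jump-table stub, and it collapses to a no-op elsewhere. A minimal sketch, with a purely hypothetical callback:

#include <linux/mm.h>

/* Hypothetical low-level entry point whose raw address is needed. */
static void example_lowlevel_entry(void)
{
}

static unsigned long example_entry_address(void)
{
        /*
         * Under CONFIG_CFI_CLANG, &example_lowlevel_entry may point at a
         * CFI jump table; function_nocfi() yields the actual entry so the
         * address can be handed to firmware or written into hardware.
         * On other configurations the macro expands to its argument.
         */
        return (unsigned long)function_nocfi(example_lowlevel_entry);
}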
@@ -362,6 +372,13 @@ extern unsigned int kobjsize(const void *objp); # define VM_GROWSUP VM_NONE #endif +#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR +# define VM_UFFD_MINOR_BIT 37 +# define VM_UFFD_MINOR BIT(VM_UFFD_MINOR_BIT) /* UFFD minor faults */ +#else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */ +# define VM_UFFD_MINOR VM_NONE +#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */ + /* Bits set in the VMA until the stack is in its final location */ #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ) @@ -422,8 +439,7 @@ extern unsigned int kobjsize(const void *objp); extern pgprot_t protection_map[16]; /** - * Fault flag definitions. - * + * enum fault_flag - Fault flag definitions. * @FAULT_FLAG_WRITE: Fault was a write fault. * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE. * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked. @@ -454,16 +470,18 @@ extern pgprot_t protection_map[16]; * signals before a retry to make sure the continuous page faults can still be * interrupted if necessary. */ -#define FAULT_FLAG_WRITE 0x01 -#define FAULT_FLAG_MKWRITE 0x02 -#define FAULT_FLAG_ALLOW_RETRY 0x04 -#define FAULT_FLAG_RETRY_NOWAIT 0x08 -#define FAULT_FLAG_KILLABLE 0x10 -#define FAULT_FLAG_TRIED 0x20 -#define FAULT_FLAG_USER 0x40 -#define FAULT_FLAG_REMOTE 0x80 -#define FAULT_FLAG_INSTRUCTION 0x100 -#define FAULT_FLAG_INTERRUPTIBLE 0x200 +enum fault_flag { + FAULT_FLAG_WRITE = 1 << 0, + FAULT_FLAG_MKWRITE = 1 << 1, + FAULT_FLAG_ALLOW_RETRY = 1 << 2, + FAULT_FLAG_RETRY_NOWAIT = 1 << 3, + FAULT_FLAG_KILLABLE = 1 << 4, + FAULT_FLAG_TRIED = 1 << 5, + FAULT_FLAG_USER = 1 << 6, + FAULT_FLAG_REMOTE = 1 << 7, + FAULT_FLAG_INSTRUCTION = 1 << 8, + FAULT_FLAG_INTERRUPTIBLE = 1 << 9, +}; /* * The default fault flags that should be used by most of the @@ -475,6 +493,7 @@ extern pgprot_t protection_map[16]; /** * fault_flag_allow_retry_first - check ALLOW_RETRY the first time + * @flags: Fault flags. * * This is mostly used for places where we want to try to avoid taking * the mmap_lock for too long a time when waiting for another condition @@ -485,7 +504,7 @@ extern pgprot_t protection_map[16]; * Return: true if the page fault allows retry and this is the first * attempt of the fault handling; false otherwise. */ -static inline bool fault_flag_allow_retry_first(unsigned int flags) +static inline bool fault_flag_allow_retry_first(enum fault_flag flags) { return (flags & FAULT_FLAG_ALLOW_RETRY) && (!(flags & FAULT_FLAG_TRIED)); @@ -520,7 +539,7 @@ struct vm_fault { pgoff_t pgoff; /* Logical page offset based on vma */ unsigned long address; /* Faulting virtual address */ }; - unsigned int flags; /* FAULT_FLAG_xxx flags + enum fault_flag flags; /* FAULT_FLAG_xxx flags * XXX: should really be 'const' */ pmd_t *pmd; /* Pointer to pmd entry matching * the 'address' */ @@ -570,7 +589,7 @@ struct vm_operations_struct { void (*close)(struct vm_area_struct * area); /* Called any time before splitting to check if it's allowed */ int (*may_split)(struct vm_area_struct *area, unsigned long addr); - int (*mremap)(struct vm_area_struct *area, unsigned long flags); + int (*mremap)(struct vm_area_struct *area); /* * Called by mprotect() to make driver-specific permission * checks before mprotect() is finalised. 
The VMA must not @@ -1122,6 +1141,11 @@ static inline bool is_zone_device_page(const struct page *page) } #endif +static inline bool is_zone_movable_page(const struct page *page) +{ + return page_zonenum(page) == ZONE_MOVABLE; +} + #ifdef CONFIG_DEV_PAGEMAP_OPS void free_devmap_managed_page(struct page *page); DECLARE_STATIC_KEY_FALSE(devmap_managed_key); @@ -1255,13 +1279,16 @@ static inline void put_page(struct page *page) void unpin_user_page(struct page *page); void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, bool make_dirty); +void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages, + bool make_dirty); void unpin_user_pages(struct page **pages, unsigned long npages); /** - * page_maybe_dma_pinned() - report if a page is pinned for DMA. + * page_maybe_dma_pinned - Report if a page is pinned for DMA. + * @page: The page. * * This function checks if a page has been pinned via a call to - * pin_user_pages*(). + * a function in the pin_user_pages() family. * * For non-huge pages, the return value is partially fuzzy: false is not fuzzy, * because it means "definitely not pinned for DMA", but true means "probably @@ -1279,9 +1306,8 @@ void unpin_user_pages(struct page **pages, unsigned long npages); * * For more information, please see Documentation/core-api/pin_user_pages.rst. * - * @page: pointer to page to be queried. - * @Return: True, if it is likely that the page has been "dma-pinned". - * False, if the page is definitely not dma-pinned. + * Return: True, if it is likely that the page has been "dma-pinned". + * False, if the page is definitely not dma-pinned. */ static inline bool page_maybe_dma_pinned(struct page *page) { @@ -1529,6 +1555,20 @@ static inline unsigned long page_to_section(const struct page *page) } #endif +/* MIGRATE_CMA and ZONE_MOVABLE do not allow pin pages */ +#ifdef CONFIG_MIGRATION +static inline bool is_pinnable_page(struct page *page) +{ + return !(is_zone_movable_page(page) || is_migrate_cma_page(page)) || + is_zero_pfn(page_to_pfn(page)); +} +#else +static inline bool is_pinnable_page(struct page *page) +{ + return true; +} +#endif + static inline void set_page_zone(struct page *page, enum zone_type zone) { page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT); @@ -1619,7 +1659,6 @@ static inline pgoff_t page_index(struct page *page) bool page_mapped(struct page *page); struct address_space *page_mapping(struct page *page); -struct address_space *page_mapping_file(struct page *page); /* * Return true only if the page has been allocated with @@ -2347,7 +2386,7 @@ extern unsigned long free_reserved_area(void *start, void *end, int poison, const char *s); extern void adjust_managed_page_count(struct page *page, long count); -extern void mem_init_print_info(const char *str); +extern void mem_init_print_info(void); extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end); @@ -2721,6 +2760,8 @@ unsigned long change_prot_numa(struct vm_area_struct *vma, struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); int remap_pfn_range(struct vm_area_struct *, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t); +int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn, unsigned long size, pgprot_t prot); int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, struct page **pages, unsigned long *num); @@ -2780,7 +2821,6 @@ struct page 
*follow_page(struct vm_area_struct *vma, unsigned long address, #define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO * and return without waiting upon it */ #define FOLL_POPULATE 0x40 /* fault in page */ -#define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */ #define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */ #define FOLL_NUMA 0x200 /* force NUMA hinting page fault */ #define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */ @@ -2904,18 +2944,20 @@ static inline void kernel_poison_pages(struct page *page, int numpages) { } static inline void kernel_unpoison_pages(struct page *page, int numpages) { } #endif -DECLARE_STATIC_KEY_FALSE(init_on_alloc); +DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc); static inline bool want_init_on_alloc(gfp_t flags) { - if (static_branch_unlikely(&init_on_alloc)) + if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, + &init_on_alloc)) return true; return flags & __GFP_ZERO; } -DECLARE_STATIC_KEY_FALSE(init_on_free); +DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free); static inline bool want_init_on_free(void) { - return static_branch_unlikely(&init_on_free); + return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON, + &init_on_free); } extern bool _debug_pagealloc_enabled_early; @@ -3168,7 +3210,43 @@ unsigned long wp_shared_mapping_range(struct address_space *mapping, extern int sysctl_nr_trim_pages; +#ifdef CONFIG_PRINTK void mem_dump_obj(void *object); +#else +static inline void mem_dump_obj(void *object) {} +#endif + +/** + * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it + * @seals: the seals to check + * @vma: the vma to operate on + * + * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on + * the vma flags. Return 0 if check pass, or <0 for errors. + */ +static inline int seal_check_future_write(int seals, struct vm_area_struct *vma) +{ + if (seals & F_SEAL_FUTURE_WRITE) { + /* + * New PROT_WRITE and MAP_SHARED mmaps are not allowed when + * "future write" seal active. + */ + if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) + return -EPERM; + + /* + * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as + * MAP_SHARED and read-only, take care to not allow mprotect to + * revert protections on such mappings. Do this only for shared + * mappings. For private mappings, don't need to mask + * VM_MAYWRITE as we still want them to be COW-writable. + */ + if (vma->vm_flags & VM_SHARED) + vma->vm_flags &= ~(VM_MAYWRITE); + } + + return 0; +} #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 6613b26a8894..5aacc1c10a45 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -97,10 +97,10 @@ struct page { }; struct { /* page_pool used by netstack */ /** - * @dma_addr: might require a 64-bit value even on + * @dma_addr: might require a 64-bit value on * 32-bit architectures. 
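Returning to the seal_check_future_write() helper introduced above: it packages the F_SEAL_FUTURE_WRITE policy so that memfd-style filesystems can apply it from their mmap handlers. A hedged sketch of such a handler follows; the example_inode_info structure and the use of file->private_data are assumptions, not part of this patch.

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical per-file state that records the memfd seals. */
struct example_inode_info {
        int seals;
};

static int example_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct example_inode_info *info = file->private_data;
        int ret;

        /* Refuse or downgrade the mapping according to F_SEAL_FUTURE_WRITE. */
        ret = seal_check_future_write(info->seals, vma);
        if (ret)
                return ret;

        vma->vm_ops = &generic_file_vm_ops;
        return 0;
}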
*/ - dma_addr_t dma_addr; + unsigned long dma_addr[2]; }; struct { /* slab, slob and slub */ union { diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 26a3c7bc29ae..c7e7b43600e9 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -302,9 +302,6 @@ struct mmc_host { u32 ocr_avail_sdio; /* SDIO-specific OCR */ u32 ocr_avail_sd; /* SD-specific OCR */ u32 ocr_avail_mmc; /* MMC-specific OCR */ -#ifdef CONFIG_PM_SLEEP - struct notifier_block pm_notify; -#endif struct wakeup_source *ws; /* Enable consume of uevents */ u32 max_current_330; u32 max_current_300; @@ -423,7 +420,6 @@ struct mmc_host { /* group bitfields together to minimize padding */ unsigned int use_spi_crc:1; unsigned int claimed:1; /* host exclusively claimed */ - unsigned int bus_dead:1; /* bus has been released */ unsigned int doing_init_tune:1; /* initial tuning in progress */ unsigned int can_retune:1; /* re-tuning can be used */ unsigned int doing_retune:1; /* re-tuning in progress */ @@ -454,7 +450,6 @@ struct mmc_host { struct mmc_slot slot; const struct mmc_bus_ops *bus_ops; /* current bus driver */ - unsigned int bus_refs; /* reference counter */ unsigned int sdio_irqs; struct task_struct *sdio_irq_thread; @@ -514,7 +509,7 @@ void mmc_free_host(struct mmc_host *); void mmc_of_parse_clk_phase(struct mmc_host *host, struct mmc_clk_phase_map *map); int mmc_of_parse(struct mmc_host *host); -int mmc_of_parse_voltage(struct device_node *np, u32 *mask); +int mmc_of_parse_voltage(struct mmc_host *host, u32 *mask); static inline void *mmc_priv(struct mmc_host *host) { diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h index e28769991e82..2a05d1ac4f0e 100644 --- a/include/linux/mmc/sdio.h +++ b/include/linux/mmc/sdio.h @@ -82,7 +82,7 @@ #define SDIO_SD_REV_1_01 0 /* SD Physical Spec Version 1.01 */ #define SDIO_SD_REV_1_10 1 /* SD Physical Spec Version 1.10 */ #define SDIO_SD_REV_2_00 2 /* SD Physical Spec Version 2.00 */ -#define SDIO_SD_REV_3_00 3 /* SD Physical Spev Version 3.00 */ +#define SDIO_SD_REV_3_00 3 /* SD Physical Spec Version 3.00 */ #define SDIO_CCCR_IOEx 0x02 #define SDIO_CCCR_IORx 0x03 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 47946cec7584..0d53eba1c383 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -55,7 +55,7 @@ enum migratetype { * pageblocks to MIGRATE_CMA which can be done by * __free_pageblock_cma() function. What is important though * is that a range of pageblocks must be aligned to - * MAX_ORDER_NR_PAGES should biggest page be bigger then + * MAX_ORDER_NR_PAGES should biggest page be bigger than * a single pageblock. */ MIGRATE_CMA, @@ -407,8 +407,13 @@ enum zone_type { * to increase the number of THP/huge pages. Notable special cases are: * * 1. Pinned pages: (long-term) pinning of movable pages might - * essentially turn such pages unmovable. Memory offlining might - * retry a long time. + * essentially turn such pages unmovable. Therefore, we do not allow + * pinning long-term pages in ZONE_MOVABLE. When pages are pinned and + * faulted, they come from the right zone right away. However, it is + * still possible that address space already has pages in + * ZONE_MOVABLE at the time when pages are pinned (i.e. user has + * touches that memory before pinning). In such case we migrate them + * to a different zone. When migration fails - pinning fails. * 2. memblock allocations: kernelcore/movablecore setups might create * situations where ZONE_MOVABLE contains unmovable allocations * after boot. 
Memory offlining and allocations fail early. @@ -427,6 +432,15 @@ enum zone_type { * techniques might use alloc_contig_range() to hide previously * exposed pages from the buddy again (e.g., to implement some sort * of memory unplug in virtio-mem). + * 6. ZERO_PAGE(0), kernelcore/movablecore setups might create + * situations where ZERO_PAGE(0) which is allocated differently + * on different platforms may end up in a movable zone. ZERO_PAGE(0) + * cannot be migrated. + * 7. Memory-hotplug: when using memmap_on_memory and onlining the + * memory to the MOVABLE zone, the vmemmap pages are also placed in + * such zone. Such pages cannot be really moved around as they are + * self-stored in the range, but they are treated as movable when + * the range they describe is about to be offlined. * * In general, no unmovable allocations that degrade memory offlining * should end up in ZONE_MOVABLE. Allocators (like alloc_contig_range()) @@ -993,7 +1007,8 @@ static inline int is_highmem_idx(enum zone_type idx) * is_highmem - helper function to quickly check if a struct zone is a * highmem zone or not. This is an attempt to keep references * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum. - * @zone - pointer to struct zone variable + * @zone: pointer to struct zone variable + * Return: 1 for a highmem zone, 0 otherwise */ static inline int is_highmem(struct zone *zone) { @@ -1044,7 +1059,7 @@ extern struct zone *next_zone(struct zone *zone); /** * for_each_online_pgdat - helper macro to iterate over all online nodes - * @pgdat - pointer to a pg_data_t variable + * @pgdat: pointer to a pg_data_t variable */ #define for_each_online_pgdat(pgdat) \ for (pgdat = first_online_pgdat(); \ @@ -1052,7 +1067,7 @@ extern struct zone *next_zone(struct zone *zone); pgdat = next_online_pgdat(pgdat)) /** * for_each_zone - helper macro to iterate over all memory zones - * @zone - pointer to struct zone variable + * @zone: pointer to struct zone variable * * The user only needs to declare the zone variable, for_each_zone * fills it in. @@ -1091,15 +1106,18 @@ struct zoneref *__next_zones_zonelist(struct zoneref *z, /** * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point - * @z - The cursor used as a starting point for the search - * @highest_zoneidx - The zone index of the highest zone to return - * @nodes - An optional nodemask to filter the zonelist with + * @z: The cursor used as a starting point for the search + * @highest_zoneidx: The zone index of the highest zone to return + * @nodes: An optional nodemask to filter the zonelist with * * This function returns the next zone at or below a given zone index that is * within the allowed nodemask using a cursor as the starting point for the * search. The zoneref returned is a cursor that represents the current zone * being examined. It should be advanced by one before calling * next_zones_zonelist again. 
+ * + * Return: the next zone at or below highest_zoneidx within the allowed + * nodemask using a cursor within a zonelist as a starting point */ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z, enum zone_type highest_zoneidx, @@ -1112,10 +1130,9 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z, /** * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist - * @zonelist - The zonelist to search for a suitable zone - * @highest_zoneidx - The zone index of the highest zone to return - * @nodes - An optional nodemask to filter the zonelist with - * @return - Zoneref pointer for the first suitable zone found (see below) + * @zonelist: The zonelist to search for a suitable zone + * @highest_zoneidx: The zone index of the highest zone to return + * @nodes: An optional nodemask to filter the zonelist with * * This function returns the first zone at or below a given zone index that is * within the allowed nodemask. The zoneref returned is a cursor that can be @@ -1125,6 +1142,8 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z, * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is * never NULL). This may happen either genuinely, or due to concurrent nodemask * update due to cpuset modification. + * + * Return: Zoneref pointer for the first suitable zone found */ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, enum zone_type highest_zoneidx, @@ -1136,11 +1155,11 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, /** * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask - * @zone - The current zone in the iterator - * @z - The current pointer within zonelist->_zonerefs being iterated - * @zlist - The zonelist being iterated - * @highidx - The zone index of the highest zone to return - * @nodemask - Nodemask allowed by the allocator + * @zone: The current zone in the iterator + * @z: The current pointer within zonelist->_zonerefs being iterated + * @zlist: The zonelist being iterated + * @highidx: The zone index of the highest zone to return + * @nodemask: Nodemask allowed by the allocator * * This iterator iterates though all zones at or below a given zone index and * within a given nodemask @@ -1160,10 +1179,10 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, /** * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index - * @zone - The current zone in the iterator - * @z - The current pointer within zonelist->zones being iterated - * @zlist - The zonelist being iterated - * @highidx - The zone index of the highest zone to return + * @zone: The current zone in the iterator + * @z: The current pointer within zonelist->zones being iterated + * @zlist: The zonelist being iterated + * @highidx: The zone index of the highest zone to return * * This iterator iterates though all zones at or below a given zone index. 
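To make the iterator usage concrete, here is a short sketch that walks the zones of the local node's zonelist; obtaining the zonelist via node_zonelist() with GFP_KERNEL is just one plausible choice:

#include <linux/gfp.h>
#include <linux/mmzone.h>
#include <linux/topology.h>

static unsigned long example_count_present_pages(void)
{
        struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
        unsigned long pages = 0;
        struct zoneref *z;
        struct zone *zone;

        /* Visit every zone at or below ZONE_NORMAL, preferred zone first. */
        for_each_zone_zonelist(zone, z, zonelist, ZONE_NORMAL)
                pages += zone->present_pages;

        return pages;
}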
*/ @@ -1378,10 +1397,8 @@ static inline int online_section_nr(unsigned long nr) #ifdef CONFIG_MEMORY_HOTPLUG void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn); -#ifdef CONFIG_MEMORY_HOTREMOVE void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn); #endif -#endif static inline struct mem_section *__pfn_to_section(unsigned long pfn) { diff --git a/include/linux/module.h b/include/linux/module.h index da4b6fbe8ebe..8100bb477d86 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -26,6 +26,7 @@ #include <linux/tracepoint-defs.h> #include <linux/srcu.h> #include <linux/static_call_types.h> +#include <linux/cfi.h> #include <linux/percpu.h> #include <asm/module.h> @@ -128,13 +129,17 @@ extern void cleanup_module(void); #define module_init(initfn) \ static inline initcall_t __maybe_unused __inittest(void) \ { return initfn; } \ - int init_module(void) __copy(initfn) __attribute__((alias(#initfn))); + int init_module(void) __copy(initfn) \ + __attribute__((alias(#initfn))); \ + __CFI_ADDRESSABLE(init_module, __initdata); /* This is only required if you want to be unloadable. */ #define module_exit(exitfn) \ static inline exitcall_t __maybe_unused __exittest(void) \ { return exitfn; } \ - void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn))); + void cleanup_module(void) __copy(exitfn) \ + __attribute__((alias(#exitfn))); \ + __CFI_ADDRESSABLE(cleanup_module, __exitdata); #endif @@ -376,6 +381,10 @@ struct module { const s32 *crcs; unsigned int num_syms; +#ifdef CONFIG_CFI_CLANG + cfi_check_fn cfi_check; +#endif + /* Kernel parameters. */ #ifdef CONFIG_SYSFS struct mutex param_lock; diff --git a/include/linux/msi.h b/include/linux/msi.h index aef35fd1cf11..6aff469e511d 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -240,8 +240,7 @@ void pci_msi_unmask_irq(struct irq_data *data); /* * The arch hooks to setup up msi irqs. Default functions are implemented * as weak symbols so that they /can/ be overriden by architecture specific - * code if needed. These hooks must be enabled by the architecture or by - * drivers which depend on them via msi_controller based MSI handling. + * code if needed. These hooks can only be enabled by the architecture. * * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected they are replaced by * stubs with warnings. 
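With struct msi_controller gone (see the removal below), the only remaining override point on this path is the weak arch_* fallbacks, compiled in when the architecture selects CONFIG_PCI_MSI_ARCH_FALLBACKS. A rough sketch of an architecture-side implementation; the descriptor allocation shown is a placeholder, and a real version would also compose and program the MSI message:

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/pci.h>

int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
        int virq;

        /* Placeholder: a real arch would route this through its irqchip. */
        virq = irq_alloc_desc(dev_to_node(&dev->dev));
        if (virq < 0)
                return virq;

        irq_set_msi_desc(virq, desc);
        return 0;
}

void arch_teardown_msi_irq(unsigned int irq)
{
        irq_free_desc(irq);
}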
@@ -251,7 +250,6 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc); void arch_teardown_msi_irq(unsigned int irq); int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); void arch_teardown_msi_irqs(struct pci_dev *dev); -void default_teardown_msi_irqs(struct pci_dev *dev); #else static inline int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { @@ -272,19 +270,6 @@ static inline void arch_teardown_msi_irqs(struct pci_dev *dev) void arch_restore_msi_irqs(struct pci_dev *dev); void default_restore_msi_irqs(struct pci_dev *dev); -struct msi_controller { - struct module *owner; - struct device *dev; - struct device_node *of_node; - struct list_head list; - - int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev, - struct msi_desc *desc); - int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev, - int nvec, int type); - void (*teardown_irq)(struct msi_controller *chip, unsigned int irq); -}; - #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN #include <linux/irqhandler.h> diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h index 3c668cb1e344..15cc9b95e32b 100644 --- a/include/linux/mtd/blktrans.h +++ b/include/linux/mtd/blktrans.h @@ -77,5 +77,16 @@ extern int add_mtd_blktrans_dev(struct mtd_blktrans_dev *dev); extern int del_mtd_blktrans_dev(struct mtd_blktrans_dev *dev); extern int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev); +/** + * module_mtd_blktrans() - Helper macro for registering a mtd blktrans driver + * @__mtd_blktrans: mtd_blktrans_ops struct + * + * Helper macro for mtd blktrans drivers which do not do anything special in + * module init/exit. This eliminates a lot of boilerplate. Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_mtd_blktrans(__mtd_blktrans) \ + module_driver(__mtd_blktrans, register_mtd_blktrans, \ + deregister_mtd_blktrans) #endif /* __MTD_TRANS_H__ */ diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 157357ec1441..a89955f3cbc8 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -229,6 +229,7 @@ struct mtd_part { */ struct mtd_master { struct mutex partitions_lock; + struct mutex chrdev_lock; unsigned int suspended : 1; }; @@ -333,9 +334,12 @@ struct mtd_info { int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to, - size_t len, size_t *retlen, u_char *buf); + size_t len, size_t *retlen, + const u_char *buf); int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len); + int (*_erase_user_prot_reg) (struct mtd_info *mtd, loff_t from, + size_t len); int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen); void (*_sync) (struct mtd_info *mtd); @@ -515,8 +519,9 @@ int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len, - size_t *retlen, u_char *buf); + size_t *retlen, const u_char *buf); int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len); +int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len); int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen); diff --git a/include/linux/mtd/nand-ecc-sw-bch.h 
b/include/linux/mtd/nand-ecc-sw-bch.h index 22c92073b3dd..9da9969505a8 100644 --- a/include/linux/mtd/nand-ecc-sw-bch.h +++ b/include/linux/mtd/nand-ecc-sw-bch.h @@ -16,7 +16,6 @@ * @req_ctx: Save request context and tweak the original request to fit the * engine needs * @code_size: Number of bytes needed to store a code (one code per step) - * @nsteps: Number of steps * @calc_buf: Buffer to use when calculating ECC bytes * @code_buf: Buffer to use when reading (raw) ECC bytes from the chip * @bch: BCH control structure @@ -26,7 +25,6 @@ struct nand_ecc_sw_bch_conf { struct nand_ecc_req_tweak_ctx req_ctx; unsigned int code_size; - unsigned int nsteps; u8 *calc_buf; u8 *code_buf; struct bch_control *bch; diff --git a/include/linux/mtd/nand-ecc-sw-hamming.h b/include/linux/mtd/nand-ecc-sw-hamming.h index 9f9073d86ff3..c6c71894c575 100644 --- a/include/linux/mtd/nand-ecc-sw-hamming.h +++ b/include/linux/mtd/nand-ecc-sw-hamming.h @@ -17,7 +17,6 @@ * @req_ctx: Save request context and tweak the original request to fit the * engine needs * @code_size: Number of bytes needed to store a code (one code per step) - * @nsteps: Number of steps * @calc_buf: Buffer to use when calculating ECC bytes * @code_buf: Buffer to use when reading (raw) ECC bytes from the chip * @sm_order: Smart Media special ordering @@ -25,7 +24,6 @@ struct nand_ecc_sw_hamming_conf { struct nand_ecc_req_tweak_ctx req_ctx; unsigned int code_size; - unsigned int nsteps; u8 *calc_buf; u8 *code_buf; unsigned int sm_order; diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 414f8a4d2853..32fc7edf65b3 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -231,12 +231,14 @@ struct nand_ops { /** * struct nand_ecc_context - Context for the ECC engine * @conf: basic ECC engine parameters + * @nsteps: number of ECC steps * @total: total number of bytes used for storing ECC codes, this is used by * generic OOB layouts * @priv: ECC engine driver private data */ struct nand_ecc_context { struct nand_ecc_props conf; + unsigned int nsteps; unsigned int total; void *priv; }; @@ -586,6 +588,26 @@ nanddev_get_ecc_conf(struct nand_device *nand) } /** + * nanddev_get_ecc_nsteps() - Extract the number of ECC steps + * @nand: NAND device + */ +static inline unsigned int +nanddev_get_ecc_nsteps(struct nand_device *nand) +{ + return nand->ecc.ctx.nsteps; +} + +/** + * nanddev_get_ecc_bytes_per_step() - Extract the number of ECC bytes per step + * @nand: NAND device + */ +static inline unsigned int +nanddev_get_ecc_bytes_per_step(struct nand_device *nand) +{ + return nand->ecc.ctx.total / nand->ecc.ctx.nsteps; +} + +/** * nanddev_get_ecc_requirements() - Extract the ECC requirements from a NAND * device * @nand: NAND device diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 6b3240e44310..29df2f43dcb5 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -18,7 +18,6 @@ #include <linux/mtd/flashchip.h> #include <linux/mtd/bbm.h> #include <linux/mtd/jedec.h> -#include <linux/mtd/nand.h> #include <linux/mtd/onfi.h> #include <linux/mutex.h> #include <linux/of.h> @@ -1037,6 +1036,16 @@ struct nand_manufacturer { }; /** + * struct nand_secure_region - NAND secure region structure + * @offset: Offset of the start of the secure region + * @size: Size of the secure region + */ +struct nand_secure_region { + u64 offset; + u64 size; +}; + +/** * struct nand_chip - NAND Private Flash Chip Data * @base: Inherit from the generic NAND device * @id: Holds NAND ID @@ -1086,6 +1095,8 
@@ struct nand_manufacturer { * NAND Controller drivers should not modify this value, but they're * allowed to read it. * @read_retries: The number of read retry modes supported + * @secure_regions: Structure containing the secure regions info + * @nr_secure_regions: Number of secure regions * @controller: The hardware controller structure which is shared among multiple * independent devices * @ecc: The ECC controller structure @@ -1135,6 +1146,8 @@ struct nand_chip { unsigned int suspended : 1; int cur_cs; int read_retries; + struct nand_secure_region *secure_regions; + u8 nr_secure_regions; /* Externals */ struct nand_controller *controller; diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index a0d572855444..98ed91b529ea 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -107,6 +107,11 @@ #define SPINOR_OP_RD_EVCR 0x65 /* Read EVCR register */ #define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */ +/* Used for GigaDevices and Winbond flashes. */ +#define SPINOR_OP_ESECR 0x44 /* Erase Security registers */ +#define SPINOR_OP_PSECR 0x42 /* Program Security registers */ +#define SPINOR_OP_RSECR 0x48 /* Read Security registers */ + /* Status Register bits. */ #define SR_WIP BIT(0) /* Write in progress */ #define SR_WEL BIT(1) /* Write enable latch */ @@ -138,6 +143,9 @@ /* Status Register 2 bits. */ #define SR2_QUAD_EN_BIT1 BIT(1) +#define SR2_LB1 BIT(3) /* Security Register Lock Bit 1 */ +#define SR2_LB2 BIT(4) /* Security Register Lock Bit 2 */ +#define SR2_LB3 BIT(5) /* Security Register Lock Bit 3 */ #define SR2_QUAD_EN_BIT7 BIT(7) /* Supported SPI protocols */ diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 515cff77a4f4..e19323521f9c 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -20,6 +20,7 @@ #include <linux/osq_lock.h> #include <linux/debug_locks.h> +struct ww_class; struct ww_acquire_ctx; /* @@ -65,9 +66,6 @@ struct mutex { #endif }; -struct ww_class; -struct ww_acquire_ctx; - struct ww_mutex { struct mutex base; struct ww_acquire_ctx *ctx; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 87a5d186faff..5cbc950b34df 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -756,6 +756,13 @@ struct rx_queue_attribute { const char *buf, size_t len); }; +/* XPS map type and offset of the xps map within net_device->xps_maps[]. */ +enum xps_map_type { + XPS_CPUS = 0, + XPS_RXQS, + XPS_MAPS_MAX, +}; + #ifdef CONFIG_XPS /* * This structure holds an XPS map which can be of variable length. The @@ -773,9 +780,19 @@ struct xps_map { /* * This structure holds all XPS maps for device. Maps are indexed by CPU. + * + * We keep track of the number of cpus/rxqs used when the struct is allocated, + * in nr_ids. This will help not accessing out-of-bound memory. + * + * We keep track of the number of traffic classes used when the struct is + * allocated, in num_tc. This will be used to navigate the maps, to ensure we're + * not crossing its upper bound, as the original dev->num_tc can be updated in + * the meantime. 
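For context on how the per-type maps above get filled in: a driver (or a sysfs write) installs a CPU mask per transmit queue, and the maps are then consulted at transmit time. A hedged driver-side sketch using the existing netif_set_xps_queue() helper; the 1:1 queue-to-CPU policy is arbitrary:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/netdevice.h>

static void example_setup_xps(struct net_device *dev)
{
        cpumask_var_t mask;
        int i;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return;

        for (i = 0; i < dev->real_num_tx_queues && i < num_online_cpus(); i++) {
                cpumask_clear(mask);
                cpumask_set_cpu(i, mask);
                netif_set_xps_queue(dev, mask, i);      /* i is the TX queue index */
        }

        free_cpumask_var(mask);
}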
*/ struct xps_dev_maps { struct rcu_head rcu; + unsigned int nr_ids; + s16 num_tc; struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */ }; @@ -833,6 +850,59 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev); +enum net_device_path_type { + DEV_PATH_ETHERNET = 0, + DEV_PATH_VLAN, + DEV_PATH_BRIDGE, + DEV_PATH_PPPOE, + DEV_PATH_DSA, +}; + +struct net_device_path { + enum net_device_path_type type; + const struct net_device *dev; + union { + struct { + u16 id; + __be16 proto; + u8 h_dest[ETH_ALEN]; + } encap; + struct { + enum { + DEV_PATH_BR_VLAN_KEEP, + DEV_PATH_BR_VLAN_TAG, + DEV_PATH_BR_VLAN_UNTAG, + DEV_PATH_BR_VLAN_UNTAG_HW, + } vlan_mode; + u16 vlan_id; + __be16 vlan_proto; + } bridge; + struct { + int port; + u16 proto; + } dsa; + }; +}; + +#define NET_DEVICE_PATH_STACK_MAX 5 +#define NET_DEVICE_PATH_VLAN_MAX 2 + +struct net_device_path_stack { + int num_paths; + struct net_device_path path[NET_DEVICE_PATH_STACK_MAX]; +}; + +struct net_device_path_ctx { + const struct net_device *dev; + const u8 *daddr; + + int num_vlans; + struct { + u16 id; + __be16 proto; + } vlan[NET_DEVICE_PATH_VLAN_MAX]; +}; + enum tc_setup_type { TC_SETUP_QDISC_MQPRIO, TC_SETUP_CLSU32, @@ -1267,6 +1337,8 @@ struct netdev_net_notifier { * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev); * If a device is paired with a peer device, return the peer instance. * The caller must be under RCU read context. + * int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path); + * Get the forwarding path to reach the real device from the HW destination address */ struct net_device_ops { int (*ndo_init)(struct net_device *dev); @@ -1473,6 +1545,8 @@ struct net_device_ops { int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p, int cmd); struct net_device * (*ndo_get_peer_dev)(struct net_device *dev); + int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, + struct net_device_path *path); }; /** @@ -1520,6 +1594,8 @@ struct net_device_ops { * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running + * @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with + * skb_headlen(skb) == 0 (data starts from frag0) */ enum netdev_priv_flags { IFF_802_1Q_VLAN = 1<<0, @@ -1553,6 +1629,7 @@ enum netdev_priv_flags { IFF_FAILOVER_SLAVE = 1<<28, IFF_L3MDEV_RX_HANDLER = 1<<29, IFF_LIVE_RENAME_OK = 1<<30, + IFF_TX_SKB_NO_LINEAR = 1<<31, }; #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN @@ -1579,12 +1656,14 @@ enum netdev_priv_flags { #define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE #define IFF_TEAM IFF_TEAM #define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED +#define IFF_PHONY_HEADROOM IFF_PHONY_HEADROOM #define IFF_MACSEC IFF_MACSEC #define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER #define IFF_FAILOVER IFF_FAILOVER #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER #define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK +#define IFF_TX_SKB_NO_LINEAR IFF_TX_SKB_NO_LINEAR /* Specifies the type of the struct net_device::ml_priv pointer */ enum netdev_ml_priv_type { @@ -1760,8 +1839,7 @@ enum netdev_ml_priv_type { * @tx_queue_len: Max frames per queue allowed * @tx_global_lock: XXX: need comments on this one * @xdp_bulkq: XDP device bulk queue - * @xps_cpus_map: all CPUs map for XPS device - * @xps_rxqs_map: all RXQs 
map for XPS device + * @xps_maps: all CPUs/RXQs maps for XPS device * * @xps_maps: XXX: need comments on this one * @miniq_egress: clsact qdisc specific data for @@ -1773,6 +1851,7 @@ enum netdev_ml_priv_type { * * @proto_down_reason: reason a netdev interface is held down * @pcpu_refcnt: Number of references to this device + * @dev_refcnt: Number of references to this device * @todo_list: Delayed register/unregister * @link_watch_list: XXX: need comments on this one * @@ -2057,8 +2136,7 @@ struct net_device { struct xdp_dev_bulk_queue __percpu *xdp_bulkq; #ifdef CONFIG_XPS - struct xps_dev_maps __rcu *xps_cpus_map; - struct xps_dev_maps __rcu *xps_rxqs_map; + struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX]; #endif #ifdef CONFIG_NET_CLS_ACT struct mini_Qdisc __rcu *miniq_egress; @@ -2074,7 +2152,12 @@ struct net_device { u32 proto_down_reason; struct list_head todo_list; + +#ifdef CONFIG_PCPU_DEV_REFCNT int __percpu *pcpu_refcnt; +#else + refcount_t dev_refcnt; +#endif struct list_head link_watch_list; @@ -2846,6 +2929,8 @@ void dev_remove_offload(struct packet_offload *po); int dev_get_iflink(const struct net_device *dev); int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb); +int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr, + struct net_device_path_stack *stack); struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, unsigned short mask); struct net_device *dev_get_by_name(struct net *net, const char *name); @@ -3424,6 +3509,24 @@ netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue) } /** + * netdev_queue_set_dql_min_limit - set dql minimum limit + * @dev_queue: pointer to transmit queue + * @min_limit: dql minimum limit + * + * Forces xmit_more() to return true until the minimum threshold + * defined by @min_limit is reached (or until the tx queue is + * empty). Warning: to be use with care, misuse will impact the + * latency. 
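Stepping back to the forwarding-path additions earlier in this header: a flow-offload consumer is expected to resolve the device chain once with dev_fill_forward_path() and then walk the returned stack. A rough sketch; the printing is purely illustrative:

#include <linux/netdevice.h>

static void example_dump_forward_path(const struct net_device *dev,
                                      const u8 *daddr)
{
        struct net_device_path_stack stack;
        int i, err;

        err = dev_fill_forward_path(dev, daddr, &stack);
        if (err)
                return;

        for (i = 0; i < stack.num_paths; i++)
                pr_info("hop %d: %s (type %d)\n", i,
                        netdev_name(stack.path[i].dev),
                        stack.path[i].type);
}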
+ */ +static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue, + unsigned int min_limit) +{ +#ifdef CONFIG_BQL + dev_queue->dql.min_limit = min_limit; +#endif +} + +/** * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write * @dev_queue: pointer to transmit queue * @@ -3688,7 +3791,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, u16 index); int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, - u16 index, bool is_rxqs_map); + u16 index, enum xps_map_type type); /** * netif_attr_test_mask - Test a CPU or Rx queue set in a mask @@ -3783,7 +3886,7 @@ static inline int netif_set_xps_queue(struct net_device *dev, static inline int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, - u16 index, bool is_rxqs_map) + u16 index, enum xps_map_type type) { return 0; } @@ -3923,7 +4026,14 @@ void __dev_notify_flags(struct net_device *, unsigned int old_flags, int dev_change_name(struct net_device *, const char *); int dev_set_alias(struct net_device *, const char *, size_t); int dev_get_alias(const struct net_device *, char *, size_t); -int dev_change_net_namespace(struct net_device *, struct net *, const char *); +int __dev_change_net_namespace(struct net_device *dev, struct net *net, + const char *pat, int new_ifindex); +static inline +int dev_change_net_namespace(struct net_device *dev, struct net *net, + const char *pat) +{ + return __dev_change_net_namespace(dev, net, pat, 0); +} int __dev_set_mtu(struct net_device *, int); int dev_validate_mtu(struct net_device *dev, int mtu, struct netlink_ext_ack *extack); @@ -4026,7 +4136,11 @@ void netdev_run_todo(void); */ static inline void dev_put(struct net_device *dev) { +#ifdef CONFIG_PCPU_DEV_REFCNT this_cpu_dec(*dev->pcpu_refcnt); +#else + refcount_dec(&dev->dev_refcnt); +#endif } /** @@ -4037,7 +4151,11 @@ static inline void dev_put(struct net_device *dev) */ static inline void dev_hold(struct net_device *dev) { +#ifdef CONFIG_PCPU_DEV_REFCNT this_cpu_inc(*dev->pcpu_refcnt); +#else + refcount_inc(&dev->dev_refcnt); +#endif } /* Carrier loss detection, dial on demand. The functions netif_carrier_on @@ -4172,7 +4290,7 @@ static inline bool netif_oper_up(const struct net_device *dev) * * Check if device has not been removed from system. 
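Circling back to netdev_queue_set_dql_min_limit() above: a driver whose hardware only reaches line rate when descriptors are batched might raise the floor once at open time. The sizing below (sixteen full-length frames per queue) is illustrative only, in keeping with the warning in the kerneldoc:

#include <linux/netdevice.h>

static void example_tune_tx_batching(struct net_device *dev)
{
        unsigned int min_bytes = 16 * (dev->mtu + ETH_HLEN);
        int i;

        /* Keep xmit_more() batching until ~16 frames worth of bytes queue up. */
        for (i = 0; i < dev->real_num_tx_queues; i++)
                netdev_queue_set_dql_min_limit(netdev_get_tx_queue(dev, i),
                                               min_bytes);
}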
*/ -static inline bool netif_device_present(struct net_device *dev) +static inline bool netif_device_present(const struct net_device *dev) { return test_bit(__LINK_STATE_PRESENT, &dev->state); } @@ -4611,6 +4729,7 @@ void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s); extern int netdev_max_backlog; extern int netdev_tstamp_prequeue; +extern int netdev_unregister_timeout_secs; extern int weight_p; extern int dev_weight_rx_bias; extern int dev_weight_tx_bias; @@ -5287,6 +5406,9 @@ do { \ #define PTYPE_HASH_SIZE (16) #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1) +extern struct list_head ptype_all __read_mostly; +extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; + extern struct net_device *blackhole_netdev; #endif /* _LINUX_NETDEVICE_H */ diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 46d9a0c26c67..10279c4830ac 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -124,8 +124,6 @@ struct ip_set_ext { bool target; }; -struct ip_set; - #define ext_timeout(e, s) \ ((unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT])) #define ext_counter(e, s) \ diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index f6267e2883f2..515ce53aa20d 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -7,21 +7,26 @@ #include <net/netlink.h> #include <uapi/linux/netfilter/nfnetlink.h> +struct nfnl_info { + struct net *net; + struct sock *sk; + const struct nlmsghdr *nlh; + struct netlink_ext_ack *extack; +}; + +enum nfnl_callback_type { + NFNL_CB_UNSPEC = 0, + NFNL_CB_MUTEX, + NFNL_CB_RCU, + NFNL_CB_BATCH, +}; + struct nfnl_callback { - int (*call)(struct net *net, struct sock *nl, struct sk_buff *skb, - const struct nlmsghdr *nlh, - const struct nlattr * const cda[], - struct netlink_ext_ack *extack); - int (*call_rcu)(struct net *net, struct sock *nl, struct sk_buff *skb, - const struct nlmsghdr *nlh, - const struct nlattr * const cda[], - struct netlink_ext_ack *extack); - int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb, - const struct nlmsghdr *nlh, - const struct nlattr * const cda[], - struct netlink_ext_ack *extack); - const struct nla_policy *policy; /* netlink attribute policy */ - const u_int16_t attr_count; /* number of nlattr's */ + int (*call)(struct sk_buff *skb, const struct nfnl_info *info, + const struct nlattr * const cda[]); + const struct nla_policy *policy; + enum nfnl_callback_type type; + __u16 attr_count; }; enum nfnl_abort_action { @@ -51,12 +56,41 @@ int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid, unsigned int group, int echo, gfp_t flags); int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error); int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid); +void nfnetlink_broadcast(struct net *net, struct sk_buff *skb, __u32 portid, + __u32 group, gfp_t allocation); static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type) { return subsys << 8 | msg_type; } +static inline void nfnl_fill_hdr(struct nlmsghdr *nlh, u8 family, u8 version, + __be16 res_id) +{ + struct nfgenmsg *nfmsg; + + nfmsg = nlmsg_data(nlh); + nfmsg->nfgen_family = family; + nfmsg->version = version; + nfmsg->res_id = res_id; +} + +static inline struct nlmsghdr *nfnl_msg_put(struct sk_buff *skb, u32 portid, + u32 seq, int type, int flags, + u8 family, u8 version, + __be16 res_id) +{ + struct nlmsghdr *nlh; + + nlh = 
nlmsg_put(skb, portid, seq, type, sizeof(struct nfgenmsg), flags); + if (!nlh) + return NULL; + + nfnl_fill_hdr(nlh, family, version, res_id); + + return nlh; +} + void nfnl_lock(__u8 subsys_id); void nfnl_unlock(__u8 subsys_id); #ifdef CONFIG_PROVE_LOCKING diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 8ec48466410a..07c6ad8f2a02 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -158,7 +158,7 @@ struct xt_match { /* Called when entry of this type deleted. */ void (*destroy)(const struct xt_mtdtor_param *); -#ifdef CONFIG_COMPAT +#ifdef CONFIG_NETFILTER_XTABLES_COMPAT /* Called when userspace align differs from kernel space one */ void (*compat_from_user)(void *dst, const void *src); int (*compat_to_user)(void __user *dst, const void *src); @@ -169,7 +169,7 @@ struct xt_match { const char *table; unsigned int matchsize; unsigned int usersize; -#ifdef CONFIG_COMPAT +#ifdef CONFIG_NETFILTER_XTABLES_COMPAT unsigned int compatsize; #endif unsigned int hooks; @@ -199,7 +199,7 @@ struct xt_target { /* Called when entry of this type deleted. */ void (*destroy)(const struct xt_tgdtor_param *); -#ifdef CONFIG_COMPAT +#ifdef CONFIG_NETFILTER_XTABLES_COMPAT /* Called when userspace align differs from kernel space one */ void (*compat_from_user)(void *dst, const void *src); int (*compat_to_user)(void __user *dst, const void *src); @@ -210,7 +210,7 @@ struct xt_target { const char *table; unsigned int targetsize; unsigned int usersize; -#ifdef CONFIG_COMPAT +#ifdef CONFIG_NETFILTER_XTABLES_COMPAT unsigned int compatsize; #endif unsigned int hooks; @@ -229,6 +229,9 @@ struct xt_table { /* Man behind the curtain... */ struct xt_table_info *private; + /* hook ops that register the table with the netfilter core */ + struct nf_hook_ops *ops; + /* Set this to THIS_MODULE if you are a module, otherwise NULL */ struct module *me; @@ -322,6 +325,7 @@ struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision); int xt_find_revision(u8 af, const char *name, u8 revision, int target, int *err); +struct xt_table *xt_find_table(struct net *net, u8 af, const char *name); struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, const char *name); struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af, @@ -448,7 +452,7 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu) struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *); -#ifdef CONFIG_COMPAT +#ifdef CONFIG_NETFILTER_XTABLES_COMPAT #include <net/compat.h> struct compat_xt_entry_match { @@ -529,5 +533,5 @@ int xt_compat_check_entry_offsets(const void *base, const char *elems, unsigned int target_offset, unsigned int next_offset); -#endif /* CONFIG_COMPAT */ +#endif /* CONFIG_NETFILTER_XTABLES_COMPAT */ #endif /* _X_TABLES_H */ diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h index 26a13294318c..4f9a4b3c5892 100644 --- a/include/linux/netfilter_arp/arp_tables.h +++ b/include/linux/netfilter_arp/arp_tables.h @@ -51,15 +51,14 @@ struct arpt_error { extern void *arpt_alloc_initial_table(const struct xt_table *); int arpt_register_table(struct net *net, const struct xt_table *table, const struct arpt_replace *repl, - const struct nf_hook_ops *ops, struct xt_table **res); -void arpt_unregister_table(struct net *net, struct xt_table *table); -void arpt_unregister_table_pre_exit(struct net *net, struct xt_table *table, - const struct nf_hook_ops *ops); + const 
struct nf_hook_ops *ops); +void arpt_unregister_table(struct net *net, const char *name); +void arpt_unregister_table_pre_exit(struct net *net, const char *name); extern unsigned int arpt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table); -#ifdef CONFIG_COMPAT +#ifdef CONFIG_NETFILTER_XTABLES_COMPAT #include <net/compat.h> struct compat_arpt_entry { diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index 3a956145a25c..a8178253ce53 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -100,6 +100,7 @@ struct ebt_table { unsigned int valid_hooks); /* the data used by the kernel */ struct ebt_table_info *private; + struct nf_hook_ops *ops; struct module *me; }; @@ -108,11 +109,9 @@ struct ebt_table { extern int ebt_register_table(struct net *net, const struct ebt_table *table, - const struct nf_hook_ops *ops, - struct ebt_table **res); -extern void ebt_unregister_table(struct net *net, struct ebt_table *table); -void ebt_unregister_table_pre_exit(struct net *net, const char *tablename, - const struct nf_hook_ops *ops); + const struct nf_hook_ops *ops); +extern void ebt_unregister_table(struct net *net, const char *tablename); +void ebt_unregister_table_pre_exit(struct net *net, const char *tablename); extern unsigned int ebt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct ebt_table *table); diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h index c4676d6feeff..8d09bfe850dc 100644 --- a/include/linux/netfilter_ipv4/ip_tables.h +++ b/include/linux/netfilter_ipv4/ip_tables.h @@ -24,15 +24,10 @@ int ipt_register_table(struct net *net, const struct xt_table *table, const struct ipt_replace *repl, - const struct nf_hook_ops *ops, struct xt_table **res); - -void ipt_unregister_table_pre_exit(struct net *net, struct xt_table *table, const struct nf_hook_ops *ops); -void ipt_unregister_table_exit(struct net *net, struct xt_table *table); - -void ipt_unregister_table(struct net *net, struct xt_table *table, - const struct nf_hook_ops *ops); +void ipt_unregister_table_pre_exit(struct net *net, const char *name); +void ipt_unregister_table_exit(struct net *net, const char *name); /* Standard entry. 
*/ struct ipt_standard { @@ -72,7 +67,7 @@ extern unsigned int ipt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table); -#ifdef CONFIG_COMPAT +#ifdef CONFIG_NETFILTER_XTABLES_COMPAT #include <net/compat.h> struct compat_ipt_entry { diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index 1547d5f9ae06..79e73fd7d965 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h @@ -26,17 +26,14 @@ extern void *ip6t_alloc_initial_table(const struct xt_table *); int ip6t_register_table(struct net *net, const struct xt_table *table, const struct ip6t_replace *repl, - const struct nf_hook_ops *ops, struct xt_table **res); -void ip6t_unregister_table(struct net *net, struct xt_table *table, - const struct nf_hook_ops *ops); -void ip6t_unregister_table_pre_exit(struct net *net, struct xt_table *table, - const struct nf_hook_ops *ops); -void ip6t_unregister_table_exit(struct net *net, struct xt_table *table); + const struct nf_hook_ops *ops); +void ip6t_unregister_table_pre_exit(struct net *net, const char *name); +void ip6t_unregister_table_exit(struct net *net, const char *name); extern unsigned int ip6t_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table); -#ifdef CONFIG_COMPAT +#ifdef CONFIG_NETFILTER_XTABLES_COMPAT #include <net/compat.h> struct compat_ip6t_entry { diff --git a/include/linux/netfs.h b/include/linux/netfs.h new file mode 100644 index 000000000000..9062adfa2fb9 --- /dev/null +++ b/include/linux/netfs.h @@ -0,0 +1,234 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Network filesystem support services. + * + * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * See: + * + * Documentation/filesystems/netfs_library.rst + * + * for a description of the network filesystem interface declared here. + */ + +#ifndef _LINUX_NETFS_H +#define _LINUX_NETFS_H + +#include <linux/workqueue.h> +#include <linux/fs.h> +#include <linux/pagemap.h> + +/* + * Overload PG_private_2 to give us PG_fscache - this is used to indicate that + * a page is currently backed by a local disk cache + */ +#define PageFsCache(page) PagePrivate2((page)) +#define SetPageFsCache(page) SetPagePrivate2((page)) +#define ClearPageFsCache(page) ClearPagePrivate2((page)) +#define TestSetPageFsCache(page) TestSetPagePrivate2((page)) +#define TestClearPageFsCache(page) TestClearPagePrivate2((page)) + +/** + * set_page_fscache - Set PG_fscache on a page and take a ref + * @page: The page. + * + * Set the PG_fscache (PG_private_2) flag on a page and take the reference + * needed for the VM to handle its lifetime correctly. This sets the flag and + * takes the reference unconditionally, so care must be taken not to set the + * flag again if it's already set. + */ +static inline void set_page_fscache(struct page *page) +{ + set_page_private_2(page); +} + +/** + * end_page_fscache - Clear PG_fscache and release any waiters + * @page: The page + * + * Clear the PG_fscache (PG_private_2) bit on a page and wake up any sleepers + * waiting for this. The page ref held for PG_private_2 being set is released. + * + * This is, for example, used when a netfs page is being written to a local + * disk cache, thereby allowing writes to the cache for the same page to be + * serialised. 
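Taken together, the three helpers form a simple pattern: mark the page before handing it to the cache writer, clear the mark from the write completion, and wait on it before letting the page be modified again. A condensed sketch; example_cache_write_page() and its completion hook are hypothetical stand-ins for the filesystem's cache backend:

#include <linux/netfs.h>

/* Hypothetical backend call that writes one page to the local disk cache. */
int example_cache_write_page(struct page *page, void (*done)(struct page *page));

static void example_cache_write_done(struct page *page)
{
        /* Drops the PG_private_2 reference and wakes any waiters. */
        end_page_fscache(page);
}

static int example_start_cache_write(struct page *page)
{
        /* Takes a page reference and sets PG_fscache; must not already be set. */
        set_page_fscache(page);
        return example_cache_write_page(page, example_cache_write_done);
}

static void example_before_modify(struct page *page)
{
        /* Serialise against any write to the cache still in flight. */
        wait_on_page_fscache(page);
}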
+ */ +static inline void end_page_fscache(struct page *page) +{ + end_page_private_2(page); +} + +/** + * wait_on_page_fscache - Wait for PG_fscache to be cleared on a page + * @page: The page to wait on + * + * Wait for PG_fscache (aka PG_private_2) to be cleared on a page. + */ +static inline void wait_on_page_fscache(struct page *page) +{ + wait_on_page_private_2(page); +} + +/** + * wait_on_page_fscache_killable - Wait for PG_fscache to be cleared on a page + * @page: The page to wait on + * + * Wait for PG_fscache (aka PG_private_2) to be cleared on a page or until a + * fatal signal is received by the calling task. + * + * Return: + * - 0 if successful. + * - -EINTR if a fatal signal was encountered. + */ +static inline int wait_on_page_fscache_killable(struct page *page) +{ + return wait_on_page_private_2_killable(page); +} + +enum netfs_read_source { + NETFS_FILL_WITH_ZEROES, + NETFS_DOWNLOAD_FROM_SERVER, + NETFS_READ_FROM_CACHE, + NETFS_INVALID_READ, +} __mode(byte); + +typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error, + bool was_async); + +/* + * Resources required to do operations on a cache. + */ +struct netfs_cache_resources { + const struct netfs_cache_ops *ops; + void *cache_priv; + void *cache_priv2; +}; + +/* + * Descriptor for a single component subrequest. + */ +struct netfs_read_subrequest { + struct netfs_read_request *rreq; /* Supervising read request */ + struct list_head rreq_link; /* Link in rreq->subrequests */ + loff_t start; /* Where to start the I/O */ + size_t len; /* Size of the I/O */ + size_t transferred; /* Amount of data transferred */ + refcount_t usage; + short error; /* 0 or error that occurred */ + unsigned short debug_index; /* Index in list (for debugging output) */ + enum netfs_read_source source; /* Where to read from */ + unsigned long flags; +#define NETFS_SREQ_WRITE_TO_CACHE 0 /* Set if should write to cache */ +#define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */ +#define NETFS_SREQ_SHORT_READ 2 /* Set if there was a short read from the cache */ +#define NETFS_SREQ_SEEK_DATA_READ 3 /* Set if ->read() should SEEK_DATA first */ +#define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */ +}; + +/* + * Descriptor for a read helper request. This is used to make multiple I/O + * requests on a variety of sources and then stitch the result together. 
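A minimal sketch of how a network filesystem might use the PG_fscache helpers above around an asynchronous copy to the local cache; the examplefs_* names and the cache-submission step are hypothetical, not taken from any in-tree driver.

/* Completion hook assumed to be invoked by the cache backend. */
static void examplefs_copy_to_cache_done(struct page *page)
{
	/* Drop the PG_private_2 ref and wake wait_on_page_fscache() waiters. */
	end_page_fscache(page);
}

static void examplefs_copy_to_cache(struct page *page)
{
	/* Mark the page and take the ref the VM needs while the copy runs. */
	set_page_fscache(page);

	/*
	 * ...submit the page to the cache backend here (not shown); the
	 * backend calls examplefs_copy_to_cache_done() when the write ends.
	 */
}

static int examplefs_write_begin_wait(struct page *page)
{
	/* Serialise a new write against an in-flight copy to the cache. */
	return wait_on_page_fscache_killable(page);
}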
+ */ +struct netfs_read_request { + struct work_struct work; + struct inode *inode; /* The file being accessed */ + struct address_space *mapping; /* The mapping being accessed */ + struct netfs_cache_resources cache_resources; + struct list_head subrequests; /* Requests to fetch I/O from disk or net */ + void *netfs_priv; /* Private data for the netfs */ + unsigned int debug_id; + unsigned int cookie_debug_id; + atomic_t nr_rd_ops; /* Number of read ops in progress */ + atomic_t nr_wr_ops; /* Number of write ops in progress */ + size_t submitted; /* Amount submitted for I/O so far */ + size_t len; /* Length of the request */ + short error; /* 0 or error that occurred */ + loff_t i_size; /* Size of the file */ + loff_t start; /* Start position */ + pgoff_t no_unlock_page; /* Don't unlock this page after read */ + refcount_t usage; + unsigned long flags; +#define NETFS_RREQ_INCOMPLETE_IO 0 /* Some ioreqs terminated short or with error */ +#define NETFS_RREQ_WRITE_TO_CACHE 1 /* Need to write to the cache */ +#define NETFS_RREQ_NO_UNLOCK_PAGE 2 /* Don't unlock no_unlock_page on completion */ +#define NETFS_RREQ_DONT_UNLOCK_PAGES 3 /* Don't unlock the pages on completion */ +#define NETFS_RREQ_FAILED 4 /* The request failed */ +#define NETFS_RREQ_IN_PROGRESS 5 /* Unlocked when the request completes */ + const struct netfs_read_request_ops *netfs_ops; +}; + +/* + * Operations the network filesystem can/must provide to the helpers. + */ +struct netfs_read_request_ops { + bool (*is_cache_enabled)(struct inode *inode); + void (*init_rreq)(struct netfs_read_request *rreq, struct file *file); + int (*begin_cache_operation)(struct netfs_read_request *rreq); + void (*expand_readahead)(struct netfs_read_request *rreq); + bool (*clamp_length)(struct netfs_read_subrequest *subreq); + void (*issue_op)(struct netfs_read_subrequest *subreq); + bool (*is_still_valid)(struct netfs_read_request *rreq); + int (*check_write_begin)(struct file *file, loff_t pos, unsigned len, + struct page *page, void **_fsdata); + void (*done)(struct netfs_read_request *rreq); + void (*cleanup)(struct address_space *mapping, void *netfs_priv); +}; + +/* + * Table of operations for access to a cache. This is obtained by + * rreq->ops->begin_cache_operation(). + */ +struct netfs_cache_ops { + /* End an operation */ + void (*end_operation)(struct netfs_cache_resources *cres); + + /* Read data from the cache */ + int (*read)(struct netfs_cache_resources *cres, + loff_t start_pos, + struct iov_iter *iter, + bool seek_data, + netfs_io_terminated_t term_func, + void *term_func_priv); + + /* Write data to the cache */ + int (*write)(struct netfs_cache_resources *cres, + loff_t start_pos, + struct iov_iter *iter, + netfs_io_terminated_t term_func, + void *term_func_priv); + + /* Expand readahead request */ + void (*expand_readahead)(struct netfs_cache_resources *cres, + loff_t *_start, size_t *_len, loff_t i_size); + + /* Prepare a read operation, shortening it to a cached/uncached + * boundary as appropriate. + */ + enum netfs_read_source (*prepare_read)(struct netfs_read_subrequest *subreq, + loff_t i_size); + + /* Prepare a write operation, working out what part of the write we can + * actually do. 
+ */ + int (*prepare_write)(struct netfs_cache_resources *cres, + loff_t *_start, size_t *_len, loff_t i_size); +}; + +struct readahead_control; +extern void netfs_readahead(struct readahead_control *, + const struct netfs_read_request_ops *, + void *); +extern int netfs_readpage(struct file *, + struct page *, + const struct netfs_read_request_ops *, + void *); +extern int netfs_write_begin(struct file *, struct address_space *, + loff_t, unsigned int, unsigned int, struct page **, + void **, + const struct netfs_read_request_ops *, + void *); + +extern void netfs_subreq_terminated(struct netfs_read_subrequest *, ssize_t, bool); +extern void netfs_stats_show(struct seq_file *); + +#endif /* _LINUX_NETFS_H */ diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 0bcf98098c5a..61b1c7fcc401 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -129,23 +129,19 @@ struct netlink_ext_ack { static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack, u64 cookie) { - u64 __cookie = cookie; - if (!extack) return; - memcpy(extack->cookie, &__cookie, sizeof(__cookie)); - extack->cookie_len = sizeof(__cookie); + memcpy(extack->cookie, &cookie, sizeof(cookie)); + extack->cookie_len = sizeof(cookie); } static inline void nl_set_extack_cookie_u32(struct netlink_ext_ack *extack, u32 cookie) { - u32 __cookie = cookie; - if (!extack) return; - memcpy(extack->cookie, &__cookie, sizeof(__cookie)); - extack->cookie_len = sizeof(__cookie); + memcpy(extack->cookie, &cookie, sizeof(cookie)); + extack->cookie_len = sizeof(cookie); } void netlink_kernel_release(struct sock *sk); diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 5b4c67c91f56..15004c469807 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -452,6 +452,7 @@ enum lock_type4 { #define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1) #define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4) #define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13) +#define FATTR4_WORD2_CHANGE_ATTR_TYPE (1UL << 15) #define FATTR4_WORD2_SECURITY_LABEL (1UL << 16) #define FATTR4_WORD2_MODE_UMASK (1UL << 17) #define FATTR4_WORD2_XATTR_SUPPORT (1UL << 18) @@ -709,6 +710,14 @@ struct nl4_server { } u; }; +enum nfs4_change_attr_type { + NFS4_CHANGE_TYPE_IS_MONOTONIC_INCR = 0, + NFS4_CHANGE_TYPE_IS_VERSION_COUNTER = 1, + NFS4_CHANGE_TYPE_IS_VERSION_COUNTER_NOPNFS = 2, + NFS4_CHANGE_TYPE_IS_TIME_METADATA = 3, + NFS4_CHANGE_TYPE_IS_UNDEFINED = 4, +}; + /* * Options for setxattr. These match the flags for setxattr(2). 
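A sketch of how a network filesystem might route its address_space operations through the netfs_readpage()/netfs_readahead() helpers declared above; the examplefs_* callbacks are assumed to be implemented elsewhere and the particular set of populated hooks is illustrative only.

static const struct netfs_read_request_ops examplefs_req_ops = {
	.is_cache_enabled	= examplefs_is_cache_enabled,
	.begin_cache_operation	= examplefs_begin_cache_operation,
	.issue_op		= examplefs_issue_op,	/* submit one subrequest */
	.cleanup		= examplefs_cleanup,
};

static int examplefs_readpage(struct file *file, struct page *page)
{
	/* The helper decides per subrequest: cache, server or zero-fill. */
	return netfs_readpage(file, page, &examplefs_req_ops, NULL);
}

static void examplefs_readahead(struct readahead_control *ractl)
{
	netfs_readahead(ractl, &examplefs_req_ops, NULL);
}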
*/ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index eadaabd18dc7..ffba254d2098 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -246,11 +246,15 @@ struct nfs4_copy_state { BIT(13) /* Deferred cache invalidation */ #define NFS_INO_INVALID_BLOCKS BIT(14) /* cached blocks are invalid */ #define NFS_INO_INVALID_XATTR BIT(15) /* xattrs are invalid */ +#define NFS_INO_INVALID_NLINK BIT(16) /* cached nlinks is invalid */ +#define NFS_INO_INVALID_MODE BIT(17) /* cached mode is invalid */ #define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \ | NFS_INO_INVALID_CTIME \ | NFS_INO_INVALID_MTIME \ | NFS_INO_INVALID_SIZE \ + | NFS_INO_INVALID_NLINK \ + | NFS_INO_INVALID_MODE \ | NFS_INO_INVALID_OTHER) /* inode metadata is invalid */ /* @@ -386,7 +390,7 @@ extern void nfs_access_set_mask(struct nfs_access_entry *, u32); extern int nfs_permission(struct user_namespace *, struct inode *, int); extern int nfs_open(struct inode *, struct file *); extern int nfs_attribute_cache_expired(struct inode *inode); -extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); +extern int nfs_revalidate_inode(struct inode *inode, unsigned long flags); extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); extern int nfs_clear_invalid_mapping(struct address_space *mapping); extern bool nfs_mapping_need_revalidate_inode(struct inode *inode); diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 6f76b32a0238..d71a0e90faeb 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -156,6 +156,7 @@ struct nfs_server { #define NFS_MOUNT_WRITE_EAGER 0x01000000 #define NFS_MOUNT_WRITE_WAIT 0x02000000 + unsigned int fattr_valid; /* Valid attributes */ unsigned int caps; /* server capabilities */ unsigned int rsize; /* read size */ unsigned int rpages; /* read size (in pages) */ @@ -180,6 +181,9 @@ struct nfs_server { #define NFS_OPTION_FSCACHE 0x00000001 /* - local caching enabled */ #define NFS_OPTION_MIGRATION 0x00000002 /* - NFSv4 migration enabled */ + enum nfs4_change_attr_type + change_attr_type;/* Description of change attribute */ + struct nfs_fsid fsid; __u64 maxfilesize; /* maximum file size */ struct timespec64 time_delta; /* smallest time granularity */ @@ -256,6 +260,7 @@ struct nfs_server { /* User namespace info */ const struct cred *cred; + bool has_sec_mnt_opts; }; /* Server capabilities */ @@ -264,16 +269,7 @@ struct nfs_server { #define NFS_CAP_SYMLINKS (1U << 2) #define NFS_CAP_ACLS (1U << 3) #define NFS_CAP_ATOMIC_OPEN (1U << 4) -/* #define NFS_CAP_CHANGE_ATTR (1U << 5) */ #define NFS_CAP_LGOPEN (1U << 5) -#define NFS_CAP_FILEID (1U << 6) -#define NFS_CAP_MODE (1U << 7) -#define NFS_CAP_NLINK (1U << 8) -#define NFS_CAP_OWNER (1U << 9) -#define NFS_CAP_OWNER_GROUP (1U << 10) -#define NFS_CAP_ATIME (1U << 11) -#define NFS_CAP_CTIME (1U << 12) -#define NFS_CAP_MTIME (1U << 13) #define NFS_CAP_POSIX_LOCK (1U << 14) #define NFS_CAP_UIDGID_NOMAP (1U << 15) #define NFS_CAP_STATEID_NFSV41 (1U << 16) diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 3327239fa2f9..717ecc87c9e7 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -15,6 +15,8 @@ #define NFS_DEF_FILE_IO_SIZE (4096U) #define NFS_MIN_FILE_IO_SIZE (1024U) +#define NFS_BITMASK_SZ 3 + struct nfs4_string { unsigned int len; char *data; @@ -150,6 +152,8 @@ struct nfs_fsinfo { __u32 layouttype[NFS_MAX_LAYOUT_TYPES]; /* supported pnfs layout driver */ __u32 blksize; /* preferred pnfs io block size */ __u32 
clone_blksize; /* granularity of a CLONE operation */ + enum nfs4_change_attr_type + change_attr_type; /* Info about change attr */ __u32 xattr_support; /* User xattrs supported */ }; @@ -525,7 +529,8 @@ struct nfs_closeargs { struct nfs_seqid * seqid; fmode_t fmode; u32 share_access; - u32 * bitmask; + const u32 * bitmask; + u32 bitmask_store[NFS_BITMASK_SZ]; struct nfs4_layoutreturn_args *lr_args; }; @@ -608,7 +613,8 @@ struct nfs4_delegreturnargs { struct nfs4_sequence_args seq_args; const struct nfs_fh *fhandle; const nfs4_stateid *stateid; - u32 * bitmask; + const u32 *bitmask; + u32 bitmask_store[NFS_BITMASK_SZ]; struct nfs4_layoutreturn_args *lr_args; }; @@ -648,7 +654,8 @@ struct nfs_pgio_args { union { unsigned int replen; /* used by read */ struct { - u32 * bitmask; /* used by write */ + const u32 * bitmask; /* used by write */ + u32 bitmask_store[NFS_BITMASK_SZ]; /* used by write */ enum nfs3_stable_how stable; /* used by write */ }; }; diff --git a/include/linux/nfsacl.h b/include/linux/nfsacl.h index 0ba99c513649..8e76a79cdc6a 100644 --- a/include/linux/nfsacl.h +++ b/include/linux/nfsacl.h @@ -41,5 +41,8 @@ nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt, extern bool nfs_stream_decode_acl(struct xdr_stream *xdr, unsigned int *aclcnt, struct posix_acl **pacl); +extern bool +nfs_stream_encode_acl(struct xdr_stream *xdr, struct inode *inode, + struct posix_acl *acl, int encode_entries, int typeflag); #endif /* __LINUX_NFSACL_H */ diff --git a/include/linux/nvme.h b/include/linux/nvme.h index b08787cd0881..edcbd60b88b9 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -405,6 +405,16 @@ struct nvme_id_ctrl_zns { __u8 rsvd1[4095]; }; +struct nvme_id_ctrl_nvm { + __u8 vsl; + __u8 wzsl; + __u8 wusl; + __u8 dmrl; + __le32 dmrsl; + __le64 dmsl; + __u8 rsvd16[4080]; +}; + enum { NVME_ID_CNS_NS = 0x00, NVME_ID_CNS_CTRL = 0x01, diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h index 052293f4cbdb..923dada24eb4 100644 --- a/include/linux/nvmem-consumer.h +++ b/include/linux/nvmem-consumer.h @@ -65,6 +65,10 @@ int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val); int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val); int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val); int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val); +int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id, + u32 *val); +int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id, + u64 *val); /* direct nvmem device read/write interface */ struct nvmem_device *nvmem_device_get(struct device *dev, const char *name); diff --git a/include/linux/of.h b/include/linux/of.h index 4b27c9a27df3..d8db8d3592fd 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -424,12 +424,14 @@ extern int of_detach_node(struct device_node *); * @sz: number of array elements to read * * Search for a property in a device node and read 8-bit value(s) from - * it. Returns 0 on success, -EINVAL if the property does not exist, - * -ENODATA if property does not have a value, and -EOVERFLOW if the - * property data isn't large enough. + * it. * * dts entry of array should be like: - * property = /bits/ 8 <0x50 0x60 0x70>; + * ``property = /bits/ 8 <0x50 0x60 0x70>;`` + * + * Return: 0 on success, -EINVAL if the property does not exist, + * -ENODATA if property does not have a value, and -EOVERFLOW if the + * property data isn't large enough. 
* * The out_values is modified only if a valid u8 value can be decoded. */ @@ -454,12 +456,14 @@ static inline int of_property_read_u8_array(const struct device_node *np, * @sz: number of array elements to read * * Search for a property in a device node and read 16-bit value(s) from - * it. Returns 0 on success, -EINVAL if the property does not exist, - * -ENODATA if property does not have a value, and -EOVERFLOW if the - * property data isn't large enough. + * it. * * dts entry of array should be like: - * property = /bits/ 16 <0x5000 0x6000 0x7000>; + * ``property = /bits/ 16 <0x5000 0x6000 0x7000>;`` + * + * Return: 0 on success, -EINVAL if the property does not exist, + * -ENODATA if property does not have a value, and -EOVERFLOW if the + * property data isn't large enough. * * The out_values is modified only if a valid u16 value can be decoded. */ @@ -485,7 +489,9 @@ static inline int of_property_read_u16_array(const struct device_node *np, * @sz: number of array elements to read * * Search for a property in a device node and read 32-bit value(s) from - * it. Returns 0 on success, -EINVAL if the property does not exist, + * it. + * + * Return: 0 on success, -EINVAL if the property does not exist, * -ENODATA if property does not have a value, and -EOVERFLOW if the * property data isn't large enough. * @@ -513,7 +519,9 @@ static inline int of_property_read_u32_array(const struct device_node *np, * @sz: number of array elements to read * * Search for a property in a device node and read 64-bit value(s) from - * it. Returns 0 on success, -EINVAL if the property does not exist, + * it. + * + * Return: 0 on success, -EINVAL if the property does not exist, * -ENODATA if property does not have a value, and -EOVERFLOW if the * property data isn't large enough. * @@ -560,6 +568,13 @@ int of_map_id(struct device_node *np, u32 id, phys_addr_t of_dma_get_max_cpu_address(struct device_node *np); +struct kimage; +void *of_kexec_alloc_and_setup_fdt(const struct kimage *image, + unsigned long initrd_load_addr, + unsigned long initrd_len, + const char *cmdline, size_t extra_fdt_size); +int ima_get_kexec_buffer(void **addr, size_t *size); +int ima_free_kexec_buffer(void); #else /* CONFIG_OF */ static inline void of_core_init(void) @@ -1063,7 +1078,9 @@ static inline bool of_node_is_type(const struct device_node *np, const char *typ * @propname: name of the property to be searched. * * Search for a property in a device node and count the number of u8 elements - * in it. Returns number of elements on sucess, -EINVAL if the property does + * in it. + * + * Return: The number of elements on sucess, -EINVAL if the property does * not exist or its length does not match a multiple of u8 and -ENODATA if the * property does not have a value. */ @@ -1080,7 +1097,9 @@ static inline int of_property_count_u8_elems(const struct device_node *np, * @propname: name of the property to be searched. * * Search for a property in a device node and count the number of u16 elements - * in it. Returns number of elements on sucess, -EINVAL if the property does + * in it. + * + * Return: The number of elements on sucess, -EINVAL if the property does * not exist or its length does not match a multiple of u16 and -ENODATA if the * property does not have a value. */ @@ -1097,7 +1116,9 @@ static inline int of_property_count_u16_elems(const struct device_node *np, * @propname: name of the property to be searched. * * Search for a property in a device node and count the number of u32 elements - * in it. 
Returns number of elements on sucess, -EINVAL if the property does + * in it. + * + * Return: The number of elements on sucess, -EINVAL if the property does * not exist or its length does not match a multiple of u32 and -ENODATA if the * property does not have a value. */ @@ -1114,7 +1135,9 @@ static inline int of_property_count_u32_elems(const struct device_node *np, * @propname: name of the property to be searched. * * Search for a property in a device node and count the number of u64 elements - * in it. Returns number of elements on sucess, -EINVAL if the property does + * in it. + * + * Return: The number of elements on sucess, -EINVAL if the property does * not exist or its length does not match a multiple of u64 and -ENODATA if the * property does not have a value. */ @@ -1135,7 +1158,7 @@ static inline int of_property_count_u64_elems(const struct device_node *np, * Search for a property in a device tree node and retrieve a list of * terminated string values (pointer to data, not a copy) in that property. * - * If @out_strs is NULL, the number of strings in the property is returned. + * Return: If @out_strs is NULL, the number of strings in the property is returned. */ static inline int of_property_read_string_array(const struct device_node *np, const char *propname, const char **out_strs, @@ -1151,10 +1174,11 @@ static inline int of_property_read_string_array(const struct device_node *np, * @propname: name of the property to be searched. * * Search for a property in a device tree node and retrieve the number of null - * terminated string contain in it. Returns the number of strings on - * success, -EINVAL if the property does not exist, -ENODATA if property - * does not have a value, and -EILSEQ if the string is not null-terminated - * within the length of the property data. + * terminated string contain in it. + * + * Return: The number of strings on success, -EINVAL if the property does not + * exist, -ENODATA if property does not have a value, and -EILSEQ if the string + * is not null-terminated within the length of the property data. */ static inline int of_property_count_strings(const struct device_node *np, const char *propname) @@ -1168,13 +1192,14 @@ static inline int of_property_count_strings(const struct device_node *np, * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * @index: index of the string in the list of strings - * @out_string: pointer to null terminated return string, modified only if + * @output: pointer to null terminated return string, modified only if * return value is 0. * * Search for a property in a device tree node and retrieve a null * terminated string value (pointer to data, not a copy) in the list of strings * contained in that property. - * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if + * + * Return: 0 on success, -EINVAL if the property does not exist, -ENODATA if * property does not have a value, and -EILSEQ if the string is not * null-terminated within the length of the property data. * @@ -1194,7 +1219,8 @@ static inline int of_property_read_string_index(const struct device_node *np, * @propname: name of the property to be searched. * * Search for a property in a device node. - * Returns true if the property exists false otherwise. + * + * Return: true if the property exists false otherwise. 
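A short sketch showing the return conventions documented above for the of_property_*() element counters and array readers; the property name and the example_* wrapper are made up for illustration.

static int example_read_values(struct device_node *np, u32 *vals, int max)
{
	int n = of_property_count_u32_elems(np, "example-values");

	if (n < 0)
		return n;		/* -EINVAL or -ENODATA, as documented */
	if (n > max)
		return -EOVERFLOW;	/* caller's buffer is too small */

	/* 0 on success; vals[] is only written when decoding succeeds. */
	return of_property_read_u32_array(np, "example-values", vals, n);
}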
*/ static inline bool of_property_read_bool(const struct device_node *np, const char *propname) @@ -1440,14 +1466,14 @@ static inline int of_reconfig_get_state_change(unsigned long action, * of_device_is_system_power_controller - Tells if system-power-controller is found for device_node * @np: Pointer to the given device_node * - * return true if present false otherwise + * Return: true if present false otherwise */ static inline bool of_device_is_system_power_controller(const struct device_node *np) { return of_property_read_bool(np, "system-power-controller"); } -/** +/* * Overlay support */ diff --git a/include/linux/of_net.h b/include/linux/of_net.h index 71bbfcf3adcd..daef3b0d9270 100644 --- a/include/linux/of_net.h +++ b/include/linux/of_net.h @@ -13,7 +13,7 @@ struct net_device; extern int of_get_phy_mode(struct device_node *np, phy_interface_t *interface); -extern const void *of_get_mac_address(struct device_node *np); +extern int of_get_mac_address(struct device_node *np, u8 *mac); extern struct net_device *of_find_net_device_by_node(struct device_node *np); #else static inline int of_get_phy_mode(struct device_node *np, @@ -22,9 +22,9 @@ static inline int of_get_phy_mode(struct device_node *np, return -ENODEV; } -static inline const void *of_get_mac_address(struct device_node *np) +static inline int of_get_mac_address(struct device_node *np, u8 *mac) { - return ERR_PTR(-ENODEV); + return -ENODEV; } static inline struct net_device *of_find_net_device_by_node(struct device_node *np) diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h index 4462ed2c18cd..461b7aa587ba 100644 --- a/include/linux/oid_registry.h +++ b/include/linux/oid_registry.h @@ -19,8 +19,14 @@ enum OID { OID_id_dsa_with_sha1, /* 1.2.840.10030.4.3 */ OID_id_dsa, /* 1.2.840.10040.4.1 */ - OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */ OID_id_ecPublicKey, /* 1.2.840.10045.2.1 */ + OID_id_prime192v1, /* 1.2.840.10045.3.1.1 */ + OID_id_prime256v1, /* 1.2.840.10045.3.1.7 */ + OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */ + OID_id_ecdsa_with_sha224, /* 1.2.840.10045.4.3.1 */ + OID_id_ecdsa_with_sha256, /* 1.2.840.10045.4.3.2 */ + OID_id_ecdsa_with_sha384, /* 1.2.840.10045.4.3.3 */ + OID_id_ecdsa_with_sha512, /* 1.2.840.10045.4.3.4 */ /* PKCS#1 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1)} */ OID_rsaEncryption, /* 1.2.840.113549.1.1.1 */ @@ -58,6 +64,7 @@ enum OID { OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */ OID_sha1, /* 1.3.14.3.2.26 */ + OID_id_ansip384r1, /* 1.3.132.0.34 */ OID_sha256, /* 2.16.840.1.101.3.4.2.1 */ OID_sha384, /* 2.16.840.1.101.3.4.2.2 */ OID_sha512, /* 2.16.840.1.101.3.4.2.3 */ @@ -113,10 +120,16 @@ enum OID { OID_SM2_with_SM3, /* 1.2.156.10197.1.501 */ OID_sm3WithRSAEncryption, /* 1.2.156.10197.1.504 */ + /* TCG defined OIDS for TPM based keys */ + OID_TPMLoadableKey, /* 2.23.133.10.1.3 */ + OID_TPMImportableKey, /* 2.23.133.10.1.4 */ + OID_TPMSealedData, /* 2.23.133.10.1.5 */ + OID__NR }; extern enum OID look_up_OID(const void *data, size_t datasize); +extern int parse_OID(const void *data, size_t datasize, enum OID *oid); extern int sprint_oid(const void *, size_t, char *, size_t); extern int sprint_OID(enum OID, char *, size_t); diff --git a/include/linux/overflow.h b/include/linux/overflow.h index ef74051d5cfe..0f12345c21fb 100644 --- a/include/linux/overflow.h +++ b/include/linux/overflow.h @@ -235,7 +235,7 @@ static inline bool __must_check __must_check_overflow(bool overflow) * - 'a << s' sets the sign bit, if any, in '*d'. 
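A sketch of the new calling convention from the of_get_mac_address() change in the include/linux/of_net.h hunk above: callers now supply a buffer and check an error code instead of testing a returned pointer. The example_* helper is illustrative.

static int example_assign_mac(struct net_device *ndev, struct device_node *np)
{
	u8 addr[ETH_ALEN];
	int err;

	err = of_get_mac_address(np, addr);	/* 0 or e.g. -ENODEV */
	if (err)
		return err;

	ether_addr_copy(ndev->dev_addr, addr);
	return 0;
}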
* * '*d' will hold the results of the attempted shift, but is not - * considered "safe for use" if false is returned. + * considered "safe for use" if true is returned. */ #define check_shl_overflow(a, s, d) __must_check_overflow(({ \ typeof(a) _a = a; \ diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h index 7d4ec26d8a3e..ef1e3e736e14 100644 --- a/include/linux/page-flags-layout.h +++ b/include/linux/page-flags-layout.h @@ -21,16 +21,17 @@ #elif MAX_NR_ZONES <= 8 #define ZONES_SHIFT 3 #else -#error ZONES_SHIFT -- too many zones configured adjust calculation +#error ZONES_SHIFT "Too many zones configured" #endif +#define ZONES_WIDTH ZONES_SHIFT + #ifdef CONFIG_SPARSEMEM #include <asm/sparsemem.h> - -/* SECTION_SHIFT #bits space required to store a section # */ #define SECTIONS_SHIFT (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS) - -#endif /* CONFIG_SPARSEMEM */ +#else +#define SECTIONS_SHIFT 0 +#endif #ifndef BUILD_VDSO32_64 /* @@ -54,17 +55,28 @@ #define SECTIONS_WIDTH 0 #endif -#define ZONES_WIDTH ZONES_SHIFT - -#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS +#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS #define NODES_WIDTH NODES_SHIFT -#else -#ifdef CONFIG_SPARSEMEM_VMEMMAP +#elif defined(CONFIG_SPARSEMEM_VMEMMAP) #error "Vmemmap: No space for nodes field in page flags" -#endif +#else #define NODES_WIDTH 0 #endif +/* + * Note that this #define MUST have a value so that it can be tested with + * the IS_ENABLED() macro. + */ +#if NODES_SHIFT != 0 && NODES_WIDTH == 0 +#define NODE_NOT_IN_PAGE_FLAGS 1 +#endif + +#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) +#define KASAN_TAG_WIDTH 8 +#else +#define KASAN_TAG_WIDTH 0 +#endif + #ifdef CONFIG_NUMA_BALANCING #define LAST__PID_SHIFT 8 #define LAST__PID_MASK ((1 << LAST__PID_SHIFT)-1) @@ -77,36 +89,20 @@ #define LAST_CPUPID_SHIFT 0 #endif -#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) -#define KASAN_TAG_WIDTH 8 -#else -#define KASAN_TAG_WIDTH 0 -#endif - -#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \ +#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + KASAN_TAG_WIDTH + LAST_CPUPID_SHIFT \ <= BITS_PER_LONG - NR_PAGEFLAGS #define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT #else #define LAST_CPUPID_WIDTH 0 #endif -#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \ - > BITS_PER_LONG - NR_PAGEFLAGS -#error "Not enough bits in page flags" -#endif - -/* - * We are going to use the flags for the page to node mapping if its in - * there. This includes the case where there is no node, so it is implicit. - * Note that this #define MUST have a value so that it can be tested with - * the IS_ENABLED() macro. 
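A sketch of the corrected check_shl_overflow() semantics from the include/linux/overflow.h hunk above: the macro returns true when the shift overflowed, so the destination may only be consumed on a false return. The example_* wrapper is illustrative.

static int example_shift_checked(u32 val, unsigned int shift, u32 *out)
{
	if (check_shl_overflow(val, shift, out))
		return -EOVERFLOW;	/* *out is not safe to use here */

	return 0;			/* *out holds val << shift */
}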
- */ -#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0) -#define NODE_NOT_IN_PAGE_FLAGS 1 +#if LAST_CPUPID_SHIFT != 0 && LAST_CPUPID_WIDTH == 0 +#define LAST_CPUPID_NOT_IN_PAGE_FLAGS #endif -#if defined(CONFIG_NUMA_BALANCING) && LAST_CPUPID_WIDTH == 0 -#define LAST_CPUPID_NOT_IN_PAGE_FLAGS +#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + KASAN_TAG_WIDTH + LAST_CPUPID_WIDTH \ + > BITS_PER_LONG - NR_PAGEFLAGS +#error "Not enough bits in page flags" #endif #endif diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 8c9947fd62f3..e89df447fae3 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -18,6 +18,11 @@ struct pagevec; +static inline bool mapping_empty(struct address_space *mapping) +{ + return xa_empty(&mapping->i_pages); +} + /* * Bits in mapping->flags. */ @@ -158,6 +163,16 @@ static inline void filemap_nr_thps_dec(struct address_space *mapping) void release_pages(struct page **pages, int nr); /* + * For file cache pages, return the address_space, otherwise return NULL + */ +static inline struct address_space *page_mapping_file(struct page *page) +{ + if (unlikely(PageSwapCache(page))) + return NULL; + return page_mapping(page); +} + +/* * speculatively take a reference to a page. * If the page is free (_refcount == 0), then _refcount is untouched, and 0 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned. @@ -688,6 +703,26 @@ void wait_for_stable_page(struct page *page); void page_endio(struct page *page, bool is_write, int err); +/** + * set_page_private_2 - Set PG_private_2 on a page and take a ref + * @page: The page. + * + * Set the PG_private_2 flag on a page and take the reference needed for the VM + * to handle its lifetime correctly. This sets the flag and takes the + * reference unconditionally, so care must be taken not to set the flag again + * if it's already set. + */ +static inline void set_page_private_2(struct page *page) +{ + page = compound_head(page); + get_page(page); + SetPagePrivate2(page); +} + +void end_page_private_2(struct page *page); +void wait_on_page_private_2(struct page *page); +int wait_on_page_private_2_killable(struct page *page); + /* * Add an arbitrary waiter to a page's wait queue */ @@ -792,20 +827,23 @@ static inline int add_to_page_cache(struct page *page, * @file: The file, used primarily by network filesystems for authentication. * May be NULL if invoked internally by the filesystem. * @mapping: Readahead this filesystem object. + * @ra: File readahead state. May be NULL. 
*/ struct readahead_control { struct file *file; struct address_space *mapping; + struct file_ra_state *ra; /* private: use the readahead_* accessors instead */ pgoff_t _index; unsigned int _nr_pages; unsigned int _batch_count; }; -#define DEFINE_READAHEAD(rac, f, m, i) \ - struct readahead_control rac = { \ +#define DEFINE_READAHEAD(ractl, f, r, m, i) \ + struct readahead_control ractl = { \ .file = f, \ .mapping = m, \ + .ra = r, \ ._index = i, \ } @@ -813,10 +851,11 @@ struct readahead_control { void page_cache_ra_unbounded(struct readahead_control *, unsigned long nr_to_read, unsigned long lookahead_count); -void page_cache_sync_ra(struct readahead_control *, struct file_ra_state *, +void page_cache_sync_ra(struct readahead_control *, unsigned long req_count); +void page_cache_async_ra(struct readahead_control *, struct page *, unsigned long req_count); -void page_cache_async_ra(struct readahead_control *, struct file_ra_state *, - struct page *, unsigned long req_count); +void readahead_expand(struct readahead_control *ractl, + loff_t new_start, size_t new_len); /** * page_cache_sync_readahead - generic file readahead @@ -836,8 +875,8 @@ void page_cache_sync_readahead(struct address_space *mapping, struct file_ra_state *ra, struct file *file, pgoff_t index, unsigned long req_count) { - DEFINE_READAHEAD(ractl, file, mapping, index); - page_cache_sync_ra(&ractl, ra, req_count); + DEFINE_READAHEAD(ractl, file, ra, mapping, index); + page_cache_sync_ra(&ractl, req_count); } /** @@ -859,8 +898,8 @@ void page_cache_async_readahead(struct address_space *mapping, struct file_ra_state *ra, struct file *file, struct page *page, pgoff_t index, unsigned long req_count) { - DEFINE_READAHEAD(ractl, file, mapping, index); - page_cache_async_ra(&ractl, ra, page, req_count); + DEFINE_READAHEAD(ractl, file, ra, mapping, index); + page_cache_async_ra(&ractl, page, req_count); } /** @@ -958,9 +997,9 @@ static inline loff_t readahead_pos(struct readahead_control *rac) * readahead_length - The number of bytes in this readahead request. * @rac: The readahead request. */ -static inline loff_t readahead_length(struct readahead_control *rac) +static inline size_t readahead_length(struct readahead_control *rac) { - return (loff_t)rac->_nr_pages * PAGE_SIZE; + return rac->_nr_pages * PAGE_SIZE; } /** @@ -981,6 +1020,15 @@ static inline unsigned int readahead_count(struct readahead_control *rac) return rac->_nr_pages; } +/** + * readahead_batch_length - The number of bytes in the current batch. + * @rac: The readahead request. 
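A sketch of the reordered DEFINE_READAHEAD() arguments from the hunk above, mirroring the updated page_cache_sync_readahead() wrapper: the file_ra_state now sits between the file and the mapping. The example_* function and the request size are illustrative.

static void example_start_readahead(struct address_space *mapping,
				    struct file_ra_state *ra,
				    struct file *file, pgoff_t index)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);

	page_cache_sync_ra(&ractl, 32);		/* ask for up to 32 pages */
}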
+ */ +static inline size_t readahead_batch_length(struct readahead_control *rac) +{ + return rac->_batch_count * PAGE_SIZE; +} + static inline unsigned long dir_pages(struct inode *inode) { return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >> diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h index b1cb6b753abb..ac7b38ad5903 100644 --- a/include/linux/pagewalk.h +++ b/include/linux/pagewalk.h @@ -7,7 +7,7 @@ struct mm_walk; /** - * mm_walk_ops - callbacks for walk_page_range + * struct mm_walk_ops - callbacks for walk_page_range * @pgd_entry: if set, called for each non-empty PGD (top-level) entry * @p4d_entry: if set, called for each non-empty P4D entry * @pud_entry: if set, called for each non-empty PUD entry @@ -71,7 +71,7 @@ enum page_walk_action { }; /** - * mm_walk - walk_page_range data + * struct mm_walk - walk_page_range data * @ops: operation to call during the walk * @mm: mm_struct representing the target process of page table walk * @pgd: pointer to PGD; only valid with no_vma (otherwise set to NULL) diff --git a/include/linux/parport.h b/include/linux/parport.h index f981f794c850..1c16ffb8b908 100644 --- a/include/linux/parport.h +++ b/include/linux/parport.h @@ -332,9 +332,19 @@ int __must_check __parport_register_driver(struct parport_driver *, __parport_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) /* Unregister a high-level driver. */ -extern void parport_unregister_driver (struct parport_driver *); void parport_unregister_driver(struct parport_driver *); +/** + * module_parport_driver() - Helper macro for registering a modular parport driver + * @__parport_driver: struct parport_driver to be used + * + * Helper macro for parport drivers which do not do anything special in module + * init and exit. This eliminates a lot of boilerplate. Each module may only + * use this macro once, and calling it replaces module_init() and module_exit(). + */ +#define module_parport_driver(__parport_driver) \ + module_driver(__parport_driver, parport_register_driver, parport_unregister_driver) + /* If parport_register_driver doesn't fit your needs, perhaps * parport_find_xxx does. */ extern struct parport *parport_find_number (int); diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index 65d3d83015c3..fbdadd4d8377 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h @@ -85,6 +85,7 @@ extern const struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */ extern const struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */ extern const struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */ extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */ +extern const struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */ #endif #if IS_ENABLED(CONFIG_PCI_HOST_COMMON) diff --git a/include/linux/pci.h b/include/linux/pci.h index 86c799c97b77..c20211e59a57 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -458,7 +458,6 @@ struct pci_dev { u32 saved_config_space[16]; /* Config space saved at suspend time */ struct hlist_head saved_cap_space; - struct bin_attribute *rom_attr; /* Attribute descriptor for sysfs ROM entry */ int rom_attr_enabled; /* Display of ROM attribute enabled? 
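A sketch of the module_parport_driver() helper added in the include/linux/parport.h hunk above, which stands in for an open-coded module_init()/module_exit() pair; the example driver and its attach/detach callbacks are hypothetical.

static struct parport_driver example_parport_driver = {
	.name	= "example_pardev",
	.attach	= example_attach,	/* called for each registered port */
	.detach	= example_detach,
};
module_parport_driver(example_parport_driver);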
*/ struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ @@ -540,7 +539,6 @@ struct pci_host_bridge { int (*map_irq)(const struct pci_dev *, u8, u8); void (*release_fn)(struct pci_host_bridge *); void *release_data; - struct msi_controller *msi; unsigned int ignore_reset_delay:1; /* For entire hierarchy */ unsigned int no_ext_tags:1; /* No Extended Tags */ unsigned int native_aer:1; /* OS may use PCIe AER */ @@ -551,6 +549,7 @@ struct pci_host_bridge { unsigned int native_dpc:1; /* OS may use PCIe DPC */ unsigned int preserve_config:1; /* Preserve FW resource setup */ unsigned int size_windows:1; /* Enable root bus sizing */ + unsigned int msi_domain:1; /* Bridge wants MSI domain */ /* Resource alignment requirements */ resource_size_t (*align_resource)(struct pci_dev *dev, @@ -621,7 +620,6 @@ struct pci_bus { struct resource busn_res; /* Bus numbers routed to this bus */ struct pci_ops *ops; /* Configuration access functions */ - struct msi_controller *msi; /* MSI controller */ void *sysdata; /* Hook for sys-specific extension */ struct proc_dir_entry *procdir; /* Directory entry in /proc/bus/pci */ @@ -856,6 +854,12 @@ struct module; * e.g. drivers/net/e100.c. * @sriov_configure: Optional driver callback to allow configuration of * number of VFs to enable via sysfs "sriov_numvfs" file. + * @sriov_set_msix_vec_count: PF Driver callback to change number of MSI-X + * vectors on a VF. Triggered via sysfs "sriov_vf_msix_count". + * This will change MSI-X Table Size in the VF Message Control + * registers. + * @sriov_get_vf_total_msix: PF driver callback to get the total number of + * MSI-X vectors available for distribution to the VFs. * @err_handler: See Documentation/PCI/pci-error-recovery.rst * @groups: Sysfs attribute groups. * @driver: Driver model structure. 
@@ -871,6 +875,8 @@ struct pci_driver { int (*resume)(struct pci_dev *dev); /* Device woken up */ void (*shutdown)(struct pci_dev *dev); int (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */ + int (*sriov_set_msix_vec_count)(struct pci_dev *vf, int msix_vec_count); /* On PF */ + u32 (*sriov_get_vf_total_msix)(struct pci_dev *pf); const struct pci_error_handlers *err_handler; const struct attribute_group **groups; struct device_driver driver; @@ -1077,6 +1083,7 @@ u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap); u16 pci_find_ext_capability(struct pci_dev *dev, int cap); u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap); struct pci_bus *pci_find_next_bus(const struct pci_bus *from); +u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap); u64 pci_get_dsn(struct pci_dev *dev); @@ -1201,6 +1208,7 @@ int __must_check pci_set_mwi(struct pci_dev *dev); int __must_check pcim_set_mwi(struct pci_dev *dev); int pci_try_set_mwi(struct pci_dev *dev); void pci_clear_mwi(struct pci_dev *dev); +void pci_disable_parity(struct pci_dev *dev); void pci_intx(struct pci_dev *dev, int enable); bool pci_check_and_mask_intx(struct pci_dev *dev); bool pci_check_and_unmask_intx(struct pci_dev *dev); @@ -1302,7 +1310,6 @@ void pci_unlock_rescan_remove(void); /* Vital Product Data routines */ ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); -int pci_set_vpd_size(struct pci_dev *dev, size_t len); /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx); @@ -1944,8 +1951,8 @@ enum pci_fixup_pass { #ifdef CONFIG_LTO_CLANG #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ class_shift, hook, stub) \ - void stub(struct pci_dev *dev); \ - void stub(struct pci_dev *dev) \ + void __cficanonical stub(struct pci_dev *dev); \ + void __cficanonical stub(struct pci_dev *dev) \ { \ hook(dev); \ } \ @@ -2311,14 +2318,13 @@ static inline u8 pci_vpd_info_field_size(const u8 *info_field) /** * pci_vpd_find_tag - Locates the Resource Data Type tag provided * @buf: Pointer to buffered vpd data - * @off: The offset into the buffer at which to begin the search * @len: The length of the vpd buffer * @rdt: The Resource Data Type to search for * * Returns the index where the Resource Data Type was found or * -ENOENT otherwise. 
*/ -int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt); +int pci_vpd_find_tag(const u8 *buf, unsigned int len, u8 rdt); /** * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index a76ccb697bef..4c3fa5293d76 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1688,37 +1688,8 @@ #define PCI_VENDOR_ID_MICROSEMI 0x11f8 #define PCI_VENDOR_ID_RP 0x11fe -#define PCI_DEVICE_ID_RP32INTF 0x0001 -#define PCI_DEVICE_ID_RP8INTF 0x0002 -#define PCI_DEVICE_ID_RP16INTF 0x0003 -#define PCI_DEVICE_ID_RP4QUAD 0x0004 -#define PCI_DEVICE_ID_RP8OCTA 0x0005 -#define PCI_DEVICE_ID_RP8J 0x0006 -#define PCI_DEVICE_ID_RP4J 0x0007 -#define PCI_DEVICE_ID_RP8SNI 0x0008 -#define PCI_DEVICE_ID_RP16SNI 0x0009 -#define PCI_DEVICE_ID_RPP4 0x000A -#define PCI_DEVICE_ID_RPP8 0x000B -#define PCI_DEVICE_ID_RP4M 0x000D -#define PCI_DEVICE_ID_RP2_232 0x000E -#define PCI_DEVICE_ID_RP2_422 0x000F -#define PCI_DEVICE_ID_URP32INTF 0x0801 -#define PCI_DEVICE_ID_URP8INTF 0x0802 -#define PCI_DEVICE_ID_URP16INTF 0x0803 -#define PCI_DEVICE_ID_URP8OCTA 0x0805 -#define PCI_DEVICE_ID_UPCI_RM3_8PORT 0x080C -#define PCI_DEVICE_ID_UPCI_RM3_4PORT 0x080D -#define PCI_DEVICE_ID_CRP16INTF 0x0903 #define PCI_VENDOR_ID_CYCLADES 0x120e -#define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100 -#define PCI_DEVICE_ID_CYCLOM_Y_Hi 0x0101 -#define PCI_DEVICE_ID_CYCLOM_4Y_Lo 0x0102 -#define PCI_DEVICE_ID_CYCLOM_4Y_Hi 0x0103 -#define PCI_DEVICE_ID_CYCLOM_8Y_Lo 0x0104 -#define PCI_DEVICE_ID_CYCLOM_8Y_Hi 0x0105 -#define PCI_DEVICE_ID_CYCLOM_Z_Lo 0x0200 -#define PCI_DEVICE_ID_CYCLOM_Z_Hi 0x0201 #define PCI_DEVICE_ID_PC300_RX_2 0x0300 #define PCI_DEVICE_ID_PC300_RX_1 0x0301 #define PCI_DEVICE_ID_PC300_TE_2 0x0310 @@ -2065,8 +2036,6 @@ #define PCI_DEVICE_ID_EXAR_XR17V358 0x0358 #define PCI_VENDOR_ID_MICROGATE 0x13c0 -#define PCI_DEVICE_ID_MICROGATE_USC 0x0010 -#define PCI_DEVICE_ID_MICROGATE_SCA 0x0030 #define PCI_VENDOR_ID_3WARE 0x13C1 #define PCI_DEVICE_ID_3WARE_1000 0x1000 diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h index 351c1c9aedc5..2cb5188a7ef1 100644 --- a/include/linux/pcs/pcs-xpcs.h +++ b/include/linux/pcs/pcs-xpcs.h @@ -10,10 +10,15 @@ #include <linux/phy.h> #include <linux/phylink.h> +/* AN mode */ +#define DW_AN_C73 1 +#define DW_AN_C37_SGMII 2 + struct mdio_xpcs_args { __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); struct mii_bus *bus; int addr; + int an_mode; }; struct mdio_xpcs_ops { diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 3f7f89ea5e51..f5a6a2f069ed 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -260,15 +260,16 @@ struct perf_event; /** * pmu::capabilities flags */ -#define PERF_PMU_CAP_NO_INTERRUPT 0x01 -#define PERF_PMU_CAP_NO_NMI 0x02 -#define PERF_PMU_CAP_AUX_NO_SG 0x04 -#define PERF_PMU_CAP_EXTENDED_REGS 0x08 -#define PERF_PMU_CAP_EXCLUSIVE 0x10 -#define PERF_PMU_CAP_ITRACE 0x20 -#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40 -#define PERF_PMU_CAP_NO_EXCLUDE 0x80 -#define PERF_PMU_CAP_AUX_OUTPUT 0x100 +#define PERF_PMU_CAP_NO_INTERRUPT 0x0001 +#define PERF_PMU_CAP_NO_NMI 0x0002 +#define PERF_PMU_CAP_AUX_NO_SG 0x0004 +#define PERF_PMU_CAP_EXTENDED_REGS 0x0008 +#define PERF_PMU_CAP_EXCLUSIVE 0x0010 +#define PERF_PMU_CAP_ITRACE 0x0020 +#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x0040 +#define PERF_PMU_CAP_NO_EXCLUDE 0x0080 +#define PERF_PMU_CAP_AUX_OUTPUT 0x0100 +#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0200 struct perf_output_handle; @@ 
-607,6 +608,7 @@ struct swevent_hlist { #define PERF_ATTACH_TASK_DATA 0x08 #define PERF_ATTACH_ITRACE 0x10 #define PERF_ATTACH_SCHED_CB 0x20 +#define PERF_ATTACH_CHILD 0x40 struct perf_cgroup; struct perf_buffer; @@ -734,6 +736,7 @@ struct perf_event { int pending_wakeup; int pending_kill; int pending_disable; + unsigned long pending_addr; /* SIGTRAP */ struct irq_work pending; atomic_t event_limit; @@ -951,13 +954,11 @@ extern void perf_event_itrace_started(struct perf_event *event); extern int perf_pmu_register(struct pmu *pmu, const char *name, int type); extern void perf_pmu_unregister(struct pmu *pmu); -extern int perf_num_counters(void); -extern const char *perf_pmu_name(void); extern void __perf_event_task_sched_in(struct task_struct *prev, struct task_struct *task); extern void __perf_event_task_sched_out(struct task_struct *prev, struct task_struct *next); -extern int perf_event_init_task(struct task_struct *child); +extern int perf_event_init_task(struct task_struct *child, u64 clone_flags); extern void perf_event_exit_task(struct task_struct *child); extern void perf_event_free_task(struct task_struct *task); extern void perf_event_delayed_put(struct task_struct *task); @@ -1176,30 +1177,24 @@ DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]); * which is guaranteed by us not actually scheduling inside other swevents * because those disable preemption. */ -static __always_inline void -perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) +static __always_inline void __perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { - if (static_key_false(&perf_swevent_enabled[event_id])) { - struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]); + struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]); - perf_fetch_caller_regs(regs); - ___perf_sw_event(event_id, nr, regs, addr); - } + perf_fetch_caller_regs(regs); + ___perf_sw_event(event_id, nr, regs, addr); } extern struct static_key_false perf_sched_events; -static __always_inline bool -perf_sw_migrate_enabled(void) +static __always_inline bool __perf_sw_enabled(int swevt) { - if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS])) - return true; - return false; + return static_key_false(&perf_swevent_enabled[swevt]); } static inline void perf_event_task_migrate(struct task_struct *task) { - if (perf_sw_migrate_enabled()) + if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS)) task->sched_migrated = 1; } @@ -1209,11 +1204,9 @@ static inline void perf_event_task_sched_in(struct task_struct *prev, if (static_branch_unlikely(&perf_sched_events)) __perf_event_task_sched_in(prev, task); - if (perf_sw_migrate_enabled() && task->sched_migrated) { - struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]); - - perf_fetch_caller_regs(regs); - ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0); + if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS) && + task->sched_migrated) { + __perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0); task->sched_migrated = 0; } } @@ -1221,7 +1214,15 @@ static inline void perf_event_task_sched_in(struct task_struct *prev, static inline void perf_event_task_sched_out(struct task_struct *prev, struct task_struct *next) { - perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0); + if (__perf_sw_enabled(PERF_COUNT_SW_CONTEXT_SWITCHES)) + __perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0); + +#ifdef CONFIG_CGROUP_PERF + if (__perf_sw_enabled(PERF_COUNT_SW_CGROUP_SWITCHES) && + perf_cgroup_from_task(prev, NULL) != + perf_cgroup_from_task(next, NULL)) + 
__perf_sw_event_sched(PERF_COUNT_SW_CGROUP_SWITCHES, 1, 0); +#endif if (static_branch_unlikely(&perf_sched_events)) __perf_event_task_sched_out(prev, next); @@ -1448,7 +1449,8 @@ perf_event_task_sched_in(struct task_struct *prev, static inline void perf_event_task_sched_out(struct task_struct *prev, struct task_struct *next) { } -static inline int perf_event_init_task(struct task_struct *child) { return 0; } +static inline int perf_event_init_task(struct task_struct *child, + u64 clone_flags) { return 0; } static inline void perf_event_exit_task(struct task_struct *child) { } static inline void perf_event_free_task(struct task_struct *task) { } static inline void perf_event_delayed_put(struct task_struct *task) { } @@ -1477,8 +1479,6 @@ static inline int perf_event_refresh(struct perf_event *event, int refresh) static inline void perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { } static inline void -perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { } -static inline void perf_bp_event(struct perf_event *event, void *data) { } static inline int perf_register_guest_info_callbacks @@ -1548,6 +1548,18 @@ struct perf_pmu_events_ht_attr { const char *event_str_noht; }; +struct perf_pmu_events_hybrid_attr { + struct device_attribute attr; + u64 id; + const char *event_str; + u64 pmu_type; +}; + +struct perf_pmu_format_hybrid_attr { + struct device_attribute attr; + u64 pmu_type; +}; + ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr, char *page); diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 5e772392a379..46b13780c2c8 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -426,7 +426,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres /* * On some architectures hardware does not set page access bit when accessing - * memory page, it is responsibilty of software setting this bit. It brings + * memory page, it is responsibility of software setting this bit. It brings * out extra page fault penalty to track page access bit. For optimization page * access bit can be set during all page fault flow on these arches. * To be differentiate with macro pte_mkyoung, this macro is used on platforms @@ -519,7 +519,7 @@ extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); /* * This is an implementation of pmdp_establish() that is only suitable for an * architecture that doesn't have hardware dirty/accessed bits. In this case we - * can't race with CPU which sets these bits and non-atomic aproach is fine. + * can't race with CPU which sets these bits and non-atomic approach is fine. */ static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t pmd) @@ -852,7 +852,7 @@ static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma, * updates, but to prevent any updates it may make from being lost. * * This does not protect against other software modifications of the - * pte; the appropriate pte lock must be held over the transation. + * pte; the appropriate pte lock must be held over the transaction. 
* * Note that this interface is intended to be batchable, meaning that * ptep_modify_prot_commit may not actually update the pte, but merely @@ -1111,6 +1111,7 @@ extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, extern void untrack_pfn_moved(struct vm_area_struct *vma); #endif +#ifdef CONFIG_MMU #ifdef __HAVE_COLOR_ZERO_PAGE static inline int is_zero_pfn(unsigned long pfn) { @@ -1134,6 +1135,17 @@ static inline unsigned long my_zero_pfn(unsigned long addr) return zero_pfn; } #endif +#else +static inline int is_zero_pfn(unsigned long pfn) +{ + return 0; +} + +static inline unsigned long my_zero_pfn(unsigned long addr) +{ + return 0; +} +#endif /* CONFIG_MMU */ #ifdef CONFIG_MMU @@ -1269,13 +1281,13 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) * * The complete check uses is_pmd_migration_entry() in linux/swapops.h * But using that requires moving current function and pmd_trans_unstable() - * to linux/swapops.h to resovle dependency, which is too much code move. + * to linux/swapops.h to resolve dependency, which is too much code move. * * !pmd_present() is equivalent to is_pmd_migration_entry() currently, * because !pmd_present() pages can only be under migration not swapped * out. * - * pmd_none() is preseved for future condition checks on pmd migration + * pmd_none() is preserved for future condition checks on pmd migration * entries and not confusing with this function name, although it is * redundant with !pmd_present(). */ diff --git a/include/linux/phy.h b/include/linux/phy.h index 1a12e4436b5b..852743f07e3e 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -493,8 +493,14 @@ struct macsec_ops; * @loopback_enabled: Set true if this PHY has been loopbacked successfully. * @downshifted_rate: Set true if link speed has been downshifted. * @is_on_sfp_module: Set true if PHY is located on an SFP module. + * @mac_managed_pm: Set true if MAC driver takes of suspending/resuming PHY * @state: State of the PHY for management purposes * @dev_flags: Device-specific flags used by the PHY driver. + * Bits [15:0] are free to use by the PHY driver to communicate + * driver specific behavior. + * Bits [23:16] are currently reserved for future use. + * Bits [31:24] are reserved for defining generic + * PHY driver behavior. 
* @irq: IRQ number of the PHY's interrupt (-1 if none) * @phy_timer: The timer for handling the state machine * @phylink: Pointer to phylink instance for this PHY @@ -567,6 +573,7 @@ struct phy_device { unsigned loopback_enabled:1; unsigned downshifted_rate:1; unsigned is_on_sfp_module:1; + unsigned mac_managed_pm:1; unsigned autoneg:1; /* The most recently read link state */ @@ -1408,6 +1415,7 @@ void phy_disconnect(struct phy_device *phydev); void phy_detach(struct phy_device *phydev); void phy_start(struct phy_device *phydev); void phy_stop(struct phy_device *phydev); +int phy_config_aneg(struct phy_device *phydev); int phy_start_aneg(struct phy_device *phydev); int phy_aneg_done(struct phy_device *phydev); int phy_speed_down(struct phy_device *phydev, bool sync); @@ -1532,6 +1540,9 @@ int genphy_c45_read_mdix(struct phy_device *phydev); int genphy_c45_pma_read_abilities(struct phy_device *phydev); int genphy_c45_read_status(struct phy_device *phydev); int genphy_c45_config_aneg(struct phy_device *phydev); +int genphy_c45_loopback(struct phy_device *phydev, bool enable); +int genphy_c45_pma_resume(struct phy_device *phydev); +int genphy_c45_pma_suspend(struct phy_device *phydev); /* Generic C45 PHY driver */ extern struct phy_driver genphy_c45_driver; diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index e435bdb0bab3..0ed434d02196 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h @@ -44,6 +44,12 @@ enum phy_mode { PHY_MODE_DP }; +enum phy_media { + PHY_MEDIA_DEFAULT, + PHY_MEDIA_SR, + PHY_MEDIA_DAC, +}; + /** * union phy_configure_opts - Opaque generic phy configuration * @@ -64,6 +70,8 @@ union phy_configure_opts { * @power_on: powering on the phy * @power_off: powering off the phy * @set_mode: set the mode of the phy + * @set_media: set the media type of the phy (optional) + * @set_speed: set the speed of the phy (optional) * @reset: resetting the phy * @calibrate: calibrate the phy * @release: ops to be performed while the consumer relinquishes the PHY @@ -75,6 +83,8 @@ struct phy_ops { int (*power_on)(struct phy *phy); int (*power_off)(struct phy *phy); int (*set_mode)(struct phy *phy, enum phy_mode mode, int submode); + int (*set_media)(struct phy *phy, enum phy_media media); + int (*set_speed)(struct phy *phy, int speed); /** * @configure: @@ -215,6 +225,8 @@ int phy_power_off(struct phy *phy); int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode); #define phy_set_mode(phy, mode) \ phy_set_mode_ext(phy, mode, 0) +int phy_set_media(struct phy *phy, enum phy_media media); +int phy_set_speed(struct phy *phy, int speed); int phy_configure(struct phy *phy, union phy_configure_opts *opts); int phy_validate(struct phy *phy, enum phy_mode mode, int submode, union phy_configure_opts *opts); @@ -344,6 +356,20 @@ static inline int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, #define phy_set_mode(phy, mode) \ phy_set_mode_ext(phy, mode, 0) +static inline int phy_set_media(struct phy *phy, enum phy_media media) +{ + if (!phy) + return 0; + return -ENODEV; +} + +static inline int phy_set_speed(struct phy *phy, int speed) +{ + if (!phy) + return 0; + return -ENODEV; +} + static inline enum phy_mode phy_get_mode(struct phy *phy) { return PHY_MODE_INVALID; diff --git a/include/linux/phylink.h b/include/linux/phylink.h index d81a714cfbbd..fd2acfd9b597 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -64,6 +64,7 @@ enum phylink_op_type { * @pcs_poll: MAC PCS cannot provide link change interrupt * @poll_fixed_state: if 
true, starts link_poll, * if MAC link is at %MLO_AN_FIXED mode. + * @ovr_an_inband: if true, override PCS to MLO_AN_INBAND * @get_fixed_state: callback to execute to determine the fixed link state, * if MAC link is at %MLO_AN_FIXED mode. */ @@ -72,6 +73,7 @@ struct phylink_config { enum phylink_op_type type; bool pcs_poll; bool poll_fixed_state; + bool ovr_an_inband; void (*get_fixed_state)(struct phylink_config *config, struct phylink_link_state *state); }; diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index 6aeb711f7cd1..e18ab3d5908f 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h @@ -76,10 +76,11 @@ struct pinctrl_map; * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin. * If the argument != 0, schmitt-trigger mode is enabled. If it's 0, * schmitt-trigger mode is disabled. - * @PIN_CONFIG_LOW_POWER_MODE: this will configure the pin for low power + * @PIN_CONFIG_MODE_LOW_POWER: this will configure the pin for low power * operation, if several modes of operation are supported these can be * passed in the argument on a custom form, else just use argument 1 * to indicate low power mode, argument 0 turns low power mode off. + * @PIN_CONFIG_MODE_PWM: this will configure the pin for PWM * @PIN_CONFIG_OUTPUT_ENABLE: this will enable the pin's output mode * without driving a value there. For most platforms this reduces to * enable the output buffers and then let the pin controller current @@ -90,6 +91,7 @@ struct pinctrl_map; * value on the line. Use argument 1 to indicate high level, argument 0 to * indicate low level. (Please see Documentation/driver-api/pinctl.rst, * section "GPIO mode pitfalls" for a discussion around this parameter.) + * @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power * supplies, the argument to this parameter (on a custom format) tells * the driver which alternative power source to use. @@ -101,7 +103,6 @@ struct pinctrl_map; * or latch delay (on outputs) this parameter (in a custom format) * specifies the clock skew or latch delay. It typically controls how * many double inverters are put in front of the line. - * @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if * you need to pass in custom configurations to the pin controller, use * PIN_CONFIG_END+1 as the base offset. 
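
The include/linux/phy/phy.h hunk above adds an optional set_media/set_speed op pair to the generic PHY framework together with phy_set_media()/phy_set_speed() consumer wrappers (the fallback stubs succeed for a NULL phy and return -ENODEV otherwise). The following is a minimal consumer sketch, not taken from this changeset; the "serdes" phandle name and the surrounding driver are assumptions made for illustration.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/ethtool.h>		/* SPEED_10000 */
#include <linux/phy/phy.h>

/* Hypothetical MAC/SerDes setup path exercising the new calls. */
static int example_serdes_configure(struct device *dev, bool direct_attach)
{
	struct phy *serdes = devm_phy_get(dev, "serdes");
	int ret;

	if (IS_ERR(serdes))
		return PTR_ERR(serdes);

	ret = phy_init(serdes);
	if (ret)
		return ret;

	/* New: tell the provider which medium is attached... */
	ret = phy_set_media(serdes,
			    direct_attach ? PHY_MEDIA_DAC : PHY_MEDIA_SR);
	if (ret)
		goto err_exit;

	/* ...and the line speed it should be tuned for. */
	ret = phy_set_speed(serdes, SPEED_10000);
	if (ret)
		goto err_exit;

	ret = phy_power_on(serdes);
	if (ret)
		goto err_exit;

	return 0;

err_exit:
	phy_exit(serdes);
	return ret;
}
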
@@ -124,14 +125,15 @@ enum pin_config_param { PIN_CONFIG_INPUT_ENABLE, PIN_CONFIG_INPUT_SCHMITT, PIN_CONFIG_INPUT_SCHMITT_ENABLE, - PIN_CONFIG_LOW_POWER_MODE, + PIN_CONFIG_MODE_LOW_POWER, + PIN_CONFIG_MODE_PWM, PIN_CONFIG_OUTPUT_ENABLE, PIN_CONFIG_OUTPUT, + PIN_CONFIG_PERSIST_STATE, PIN_CONFIG_POWER_SOURCE, PIN_CONFIG_SLEEP_HARDWARE_STATE, PIN_CONFIG_SLEW_RATE, PIN_CONFIG_SKEW_DELAY, - PIN_CONFIG_PERSIST_STATE, PIN_CONFIG_END = 0x7F, PIN_CONFIG_MAX = 0xFF, }; diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index 5ff8597ceabd..45f53afc46e2 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -3467,6 +3467,7 @@ struct ec_response_get_next_event_v1 { #define EC_MKBP_LID_OPEN 0 #define EC_MKBP_TABLET_MODE 1 #define EC_MKBP_BASE_ATTACHED 2 +#define EC_MKBP_FRONT_PROXIMITY 3 /* Run keyboard factory test scanning */ #define EC_CMD_KEYBOARD_FACTORY_TEST 0x0068 @@ -5678,6 +5679,7 @@ enum tcpc_cc_polarity { #define PD_STATUS_EVENT_SOP_DISC_DONE BIT(0) #define PD_STATUS_EVENT_SOP_PRIME_DISC_DONE BIT(1) +#define PD_STATUS_EVENT_HARD_RESET BIT(2) struct ec_params_typec_status { uint8_t port; diff --git a/include/linux/platform_data/eth_ixp4xx.h b/include/linux/platform_data/eth_ixp4xx.h index 6f652ea0c6ae..114b0940729f 100644 --- a/include/linux/platform_data/eth_ixp4xx.h +++ b/include/linux/platform_data/eth_ixp4xx.h @@ -14,6 +14,8 @@ struct eth_plat_info { u8 rxq; /* configurable, currently 0 - 31 only */ u8 txreadyq; u8 hwaddr[6]; + u8 npe; /* NPE instance used by this interface */ + bool has_mdio; /* If this instance has an MDIO bus */ }; #endif diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h index 8b30b14b47d3..f377817ce75c 100644 --- a/include/linux/platform_data/gpio-omap.h +++ b/include/linux/platform_data/gpio-omap.h @@ -85,6 +85,7 @@ * omap2+ specific GPIO registers */ #define OMAP24XX_GPIO_REVISION 0x0000 +#define OMAP24XX_GPIO_SYSCONFIG 0x0010 #define OMAP24XX_GPIO_IRQSTATUS1 0x0018 #define OMAP24XX_GPIO_IRQSTATUS2 0x0028 #define OMAP24XX_GPIO_IRQENABLE2 0x002c @@ -108,6 +109,7 @@ #define OMAP24XX_GPIO_SETDATAOUT 0x0094 #define OMAP4_GPIO_REVISION 0x0000 +#define OMAP4_GPIO_SYSCONFIG 0x0010 #define OMAP4_GPIO_EOI 0x0020 #define OMAP4_GPIO_IRQSTATUSRAW0 0x0024 #define OMAP4_GPIO_IRQSTATUSRAW1 0x0028 @@ -148,6 +150,7 @@ #ifndef __ASSEMBLER__ struct omap_gpio_reg_offs { u16 revision; + u16 sysconfig; u16 direction; u16 datain; u16 dataout; diff --git a/include/linux/platform_data/hirschmann-hellcreek.h b/include/linux/platform_data/hirschmann-hellcreek.h index 388846766bb2..6a000df5541f 100644 --- a/include/linux/platform_data/hirschmann-hellcreek.h +++ b/include/linux/platform_data/hirschmann-hellcreek.h @@ -12,6 +12,7 @@ #include <linux/types.h> struct hellcreek_platform_data { + const char *name; /* Switch name */ int num_ports; /* Amount of switch ports */ int is_100_mbits; /* Is it configured to 100 or 1000 mbit/s */ int qbv_support; /* Qbv support on front TSN ports */ diff --git a/include/linux/platform_data/i2c-designware.h b/include/linux/platform_data/i2c-designware.h deleted file mode 100644 index 014c4a5a7e13..000000000000 --- a/include/linux/platform_data/i2c-designware.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright(c) 2014 Intel Corporation. 
- */ - -#ifndef I2C_DESIGNWARE_H -#define I2C_DESIGNWARE_H - -struct dw_i2c_platform_data { - unsigned int i2c_scl_freq; -}; - -#endif diff --git a/include/linux/platform_data/invensense_mpu6050.h b/include/linux/platform_data/invensense_mpu6050.h index 93974f4cfba1..f05b37521f67 100644 --- a/include/linux/platform_data/invensense_mpu6050.h +++ b/include/linux/platform_data/invensense_mpu6050.h @@ -12,7 +12,7 @@ * mounting matrix retrieved from device-tree) * * Contains platform specific information on how to configure the MPU6050 to - * work on this platform. The orientation matricies are 3x3 rotation matricies + * work on this platform. The orientation matrices are 3x3 rotation matrices * that are applied to the data to rotate from the mounting orientation to the * platform orientation. The values must be one of 0, 1, or -1 and each row and * column should have exactly 1 non-zero value. diff --git a/include/linux/platform_data/media/camera-mx2.h b/include/linux/platform_data/media/camera-mx2.h deleted file mode 100644 index 8cfa76b6e1e1..000000000000 --- a/include/linux/platform_data/media/camera-mx2.h +++ /dev/null @@ -1,31 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * mx2-cam.h - i.MX27/i.MX25 camera driver header file - * - * Copyright (C) 2003, Intel Corporation - * Copyright (C) 2008, Sascha Hauer <s.hauer@pengutronix.de> - * Copyright (C) 2010, Baruch Siach <baruch@tkos.co.il> - */ - -#ifndef __MACH_MX2_CAM_H_ -#define __MACH_MX2_CAM_H_ - -#define MX2_CAMERA_EXT_VSYNC (1 << 1) -#define MX2_CAMERA_CCIR (1 << 2) -#define MX2_CAMERA_CCIR_INTERLACE (1 << 3) -#define MX2_CAMERA_HSYNC_HIGH (1 << 4) -#define MX2_CAMERA_GATED_CLOCK (1 << 5) -#define MX2_CAMERA_INV_DATA (1 << 6) -#define MX2_CAMERA_PCLK_SAMPLE_RISING (1 << 7) - -/** - * struct mx2_camera_platform_data - optional platform data for mx2_camera - * @flags: any combination of MX2_CAMERA_* - * @clk: clock rate of the csi block / 2 - */ -struct mx2_camera_platform_data { - unsigned long flags; - unsigned long clk; -}; - -#endif /* __MACH_MX2_CAM_H_ */ diff --git a/include/linux/platform_data/media/camera-mx3.h b/include/linux/platform_data/media/camera-mx3.h deleted file mode 100644 index 781c004e5596..000000000000 --- a/include/linux/platform_data/media/camera-mx3.h +++ /dev/null @@ -1,43 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * mx3_camera.h - i.MX3x camera driver header file - * - * Copyright (C) 2008, Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de> - */ - -#ifndef _MX3_CAMERA_H_ -#define _MX3_CAMERA_H_ - -#include <linux/device.h> - -#define MX3_CAMERA_CLK_SRC 1 -#define MX3_CAMERA_EXT_VSYNC 2 -#define MX3_CAMERA_DP 4 -#define MX3_CAMERA_PCP 8 -#define MX3_CAMERA_HSP 0x10 -#define MX3_CAMERA_VSP 0x20 -#define MX3_CAMERA_DATAWIDTH_4 0x40 -#define MX3_CAMERA_DATAWIDTH_8 0x80 -#define MX3_CAMERA_DATAWIDTH_10 0x100 -#define MX3_CAMERA_DATAWIDTH_15 0x200 - -#define MX3_CAMERA_DATAWIDTH_MASK (MX3_CAMERA_DATAWIDTH_4 | MX3_CAMERA_DATAWIDTH_8 | \ - MX3_CAMERA_DATAWIDTH_10 | MX3_CAMERA_DATAWIDTH_15) - -struct v4l2_async_subdev; - -/** - * struct mx3_camera_pdata - i.MX3x camera platform data - * @flags: MX3_CAMERA_* flags - * @mclk_10khz: master clock frequency in 10kHz units - * @dma_dev: IPU DMA device to match against in channel allocation - */ -struct mx3_camera_pdata { - unsigned long flags; - unsigned long mclk_10khz; - struct device *dma_dev; - struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */ - int *asd_sizes; /* 0-terminated array of asd group sizes */ 
-}; - -#endif diff --git a/include/linux/platform_data/simplefb.h b/include/linux/platform_data/simplefb.h index ca8337695c2a..27ea99af6e1d 100644 --- a/include/linux/platform_data/simplefb.h +++ b/include/linux/platform_data/simplefb.h @@ -16,6 +16,7 @@ #define SIMPLEFB_FORMATS \ { \ { "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0}, DRM_FORMAT_RGB565 }, \ + { "r5g5b5a1", 16, {11, 5}, {6, 5}, {1, 5}, {0, 1}, DRM_FORMAT_RGBA5551 }, \ { "x1r5g5b5", 16, {10, 5}, {5, 5}, {0, 5}, {0, 0}, DRM_FORMAT_XRGB1555 }, \ { "a1r5g5b5", 16, {10, 5}, {5, 5}, {0, 5}, {15, 1}, DRM_FORMAT_ARGB1555 }, \ { "r8g8b8", 24, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_RGB888 }, \ diff --git a/include/linux/platform_data/usb-mx2.h b/include/linux/platform_data/usb-mx2.h deleted file mode 100644 index 97a670f3d8fb..000000000000 --- a/include/linux/platform_data/usb-mx2.h +++ /dev/null @@ -1,29 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (C) 2009 Martin Fuzzey <mfuzzey@gmail.com> - */ - -#ifndef __ASM_ARCH_MX21_USBH -#define __ASM_ARCH_MX21_USBH - -enum mx21_usbh_xcvr { - /* Values below as used by hardware (HWMODE register) */ - MX21_USBXCVR_TXDIF_RXDIF = 0, - MX21_USBXCVR_TXDIF_RXSE = 1, - MX21_USBXCVR_TXSE_RXDIF = 2, - MX21_USBXCVR_TXSE_RXSE = 3, -}; - -struct mx21_usbh_platform_data { - enum mx21_usbh_xcvr host_xcvr; /* tranceiver mode host 1,2 ports */ - enum mx21_usbh_xcvr otg_xcvr; /* tranceiver mode otg (as host) port */ - u16 enable_host1:1, - enable_host2:1, - enable_otg_host:1, /* enable "OTG" port (as host) */ - host1_xcverless:1, /* traceiverless host1 port */ - host1_txenoe:1, /* output enable host1 transmit enable */ - otg_ext_xcvr:1, /* external tranceiver for OTG port */ - unused:10; -}; - -#endif /* __ASM_ARCH_MX21_USBH */ diff --git a/include/linux/platform_data/intel-spi.h b/include/linux/platform_data/x86/intel-spi.h index 7f53a5c6f35e..7f53a5c6f35e 100644 --- a/include/linux/platform_data/intel-spi.h +++ b/include/linux/platform_data/x86/intel-spi.h diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 3f23f6e430bf..cd81e060863c 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -359,4 +359,7 @@ static inline int is_sh_early_platform_device(struct platform_device *pdev) } #endif /* CONFIG_SUPERH */ +/* For now only SuperH uses it */ +void early_platform_cleanup(void); + #endif /* _PLATFORM_DEVICE_H_ */ diff --git a/include/linux/pm.h b/include/linux/pm.h index 482313a8ccfc..1d8209c09686 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -39,7 +39,6 @@ static inline void pm_vt_switch_unregister(struct device *dev) * Device power management */ -struct device; #ifdef CONFIG_PM extern const char power_group_name[]; /* = "power" */ @@ -602,6 +601,7 @@ struct dev_pm_info { unsigned int idle_notification:1; unsigned int request_pending:1; unsigned int deferred_resume:1; + unsigned int needs_force_resume:1; unsigned int runtime_auto:1; bool ignore_children:1; unsigned int no_callbacks:1; diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index c0371efa4a0f..84150a22fd7c 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -144,18 +144,21 @@ int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, unsigned int count); void dev_pm_opp_put_supported_hw(struct opp_table *opp_table); +int devm_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, unsigned int 
count); struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name); void dev_pm_opp_put_prop_name(struct opp_table *opp_table); struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count); void dev_pm_opp_put_regulators(struct opp_table *opp_table); +int devm_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count); struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name); void dev_pm_opp_put_clkname(struct opp_table *opp_table); +int devm_pm_opp_set_clkname(struct device *dev, const char *name); struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table); -struct opp_table *devm_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); +int devm_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs); void dev_pm_opp_detach_genpd(struct opp_table *opp_table); -struct opp_table *devm_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs); +int devm_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs); struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, struct opp_table *dst_table, struct dev_pm_opp *src_opp); int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); @@ -319,6 +322,13 @@ static inline struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev, static inline void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) {} +static inline int devm_pm_opp_set_supported_hw(struct device *dev, + const u32 *versions, + unsigned int count) +{ + return -EOPNOTSUPP; +} + static inline struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)) { @@ -327,11 +337,10 @@ static inline struct opp_table *dev_pm_opp_register_set_opp_helper(struct device static inline void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) {} -static inline struct opp_table * -devm_pm_opp_register_set_opp_helper(struct device *dev, +static inline int devm_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)) { - return ERR_PTR(-EOPNOTSUPP); + return -EOPNOTSUPP; } static inline struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name) @@ -348,6 +357,13 @@ static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, co static inline void dev_pm_opp_put_regulators(struct opp_table *opp_table) {} +static inline int devm_pm_opp_set_regulators(struct device *dev, + const char * const names[], + unsigned int count) +{ + return -EOPNOTSUPP; +} + static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name) { return ERR_PTR(-EOPNOTSUPP); @@ -355,6 +371,11 @@ static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {} +static inline int devm_pm_opp_set_clkname(struct device *dev, const char *name) +{ + return -EOPNOTSUPP; +} + static inline struct 
opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs) { return ERR_PTR(-EOPNOTSUPP); @@ -362,10 +383,11 @@ static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, cons static inline void dev_pm_opp_detach_genpd(struct opp_table *opp_table) {} -static inline struct opp_table *devm_pm_opp_attach_genpd(struct device *dev, - const char **names, struct device ***virt_devs) +static inline int devm_pm_opp_attach_genpd(struct device *dev, + const char **names, + struct device ***virt_devs) { - return ERR_PTR(-EOPNOTSUPP); + return -EOPNOTSUPP; } static inline struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, @@ -419,6 +441,7 @@ int dev_pm_opp_of_add_table(struct device *dev); int dev_pm_opp_of_add_table_indexed(struct device *dev, int index); int dev_pm_opp_of_add_table_noclk(struct device *dev, int index); void dev_pm_opp_of_remove_table(struct device *dev); +int devm_pm_opp_of_add_table(struct device *dev); int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask); void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask); int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); @@ -451,6 +474,11 @@ static inline void dev_pm_opp_of_remove_table(struct device *dev) { } +static inline int devm_pm_opp_of_add_table(struct device *dev) +{ + return -EOPNOTSUPP; +} + static inline int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask) { return -EOPNOTSUPP; diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index b492ae00cc90..6c08a085367b 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -265,7 +265,7 @@ static inline void pm_runtime_no_callbacks(struct device *dev) {} static inline void pm_runtime_irq_safe(struct device *dev) {} static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; } -static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; } +static inline bool pm_runtime_has_no_callbacks(struct device *dev) { return false; } static inline void pm_runtime_mark_last_busy(struct device *dev) {} static inline void __pm_runtime_use_autosuspend(struct device *dev, bool use) {} diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index 111a40d0d3d5..a1aa68141d0b 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -33,6 +33,7 @@ enum bq27xxx_chip { BQ27Z561, BQ28Z610, BQ34Z100, + BQ78Z100, }; struct bq27xxx_device_info; @@ -53,7 +54,6 @@ struct bq27xxx_reg_cache { int capacity; int energy; int flags; - int power_avg; int health; }; diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 81a55e974feb..be203985ecdd 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -381,8 +381,14 @@ struct power_supply_battery_info { extern struct atomic_notifier_head power_supply_notifier; extern int power_supply_reg_notifier(struct notifier_block *nb); extern void power_supply_unreg_notifier(struct notifier_block *nb); +#if IS_ENABLED(CONFIG_POWER_SUPPLY) extern struct power_supply *power_supply_get_by_name(const char *name); extern void power_supply_put(struct power_supply *psy); +#else +static inline void power_supply_put(struct power_supply *psy) {} +static inline struct power_supply *power_supply_get_by_name(const char *name) +{ return NULL; } +#endif #ifdef CONFIG_OF extern struct power_supply *power_supply_get_by_phandle(struct 
device_node *np, const char *property); @@ -426,9 +432,16 @@ static inline int power_supply_is_system_supplied(void) { return -ENOSYS; } extern int power_supply_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val); +#if IS_ENABLED(CONFIG_POWER_SUPPLY) extern int power_supply_set_property(struct power_supply *psy, enum power_supply_property psp, const union power_supply_propval *val); +#else +static inline int power_supply_set_property(struct power_supply *psy, + enum power_supply_property psp, + const union power_supply_propval *val) +{ return 0; } +#endif extern int power_supply_property_is_writeable(struct power_supply *psy, enum power_supply_property psp); extern void power_supply_external_power_changed(struct power_supply *psy); @@ -476,12 +489,12 @@ static inline bool power_supply_is_amp_property(enum power_supply_property psp) case POWER_SUPPLY_PROP_CURRENT_NOW: case POWER_SUPPLY_PROP_CURRENT_AVG: case POWER_SUPPLY_PROP_CURRENT_BOOT: - return 1; + return true; default: break; } - return 0; + return false; } static inline bool power_supply_is_watt_property(enum power_supply_property psp) @@ -504,12 +517,12 @@ static inline bool power_supply_is_watt_property(enum power_supply_property psp) case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX: case POWER_SUPPLY_PROP_POWER_NOW: - return 1; + return true; default: break; } - return 0; + return false; } #ifdef CONFIG_POWER_SUPPLY_HWMON diff --git a/include/linux/ppp_channel.h b/include/linux/ppp_channel.h index 98966064ee68..91f9a928344e 100644 --- a/include/linux/ppp_channel.h +++ b/include/linux/ppp_channel.h @@ -28,6 +28,9 @@ struct ppp_channel_ops { int (*start_xmit)(struct ppp_channel *, struct sk_buff *); /* Handle an ioctl call that has come in via /dev/ppp. 
*/ int (*ioctl)(struct ppp_channel *, unsigned int, unsigned long); + int (*fill_forward_path)(struct net_device_path_ctx *, + struct net_device_path *, + const struct ppp_channel *); }; struct ppp_channel { diff --git a/include/linux/pps-gpio.h b/include/linux/pps-gpio.h deleted file mode 100644 index 7bf49908be06..000000000000 --- a/include/linux/pps-gpio.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * pps-gpio.h -- PPS client for GPIOs - * - * Copyright (C) 2011 James Nuss <jamesnuss@nanometrics.ca> - */ - -#ifndef _PPS_GPIO_H -#define _PPS_GPIO_H - -struct pps_gpio_platform_data { - struct gpio_desc *gpio_pin; - struct gpio_desc *echo_pin; - bool assert_falling_edge; - bool capture_clear; - unsigned int echo_active_ms; -}; - -#endif /* _PPS_GPIO_H */ diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 69cc8b64aa3a..9881eac0698f 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -79,7 +79,11 @@ #define nmi_count() (preempt_count() & NMI_MASK) #define hardirq_count() (preempt_count() & HARDIRQ_MASK) -#define softirq_count() (preempt_count() & SOFTIRQ_MASK) +#ifdef CONFIG_PREEMPT_RT +# define softirq_count() (current->softirq_disable_cnt & SOFTIRQ_MASK) +#else +# define softirq_count() (preempt_count() & SOFTIRQ_MASK) +#endif #define irq_count() (nmi_count() | hardirq_count() | softirq_count()) /* diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 000cc0533c33..069c7fd95396 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -32,6 +32,7 @@ struct proc_ops { ssize_t (*proc_read)(struct file *, char __user *, size_t, loff_t *); ssize_t (*proc_read_iter)(struct kiocb *, struct iov_iter *); ssize_t (*proc_write)(struct file *, const char __user *, size_t, loff_t *); + /* mandatory unless nonseekable_open() or equivalent is used */ loff_t (*proc_lseek)(struct file *, loff_t, int); int (*proc_release)(struct inode *, struct file *); __poll_t (*proc_poll)(struct file *, struct poll_table_struct *); diff --git a/include/linux/profile.h b/include/linux/profile.h index bad18ca43150..fd18ca96f557 100644 --- a/include/linux/profile.h +++ b/include/linux/profile.h @@ -15,7 +15,6 @@ #define KVM_PROFILING 4 struct proc_dir_entry; -struct pt_regs; struct notifier_block; #if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS) @@ -84,8 +83,6 @@ int task_handoff_unregister(struct notifier_block * n); int profile_event_register(enum profile_type, struct notifier_block * n); int profile_event_unregister(enum profile_type, struct notifier_block * n); -struct pt_regs; - #else #define prof_on 0 diff --git a/include/linux/property.h b/include/linux/property.h index dd4687b56239..0d876316e61d 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -254,6 +254,13 @@ struct software_node_ref_args { u64 args[NR_FWNODE_REFERENCE_ARGS]; }; +#define SOFTWARE_NODE_REFERENCE(_ref_, ...) \ +(const struct software_node_ref_args) { \ + .node = _ref_, \ + .nargs = ARRAY_SIZE(((u64[]){ 0, ##__VA_ARGS__ })) - 1, \ + .args = { __VA_ARGS__ }, \ +} + /** * struct property_entry - "Built-in" device property representation. * @name: Name of the property. 
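
The include/linux/property.h hunk above factors the reference compound literal out of PROPERTY_ENTRY_REF() into a standalone SOFTWARE_NODE_REFERENCE() helper (the next hunk converts PROPERTY_ENTRY_REF() to use it). A brief sketch of what the helper permits, with made-up node and property names; this is illustrative only, not code from the changeset.

#include <linux/property.h>

static const struct software_node example_child_node = {
	.name = "example-child",
};

/*
 * Software-node references, optionally with argument cells, can now be
 * built on their own (e.g. collected into an array); previously this
 * compound literal could only be spelled inside PROPERTY_ENTRY_REF().
 */
static const struct software_node_ref_args example_refs[] = {
	SOFTWARE_NODE_REFERENCE(&example_child_node),
	SOFTWARE_NODE_REFERENCE(&example_child_node, 1, 2),
};

/* The existing entry helper keeps working and now expands to the same macro. */
static const struct property_entry example_props[] = {
	PROPERTY_ENTRY_REF("example-ref", &example_child_node, 1, 2),
	{ }
};
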
@@ -362,11 +369,7 @@ struct property_entry { .name = _name_, \ .length = sizeof(struct software_node_ref_args), \ .type = DEV_PROP_REF, \ - { .pointer = &(const struct software_node_ref_args) { \ - .node = _ref_, \ - .nargs = ARRAY_SIZE(((u64[]){ 0, ##__VA_ARGS__ })) - 1, \ - .args = { __VA_ARGS__ }, \ - } }, \ + { .pointer = &SOFTWARE_NODE_REFERENCE(_ref_, ##__VA_ARGS__), }, \ } struct property_entry * diff --git a/include/linux/psi.h b/include/linux/psi.h index 7361023f3fdd..65eb1476ac70 100644 --- a/include/linux/psi.h +++ b/include/linux/psi.h @@ -20,7 +20,6 @@ void psi_task_change(struct task_struct *task, int clear, int set); void psi_task_switch(struct task_struct *prev, struct task_struct *next, bool sleep); -void psi_memstall_tick(struct task_struct *task, int cpu); void psi_memstall_enter(unsigned long *flags); void psi_memstall_leave(unsigned long *flags); diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h index b95f3211566a..0a23300d49af 100644 --- a/include/linux/psi_types.h +++ b/include/linux/psi_types.h @@ -50,9 +50,10 @@ enum psi_states { PSI_MEM_SOME, PSI_MEM_FULL, PSI_CPU_SOME, + PSI_CPU_FULL, /* Only per-CPU, to weigh the CPU in the global average: */ PSI_NONIDLE, - NR_PSI_STATES = 6, + NR_PSI_STATES = 7, }; enum psi_aggregators { diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index b801ead1e2bb..d48a7192e881 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -73,6 +73,7 @@ enum sev_cmd { SEV_CMD_SEND_UPDATE_DATA = 0x041, SEV_CMD_SEND_UPDATE_VMSA = 0x042, SEV_CMD_SEND_FINISH = 0x043, + SEV_CMD_SEND_CANCEL = 0x044, /* Guest migration commands (incoming) */ SEV_CMD_RECEIVE_START = 0x050, @@ -326,11 +327,11 @@ struct sev_data_send_start { u64 pdh_cert_address; /* In */ u32 pdh_cert_len; /* In */ u32 reserved1; - u64 plat_cert_address; /* In */ - u32 plat_cert_len; /* In */ + u64 plat_certs_address; /* In */ + u32 plat_certs_len; /* In */ u32 reserved2; - u64 amd_cert_address; /* In */ - u32 amd_cert_len; /* In */ + u64 amd_certs_address; /* In */ + u32 amd_certs_len; /* In */ u32 reserved3; u64 session_address; /* In */ u32 session_len; /* In/Out */ @@ -393,6 +394,15 @@ struct sev_data_send_finish { } __packed; /** + * struct sev_data_send_cancel - SEND_CANCEL command parameters + * + * @handle: handle of the VM to process + */ +struct sev_data_send_cancel { + u32 handle; /* In */ +} __packed; + +/** * struct sev_data_receive_start - RECEIVE_START command parameters * * @handle: handle of the VM to perform receive operation diff --git a/include/linux/ptp_kvm.h b/include/linux/ptp_kvm.h new file mode 100644 index 000000000000..f960a719f0d5 --- /dev/null +++ b/include/linux/ptp_kvm.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Virtual PTP 1588 clock for use with KVM guests + * + * Copyright (C) 2017 Red Hat Inc. + */ + +#ifndef _PTP_KVM_H_ +#define _PTP_KVM_H_ + +struct timespec64; +struct clocksource; + +int kvm_arch_ptp_init(void); +int kvm_arch_ptp_get_clock(struct timespec64 *ts); +int kvm_arch_ptp_get_crosststamp(u64 *cycle, + struct timespec64 *tspec, struct clocksource **cs); + +#endif /* _PTP_KVM_H_ */ diff --git a/include/linux/ptp_pch.h b/include/linux/ptp_pch.h new file mode 100644 index 000000000000..51818198c292 --- /dev/null +++ b/include/linux/ptp_pch.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * PTP PCH + * + * Copyright 2019 Linaro Ltd. 
+ * + * Author Lee Jones <lee.jones@linaro.org> + */ + +#ifndef _PTP_PCH_H_ +#define _PTP_PCH_H_ + +void pch_ch_control_write(struct pci_dev *pdev, u32 val); +u32 pch_ch_event_read(struct pci_dev *pdev); +void pch_ch_event_write(struct pci_dev *pdev, u32 val); +u32 pch_src_uuid_lo_read(struct pci_dev *pdev); +u32 pch_src_uuid_hi_read(struct pci_dev *pdev); +u64 pch_rx_snap_read(struct pci_dev *pdev); +u64 pch_tx_snap_read(struct pci_dev *pdev); +int pch_set_station_address(u8 *addr, struct pci_dev *pdev); + +#endif /* _PTP_PCH_H_ */ diff --git a/include/linux/pwm.h b/include/linux/pwm.h index e4d84d4db293..5bb90af4997e 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -91,6 +91,11 @@ struct pwm_device { * pwm_get_state() - retrieve the current PWM state * @pwm: PWM device * @state: state to fill with the current PWM state + * + * The returned PWM state represents the state that was applied by a previous call to + * pwm_apply_state(). Drivers may have to slightly tweak that state before programming it to + * hardware. If pwm_apply_state() was never called, this returns either the current hardware + * state (if supported) or the default settings. */ static inline void pwm_get_state(const struct pwm_device *pwm, struct pwm_state *state) @@ -392,8 +397,6 @@ int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result, int pwm_set_chip_data(struct pwm_device *pwm, void *data); void *pwm_get_chip_data(struct pwm_device *pwm); -int pwmchip_add_with_polarity(struct pwm_chip *chip, - enum pwm_polarity polarity); int pwmchip_add(struct pwm_chip *chip); int pwmchip_remove(struct pwm_chip *chip); struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h index c4fdb4463f7d..7c811eebcaab 100644 --- a/include/linux/qcom-geni-se.h +++ b/include/linux/qcom-geni-se.h @@ -47,7 +47,6 @@ struct geni_icc_path { * @num_clk_levels: Number of valid clock levels in clk_perf_tbl * @clk_perf_tbl: Table of clock frequency input to serial engine clock * @icc_paths: Array of ICC paths for SE - * @opp_table: Pointer to the OPP table */ struct geni_se { void __iomem *base; @@ -57,7 +56,6 @@ struct geni_se { unsigned int num_clk_levels; unsigned long *clk_perf_tbl; struct geni_icc_path icc_paths[3]; - struct opp_table *opp_table; }; /* Common SE registers */ diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index e339b48de32d..f34dbd0db795 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -19,7 +19,7 @@ enum qed_chain_mode { /* Each Page contains a next pointer at its end */ QED_CHAIN_MODE_NEXT_PTR, - /* Chain is a single page (next ptr) is unrequired */ + /* Chain is a single page (next ptr) is not required */ QED_CHAIN_MODE_SINGLE, /* Page pointers are located in a side list */ @@ -56,13 +56,13 @@ struct qed_chain_pbl_u32 { }; struct qed_chain_u16 { - /* Cyclic index of next element to produce/consme */ + /* Cyclic index of next element to produce/consume */ u16 prod_idx; u16 cons_idx; }; struct qed_chain_u32 { - /* Cyclic index of next element to produce/consme */ + /* Cyclic index of next element to produce/consume */ u32 prod_idx; u32 cons_idx; }; @@ -270,7 +270,7 @@ static inline dma_addr_t qed_chain_get_pbl_phys(const struct qed_chain *chain) /** * @brief qed_chain_advance_page - * - * Advance the next element accros pages for a linked chain + * Advance the next element across pages for a linked chain * * @param p_chain * @param p_next_elem diff --git 
a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index 2f64ed79cee9..ea273ba1c991 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -12,7 +12,6 @@ #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/skbuff.h> -#include <linux/version.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/qed/qed_if.h> diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h new file mode 100644 index 000000000000..bebc911161b6 --- /dev/null +++ b/include/linux/randomize_kstack.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _LINUX_RANDOMIZE_KSTACK_H +#define _LINUX_RANDOMIZE_KSTACK_H + +#include <linux/kernel.h> +#include <linux/jump_label.h> +#include <linux/percpu-defs.h> + +DECLARE_STATIC_KEY_MAYBE(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, + randomize_kstack_offset); +DECLARE_PER_CPU(u32, kstack_offset); + +/* + * Do not use this anywhere else in the kernel. This is used here because + * it provides an arch-agnostic way to grow the stack with correct + * alignment. Also, since this use is being explicitly masked to a max of + * 10 bits, stack-clash style attacks are unlikely. For more details see + * "VLAs" in Documentation/process/deprecated.rst + */ +void *__builtin_alloca(size_t size); +/* + * Use, at most, 10 bits of entropy. We explicitly cap this to keep the + * "VLA" from being unbounded (see above). 10 bits leaves enough room for + * per-arch offset masks to reduce entropy (by removing higher bits, since + * high entropy may overly constrain usable stack space), and for + * compiler/arch-specific stack alignment to remove the lower bits. + */ +#define KSTACK_OFFSET_MAX(x) ((x) & 0x3FF) + +/* + * These macros must be used during syscall entry when interrupts and + * preempt are disabled, and after user registers have been stored to + * the stack. + */ +#define add_random_kstack_offset() do { \ + if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \ + &randomize_kstack_offset)) { \ + u32 offset = raw_cpu_read(kstack_offset); \ + u8 *ptr = __builtin_alloca(KSTACK_OFFSET_MAX(offset)); \ + /* Keep allocation even after "ptr" loses scope. */ \ + asm volatile("" :: "r"(ptr) : "memory"); \ + } \ +} while (0) + +#define choose_random_kstack_offset(rand) do { \ + if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \ + &randomize_kstack_offset)) { \ + u32 offset = raw_cpu_read(kstack_offset); \ + offset ^= (rand); \ + raw_cpu_write(kstack_offset, offset); \ + } \ +} while (0) + +#endif diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h index 8afe886e85f1..3db96c4f45fd 100644 --- a/include/linux/rcu_segcblist.h +++ b/include/linux/rcu_segcblist.h @@ -109,7 +109,7 @@ struct rcu_cblist { * | SEGCBLIST_KTHREAD_GP | * | | * | Kthreads handle callbacks holding nocb_lock, local rcu_core() stops | - * | handling callbacks. | + * | handling callbacks. Enable bypass queueing. | * ---------------------------------------------------------------------------- */ @@ -125,7 +125,7 @@ struct rcu_cblist { * | SEGCBLIST_KTHREAD_GP | * | | * | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() | - * | ignores callbacks. | + * | ignores callbacks. Bypass enqueue is enabled. | * ---------------------------------------------------------------------------- * | * v @@ -134,7 +134,8 @@ struct rcu_cblist { * | SEGCBLIST_KTHREAD_GP | * | | * | CB/GP kthreads and local rcu_core() handle callbacks concurrently | - * | holding nocb_lock. 
Wake up CB and GP kthreads if necessary. | + * | holding nocb_lock. Wake up CB and GP kthreads if necessary. Disable | + * | bypass enqueue. | * ---------------------------------------------------------------------------- * | * v diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h index ff3e94779e73..d8afdb8784c1 100644 --- a/include/linux/rculist_nulls.h +++ b/include/linux/rculist_nulls.h @@ -161,7 +161,7 @@ static inline void hlist_nulls_add_fake(struct hlist_nulls_node *n) * * The barrier() is needed to make sure compiler doesn't cache first element [1], * as this loop can be restarted [2] - * [1] Documentation/core-api/atomic_ops.rst around line 114 + * [1] Documentation/memory-barriers.txt around line 1533 * [2] Documentation/RCU/rculist_nulls.rst around line 146 */ #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index bd04f722714f..9455476c5ba2 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -334,7 +334,8 @@ static inline void rcu_preempt_sleep_check(void) { } #define rcu_sleep_check() \ do { \ rcu_preempt_sleep_check(); \ - RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \ + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \ + RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \ "Illegal context switch in RCU-bh read-side critical section"); \ RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \ "Illegal context switch in RCU-sched read-side critical section"); \ @@ -881,7 +882,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) * The BUILD_BUG_ON check must not involve any function calls, hence the * checks are done in macros here. */ -#define kfree_rcu kvfree_rcu +#define kfree_rcu(ptr, rhf...) kvfree_rcu(ptr, ## rhf) /** * kvfree_rcu() - kvfree an object after a grace period. diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 2a97334eb786..35e0be326ffc 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -17,10 +17,9 @@ /* Never flag non-existent other CPUs! */ static inline bool rcu_eqs_special_set(int cpu) { return false; } -static inline unsigned long get_state_synchronize_rcu(void) -{ - return 0; -} +unsigned long get_state_synchronize_rcu(void); +unsigned long start_poll_synchronize_rcu(void); +bool poll_state_synchronize_rcu(unsigned long oldstate); static inline void cond_synchronize_rcu(unsigned long oldstate) { diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index df578b73960f..b89b54130f49 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -41,6 +41,8 @@ void rcu_momentary_dyntick_idle(void); void kfree_rcu_scheduler_running(void); bool rcu_gp_might_be_stalled(void); unsigned long get_state_synchronize_rcu(void); +unsigned long start_poll_synchronize_rcu(void); +bool poll_state_synchronize_rcu(unsigned long oldstate); void cond_synchronize_rcu(unsigned long oldstate); void rcu_idle_enter(void); diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 2cc4ecd36298..f87a11a5cc4a 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -1378,6 +1378,9 @@ struct regmap_irq_sub_irq_map { * status_base. Should contain num_regs arrays. * Can be provided for chips with more complex mapping than * 1.st bit to 1.st sub-reg, 2.nd bit to 2.nd sub-reg, ... + * When used with not_fixed_stride, each one-element array + * member contains offset calculated as address from each + * peripheral to first peripheral. 
* @num_main_regs: Number of 'main status' irq registers for chips which have * main_status set. * @@ -1390,6 +1393,7 @@ struct regmap_irq_sub_irq_map { * Using zero value is possible with @use_ack bit. * @wake_base: Base address for wake enables. If zero unsupported. * @type_base: Base address for irq type. If zero unsupported. + * @virt_reg_base: Base addresses for extra config regs. * @irq_reg_stride: Stride to use for chips where registers are not contiguous. * @init_ack_masked: Ack all masked interrupts once during initalization. * @mask_invert: Inverted mask register: cleared bits are masked out. @@ -1404,6 +1408,9 @@ struct regmap_irq_sub_irq_map { * @clear_on_unmask: For chips with interrupts cleared on read: read the status * registers before unmasking interrupts to clear any bits * set when they were masked. + * @not_fixed_stride: Used when chip peripherals are not laid out with fixed + * stride. Must be used with sub_reg_offsets containing the + * offsets to each peripheral. * @runtime_pm: Hold a runtime PM lock on the device when accessing it. * * @num_regs: Number of registers in each control bank. @@ -1411,12 +1418,16 @@ struct regmap_irq_sub_irq_map { * assigned based on the index in the array of the interrupt. * @num_irqs: Number of descriptors. * @num_type_reg: Number of type registers. + * @num_virt_regs: Number of non-standard irq configuration registers. + * If zero unsupported. * @type_reg_stride: Stride to use for chips where type registers are not * contiguous. * @handle_pre_irq: Driver specific callback to handle interrupt from device * before regmap_irq_handler process the interrupts. * @handle_post_irq: Driver specific callback to handle interrupt from device * after handling the interrupts in regmap_irq_handler(). + * @set_type_virt: Driver specific callback to extend regmap_irq_set_type() + * and configure virt regs. * @irq_drv_data: Driver specific IRQ data which is passed as parameter when * driver specific pre/post interrupt handler is called. 
* @@ -1438,6 +1449,7 @@ struct regmap_irq_chip { unsigned int ack_base; unsigned int wake_base; unsigned int type_base; + unsigned int *virt_reg_base; unsigned int irq_reg_stride; bool mask_writeonly:1; bool init_ack_masked:1; @@ -1450,6 +1462,7 @@ struct regmap_irq_chip { bool type_invert:1; bool type_in_mask:1; bool clear_on_unmask:1; + bool not_fixed_stride:1; int num_regs; @@ -1457,10 +1470,13 @@ struct regmap_irq_chip { int num_irqs; int num_type_reg; + int num_virt_regs; unsigned int type_reg_stride; int (*handle_pre_irq)(void *irq_drv_data); int (*handle_post_irq)(void *irq_drv_data); + int (*set_type_virt)(unsigned int **buf, unsigned int type, + unsigned long hwirq, int reg); void *irq_drv_data; }; diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index d7c77ee370f3..4ea520c248e9 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -373,6 +373,10 @@ struct regulator_desc { unsigned int pull_down_reg; unsigned int pull_down_mask; unsigned int pull_down_val_on; + unsigned int ramp_reg; + unsigned int ramp_mask; + const unsigned int *ramp_delay_table; + unsigned int n_ramp_values; unsigned int enable_time; @@ -472,7 +476,7 @@ struct regulator_dev { unsigned int is_switch:1; /* time when this regulator was disabled last time */ - unsigned long last_off_jiffy; + ktime_t last_off; }; struct regulator_dev * @@ -535,6 +539,7 @@ int regulator_set_current_limit_regmap(struct regulator_dev *rdev, int min_uA, int max_uA); int regulator_get_current_limit_regmap(struct regulator_dev *rdev); void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data); +int regulator_set_ramp_delay_regmap(struct regulator_dev *rdev, int ramp_delay); /* * Helper functions intended to be used by regulator drivers prior registering @@ -543,4 +548,6 @@ void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data); int regulator_desc_list_voltage_linear_range(const struct regulator_desc *desc, unsigned int selector); +int regulator_desc_list_voltage_linear(const struct regulator_desc *desc, + unsigned int selector); #endif diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index f28ee75d1005..8b795b544f75 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -315,6 +315,7 @@ struct rproc; /** * struct rproc_mem_entry - memory entry descriptor * @va: virtual address + * @is_iomem: io memory * @dma: dma address * @len: length, in bytes * @da: device address @@ -329,6 +330,7 @@ struct rproc; */ struct rproc_mem_entry { void *va; + bool is_iomem; dma_addr_t dma; size_t len; u32 da; @@ -361,6 +363,7 @@ enum rsc_handling_status { * @start: power on the device and boot it * @stop: power off the device * @attach: attach to a device that his already powered up + * @detach: detach from a device, leaving it powered up * @kick: kick a virtqueue (virtqueue id given as a parameter) * @da_to_va: optional platform hook to perform address translations * @parse_fw: parse firmware to extract information (e.g. 
resource table) @@ -368,7 +371,9 @@ enum rsc_handling_status { * RSC_HANDLED if resource was handled, RSC_IGNORED if not handled and a * negative value on error * @load_rsc_table: load resource table from firmware image - * @find_loaded_rsc_table: find the loaded resouce table + * @find_loaded_rsc_table: find the loaded resource table from firmware image + * @get_loaded_rsc_table: get resource table installed in memory + * by external entity * @load: load firmware to memory, where the remote processor * expects to find it * @sanity_check: sanity check the fw image @@ -383,13 +388,16 @@ struct rproc_ops { int (*start)(struct rproc *rproc); int (*stop)(struct rproc *rproc); int (*attach)(struct rproc *rproc); + int (*detach)(struct rproc *rproc); void (*kick)(struct rproc *rproc, int vqid); - void * (*da_to_va)(struct rproc *rproc, u64 da, size_t len); + void * (*da_to_va)(struct rproc *rproc, u64 da, size_t len, bool *is_iomem); int (*parse_fw)(struct rproc *rproc, const struct firmware *fw); int (*handle_rsc)(struct rproc *rproc, u32 rsc_type, void *rsc, int offset, int avail); struct resource_table *(*find_loaded_rsc_table)( struct rproc *rproc, const struct firmware *fw); + struct resource_table *(*get_loaded_rsc_table)( + struct rproc *rproc, size_t *size); int (*load)(struct rproc *rproc, const struct firmware *fw); int (*sanity_check)(struct rproc *rproc, const struct firmware *fw); u64 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw); @@ -405,6 +413,8 @@ struct rproc_ops { * @RPROC_RUNNING: device is up and running * @RPROC_CRASHED: device has crashed; need to start recovery * @RPROC_DELETED: device is deleted + * @RPROC_ATTACHED: device has been booted by another entity and the core + * has attached to it * @RPROC_DETACHED: device has been booted by another entity and waiting * for the core to attach to it * @RPROC_LAST: just keep this one at the end @@ -421,8 +431,9 @@ enum rproc_state { RPROC_RUNNING = 2, RPROC_CRASHED = 3, RPROC_DELETED = 4, - RPROC_DETACHED = 5, - RPROC_LAST = 6, + RPROC_ATTACHED = 5, + RPROC_DETACHED = 6, + RPROC_LAST = 7, }; /** @@ -505,11 +516,12 @@ struct rproc_dump_segment { * @recovery_disabled: flag that state if recovery was disabled * @max_notifyid: largest allocated notify id. * @table_ptr: pointer to the resource table in effect + * @clean_table: copy of the resource table without modifications. 
Used + * when a remote processor is attached or detached from the core * @cached_table: copy of the resource table * @table_sz: size of @cached_table * @has_iommu: flag to indicate if remote processor is behind an MMU * @auto_boot: flag to indicate if remote processor should be auto-started - * @autonomous: true if an external entity has booted the remote processor * @dump_segments: list of segments in the firmware * @nb_vdev: number of vdev currently handled by rproc * @char_dev: character device of the rproc @@ -542,11 +554,11 @@ struct rproc { bool recovery_disabled; int max_notifyid; struct resource_table *table_ptr; + struct resource_table *clean_table; struct resource_table *cached_table; size_t table_sz; bool has_iommu; bool auto_boot; - bool autonomous; struct list_head dump_segments; int nb_vdev; u8 elf_class; @@ -655,6 +667,7 @@ rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len, int rproc_boot(struct rproc *rproc); void rproc_shutdown(struct rproc *rproc); +int rproc_detach(struct rproc *rproc); int rproc_set_firmware(struct rproc *rproc, const char *fw_name); void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type); void rproc_coredump_using_sections(struct rproc *rproc); diff --git a/include/linux/reset.h b/include/linux/reset.h index 46e6372cb431..db0e6115a2f6 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -76,6 +76,11 @@ static inline int reset_control_reset(struct reset_control *rstc) return 0; } +static inline int reset_control_rearm(struct reset_control *rstc) +{ + return 0; +} + static inline int reset_control_assert(struct reset_control *rstc) { return 0; diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 136ea0997e6d..dac53fd3afea 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -61,7 +61,8 @@ enum ring_buffer_type { unsigned ring_buffer_event_length(struct ring_buffer_event *event); void *ring_buffer_event_data(struct ring_buffer_event *event); -u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event); +u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer, + struct ring_buffer_event *event); /* * ring_buffer_discard_commit will remove an event that has not @@ -180,7 +181,7 @@ unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cp unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu); unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu); -u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu); +u64 ring_buffer_time_stamp(struct trace_buffer *buffer); void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer, int cpu, u64 *ts); void ring_buffer_set_clock(struct trace_buffer *buffer, diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h index a5db828b2420..d97dcd049f18 100644 --- a/include/linux/rpmsg.h +++ b/include/linux/rpmsg.h @@ -18,8 +18,7 @@ #include <linux/mutex.h> #include <linux/poll.h> #include <linux/rpmsg/byteorder.h> - -#define RPMSG_ADDR_ANY 0xFFFFFFFF +#include <uapi/linux/rpmsg.h> struct rpmsg_device; struct rpmsg_endpoint; diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index 6fd615a0eea9..d1672de9ca89 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -31,12 +31,6 @@ struct rt_mutex { raw_spinlock_t wait_lock; struct rb_root_cached waiters; struct task_struct *owner; -#ifdef CONFIG_DEBUG_RT_MUTEXES - int save_state; - const char *name, *file; - int line; - void *magic; -#endif #ifdef 
CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif @@ -46,35 +40,17 @@ struct rt_mutex_waiter; struct hrtimer_sleeper; #ifdef CONFIG_DEBUG_RT_MUTEXES - extern int rt_mutex_debug_check_no_locks_freed(const void *from, - unsigned long len); - extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task); +extern void rt_mutex_debug_task_free(struct task_struct *tsk); #else - static inline int rt_mutex_debug_check_no_locks_freed(const void *from, - unsigned long len) - { - return 0; - } -# define rt_mutex_debug_check_no_locks_held(task) do { } while (0) +static inline void rt_mutex_debug_task_free(struct task_struct *tsk) { } #endif -#ifdef CONFIG_DEBUG_RT_MUTEXES -# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ - , .name = #mutexname, .file = __FILE__, .line = __LINE__ - -# define rt_mutex_init(mutex) \ +#define rt_mutex_init(mutex) \ do { \ static struct lock_class_key __key; \ __rt_mutex_init(mutex, __func__, &__key); \ } while (0) - extern void rt_mutex_debug_task_free(struct task_struct *tsk); -#else -# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) -# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL, NULL) -# define rt_mutex_debug_task_free(t) do { } while (0) -#endif - #ifdef CONFIG_DEBUG_LOCK_ALLOC #define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \ , .dep_map = { .name = #mutexname } @@ -86,7 +62,6 @@ do { \ { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ , .waiters = RB_ROOT_CACHED \ , .owner = NULL \ - __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)} #define DEFINE_RT_MUTEX(mutexname) \ @@ -104,7 +79,6 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock) } extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key); -extern void rt_mutex_destroy(struct rt_mutex *lock); #ifdef CONFIG_DEBUG_LOCK_ALLOC extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass); @@ -115,9 +89,6 @@ extern void rt_mutex_lock(struct rt_mutex *lock); #endif extern int rt_mutex_lock_interruptible(struct rt_mutex *lock); -extern int rt_mutex_timed_lock(struct rt_mutex *lock, - struct hrtimer_sleeper *timeout); - extern int rt_mutex_trylock(struct rt_mutex *lock); extern void rt_mutex_unlock(struct rt_mutex *lock); diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 4c715be48717..a66038d88878 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -110,7 +110,7 @@ do { \ /* * This is the same regardless of which rwsem implementation that is being used. - * It is just a heuristic meant to be called by somebody alreadying holding the + * It is just a heuristic meant to be called by somebody already holding the * rwsem to see if somebody from an incompatible type is wanting access to the * lock. */ diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index 74cc6384715e..2713e689ad66 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -57,9 +57,22 @@ struct sbitmap { unsigned int map_nr; /** + * @round_robin: Allocate bits in strict round-robin order. + */ + bool round_robin; + + /** * @map: Allocated bitmap. */ struct sbitmap_word *map; + + /* + * @alloc_hint: Cache of last successfully allocated or freed bit. + * + * This is per-cpu, which allows multiple users to stick to different + * cachelines until the map is exhausted. 
+ */ + unsigned int __percpu *alloc_hint; }; #define SBQ_WAIT_QUEUES 8 @@ -95,14 +108,6 @@ struct sbitmap_queue { */ struct sbitmap sb; - /* - * @alloc_hint: Cache of last successfully allocated or freed bit. - * - * This is per-cpu, which allows multiple users to stick to different - * cachelines until the map is exhausted. - */ - unsigned int __percpu *alloc_hint; - /** * @wake_batch: Number of bits which must be freed before we wake up any * waiters. @@ -125,11 +130,6 @@ struct sbitmap_queue { atomic_t ws_active; /** - * @round_robin: Allocate bits in strict round-robin order. - */ - bool round_robin; - - /** * @min_shallow_depth: The minimum shallow depth which may be passed to * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow(). */ @@ -144,11 +144,16 @@ struct sbitmap_queue { * given, a good default is chosen. * @flags: Allocation flags. * @node: Memory node to allocate on. + * @round_robin: If true, be stricter about allocation order; always allocate + * starting from the last allocated bit. This is less efficient + * than the default behavior (false). + * @alloc_hint: If true, apply percpu hint for where to start searching for + * a free bit. * * Return: Zero on success or negative errno on failure. */ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, - gfp_t flags, int node); + gfp_t flags, int node, bool round_robin, bool alloc_hint); /** * sbitmap_free() - Free memory used by a &struct sbitmap. @@ -156,6 +161,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, */ static inline void sbitmap_free(struct sbitmap *sb) { + free_percpu(sb->alloc_hint); kfree(sb->map); sb->map = NULL; } @@ -173,22 +179,17 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth); /** * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap. * @sb: Bitmap to allocate from. - * @alloc_hint: Hint for where to start searching for a free bit. - * @round_robin: If true, be stricter about allocation order; always allocate - * starting from the last allocated bit. This is less efficient - * than the default behavior (false). * * This operation provides acquire barrier semantics if it succeeds. * * Return: Non-negative allocated bit number if successful, -1 otherwise. */ -int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin); +int sbitmap_get(struct sbitmap *sb); /** * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap, * limiting the depth used from each word. * @sb: Bitmap to allocate from. - * @alloc_hint: Hint for where to start searching for a free bit. * @shallow_depth: The maximum number of bits to allocate from a single word. * * This rather specific operation allows for having multiple users with @@ -200,8 +201,7 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin); * * Return: Non-negative allocated bit number if successful, -1 otherwise. */ -int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint, - unsigned long shallow_depth); +int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth); /** * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap. @@ -315,10 +315,16 @@ static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int b set_bit(SB_NR_TO_BIT(sb, bitnr), addr); } -static inline void sbitmap_clear_bit_unlock(struct sbitmap *sb, - unsigned int bitnr) +/* + * Pair of sbitmap_get, and this one applies both cleared bit and + * allocation hint. 
+ */ +static inline void sbitmap_put(struct sbitmap *sb, unsigned int bitnr) { - clear_bit_unlock(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); + sbitmap_deferred_clear_bit(sb, bitnr); + + if (likely(sb->alloc_hint && !sb->round_robin && bitnr < sb->depth)) + *raw_cpu_ptr(sb->alloc_hint) = bitnr; } static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr) @@ -326,6 +332,24 @@ static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr) return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); } +static inline int sbitmap_calculate_shift(unsigned int depth) +{ + int shift = ilog2(BITS_PER_LONG); + + /* + * If the bitmap is small, shrink the number of bits per word so + * we spread over a few cachelines, at least. If less than 4 + * bits, just forget about it, it's not going to work optimally + * anyway. + */ + if (depth >= 4) { + while ((4U << shift) > depth) + shift--; + } + + return shift; +} + /** * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file. * @sb: Bitmap to show. @@ -335,6 +359,16 @@ static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr) */ void sbitmap_show(struct sbitmap *sb, struct seq_file *m); + +/** + * sbitmap_weight() - Return how many set and not cleared bits in a &struct + * sbitmap. + * @sb: Bitmap to check. + * + * Return: How many set and not cleared bits set + */ +unsigned int sbitmap_weight(const struct sbitmap *sb); + /** * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct * seq_file. @@ -369,7 +403,6 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, static inline void sbitmap_queue_free(struct sbitmap_queue *sbq) { kfree(sbq->ws); - free_percpu(sbq->alloc_hint); sbitmap_free(&sbq->sb); } diff --git a/include/linux/sched.h b/include/linux/sched.h index ef00bb22164c..d2c881384517 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -14,7 +14,6 @@ #include <linux/pid.h> #include <linux/sem.h> #include <linux/shm.h> -#include <linux/kcov.h> #include <linux/mutex.h> #include <linux/plist.h> #include <linux/hrtimer.h> @@ -42,6 +41,7 @@ struct audit_context; struct backing_dev_info; struct bio_list; struct blk_plug; +struct bpf_local_storage; struct capture_control; struct cfs_rq; struct fs_struct; @@ -841,6 +841,10 @@ struct task_struct { /* Stalled due to lack of memory */ unsigned in_memstall:1; #endif +#ifdef CONFIG_PAGE_OWNER + /* Used by page_owner=on to detect recursion in page tracking. */ + unsigned in_page_owner:1; +#endif unsigned long atomic_flags; /* Flags requiring atomic access. 
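The sbitmap rework above moves the round_robin policy and the per-cpu alloc_hint cache from sbitmap_queue into struct sbitmap itself, so both are now chosen at init time and the get/put paths no longer take a hint argument. A minimal sketch of the updated calling convention, assuming a caller-chosen depth and GFP_KERNEL context (the example_* functions are illustrative, not taken from any driver):

    #include <linux/sbitmap.h>

    static int example_init_tags(struct sbitmap *sb, unsigned int depth)
    {
            int shift = sbitmap_calculate_shift(depth);

            /* round_robin = false, alloc_hint = true: use the per-cpu hint cache */
            return sbitmap_init_node(sb, depth, shift, GFP_KERNEL, NUMA_NO_NODE,
                                     false, true);
    }

    static int example_get_tag(struct sbitmap *sb)
    {
            /* the hint and round-robin policy now live inside the sbitmap */
            return sbitmap_get(sb);
    }

    static void example_put_tag(struct sbitmap *sb, unsigned int tag)
    {
            /* deferred-clears the bit and refreshes the per-cpu hint */
            sbitmap_put(sb, tag);
    }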
*/ @@ -985,6 +989,7 @@ struct task_struct { /* Signal handlers: */ struct signal_struct *signal; struct sighand_struct __rcu *sighand; + struct sigqueue *sigqueue_cache; sigset_t blocked; sigset_t real_blocked; /* Restored if set_restore_sigmask() was used: */ @@ -1044,6 +1049,9 @@ struct task_struct { int softirq_context; int irq_config; #endif +#ifdef CONFIG_PREEMPT_RT + int softirq_disable_cnt; +#endif #ifdef CONFIG_LOCKDEP # define MAX_LOCK_DEPTH 48UL @@ -1098,7 +1106,7 @@ struct task_struct { #ifdef CONFIG_CPUSETS /* Protected by ->alloc_lock: */ nodemask_t mems_allowed; - /* Seqence number to catch updates: */ + /* Sequence number to catch updates: */ seqcount_spinlock_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; @@ -1351,6 +1359,10 @@ struct task_struct { /* Used by LSM modules for access restriction: */ void *security; #endif +#ifdef CONFIG_BPF_SYSCALL + /* Used by BPF task local storage */ + struct bpf_local_storage __rcu *bpf_storage; +#endif #ifdef CONFIG_GCC_PLUGIN_STACKLEAK unsigned long lowest_stack; @@ -1571,7 +1583,7 @@ extern struct pid *cad_pid; #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ -#define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation request will have _GFP_MOVABLE cleared */ +#define PF_MEMALLOC_PIN 0x10000000 /* Allocation context constrained to zones which allow long term pinning. */ #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 90b2a0bce11c..e24b1fe348e3 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -151,12 +151,13 @@ static inline bool in_vfork(struct task_struct *tsk) * Applies per-task gfp context to the given allocation flags. 
* PF_MEMALLOC_NOIO implies GFP_NOIO * PF_MEMALLOC_NOFS implies GFP_NOFS + * PF_MEMALLOC_PIN implies !GFP_MOVABLE */ static inline gfp_t current_gfp_context(gfp_t flags) { unsigned int pflags = READ_ONCE(current->flags); - if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) { + if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) { /* * NOIO implies both NOIO and NOFS and it is a weaker context * so always make sure it makes precedence @@ -165,6 +166,9 @@ static inline gfp_t current_gfp_context(gfp_t flags) flags &= ~(__GFP_IO | __GFP_FS); else if (pflags & PF_MEMALLOC_NOFS) flags &= ~__GFP_FS; + + if (pflags & PF_MEMALLOC_PIN) + flags &= ~__GFP_MOVABLE; } return flags; } @@ -271,29 +275,18 @@ static inline void memalloc_noreclaim_restore(unsigned int flags) current->flags = (current->flags & ~PF_MEMALLOC) | flags; } -#ifdef CONFIG_CMA -static inline unsigned int memalloc_nocma_save(void) +static inline unsigned int memalloc_pin_save(void) { - unsigned int flags = current->flags & PF_MEMALLOC_NOCMA; + unsigned int flags = current->flags & PF_MEMALLOC_PIN; - current->flags |= PF_MEMALLOC_NOCMA; + current->flags |= PF_MEMALLOC_PIN; return flags; } -static inline void memalloc_nocma_restore(unsigned int flags) -{ - current->flags = (current->flags & ~PF_MEMALLOC_NOCMA) | flags; -} -#else -static inline unsigned int memalloc_nocma_save(void) -{ - return 0; -} - -static inline void memalloc_nocma_restore(unsigned int flags) +static inline void memalloc_pin_restore(unsigned int flags) { + current->flags = (current->flags & ~PF_MEMALLOC_PIN) | flags; } -#endif #ifdef CONFIG_MEMCG DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg); diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 3f6a0fcaa10c..7f4278fa21fe 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -326,6 +326,7 @@ int send_sig_mceerr(int code, void __user *, short, struct task_struct *); int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper); int force_sig_pkuerr(void __user *addr, u32 pkey); +int force_sig_perf(void __user *addr, u32 type, u64 sig_data); int force_sig_ptrace_errno_trap(int errno, void __user *addr); diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 3c31ba88aca5..db2c0f34aaaf 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -26,10 +26,11 @@ int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, enum { sysctl_hung_task_timeout_secs = 0 }; #endif +extern unsigned int sysctl_sched_child_runs_first; + extern unsigned int sysctl_sched_latency; extern unsigned int sysctl_sched_min_granularity; extern unsigned int sysctl_sched_wakeup_granularity; -extern unsigned int sysctl_sched_child_runs_first; enum sched_tunable_scaling { SCHED_TUNABLESCALING_NONE, @@ -37,7 +38,7 @@ enum sched_tunable_scaling { SCHED_TUNABLESCALING_LINEAR, SCHED_TUNABLESCALING_END, }; -extern enum sched_tunable_scaling sysctl_sched_tunable_scaling; +extern unsigned int sysctl_sched_tunable_scaling; extern unsigned int sysctl_numa_balancing_scan_delay; extern unsigned int sysctl_numa_balancing_scan_period_min; @@ -48,8 +49,8 @@ extern unsigned int sysctl_numa_balancing_scan_size; extern __read_mostly unsigned int sysctl_sched_migration_cost; extern __read_mostly unsigned int sysctl_sched_nr_migrate; -int sched_proc_update_handler(struct ctl_table *table, int write, - void *buffer, size_t *length, loff_t *ppos); +extern int sysctl_resched_latency_warn_ms; +extern 
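With PF_MEMALLOC_NOCMA replaced by PF_MEMALLOC_PIN, the save/restore pair is now available unconditionally (no CONFIG_CMA dependency) and current_gfp_context() strips __GFP_MOVABLE for the duration of the scope. A minimal sketch of the scoped usage, with the actual pinning work elided:

    static void example_pin_scope(void)
    {
            unsigned int flags = memalloc_pin_save();

            /*
             * Allocations made in this scope have __GFP_MOVABLE cleared by
             * current_gfp_context(), so pages come from zones that allow
             * long-term pinning.
             */

            /* ... perform the long-term pinning work here ... */

            memalloc_pin_restore(flags);
    }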
int sysctl_resched_latency_warn_once; #endif /* diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h index a8ec3b6093fc..3632c5d6ec55 100644 --- a/include/linux/sched/user.h +++ b/include/linux/sched/user.h @@ -14,9 +14,6 @@ struct user_struct { refcount_t __count; /* reference count */ atomic_t processes; /* How many processes does this user have? */ atomic_t sigpending; /* How many pending signals does this user have? */ -#ifdef CONFIG_FANOTIFY - atomic_t fanotify_listeners; -#endif #ifdef CONFIG_EPOLL atomic_long_t epoll_watches; /* The number of file descriptors currently watched */ #endif diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index ecb3aad1a964..79d0a1237e6c 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -2,7 +2,7 @@ /* * SCMI Message Protocol driver header * - * Copyright (C) 2018 ARM Ltd. + * Copyright (C) 2018-2021 ARM Ltd. */ #ifndef _LINUX_SCMI_PROTOCOL_H @@ -57,9 +57,11 @@ struct scmi_clock_info { }; struct scmi_handle; +struct scmi_device; +struct scmi_protocol_handle; /** - * struct scmi_clk_ops - represents the various operations provided + * struct scmi_clk_proto_ops - represents the various operations provided * by SCMI Clock Protocol * * @count_get: get the count of clocks provided by SCMI @@ -69,21 +71,21 @@ struct scmi_handle; * @enable: enables the specified clock * @disable: disables the specified clock */ -struct scmi_clk_ops { - int (*count_get)(const struct scmi_handle *handle); +struct scmi_clk_proto_ops { + int (*count_get)(const struct scmi_protocol_handle *ph); const struct scmi_clock_info *(*info_get) - (const struct scmi_handle *handle, u32 clk_id); - int (*rate_get)(const struct scmi_handle *handle, u32 clk_id, + (const struct scmi_protocol_handle *ph, u32 clk_id); + int (*rate_get)(const struct scmi_protocol_handle *ph, u32 clk_id, u64 *rate); - int (*rate_set)(const struct scmi_handle *handle, u32 clk_id, + int (*rate_set)(const struct scmi_protocol_handle *ph, u32 clk_id, u64 rate); - int (*enable)(const struct scmi_handle *handle, u32 clk_id); - int (*disable)(const struct scmi_handle *handle, u32 clk_id); + int (*enable)(const struct scmi_protocol_handle *ph, u32 clk_id); + int (*disable)(const struct scmi_protocol_handle *ph, u32 clk_id); }; /** - * struct scmi_perf_ops - represents the various operations provided + * struct scmi_perf_proto_ops - represents the various operations provided * by SCMI Performance Protocol * * @limits_set: sets limits on the performance level of a domain @@ -100,33 +102,33 @@ struct scmi_clk_ops { * @est_power_get: gets the estimated power cost for a given performance domain * at a given frequency */ -struct scmi_perf_ops { - int (*limits_set)(const struct scmi_handle *handle, u32 domain, +struct scmi_perf_proto_ops { + int (*limits_set)(const struct scmi_protocol_handle *ph, u32 domain, u32 max_perf, u32 min_perf); - int (*limits_get)(const struct scmi_handle *handle, u32 domain, + int (*limits_get)(const struct scmi_protocol_handle *ph, u32 domain, u32 *max_perf, u32 *min_perf); - int (*level_set)(const struct scmi_handle *handle, u32 domain, + int (*level_set)(const struct scmi_protocol_handle *ph, u32 domain, u32 level, bool poll); - int (*level_get)(const struct scmi_handle *handle, u32 domain, + int (*level_get)(const struct scmi_protocol_handle *ph, u32 domain, u32 *level, bool poll); int (*device_domain_id)(struct device *dev); - int (*transition_latency_get)(const struct scmi_handle *handle, + int (*transition_latency_get)(const 
struct scmi_protocol_handle *ph, struct device *dev); - int (*device_opps_add)(const struct scmi_handle *handle, + int (*device_opps_add)(const struct scmi_protocol_handle *ph, struct device *dev); - int (*freq_set)(const struct scmi_handle *handle, u32 domain, + int (*freq_set)(const struct scmi_protocol_handle *ph, u32 domain, unsigned long rate, bool poll); - int (*freq_get)(const struct scmi_handle *handle, u32 domain, + int (*freq_get)(const struct scmi_protocol_handle *ph, u32 domain, unsigned long *rate, bool poll); - int (*est_power_get)(const struct scmi_handle *handle, u32 domain, + int (*est_power_get)(const struct scmi_protocol_handle *ph, u32 domain, unsigned long *rate, unsigned long *power); - bool (*fast_switch_possible)(const struct scmi_handle *handle, + bool (*fast_switch_possible)(const struct scmi_protocol_handle *ph, struct device *dev); - bool (*power_scale_mw_get)(const struct scmi_handle *handle); + bool (*power_scale_mw_get)(const struct scmi_protocol_handle *ph); }; /** - * struct scmi_power_ops - represents the various operations provided + * struct scmi_power_proto_ops - represents the various operations provided * by SCMI Power Protocol * * @num_domains_get: get the count of power domains provided by SCMI @@ -134,9 +136,9 @@ struct scmi_perf_ops { * @state_set: sets the power state of a power domain * @state_get: gets the power state of a power domain */ -struct scmi_power_ops { - int (*num_domains_get)(const struct scmi_handle *handle); - char *(*name_get)(const struct scmi_handle *handle, u32 domain); +struct scmi_power_proto_ops { + int (*num_domains_get)(const struct scmi_protocol_handle *ph); + char *(*name_get)(const struct scmi_protocol_handle *ph, u32 domain); #define SCMI_POWER_STATE_TYPE_SHIFT 30 #define SCMI_POWER_STATE_ID_MASK (BIT(28) - 1) #define SCMI_POWER_STATE_PARAM(type, id) \ @@ -144,9 +146,9 @@ struct scmi_power_ops { ((id) & SCMI_POWER_STATE_ID_MASK)) #define SCMI_POWER_STATE_GENERIC_ON SCMI_POWER_STATE_PARAM(0, 0) #define SCMI_POWER_STATE_GENERIC_OFF SCMI_POWER_STATE_PARAM(1, 0) - int (*state_set)(const struct scmi_handle *handle, u32 domain, + int (*state_set)(const struct scmi_protocol_handle *ph, u32 domain, u32 state); - int (*state_get)(const struct scmi_handle *handle, u32 domain, + int (*state_get)(const struct scmi_protocol_handle *ph, u32 domain, u32 *state); }; @@ -429,7 +431,7 @@ enum scmi_sensor_class { }; /** - * struct scmi_sensor_ops - represents the various operations provided + * struct scmi_sensor_proto_ops - represents the various operations provided * by SCMI Sensor Protocol * * @count_get: get the count of sensors provided by SCMI @@ -444,25 +446,25 @@ enum scmi_sensor_class { * @config_get: Get sensor current configuration * @config_set: Set sensor current configuration */ -struct scmi_sensor_ops { - int (*count_get)(const struct scmi_handle *handle); +struct scmi_sensor_proto_ops { + int (*count_get)(const struct scmi_protocol_handle *ph); const struct scmi_sensor_info *(*info_get) - (const struct scmi_handle *handle, u32 sensor_id); - int (*trip_point_config)(const struct scmi_handle *handle, + (const struct scmi_protocol_handle *ph, u32 sensor_id); + int (*trip_point_config)(const struct scmi_protocol_handle *ph, u32 sensor_id, u8 trip_id, u64 trip_value); - int (*reading_get)(const struct scmi_handle *handle, u32 sensor_id, + int (*reading_get)(const struct scmi_protocol_handle *ph, u32 sensor_id, u64 *value); - int (*reading_get_timestamped)(const struct scmi_handle *handle, + int 
(*reading_get_timestamped)(const struct scmi_protocol_handle *ph, u32 sensor_id, u8 count, struct scmi_sensor_reading *readings); - int (*config_get)(const struct scmi_handle *handle, + int (*config_get)(const struct scmi_protocol_handle *ph, u32 sensor_id, u32 *sensor_config); - int (*config_set)(const struct scmi_handle *handle, + int (*config_set)(const struct scmi_protocol_handle *ph, u32 sensor_id, u32 sensor_config); }; /** - * struct scmi_reset_ops - represents the various operations provided + * struct scmi_reset_proto_ops - represents the various operations provided * by SCMI Reset Protocol * * @num_domains_get: get the count of reset domains provided by SCMI @@ -472,13 +474,13 @@ struct scmi_sensor_ops { * @assert: explicitly assert reset signal of the specified reset domain * @deassert: explicitly deassert reset signal of the specified reset domain */ -struct scmi_reset_ops { - int (*num_domains_get)(const struct scmi_handle *handle); - char *(*name_get)(const struct scmi_handle *handle, u32 domain); - int (*latency_get)(const struct scmi_handle *handle, u32 domain); - int (*reset)(const struct scmi_handle *handle, u32 domain); - int (*assert)(const struct scmi_handle *handle, u32 domain); - int (*deassert)(const struct scmi_handle *handle, u32 domain); +struct scmi_reset_proto_ops { + int (*num_domains_get)(const struct scmi_protocol_handle *ph); + char *(*name_get)(const struct scmi_protocol_handle *ph, u32 domain); + int (*latency_get)(const struct scmi_protocol_handle *ph, u32 domain); + int (*reset)(const struct scmi_protocol_handle *ph, u32 domain); + int (*assert)(const struct scmi_protocol_handle *ph, u32 domain); + int (*deassert)(const struct scmi_protocol_handle *ph, u32 domain); }; /** @@ -513,7 +515,7 @@ struct scmi_voltage_info { }; /** - * struct scmi_voltage_ops - represents the various operations provided + * struct scmi_voltage_proto_ops - represents the various operations provided * by SCMI Voltage Protocol * * @num_domains_get: get the count of voltage domains provided by SCMI @@ -523,27 +525,31 @@ struct scmi_voltage_info { * @level_set: set the voltage level for the specified domain * @level_get: get the voltage level of the specified domain */ -struct scmi_voltage_ops { - int (*num_domains_get)(const struct scmi_handle *handle); +struct scmi_voltage_proto_ops { + int (*num_domains_get)(const struct scmi_protocol_handle *ph); const struct scmi_voltage_info __must_check *(*info_get) - (const struct scmi_handle *handle, u32 domain_id); - int (*config_set)(const struct scmi_handle *handle, u32 domain_id, + (const struct scmi_protocol_handle *ph, u32 domain_id); + int (*config_set)(const struct scmi_protocol_handle *ph, u32 domain_id, u32 config); #define SCMI_VOLTAGE_ARCH_STATE_OFF 0x0 #define SCMI_VOLTAGE_ARCH_STATE_ON 0x7 - int (*config_get)(const struct scmi_handle *handle, u32 domain_id, + int (*config_get)(const struct scmi_protocol_handle *ph, u32 domain_id, u32 *config); - int (*level_set)(const struct scmi_handle *handle, u32 domain_id, + int (*level_set)(const struct scmi_protocol_handle *ph, u32 domain_id, u32 flags, s32 volt_uV); - int (*level_get)(const struct scmi_handle *handle, u32 domain_id, + int (*level_get)(const struct scmi_protocol_handle *ph, u32 domain_id, s32 *volt_uV); }; /** * struct scmi_notify_ops - represents notifications' operations provided by * SCMI core - * @register_event_notifier: Register a notifier_block for the requested event - * @unregister_event_notifier: Unregister a notifier_block for the requested + * 
@devm_event_notifier_register: Managed registration of a notifier_block for + * the requested event + * @devm_event_notifier_unregister: Managed unregistration of a notifier_block + * for the requested event + * @event_notifier_register: Register a notifier_block for the requested event + * @event_notifier_unregister: Unregister a notifier_block for the requested * event * * A user can register/unregister its own notifier_block against the wanted @@ -551,7 +557,9 @@ struct scmi_voltage_ops { * tuple: (proto_id, evt_id, src_id) using the provided register/unregister * interface where: * - * @handle: The handle identifying the platform instance to use + * @sdev: The scmi_device to use when calling the devres managed ops devm_ + * @handle: The handle identifying the platform instance to use, when not + * calling the managed ops devm_ * @proto_id: The protocol ID as in SCMI Specification * @evt_id: The message ID of the desired event as in SCMI Specification * @src_id: A pointer to the desired source ID if different sources are @@ -574,11 +582,21 @@ struct scmi_voltage_ops { * @report: A custom struct describing the specific event delivered */ struct scmi_notify_ops { - int (*register_event_notifier)(const struct scmi_handle *handle, - u8 proto_id, u8 evt_id, u32 *src_id, + int (*devm_event_notifier_register)(struct scmi_device *sdev, + u8 proto_id, u8 evt_id, + const u32 *src_id, + struct notifier_block *nb); + int (*devm_event_notifier_unregister)(struct scmi_device *sdev, + u8 proto_id, u8 evt_id, + const u32 *src_id, + struct notifier_block *nb); + int (*event_notifier_register)(const struct scmi_handle *handle, + u8 proto_id, u8 evt_id, + const u32 *src_id, struct notifier_block *nb); - int (*unregister_event_notifier)(const struct scmi_handle *handle, - u8 proto_id, u8 evt_id, u32 *src_id, + int (*event_notifier_unregister)(const struct scmi_handle *handle, + u8 proto_id, u8 evt_id, + const u32 *src_id, struct notifier_block *nb); }; @@ -587,47 +605,21 @@ struct scmi_notify_ops { * * @dev: pointer to the SCMI device * @version: pointer to the structure containing SCMI version information - * @power_ops: pointer to set of power protocol operations - * @perf_ops: pointer to set of performance protocol operations - * @clk_ops: pointer to set of clock protocol operations - * @sensor_ops: pointer to set of sensor protocol operations - * @reset_ops: pointer to set of reset protocol operations - * @voltage_ops: pointer to set of voltage protocol operations + * @devm_protocol_get: devres managed method to acquire a protocol and get specific + * operations and a dedicated protocol handler + * @devm_protocol_put: devres managed method to release a protocol * @notify_ops: pointer to set of notifications related operations - * @perf_priv: pointer to private data structure specific to performance - * protocol(for internal use only) - * @clk_priv: pointer to private data structure specific to clock - * protocol(for internal use only) - * @power_priv: pointer to private data structure specific to power - * protocol(for internal use only) - * @sensor_priv: pointer to private data structure specific to sensors - * protocol(for internal use only) - * @reset_priv: pointer to private data structure specific to reset - * protocol(for internal use only) - * @voltage_priv: pointer to private data structure specific to voltage - * protocol(for internal use only) - * @notify_priv: pointer to private data structure specific to notifications - * (for internal use only) */ struct scmi_handle { struct device *dev; 
struct scmi_revision_info *version; - const struct scmi_perf_ops *perf_ops; - const struct scmi_clk_ops *clk_ops; - const struct scmi_power_ops *power_ops; - const struct scmi_sensor_ops *sensor_ops; - const struct scmi_reset_ops *reset_ops; - const struct scmi_voltage_ops *voltage_ops; + + const void __must_check * + (*devm_protocol_get)(struct scmi_device *sdev, u8 proto, + struct scmi_protocol_handle **ph); + void (*devm_protocol_put)(struct scmi_device *sdev, u8 proto); + const struct scmi_notify_ops *notify_ops; - /* for protocol internal use */ - void *perf_priv; - void *clk_priv; - void *power_priv; - void *sensor_priv; - void *reset_priv; - void *voltage_priv; - void *notify_priv; - void *system_priv; }; enum scmi_std_protocol { @@ -712,9 +704,21 @@ static inline void scmi_driver_unregister(struct scmi_driver *driver) {} #define module_scmi_driver(__scmi_driver) \ module_driver(__scmi_driver, scmi_register, scmi_unregister) -typedef int (*scmi_prot_init_fn_t)(struct scmi_handle *); -int scmi_protocol_register(int protocol_id, scmi_prot_init_fn_t fn); -void scmi_protocol_unregister(int protocol_id); +/** + * module_scmi_protocol() - Helper macro for registering a scmi protocol + * @__scmi_protocol: scmi_protocol structure + * + * Helper macro for scmi drivers to set up proper module init / exit + * functions. Replaces module_init() and module_exit() and keeps people from + * printing pointless things to the kernel log when their driver is loaded. + */ +#define module_scmi_protocol(__scmi_protocol) \ + module_driver(__scmi_protocol, \ + scmi_protocol_register, scmi_protocol_unregister) + +struct scmi_protocol; +int scmi_protocol_register(const struct scmi_protocol *proto); +void scmi_protocol_unregister(const struct scmi_protocol *proto); /* SCMI Notification API - Custom Event Reports */ enum scmi_notification_events { diff --git a/include/linux/security.h b/include/linux/security.h index 8aeebd6646dc..06f7c50ce77f 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -291,9 +291,11 @@ void security_bprm_committed_creds(struct linux_binprm *bprm); int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc); int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param); int security_sb_alloc(struct super_block *sb); +void security_sb_delete(struct super_block *sb); void security_sb_free(struct super_block *sb); void security_free_mnt_opts(void **mnt_opts); int security_sb_eat_lsm_opts(char *options, void **mnt_opts); +int security_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts); int security_sb_remount(struct super_block *sb, void *mnt_opts); int security_sb_kern_mount(struct super_block *sb); int security_sb_show_options(struct seq_file *m, struct super_block *sb); @@ -414,7 +416,8 @@ int security_task_fix_setgid(struct cred *new, const struct cred *old, int security_task_setpgid(struct task_struct *p, pid_t pgid); int security_task_getpgid(struct task_struct *p); int security_task_getsid(struct task_struct *p); -void security_task_getsecid(struct task_struct *p, u32 *secid); +void security_task_getsecid_subj(struct task_struct *p, u32 *secid); +void security_task_getsecid_obj(struct task_struct *p, u32 *secid); int security_task_setnice(struct task_struct *p, int nice); int security_task_setioprio(struct task_struct *p, int ioprio); int security_task_getioprio(struct task_struct *p); @@ -631,6 +634,9 @@ static inline int security_sb_alloc(struct super_block *sb) return 0; } +static inline void 
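For SCMI users the per-protocol operations are no longer reachable through scmi_handle->*_ops; a driver now acquires them with the devres-managed devm_protocol_get() and passes the returned scmi_protocol_handle to every operation. A sketch of how a clock consumer's probe might look under the new model, assuming the usual scmi_device layout where sdev->handle points at the platform instance (the function name and error handling are illustrative only):

    static int example_scmi_clk_probe(struct scmi_device *sdev)
    {
            const struct scmi_handle *handle = sdev->handle;
            const struct scmi_clk_proto_ops *clk_ops;
            struct scmi_protocol_handle *ph;
            int count;

            if (!handle)
                    return -ENODEV;

            /* devres managed: released automatically when the driver unbinds */
            clk_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK, &ph);
            if (IS_ERR(clk_ops))
                    return PTR_ERR(clk_ops);

            count = clk_ops->count_get(ph);
            return count < 0 ? count : 0;
    }

The notifier interface gains matching devm_event_notifier_register()/unregister() variants that take the scmi_device in the same way, so notification users can follow the same devres pattern.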
security_sb_delete(struct super_block *sb) +{ } + static inline void security_sb_free(struct super_block *sb) { } @@ -646,6 +652,13 @@ static inline int security_sb_remount(struct super_block *sb, return 0; } +static inline int security_sb_mnt_opts_compat(struct super_block *sb, + void *mnt_opts) +{ + return 0; +} + + static inline int security_sb_kern_mount(struct super_block *sb) { return 0; @@ -1098,7 +1111,12 @@ static inline int security_task_getsid(struct task_struct *p) return 0; } -static inline void security_task_getsecid(struct task_struct *p, u32 *secid) +static inline void security_task_getsecid_subj(struct task_struct *p, u32 *secid) +{ + *secid = 0; +} + +static inline void security_task_getsecid_obj(struct task_struct *p, u32 *secid) { *secid = 0; } diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h index 9d6c28cc4d8f..5b31c5147969 100644 --- a/include/linux/seq_buf.h +++ b/include/linux/seq_buf.h @@ -72,6 +72,31 @@ static inline unsigned int seq_buf_used(struct seq_buf *s) } /** + * seq_buf_terminate - Make sure buffer is nul terminated + * @s: the seq_buf descriptor to terminate. + * + * This makes sure that the buffer in @s is nul terminated and + * safe to read as a string. + * + * Note, if this is called when the buffer has overflowed, then + * the last byte of the buffer is zeroed, and the len will still + * point passed it. + * + * After this function is called, s->buffer is safe to use + * in string operations. + */ +static inline void seq_buf_terminate(struct seq_buf *s) +{ + if (WARN_ON(s->size == 0)) + return; + + if (seq_buf_buffer_left(s)) + s->buffer[s->len] = 0; + else + s->buffer[s->size - 1] = 0; +} + +/** * seq_buf_get_buf - get buffer to write arbitrary data to * @s: the seq_buf handle * @bufp: the beginning of the buffer is stored here diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index b83b3ae3c877..723b1fa1177e 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -146,6 +146,10 @@ void *__seq_open_private(struct file *, const struct seq_operations *, int); int seq_open_private(struct file *, const struct seq_operations *, int); int seq_release_private(struct inode *, struct file *); +#ifdef CONFIG_BINARY_PRINTF +void seq_bprintf(struct seq_file *m, const char *f, const u32 *binary); +#endif + #define DEFINE_SEQ_ATTRIBUTE(__name) \ static int __name ## _open(struct inode *inode, struct file *file) \ { \ diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index e1b684e33841..d7ed00f1594e 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -500,19 +500,19 @@ static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int c return 0; } -static inline void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags) +static inline void uart_unlock_and_check_sysrq(struct uart_port *port) { int sysrq_ch; if (!port->has_sysrq) { - spin_unlock_irqrestore(&port->lock, irqflags); + spin_unlock(&port->lock); return; } sysrq_ch = port->sysrq_ch; port->sysrq_ch = 0; - spin_unlock_irqrestore(&port->lock, irqflags); + spin_unlock(&port->lock); if (sysrq_ch) handle_sysrq(sysrq_ch); @@ -526,9 +526,9 @@ static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int c { return 0; } -static inline void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags) +static inline void uart_unlock_and_check_sysrq(struct uart_port *port) { - spin_unlock_irqrestore(&port->lock, irqflags); + spin_unlock(&port->lock); } #endif 
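Since uart_unlock_and_check_sysrq() no longer takes or restores irqflags, it now pairs with a plain spin_lock() of port->lock; callers that previously used spin_lock_irqsave() keep interrupt-state handling on their own side. A sketch of the expected pattern in an interrupt handler, where interrupts are already disabled (the handler body is illustrative):

    static irqreturn_t example_uart_irq(int irq, void *dev_id)
    {
            struct uart_port *port = dev_id;

            spin_lock(&port->lock);
            /* ... drain the RX FIFO, feeding uart_prepare_sysrq_char() per character ... */
            uart_unlock_and_check_sysrq(port);

            return IRQ_HANDLED;
    }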
/* CONFIG_MAGIC_SYSRQ_SERIAL */ diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h index ca2c5393dc6b..f6c3323fc4c5 100644 --- a/include/linux/serial_s3c.h +++ b/include/linux/serial_s3c.h @@ -246,6 +246,22 @@ S5PV210_UFCON_TXTRIG4 | \ S5PV210_UFCON_RXTRIG4) +#define APPLE_S5L_UCON_RXTO_ENA 9 +#define APPLE_S5L_UCON_RXTHRESH_ENA 12 +#define APPLE_S5L_UCON_TXTHRESH_ENA 13 +#define APPLE_S5L_UCON_RXTO_ENA_MSK (1 << APPLE_S5L_UCON_RXTO_ENA) +#define APPLE_S5L_UCON_RXTHRESH_ENA_MSK (1 << APPLE_S5L_UCON_RXTHRESH_ENA) +#define APPLE_S5L_UCON_TXTHRESH_ENA_MSK (1 << APPLE_S5L_UCON_TXTHRESH_ENA) + +#define APPLE_S5L_UCON_DEFAULT (S3C2410_UCON_TXIRQMODE | \ + S3C2410_UCON_RXIRQMODE | \ + S3C2410_UCON_RXFIFO_TOI) + +#define APPLE_S5L_UTRSTAT_RXTHRESH (1<<4) +#define APPLE_S5L_UTRSTAT_TXTHRESH (1<<5) +#define APPLE_S5L_UTRSTAT_RXTO (1<<9) +#define APPLE_S5L_UTRSTAT_ALL_FLAGS (0x3f0) + #ifndef __ASSEMBLY__ #include <linux/serial_core.h> diff --git a/include/linux/sfp.h b/include/linux/sfp.h index 38893e4dd0f0..302094b855fb 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h @@ -542,6 +542,9 @@ phy_interface_t sfp_select_interface(struct sfp_bus *bus, int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo); int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee, u8 *data); +int sfp_get_module_eeprom_by_page(struct sfp_bus *bus, + const struct ethtool_module_eeprom *page, + struct netlink_ext_ack *extack); void sfp_upstream_start(struct sfp_bus *bus); void sfp_upstream_stop(struct sfp_bus *bus); void sfp_bus_put(struct sfp_bus *bus); @@ -587,6 +590,13 @@ static inline int sfp_get_module_eeprom(struct sfp_bus *bus, return -EOPNOTSUPP; } +static inline int sfp_get_module_eeprom_by_page(struct sfp_bus *bus, + const struct ethtool_module_eeprom *page, + struct netlink_ext_ack *extack) +{ + return -EOPNOTSUPP; +} + static inline void sfp_upstream_start(struct sfp_bus *bus) { } diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 0f80123650e2..1eac79ce57d4 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h @@ -79,13 +79,14 @@ struct shrinker { #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */ /* Flags */ -#define SHRINKER_NUMA_AWARE (1 << 0) -#define SHRINKER_MEMCG_AWARE (1 << 1) +#define SHRINKER_REGISTERED (1 << 0) +#define SHRINKER_NUMA_AWARE (1 << 1) +#define SHRINKER_MEMCG_AWARE (1 << 2) /* * It just makes sense when the shrinker is also MEMCG_AWARE for now, * non-MEMCG_AWARE shrinker should not have this flag set. */ -#define SHRINKER_NONSLAB (1 << 2) +#define SHRINKER_NONSLAB (1 << 3) extern int prealloc_shrinker(struct shrinker *shrinker); extern void register_shrinker_prepared(struct shrinker *shrinker); diff --git a/include/linux/signal.h b/include/linux/signal.h index 205526c4003a..201f88e3738b 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -40,9 +40,11 @@ enum siginfo_layout { SIL_TIMER, SIL_POLL, SIL_FAULT, + SIL_FAULT_TRAPNO, SIL_FAULT_MCEERR, SIL_FAULT_BNDERR, SIL_FAULT_PKUERR, + SIL_PERF_EVENT, SIL_CHLD, SIL_RT, SIL_SYS, @@ -265,6 +267,7 @@ static inline void init_sigpending(struct sigpending *sig) } extern void flush_sigqueue(struct sigpending *queue); +extern void exit_task_sigqueue_cache(struct task_struct *tsk); /* Test if 'sig' is valid signal. 
Use this instead of testing _NSIG directly */ static inline int valid_signal(unsigned long sig) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index f2c9ee71cb2c..dbf820a50a39 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -657,6 +657,7 @@ typedef unsigned char *sk_buff_data_t; * @protocol: Packet protocol from driver * @destructor: Destruct function * @tcp_tsorted_anchor: list structure for TCP (tp->tsorted_sent_queue) + * @_sk_redir: socket redirection information for skmsg * @_nfct: Associated connection, if any (with nfctinfo bits) * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c * @skb_iif: ifindex of device we arrived on @@ -756,6 +757,9 @@ struct sk_buff { void (*destructor)(struct sk_buff *skb); }; struct list_head tcp_tsorted_anchor; +#ifdef CONFIG_NET_SOCK_MSG + unsigned long _sk_redir; +#endif }; #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) @@ -1137,7 +1141,7 @@ static inline bool skb_fclone_busy(const struct sock *sk, return skb->fclone == SKB_FCLONE_ORIG && refcount_read(&fclones->fclone_ref) > 1 && - fclones->skb2.sk == sk; + READ_ONCE(fclones->skb2.sk) == sk; } /** @@ -1289,10 +1293,10 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4) void __skb_get_hash(struct sk_buff *skb); u32 __skb_get_hash_symmetric(const struct sk_buff *skb); u32 skb_get_poff(const struct sk_buff *skb); -u32 __skb_get_poff(const struct sk_buff *skb, void *data, +u32 __skb_get_poff(const struct sk_buff *skb, const void *data, const struct flow_keys_basic *keys, int hlen); __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto, - void *data, int hlen_proto); + const void *data, int hlen_proto); static inline __be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto) @@ -1311,9 +1315,8 @@ bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, bool __skb_flow_dissect(const struct net *net, const struct sk_buff *skb, struct flow_dissector *flow_dissector, - void *target_container, - void *data, __be16 proto, int nhoff, int hlen, - unsigned int flags); + void *target_container, const void *data, + __be16 proto, int nhoff, int hlen, unsigned int flags); static inline bool skb_flow_dissect(const struct sk_buff *skb, struct flow_dissector *flow_dissector, @@ -1335,9 +1338,9 @@ static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb, static inline bool skb_flow_dissect_flow_keys_basic(const struct net *net, const struct sk_buff *skb, - struct flow_keys_basic *flow, void *data, - __be16 proto, int nhoff, int hlen, - unsigned int flags) + struct flow_keys_basic *flow, + const void *data, __be16 proto, + int nhoff, int hlen, unsigned int flags) { memset(flow, 0, sizeof(*flow)); return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow, @@ -3623,6 +3626,7 @@ int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, unsigned int flags); int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, int len); +int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len); void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); unsigned int skb_zerocopy_headlen(const struct sk_buff *from); int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, @@ -3675,14 +3679,13 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum); static inline void * __must_check -__skb_header_pointer(const struct sk_buff *skb, int offset, - int len, void *data, int hlen, void 
*buffer) +__skb_header_pointer(const struct sk_buff *skb, int offset, int len, + const void *data, int hlen, void *buffer) { - if (hlen - offset >= len) - return data + offset; + if (likely(hlen - offset >= len)) + return (void *)data + offset; - if (!skb || - skb_copy_bits(skb, offset, buffer, len) < 0) + if (!skb || unlikely(skb_copy_bits(skb, offset, buffer, len) < 0)) return NULL; return buffer; diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 822c048934e3..aba0f0f429be 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -56,7 +56,8 @@ struct sk_msg { struct sk_psock_progs { struct bpf_prog *msg_parser; - struct bpf_prog *skb_parser; + struct bpf_prog *stream_parser; + struct bpf_prog *stream_verdict; struct bpf_prog *skb_verdict; }; @@ -70,12 +71,6 @@ struct sk_psock_link { void *link_raw; }; -struct sk_psock_parser { - struct strparser strp; - bool enabled; - void (*saved_data_ready)(struct sock *sk); -}; - struct sk_psock_work_state { struct sk_buff *skb; u32 len; @@ -90,9 +85,12 @@ struct sk_psock { u32 eval; struct sk_msg *cork; struct sk_psock_progs progs; - struct sk_psock_parser parser; +#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER) + struct strparser strp; +#endif struct sk_buff_head ingress_skb; struct list_head ingress_msg; + spinlock_t ingress_lock; unsigned long state; struct list_head link; spinlock_t link_lock; @@ -100,13 +98,14 @@ struct sk_psock { void (*saved_unhash)(struct sock *sk); void (*saved_close)(struct sock *sk, long timeout); void (*saved_write_space)(struct sock *sk); + void (*saved_data_ready)(struct sock *sk); + int (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock, + bool restore); struct proto *sk_proto; + struct mutex work_mutex; struct sk_psock_work_state work_state; struct work_struct work; - union { - struct rcu_head rcu; - struct work_struct gc; - }; + struct rcu_work rwork; }; int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len, @@ -127,6 +126,10 @@ int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from, struct sk_msg *msg, u32 bytes); int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from, struct sk_msg *msg, u32 bytes); +int sk_msg_wait_data(struct sock *sk, struct sk_psock *psock, int flags, + long timeo, int *err); +int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg, + int len, int flags); static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes) { @@ -287,7 +290,45 @@ static inline struct sk_psock *sk_psock(const struct sock *sk) static inline void sk_psock_queue_msg(struct sk_psock *psock, struct sk_msg *msg) { + spin_lock_bh(&psock->ingress_lock); list_add_tail(&msg->list, &psock->ingress_msg); + spin_unlock_bh(&psock->ingress_lock); +} + +static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock) +{ + struct sk_msg *msg; + + spin_lock_bh(&psock->ingress_lock); + msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list); + if (msg) + list_del(&msg->list); + spin_unlock_bh(&psock->ingress_lock); + return msg; +} + +static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock) +{ + struct sk_msg *msg; + + spin_lock_bh(&psock->ingress_lock); + msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list); + spin_unlock_bh(&psock->ingress_lock); + return msg; +} + +static inline struct sk_msg *sk_psock_next_msg(struct sk_psock *psock, + struct sk_msg *msg) +{ + struct sk_msg *ret; + + spin_lock_bh(&psock->ingress_lock); + if (list_is_last(&msg->list, &psock->ingress_msg)) + 
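The header-pointer helpers above now take the backing data as const and annotate the common path with likely()/unlikely(); the calling pattern itself is unchanged. For orientation, a minimal sketch of the usual usage with an on-stack backing buffer, assuming the TCP header offset was computed earlier:

    static int example_parse_tcp(const struct sk_buff *skb, int thoff)
    {
            const struct tcphdr *th;
            struct tcphdr _th;

            /*
             * Returns a pointer into the skb when the header is in the linear
             * area, otherwise copies it into _th; NULL if the bytes are absent.
             */
            th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
            if (!th)
                    return -EINVAL;

            return th->syn;
    }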
ret = NULL; + else + ret = list_next_entry(msg, list); + spin_unlock_bh(&psock->ingress_lock); + return ret; } static inline bool sk_psock_queue_empty(const struct sk_psock *psock) @@ -295,6 +336,13 @@ static inline bool sk_psock_queue_empty(const struct sk_psock *psock) return psock ? list_empty(&psock->ingress_msg) : true; } +static inline void kfree_sk_msg(struct sk_msg *msg) +{ + if (msg->skb) + consume_skb(msg->skb); + kfree(msg); +} + static inline void sk_psock_report_error(struct sk_psock *psock, int err) { struct sock *sk = psock->sk; @@ -304,10 +352,27 @@ static inline void sk_psock_report_error(struct sk_psock *psock, int err) } struct sk_psock *sk_psock_init(struct sock *sk, int node); +void sk_psock_stop(struct sk_psock *psock, bool wait); +#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER) int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock); void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock); void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock); +#else +static inline int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock) +{ + return -EOPNOTSUPP; +} + +static inline void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock) +{ +} + +static inline void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock) +{ +} +#endif + void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock); void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock); @@ -327,8 +392,6 @@ static inline void sk_psock_free_link(struct sk_psock_link *link) struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock); -void __sk_psock_purge_ingress_msg(struct sk_psock *psock); - static inline void sk_psock_cork_free(struct sk_psock *psock) { if (psock->cork) { @@ -338,30 +401,11 @@ static inline void sk_psock_cork_free(struct sk_psock *psock) } } -static inline void sk_psock_update_proto(struct sock *sk, - struct sk_psock *psock, - struct proto *ops) -{ - /* Pairs with lockless read in sk_clone_lock() */ - WRITE_ONCE(sk->sk_prot, ops); -} - static inline void sk_psock_restore_proto(struct sock *sk, struct sk_psock *psock) { - if (inet_csk_has_ulp(sk)) { - /* TLS does not have an unhash proto in SW cases, but we need - * to ensure we stop using the sock_map unhash routine because - * the associated psock is being removed. So use the original - * unhash handler. 
- */ - WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash); - tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space); - } else { - sk->sk_write_space = psock->saved_write_space; - /* Pairs with lockless read in sk_clone_lock() */ - WRITE_ONCE(sk->sk_prot, psock->sk_proto); - } + if (psock->psock_update_sk_prot) + psock->psock_update_sk_prot(sk, psock, true); } static inline void sk_psock_set_state(struct sk_psock *psock, @@ -394,7 +438,6 @@ static inline struct sk_psock *sk_psock_get(struct sock *sk) return psock; } -void sk_psock_stop(struct sock *sk, struct sk_psock *psock); void sk_psock_drop(struct sock *sk, struct sk_psock *psock); static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock) @@ -405,8 +448,8 @@ static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock) static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock) { - if (psock->parser.enabled) - psock->parser.saved_data_ready(sk); + if (psock->saved_data_ready) + psock->saved_data_ready(sk); else sk->sk_data_ready(sk); } @@ -435,7 +478,8 @@ static inline int psock_replace_prog(struct bpf_prog **pprog, static inline void psock_progs_drop(struct sk_psock_progs *progs) { psock_set_prog(&progs->msg_parser, NULL); - psock_set_prog(&progs->skb_parser, NULL); + psock_set_prog(&progs->stream_parser, NULL); + psock_set_prog(&progs->stream_verdict, NULL); psock_set_prog(&progs->skb_verdict, NULL); } @@ -445,6 +489,44 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock) { if (!psock) return false; - return psock->parser.enabled; + return !!psock->saved_data_ready; +} + +#if IS_ENABLED(CONFIG_NET_SOCK_MSG) + +/* We only have one bit so far. */ +#define BPF_F_PTR_MASK ~(BPF_F_INGRESS) + +static inline bool skb_bpf_ingress(const struct sk_buff *skb) +{ + unsigned long sk_redir = skb->_sk_redir; + + return sk_redir & BPF_F_INGRESS; +} + +static inline void skb_bpf_set_ingress(struct sk_buff *skb) +{ + skb->_sk_redir |= BPF_F_INGRESS; +} + +static inline void skb_bpf_set_redir(struct sk_buff *skb, struct sock *sk_redir, + bool ingress) +{ + skb->_sk_redir = (unsigned long)sk_redir; + if (ingress) + skb->_sk_redir |= BPF_F_INGRESS; +} + +static inline struct sock *skb_bpf_redirect_fetch(const struct sk_buff *skb) +{ + unsigned long sk_redir = skb->_sk_redir; + + return (struct sock *)(sk_redir & BPF_F_PTR_MASK); +} + +static inline void skb_bpf_redirect_clear(struct sk_buff *skb) +{ + skb->_sk_redir = 0; } +#endif /* CONFIG_NET_SOCK_MSG */ #endif /* _LINUX_SKMSG_H */ diff --git a/include/linux/slab.h b/include/linux/slab.h index 7ae604076767..0c97d788762c 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -186,8 +186,10 @@ void kfree(const void *); void kfree_sensitive(const void *); size_t __ksize(const void *); size_t ksize(const void *); +#ifdef CONFIG_PRINTK bool kmem_valid_obj(void *object); void kmem_dump_obj(void *object); +#endif #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR void __check_heap_object(const void *ptr, unsigned long n, struct page *page, diff --git a/include/linux/smp.h b/include/linux/smp.h index 70c6f6284dcf..510519e8a1eb 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -50,30 +50,60 @@ extern unsigned int total_cpus; int smp_call_function_single(int cpuid, smp_call_func_t func, void *info, int wait); +void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func, + void *info, bool wait, const struct cpumask *mask); + +int smp_call_function_single_async(int cpu, struct __call_single_data *csd); + /* - * 
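The new skb_bpf_* helpers (available under CONFIG_NET_SOCK_MSG) pack the verdict target into skb->_sk_redir, folding BPF_F_INGRESS into the low bit. A sketch of how a producer might encode and a later consumer decode that state, purely to illustrate the bit packing; the example_* wrappers are placeholders, not sockmap code:

    /* producer side: record where the verdict wants the skb to go */
    static void example_record_verdict(struct sk_buff *skb,
                                       struct sock *sk_redir, bool ingress)
    {
            skb_bpf_set_redir(skb, sk_redir, ingress);
    }

    /* consumer side: recover the target socket and direction, then clear */
    static struct sock *example_consume_verdict(struct sk_buff *skb, bool *ingress)
    {
            struct sock *sk = skb_bpf_redirect_fetch(skb);

            *ingress = skb_bpf_ingress(skb);
            skb_bpf_redirect_clear(skb);
            return sk;
    }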
Call a function on all processors + * Cpus stopping functions in panic. All have default weak definitions. + * Architecture-dependent code may override them. */ -void on_each_cpu(smp_call_func_t func, void *info, int wait); +void panic_smp_self_stop(void); +void nmi_panic_self_stop(struct pt_regs *regs); +void crash_smp_send_stop(void); /* - * Call a function on processors specified by mask, which might include - * the local one. + * Call a function on all processors */ -void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func, - void *info, bool wait); +static inline void on_each_cpu(smp_call_func_t func, void *info, int wait) +{ + on_each_cpu_cond_mask(NULL, func, info, wait, cpu_online_mask); +} + +/** + * on_each_cpu_mask(): Run a function on processors specified by + * cpumask, which may include the local processor. + * @mask: The set of cpus to run on (only runs on online subset). + * @func: The function to run. This must be fast and non-blocking. + * @info: An arbitrary pointer to pass to the function. + * @wait: If true, wait (atomically) until function has completed + * on other CPUs. + * + * If @wait is true, then returns once @func has returned. + * + * You must not call this function with disabled interrupts or from a + * hardware interrupt handler or from a bottom half handler. The + * exception is that it may be used during early boot while + * early_boot_irqs_disabled is set. + */ +static inline void on_each_cpu_mask(const struct cpumask *mask, + smp_call_func_t func, void *info, bool wait) +{ + on_each_cpu_cond_mask(NULL, func, info, wait, mask); +} /* * Call a function on each processor for which the supplied function * cond_func returns a positive value. This may include the local - * processor. + * processor. May be used during early boot while early_boot_irqs_disabled is + * set. Use local_irq_save/restore() instead of local_irq_disable/enable(). */ -void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func, - void *info, bool wait); - -void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func, - void *info, bool wait, const struct cpumask *mask); - -int smp_call_function_single_async(int cpu, call_single_data_t *csd); +static inline void on_each_cpu_cond(smp_cond_func_t cond_func, + smp_call_func_t func, void *info, bool wait) +{ + on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask); +} #ifdef CONFIG_SMP diff --git a/include/linux/soc/qcom/apr.h b/include/linux/soc/qcom/apr.h index 7f0bc3cf4d61..137f9f2ac4c3 100644 --- a/include/linux/soc/qcom/apr.h +++ b/include/linux/soc/qcom/apr.h @@ -113,7 +113,7 @@ void apr_driver_unregister(struct apr_driver *drv); /** * module_apr_driver() - Helper macro for registering a aprbus driver - * @__aprbus_driver: aprbus_driver struct + * @__apr_driver: apr_driver struct * * Helper macro for aprbus drivers which do not do anything special in * module init/exit. This eliminates a lot of boilerplate. Each module diff --git a/include/linux/soc/qcom/irq.h b/include/linux/soc/qcom/irq.h index 9e1ece58e55b..72b9231e9fdd 100644 --- a/include/linux/soc/qcom/irq.h +++ b/include/linux/soc/qcom/irq.h @@ -7,7 +7,7 @@ #define GPIO_NO_WAKE_IRQ ~0U -/** +/* * QCOM specific IRQ domain flags that distinguishes the handling of wakeup * capable interrupts by different interrupt controllers. 
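With on_each_cpu(), on_each_cpu_mask() and on_each_cpu_cond() turned into thin static inline wrappers around on_each_cpu_cond_mask(), existing call sites keep working unchanged. A minimal sketch, with a trivial per-CPU function standing in for real work:

    static void example_bump_counter(void *info)
    {
            atomic_t *counter = info;

            atomic_inc(counter);
    }

    static void example_run_everywhere(void)
    {
            static atomic_t counter = ATOMIC_INIT(0);

            /* runs on every online CPU and waits for all invocations to return */
            on_each_cpu(example_bump_counter, &counter, 1);

            /* same, but restricted to a cpumask (online subset only) */
            on_each_cpu_mask(cpu_online_mask, example_bump_counter, &counter, true);
    }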
* diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h index 64fc582ae415..437c9df13229 100644 --- a/include/linux/soc/qcom/llcc-qcom.h +++ b/include/linux/soc/qcom/llcc-qcom.h @@ -35,7 +35,7 @@ #define LLCC_WRCACHE 31 /** - * llcc_slice_desc - Cache slice descriptor + * struct llcc_slice_desc - Cache slice descriptor * @slice_id: llcc slice id * @slice_size: Size allocated for the llcc slice */ @@ -45,7 +45,7 @@ struct llcc_slice_desc { }; /** - * llcc_edac_reg_data - llcc edac registers data for each error type + * struct llcc_edac_reg_data - llcc edac registers data for each error type * @name: Name of the error * @synd_reg: Syndrome register address * @count_status_reg: Status register address to read the error count @@ -69,7 +69,7 @@ struct llcc_edac_reg_data { }; /** - * llcc_drv_data - Data associated with the llcc driver + * struct llcc_drv_data - Data associated with the llcc driver * @regmap: regmap associated with the llcc device * @bcast_regmap: regmap associated with llcc broadcast offset * @cfg: pointer to the data structure for slice configuration diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h index e712f94b89fc..b1f80e756d2a 100644 --- a/include/linux/soc/qcom/qmi.h +++ b/include/linux/soc/qcom/qmi.h @@ -16,7 +16,7 @@ struct socket; /** - * qmi_header - wireformat header of QMI messages + * struct qmi_header - wireformat header of QMI messages * @type: type of message * @txn_id: transaction id * @msg_id: message id @@ -93,7 +93,7 @@ struct qmi_elem_info { #define QMI_ERR_NOT_SUPPORTED_V01 94 /** - * qmi_response_type_v01 - common response header (decoded) + * struct qmi_response_type_v01 - common response header (decoded) * @result: result of the transaction * @error: error value, when @result is QMI_RESULT_FAILURE_V01 */ diff --git a/include/linux/socket.h b/include/linux/socket.h index 385894b4a8bb..b8fc5c53ba6f 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -85,7 +85,7 @@ struct mmsghdr { /* * POSIX 1003.1g - ancillary data object information - * Ancillary data consits of a sequence of pairs of + * Ancillary data consists of a sequence of pairs of * (cmsghdr, cmsg_data[]) */ diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index d08039d65825..ced07f8fde87 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -125,6 +125,12 @@ enum sdw_dpn_grouping { SDW_BLK_GRP_CNT_4 = 3, }; +/* block packing mode enum */ +enum sdw_dpn_pkg_mode { + SDW_BLK_PKG_PER_PORT = 0, + SDW_BLK_PKG_PER_CHANNEL = 1 +}; + /** * enum sdw_stream_type: data stream type * @@ -405,6 +411,7 @@ struct sdw_slave_prop { * command * @mclk_freq: clock reference passed to SoundWire Master, in Hz. * @hw_disabled: if true, the Master is not functional, typically due to pin-mux + * @quirks: bitmask identifying optional behavior beyond the scope of the MIPI specification */ struct sdw_master_prop { u32 revision; @@ -421,8 +428,29 @@ struct sdw_master_prop { u32 err_threshold; u32 mclk_freq; bool hw_disabled; + u64 quirks; }; +/* Definitions for Master quirks */ + +/* + * In a number of platforms bus clashes are reported after a hardware + * reset but without any explanations or evidence of a real problem. 
+ * The following quirk will discard all initial bus clash interrupts + * but will leave the detection on should real bus clashes happen + */ +#define SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH BIT(0) + +/* + * Some Slave devices have known issues with incorrect parity errors + * reported after a hardware reset. However during integration unexplained + * parity errors can be reported by Slave devices, possibly due to electrical + * issues at the Master level. + * The following quirk will discard all initial parity errors but will leave + * the detection on should real parity errors happen. + */ +#define SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY BIT(1) + int sdw_master_read_prop(struct sdw_bus *bus); int sdw_slave_read_prop(struct sdw_slave *slave); @@ -614,6 +642,7 @@ struct sdw_slave_ops { * @debugfs: Slave debugfs * @node: node for bus list * @port_ready: Port ready completion flag for each Slave port + * @m_port_map: static Master port map for each Slave port * @dev_num: Current Device Number, values can be 0 or dev_num_sticky * @dev_num_sticky: one-time static Device Number assigned by Bus * @probed: boolean tracking driver state @@ -645,6 +674,7 @@ struct sdw_slave { #endif struct list_head node; struct completion port_ready[SDW_MAX_PORTS]; + unsigned int m_port_map[SDW_MAX_PORTS]; enum sdw_clk_stop_mode curr_clk_stop_mode; u16 dev_num; u16 dev_num_sticky; @@ -804,6 +834,7 @@ struct sdw_defer { /** * struct sdw_master_ops - Master driver ops * @read_prop: Read Master properties + * @override_adr: Override value read from firmware (quirk for buggy firmware) * @xfer_msg: Transfer message callback * @xfer_msg_defer: Defer version of transfer message callback * @reset_page_addr: Reset the SCP page address registers @@ -813,7 +844,8 @@ struct sdw_defer { */ struct sdw_master_ops { int (*read_prop)(struct sdw_bus *bus); - + u64 (*override_adr) + (struct sdw_bus *bus, u64 addr); enum sdw_command_response (*xfer_msg) (struct sdw_bus *bus, struct sdw_msg *msg); enum sdw_command_response (*xfer_msg_defer) @@ -1009,5 +1041,7 @@ int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value); int sdw_read_no_pm(struct sdw_slave *slave, u32 addr); int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val); int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val); +int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id); +void sdw_extract_slave_id(struct sdw_bus *bus, u64 addr, struct sdw_slave_id *id); #endif /* __SOUNDWIRE_H */ diff --git a/include/linux/spi/altera.h b/include/linux/spi/altera.h index 2d42641499a6..2e2a622e56da 100644 --- a/include/linux/spi/altera.h +++ b/include/linux/spi/altera.h @@ -5,10 +5,13 @@ #ifndef __LINUX_SPI_ALTERA_H #define __LINUX_SPI_ALTERA_H +#include <linux/interrupt.h> #include <linux/regmap.h> #include <linux/spi/spi.h> #include <linux/types.h> +#define ALTERA_SPI_MAX_CS 32 + /** * struct altera_spi_platform_data - Platform data of the Altera SPI driver * @mode_bits: Mode bits of SPI master. 
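The new sdw_master_prop.quirks bitmask lets a controller driver ask the bus core to discard the spurious bus-clash and parity interrupts some hardware raises right after reset. A sketch of where a master driver might set them, assuming it does so while filling its properties in the read_prop callback (the function name is illustrative):

    static int example_sdw_read_prop(struct sdw_bus *bus)
    {
            sdw_master_read_prop(bus);

            /* ignore the known-bogus interrupts seen right after reset */
            bus->prop.quirks = SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH |
                               SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY;

            return 0;
    }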
@@ -26,4 +29,22 @@ struct altera_spi_platform_data { struct spi_board_info *devices; }; +struct altera_spi { + int irq; + int len; + int count; + int bytes_per_word; + u32 imr; + + /* data buffers */ + const unsigned char *tx; + unsigned char *rx; + + struct regmap *regmap; + u32 regoff; + struct device *dev; +}; + +extern irqreturn_t altera_spi_irq(int irq, void *dev); +extern void altera_spi_init_master(struct spi_master *master); #endif /* __LINUX_SPI_ALTERA_H */ diff --git a/include/linux/spi/mmc_spi.h b/include/linux/spi/mmc_spi.h index 778ae8eb1f3e..9ad9a06e488d 100644 --- a/include/linux/spi/mmc_spi.h +++ b/include/linux/spi/mmc_spi.h @@ -35,16 +35,7 @@ struct mmc_spi_platform_data { void (*setpower)(struct device *, unsigned int maskval); }; -#ifdef CONFIG_OF extern struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi); extern void mmc_spi_put_pdata(struct spi_device *spi); -#else -static inline struct mmc_spi_platform_data * -mmc_spi_get_pdata(struct spi_device *spi) -{ - return spi->dev.platform_data; -} -static inline void mmc_spi_put_pdata(struct spi_device *spi) {} -#endif /* CONFIG_OF */ #endif /* __LINUX_SPI_MMC_SPI_H */ diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 592897fa4f03..74239d65c7fd 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -19,7 +19,7 @@ #include <uapi/linux/spi/spi.h> struct dma_chan; -struct property_entry; +struct software_node; struct spi_controller; struct spi_transfer; struct spi_controller_mem_ops; @@ -247,7 +247,6 @@ static inline void *spi_get_drvdata(struct spi_device *spi) } struct spi_message; -struct spi_transfer; /** * struct spi_driver - Host side "protocol" driver @@ -510,6 +509,9 @@ struct spi_controller { #define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */ + /* flag indicating this is a non-devres managed controller */ + bool devm_allocated; + /* flag indicating this is an SPI slave controller */ bool slave; @@ -642,8 +644,8 @@ struct spi_controller { int *cs_gpios; struct gpio_desc **cs_gpiods; bool use_gpio_descriptors; - u8 unused_native_cs; - u8 max_native_cs; + s8 unused_native_cs; + s8 max_native_cs; /* statistics */ struct spi_statistics statistics; @@ -832,9 +834,6 @@ extern void spi_res_release(struct spi_controller *ctlr, * @delay: delay to be introduced after this transfer before * (optionally) changing the chipselect status, then starting * the next transfer or completing this @spi_message. - * @delay_usecs: microseconds to delay after this transfer before - * (optionally) changing the chipselect status, then starting - * the next transfer or completing this @spi_message. * @word_delay: inter word delay to be introduced after each word size * (set by bits_per_word) transmission. 
* @effective_speed_hz: the effective SCK-speed that was used to @@ -946,7 +945,6 @@ struct spi_transfer { #define SPI_NBITS_DUAL 0x02 /* 2bits transfer */ #define SPI_NBITS_QUAD 0x04 /* 4bits transfer */ u8 bits_per_word; - u16 delay_usecs; struct spi_delay delay; struct spi_delay cs_change_delay; struct spi_delay word_delay; @@ -1060,14 +1058,6 @@ spi_transfer_del(struct spi_transfer *t) static inline int spi_transfer_delay_exec(struct spi_transfer *t) { - struct spi_delay d; - - if (t->delay_usecs) { - d.value = t->delay_usecs; - d.unit = SPI_DELAY_UNIT_USECS; - return spi_delay_exec(&d, NULL); - } - return spi_delay_exec(&t->delay, t); } @@ -1409,7 +1399,7 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd) * @modalias: Initializes spi_device.modalias; identifies the driver. * @platform_data: Initializes spi_device.platform_data; the particular * data stored there is driver-specific. - * @properties: Additional device properties for the device. + * @swnode: Software node for the device. * @controller_data: Initializes spi_device.controller_data; some * controllers need hints about hardware setup, e.g. for DMA. * @irq: Initializes spi_device.irq; depends on how the board is wired. @@ -1442,12 +1432,11 @@ struct spi_board_info { * * platform_data goes to spi_device.dev.platform_data, * controller_data goes to spi_device.controller_data, - * device properties are copied and attached to spi_device, * irq is copied too */ char modalias[SPI_NAME_SIZE]; const void *platform_data; - const struct property_entry *properties; + const struct software_node *swnode; void *controller_data; int irq; diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 50e2df30b0aa..9edecb494e9e 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -52,8 +52,27 @@ typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr); */ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, struct task_struct *task, struct pt_regs *regs); + +/** + * arch_stack_walk_reliable - Architecture specific function to walk the + * stack reliably + * + * @consume_entry: Callback which is invoked by the architecture code for + * each entry. + * @cookie: Caller supplied pointer which is handed back to + * @consume_entry + * @task: Pointer to a task struct, can be NULL + * + * This function returns an error if it detects any unreliable + * features of the stack. Otherwise it guarantees that the stack + * trace is reliable. + * + * If the task is not 'current', the caller *must* ensure the task is + * inactive and its stack is pinned. 
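Since spi_board_info now carries a software node instead of a raw property array, board code wraps its property_entry list in a struct software_node and points @swnode at it. A sketch assuming <linux/property.h>; the device and property names are made up:

static const struct property_entry eeprom_properties[] = {
        PROPERTY_ENTRY_U32("pagesize", 32),
        { }
};

static const struct software_node eeprom_swnode = {
        .properties = eeprom_properties,
};

static struct spi_board_info eeprom_info __initdata = {
        .modalias     = "at25",
        .bus_num      = 0,
        .chip_select  = 0,
        .max_speed_hz = 1000000,
        .swnode       = &eeprom_swnode,
};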
+ */ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, void *cookie, struct task_struct *task); + void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie, const struct pt_regs *regs); diff --git a/include/linux/statfs.h b/include/linux/statfs.h index 20f695b90aab..02c862686ea3 100644 --- a/include/linux/statfs.h +++ b/include/linux/statfs.h @@ -4,6 +4,7 @@ #include <linux/types.h> #include <asm/statfs.h> +#include <asm/byteorder.h> struct kstatfs { long f_type; @@ -50,4 +51,11 @@ static inline __kernel_fsid_t u64_to_fsid(u64 v) return (__kernel_fsid_t){.val = {(u32)v, (u32)(v>>32)}}; } +/* Fold 16 bytes uuid to 64 bit fsid */ +static inline __kernel_fsid_t uuid_to_fsid(__u8 *uuid) +{ + return u64_to_fsid(le64_to_cpup((void *)uuid) ^ + le64_to_cpup((void *)(uuid + sizeof(u64)))); +} + #endif diff --git a/include/linux/static_call.h b/include/linux/static_call.h index 85ecc789f4ff..fc94faa53b5b 100644 --- a/include/linux/static_call.h +++ b/include/linux/static_call.h @@ -20,6 +20,7 @@ * static_call(name)(args...); * static_call_cond(name)(args...); * static_call_update(name, func); + * static_call_query(name); * * Usage example: * @@ -91,6 +92,10 @@ * * which will include the required value tests to avoid NULL-pointer * dereferences. + * + * To query which function is currently set to be called, use: + * + * func = static_call_query(name); */ #include <linux/types.h> @@ -113,11 +118,13 @@ extern void arch_static_call_transform(void *site, void *tramp, void *func, bool #define static_call_update(name, func) \ ({ \ - BUILD_BUG_ON(!__same_type(*(func), STATIC_CALL_TRAMP(name))); \ + typeof(&STATIC_CALL_TRAMP(name)) __F = (func); \ __static_call_update(&STATIC_CALL_KEY(name), \ - STATIC_CALL_TRAMP_ADDR(name), func); \ + STATIC_CALL_TRAMP_ADDR(name), __F); \ }) +#define static_call_query(name) (READ_ONCE(STATIC_CALL_KEY(name).func)) + #ifdef CONFIG_HAVE_STATIC_CALL_INLINE extern int __init static_call_init(void); @@ -128,16 +135,6 @@ struct static_call_mod { struct static_call_site *sites; }; -struct static_call_key { - void *func; - union { - /* bit 0: 0 = mods, 1 = sites */ - unsigned long type; - struct static_call_mod *mods; - struct static_call_site *sites; - }; -}; - /* For finding the key associated with a trampoline */ struct static_call_tramp_key { s32 tramp; @@ -187,10 +184,6 @@ extern long __static_call_return0(void); static inline int static_call_init(void) { return 0; } -struct static_call_key { - void *func; -}; - #define __DEFINE_STATIC_CALL(name, _func, _func_init) \ DECLARE_STATIC_CALL(name, _func); \ struct static_call_key STATIC_CALL_KEY(name) = { \ @@ -205,6 +198,7 @@ struct static_call_key { }; \ ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) + #define static_call_cond(name) (void)__static_call(name) static inline @@ -243,10 +237,6 @@ static inline long __static_call_return0(void) static inline int static_call_init(void) { return 0; } -struct static_call_key { - void *func; -}; - static inline long __static_call_return0(void) { return 0; diff --git a/include/linux/static_call_types.h b/include/linux/static_call_types.h index ae5662d368b9..5a00b8b2cf9f 100644 --- a/include/linux/static_call_types.h +++ b/include/linux/static_call_types.h @@ -58,11 +58,25 @@ struct static_call_site { __raw_static_call(name); \ }) +struct static_call_key { + void *func; + union { + /* bit 0: 0 = mods, 1 = sites */ + unsigned long type; + struct static_call_mod *mods; + struct static_call_site *sites; + }; +}; + #else /* !CONFIG_HAVE_STATIC_CALL_INLINE */ #define 
__STATIC_CALL_ADDRESSABLE(name) #define __static_call(name) __raw_static_call(name) +struct static_call_key { + void *func; +}; + #endif /* CONFIG_HAVE_STATIC_CALL_INLINE */ #ifdef MODULE @@ -77,6 +91,10 @@ struct static_call_site { #else +struct static_call_key { + void *func; +}; + #define static_call(name) \ ((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func)) diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index a302982de2d7..0db36360ef21 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -81,6 +81,7 @@ struct stmmac_mdio_bus_data { unsigned int phy_mask; unsigned int has_xpcs; + unsigned int xpcs_an_inband; int *irqs; int probed_phy_irq; bool needs_reset; @@ -95,6 +96,8 @@ struct stmmac_dma_cfg { int mixed_burst; bool aal; bool eame; + bool multi_msi_en; + bool dche; }; #define AXI_BLEN 7 @@ -143,6 +146,32 @@ struct stmmac_txq_cfg { int tbs_en; }; +/* FPE link state */ +enum stmmac_fpe_state { + FPE_STATE_OFF = 0, + FPE_STATE_CAPABLE = 1, + FPE_STATE_ENTERING_ON = 2, + FPE_STATE_ON = 3, +}; + +/* FPE link-partner hand-shaking mPacket type */ +enum stmmac_mpacket_type { + MPACKET_VERIFY = 0, + MPACKET_RESPONSE = 1, +}; + +enum stmmac_fpe_task_state_t { + __FPE_REMOVING, + __FPE_TASK_SCHED, +}; + +struct stmmac_fpe_cfg { + bool enable; /* FPE enable */ + bool hs_enable; /* FPE handshake enable */ + enum stmmac_fpe_state lp_fpe_state; /* Link Partner FPE state */ + enum stmmac_fpe_state lo_fpe_state; /* Local station FPE state */ +}; + struct plat_stmmacenet_data { int bus_id; int phy_addr; @@ -154,6 +183,7 @@ struct plat_stmmacenet_data { struct device_node *mdio_node; struct stmmac_dma_cfg *dma_cfg; struct stmmac_est *est; + struct stmmac_fpe_cfg *fpe_cfg; int clk_csr; int has_gmac; int enh_desc; @@ -180,9 +210,13 @@ struct plat_stmmacenet_data { void (*fix_mac_speed)(void *priv, unsigned int speed); int (*serdes_powerup)(struct net_device *ndev, void *priv); void (*serdes_powerdown)(struct net_device *ndev, void *priv); + void (*ptp_clk_freq_config)(void *priv); int (*init)(struct platform_device *pdev, void *priv); void (*exit)(struct platform_device *pdev, void *priv); struct mac_device_info *(*setup)(void *priv); + int (*clks_config)(void *priv, bool enabled); + int (*crosststamp)(ktime_t *device, struct system_counterval_t *system, + void *ctx); void *bsp_priv; struct clk *stmmac_clk; struct clk *pclk; @@ -203,5 +237,17 @@ struct plat_stmmacenet_data { u8 vlan_fail_q; unsigned int eee_usecs_rate; struct pci_dev *pdev; + bool has_crossts; + int int_snapshot_num; + int ext_snapshot_num; + bool ext_snapshot_en; + bool multi_msi_en; + int msi_mac_vec; + int msi_wol_vec; + int msi_lpi_vec; + int msi_sfty_ce_vec; + int msi_sfty_ue_vec; + int msi_rx_base_vec; + int msi_tx_base_vec; }; #endif diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 31ee3b6047c3..e91d51ea028b 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -248,6 +248,7 @@ struct svc_rqst { size_t rq_xprt_hlen; /* xprt header len */ struct xdr_buf rq_arg; struct xdr_stream rq_arg_stream; + struct xdr_stream rq_res_stream; struct page *rq_scratch_page; struct xdr_buf rq_res; struct page *rq_pages[RPCSVC_MAXPAGES + 1]; @@ -574,4 +575,28 @@ static inline void svcxdr_init_decode(struct svc_rqst *rqstp) xdr_set_scratch_page(xdr, rqstp->rq_scratch_page); } +/** + * svcxdr_init_encode - Prepare an xdr_stream for svc Reply encoding + * @rqstp: controlling server RPC transaction context + * + */ +static inline void svcxdr_init_encode(struct 
svc_rqst *rqstp) +{ + struct xdr_stream *xdr = &rqstp->rq_res_stream; + struct xdr_buf *buf = &rqstp->rq_res; + struct kvec *resv = buf->head; + + xdr_reset_scratch_buffer(xdr); + + xdr->buf = buf; + xdr->iov = resv; + xdr->p = resv->iov_base + resv->iov_len; + xdr->end = resv->iov_base + PAGE_SIZE - rqstp->rq_auth_slack; + buf->len = resv->iov_len; + xdr->page_ptr = buf->pages - 1; + buf->buflen = PAGE_SIZE * (1 + rqstp->rq_page_end - buf->pages); + buf->buflen -= rqstp->rq_auth_slack; + xdr->rqst = NULL; +} + #endif /* SUNRPC_SVC_H */ diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 1e76ed688044..3184465de3a0 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -94,6 +94,8 @@ struct svcxprt_rdma { spinlock_t sc_rw_ctxt_lock; struct list_head sc_rw_ctxts; + u32 sc_pending_recvs; + u32 sc_recv_batch; struct list_head sc_rq_dto_q; spinlock_t sc_rq_dto_lock; struct ib_qp *sc_qp; @@ -104,7 +106,6 @@ struct svcxprt_rdma { wait_queue_head_t sc_send_wait; /* SQ exhaustion waitlist */ unsigned long sc_flags; - struct list_head sc_read_complete_q; struct work_struct sc_work; struct llist_head sc_recv_ctxts; @@ -133,12 +134,10 @@ struct svc_rdma_recv_ctxt { struct rpc_rdma_cid rc_cid; struct ib_sge rc_recv_sge; void *rc_recv_buf; - struct xdr_buf rc_arg; struct xdr_stream rc_stream; bool rc_temp; u32 rc_byte_len; unsigned int rc_page_count; - unsigned int rc_hdr_count; u32 rc_inv_rkey; __be32 rc_msgtype; @@ -148,8 +147,6 @@ struct svc_rdma_recv_ctxt { struct svc_rdma_chunk *rc_cur_result_payload; struct svc_rdma_pcl rc_write_pcl; struct svc_rdma_pcl rc_reply_pcl; - - struct page *rc_pages[RPCSVC_MAXPAGES]; }; struct svc_rdma_send_ctxt { @@ -158,12 +155,12 @@ struct svc_rdma_send_ctxt { struct ib_send_wr sc_send_wr; struct ib_cqe sc_cqe; + struct completion sc_done; struct xdr_buf sc_hdrbuf; struct xdr_stream sc_stream; void *sc_xprt_buf; - int sc_page_count; int sc_cur_sge_no; - struct page *sc_pages[RPCSVC_MAXPAGES]; + struct ib_sge sc_sges[]; }; diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 92455e0d5244..571f605bc91e 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -130,6 +130,7 @@ void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *, int svc_create_xprt(struct svc_serv *, const char *, struct net *, const int, const unsigned short, int, const struct cred *); +void svc_xprt_received(struct svc_xprt *xprt); void svc_xprt_do_enqueue(struct svc_xprt *xprt); void svc_xprt_enqueue(struct svc_xprt *xprt); void svc_xprt_put(struct svc_xprt *xprt); @@ -143,6 +144,7 @@ struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name, int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen); void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *xprt); void svc_age_temp_xprts_now(struct svc_serv *, struct sockaddr *); +void svc_xprt_deferred_close(struct svc_xprt *xprt); static inline void svc_xprt_get(struct svc_xprt *xprt) { diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 2bc75c167f00..a965cbc136ad 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -395,6 +395,40 @@ static inline int xdr_stream_encode_item_absent(struct xdr_stream *xdr) } /** + * xdr_encode_bool - Encode a boolean item + * @p: address in a buffer into which to encode + * @n: boolean value to encode + * + * Return value: + * Address of item following the encoded boolean + */ +static 
inline __be32 *xdr_encode_bool(__be32 *p, u32 n) +{ + *p = n ? xdr_one : xdr_zero; + return p++; +} + +/** + * xdr_stream_encode_bool - Encode a boolean item + * @xdr: pointer to xdr_stream + * @n: boolean value to encode + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline int xdr_stream_encode_bool(struct xdr_stream *xdr, __u32 n) +{ + const size_t len = XDR_UNIT; + __be32 *p = xdr_reserve_space(xdr, len); + + if (unlikely(!p)) + return -EMSGSIZE; + xdr_encode_bool(p, n); + return len; +} + +/** * xdr_stream_encode_u32 - Encode a 32-bit integer * @xdr: pointer to xdr_stream * @n: integer to encode diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index d2e97ee802af..d81fe8b364d0 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -247,6 +247,7 @@ struct rpc_xprt { struct rpc_task * snd_task; /* Task blocked in send */ struct list_head xmit_queue; /* Send queue */ + atomic_long_t xmit_queuelen; struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ #if defined(CONFIG_SUNRPC_BACKCHANNEL) diff --git a/include/linux/surface_aggregator/controller.h b/include/linux/surface_aggregator/controller.h index f4b1ba887384..0806796eabcb 100644 --- a/include/linux/surface_aggregator/controller.h +++ b/include/linux/surface_aggregator/controller.h @@ -344,16 +344,16 @@ struct ssam_request_spec_md { * request has been fully completed. The required transport buffer will be * allocated on the stack. * - * The generated function is defined as ``int name(struct ssam_controller - * *ctrl)``, returning the status of the request, which is zero on success and - * negative on failure. The ``ctrl`` parameter is the controller via which the - * request is being sent. + * The generated function is defined as ``static int name(struct + * ssam_controller *ctrl)``, returning the status of the request, which is + * zero on success and negative on failure. The ``ctrl`` parameter is the + * controller via which the request is being sent. * * Refer to ssam_request_sync_onstack() for more details on the behavior of * the generated function. */ #define SSAM_DEFINE_SYNC_REQUEST_N(name, spec...) \ - int name(struct ssam_controller *ctrl) \ + static int name(struct ssam_controller *ctrl) \ { \ struct ssam_request_spec s = (struct ssam_request_spec)spec; \ struct ssam_request rqst; \ @@ -383,17 +383,17 @@ struct ssam_request_spec_md { * returning once the request has been fully completed. The required transport * buffer will be allocated on the stack. * - * The generated function is defined as ``int name(struct ssam_controller - * *ctrl, const atype *arg)``, returning the status of the request, which is - * zero on success and negative on failure. The ``ctrl`` parameter is the - * controller via which the request is sent. The request argument is specified - * via the ``arg`` pointer. + * The generated function is defined as ``static int name(struct + * ssam_controller *ctrl, const atype *arg)``, returning the status of the + * request, which is zero on success and negative on failure. The ``ctrl`` + * parameter is the controller via which the request is sent. The request + * argument is specified via the ``arg`` pointer. * * Refer to ssam_request_sync_onstack() for more details on the behavior of * the generated function. */ #define SSAM_DEFINE_SYNC_REQUEST_W(name, atype, spec...) 
\ - int name(struct ssam_controller *ctrl, const atype *arg) \ + static int name(struct ssam_controller *ctrl, const atype *arg) \ { \ struct ssam_request_spec s = (struct ssam_request_spec)spec; \ struct ssam_request rqst; \ @@ -424,17 +424,17 @@ struct ssam_request_spec_md { * request itself, returning once the request has been fully completed. The * required transport buffer will be allocated on the stack. * - * The generated function is defined as ``int name(struct ssam_controller - * *ctrl, rtype *ret)``, returning the status of the request, which is zero on - * success and negative on failure. The ``ctrl`` parameter is the controller - * via which the request is sent. The request's return value is written to the - * memory pointed to by the ``ret`` parameter. + * The generated function is defined as ``static int name(struct + * ssam_controller *ctrl, rtype *ret)``, returning the status of the request, + * which is zero on success and negative on failure. The ``ctrl`` parameter is + * the controller via which the request is sent. The request's return value is + * written to the memory pointed to by the ``ret`` parameter. * * Refer to ssam_request_sync_onstack() for more details on the behavior of * the generated function. */ #define SSAM_DEFINE_SYNC_REQUEST_R(name, rtype, spec...) \ - int name(struct ssam_controller *ctrl, rtype *ret) \ + static int name(struct ssam_controller *ctrl, rtype *ret) \ { \ struct ssam_request_spec s = (struct ssam_request_spec)spec; \ struct ssam_request rqst; \ @@ -483,17 +483,17 @@ struct ssam_request_spec_md { * returning once the request has been fully completed. The required transport * buffer will be allocated on the stack. * - * The generated function is defined as ``int name(struct ssam_controller - * *ctrl, u8 tid, u8 iid)``, returning the status of the request, which is - * zero on success and negative on failure. The ``ctrl`` parameter is the - * controller via which the request is sent, ``tid`` the target ID for the - * request, and ``iid`` the instance ID. + * The generated function is defined as ``static int name(struct + * ssam_controller *ctrl, u8 tid, u8 iid)``, returning the status of the + * request, which is zero on success and negative on failure. The ``ctrl`` + * parameter is the controller via which the request is sent, ``tid`` the + * target ID for the request, and ``iid`` the instance ID. * * Refer to ssam_request_sync_onstack() for more details on the behavior of * the generated function. */ #define SSAM_DEFINE_SYNC_REQUEST_MD_N(name, spec...) \ - int name(struct ssam_controller *ctrl, u8 tid, u8 iid) \ + static int name(struct ssam_controller *ctrl, u8 tid, u8 iid) \ { \ struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \ struct ssam_request rqst; \ @@ -524,18 +524,18 @@ struct ssam_request_spec_md { * the request itself, returning once the request has been fully completed. * The required transport buffer will be allocated on the stack. * - * The generated function is defined as ``int name(struct ssam_controller - * *ctrl, u8 tid, u8 iid, const atype *arg)``, returning the status of the - * request, which is zero on success and negative on failure. The ``ctrl`` - * parameter is the controller via which the request is sent, ``tid`` the - * target ID for the request, and ``iid`` the instance ID. The request argument - * is specified via the ``arg`` pointer. 
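Because the generated request functions are now static, a definition produced by one of these macros is meant to be called from the same compilation unit. A hypothetical definition and call site; the target category, target ID and command ID are placeholders, not taken from a real interface:

SSAM_DEFINE_SYNC_REQUEST_R(example_get_mode, u8, {
        .target_category = SSAM_SSH_TC_SAM,
        .target_id       = 0x01,
        .command_id      = 0x02,
        .instance_id     = 0x00,
});

static int example_use(struct ssam_controller *ctrl)
{
        u8 mode;
        int status;

        /* calls the static function generated by the macro above */
        status = example_get_mode(ctrl, &mode);
        if (status)
                return status;

        return mode;
}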
+ * The generated function is defined as ``static int name(struct + * ssam_controller *ctrl, u8 tid, u8 iid, const atype *arg)``, returning the + * status of the request, which is zero on success and negative on failure. + * The ``ctrl`` parameter is the controller via which the request is sent, + * ``tid`` the target ID for the request, and ``iid`` the instance ID. The + * request argument is specified via the ``arg`` pointer. * * Refer to ssam_request_sync_onstack() for more details on the behavior of * the generated function. */ #define SSAM_DEFINE_SYNC_REQUEST_MD_W(name, atype, spec...) \ - int name(struct ssam_controller *ctrl, u8 tid, u8 iid, const atype *arg)\ + static int name(struct ssam_controller *ctrl, u8 tid, u8 iid, const atype *arg) \ { \ struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \ struct ssam_request rqst; \ @@ -567,18 +567,18 @@ struct ssam_request_spec_md { * execution of the request itself, returning once the request has been fully * completed. The required transport buffer will be allocated on the stack. * - * The generated function is defined as ``int name(struct ssam_controller - * *ctrl, u8 tid, u8 iid, rtype *ret)``, returning the status of the request, - * which is zero on success and negative on failure. The ``ctrl`` parameter is - * the controller via which the request is sent, ``tid`` the target ID for the - * request, and ``iid`` the instance ID. The request's return value is written - * to the memory pointed to by the ``ret`` parameter. + * The generated function is defined as ``static int name(struct + * ssam_controller *ctrl, u8 tid, u8 iid, rtype *ret)``, returning the status + * of the request, which is zero on success and negative on failure. The + * ``ctrl`` parameter is the controller via which the request is sent, ``tid`` + * the target ID for the request, and ``iid`` the instance ID. The request's + * return value is written to the memory pointed to by the ``ret`` parameter. * * Refer to ssam_request_sync_onstack() for more details on the behavior of * the generated function. */ #define SSAM_DEFINE_SYNC_REQUEST_MD_R(name, rtype, spec...) \ - int name(struct ssam_controller *ctrl, u8 tid, u8 iid, rtype *ret) \ + static int name(struct ssam_controller *ctrl, u8 tid, u8 iid, rtype *ret) \ { \ struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \ struct ssam_request rqst; \ diff --git a/include/linux/surface_aggregator/device.h b/include/linux/surface_aggregator/device.h index 02f3e06c0a60..6ff9c58b3e17 100644 --- a/include/linux/surface_aggregator/device.h +++ b/include/linux/surface_aggregator/device.h @@ -98,9 +98,9 @@ struct ssam_device_uid { | (((fun) != SSAM_ANY_FUN) ? SSAM_MATCH_FUNCTION : 0), \ .domain = d, \ .category = cat, \ - .target = ((tid) != SSAM_ANY_TID) ? (tid) : 0, \ - .instance = ((iid) != SSAM_ANY_IID) ? (iid) : 0, \ - .function = ((fun) != SSAM_ANY_FUN) ? (fun) : 0 \ + .target = __builtin_choose_expr((tid) != SSAM_ANY_TID, (tid), 0), \ + .instance = __builtin_choose_expr((iid) != SSAM_ANY_IID, (iid), 0), \ + .function = __builtin_choose_expr((fun) != SSAM_ANY_FUN, (fun), 0) /** * SSAM_VDEV() - Initialize a &struct ssam_device_id as virtual device with @@ -336,17 +336,18 @@ void ssam_device_driver_unregister(struct ssam_device_driver *d); * request has been fully completed. The required transport buffer will be * allocated on the stack. 
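The switch to __builtin_choose_expr() above keeps SSAM_DEVICE()-based initializers constant expressions, so SSAM_SDEV()/SSAM_VDEV() entries can still be used directly in device ID tables while non-constant arguments now fail at compile time. A hypothetical match table; the category and IDs are illustrative:

static const struct ssam_device_id example_match[] = {
        { SSAM_SDEV(HID, 0x02, SSAM_ANY_IID, 0x00) },
        { }
};
MODULE_DEVICE_TABLE(ssam, example_match);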
* - * The generated function is defined as ``int name(struct ssam_device *sdev)``, - * returning the status of the request, which is zero on success and negative - * on failure. The ``sdev`` parameter specifies both the target device of the - * request and by association the controller via which the request is sent. + * The generated function is defined as ``static int name(struct ssam_device + * *sdev)``, returning the status of the request, which is zero on success and + * negative on failure. The ``sdev`` parameter specifies both the target + * device of the request and by association the controller via which the + * request is sent. * * Refer to ssam_request_sync_onstack() for more details on the behavior of * the generated function. */ #define SSAM_DEFINE_SYNC_REQUEST_CL_N(name, spec...) \ SSAM_DEFINE_SYNC_REQUEST_MD_N(__raw_##name, spec) \ - int name(struct ssam_device *sdev) \ + static int name(struct ssam_device *sdev) \ { \ return __raw_##name(sdev->ctrl, sdev->uid.target, \ sdev->uid.instance); \ @@ -368,19 +369,19 @@ void ssam_device_driver_unregister(struct ssam_device_driver *d); * itself, returning once the request has been fully completed. The required * transport buffer will be allocated on the stack. * - * The generated function is defined as ``int name(struct ssam_device *sdev, - * const atype *arg)``, returning the status of the request, which is zero on - * success and negative on failure. The ``sdev`` parameter specifies both the - * target device of the request and by association the controller via which - * the request is sent. The request's argument is specified via the ``arg`` - * pointer. + * The generated function is defined as ``static int name(struct ssam_device + * *sdev, const atype *arg)``, returning the status of the request, which is + * zero on success and negative on failure. The ``sdev`` parameter specifies + * both the target device of the request and by association the controller via + * which the request is sent. The request's argument is specified via the + * ``arg`` pointer. * * Refer to ssam_request_sync_onstack() for more details on the behavior of * the generated function. */ #define SSAM_DEFINE_SYNC_REQUEST_CL_W(name, atype, spec...) \ SSAM_DEFINE_SYNC_REQUEST_MD_W(__raw_##name, atype, spec) \ - int name(struct ssam_device *sdev, const atype *arg) \ + static int name(struct ssam_device *sdev, const atype *arg) \ { \ return __raw_##name(sdev->ctrl, sdev->uid.target, \ sdev->uid.instance, arg); \ @@ -402,8 +403,8 @@ void ssam_device_driver_unregister(struct ssam_device_driver *d); * itself, returning once the request has been fully completed. The required * transport buffer will be allocated on the stack. * - * The generated function is defined as ``int name(struct ssam_device *sdev, - * rtype *ret)``, returning the status of the request, which is zero on + * The generated function is defined as ``static int name(struct ssam_device + * *sdev, rtype *ret)``, returning the status of the request, which is zero on * success and negative on failure. The ``sdev`` parameter specifies both the * target device of the request and by association the controller via which * the request is sent. The request's return value is written to the memory @@ -414,7 +415,7 @@ void ssam_device_driver_unregister(struct ssam_device_driver *d); */ #define SSAM_DEFINE_SYNC_REQUEST_CL_R(name, rtype, spec...) 
\ SSAM_DEFINE_SYNC_REQUEST_MD_R(__raw_##name, rtype, spec) \ - int name(struct ssam_device *sdev, rtype *ret) \ + static int name(struct ssam_device *sdev, rtype *ret) \ { \ return __raw_##name(sdev->ctrl, sdev->uid.target, \ sdev->uid.instance, ret); \ diff --git a/include/linux/swap.h b/include/linux/swap.h index 4cc6ec3bf0ab..144727041e78 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -10,8 +10,10 @@ #include <linux/sched.h> #include <linux/node.h> #include <linux/fs.h> +#include <linux/pagemap.h> #include <linux/atomic.h> #include <linux/page-flags.h> +#include <uapi/linux/mempolicy.h> #include <asm/page.h> struct notifier_block; @@ -339,6 +341,20 @@ extern void lru_note_cost(struct lruvec *lruvec, bool file, extern void lru_note_cost_page(struct page *); extern void lru_cache_add(struct page *); extern void mark_page_accessed(struct page *); + +extern atomic_t lru_disable_count; + +static inline bool lru_cache_disabled(void) +{ + return atomic_read(&lru_disable_count); +} + +static inline void lru_cache_enable(void) +{ + atomic_dec(&lru_disable_count); +} + +extern void lru_cache_disable(void); extern void lru_add_drain(void); extern void lru_add_drain_cpu(int cpu); extern void lru_add_drain_cpu_zone(struct zone *zone); @@ -378,6 +394,12 @@ extern int sysctl_min_slab_ratio; #define node_reclaim_mode 0 #endif +static inline bool node_reclaim_enabled(void) +{ + /* Is any node_reclaim_mode bit set? */ + return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP); +} + extern void check_move_unevictable_pages(struct pagevec *pvec); extern int kswapd_run(int nid); diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 5857a937c637..216854a5e513 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -6,6 +6,7 @@ #include <linux/init.h> #include <linux/types.h> #include <linux/limits.h> +#include <linux/spinlock.h> struct device; struct page; @@ -36,20 +37,11 @@ enum swiotlb_force { extern void swiotlb_init(int verbose); int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose); -extern unsigned long swiotlb_nr_tbl(void); unsigned long swiotlb_size_or_default(void); extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs); extern int swiotlb_late_init_with_default_size(size_t default_size); extern void __init swiotlb_update_mem_attributes(void); -/* - * Enumeration for sync targets - */ -enum dma_sync_target { - SYNC_FOR_CPU = 0, - SYNC_FOR_DEVICE = 1, -}; - phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys, size_t mapping_size, size_t alloc_size, enum dma_data_direction dir, unsigned long attrs); @@ -57,32 +49,70 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys, extern void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, size_t mapping_size, - size_t alloc_size, enum dma_data_direction dir, unsigned long attrs); -extern void swiotlb_tbl_sync_single(struct device *hwdev, - phys_addr_t tlb_addr, - size_t size, enum dma_data_direction dir, - enum dma_sync_target target); - +void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr, + size_t size, enum dma_data_direction dir); +void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr, + size_t size, enum dma_data_direction dir); dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys, size_t size, enum dma_data_direction dir, unsigned long attrs); #ifdef CONFIG_SWIOTLB extern enum swiotlb_force swiotlb_force; -extern phys_addr_t io_tlb_start, 
io_tlb_end; + +/** + * struct io_tlb_mem - IO TLB Memory Pool Descriptor + * + * @start: The start address of the swiotlb memory pool. Used to do a quick + * range check to see if the memory was in fact allocated by this + * API. + * @end: The end address of the swiotlb memory pool. Used to do a quick + * range check to see if the memory was in fact allocated by this + * API. + * @nslabs: The number of IO TLB blocks (in groups of 64) between @start and + * @end. This is command line adjustable via setup_io_tlb_npages. + * @used: The number of used IO TLB block. + * @list: The free list describing the number of free entries available + * from each index. + * @index: The index to start searching in the next round. + * @orig_addr: The original address corresponding to a mapped entry. + * @alloc_size: Size of the allocated buffer. + * @lock: The lock to protect the above data structures in the map and + * unmap calls. + * @debugfs: The dentry to debugfs. + * @late_alloc: %true if allocated using the page allocator + */ +struct io_tlb_mem { + phys_addr_t start; + phys_addr_t end; + unsigned long nslabs; + unsigned long used; + unsigned int index; + spinlock_t lock; + struct dentry *debugfs; + bool late_alloc; + struct io_tlb_slot { + phys_addr_t orig_addr; + size_t alloc_size; + unsigned int list; + } slots[]; +}; +extern struct io_tlb_mem *io_tlb_default_mem; static inline bool is_swiotlb_buffer(phys_addr_t paddr) { - return paddr >= io_tlb_start && paddr < io_tlb_end; + struct io_tlb_mem *mem = io_tlb_default_mem; + + return mem && paddr >= mem->start && paddr < mem->end; } void __init swiotlb_exit(void); unsigned int swiotlb_max_segment(void); size_t swiotlb_max_mapping_size(struct device *dev); bool is_swiotlb_active(void); -void __init swiotlb_adjust_size(unsigned long new_size); +void __init swiotlb_adjust_size(unsigned long size); #else #define swiotlb_force SWIOTLB_NO_FORCE static inline bool is_swiotlb_buffer(phys_addr_t paddr) @@ -106,7 +136,7 @@ static inline bool is_swiotlb_active(void) return false; } -static inline void swiotlb_adjust_size(unsigned long new_size) +static inline void swiotlb_adjust_size(unsigned long size) { } #endif /* CONFIG_SWIOTLB */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 2839dc9a7c01..050511e8f1f8 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -69,6 +69,8 @@ struct io_uring_params; struct clone_args; struct open_how; struct mount_attr; +struct landlock_ruleset_attr; +enum landlock_rule_type; #include <linux/types.h> #include <linux/aio_abi.h> @@ -483,6 +485,8 @@ asmlinkage long sys_pipe2(int __user *fildes, int flags); /* fs/quota.c */ asmlinkage long sys_quotactl(unsigned int cmd, const char __user *special, qid_t id, void __user *addr); +asmlinkage long sys_quotactl_path(unsigned int cmd, const char __user *mountpoint, + qid_t id, void __user *addr); /* fs/readdir.c */ asmlinkage long sys_getdents64(unsigned int fd, @@ -1041,6 +1045,11 @@ asmlinkage long sys_pidfd_send_signal(int pidfd, int sig, siginfo_t __user *info, unsigned int flags); asmlinkage long sys_pidfd_getfd(int pidfd, int fd, unsigned int flags); +asmlinkage long sys_landlock_create_ruleset(const struct landlock_ruleset_attr __user *attr, + size_t size, __u32 flags); +asmlinkage long sys_landlock_add_rule(int ruleset_fd, enum landlock_rule_type rule_type, + const void __user *rule_attr, __u32 flags); +asmlinkage long sys_landlock_restrict_self(int ruleset_fd, __u32 flags); /* * Architecture-specific system calls diff --git 
a/include/linux/sysctl.h b/include/linux/sysctl.h index 51298a4f4623..d99ca99837de 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -53,6 +53,8 @@ int proc_douintvec(struct ctl_table *, int, void *, size_t *, loff_t *); int proc_dointvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *); int proc_douintvec_minmax(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos); +int proc_dou8vec_minmax(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); int proc_dointvec_jiffies(struct ctl_table *, int, void *, size_t *, loff_t *); int proc_dointvec_userhz_jiffies(struct ctl_table *, int, void *, size_t *, loff_t *); diff --git a/include/linux/task_work.h b/include/linux/task_work.h index 0d848a1e9e62..5b8a93f288bb 100644 --- a/include/linux/task_work.h +++ b/include/linux/task_work.h @@ -22,6 +22,8 @@ enum task_work_notify_mode { int task_work_add(struct task_struct *task, struct callback_head *twork, enum task_work_notify_mode mode); +struct callback_head *task_work_cancel_match(struct task_struct *task, + bool (*match)(struct callback_head *, void *data), void *data); struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t); void task_work_run(void); diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 6ac7bb1d2b1f..d296f3b88fb9 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -91,7 +91,7 @@ struct thermal_cooling_device_ops { struct thermal_cooling_device { int id; - char type[THERMAL_NAME_LENGTH]; + char *type; struct device device; struct device_node *np; void *devdata; @@ -390,7 +390,6 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp); int thermal_zone_get_slope(struct thermal_zone_device *tz); int thermal_zone_get_offset(struct thermal_zone_device *tz); -void thermal_notify_framework(struct thermal_zone_device *, int); int thermal_zone_device_enable(struct thermal_zone_device *tz); int thermal_zone_device_disable(struct thermal_zone_device *tz); void thermal_zone_device_critical(struct thermal_zone_device *tz); @@ -436,10 +435,6 @@ static inline int thermal_zone_get_offset( struct thermal_zone_device *tz) { return -ENODEV; } -static inline void thermal_notify_framework(struct thermal_zone_device *tz, - int trip) -{ } - static inline int thermal_zone_device_enable(struct thermal_zone_device *tz) { return -ENODEV; } diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h index 659a0a810fa1..e7c96c37174f 100644 --- a/include/linux/thunderbolt.h +++ b/include/linux/thunderbolt.h @@ -146,6 +146,7 @@ struct tb_property_dir *tb_property_parse_dir(const u32 *block, size_t block_len); ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block, size_t block_len); +struct tb_property_dir *tb_property_copy_dir(const struct tb_property_dir *dir); struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid); void tb_property_free_dir(struct tb_property_dir *dir); int tb_property_add_immediate(struct tb_property_dir *parent, const char *key, @@ -179,23 +180,24 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir); * @route: Route string the other domain can be reached * @vendor: Vendor ID of the remote domain * @device: Device ID of the demote domain + * @local_max_hopid: Maximum input HopID of this host + * @remote_max_hopid: Maximum input HopID of the remote host * @lock: Lock to serialize access to the following fields of this structure * @vendor_name: Name of the vendor (or 
%NULL if not known) * @device_name: Name of the device (or %NULL if not known) * @link_speed: Speed of the link in Gb/s * @link_width: Width of the link (1 or 2) * @is_unplugged: The XDomain is unplugged - * @resume: The XDomain is being resumed * @needs_uuid: If the XDomain does not have @remote_uuid it will be * queried first - * @transmit_path: HopID which the remote end expects us to transmit - * @transmit_ring: Local ring (hop) where outgoing packets are pushed - * @receive_path: HopID which we expect the remote end to transmit - * @receive_ring: Local ring (hop) where incoming packets arrive * @service_ids: Used to generate IDs for the services - * @properties: Properties exported by the remote domain - * @property_block_gen: Generation of @properties - * @properties_lock: Lock protecting @properties. + * @in_hopids: Input HopIDs for DMA tunneling + * @out_hopids; Output HopIDs for DMA tunneling + * @local_property_block: Local block of properties + * @local_property_block_gen: Generation of @local_property_block + * @local_property_block_len: Length of the @local_property_block in dwords + * @remote_properties: Properties exported by the remote domain + * @remote_property_block_gen: Generation of @remote_properties * @get_uuid_work: Work used to retrieve @remote_uuid * @uuid_retries: Number of times left @remote_uuid is requested before * giving up @@ -225,21 +227,23 @@ struct tb_xdomain { u64 route; u16 vendor; u16 device; + unsigned int local_max_hopid; + unsigned int remote_max_hopid; struct mutex lock; const char *vendor_name; const char *device_name; unsigned int link_speed; unsigned int link_width; bool is_unplugged; - bool resume; bool needs_uuid; - u16 transmit_path; - u16 transmit_ring; - u16 receive_path; - u16 receive_ring; struct ida service_ids; - struct tb_property_dir *properties; - u32 property_block_gen; + struct ida in_hopids; + struct ida out_hopids; + u32 *local_property_block; + u32 local_property_block_gen; + u32 local_property_block_len; + struct tb_property_dir *remote_properties; + u32 remote_property_block_gen; struct delayed_work get_uuid_work; int uuid_retries; struct delayed_work get_properties_work; @@ -252,10 +256,22 @@ struct tb_xdomain { int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd); void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd); -int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path, - u16 transmit_ring, u16 receive_path, - u16 receive_ring); -int tb_xdomain_disable_paths(struct tb_xdomain *xd); +int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid); +void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid); +int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid); +void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid); +int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path, + int transmit_ring, int receive_path, + int receive_ring); +int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path, + int transmit_ring, int receive_path, + int receive_ring); + +static inline int tb_xdomain_disable_all_paths(struct tb_xdomain *xd) +{ + return tb_xdomain_disable_paths(xd, -1, -1, -1, -1); +} + struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid); struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route); diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h index 754b74a2167f..c6540ceea143 100644 --- a/include/linux/timecounter.h +++ b/include/linux/timecounter.h @@ -124,7 +124,7 @@ extern u64 
timecounter_read(struct timecounter *tc); * This allows conversion of cycle counter values which were generated * in the past. */ -extern u64 timecounter_cyc2time(struct timecounter *tc, +extern u64 timecounter_cyc2time(const struct timecounter *tc, u64 cycle_tstamp); #endif diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index c6792cf01bc7..78a98bdff76d 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -3,6 +3,7 @@ #define _LINUX_TIMEKEEPING_H #include <linux/errno.h> +#include <linux/clocksource_ids.h> /* Included from linux/ktime.h */ @@ -243,11 +244,12 @@ struct ktime_timestamps { * @cs_was_changed_seq: The sequence number of clocksource change events */ struct system_time_snapshot { - u64 cycles; - ktime_t real; - ktime_t raw; - unsigned int clock_was_set_seq; - u8 cs_was_changed_seq; + u64 cycles; + ktime_t real; + ktime_t raw; + enum clocksource_ids cs_id; + unsigned int clock_was_set_seq; + u8 cs_was_changed_seq; }; /** diff --git a/include/linux/timex.h b/include/linux/timex.h index 9c2e54faf9b7..059b18eb1f1f 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -133,7 +133,7 @@ /* * kernel variables - * Note: maximum error = NTP synch distance = dispersion + delay / 2; + * Note: maximum error = NTP sync distance = dispersion + delay / 2; * estimated error = NTP dispersion. */ extern unsigned long tick_usec; /* USER_HZ period (usec) */ diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 543aa3b1dedc..aa11fe323c56 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -305,6 +305,8 @@ struct tpm_buf { }; enum tpm2_object_attributes { + TPM2_OA_FIXED_TPM = BIT(1), + TPM2_OA_FIXED_PARENT = BIT(4), TPM2_OA_USER_WITH_AUTH = BIT(6), }; diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 28e7af1406f2..ad413b382a3c 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -206,7 +206,7 @@ static inline unsigned int tracing_gen_ctx_dec(void) trace_ctx = tracing_gen_ctx(); /* - * Subtract one from the preeption counter if preemption is enabled, + * Subtract one from the preemption counter if preemption is enabled, * see trace_event_buffer_reserve()for details. 
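With @cs_id recorded alongside the timestamps, callers of ktime_get_snapshot() can check which clocksource backed the snapshot before relying on it for cross-timestamping. A minimal sketch; CSID_GENERIC is the catch-all ID from <linux/clocksource_ids.h>:

static bool example_snapshot_is_identifiable(void)
{
        struct system_time_snapshot snap;

        ktime_get_snapshot(&snap);

        /* CSID_GENERIC means the clocksource did not declare an ID */
        return snap.cs_id != CSID_GENERIC;
}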
*/ if (IS_ENABLED(CONFIG_PREEMPTION)) @@ -404,7 +404,6 @@ trace_get_fields(struct trace_event_call *event_call) return event_call->class->get_fields(event_call); } -struct trace_array; struct trace_subsystem_dir; enum { @@ -640,7 +639,8 @@ enum event_trigger_type { extern int filter_match_preds(struct event_filter *filter, void *rec); extern enum event_trigger_type -event_triggers_call(struct trace_event_file *file, void *rec, +event_triggers_call(struct trace_event_file *file, + struct trace_buffer *buffer, void *rec, struct ring_buffer_event *event); extern void event_triggers_post_call(struct trace_event_file *file, @@ -664,7 +664,7 @@ trace_trigger_soft_disabled(struct trace_event_file *file) if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) { if (eflags & EVENT_FILE_FL_TRIGGER_MODE) - event_triggers_call(file, NULL, NULL); + event_triggers_call(file, NULL, NULL, NULL); if (eflags & EVENT_FILE_FL_SOFT_DISABLED) return true; if (eflags & EVENT_FILE_FL_PID_FILTER) diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 9cfb099da58f..13f65420f188 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -465,7 +465,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) * * * * The declared 'local variable' is called '__entry' * * - * * __field(pid_t, prev_prid) is equivalent to a standard declariton: + * * __field(pid_t, prev_prid) is equivalent to a standard declaration: * * * * pid_t prev_pid; * * diff --git a/include/linux/tty.h b/include/linux/tty.h index 95fc2f100f12..e5d6b1f28823 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -17,30 +17,6 @@ /* - * Lock subclasses for tty locks - * - * TTY_LOCK_NORMAL is for normal ttys and master ptys. - * TTY_LOCK_SLAVE is for slave ptys only. - * - * Lock subclasses are necessary for handling nested locking with pty pairs. - * tty locks which use nested locking: - * - * legacy_mutex - Nested tty locks are necessary for releasing pty pairs. - * The stable lock order is master pty first, then slave pty. - * termios_rwsem - The stable lock order is tty_buffer lock->termios_rwsem. - * Subclassing this lock enables the slave pty to hold its - * termios_rwsem when claiming the master tty_buffer lock. - * tty_buffer lock - slave ptys can claim nested buffer lock when handling - * signal chars. The stable lock order is slave pty, then - * master. - */ - -enum { - TTY_LOCK_NORMAL = 0, - TTY_LOCK_SLAVE, -}; - -/* * (Note: the *_driver.minor_start values 1, 64, 128, 192 are * hardcoded at present.) */ @@ -284,7 +260,7 @@ struct tty_operations; struct tty_struct { int magic; struct kref kref; - struct device *dev; + struct device *dev; /* class device or NULL (e.g. 
ptys, serdev) */ struct tty_driver *driver; const struct tty_operations *ops; int index; @@ -373,21 +349,6 @@ struct tty_file_private { #define TTY_LDISC_CHANGING 20 /* Change pending - non-block IO */ #define TTY_LDISC_HALTED 22 /* Line discipline is halted */ -/* Values for tty->flow_change */ -#define TTY_THROTTLE_SAFE 1 -#define TTY_UNTHROTTLE_SAFE 2 - -static inline void __tty_set_flow_change(struct tty_struct *tty, int val) -{ - tty->flow_change = val; -} - -static inline void tty_set_flow_change(struct tty_struct *tty, int val) -{ - tty->flow_change = val; - smp_mb(); -} - static inline bool tty_io_nonblock(struct tty_struct *tty, struct file *file) { return file->f_flags & O_NONBLOCK || @@ -419,10 +380,6 @@ extern struct tty_struct *tty_kopen_exclusive(dev_t device); extern struct tty_struct *tty_kopen_shared(dev_t device); extern void tty_kclose(struct tty_struct *tty); extern int tty_dev_name_to_number(const char *name, dev_t *number); -extern int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout); -extern void tty_ldisc_unlock(struct tty_struct *tty); -extern ssize_t redirected_tty_write(struct kiocb *, struct iov_iter *); -extern struct file *tty_release_redirect(struct tty_struct *tty); #else static inline void tty_kref_put(struct tty_struct *tty) { } @@ -475,14 +432,10 @@ static inline struct tty_struct *tty_kref_get(struct tty_struct *tty) extern const char *tty_driver_name(const struct tty_struct *tty); extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); -extern int __tty_check_change(struct tty_struct *tty, int sig); -extern int tty_check_change(struct tty_struct *tty); -extern void __stop_tty(struct tty_struct *tty); extern void stop_tty(struct tty_struct *tty); -extern void __start_tty(struct tty_struct *tty); extern void start_tty(struct tty_struct *tty); extern int tty_register_driver(struct tty_driver *driver); -extern int tty_unregister_driver(struct tty_driver *driver); +extern void tty_unregister_driver(struct tty_driver *driver); extern struct device *tty_register_device(struct tty_driver *driver, unsigned index, struct device *dev); extern struct device *tty_register_device_attr(struct tty_driver *driver, @@ -506,23 +459,11 @@ extern int tty_get_icount(struct tty_struct *tty, extern int is_current_pgrp_orphaned(void); extern void tty_hangup(struct tty_struct *tty); extern void tty_vhangup(struct tty_struct *tty); -extern void tty_vhangup_session(struct tty_struct *tty); extern int tty_hung_up_p(struct file *filp); extern void do_SAK(struct tty_struct *tty); extern void __do_SAK(struct tty_struct *tty); -extern void tty_open_proc_set_tty(struct file *filp, struct tty_struct *tty); -extern int tty_signal_session_leader(struct tty_struct *tty, int exit_session); -extern void session_clear_tty(struct pid *session); extern void no_tty(void); -extern void tty_buffer_free_all(struct tty_port *port); -extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld); -extern void tty_buffer_init(struct tty_port *port); -extern void tty_buffer_set_lock_subclass(struct tty_port *port); -extern bool tty_buffer_restart_work(struct tty_port *port); -extern bool tty_buffer_cancel_work(struct tty_port *port); -extern void tty_buffer_flush_work(struct tty_port *port); extern speed_t tty_termios_baud_rate(struct ktermios *termios); -extern speed_t tty_termios_input_baud_rate(struct ktermios *termios); extern void tty_termios_encode_baud_rate(struct ktermios *termios, speed_t ibaud, speed_t obaud); extern void tty_encode_baud_rate(struct 
tty_struct *tty, @@ -550,27 +491,16 @@ extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt); extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); extern void tty_ldisc_deref(struct tty_ldisc *); extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *); -extern void tty_ldisc_hangup(struct tty_struct *tty, bool reset); -extern int tty_ldisc_reinit(struct tty_struct *tty, int disc); extern const struct seq_operations tty_ldiscs_seq_ops; extern void tty_wakeup(struct tty_struct *tty); extern void tty_ldisc_flush(struct tty_struct *tty); -extern long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg); extern int tty_mode_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg); -extern long tty_jobctrl_ioctl(struct tty_struct *tty, struct tty_struct *real_tty, - struct file *file, unsigned int cmd, unsigned long arg); extern int tty_perform_flush(struct tty_struct *tty, unsigned long arg); -extern void tty_default_fops(struct file_operations *fops); -extern struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx); -extern int tty_alloc_file(struct file *file); -extern void tty_add_file(struct tty_struct *tty, struct file *file); -extern void tty_free_file(struct file *file); extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx); extern void tty_release_struct(struct tty_struct *tty, int idx); -extern int tty_release(struct inode *inode, struct file *filp); extern void tty_init_termios(struct tty_struct *tty); extern void tty_save_termios(struct tty_struct *tty); extern int tty_standard_install(struct tty_driver *driver, @@ -578,8 +508,6 @@ extern int tty_standard_install(struct tty_driver *driver, extern struct mutex tty_mutex; -#define tty_is_writelocked(tty) (mutex_is_locked(&tty->atomic_write_lock)) - extern void tty_port_init(struct tty_port *port); extern void tty_port_link_device(struct tty_port *port, struct tty_driver *driver, unsigned index); @@ -699,13 +627,8 @@ static inline int tty_port_users(struct tty_port *port) extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc); extern int tty_unregister_ldisc(int disc); extern int tty_set_ldisc(struct tty_struct *tty, int disc); -extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty); -extern void tty_ldisc_release(struct tty_struct *tty); -extern int __must_check tty_ldisc_init(struct tty_struct *tty); -extern void tty_ldisc_deinit(struct tty_struct *tty); extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p, char *f, int count); -extern void tty_sysctl_init(void); /* n_tty.c */ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops); @@ -717,20 +640,10 @@ static inline void n_tty_init(void) { } /* tty_audit.c */ #ifdef CONFIG_AUDIT -extern void tty_audit_add_data(struct tty_struct *tty, const void *data, - size_t size); extern void tty_audit_exit(void); extern void tty_audit_fork(struct signal_struct *sig); -extern void tty_audit_tiocsti(struct tty_struct *tty, char ch); extern int tty_audit_push(void); #else -static inline void tty_audit_add_data(struct tty_struct *tty, const void *data, - size_t size) -{ -} -static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch) -{ -} static inline void tty_audit_exit(void) { } @@ -772,16 +685,4 @@ static inline void proc_tty_register_driver(struct tty_driver *d) {} static inline void proc_tty_unregister_driver(struct tty_driver *d) {} #endif -#define tty_msg(fn, tty, f, ...) 
\ - fn("%s %s: " f, tty_driver_name(tty), tty_name(tty), ##__VA_ARGS__) - -#define tty_debug(tty, f, ...) tty_msg(pr_debug, tty, f, ##__VA_ARGS__) -#define tty_info(tty, f, ...) tty_msg(pr_info, tty, f, ##__VA_ARGS__) -#define tty_notice(tty, f, ...) tty_msg(pr_notice, tty, f, ##__VA_ARGS__) -#define tty_warn(tty, f, ...) tty_msg(pr_warn, tty, f, ##__VA_ARGS__) -#define tty_err(tty, f, ...) tty_msg(pr_err, tty, f, ##__VA_ARGS__) - -#define tty_info_ratelimited(tty, f, ...) \ - tty_msg(pr_info_ratelimited, tty, f, ##__VA_ARGS__) - #endif diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 61c3372d3f32..2f719b471d52 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -228,7 +228,7 @@ * * Called when the device receives a TIOCGICOUNT ioctl. Passed a kernel * structure to complete. This method is optional and will only be called - * if provided (otherwise EINVAL will be returned). + * if provided (otherwise ENOTTY will be returned). */ #include <linux/export.h> diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h index 572a07976116..31284b55bd4f 100644 --- a/include/linux/tty_ldisc.h +++ b/include/linux/tty_ldisc.h @@ -173,7 +173,6 @@ extern int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass, struct tty_ldisc_ops { - int magic; char *name; int num; int flags; @@ -218,8 +217,6 @@ struct tty_ldisc { struct tty_struct *tty; }; -#define TTY_LDISC_MAGIC 0x5403 - #define LDISC_FLAG_DEFINED 0x00000001 #define MODULE_ALIAS_LDISC(ldisc) \ diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index c7c6e8b8344d..c05e903cef02 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -397,6 +397,7 @@ long strnlen_user_nofault(const void __user *unsafe_addr, long count); #define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e) #define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e) #define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e) +#define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e) static inline unsigned long user_access_save(void) { return 0UL; } static inline void user_access_restore(unsigned long flags) { } #endif diff --git a/include/linux/udp.h b/include/linux/udp.h index aa84597bdc33..ae66dadd8543 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -51,7 +51,9 @@ struct udp_sock { * different encapsulation layer set * this */ - gro_enabled:1; /* Can accept GRO packets */ + gro_enabled:1, /* Request GRO aggregation */ + accept_udp_l4:1, + accept_udp_fraglist:1; /* * Following member retains the information to create a UDP header * when the socket is uncorked. 
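Like the other unsafe_*() accessors, the new unsafe_copy_from_user() above may only be used inside a user_access_begin()/user_access_end() section and branches to the supplied error label on fault. A sketch of the expected pattern; struct example_arg and the helper name are hypothetical:

static int example_read_arg(struct example_arg __user *uarg,
                            struct example_arg *arg)
{
        if (!user_access_begin(uarg, sizeof(*arg)))
                return -EFAULT;
        unsafe_copy_from_user(arg, uarg, sizeof(*arg), efault);
        user_access_end();
        return 0;

efault:
        user_access_end();
        return -EFAULT;
}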
@@ -131,8 +133,22 @@ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk, static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb) { - return !udp_sk(sk)->gro_enabled && skb_is_gso(skb) && - skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4; + if (!skb_is_gso(skb)) + return false; + + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && !udp_sk(sk)->accept_udp_l4) + return true; + + if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist) + return true; + + return false; +} + +static inline void udp_allow_gso(struct sock *sk) +{ + udp_sk(sk)->accept_udp_l4 = 1; + udp_sk(sk)->accept_udp_fraglist = 1; } #define udp_portaddr_for_each_entry(__sk, list) \ diff --git a/include/linux/uio.h b/include/linux/uio.h index 27ff8eb786dc..d3ec87706d75 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -24,6 +24,7 @@ enum iter_type { ITER_BVEC = 16, ITER_PIPE = 32, ITER_DISCARD = 64, + ITER_XARRAY = 128, }; struct iov_iter { @@ -39,6 +40,7 @@ struct iov_iter { const struct iovec *iov; const struct kvec *kvec; const struct bio_vec *bvec; + struct xarray *xarray; struct pipe_inode_info *pipe; }; union { @@ -47,6 +49,7 @@ struct iov_iter { unsigned int head; unsigned int start_head; }; + loff_t xarray_start; }; }; @@ -80,6 +83,11 @@ static inline bool iov_iter_is_discard(const struct iov_iter *i) return iov_iter_type(i) == ITER_DISCARD; } +static inline bool iov_iter_is_xarray(const struct iov_iter *i) +{ + return iov_iter_type(i) == ITER_XARRAY; +} + static inline unsigned char iov_iter_rw(const struct iov_iter *i) { return i->type & (READ | WRITE); @@ -221,6 +229,8 @@ void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_ void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe, size_t count); void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count); +void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray, + loff_t start, size_t count); ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, size_t *start); ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, diff --git a/include/linux/usb.h b/include/linux/usb.h index d6a41841b93e..eaae24217e8a 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -560,6 +560,7 @@ struct usb3_lpm_parameters { * @speed: device speed: high/full/low (or error) * @rx_lanes: number of rx lanes in use, USB 3.2 adds dual-lane support * @tx_lanes: number of tx lanes in use, USB 3.2 adds dual-lane support + * @ssp_rate: SuperSpeed Plus phy signaling rate and lane count * @tt: Transaction Translator info; used with low/full speed dev, highspeed hub * @ttport: device port on that tt hub * @toggle: one bit for each endpoint, with ([0] = IN, [1] = OUT) endpoints @@ -636,6 +637,7 @@ struct usb_device { enum usb_device_speed speed; unsigned int rx_lanes; unsigned int tx_lanes; + enum usb_ssp_rate ssp_rate; struct usb_tt *tt; int ttport; @@ -841,7 +843,7 @@ extern int usb_free_streams(struct usb_interface *interface, /* used these for multi-interface device registration */ extern int usb_driver_claim_interface(struct usb_driver *driver, - struct usb_interface *iface, void *priv); + struct usb_interface *iface, void *data); /** * usb_interface_claimed - returns true iff an interface is claimed @@ -880,6 +882,15 @@ extern struct usb_host_interface *usb_find_alt_setting( unsigned int iface_num, unsigned int alt_num); +#if 
IS_REACHABLE(CONFIG_USB) +int usb_for_each_port(void *data, int (*fn)(struct device *, void *)); +#else +static inline int usb_for_each_port(void *data, int (*fn)(struct device *, void *)) +{ + return 0; +} +#endif + /* port claiming functions */ int usb_hub_claim_port(struct usb_device *hdev, unsigned port1, struct usb_dev_state *owner); @@ -1259,8 +1270,6 @@ struct usb_device_driver { #define to_usb_device_driver(d) container_of(d, struct usb_device_driver, \ drvwrap.driver) -extern struct bus_type usb_bus_type; - /** * struct usb_class_driver - identifies a USB driver that wants to use the USB major number * @name: the usb class device name for this driver. Will show up in sysfs. diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h index abdd310c77f0..1cffa34740b0 100644 --- a/include/linux/usb/ch9.h +++ b/include/linux/usb/ch9.h @@ -45,73 +45,15 @@ enum usb_ssp_rate { USB_SSP_GEN_2x2, }; -/** - * usb_ep_type_string() - Returns human readable-name of the endpoint type. - * @ep_type: The endpoint type to return human-readable name for. If it's not - * any of the types: USB_ENDPOINT_XFER_{CONTROL, ISOC, BULK, INT}, - * usually got by usb_endpoint_type(), the string 'unknown' will be returned. - */ extern const char *usb_ep_type_string(int ep_type); - -/** - * usb_speed_string() - Returns human readable-name of the speed. - * @speed: The speed to return human-readable name for. If it's not - * any of the speeds defined in usb_device_speed enum, string for - * USB_SPEED_UNKNOWN will be returned. - */ extern const char *usb_speed_string(enum usb_device_speed speed); - -/** - * usb_get_maximum_speed - Get maximum requested speed for a given USB - * controller. - * @dev: Pointer to the given USB controller device - * - * The function gets the maximum speed string from property "maximum-speed", - * and returns the corresponding enum usb_device_speed. - */ extern enum usb_device_speed usb_get_maximum_speed(struct device *dev); - -/** - * usb_get_maximum_ssp_rate - Get the signaling rate generation and lane count - * of a SuperSpeed Plus capable device. - * @dev: Pointer to the given USB controller device - * - * If the string from "maximum-speed" property is super-speed-plus-genXxY where - * 'X' is the generation number and 'Y' is the number of lanes, then this - * function returns the corresponding enum usb_ssp_rate. - */ extern enum usb_ssp_rate usb_get_maximum_ssp_rate(struct device *dev); - -/** - * usb_state_string - Returns human readable name for the state. - * @state: The state to return a human-readable name for. If it's not - * any of the states devices in usb_device_state_string enum, - * the string UNKNOWN will be returned. - */ extern const char *usb_state_string(enum usb_device_state state); +unsigned int usb_decode_interval(const struct usb_endpoint_descriptor *epd, + enum usb_device_speed speed); #ifdef CONFIG_TRACING -/** - * usb_decode_ctrl - Returns human readable representation of control request. - * @str: buffer to return a human-readable representation of control request. - * This buffer should have about 200 bytes. - * @size: size of str buffer. - * @bRequestType: matches the USB bmRequestType field - * @bRequest: matches the USB bRequest field - * @wValue: matches the USB wValue field (CPU byte order) - * @wIndex: matches the USB wIndex field (CPU byte order) - * @wLength: matches the USB wLength field (CPU byte order) - * - * Function returns decoded, formatted and human-readable description of - * control request packet. 
- * - * The usage scenario for this is for tracepoints, so function as a return - * use the same value as in parameters. This approach allows to use this - * function in TP_printk - * - * Important: wValue, wIndex, wLength parameters before invoking this function - * should be processed by le16_to_cpu macro. - */ extern const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType, __u8 bRequest, __u16 wValue, __u16 wIndex, __u16 wLength); diff --git a/include/linux/usb/ehci_pdriver.h b/include/linux/usb/ehci_pdriver.h index dd742afdc03f..89fc901e778f 100644 --- a/include/linux/usb/ehci_pdriver.h +++ b/include/linux/usb/ehci_pdriver.h @@ -50,6 +50,7 @@ struct usb_ehci_pdata { unsigned no_io_watchdog:1; unsigned reset_on_resume:1; unsigned dma_mask_64:1; + unsigned spurious_oc:1; /* Turn on all power and clocks */ int (*power_on)(struct platform_device *pdev); diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h index 70d681918d01..bf00259493e0 100644 --- a/include/linux/usb/pd.h +++ b/include/linux/usb/pd.h @@ -493,4 +493,6 @@ static inline unsigned int rdo_max_power(u32 rdo) #define PD_N_CAPS_COUNT (PD_T_NO_RESPONSE / PD_T_SEND_SOURCE_CAP) #define PD_N_HARD_RESET_COUNT 2 +#define PD_P_SNK_STDBY_MW 2500 /* 2500 mW */ + #endif /* __LINUX_USB_PD_H */ diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 952272002e48..8c63fa9bfc74 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -130,6 +130,8 @@ static inline void usb_set_serial_port_data(struct usb_serial_port *port, * @dev: pointer to the struct usb_device for this device * @type: pointer to the struct usb_serial_driver for this device * @interface: pointer to the struct usb_interface for this device + * @sibling: pointer to the struct usb_interface of any sibling interface + * @suspend_count: number of suspended (sibling) interfaces * @num_ports: the number of ports this device has * @num_interrupt_in: number of interrupt in endpoints we have * @num_interrupt_out: number of interrupt out endpoints we have @@ -145,8 +147,9 @@ struct usb_serial { struct usb_device *dev; struct usb_serial_driver *type; struct usb_interface *interface; + struct usb_interface *sibling; + unsigned int suspend_count; unsigned char disconnected:1; - unsigned char suspending:1; unsigned char attached:1; unsigned char minors_reserved:1; unsigned char num_ports; @@ -276,7 +279,7 @@ struct usb_serial_driver { int (*write_room)(struct tty_struct *tty); int (*ioctl)(struct tty_struct *tty, unsigned int cmd, unsigned long arg); - int (*get_serial)(struct tty_struct *tty, struct serial_struct *ss); + void (*get_serial)(struct tty_struct *tty, struct serial_struct *ss); int (*set_serial)(struct tty_struct *tty, struct serial_struct *ss); void (*set_termios)(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old); @@ -335,6 +338,9 @@ static inline void usb_serial_console_disconnect(struct usb_serial *serial) {} /* Functions needed by other parts of the usbserial core */ struct usb_serial_port *usb_serial_port_get_by_minor(unsigned int minor); void usb_serial_put(struct usb_serial *serial); + +int usb_serial_claim_interface(struct usb_serial *serial, struct usb_interface *intf); + int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port); int usb_serial_generic_write_start(struct usb_serial_port *port, gfp_t mem_flags); int usb_serial_generic_write(struct tty_struct *tty, struct usb_serial_port *port, diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h 
index 91b4303ca305..e2e44bb1dad8 100644 --- a/include/linux/usb/typec.h +++ b/include/linux/usb/typec.h @@ -17,6 +17,7 @@ struct typec_partner; struct typec_cable; struct typec_plug; struct typec_port; +struct typec_altmode_ops; struct fwnode_handle; struct device; @@ -138,6 +139,11 @@ struct typec_altmode struct typec_altmode *typec_port_register_altmode(struct typec_port *port, const struct typec_altmode_desc *desc); + +void typec_port_register_altmodes(struct typec_port *port, + const struct typec_altmode_ops *ops, void *drvdata, + struct typec_altmode **altmodes, size_t n); + void typec_unregister_altmode(struct typec_altmode *altmode); struct typec_port *typec_altmode2port(struct typec_altmode *alt); @@ -298,4 +304,17 @@ int typec_find_port_data_role(const char *name); void typec_partner_set_svdm_version(struct typec_partner *partner, enum usb_pd_svdm_ver svdm_version); int typec_get_negotiated_svdm_version(struct typec_port *port); + +#if IS_REACHABLE(CONFIG_TYPEC) +int typec_link_port(struct device *port); +void typec_unlink_port(struct device *port); +#else +static inline int typec_link_port(struct device *port) +{ + return 0; +} + +static inline void typec_unlink_port(struct device *port) { } +#endif + #endif /* __LINUX_USB_TYPEC_H */ diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index cfbfd6fe01df..8336e86ce606 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -53,6 +53,9 @@ struct usbnet { u32 hard_mtu; /* count any extra framing */ size_t rx_urb_size; /* size for rx urbs */ struct mii_if_info mii; + long rx_speed; /* If MII not used */ + long tx_speed; /* If MII not used */ +# define SPEED_UNSET -1 /* various kinds of pending driver work */ struct sk_buff_head rxq; @@ -81,8 +84,6 @@ struct usbnet { # define EVENT_LINK_CHANGE 11 # define EVENT_SET_RX_MODE 12 # define EVENT_NO_IP_ALIGN 13 - u32 rx_speed; /* in bps - NOT Mbps */ - u32 tx_speed; /* in bps - NOT Mbps */ }; static inline struct usb_driver *driver_of(struct usb_interface *intf) @@ -267,10 +268,12 @@ extern void usbnet_pause_rx(struct usbnet *); extern void usbnet_resume_rx(struct usbnet *); extern void usbnet_purge_paused_rxq(struct usbnet *); -extern int usbnet_get_link_ksettings(struct net_device *net, +extern int usbnet_get_link_ksettings_mii(struct net_device *net, struct ethtool_link_ksettings *cmd); -extern int usbnet_set_link_ksettings(struct net_device *net, +extern int usbnet_set_link_ksettings_mii(struct net_device *net, const struct ethtool_link_ksettings *cmd); +extern int usbnet_get_link_ksettings_internal(struct net_device *net, + struct ethtool_link_ksettings *cmd); extern u32 usbnet_get_link(struct net_device *net); extern u32 usbnet_get_msglevel(struct net_device *); extern void usbnet_set_msglevel(struct net_device *, u32); diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index 64cf8ebdc4ec..1d08dbbcfe32 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -50,6 +50,10 @@ enum ucount_type { UCOUNT_INOTIFY_INSTANCES, UCOUNT_INOTIFY_WATCHES, #endif +#ifdef CONFIG_FANOTIFY + UCOUNT_FANOTIFY_GROUPS, + UCOUNT_FANOTIFY_MARKS, +#endif UCOUNT_COUNTS, }; @@ -63,6 +67,9 @@ struct user_namespace { kgid_t group; struct ns_common ns; unsigned long flags; + /* parent_could_setfcap: true if the creator if this ns had CAP_SETFCAP + * in its effective capability set at the child ns creation time. */ + bool parent_could_setfcap; #ifdef CONFIG_KEYS /* List of joinable keyrings in this namespace. 
Modification access of diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index a8e5f3ea9bb2..794d1538b8ba 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -17,6 +17,9 @@ #include <linux/mm.h> #include <asm-generic/pgtable_uffd.h> +/* The set of all possible UFFD-related VM flags. */ +#define __VM_UFFD_FLAGS (VM_UFFD_MISSING | VM_UFFD_WP | VM_UFFD_MINOR) + /* * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining * new flags, since they might collide with O_* ones. We want @@ -34,6 +37,22 @@ extern int sysctl_unprivileged_userfaultfd; extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason); +/* + * The mode of operation for __mcopy_atomic and its helpers. + * + * This is almost an implementation detail (mcopy_atomic below doesn't take this + * as a parameter), but it's exposed here because memory-kind-specific + * implementations (e.g. hugetlbfs) need to know the mode of operation. + */ +enum mcopy_atomic_mode { + /* A normal copy_from_user into the destination range. */ + MCOPY_ATOMIC_NORMAL, + /* Don't copy; map the destination range to the zero page. */ + MCOPY_ATOMIC_ZEROPAGE, + /* Just install pte(s) with the existing page(s) in the page cache. */ + MCOPY_ATOMIC_CONTINUE, +}; + extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len, bool *mmap_changing, __u64 mode); @@ -41,6 +60,8 @@ extern ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long len, bool *mmap_changing); +extern ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long dst_start, + unsigned long len, bool *mmap_changing); extern int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start, unsigned long len, bool enable_wp, bool *mmap_changing); @@ -52,6 +73,22 @@ static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx; } +/* + * Never enable huge pmd sharing on some uffd registered vmas: + * + * - VM_UFFD_WP VMAs, because write protect information is per pgtable entry. + * + * - VM_UFFD_MINOR VMAs, because otherwise we would never get minor faults for + * VMAs which share huge pmds. (If you have two mappings to the same + * underlying pages, and fault in the non-UFFD-registered one with a write, + * with huge pmd sharing this would *also* setup the second UFFD-registered + * mapping, and we'd not get minor faults.) 
+ */ +static inline bool uffd_disable_huge_pmd_share(struct vm_area_struct *vma) +{ + return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR); +} + static inline bool userfaultfd_missing(struct vm_area_struct *vma) { return vma->vm_flags & VM_UFFD_MISSING; @@ -62,6 +99,11 @@ static inline bool userfaultfd_wp(struct vm_area_struct *vma) return vma->vm_flags & VM_UFFD_WP; } +static inline bool userfaultfd_minor(struct vm_area_struct *vma) +{ + return vma->vm_flags & VM_UFFD_MINOR; +} + static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma, pte_t pte) { @@ -76,7 +118,7 @@ static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma, static inline bool userfaultfd_armed(struct vm_area_struct *vma) { - return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP); + return vma->vm_flags & __VM_UFFD_FLAGS; } extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *); @@ -123,6 +165,11 @@ static inline bool userfaultfd_wp(struct vm_area_struct *vma) return false; } +static inline bool userfaultfd_minor(struct vm_area_struct *vma) +{ + return false; +} + static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma, pte_t pte) { diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 15fa085fab05..f311d227aa1b 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -8,7 +8,7 @@ #include <linux/vhost_iotlb.h> /** - * vDPA callback definition. + * struct vdpa_calllback - vDPA callback definition. * @callback: interrupt callback function * @private: the data passed to the callback function */ @@ -18,7 +18,7 @@ struct vdpa_callback { }; /** - * vDPA notification area + * struct vdpa_notification_area - vDPA notification area * @addr: base address of the notification area * @size: size of the notification area */ @@ -28,7 +28,7 @@ struct vdpa_notification_area { }; /** - * vDPA vq_state definition + * struct vdpa_vq_state - vDPA vq_state definition * @avail_index: available index */ struct vdpa_vq_state { @@ -38,7 +38,7 @@ struct vdpa_vq_state { struct vdpa_mgmt_dev; /** - * vDPA device - representation of a vDPA device + * struct vdpa_device - representation of a vDPA device * @dev: underlying device * @dma_dev: the actual device that is performing DMA * @config: the configuration ops for this device. @@ -59,7 +59,7 @@ struct vdpa_device { }; /** - * vDPA IOVA range - the IOVA range support by the device + * struct vdpa_iova_range - the IOVA range support by the device * @first: start of the IOVA range * @last: end of the IOVA range */ @@ -69,7 +69,7 @@ struct vdpa_iova_range { }; /** - * vDPA_config_ops - operations for configuring a vDPA device. + * struct vdpa_config_ops - operations for configuring a vDPA device. * Note: vDPA device drivers are required to implement all of the * operations unless it is mentioned to be optional in the following * list. 
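Usage note (not part of the patch): with VM_UFFD_MINOR folded into __VM_UFFD_FLAGS, userfaultfd_armed() above now covers missing, write-protect and minor faults. A sketch of a hypothetical fault-path predicate built only on the helpers declared in this header.

#include <linux/userfaultfd_k.h>

/* Hypothetical check: should this fault be forwarded to userfaultfd? */
static bool my_vma_wants_uffd(struct vm_area_struct *vma, bool minor_fault)
{
	if (!userfaultfd_armed(vma))	/* MISSING | WP | MINOR */
		return false;

	if (minor_fault)
		return userfaultfd_minor(vma);

	return userfaultfd_missing(vma) || userfaultfd_wp(vma);
}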
@@ -150,6 +150,9 @@ struct vdpa_iova_range { * @set_status: Set the device status * @vdev: vdpa device * @status: virtio device status + * @get_config_size: Get the size of the configuration space + * @vdev: vdpa device + * Returns size_t: configuration size * @get_config: Read from device specific configuration space * @vdev: vdpa device * @offset: offset from the beginning of @@ -231,6 +234,7 @@ struct vdpa_config_ops { u32 (*get_vendor_id)(struct vdpa_device *vdev); u8 (*get_status)(struct vdpa_device *vdev); void (*set_status)(struct vdpa_device *vdev, u8 status); + size_t (*get_config_size)(struct vdpa_device *vdev); void (*get_config)(struct vdpa_device *vdev, unsigned int offset, void *buf, unsigned int len); void (*set_config)(struct vdpa_device *vdev, unsigned int offset, @@ -267,7 +271,7 @@ int _vdpa_register_device(struct vdpa_device *vdev, int nvqs); void _vdpa_unregister_device(struct vdpa_device *vdev); /** - * vdpa_driver - operations for a vDPA driver + * struct vdpa_driver - operations for a vDPA driver * @driver: underlying device driver * @probe: the function to call when a device is found. Returns 0 or -errno. * @remove: the function to call when a device is removed. @@ -344,18 +348,18 @@ static inline void vdpa_get_config(struct vdpa_device *vdev, unsigned offset, } /** - * vdpa_mgmtdev_ops - vdpa device ops - * @dev_add: Add a vdpa device using alloc and register - * @mdev: parent device to use for device addition - * @name: name of the new vdpa device - * Driver need to add a new device using _vdpa_register_device() - * after fully initializing the vdpa device. Driver must return 0 - * on success or appropriate error code. - * @dev_del: Remove a vdpa device using unregister - * @mdev: parent device to use for device removal - * @dev: vdpa device to remove - * Driver need to remove the specified device by calling - * _vdpa_unregister_device(). + * struct vdpa_mgmtdev_ops - vdpa device ops + * @dev_add: Add a vdpa device using alloc and register + * @mdev: parent device to use for device addition + * @name: name of the new vdpa device + * Driver need to add a new device using _vdpa_register_device() + * after fully initializing the vdpa device. Driver must return 0 + * on success or appropriate error code. + * @dev_del: Remove a vdpa device using unregister + * @mdev: parent device to use for device removal + * @dev: vdpa device to remove + * Driver need to remove the specified device by calling + * _vdpa_unregister_device(). 
*/ struct vdpa_mgmtdev_ops { int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name); diff --git a/include/linux/vfio.h b/include/linux/vfio.h index b7e18bde5aa8..a2c5b30e1763 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -15,6 +15,17 @@ #include <linux/poll.h> #include <uapi/linux/vfio.h> +struct vfio_device { + struct device *dev; + const struct vfio_device_ops *ops; + struct vfio_group *group; + + /* Members below here are private, not for driver use */ + refcount_t refcount; + struct completion comp; + struct list_head group_next; +}; + /** * struct vfio_device_ops - VFIO bus driver device callbacks * @@ -32,30 +43,28 @@ */ struct vfio_device_ops { char *name; - int (*open)(void *device_data); - void (*release)(void *device_data); - ssize_t (*read)(void *device_data, char __user *buf, + int (*open)(struct vfio_device *vdev); + void (*release)(struct vfio_device *vdev); + ssize_t (*read)(struct vfio_device *vdev, char __user *buf, size_t count, loff_t *ppos); - ssize_t (*write)(void *device_data, const char __user *buf, + ssize_t (*write)(struct vfio_device *vdev, const char __user *buf, size_t count, loff_t *size); - long (*ioctl)(void *device_data, unsigned int cmd, + long (*ioctl)(struct vfio_device *vdev, unsigned int cmd, unsigned long arg); - int (*mmap)(void *device_data, struct vm_area_struct *vma); - void (*request)(void *device_data, unsigned int count); - int (*match)(void *device_data, char *buf); + int (*mmap)(struct vfio_device *vdev, struct vm_area_struct *vma); + void (*request)(struct vfio_device *vdev, unsigned int count); + int (*match)(struct vfio_device *vdev, char *buf); }; extern struct iommu_group *vfio_iommu_group_get(struct device *dev); extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev); -extern int vfio_add_group_dev(struct device *dev, - const struct vfio_device_ops *ops, - void *device_data); - -extern void *vfio_del_group_dev(struct device *dev); +void vfio_init_group_dev(struct vfio_device *device, struct device *dev, + const struct vfio_device_ops *ops); +int vfio_register_group_dev(struct vfio_device *device); +void vfio_unregister_group_dev(struct vfio_device *device); extern struct vfio_device *vfio_device_get_from_dev(struct device *dev); extern void vfio_device_put(struct vfio_device *device); -extern void *vfio_device_data(struct vfio_device *device); /* events for the backend driver notify callback */ enum vfio_iommu_notify_type { diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h index fc6dfeba04a5..dc6ddce92066 100644 --- a/include/linux/vgaarb.h +++ b/include/linux/vgaarb.h @@ -112,7 +112,9 @@ static inline int vga_get_uninterruptible(struct pci_dev *pdev, #if defined(CONFIG_VGA_ARB) extern void vga_put(struct pci_dev *pdev, unsigned int rsrc); #else -#define vga_put(pdev, rsrc) +static inline void vga_put(struct pci_dev *pdev, unsigned int rsrc) +{ +} #endif diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h index f26acbeec965..6a95b58fd0f4 100644 --- a/include/linux/virtio_pci_modern.h +++ b/include/linux/virtio_pci_modern.h @@ -13,6 +13,8 @@ struct virtio_pci_modern_device { void __iomem *device; /* Base of vq notifications (non-legacy mode). 
*/ void __iomem *notify_base; + /* Physical base of vq notifications */ + resource_size_t notify_pa; /* Where to read and clear interrupt */ u8 __iomem *isr; @@ -99,13 +101,8 @@ void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev, u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev, u16 idx); u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev); -u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev, - u16 idx); -void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off, - size_t minlen, - u32 align, - u32 start, u32 size, - size_t *len); +void __iomem * vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev, + u16 index, resource_size_t *pa); int vp_modern_probe(struct virtio_pci_modern_device *mdev); void vp_modern_remove(struct virtio_pci_modern_device *mdev); #endif diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 18e75974d4e3..ae0dd1948c2b 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -71,6 +71,10 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #ifdef CONFIG_HUGETLB_PAGE HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, #endif +#ifdef CONFIG_CMA + CMA_ALLOC_SUCCESS, + CMA_ALLOC_FAIL, +#endif UNEVICTABLE_PGCULLED, /* culled to noreclaim list */ UNEVICTABLE_PGSCANNED, /* scanned for reclaimability */ UNEVICTABLE_PGRESCUED, /* rescued from noreclaim list */ @@ -121,6 +125,10 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, SWAP_RA, SWAP_RA_HIT, #endif +#ifdef CONFIG_X86 + DIRECT_MAP_LEVEL2_SPLIT, + DIRECT_MAP_LEVEL3_SPLIT, +#endif NR_VM_EVENT_ITEMS }; diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index df92211cf771..4d668abb6391 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -26,13 +26,14 @@ struct notifier_block; /* in notifier.h */ #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ #define VM_FLUSH_RESET_PERMS 0x00000100 /* reset direct map and flush TLB on unmap, can't be freed in atomic context */ #define VM_MAP_PUT_PAGES 0x00000200 /* put pages and free array in vfree */ +#define VM_NO_HUGE_VMAP 0x00000400 /* force PAGE_SIZE pte mapping */ /* * VM_KASAN is used slighly differently depending on CONFIG_KASAN_VMALLOC. * * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after * shadow memory has been mapped. It's used to handle allocation errors so that - * we don't try to poision shadow on free if it was never allocated. + * we don't try to poison shadow on free if it was never allocated. * * Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to * determine which allocations need the module shadow freed. @@ -42,7 +43,7 @@ struct notifier_block; /* in notifier.h */ /* * Maximum alignment for ioremap() regions. - * Can be overriden by arch-specific value. + * Can be overridden by arch-specific value. 
*/ #ifndef IOREMAP_MAX_ORDER #define IOREMAP_MAX_ORDER (7 + PAGE_SHIFT) /* 128 pages */ @@ -54,6 +55,9 @@ struct vm_struct { unsigned long size; unsigned long flags; struct page **pages; +#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC + unsigned int page_order; +#endif unsigned int nr_pages; phys_addr_t phys_addr; const void *caller; @@ -78,6 +82,28 @@ struct vmap_area { }; }; +/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */ +#ifndef arch_vmap_p4d_supported +static inline bool arch_vmap_p4d_supported(pgprot_t prot) +{ + return false; +} +#endif + +#ifndef arch_vmap_pud_supported +static inline bool arch_vmap_pud_supported(pgprot_t prot) +{ + return false; +} +#endif + +#ifndef arch_vmap_pmd_supported +static inline bool arch_vmap_pmd_supported(pgprot_t prot) +{ + return false; +} +#endif + /* * Highlevel APIs for driver use */ @@ -166,13 +192,27 @@ void free_vm_area(struct vm_struct *area); extern struct vm_struct *remove_vm_area(const void *addr); extern struct vm_struct *find_vm_area(const void *addr); +static inline bool is_vm_area_hugepages(const void *addr) +{ + /* + * This may not 100% tell if the area is mapped with > PAGE_SIZE + * page table entries, if for some reason the architecture indicates + * larger sizes are available but decides not to use them, nothing + * prevents that. This only indicates the size of the physical page + * allocated in the vmalloc layer. + */ +#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC + return find_vm_area(addr)->page_order > 0; +#else + return false; +#endif +} + #ifdef CONFIG_MMU -extern int map_kernel_range_noflush(unsigned long start, unsigned long size, - pgprot_t prot, struct page **pages); -int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot, - struct page **pages); -extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); -extern void unmap_kernel_range(unsigned long addr, unsigned long size); +int vmap_range(unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot, + unsigned int max_page_shift); +void vunmap_range(unsigned long addr, unsigned long end); static inline void set_vm_flush_reset_perms(void *addr) { struct vm_struct *vm = find_vm_area(addr); @@ -180,27 +220,15 @@ static inline void set_vm_flush_reset_perms(void *addr) if (vm) vm->flags |= VM_FLUSH_RESET_PERMS; } + #else -static inline int -map_kernel_range_noflush(unsigned long start, unsigned long size, - pgprot_t prot, struct page **pages) -{ - return size >> PAGE_SHIFT; -} -#define map_kernel_range map_kernel_range_noflush -static inline void -unmap_kernel_range_noflush(unsigned long addr, unsigned long size) -{ -} -#define unmap_kernel_range unmap_kernel_range_noflush static inline void set_vm_flush_reset_perms(void *addr) { } #endif -/* for /dev/kmem */ +/* for /proc/kcore */ extern long vread(char *buf, char *addr, unsigned long count); -extern long vwrite(char *buf, char *addr, unsigned long count); /* * Internals. Dont't use.. 
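Usage note (not part of the patch): is_vm_area_hugepages() above only reports the order of the physical pages the vmalloc layer allocated, not a guarantee about page-table entry size. A small hypothetical debug helper built on it.

#include <linux/printk.h>
#include <linux/vmalloc.h>

/* Hypothetical helper: report how a vmalloc'ed buffer is backed. Only
 * meaningful when the arch selects HAVE_ARCH_HUGE_VMALLOC and the area
 * was not created with VM_NO_HUGE_VMAP. */
static void my_report_vmalloc_backing(const void *buf)
{
	pr_info("%p: %s backing\n", buf,
		is_vm_area_hugepages(buf) ? "huge page" : "PAGE_SIZE");
}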
@@ -241,7 +269,7 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) int register_vmap_purge_notifier(struct notifier_block *nb); int unregister_vmap_purge_notifier(struct notifier_block *nb); -#ifdef CONFIG_MMU +#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK) bool vmalloc_dump_obj(void *object); #else static inline bool vmalloc_dump_obj(void *object) { return false; } diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 506d625163a1..3299cd69e4ca 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -512,16 +512,10 @@ static inline void mod_lruvec_page_state(struct page *page, #endif /* CONFIG_MEMCG */ -static inline void __inc_lruvec_state(struct lruvec *lruvec, - enum node_stat_item idx) -{ - __mod_lruvec_state(lruvec, idx, 1); -} - -static inline void __dec_lruvec_state(struct lruvec *lruvec, - enum node_stat_item idx) +static inline void inc_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx) { - __mod_lruvec_state(lruvec, idx, -1); + mod_lruvec_state(lruvec, idx, 1); } static inline void __inc_lruvec_page_state(struct page *page, @@ -536,18 +530,6 @@ static inline void __dec_lruvec_page_state(struct page *page, __mod_lruvec_page_state(page, idx, -1); } -static inline void inc_lruvec_state(struct lruvec *lruvec, - enum node_stat_item idx) -{ - mod_lruvec_state(lruvec, idx, 1); -} - -static inline void dec_lruvec_state(struct lruvec *lruvec, - enum node_stat_item idx) -{ - mod_lruvec_state(lruvec, idx, -1); -} - static inline void inc_lruvec_page_state(struct page *page, enum node_stat_item idx) { diff --git a/include/linux/vringh.h b/include/linux/vringh.h index 59bd50f99291..84db7b8f912f 100644 --- a/include/linux/vringh.h +++ b/include/linux/vringh.h @@ -46,6 +46,9 @@ struct vringh { /* IOTLB for this vring */ struct vhost_iotlb *iotlb; + /* spinlock to synchronize IOTLB accesses */ + spinlock_t *iotlb_lock; + /* The function to call to notify the guest about added buffers */ void (*notify)(struct vringh *); }; @@ -196,6 +199,19 @@ static inline void vringh_kiov_cleanup(struct vringh_kiov *kiov) kiov->iov = NULL; } +static inline size_t vringh_kiov_length(struct vringh_kiov *kiov) +{ + size_t len = 0; + int i; + + for (i = kiov->i; i < kiov->used; i++) + len += kiov->iov[i].iov_len; + + return len; +} + +void vringh_kiov_advance(struct vringh_kiov *kiov, size_t len); + int vringh_getdesc_kern(struct vringh *vrh, struct vringh_kiov *riov, struct vringh_kiov *wiov, @@ -258,7 +274,8 @@ static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val) #if IS_REACHABLE(CONFIG_VHOST_IOTLB) -void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb); +void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb, + spinlock_t *iotlb_lock); int vringh_init_iotlb(struct vringh *vrh, u64 features, unsigned int num, bool weak_barriers, diff --git a/include/linux/vtime.h b/include/linux/vtime.h index 041d6524d144..3684487d01e1 100644 --- a/include/linux/vtime.h +++ b/include/linux/vtime.h @@ -3,12 +3,46 @@ #define _LINUX_KERNEL_VTIME_H #include <linux/context_tracking_state.h> +#include <linux/sched.h> + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE #include <asm/vtime.h> #endif +/* + * Common vtime APIs + */ +#ifdef CONFIG_VIRT_CPU_ACCOUNTING +extern void vtime_account_kernel(struct task_struct *tsk); +extern void vtime_account_idle(struct task_struct *tsk); +#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */ -struct task_struct; +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +extern void arch_vtime_task_switch(struct task_struct *tsk); +extern 
void vtime_user_enter(struct task_struct *tsk); +extern void vtime_user_exit(struct task_struct *tsk); +extern void vtime_guest_enter(struct task_struct *tsk); +extern void vtime_guest_exit(struct task_struct *tsk); +extern void vtime_init_idle(struct task_struct *tsk, int cpu); +#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */ +static inline void vtime_user_enter(struct task_struct *tsk) { } +static inline void vtime_user_exit(struct task_struct *tsk) { } +static inline void vtime_guest_enter(struct task_struct *tsk) { } +static inline void vtime_guest_exit(struct task_struct *tsk) { } +static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { } +#endif + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE +extern void vtime_account_irq(struct task_struct *tsk, unsigned int offset); +extern void vtime_account_softirq(struct task_struct *tsk); +extern void vtime_account_hardirq(struct task_struct *tsk); +extern void vtime_flush(struct task_struct *tsk); +#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ +static inline void vtime_account_irq(struct task_struct *tsk, unsigned int offset) { } +static inline void vtime_account_softirq(struct task_struct *tsk) { } +static inline void vtime_account_hardirq(struct task_struct *tsk) { } +static inline void vtime_flush(struct task_struct *tsk) { } +#endif /* * vtime_accounting_enabled_this_cpu() definitions/declarations @@ -18,6 +52,18 @@ struct task_struct; static inline bool vtime_accounting_enabled_this_cpu(void) { return true; } extern void vtime_task_switch(struct task_struct *prev); +static __always_inline void vtime_account_guest_enter(void) +{ + vtime_account_kernel(current); + current->flags |= PF_VCPU; +} + +static __always_inline void vtime_account_guest_exit(void) +{ + vtime_account_kernel(current); + current->flags &= ~PF_VCPU; +} + #elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN) /* @@ -49,49 +95,37 @@ static inline void vtime_task_switch(struct task_struct *prev) vtime_task_switch_generic(prev); } +static __always_inline void vtime_account_guest_enter(void) +{ + if (vtime_accounting_enabled_this_cpu()) + vtime_guest_enter(current); + else + current->flags |= PF_VCPU; +} + +static __always_inline void vtime_account_guest_exit(void) +{ + if (vtime_accounting_enabled_this_cpu()) + vtime_guest_exit(current); + else + current->flags &= ~PF_VCPU; +} + #else /* !CONFIG_VIRT_CPU_ACCOUNTING */ -static inline bool vtime_accounting_enabled_cpu(int cpu) {return false; } static inline bool vtime_accounting_enabled_this_cpu(void) { return false; } static inline void vtime_task_switch(struct task_struct *prev) { } -#endif - -/* - * Common vtime APIs - */ -#ifdef CONFIG_VIRT_CPU_ACCOUNTING -extern void vtime_account_kernel(struct task_struct *tsk); -extern void vtime_account_idle(struct task_struct *tsk); -#else /* !CONFIG_VIRT_CPU_ACCOUNTING */ -static inline void vtime_account_kernel(struct task_struct *tsk) { } -#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */ +static __always_inline void vtime_account_guest_enter(void) +{ + current->flags |= PF_VCPU; +} -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN -extern void arch_vtime_task_switch(struct task_struct *tsk); -extern void vtime_user_enter(struct task_struct *tsk); -extern void vtime_user_exit(struct task_struct *tsk); -extern void vtime_guest_enter(struct task_struct *tsk); -extern void vtime_guest_exit(struct task_struct *tsk); -extern void vtime_init_idle(struct task_struct *tsk, int cpu); -#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */ -static inline void vtime_user_enter(struct task_struct *tsk) { } -static inline 
void vtime_user_exit(struct task_struct *tsk) { } -static inline void vtime_guest_enter(struct task_struct *tsk) { } -static inline void vtime_guest_exit(struct task_struct *tsk) { } -static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { } -#endif +static __always_inline void vtime_account_guest_exit(void) +{ + current->flags &= ~PF_VCPU; +} -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE -extern void vtime_account_irq(struct task_struct *tsk, unsigned int offset); -extern void vtime_account_softirq(struct task_struct *tsk); -extern void vtime_account_hardirq(struct task_struct *tsk); -extern void vtime_flush(struct task_struct *tsk); -#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ -static inline void vtime_account_irq(struct task_struct *tsk, unsigned int offset) { } -static inline void vtime_account_softirq(struct task_struct *tsk) { } -static inline void vtime_account_hardirq(struct task_struct *tsk) { } -static inline void vtime_flush(struct task_struct *tsk) { } #endif diff --git a/include/linux/wmi.h b/include/linux/wmi.h index 8ef7e7faea1e..2cb3913c1f50 100644 --- a/include/linux/wmi.h +++ b/include/linux/wmi.h @@ -37,7 +37,7 @@ struct wmi_driver { const struct wmi_device_id *id_table; int (*probe)(struct wmi_device *wdev, const void *context); - int (*remove)(struct wmi_device *wdev); + void (*remove)(struct wmi_device *wdev); void (*notify)(struct wmi_device *device, union acpi_object *data); long (*filter_callback)(struct wmi_device *wdev, unsigned int cmd, struct wmi_ioctl_buffer *arg); diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h index 6ecf2a0220db..b77f39f319ad 100644 --- a/include/linux/ww_mutex.h +++ b/include/linux/ww_mutex.h @@ -48,39 +48,26 @@ struct ww_acquire_ctx { #endif }; -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \ - , .ww_class = class -#else -# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) -#endif - #define __WW_CLASS_INITIALIZER(ww_class, _is_wait_die) \ { .stamp = ATOMIC_LONG_INIT(0) \ , .acquire_name = #ww_class "_acquire" \ , .mutex_name = #ww_class "_mutex" \ , .is_wait_die = _is_wait_die } -#define __WW_MUTEX_INITIALIZER(lockname, class) \ - { .base = __MUTEX_INITIALIZER(lockname.base) \ - __WW_CLASS_MUTEX_INITIALIZER(lockname, class) } - #define DEFINE_WD_CLASS(classname) \ struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 1) #define DEFINE_WW_CLASS(classname) \ struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 0) -#define DEFINE_WW_MUTEX(mutexname, ww_class) \ - struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class) - /** * ww_mutex_init - initialize the w/w mutex * @lock: the mutex to be initialized * @ww_class: the w/w class the mutex should belong to * * Initialize the w/w mutex to unlocked state and associate it with the given - * class. + * class. Static define macro for w/w mutex is not provided and this function + * is the only way to properly initialize the w/w mutex. * * It is not allowed to initialize an already locked mutex. 
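Usage note (not part of the patch): with DEFINE_WW_MUTEX and __WW_MUTEX_INITIALIZER removed, a w/w mutex can only be set up at run time via ww_mutex_init(), for example (class and structure names below are hypothetical):

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(my_ww_class);	/* hypothetical w/w class */

struct my_buffer {
	struct ww_mutex lock;
};

static void my_buffer_init(struct my_buffer *buf)
{
	ww_mutex_init(&buf->lock, &my_ww_class);	/* replaces DEFINE_WW_MUTEX */
}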
*/ diff --git a/include/linux/wwan.h b/include/linux/wwan.h new file mode 100644 index 000000000000..aa05a253dcf9 --- /dev/null +++ b/include/linux/wwan.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */ + +#ifndef __WWAN_H +#define __WWAN_H + +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/skbuff.h> + +/** + * enum wwan_port_type - WWAN port types + * @WWAN_PORT_AT: AT commands + * @WWAN_PORT_MBIM: Mobile Broadband Interface Model control + * @WWAN_PORT_QMI: Qcom modem/MSM interface for modem control + * @WWAN_PORT_QCDM: Qcom Modem diagnostic interface + * @WWAN_PORT_FIREHOSE: XML based command protocol + * @WWAN_PORT_MAX: Number of supported port types + */ +enum wwan_port_type { + WWAN_PORT_AT, + WWAN_PORT_MBIM, + WWAN_PORT_QMI, + WWAN_PORT_QCDM, + WWAN_PORT_FIREHOSE, + WWAN_PORT_MAX, +}; + +struct wwan_port; + +/** struct wwan_port_ops - The WWAN port operations + * @start: The routine for starting the WWAN port device. + * @stop: The routine for stopping the WWAN port device. + * @tx: The routine that sends WWAN port protocol data to the device. + * + * The wwan_port_ops structure contains a list of low-level operations + * that control a WWAN port device. All functions are mandatory. + */ +struct wwan_port_ops { + int (*start)(struct wwan_port *port); + void (*stop)(struct wwan_port *port); + int (*tx)(struct wwan_port *port, struct sk_buff *skb); +}; + +/** + * wwan_create_port - Add a new WWAN port + * @parent: Device to use as parent and shared by all WWAN ports + * @type: WWAN port type + * @ops: WWAN port operations + * @drvdata: Pointer to caller driver data + * + * Allocate and register a new WWAN port. The port will be automatically exposed + * to user as a character device and attached to the right virtual WWAN device, + * based on the parent pointer. The parent pointer is the device shared by all + * components of a same WWAN modem (e.g. USB dev, PCI dev, MHI controller...). + * + * drvdata will be placed in the WWAN port device driver data and can be + * retrieved with wwan_port_get_drvdata(). + * + * This function must be balanced with a call to wwan_remove_port(). + * + * Returns a valid pointer to wwan_port on success or PTR_ERR on failure + */ +struct wwan_port *wwan_create_port(struct device *parent, + enum wwan_port_type type, + const struct wwan_port_ops *ops, + void *drvdata); + +/** + * wwan_remove_port - Remove a WWAN port + * @port: WWAN port to remove + * + * Remove a previously created port. + */ +void wwan_remove_port(struct wwan_port *port); + +/** + * wwan_port_rx - Receive data from the WWAN port + * @port: WWAN port for which data is received + * @skb: Pointer to the rx buffer + * + * A port driver calls this function upon data reception (MBIM, AT...). + */ +void wwan_port_rx(struct wwan_port *port, struct sk_buff *skb); + +/** + * wwan_port_txoff - Stop TX on WWAN port + * @port: WWAN port for which TX must be stopped + * + * Used for TX flow control, a port driver calls this function to indicate TX + * is temporary unavailable (e.g. due to ring buffer fullness). + */ +void wwan_port_txoff(struct wwan_port *port); + + +/** + * wwan_port_txon - Restart TX on WWAN port + * @port: WWAN port for which TX must be restarted + * + * Used for TX flow control, a port driver calls this function to indicate TX + * is available again. 
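Usage note (not part of the patch): a sketch of how a modem driver might wire up the port API declared in this new header; all my_* names are invented.

#include <linux/err.h>
#include <linux/wwan.h>

static int my_port_start(struct wwan_port *port) { return 0; }
static void my_port_stop(struct wwan_port *port) { }

static int my_port_tx(struct wwan_port *port, struct sk_buff *skb)
{
	/* hand @skb to the underlying transport here */
	consume_skb(skb);
	return 0;
}

static const struct wwan_port_ops my_port_ops = {
	.start	= my_port_start,
	.stop	= my_port_stop,
	.tx	= my_port_tx,
};

static int my_register_at_port(struct device *parent, void *drvdata)
{
	struct wwan_port *port;

	port = wwan_create_port(parent, WWAN_PORT_AT, &my_port_ops, drvdata);
	if (IS_ERR(port))
		return PTR_ERR(port);

	/* received data is pushed up with wwan_port_rx(port, skb);
	 * tear down with wwan_remove_port(port);
	 */
	return 0;
}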
+ */ +void wwan_port_txon(struct wwan_port *port); + +/** + * wwan_port_get_drvdata - Retrieve driver data from a WWAN port + * @port: Related WWAN port + */ +void *wwan_port_get_drvdata(struct wwan_port *port); + +#endif /* __WWAN_H */ diff --git a/include/media/cec.h b/include/media/cec.h index cd35ae6b7560..208c9613c07e 100644 --- a/include/media/cec.h +++ b/include/media/cec.h @@ -28,8 +28,8 @@ * @minor: device node minor number * @registered: the device was correctly registered * @unregistered: the device was unregistered - * @fhs_lock: lock to control access to the filehandle list * @fhs: the list of open filehandles (cec_fh) + * @lock: lock to control access to this structure * * This structure represents a cec-related device node. * diff --git a/include/media/davinci/isif.h b/include/media/davinci/isif.h index e66589c4022d..8369acd26e7e 100644 --- a/include/media/davinci/isif.h +++ b/include/media/davinci/isif.h @@ -177,7 +177,7 @@ struct isif_black_clamp { * 1 - clamp value calculated separately for all colors */ __u8 bc_mode_color; - /* Vrtical start position for bc subtraction */ + /* Vertical start position for bc subtraction */ __u16 vert_start_sub; /* Black clamp for horizontal direction */ struct isif_horz_bclamp horz; @@ -193,7 +193,7 @@ struct isif_color_space_conv { /* Enable color space conversion */ __u8 en; /* - * csc coeffient table. S8Q5, M00 at index 0, M01 at index 1, and + * csc coefficient table. S8Q5, M00 at index 0, M01 at index 1, and * so forth */ struct isif_float_8 coeff[ISIF_CSC_NUM_COEFF]; @@ -340,7 +340,7 @@ struct isif_data_formatter { }; struct isif_df_csc { - /* Color Space Conversion confguration, 0 - csc, 1 - df */ + /* Color Space Conversion configuration, 0 - csc, 1 - df */ __u8 df_or_csc; /* csc configuration valid if df_or_csc is 0 */ struct isif_color_space_conv csc; @@ -406,7 +406,7 @@ struct isif_config_params_raw { struct isif_linearize linearize; /* Data formatter or CSC */ struct isif_df_csc df_csc; - /* Defect Pixel Correction (DFC) confguration */ + /* Defect Pixel Correction (DFC) configuration */ struct isif_dfc dfc; /* Black/Digital Clamp configuration */ struct isif_black_clamp bclamp; diff --git a/include/media/davinci/vpbe_osd.h b/include/media/davinci/vpbe_osd.h index e1b1c76aa50f..a4fc4f2a56fb 100644 --- a/include/media/davinci/vpbe_osd.h +++ b/include/media/davinci/vpbe_osd.h @@ -54,9 +54,9 @@ enum osd_win_layer { * @PIXFMT_4BPP: 4-bits-per-pixel bitmap * @PIXFMT_8BPP: 8-bits-per-pixel bitmap * @PIXFMT_RGB565: 16-bits-per-pixel RGB565 - * @PIXFMT_YCbCrI: YUV 4:2:2 + * @PIXFMT_YCBCRI: YUV 4:2:2 * @PIXFMT_RGB888: 24-bits-per-pixel RGB888 - * @PIXFMT_YCrCbI: YUV 4:2:2 with chroma swap + * @PIXFMT_YCRCBI: YUV 4:2:2 with chroma swap * @PIXFMT_NV12: YUV 4:2:0 planar * @PIXFMT_OSD_ATTR: OSD Attribute Window pixel format (4bpp) * @@ -210,7 +210,7 @@ enum osd_cursor_h_width { }; /** - * enum davinci_cursor_v_width + * enum osd_cursor_v_width * @V_WIDTH_1: vertical line width is 1 line * @V_WIDTH_2: vertical line width is 2 lines * @V_WIDTH_4: vertical line width is 4 lines diff --git a/include/media/dvbdev.h b/include/media/dvbdev.h index b04a38be5183..2f6b0861322a 100644 --- a/include/media/dvbdev.h +++ b/include/media/dvbdev.h @@ -421,7 +421,7 @@ void dvb_module_release(struct i2c_client *client); * dvb_attach - attaches a DVB frontend into the DVB core. * * @FUNCTION: function on a frontend module to be called. - * @ARGS...: @FUNCTION arguments. + * @ARGS: @FUNCTION arguments. 
* * This ancillary function loads a frontend module in runtime and runs * the @FUNCTION function there, with @ARGS. diff --git a/include/media/i2c/adv7343.h b/include/media/i2c/adv7343.h index e4142b1ef8cd..b8937035c5d3 100644 --- a/include/media/i2c/adv7343.h +++ b/include/media/i2c/adv7343.h @@ -21,7 +21,7 @@ #define ADV7343_SVIDEO_ID (2) /** - * adv7343_power_mode - power mode configuration. + * struct adv7343_power_mode - power mode configuration. * @sleep_mode: on enable the current consumption is reduced to micro ampere * level. All DACs and the internal PLL circuit are disabled. * Registers can be read from and written in sleep mode. diff --git a/include/media/i2c/mt9t112.h b/include/media/i2c/mt9t112.h index cc80d5cc2104..e678b6ae8e2f 100644 --- a/include/media/i2c/mt9t112.h +++ b/include/media/i2c/mt9t112.h @@ -14,7 +14,7 @@ struct mt9t112_pll_divider { }; /** - * mt9t112_platform_data - mt9t112 driver interface + * struct mt9t112_platform_data - mt9t112 driver interface * @flags: Sensor media bus configuration. * @divider: Sensor PLL configuration */ diff --git a/include/media/i2c/noon010pc30.h b/include/media/i2c/noon010pc30.h index a035d2d9a564..d1b2e06a1de0 100644 --- a/include/media/i2c/noon010pc30.h +++ b/include/media/i2c/noon010pc30.h @@ -10,6 +10,7 @@ #define NOON010PC30_H /** + * struct noon010pc30_platform_data - platform data * @clk_rate: the clock frequency in Hz * @gpio_nreset: GPIO driving nRESET pin * @gpio_nstby: GPIO driving nSTBY pin diff --git a/include/media/i2c/ov772x.h b/include/media/i2c/ov772x.h index a1702d420087..26f363ea4001 100644 --- a/include/media/i2c/ov772x.h +++ b/include/media/i2c/ov772x.h @@ -46,7 +46,7 @@ struct ov772x_edge_ctrl { } /** - * ov772x_camera_info - ov772x driver interface structure + * struct ov772x_camera_info - ov772x driver interface structure * @flags: Sensor configuration flags * @edgectrl: Sensor edge control */ diff --git a/include/media/i2c/s5c73m3.h b/include/media/i2c/s5c73m3.h index ccb9e5448762..a51f1025ba1c 100644 --- a/include/media/i2c/s5c73m3.h +++ b/include/media/i2c/s5c73m3.h @@ -35,6 +35,7 @@ struct s5c73m3_gpio { * @mclk_frequency: sensor's master clock frequency in Hz * @gpio_reset: GPIO driving RESET pin * @gpio_stby: GPIO driving STBY pin + * @bus_type: bus type * @nlanes: maximum number of MIPI-CSI lanes used * @horiz_flip: default horizontal image flip value, non zero to enable * @vert_flip: default vertical image flip value, non zero to enable diff --git a/include/media/i2c/s5k4ecgx.h b/include/media/i2c/s5k4ecgx.h index fccb7be8ed8f..92202eb35249 100644 --- a/include/media/i2c/s5k4ecgx.h +++ b/include/media/i2c/s5k4ecgx.h @@ -11,7 +11,7 @@ /** * struct s5k4ecgx_gpio - data structure describing a GPIO - * @gpio : GPIO number + * @gpio: GPIO number * @level: indicates active state of the @gpio */ struct s5k4ecgx_gpio { @@ -20,9 +20,9 @@ struct s5k4ecgx_gpio { }; /** - * struct ss5k4ecgx_platform_data- s5k4ecgx driver platform data + * struct s5k4ecgx_platform_data - s5k4ecgx driver platform data * @gpio_reset: GPIO driving RESET pin - * @gpio_stby : GPIO driving STBY pin + * @gpio_stby: GPIO driving STBY pin */ struct s5k4ecgx_platform_data { diff --git a/include/media/i2c/s5k6aa.h b/include/media/i2c/s5k6aa.h index fd78e85e8b78..eb3444d8b731 100644 --- a/include/media/i2c/s5k6aa.h +++ b/include/media/i2c/s5k6aa.h @@ -28,6 +28,7 @@ struct s5k6aa_gpio { * @mclk_frequency: sensor's master clock frequency in Hz * @gpio_reset: GPIO driving RESET pin * @gpio_stby: GPIO driving STBY pin + * @bus_type: bus type * 
@nlanes: maximum number of MIPI-CSI lanes used * @horiz_flip: default horizontal image flip value, non zero to enable * @vert_flip: default vertical image flip value, non zero to enable diff --git a/include/media/i2c/tvp514x.h b/include/media/i2c/tvp514x.h index 0c1bb04bdbcb..837efff0a6a0 100644 --- a/include/media/i2c/tvp514x.h +++ b/include/media/i2c/tvp514x.h @@ -29,10 +29,7 @@ #define PAL_NUM_ACTIVE_PIXELS (720) #define PAL_NUM_ACTIVE_LINES (576) -/** - * enum tvp514x_input - enum for different decoder input pin - * configuration. - */ +/* enum for different decoder input pin configuration */ enum tvp514x_input { /* * CVBS input selection @@ -69,11 +66,7 @@ enum tvp514x_input { INPUT_INVALID }; -/** - * enum tvp514x_output - enum for output format - * supported. - * - */ +/* enum for output format supported. */ enum tvp514x_output { OUTPUT_10BIT_422_EMBEDDED_SYNC = 0, OUTPUT_20BIT_422_SEPERATE_SYNC, diff --git a/include/media/i2c/tw9910.h b/include/media/i2c/tw9910.h index 92d31bd1afe6..77da94f909e3 100644 --- a/include/media/i2c/tw9910.h +++ b/include/media/i2c/tw9910.h @@ -13,9 +13,7 @@ #ifndef __TW9910_H__ #define __TW9910_H__ -/** - * tw9910_mpout_pin - MPOUT (multi-purpose output) pin functions - */ +/* MPOUT (multi-purpose output) pin functions */ enum tw9910_mpout_pin { TW9910_MPO_VLOSS, TW9910_MPO_HLOCK, @@ -28,10 +26,10 @@ enum tw9910_mpout_pin { }; /** - * tw9910_video_info - tw9910 driver interface structure + * struct tw9910_video_info - tw9910 driver interface structure * @buswidth: Parallel data bus width (8 or 16). * @mpout: Selected function of MPOUT (multi-purpose output) pin. - * See &enum tw9910_mpout_pin + * See enum tw9910_mpout_pin */ struct tw9910_video_info { unsigned long buswidth; diff --git a/include/media/media-entity.h b/include/media/media-entity.h index cbdfcb79d0d0..09737b47881f 100644 --- a/include/media/media-entity.h +++ b/include/media/media-entity.h @@ -155,7 +155,7 @@ struct media_link { * uniquely identified by the pad number. * @PAD_SIGNAL_ANALOG: * The pad contains an analog signal. It can be Radio Frequency, - * Intermediate Frequency, a baseband signal or sub-cariers. + * Intermediate Frequency, a baseband signal or sub-carriers. * Tuner inputs, IF-PLL demodulators, composite and s-video signals * should use it. * @PAD_SIGNAL_DV: @@ -885,6 +885,11 @@ int media_entity_get_fwnode_pad(struct media_entity *entity, * * @graph: Media graph structure that will be used to walk the graph * @mdev: Pointer to the &media_device that contains the object + * + * The caller is required to hold the media_device graph_mutex during the graph + * walk until the graph state is released. + * + * Returns zero on success or a negative error code otherwise. 
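Usage note (not part of the patch): the locking requirement documented above means the whole walk must run under the media_device graph_mutex. A sketch under that assumption; the actual start/next iteration is elided and the function name is invented.

#include <linux/mutex.h>
#include <media/media-device.h>
#include <media/media-entity.h>

/* Hypothetical walk skeleton honouring the graph_mutex rule. */
static int my_walk_graph(struct media_device *mdev)
{
	struct media_graph graph;
	int ret;

	mutex_lock(&mdev->graph_mutex);

	ret = media_graph_walk_init(&graph, mdev);
	if (!ret) {
		/* ... walk entities here while still holding graph_mutex ... */
		media_graph_walk_cleanup(&graph);
	}

	mutex_unlock(&mdev->graph_mutex);
	return ret;
}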
*/ __must_check int media_graph_walk_init( struct media_graph *graph, struct media_device *mdev); diff --git a/include/media/rc-map.h b/include/media/rc-map.h index 30f138ebab6f..b5585d14fff4 100644 --- a/include/media/rc-map.h +++ b/include/media/rc-map.h @@ -277,9 +277,12 @@ struct rc_map *rc_map_get(const char *name); #define RC_MAP_LEADTEK_Y04G0051 "rc-leadtek-y04g0051" #define RC_MAP_LME2510 "rc-lme2510" #define RC_MAP_MANLI "rc-manli" +#define RC_MAP_MECOOL_KII_PRO "rc-mecool-kii-pro" +#define RC_MAP_MECOOL_KIII_PRO "rc-mecool-kiii-pro" #define RC_MAP_MEDION_X10 "rc-medion-x10" #define RC_MAP_MEDION_X10_DIGITAINER "rc-medion-x10-digitainer" #define RC_MAP_MEDION_X10_OR2X "rc-medion-x10-or2x" +#define RC_MAP_MINIX_NEO "rc-minix-neo" #define RC_MAP_MSI_DIGIVOX_II "rc-msi-digivox-ii" #define RC_MAP_MSI_DIGIVOX_III "rc-msi-digivox-iii" #define RC_MAP_MSI_TVANYWHERE "rc-msi-tvanywhere" @@ -338,6 +341,7 @@ struct rc_map *rc_map_get(const char *name); #define RC_MAP_WINFAST "rc-winfast" #define RC_MAP_WINFAST_USBII_DELUXE "rc-winfast-usbii-deluxe" #define RC_MAP_X96MAX "rc-x96max" +#define RC_MAP_XBOX_360 "rc-xbox-360" #define RC_MAP_XBOX_DVD "rc-xbox-dvd" #define RC_MAP_ZX_IRDEC "rc-zx-irdec" diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h index f572e1279182..5b275a845c20 100644 --- a/include/media/v4l2-async.h +++ b/include/media/v4l2-async.h @@ -286,16 +286,16 @@ void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier); int v4l2_async_register_subdev(struct v4l2_subdev *sd); /** - * v4l2_async_register_subdev_sensor_common - registers a sensor sub-device to - * the asynchronous sub-device - * framework and parse set up common - * sensor related devices + * v4l2_async_register_subdev_sensor - registers a sensor sub-device to the + * asynchronous sub-device framework and + * parse set up common sensor related + * devices * * @sd: pointer to struct &v4l2_subdev * * This function is just like v4l2_async_register_subdev() with the exception * that calling it will also parse firmware interfaces for remote references - * using v4l2_async_notifier_parse_fwnode_sensor_common() and registers the + * using v4l2_async_notifier_parse_fwnode_sensor() and registers the * async sub-devices. The sub-device is similarly unregistered by calling * v4l2_async_unregister_subdev(). * @@ -305,7 +305,7 @@ int v4l2_async_register_subdev(struct v4l2_subdev *sd); * to register it. */ int __must_check -v4l2_async_register_subdev_sensor_common(struct v4l2_subdev *sd); +v4l2_async_register_subdev_sensor(struct v4l2_subdev *sd); /** * v4l2_async_unregister_subdev - unregisters a sub-device to the asynchronous diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index 167ca8c8424f..a5953b812878 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h @@ -18,7 +18,6 @@ * This will move to the public headers once this API is fully stable. */ #include <media/mpeg2-ctrls.h> -#include <media/vp8-ctrls.h> #include <media/hevc-ctrls.h> /* forward references */ @@ -50,10 +49,12 @@ struct video_device; * @p_h264_slice_params: Pointer to a struct v4l2_ctrl_h264_slice_params. * @p_h264_decode_params: Pointer to a struct v4l2_ctrl_h264_decode_params. * @p_h264_pred_weights: Pointer to a struct v4l2_ctrl_h264_pred_weights. - * @p_vp8_frame_header: Pointer to a VP8 frame header structure. + * @p_vp8_frame: Pointer to a VP8 frame params structure. * @p_hevc_sps: Pointer to an HEVC sequence parameter set structure. 
* @p_hevc_pps: Pointer to an HEVC picture parameter set structure. * @p_hevc_slice_params: Pointer to an HEVC slice parameters structure. + * @p_hdr10_cll: Pointer to an HDR10 Content Light Level structure. + * @p_hdr10_mastering: Pointer to an HDR10 Mastering Display structure. * @p_area: Pointer to an area. * @p: Pointer to a compound value. * @p_const: Pointer to a constant compound value. @@ -74,10 +75,12 @@ union v4l2_ctrl_ptr { struct v4l2_ctrl_h264_slice_params *p_h264_slice_params; struct v4l2_ctrl_h264_decode_params *p_h264_decode_params; struct v4l2_ctrl_h264_pred_weights *p_h264_pred_weights; - struct v4l2_ctrl_vp8_frame_header *p_vp8_frame_header; + struct v4l2_ctrl_vp8_frame *p_vp8_frame; struct v4l2_ctrl_hevc_sps *p_hevc_sps; struct v4l2_ctrl_hevc_pps *p_hevc_pps; struct v4l2_ctrl_hevc_slice_params *p_hevc_slice_params; + struct v4l2_ctrl_hdr10_cll_info *p_hdr10_cll; + struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering; struct v4l2_area *p_area; void *p; const void *p_const; @@ -301,12 +304,14 @@ struct v4l2_ctrl { * the control has been applied. This prevents applying controls * from a cluster with multiple controls twice (when the first * control of a cluster is applied, they all are). - * @req: If set, this refers to another request that sets this control. + * @valid_p_req: If set, then p_req contains the control value for the request. * @p_req: If the control handler containing this control reference * is bound to a media request, then this points to the - * value of the control that should be applied when the request + * value of the control that must be applied when the request * is executed, or to the value of the control at the time - * that the request was completed. + * that the request was completed. If @valid_p_req is false, + * then this control was never set for this request and the + * control will not be updated when this request is applied. * * Each control handler has a list of these refs. The list_head is used to * keep a sorted-by-control-ID list of all controls, while the next pointer @@ -319,7 +324,7 @@ struct v4l2_ctrl_ref { struct v4l2_ctrl_helper *helper; bool from_other_dev; bool req_done; - struct v4l2_ctrl_ref *req; + bool valid_p_req; union v4l2_ctrl_ptr p_req; }; @@ -346,7 +351,7 @@ struct v4l2_ctrl_ref { * @error: The error code of the first failed control addition. * @request_is_queued: True if the request was queued. * @requests: List to keep track of open control handler request objects. - * For the parent control handler (@req_obj.req == NULL) this + * For the parent control handler (@req_obj.ops == NULL) this * is the list header. When the parent control handler is * removed, it has to unbind and put all these requests since * they refer to the parent. diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h index 80d21ad8d603..7ab033b819eb 100644 --- a/include/media/v4l2-fwnode.h +++ b/include/media/v4l2-fwnode.h @@ -510,27 +510,6 @@ v4l2_async_notifier_parse_fwnode_endpoints(struct device *dev, size_t asd_struct_size, parse_endpoint_func parse_endpoint); -/** - * v4l2_async_notifier_parse_fwnode_sensor_common - parse common references on - * sensors for async sub-devices - * @dev: the device node the properties of which are parsed for references - * @notifier: the async notifier where the async subdevs will be added - * - * Parse common sensor properties for remote devices related to the - * sensor and set up async sub-devices for them. 
- * - * Any notifier populated using this function must be released with a call to - * v4l2_async_notifier_release() after it has been unregistered and the async - * sub-devices are no longer in use, even in the case the function returned an - * error. - * - * Return: 0 on success - * -ENOMEM if memory allocation failed - * -EINVAL if property parsing failed - */ -int v4l2_async_notifier_parse_fwnode_sensor_common(struct device *dev, - struct v4l2_async_notifier *notifier); - /* Helper macros to access the connector links. */ /** v4l2_connector_last_link - Helper macro to get the first diff --git a/include/media/v4l2-h264.h b/include/media/v4l2-h264.h index d2314f4d4490..4b1c71c935e0 100644 --- a/include/media/v4l2-h264.h +++ b/include/media/v4l2-h264.h @@ -66,11 +66,11 @@ v4l2_h264_build_b_ref_lists(const struct v4l2_h264_reflist_builder *builder, u8 *b0_reflist, u8 *b1_reflist); /** - * v4l2_h264_build_b_ref_lists() - Build the P reference list + * v4l2_h264_build_p_ref_list() - Build the P reference list * * @builder: reference list builder context - * @p_reflist: 16-bytes array used to store the P reference list. Each entry - * is an index in the DPB + * @reflist: 16-bytes array used to store the P reference list. Each entry + * is an index in the DPB * * This functions builds the P reference lists. This procedure is describe in * section '8.2.4 Decoding process for reference picture lists construction' diff --git a/include/media/v4l2-jpeg.h b/include/media/v4l2-jpeg.h index ddba2a56c321..2dba843ce3bd 100644 --- a/include/media/v4l2-jpeg.h +++ b/include/media/v4l2-jpeg.h @@ -88,10 +88,30 @@ struct v4l2_jpeg_scan_header { }; /** + * enum v4l2_jpeg_app14_tf - APP14 transform flag + * According to Rec. ITU-T T.872 (06/2012) 6.5.3 + * APP14 segment is for color encoding, it contains a transform flag, + * which may have values of 0, 1 and 2 and are interpreted as follows: + * @V4L2_JPEG_APP14_TF_CMYK_RGB: CMYK for images encoded with four components + * RGB for images encoded with three components + * @V4L2_JPEG_APP14_TF_YCBCR: an image encoded with three components using YCbCr + * @V4L2_JPEG_APP14_TF_YCCK: an image encoded with four components using YCCK + * @V4L2_JPEG_APP14_TF_UNKNOWN: indicate app14 is not present + */ +enum v4l2_jpeg_app14_tf { + V4L2_JPEG_APP14_TF_CMYK_RGB = 0, + V4L2_JPEG_APP14_TF_YCBCR = 1, + V4L2_JPEG_APP14_TF_YCCK = 2, + V4L2_JPEG_APP14_TF_UNKNOWN = -1, +}; + +/** * struct v4l2_jpeg_header - parsed JPEG header * @sof: pointer to frame header and size * @sos: pointer to scan header and size + * @num_dht: number of entries in @dht * @dht: pointers to huffman tables and sizes + * @num_dqt: number of entries in @dqt * @dqt: pointers to quantization tables and sizes * @frame: parsed frame header * @scan: pointer to parsed scan header, optional @@ -100,6 +120,7 @@ struct v4l2_jpeg_scan_header { * order, optional * @restart_interval: number of MCU per restart interval, Ri * @ecs_offset: buffer offset in bytes to the entropy coded segment + * @app14_tf: transform flag from app14 data * * When this structure is passed to v4l2_jpeg_parse_header, the optional scan, * quantization_tables, and huffman_tables pointers must be initialized to NULL @@ -119,6 +140,7 @@ struct v4l2_jpeg_header { struct v4l2_jpeg_reference *huffman_tables; u16 restart_interval; size_t ecs_offset; + enum v4l2_jpeg_app14_tf app14_tf; }; int v4l2_jpeg_parse_header(void *buf, size_t len, struct v4l2_jpeg_header *out); diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h index 
bdaa5f2f8ca2..c181685923d5 100644 --- a/include/media/v4l2-mc.h +++ b/include/media/v4l2-mc.h @@ -91,6 +91,7 @@ int v4l_vb2q_enable_media_source(struct vb2_queue *q); * * @src_sd: pointer to a source subdev * @sink: pointer to a subdev sink pad + * @flags: the link flags * * This function searches for fwnode endpoint connections from a source * subdevice to a single sink pad, and if suitable connections are found, @@ -98,6 +99,11 @@ int v4l_vb2q_enable_media_source(struct vb2_queue *q); * called by the sink subdevice, in its v4l2-async notifier subdev bound * callback, to create links from a bound source subdevice. * + * The @flags argument specifies the link flags. The caller shall ensure that + * the flags are valid regardless of the number of links that may be created. + * For instance, setting the MEDIA_LNK_FL_ENABLED flag will cause all created + * links to be enabled, which isn't valid if more than one link is created. + * * .. note:: * * Any sink subdevice that calls this function must implement the @@ -107,7 +113,7 @@ int v4l_vb2q_enable_media_source(struct vb2_queue *q); * Return 0 on success or a negative error code on failure. */ int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd, - struct media_pad *sink); + struct media_pad *sink, u32 flags); /** * v4l2_create_fwnode_links - Create fwnode-based links from a source diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index 799ba61b5b6f..12955cb460d2 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -154,9 +154,11 @@ struct vb2_mem_ops { * @dbuf: dma_buf - shared buffer object. * @dbuf_mapped: flag to show whether dbuf is mapped or not * @bytesused: number of bytes occupied by data in the plane (payload). - * @length: size of this plane (NOT the payload) in bytes. + * @length: size of this plane (NOT the payload) in bytes. The maximum + * valid size is MAX_UINT - PAGE_SIZE. * @min_length: minimum required size of this plane (NOT the payload) in bytes. - * @length is always greater or equal to @min_length. + * @length is always greater or equal to @min_length, and like + * @length, it is limited to MAX_UINT - PAGE_SIZE. * @m: Union with memtype-specific data. * @m.offset: when memory in the associated struct vb2_buffer is * %VB2_MEMORY_MMAP, equals the offset from the start of diff --git a/include/media/vp8-ctrls.h b/include/media/vp8-ctrls.h deleted file mode 100644 index 3969550df148..000000000000 --- a/include/media/vp8-ctrls.h +++ /dev/null @@ -1,114 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * These are the VP8 state controls for use with stateless VP8 - * codec drivers. - * - * It turns out that these structs are not stable yet and will undergo - * more changes. So keep them private until they are stable and ready to - * become part of the official public API. 
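The new @flags argument to v4l2_create_fwnode_links_to_pad() above is applied to every link the call creates, hence the caveat about MEDIA_LNK_FL_ENABLED. A hedged sketch of a sink driver using it, with 0 (links created disabled) as the always-valid choice; the helper name and the way the sink pad is obtained are illustrative, and in practice this runs from the sink sub-device's async notifier bound callback as the kernel-doc describes.

#include <media/v4l2-mc.h>

static int mydev_link_source(struct v4l2_subdev *src_sd,
			     struct media_pad *sink_pad)
{
	/* 0: create the links disabled; MEDIA_LNK_FL_ENABLED would only
	 * be valid if this call can create at most one link, as noted in
	 * the kernel-doc above.
	 */
	return v4l2_create_fwnode_links_to_pad(src_sd, sink_pad, 0);
}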
- */ - -#ifndef _VP8_CTRLS_H_ -#define _VP8_CTRLS_H_ - -#include <linux/types.h> - -#define V4L2_PIX_FMT_VP8_FRAME v4l2_fourcc('V', 'P', '8', 'F') - -#define V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER (V4L2_CID_CODEC_BASE + 2000) -#define V4L2_CTRL_TYPE_VP8_FRAME_HEADER 0x301 - -#define V4L2_VP8_SEGMENT_HEADER_FLAG_ENABLED 0x01 -#define V4L2_VP8_SEGMENT_HEADER_FLAG_UPDATE_MAP 0x02 -#define V4L2_VP8_SEGMENT_HEADER_FLAG_UPDATE_FEATURE_DATA 0x04 -#define V4L2_VP8_SEGMENT_HEADER_FLAG_DELTA_VALUE_MODE 0x08 - -struct v4l2_vp8_segment_header { - __s8 quant_update[4]; - __s8 lf_update[4]; - __u8 segment_probs[3]; - __u8 padding; - __u32 flags; -}; - -#define V4L2_VP8_LF_HEADER_ADJ_ENABLE 0x01 -#define V4L2_VP8_LF_HEADER_DELTA_UPDATE 0x02 -#define V4L2_VP8_LF_FILTER_TYPE_SIMPLE 0x04 -struct v4l2_vp8_loopfilter_header { - __s8 ref_frm_delta[4]; - __s8 mb_mode_delta[4]; - __u8 sharpness_level; - __u8 level; - __u16 padding; - __u32 flags; -}; - -struct v4l2_vp8_quantization_header { - __u8 y_ac_qi; - __s8 y_dc_delta; - __s8 y2_dc_delta; - __s8 y2_ac_delta; - __s8 uv_dc_delta; - __s8 uv_ac_delta; - __u16 padding; -}; - -#define V4L2_VP8_COEFF_PROB_CNT 11 -#define V4L2_VP8_MV_PROB_CNT 19 -struct v4l2_vp8_entropy_header { - __u8 coeff_probs[4][8][3][V4L2_VP8_COEFF_PROB_CNT]; - __u8 y_mode_probs[4]; - __u8 uv_mode_probs[3]; - __u8 mv_probs[2][V4L2_VP8_MV_PROB_CNT]; - __u8 padding[3]; -}; - -struct v4l2_vp8_entropy_coder_state { - __u8 range; - __u8 value; - __u8 bit_count; - __u8 padding; -}; - -#define V4L2_VP8_FRAME_HEADER_FLAG_KEY_FRAME 0x01 -#define V4L2_VP8_FRAME_HEADER_FLAG_EXPERIMENTAL 0x02 -#define V4L2_VP8_FRAME_HEADER_FLAG_SHOW_FRAME 0x04 -#define V4L2_VP8_FRAME_HEADER_FLAG_MB_NO_SKIP_COEFF 0x08 -#define V4L2_VP8_FRAME_HEADER_FLAG_SIGN_BIAS_GOLDEN 0x10 -#define V4L2_VP8_FRAME_HEADER_FLAG_SIGN_BIAS_ALT 0x20 - -#define VP8_FRAME_IS_KEY_FRAME(hdr) \ - (!!((hdr)->flags & V4L2_VP8_FRAME_HEADER_FLAG_KEY_FRAME)) - -struct v4l2_ctrl_vp8_frame_header { - struct v4l2_vp8_segment_header segment_header; - struct v4l2_vp8_loopfilter_header lf_header; - struct v4l2_vp8_quantization_header quant_header; - struct v4l2_vp8_entropy_header entropy_header; - struct v4l2_vp8_entropy_coder_state coder_state; - - __u16 width; - __u16 height; - - __u8 horizontal_scale; - __u8 vertical_scale; - - __u8 version; - __u8 prob_skip_false; - __u8 prob_intra; - __u8 prob_last; - __u8 prob_gf; - __u8 num_dct_parts; - - __u32 first_part_size; - __u32 first_part_header_bits; - __u32 dct_part_sizes[8]; - - __u64 last_frame_ts; - __u64 golden_frame_ts; - __u64 alt_frame_ts; - - __u64 flags; -}; - -#endif diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 18f783dcd55f..78ea3e332688 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -233,7 +233,6 @@ void ipv6_mc_unmap(struct inet6_dev *idev); void ipv6_mc_remap(struct inet6_dev *idev); void ipv6_mc_init_dev(struct inet6_dev *idev); void ipv6_mc_destroy_dev(struct inet6_dev *idev); -int ipv6_mc_check_icmpv6(struct sk_buff *skb); int ipv6_mc_check_mld(struct sk_buff *skb); void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp); diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index f6abcc0bbd6e..cee5f83c0f11 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -53,7 +53,7 @@ int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *, struct msghdr *, size_t, rxrpc_notify_end_tx_t); int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *, - struct iov_iter *, bool, u32 *, u16 *); + struct iov_iter *, 
size_t *, bool, u32 *, u16 *); bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *, u32, int, const char *); void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *); diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index ba2f439bc04d..ea4ae551c426 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -320,6 +320,7 @@ enum { HCI_BREDR_ENABLED, HCI_LE_SCAN_INTERRUPTED, HCI_WIDEBAND_SPEECH_ENABLED, + HCI_EVENT_FILTER_CONFIGURED, HCI_DUT_MODE, HCI_VENDOR_DIAG, diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index ebdd4afe30d2..c73ac52af186 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -584,6 +584,11 @@ struct hci_dev { #if IS_ENABLED(CONFIG_BT_MSFTEXT) __u16 msft_opcode; void *msft_data; + bool msft_curve_validity; +#endif + +#if IS_ENABLED(CONFIG_BT_AOSPEXT) + bool aosp_capable; #endif int (*open)(struct hci_dev *hdev); @@ -704,6 +709,7 @@ struct hci_chan { struct sk_buff_head data_q; unsigned int sent; __u8 state; + bool amp; }; struct hci_conn_params { @@ -1238,6 +1244,13 @@ static inline void hci_set_msft_opcode(struct hci_dev *hdev, __u16 opcode) #endif } +static inline void hci_set_aosp_capable(struct hci_dev *hdev) +{ +#if IS_ENABLED(CONFIG_BT_AOSPEXT) + hdev->aosp_capable = true; +#endif +} + int hci_dev_open(__u16 dev); int hci_dev_close(__u16 dev); int hci_dev_do_close(struct hci_dev *hdev); @@ -1742,8 +1755,8 @@ void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c); #define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04 #define DISCOV_BREDR_INQUIRY_LEN 0x08 #define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */ -#define DISCOV_LE_FAST_ADV_INT_MIN 100 /* msec */ -#define DISCOV_LE_FAST_ADV_INT_MAX 150 /* msec */ +#define DISCOV_LE_FAST_ADV_INT_MIN 0x00A0 /* 100 msec */ +#define DISCOV_LE_FAST_ADV_INT_MAX 0x00F0 /* 150 msec */ void mgmt_fill_version_info(void *ver); int mgmt_new_settings(struct hci_dev *hdev); diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 61800a7b6192..3c4f550e5a8b 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -494,6 +494,7 @@ struct l2cap_le_credits { #define L2CAP_ECRED_MIN_MTU 64 #define L2CAP_ECRED_MIN_MPS 64 +#define L2CAP_ECRED_MAX_CID 5 struct l2cap_ecred_conn_req { __le16 psm; diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index 839a2028009e..a7cffb069565 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -578,6 +578,7 @@ struct mgmt_rp_add_advertising { #define MGMT_ADV_PARAM_TIMEOUT BIT(13) #define MGMT_ADV_PARAM_INTERVALS BIT(14) #define MGMT_ADV_PARAM_TX_POWER BIT(15) +#define MGMT_ADV_PARAM_SCAN_RSP BIT(16) #define MGMT_ADV_FLAG_SEC_MASK (MGMT_ADV_FLAG_SEC_1M | MGMT_ADV_FLAG_SEC_2M | \ MGMT_ADV_FLAG_SEC_CODED) diff --git a/include/net/bpf_sk_storage.h b/include/net/bpf_sk_storage.h index 0e85713f56df..2926f1f00d65 100644 --- a/include/net/bpf_sk_storage.h +++ b/include/net/bpf_sk_storage.h @@ -27,7 +27,6 @@ struct bpf_local_storage_elem; struct bpf_sk_storage_diag; struct sk_buff; struct nlattr; -struct sock; #ifdef CONFIG_BPF_SYSCALL int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk); diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 911fae42b0c0..58c2cd417e89 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -11,6 +11,7 @@ */ #include <linux/ethtool.h> +#include <uapi/linux/rfkill.h> #include <linux/netdevice.h> #include 
<linux/debugfs.h> #include <linux/list.h> @@ -359,7 +360,7 @@ struct ieee80211_sta_he_cap { }; /** - * struct ieee80211_sband_iftype_data + * struct ieee80211_sband_iftype_data - sband data per interface type * * This structure encapsulates sband data that is relevant for the * interface types defined in @types_mask. Each type in the @@ -3520,6 +3521,8 @@ struct cfg80211_pmsr_result { * @non_trigger_based: use non trigger based ranging for the measurement * If neither @trigger_based nor @non_trigger_based is set, * EDCA based ranging will be used. + * @lmr_feedback: negotiate for I2R LMR feedback. Only valid if either + * @trigger_based or @non_trigger_based is set. * * See also nl80211 for the respective attribute documentation. */ @@ -3531,7 +3534,8 @@ struct cfg80211_pmsr_ftm_request_peer { request_lci:1, request_civicloc:1, trigger_based:1, - non_trigger_based:1; + non_trigger_based:1, + lmr_feedback:1; u8 num_bursts_exp; u8 burst_duration; u8 ftms_per_burst; @@ -5606,7 +5610,7 @@ static inline bool cfg80211_channel_is_psc(struct ieee80211_channel *chan) * which is, for this function, given as a bitmap of indices of * rates in the band's bitrate table. */ -struct ieee80211_rate * +const struct ieee80211_rate * ieee80211_get_response_rate(struct ieee80211_supported_band *sband, u32 basic_rates, int bitrate); @@ -5756,7 +5760,7 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr); */ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, const u8 *addr, enum nl80211_iftype iftype, - u8 data_offset); + u8 data_offset, bool is_amsdu); /** * ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3 @@ -5768,7 +5772,7 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, enum nl80211_iftype iftype) { - return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0); + return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0, false); } /** @@ -6633,11 +6637,19 @@ void cfg80211_notify_new_peer_candidate(struct net_device *dev, */ /** - * wiphy_rfkill_set_hw_state - notify cfg80211 about hw block state + * wiphy_rfkill_set_hw_state_reason - notify cfg80211 about hw block state * @wiphy: the wiphy * @blocked: block status + * @reason: one of reasons in &enum rfkill_hard_block_reasons */ -void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked); +void wiphy_rfkill_set_hw_state_reason(struct wiphy *wiphy, bool blocked, + enum rfkill_hard_block_reasons reason); + +static inline void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked) +{ + wiphy_rfkill_set_hw_state_reason(wiphy, blocked, + RFKILL_HARD_BLOCK_SIGNAL); +} /** * wiphy_rfkill_start_polling - start polling rfkill @@ -6731,7 +6743,7 @@ cfg80211_vendor_cmd_alloc_reply_skb(struct wiphy *wiphy, int approxlen) int cfg80211_vendor_cmd_reply(struct sk_buff *skb); /** - * cfg80211_vendor_cmd_get_sender + * cfg80211_vendor_cmd_get_sender - get the current sender netlink ID * @wiphy: the wiphy * * Return the current netlink port ID in a vendor command handler. diff --git a/include/net/devlink.h b/include/net/devlink.h index 853420db5d32..7c984cadfec4 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -98,11 +98,13 @@ struct devlink_port_pci_vf_attrs { * @controller: Associated controller number * @sf: Associated PCI SF for of the PCI PF for this port. * @pf: Associated PCI PF number for this port. 
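wiphy_rfkill_set_hw_state() is now a static inline wrapper around the new wiphy_rfkill_set_hw_state_reason(), as shown above. A small sketch of a driver reporting a hardware kill switch with an explicit reason; the driver function is hypothetical, and RFKILL_HARD_BLOCK_SIGNAL is the reason the compat wrapper itself uses.

#include <net/cfg80211.h>
#include <uapi/linux/rfkill.h>

static void mydrv_report_hw_rfkill(struct wiphy *wiphy, bool blocked)
{
	/* Pass the hardware block state to cfg80211 along with its cause. */
	wiphy_rfkill_set_hw_state_reason(wiphy, blocked,
					 RFKILL_HARD_BLOCK_SIGNAL);
}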
+ * @external: when set, indicates if a port is for an external controller */ struct devlink_port_pci_sf_attrs { u32 controller; u32 sf; u16 pf; + u8 external:1; }; /** @@ -1508,7 +1510,8 @@ void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u32 contro void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 controller, u16 pf, u16 vf, bool external); void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, - u32 controller, u16 pf, u32 sf); + u32 controller, u16 pf, u32 sf, + bool external); int devlink_sb_register(struct devlink *devlink, unsigned int sb_index, u32 size, u16 ingress_pools_count, u16 egress_pools_count, u16 ingress_tc_count, diff --git a/include/net/dsa.h b/include/net/dsa.h index 83a933e563fe..e1a2610a0e06 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -49,10 +49,12 @@ struct phylink_link_state; #define DSA_TAG_PROTO_XRS700X_VALUE 19 #define DSA_TAG_PROTO_OCELOT_8021Q_VALUE 20 #define DSA_TAG_PROTO_SEVILLE_VALUE 21 +#define DSA_TAG_PROTO_BRCM_LEGACY_VALUE 22 enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE, DSA_TAG_PROTO_BRCM = DSA_TAG_PROTO_BRCM_VALUE, + DSA_TAG_PROTO_BRCM_LEGACY = DSA_TAG_PROTO_BRCM_LEGACY_VALUE, DSA_TAG_PROTO_BRCM_PREPEND = DSA_TAG_PROTO_BRCM_PREPEND_VALUE, DSA_TAG_PROTO_DSA = DSA_TAG_PROTO_DSA_VALUE, DSA_TAG_PROTO_EDSA = DSA_TAG_PROTO_EDSA_VALUE, @@ -115,20 +117,6 @@ struct dsa_netdevice_ops { #define MODULE_ALIAS_DSA_TAG_DRIVER(__proto) \ MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS __stringify(__proto##_VALUE)) -struct dsa_skb_cb { - struct sk_buff *clone; -}; - -struct __dsa_skb_cb { - struct dsa_skb_cb cb; - u8 priv[48 - sizeof(struct dsa_skb_cb)]; -}; - -#define DSA_SKB_CB(skb) ((struct dsa_skb_cb *)((skb)->cb)) - -#define DSA_SKB_CB_PRIV(skb) \ - ((void *)(skb)->cb + offsetof(struct __dsa_skb_cb, priv)) - struct dsa_switch_tree { struct list_head list; @@ -147,6 +135,11 @@ struct dsa_switch_tree { /* Tagging protocol operations */ const struct dsa_device_ops *tag_ops; + /* Default tagging protocol preferred by the switches in this + * tree. + */ + enum dsa_tag_protocol default_proto; + /* * Configuration data for the platform device that owns * this dsa switch tree instance. 
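The devlink PCI SF changes above add an @external attribute and a matching argument to devlink_port_attrs_pci_sf_set(). A hedged sketch of a driver setting the attributes for an SF port; the controller, PF and SF numbers are placeholder values.

#include <net/devlink.h>

static void mydrv_set_sf_port_attrs(struct devlink_port *dl_port,
				    bool external_controller)
{
	/* Controller 1, PF 0, SF 42 are illustrative values only. */
	devlink_port_attrs_pci_sf_set(dl_port, 1, 0, 42, external_controller);
}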
@@ -258,7 +251,7 @@ struct dsa_port { unsigned int index; const char *name; struct dsa_port *cpu_dp; - const char *mac; + u8 mac[ETH_ALEN]; struct device_node *dn; unsigned int ageing_time; bool vlan_filtering; @@ -491,6 +484,20 @@ static inline bool dsa_port_is_vlan_filtering(const struct dsa_port *dp) return dp->vlan_filtering; } +static inline +struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp) +{ + if (!dp->bridge_dev) + return NULL; + + if (dp->lag_dev) + return dp->lag_dev; + else if (dp->hsr_dev) + return dp->hsr_dev; + + return dp->slave; +} + typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid, bool is_static, void *data); struct dsa_switch_ops { @@ -561,6 +568,8 @@ struct dsa_switch_ops { int port, uint64_t *data); void (*get_stats64)(struct dsa_switch *ds, int port, struct rtnl_link_stats64 *s); + void (*self_test)(struct dsa_switch *ds, int port, + struct ethtool_test *etest, u64 *data); /* * ethtool Wake-on-LAN @@ -717,8 +726,8 @@ struct dsa_switch_ops { struct ifreq *ifr); int (*port_hwtstamp_set)(struct dsa_switch *ds, int port, struct ifreq *ifr); - bool (*port_txtstamp)(struct dsa_switch *ds, int port, - struct sk_buff *clone, unsigned int type); + void (*port_txtstamp)(struct dsa_switch *ds, int port, + struct sk_buff *skb); bool (*port_rxtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb, unsigned int type); diff --git a/include/net/flow.h b/include/net/flow.h index 39d0cedcddee..6f5e70240071 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -59,7 +59,6 @@ union flowi_uli { __le16 sport; } dnports; - __be32 spi; __be32 gre_key; struct { @@ -90,7 +89,6 @@ struct flowi4 { #define fl4_dport uli.ports.dport #define fl4_icmp_type uli.icmpt.type #define fl4_icmp_code uli.icmpt.code -#define fl4_ipsec_spi uli.spi #define fl4_mh_type uli.mht.type #define fl4_gre_key uli.gre_key } __attribute__((__aligned__(BITS_PER_LONG/8))); @@ -150,7 +148,6 @@ struct flowi6 { #define fl6_dport uli.ports.dport #define fl6_icmp_type uli.icmpt.type #define fl6_icmp_code uli.icmpt.code -#define fl6_ipsec_spi uli.spi #define fl6_mh_type uli.mht.type #define fl6_gre_key uli.gre_key __u32 mp_hash; diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index cc10b10dc3a1..ffd386ea0dbb 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -350,7 +350,7 @@ static inline bool flow_keys_have_l4(const struct flow_keys *keys) u32 flow_hash_from_keys(struct flow_keys *keys); void skb_flow_get_icmp_tci(const struct sk_buff *skb, struct flow_dissector_key_icmp *key_icmp, - void *data, int thoff, int hlen); + const void *data, int thoff, int hlen); static inline bool dissector_uses_key(const struct flow_dissector *flow_dissector, enum flow_dissector_key_id key_id) @@ -368,8 +368,8 @@ static inline void *skb_flow_dissector_target(struct flow_dissector *flow_dissec struct bpf_flow_dissector { struct bpf_flow_keys *flow_keys; const struct sk_buff *skb; - void *data; - void *data_end; + const void *data; + const void *data_end; }; static inline void diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index e6bd8ebf9ac3..dc5c1e69cd9f 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -147,6 +147,7 @@ enum flow_action_id { FLOW_ACTION_MPLS_POP, FLOW_ACTION_MPLS_MANGLE, FLOW_ACTION_GATE, + FLOW_ACTION_PPPOE_PUSH, NUM_FLOW_ACTIONS, }; @@ -234,6 +235,8 @@ struct flow_action_entry { u32 index; u32 burst; u64 rate_bytes_ps; + u64 burst_pkt; + u64 rate_pkt_ps; u32 mtu; } police; struct { 
/* FLOW_ACTION_CT */ @@ -272,6 +275,9 @@ struct flow_action_entry { u32 num_entries; struct action_gate_entry *entries; } gate; + struct { /* FLOW_ACTION_PPPOE_PUSH */ + u16 sid; + } pppoe; }; struct flow_action_cookie *cookie; /* user defined action cookie */ }; diff --git a/include/net/gro.h b/include/net/gro.h index 8a6eb5303cc4..01edaf3fdda0 100644 --- a/include/net/gro.h +++ b/include/net/gro.h @@ -3,10 +3,23 @@ #ifndef _NET_IPV6_GRO_H #define _NET_IPV6_GRO_H +#include <linux/indirect_call_wrapper.h> + +struct list_head; +struct sk_buff; + INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *, struct sk_buff *)); INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int)); INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *, struct sk_buff *)); INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int)); + +#define indirect_call_gro_receive_inet(cb, f2, f1, head, skb) \ +({ \ + unlikely(gro_recursion_inc_test(skb)) ? \ + NAPI_GRO_CB(skb)->flush |= 1, NULL : \ + INDIRECT_CALL_INET(cb, f2, f1, head, skb); \ +}) + #endif /* _NET_IPV6_GRO_H */ diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 8bf5906073bc..71bb4cc4d05d 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -78,6 +78,7 @@ struct inet6_ifaddr { struct ip6_sf_socklist { unsigned int sl_max; unsigned int sl_count; + struct rcu_head rcu; struct in6_addr sl_addr[]; }; @@ -91,18 +92,18 @@ struct ipv6_mc_socklist { int ifindex; unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */ struct ipv6_mc_socklist __rcu *next; - rwlock_t sflock; - struct ip6_sf_socklist *sflist; + struct ip6_sf_socklist __rcu *sflist; struct rcu_head rcu; }; struct ip6_sf_list { - struct ip6_sf_list *sf_next; + struct ip6_sf_list __rcu *sf_next; struct in6_addr sf_addr; unsigned long sf_count[2]; /* include/exclude counts */ unsigned char sf_gsresp; /* include in g & s response? */ unsigned char sf_oldin; /* change state */ unsigned char sf_crcount; /* retrans. 
left to send */ + struct rcu_head rcu; }; #define MAF_TIMER_RUNNING 0x01 @@ -114,19 +115,19 @@ struct ip6_sf_list { struct ifmcaddr6 { struct in6_addr mca_addr; struct inet6_dev *idev; - struct ifmcaddr6 *next; - struct ip6_sf_list *mca_sources; - struct ip6_sf_list *mca_tomb; + struct ifmcaddr6 __rcu *next; + struct ip6_sf_list __rcu *mca_sources; + struct ip6_sf_list __rcu *mca_tomb; unsigned int mca_sfmode; unsigned char mca_crcount; unsigned long mca_sfcount[2]; - struct timer_list mca_timer; + struct delayed_work mca_work; unsigned int mca_flags; int mca_users; refcount_t mca_refcnt; - spinlock_t mca_lock; unsigned long mca_cstamp; unsigned long mca_tstamp; + struct rcu_head rcu; }; /* Anycast stuff */ @@ -165,9 +166,8 @@ struct inet6_dev { struct list_head addr_list; - struct ifmcaddr6 *mc_list; - struct ifmcaddr6 *mc_tomb; - spinlock_t mc_lock; + struct ifmcaddr6 __rcu *mc_list; + struct ifmcaddr6 __rcu *mc_tomb; unsigned char mc_qrv; /* Query Robustness Variable */ unsigned char mc_gq_running; @@ -179,9 +179,18 @@ struct inet6_dev { unsigned long mc_qri; /* Query Response Interval */ unsigned long mc_maxdelay; - struct timer_list mc_gq_timer; /* general query timer */ - struct timer_list mc_ifc_timer; /* interface change timer */ - struct timer_list mc_dad_timer; /* dad complete mc timer */ + struct delayed_work mc_gq_work; /* general query work */ + struct delayed_work mc_ifc_work; /* interface change work */ + struct delayed_work mc_dad_work; /* dad complete mc work */ + struct delayed_work mc_query_work; /* mld query work */ + struct delayed_work mc_report_work; /* mld report work */ + + struct sk_buff_head mc_query_queue; /* mld query queue */ + struct sk_buff_head mc_report_queue; /* mld report queue */ + + spinlock_t mc_query_lock; /* mld query queue lock */ + spinlock_t mc_report_lock; /* mld query report lock */ + struct mutex mc_lock; /* mld global lock */ struct ifacaddr6 *ac_list; rwlock_t lock; diff --git a/include/net/ipv6.h b/include/net/ipv6.h index bd1f396cc9c7..448bf2b34759 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -30,6 +30,7 @@ */ #define NEXTHDR_HOP 0 /* Hop-by-hop option header. */ +#define NEXTHDR_IPV4 4 /* IPv4 in IPv6 */ #define NEXTHDR_TCP 6 /* TCP segment. */ #define NEXTHDR_UDP 17 /* UDP message. 
*/ #define NEXTHDR_IPV6 41 /* IPv6 in IPv6 */ diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h index 8fce558b5fea..afbce90c4480 100644 --- a/include/net/ipv6_stubs.h +++ b/include/net/ipv6_stubs.h @@ -66,6 +66,8 @@ struct ipv6_stub { int (*ipv6_fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, int (*output)(struct net *, struct sock *, struct sk_buff *)); + struct net_device *(*ipv6_dev_find)(struct net *net, const struct in6_addr *addr, + struct net_device *dev); }; extern const struct ipv6_stub *ipv6_stub __read_mostly; diff --git a/include/net/lapb.h b/include/net/lapb.h index eee73442a1ba..124ee122f2c8 100644 --- a/include/net/lapb.h +++ b/include/net/lapb.h @@ -92,7 +92,7 @@ struct lapb_cb { unsigned short n2, n2count; unsigned short t1, t2; struct timer_list t1timer, t2timer; - bool t1timer_stop, t2timer_stop; + bool t1timer_running, t2timer_running; /* Internal control information */ struct sk_buff_head write_queue; diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 2d1d629e5d14..445b66c6eb7e 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1768,10 +1768,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev); * * This can be used by mac80211 drivers with direct cfg80211 APIs * (like the vendor commands) that needs to get the wdev for a vif. - * - * Note that this function may return %NULL if the given wdev isn't - * associated with a vif that the driver knows about (e.g. monitor - * or AP_VLAN interfaces.) + * This can also be useful to get the netdev associated to a vif. */ struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif); @@ -2399,6 +2396,12 @@ struct ieee80211_txq { * @IEEE80211_HW_SUPPORTS_RX_DECAP_OFFLOAD: Hardware supports rx decapsulation * offload * + * @IEEE80211_HW_SUPPORTS_CONC_MON_RX_DECAP: Hardware supports concurrent rx + * decapsulation offload and passing raw 802.11 frames for monitor iface. + * If this is supported, the driver must pass both 802.3 frames for real + * usage and 802.11 frames with %RX_FLAG_ONLY_MONITOR set for monitor to + * the stack. + * * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays */ enum ieee80211_hw_flags { @@ -2453,6 +2456,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_AMPDU_KEYBORDER_SUPPORT, IEEE80211_HW_SUPPORTS_TX_ENCAP_OFFLOAD, IEEE80211_HW_SUPPORTS_RX_DECAP_OFFLOAD, + IEEE80211_HW_SUPPORTS_CONC_MON_RX_DECAP, /* keep last, obviously */ NUM_IEEE80211_HW_FLAGS diff --git a/include/net/mld.h b/include/net/mld.h index 496bddb59942..c07359808493 100644 --- a/include/net/mld.h +++ b/include/net/mld.h @@ -92,6 +92,9 @@ struct mld2_query { #define MLD_EXP_MIN_LIMIT 32768UL #define MLDV1_MRD_MAX_COMPAT (MLD_EXP_MIN_LIMIT - 1) +#define MLD_MAX_QUEUE 8 +#define MLD_MAX_SKBS 32 + static inline unsigned long mldv2_mrc(const struct mld2_query *mlh2) { /* RFC3810, 5.1.3. 
Maximum Response Code */ diff --git a/include/net/mptcp.h b/include/net/mptcp.h index 5694370be3d4..83f23774b908 100644 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@ -30,8 +30,27 @@ struct mptcp_ext { ack64:1, mpc_map:1, frozen:1, - __unused:1; - /* one byte hole */ + reset_transient:1; + u8 reset_reason:4; +}; + +#define MPTCP_RM_IDS_MAX 8 + +struct mptcp_rm_list { + u8 ids[MPTCP_RM_IDS_MAX]; + u8 nr; +}; + +struct mptcp_addr_info { + u8 id; + sa_family_t family; + __be16 port; + union { + struct in_addr addr; +#if IS_ENABLED(CONFIG_MPTCP_IPV6) + struct in6_addr addr6; +#endif + }; }; struct mptcp_out_options { @@ -39,18 +58,13 @@ struct mptcp_out_options { u16 suboptions; u64 sndr_key; u64 rcvr_key; - union { - struct in_addr addr; -#if IS_ENABLED(CONFIG_MPTCP_IPV6) - struct in6_addr addr6; -#endif - }; - u8 addr_id; - u16 port; u64 ahmac; - u8 rm_id; + struct mptcp_addr_info addr; + struct mptcp_rm_list rm_list; u8 join_id; u8 backup; + u8 reset_reason:4; + u8 reset_transient:1; u32 nonce; u64 thmac; u32 token; @@ -149,6 +163,16 @@ void mptcp_seq_show(struct seq_file *seq); int mptcp_subflow_init_cookie_req(struct request_sock *req, const struct sock *sk_listener, struct sk_buff *skb); + +__be32 mptcp_get_reset_option(const struct sk_buff *skb); + +static inline __be32 mptcp_reset_option(const struct sk_buff *skb) +{ + if (skb_ext_exist(skb, SKB_EXT_MPTCP)) + return mptcp_get_reset_option(skb); + + return htonl(0u); +} #else static inline void mptcp_init(void) @@ -229,6 +253,8 @@ static inline int mptcp_subflow_init_cookie_req(struct request_sock *req, { return 0; /* TCP fallback */ } + +static inline __be32 mptcp_reset_option(const struct sk_buff *skb) { return htonl(0u); } #endif /* CONFIG_MPTCP */ #if IS_ENABLED(CONFIG_MPTCP_IPV6) diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index dcaee24a4d87..fa5887143f0d 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -22,7 +22,6 @@ #include <net/netns/nexthop.h> #include <net/netns/ieee802154_6lowpan.h> #include <net/netns/sctp.h> -#include <net/netns/dccp.h> #include <net/netns/netfilter.h> #include <net/netns/x_tables.h> #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) @@ -130,9 +129,6 @@ struct net { #if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE) struct netns_sctp sctp; #endif -#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE) - struct netns_dccp dccp; -#endif #ifdef CONFIG_NETFILTER struct netns_nf nf; struct netns_xt xt; @@ -142,15 +138,6 @@ struct net { #if defined(CONFIG_NF_TABLES) || defined(CONFIG_NF_TABLES_MODULE) struct netns_nftables nft; #endif -#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) - struct netns_nf_frag nf_frag; - struct ctl_table_header *nf_frag_frags_hdr; -#endif - struct sock *nfnl; - struct sock *nfnl_stash; -#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) - struct list_head nfct_timeout_list; -#endif #endif #ifdef CONFIG_WEXT_CORE struct sk_buff_head wext_nlevents; @@ -407,7 +394,6 @@ int register_pernet_device(struct pernet_operations *); void unregister_pernet_device(struct pernet_operations *); struct ctl_table; -struct ctl_table_header; #ifdef CONFIG_SYSCTL int net_sysctl_init(void); diff --git a/include/net/netfilter/ipv4/nf_defrag_ipv4.h b/include/net/netfilter/ipv4/nf_defrag_ipv4.h index bcbd724cc048..7fda9ce9f694 100644 --- a/include/net/netfilter/ipv4/nf_defrag_ipv4.h +++ b/include/net/netfilter/ipv4/nf_defrag_ipv4.h @@ -3,6 +3,7 @@ #define _NF_DEFRAG_IPV4_H struct net; -int nf_defrag_ipv4_enable(struct net 
*); +int nf_defrag_ipv4_enable(struct net *net); +void nf_defrag_ipv4_disable(struct net *net); #endif /* _NF_DEFRAG_IPV4_H */ diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h index 7b3c873f8839..e95483192d1b 100644 --- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h +++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h @@ -4,7 +4,4 @@ extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6; -#include <linux/sysctl.h> -extern struct ctl_table nf_ct_ipv6_sysctl_table[]; - #endif /* _NF_CONNTRACK_IPV6_H*/ diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h index 6d31cd041143..0fd8a4159662 100644 --- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h +++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h @@ -5,7 +5,8 @@ #include <linux/skbuff.h> #include <linux/types.h> -int nf_defrag_ipv6_enable(struct net *); +int nf_defrag_ipv6_enable(struct net *net); +void nf_defrag_ipv6_disable(struct net *net); int nf_ct_frag6_init(void); void nf_ct_frag6_cleanup(void); @@ -13,4 +14,10 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user); struct inet_frags_ctl; +struct nft_ct_frag6_pernet { + struct ctl_table_header *nf_frag_frags_hdr; + struct fqdir *fqdir; + unsigned int users; +}; + #endif /* _NF_DEFRAG_IPV6_H */ diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 439379ca9ffa..06dc6db70d18 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -44,9 +44,23 @@ union nf_conntrack_expect_proto { }; struct nf_conntrack_net { + /* only used when new connection is allocated: */ + atomic_t count; + unsigned int expect_count; + u8 sysctl_auto_assign_helper; + bool auto_assign_helper_warned; + + /* only used from work queues, configuration plane, and so on: */ unsigned int users4; unsigned int users6; unsigned int users_bridge; +#ifdef CONFIG_SYSCTL + struct ctl_table_header *sysctl_header; +#endif +#ifdef CONFIG_NF_CONNTRACK_EVENTS + struct delayed_work ecache_dwork; + struct netns_ct *ct_net; +#endif }; #include <linux/types.h> @@ -324,6 +338,7 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, void nf_ct_tmpl_free(struct nf_conn *tmpl); u32 nf_ct_get_id(const struct nf_conn *ct); +u32 nf_conntrack_count(const struct net *net); static inline void nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info) diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h index eb81f9195e28..d00ba6048e44 100644 --- a/include/net/netfilter/nf_conntrack_ecache.h +++ b/include/net/netfilter/nf_conntrack_ecache.h @@ -171,12 +171,18 @@ void nf_ct_expect_event_report(enum ip_conntrack_expect_events event, struct nf_conntrack_expect *exp, u32 portid, int report); +void nf_conntrack_ecache_work(struct net *net, enum nf_ct_ecache_state state); + void nf_conntrack_ecache_pernet_init(struct net *net); void nf_conntrack_ecache_pernet_fini(struct net *net); int nf_conntrack_ecache_init(void); void nf_conntrack_ecache_fini(void); +static inline bool nf_conntrack_ecache_dwork_pending(const struct net *net) +{ + return net->ct.ecache_dwork_pending; +} #else /* CONFIG_NF_CONNTRACK_EVENTS */ static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e, @@ -186,6 +192,11 @@ static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e, { } +static inline void nf_conntrack_ecache_work(struct net *net, + enum 
nf_ct_ecache_state s) +{ +} + static inline void nf_conntrack_ecache_pernet_init(struct net *net) { } @@ -203,26 +214,6 @@ static inline void nf_conntrack_ecache_fini(void) { } +static inline bool nf_conntrack_ecache_dwork_pending(const struct net *net) { return false; } #endif /* CONFIG_NF_CONNTRACK_EVENTS */ - -static inline void nf_conntrack_ecache_delayed_work(struct net *net) -{ -#ifdef CONFIG_NF_CONNTRACK_EVENTS - if (!delayed_work_pending(&net->ct.ecache_dwork)) { - schedule_delayed_work(&net->ct.ecache_dwork, HZ); - net->ct.ecache_dwork_pending = true; - } -#endif -} - -static inline void nf_conntrack_ecache_work(struct net *net) -{ -#ifdef CONFIG_NF_CONNTRACK_EVENTS - if (net->ct.ecache_dwork_pending) { - net->ct.ecache_dwork_pending = false; - mod_delayed_work(system_wq, &net->ct.ecache_dwork, 0); - } -#endif -} - #endif /*_NF_CONNTRACK_ECACHE_H*/ diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index 54c4d5c908a5..48ef7460ff30 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -21,6 +21,8 @@ struct nf_flow_key { struct flow_dissector_key_control control; struct flow_dissector_key_control enc_control; struct flow_dissector_key_basic basic; + struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_vlan cvlan; union { struct flow_dissector_key_ipv4_addrs ipv4; struct flow_dissector_key_ipv6_addrs ipv6; @@ -86,8 +88,17 @@ static inline bool nf_flowtable_hw_offload(struct nf_flowtable *flowtable) enum flow_offload_tuple_dir { FLOW_OFFLOAD_DIR_ORIGINAL = IP_CT_DIR_ORIGINAL, FLOW_OFFLOAD_DIR_REPLY = IP_CT_DIR_REPLY, - FLOW_OFFLOAD_DIR_MAX = IP_CT_DIR_MAX }; +#define FLOW_OFFLOAD_DIR_MAX IP_CT_DIR_MAX + +enum flow_offload_xmit_type { + FLOW_OFFLOAD_XMIT_UNSPEC = 0, + FLOW_OFFLOAD_XMIT_NEIGH, + FLOW_OFFLOAD_XMIT_XFRM, + FLOW_OFFLOAD_XMIT_DIRECT, +}; + +#define NF_FLOW_TABLE_ENCAP_MAX 2 struct flow_offload_tuple { union { @@ -107,15 +118,31 @@ struct flow_offload_tuple { u8 l3proto; u8 l4proto; + struct { + u16 id; + __be16 proto; + } encap[NF_FLOW_TABLE_ENCAP_MAX]; /* All members above are keys for lookups, see flow_offload_hash(). 
*/ struct { } __hash; - u8 dir; - + u8 dir:2, + xmit_type:2, + encap_num:2, + in_vlan_ingress:2; u16 mtu; - - struct dst_entry *dst_cache; + union { + struct { + struct dst_entry *dst_cache; + u32 dst_cookie; + }; + struct { + u32 ifidx; + u32 hw_ifidx; + u8 h_source[ETH_ALEN]; + u8 h_dest[ETH_ALEN]; + } out; + }; }; struct flow_offload_tuple_rhash { @@ -130,7 +157,6 @@ enum nf_flow_flags { NF_FLOW_HW, NF_FLOW_HW_DYING, NF_FLOW_HW_DEAD, - NF_FLOW_HW_REFRESH, NF_FLOW_HW_PENDING, }; @@ -158,7 +184,23 @@ static inline __s32 nf_flow_timeout_delta(unsigned int timeout) struct nf_flow_route { struct { - struct dst_entry *dst; + struct dst_entry *dst; + struct { + u32 ifindex; + struct { + u16 id; + __be16 proto; + } encap[NF_FLOW_TABLE_ENCAP_MAX]; + u8 num_encaps:2, + ingress_vlans:2; + } in; + struct { + u32 ifindex; + u32 hw_ifindex; + u8 h_source[ETH_ALEN]; + u8 h_dest[ETH_ALEN]; + } out; + enum flow_offload_xmit_type xmit_type; } tuple[FLOW_OFFLOAD_DIR_MAX]; }; @@ -229,12 +271,12 @@ void nf_flow_table_free(struct nf_flowtable *flow_table); void flow_offload_teardown(struct flow_offload *flow); -int nf_flow_snat_port(const struct flow_offload *flow, - struct sk_buff *skb, unsigned int thoff, - u8 protocol, enum flow_offload_tuple_dir dir); -int nf_flow_dnat_port(const struct flow_offload *flow, - struct sk_buff *skb, unsigned int thoff, - u8 protocol, enum flow_offload_tuple_dir dir); +void nf_flow_snat_port(const struct flow_offload *flow, + struct sk_buff *skb, unsigned int thoff, + u8 protocol, enum flow_offload_tuple_dir dir); +void nf_flow_dnat_port(const struct flow_offload *flow, + struct sk_buff *skb, unsigned int thoff, + u8 protocol, enum flow_offload_tuple_dir dir); struct flow_ports { __be16 source, dest; diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h index 716db4a0fed8..e55eedc84ed7 100644 --- a/include/net/netfilter/nf_log.h +++ b/include/net/netfilter/nf_log.h @@ -68,7 +68,6 @@ void nf_log_unbind_pf(struct net *net, u_int8_t pf); int nf_logger_find_get(int pf, enum nf_log_type type); void nf_logger_put(int pf, enum nf_log_type type); -void nf_logger_request_module(int pf, enum nf_log_type type); #define MODULE_ALIAS_NF_LOGGER(family, type) \ MODULE_ALIAS("nf-logger-" __stringify(family) "-" __stringify(type)) @@ -99,28 +98,4 @@ struct nf_log_buf; struct nf_log_buf *nf_log_buf_open(void); __printf(2, 3) int nf_log_buf_add(struct nf_log_buf *m, const char *f, ...); void nf_log_buf_close(struct nf_log_buf *m); - -/* common logging functions */ -int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb, - u8 proto, int fragment, unsigned int offset); -int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb, - u8 proto, int fragment, unsigned int offset, - unsigned int logflags); -void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m, - struct sock *sk); -void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb); -void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf, - unsigned int hooknum, const struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - const struct nf_loginfo *loginfo, - const char *prefix); -void nf_log_l2packet(struct net *net, u_int8_t pf, - __be16 protocol, - unsigned int hooknum, - const struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - const struct nf_loginfo *loginfo, const char *prefix); - #endif /* _NF_LOG_H */ diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h index 
0d412dd63707..987111ae5240 100644 --- a/include/net/netfilter/nf_nat.h +++ b/include/net/netfilter/nf_nat.h @@ -104,8 +104,6 @@ unsigned int nf_nat_inet_fn(void *priv, struct sk_buff *skb, const struct nf_hook_state *state); -int nf_xfrm_me_harder(struct net *n, struct sk_buff *s, unsigned int family); - static inline int nf_nat_initialized(struct nf_conn *ct, enum nf_nat_manip_type manip) { diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 5aaced6bf13e..27eeb613bb4e 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -13,6 +13,7 @@ #include <net/netfilter/nf_flow_table.h> #include <net/netlink.h> #include <net/flow_offload.h> +#include <net/netns/generic.h> #define NFT_MAX_HOOKS (NF_INET_INGRESS + 1) @@ -496,6 +497,7 @@ struct nft_set { u8 dlen; u8 num_exprs; struct nft_expr *exprs[NFT_SET_EXPR_MAX]; + struct list_head catchall_list; unsigned char data[] __attribute__((aligned(__alignof__(u64)))); }; @@ -521,6 +523,10 @@ struct nft_set *nft_set_lookup_global(const struct net *net, const struct nlattr *nla_set_id, u8 genmask); +struct nft_set_ext *nft_set_catchall_lookup(const struct net *net, + const struct nft_set *set); +void *nft_set_catchall_gc(const struct nft_set *set); + static inline unsigned long nft_set_gc_interval(const struct nft_set *set) { return set->gc_int ? msecs_to_jiffies(set->gc_int) : HZ; @@ -867,6 +873,8 @@ struct nft_expr_ops { int (*offload)(struct nft_offload_ctx *ctx, struct nft_flow_rule *flow, const struct nft_expr *expr); + void (*offload_stats)(struct nft_expr *expr, + const struct flow_stats *stats); u32 offload_flags; const struct nft_expr_type *type; void *data; @@ -1498,13 +1506,16 @@ struct nft_trans_chain { struct nft_trans_table { bool update; - bool enable; + u8 state; + u32 flags; }; #define nft_trans_table_update(trans) \ (((struct nft_trans_table *)trans->data)->update) -#define nft_trans_table_enable(trans) \ - (((struct nft_trans_table *)trans->data)->enable) +#define nft_trans_table_state(trans) \ + (((struct nft_trans_table *)trans->data)->state) +#define nft_trans_table_flags(trans) \ + (((struct nft_trans_table *)trans->data)->flags) struct nft_trans_elem { struct nft_set *set; @@ -1559,4 +1570,27 @@ void nf_tables_trans_destroy_flush_work(void); int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result); __be64 nf_jiffies64_to_msecs(u64 input); +#ifdef CONFIG_MODULES +__printf(2, 3) int nft_request_module(struct net *net, const char *fmt, ...); +#else +static inline int nft_request_module(struct net *net, const char *fmt, ...) 
{ return -ENOENT; } +#endif + +struct nftables_pernet { + struct list_head tables; + struct list_head commit_list; + struct list_head module_list; + struct list_head notify_list; + struct mutex commit_mutex; + unsigned int base_seq; + u8 validate_state; +}; + +extern unsigned int nf_tables_net_id; + +static inline struct nftables_pernet *nft_pernet(const struct net *net) +{ + return net_generic(net, nf_tables_net_id); +} + #endif /* _NET_NF_TABLES_H */ diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h index 1d34fe154fe0..f9d95ff82df8 100644 --- a/include/net/netfilter/nf_tables_offload.h +++ b/include/net/netfilter/nf_tables_offload.h @@ -4,11 +4,16 @@ #include <net/flow_offload.h> #include <net/netfilter/nf_tables.h> +enum nft_offload_reg_flags { + NFT_OFFLOAD_F_NETWORK2HOST = (1 << 0), +}; + struct nft_offload_reg { u32 key; u32 len; u32 base_offset; u32 offset; + u32 flags; struct nft_data data; struct nft_data mask; }; @@ -45,6 +50,7 @@ struct nft_flow_key { struct flow_dissector_key_ports tp; struct flow_dissector_key_ip ip; struct flow_dissector_key_vlan vlan; + struct flow_dissector_key_vlan cvlan; struct flow_dissector_key_eth_addrs eth_addrs; struct flow_dissector_key_meta meta; } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */ @@ -68,16 +74,21 @@ void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow, struct nft_rule; struct nft_flow_rule *nft_flow_rule_create(struct net *net, const struct nft_rule *rule); +int nft_flow_rule_stats(const struct nft_chain *chain, const struct nft_rule *rule); void nft_flow_rule_destroy(struct nft_flow_rule *flow); int nft_flow_rule_offload_commit(struct net *net); -#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \ +#define NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, __flags) \ (__reg)->base_offset = \ offsetof(struct nft_flow_key, __base); \ (__reg)->offset = \ offsetof(struct nft_flow_key, __base.__field); \ (__reg)->len = __len; \ (__reg)->key = __key; \ + (__reg)->flags = __flags; + +#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \ + NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, 0) #define NFT_OFFLOAD_MATCH_EXACT(__key, __base, __field, __len, __reg) \ NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \ diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index 806454e767bf..ad0a95c2335e 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -24,9 +24,9 @@ struct nf_generic_net { struct nf_tcp_net { unsigned int timeouts[TCP_CONNTRACK_TIMEOUT_MAX]; - int tcp_loose; - int tcp_be_liberal; - int tcp_max_retrans; + u8 tcp_loose; + u8 tcp_be_liberal; + u8 tcp_max_retrans; }; enum udp_conntrack { @@ -45,7 +45,7 @@ struct nf_icmp_net { #ifdef CONFIG_NF_CT_PROTO_DCCP struct nf_dccp_net { - int dccp_loose; + u8 dccp_loose; unsigned int dccp_timeout[CT_DCCP_MAX + 1]; }; #endif @@ -93,22 +93,15 @@ struct ct_pcpu { }; struct netns_ct { - atomic_t count; - unsigned int expect_count; #ifdef CONFIG_NF_CONNTRACK_EVENTS - struct delayed_work ecache_dwork; bool ecache_dwork_pending; #endif - bool auto_assign_helper_warned; -#ifdef CONFIG_SYSCTL - struct ctl_table_header *sysctl_header; -#endif - unsigned int sysctl_log_invalid; /* Log invalid packets */ - int sysctl_events; - int sysctl_acct; - int sysctl_auto_assign_helper; - int sysctl_tstamp; - int sysctl_checksum; + u8 sysctl_log_invalid; /* Log invalid packets */ + u8 sysctl_events; + u8 sysctl_acct; + 
u8 sysctl_auto_assign_helper; + u8 sysctl_tstamp; + u8 sysctl_checksum; struct ct_pcpu __percpu *pcpu_lists; struct ip_conntrack_stat __percpu *stat; diff --git a/include/net/netns/dccp.h b/include/net/netns/dccp.h deleted file mode 100644 index cdbc4f5b8390..000000000000 --- a/include/net/netns/dccp.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __NETNS_DCCP_H__ -#define __NETNS_DCCP_H__ - -struct sock; - -struct netns_dccp { - struct sock *v4_ctl_sk; - struct sock *v6_ctl_sk; -}; - -#endif diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 70a2a085dd1a..f6af8d96d3c6 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -11,7 +11,6 @@ #include <linux/rcupdate.h> #include <linux/siphash.h> -struct tcpm_hash_bucket; struct ctl_table_header; struct ipv4_devconf; struct fib_rules_ops; @@ -33,14 +32,18 @@ struct inet_hashinfo; struct inet_timewait_death_row { atomic_t tw_count; + char tw_pad[L1_CACHE_BYTES - sizeof(atomic_t)]; - struct inet_hashinfo *hashinfo ____cacheline_aligned_in_smp; + struct inet_hashinfo *hashinfo; int sysctl_max_tw_buckets; }; struct tcp_fastopen_context; struct netns_ipv4 { + /* Please keep tcp_death_row at first field in netns_ipv4 */ + struct inet_timewait_death_row tcp_death_row ____cacheline_aligned_in_smp; + #ifdef CONFIG_SYSCTL struct ctl_table_header *forw_hdr; struct ctl_table_header *frags_hdr; @@ -54,17 +57,17 @@ struct netns_ipv4 { struct mutex ra_mutex; #ifdef CONFIG_IP_MULTIPLE_TABLES struct fib_rules_ops *rules_ops; - bool fib_has_custom_rules; - unsigned int fib_rules_require_fldissect; struct fib_table __rcu *fib_main; struct fib_table __rcu *fib_default; + unsigned int fib_rules_require_fldissect; + bool fib_has_custom_rules; #endif bool fib_has_custom_local_routes; + bool fib_offload_disabled; #ifdef CONFIG_IP_ROUTE_CLASSID int fib_num_tclassid_users; #endif struct hlist_head *fib_table_hash; - bool fib_offload_disabled; struct sock *fibnl; struct sock * __percpu *icmp_sk; @@ -73,52 +76,43 @@ struct netns_ipv4 { struct inet_peer_base *peers; struct sock * __percpu *tcp_sk; struct fqdir *fqdir; -#ifdef CONFIG_NETFILTER - struct xt_table *iptable_filter; - struct xt_table *iptable_mangle; - struct xt_table *iptable_raw; - struct xt_table *arptable_filter; -#ifdef CONFIG_SECURITY - struct xt_table *iptable_security; -#endif - struct xt_table *nat_table; -#endif - int sysctl_icmp_echo_ignore_all; - int sysctl_icmp_echo_ignore_broadcasts; - int sysctl_icmp_ignore_bogus_error_responses; + u8 sysctl_icmp_echo_ignore_all; + u8 sysctl_icmp_echo_enable_probe; + u8 sysctl_icmp_echo_ignore_broadcasts; + u8 sysctl_icmp_ignore_bogus_error_responses; + u8 sysctl_icmp_errors_use_inbound_ifaddr; int sysctl_icmp_ratelimit; int sysctl_icmp_ratemask; - int sysctl_icmp_errors_use_inbound_ifaddr; struct local_ports ip_local_ports; - int sysctl_tcp_ecn; - int sysctl_tcp_ecn_fallback; + u8 sysctl_tcp_ecn; + u8 sysctl_tcp_ecn_fallback; - int sysctl_ip_default_ttl; - int sysctl_ip_no_pmtu_disc; - int sysctl_ip_fwd_use_pmtu; - int sysctl_ip_fwd_update_priority; - int sysctl_ip_nonlocal_bind; - int sysctl_ip_autobind_reuse; + u8 sysctl_ip_default_ttl; + u8 sysctl_ip_no_pmtu_disc; + u8 sysctl_ip_fwd_use_pmtu; + u8 sysctl_ip_fwd_update_priority; + u8 sysctl_ip_nonlocal_bind; + u8 sysctl_ip_autobind_reuse; /* Shall we try to damage output packets if routing dev changes? 
*/ - int sysctl_ip_dynaddr; - int sysctl_ip_early_demux; + u8 sysctl_ip_dynaddr; + u8 sysctl_ip_early_demux; #ifdef CONFIG_NET_L3_MASTER_DEV - int sysctl_raw_l3mdev_accept; + u8 sysctl_raw_l3mdev_accept; #endif - int sysctl_tcp_early_demux; - int sysctl_udp_early_demux; + u8 sysctl_tcp_early_demux; + u8 sysctl_udp_early_demux; - int sysctl_nexthop_compat_mode; + u8 sysctl_nexthop_compat_mode; - int sysctl_fwmark_reflect; - int sysctl_tcp_fwmark_accept; + u8 sysctl_fwmark_reflect; + u8 sysctl_tcp_fwmark_accept; #ifdef CONFIG_NET_L3_MASTER_DEV - int sysctl_tcp_l3mdev_accept; + u8 sysctl_tcp_l3mdev_accept; #endif - int sysctl_tcp_mtu_probing; + u8 sysctl_tcp_mtu_probing; int sysctl_tcp_mtu_probe_floor; int sysctl_tcp_base_mss; int sysctl_tcp_min_snd_mss; @@ -126,55 +120,55 @@ struct netns_ipv4 { u32 sysctl_tcp_probe_interval; int sysctl_tcp_keepalive_time; - int sysctl_tcp_keepalive_probes; int sysctl_tcp_keepalive_intvl; + u8 sysctl_tcp_keepalive_probes; - int sysctl_tcp_syn_retries; - int sysctl_tcp_synack_retries; - int sysctl_tcp_syncookies; + u8 sysctl_tcp_syn_retries; + u8 sysctl_tcp_synack_retries; + u8 sysctl_tcp_syncookies; int sysctl_tcp_reordering; - int sysctl_tcp_retries1; - int sysctl_tcp_retries2; - int sysctl_tcp_orphan_retries; + u8 sysctl_tcp_retries1; + u8 sysctl_tcp_retries2; + u8 sysctl_tcp_orphan_retries; + u8 sysctl_tcp_tw_reuse; int sysctl_tcp_fin_timeout; unsigned int sysctl_tcp_notsent_lowat; - int sysctl_tcp_tw_reuse; - int sysctl_tcp_sack; - int sysctl_tcp_window_scaling; - int sysctl_tcp_timestamps; - int sysctl_tcp_early_retrans; - int sysctl_tcp_recovery; - int sysctl_tcp_thin_linear_timeouts; - int sysctl_tcp_slow_start_after_idle; - int sysctl_tcp_retrans_collapse; - int sysctl_tcp_stdurg; - int sysctl_tcp_rfc1337; - int sysctl_tcp_abort_on_overflow; - int sysctl_tcp_fack; + u8 sysctl_tcp_sack; + u8 sysctl_tcp_window_scaling; + u8 sysctl_tcp_timestamps; + u8 sysctl_tcp_early_retrans; + u8 sysctl_tcp_recovery; + u8 sysctl_tcp_thin_linear_timeouts; + u8 sysctl_tcp_slow_start_after_idle; + u8 sysctl_tcp_retrans_collapse; + u8 sysctl_tcp_stdurg; + u8 sysctl_tcp_rfc1337; + u8 sysctl_tcp_abort_on_overflow; + u8 sysctl_tcp_fack; /* obsolete */ int sysctl_tcp_max_reordering; - int sysctl_tcp_dsack; - int sysctl_tcp_app_win; int sysctl_tcp_adv_win_scale; - int sysctl_tcp_frto; - int sysctl_tcp_nometrics_save; - int sysctl_tcp_no_ssthresh_metrics_save; - int sysctl_tcp_moderate_rcvbuf; - int sysctl_tcp_tso_win_divisor; - int sysctl_tcp_workaround_signed_windows; + u8 sysctl_tcp_dsack; + u8 sysctl_tcp_app_win; + u8 sysctl_tcp_frto; + u8 sysctl_tcp_nometrics_save; + u8 sysctl_tcp_no_ssthresh_metrics_save; + u8 sysctl_tcp_moderate_rcvbuf; + u8 sysctl_tcp_tso_win_divisor; + u8 sysctl_tcp_workaround_signed_windows; int sysctl_tcp_limit_output_bytes; int sysctl_tcp_challenge_ack_limit; - int sysctl_tcp_min_tso_segs; int sysctl_tcp_min_rtt_wlen; - int sysctl_tcp_autocorking; + u8 sysctl_tcp_min_tso_segs; + u8 sysctl_tcp_autocorking; + u8 sysctl_tcp_reflect_tos; + u8 sysctl_tcp_comp_sack_nr; int sysctl_tcp_invalid_ratelimit; int sysctl_tcp_pacing_ss_ratio; int sysctl_tcp_pacing_ca_ratio; int sysctl_tcp_wmem[3]; int sysctl_tcp_rmem[3]; - int sysctl_tcp_comp_sack_nr; unsigned long sysctl_tcp_comp_sack_delay_ns; unsigned long sysctl_tcp_comp_sack_slack_ns; - struct inet_timewait_death_row tcp_death_row; int sysctl_max_syn_backlog; int sysctl_tcp_fastopen; const struct tcp_congestion_ops __rcu *tcp_congestion_control; @@ -183,20 +177,19 @@ struct netns_ipv4 { unsigned int 
sysctl_tcp_fastopen_blackhole_timeout; atomic_t tfo_active_disable_times; unsigned long tfo_active_disable_stamp; - int sysctl_tcp_reflect_tos; int sysctl_udp_wmem_min; int sysctl_udp_rmem_min; - int sysctl_fib_notify_on_flag_change; + u8 sysctl_fib_notify_on_flag_change; #ifdef CONFIG_NET_L3_MASTER_DEV - int sysctl_udp_l3mdev_accept; + u8 sysctl_udp_l3mdev_accept; #endif + u8 sysctl_igmp_llm_reports; int sysctl_igmp_max_memberships; int sysctl_igmp_max_msf; - int sysctl_igmp_llm_reports; int sysctl_igmp_qrv; struct ping_group_range ping_group_range; @@ -217,8 +210,8 @@ struct netns_ipv4 { #endif #endif #ifdef CONFIG_IP_ROUTE_MULTIPATH - int sysctl_fib_multipath_use_neigh; - int sysctl_fib_multipath_hash_policy; + u8 sysctl_fib_multipath_use_neigh; + u8 sysctl_fib_multipath_hash_policy; #endif struct fib_notifier_ops *notifier_ops; diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index 21c0debbd39e..6153c8067009 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -20,7 +20,6 @@ struct netns_sysctl_ipv6 { struct ctl_table_header *frags_hdr; struct ctl_table_header *xfrm6_hdr; #endif - int bindv6only; int flush_delay; int ip6_rt_max_size; int ip6_rt_gc_min_interval; @@ -29,21 +28,22 @@ struct netns_sysctl_ipv6 { int ip6_rt_gc_elasticity; int ip6_rt_mtu_expires; int ip6_rt_min_advmss; - int multipath_hash_policy; - int flowlabel_consistency; - int auto_flowlabels; + u8 bindv6only; + u8 multipath_hash_policy; + u8 flowlabel_consistency; + u8 auto_flowlabels; int icmpv6_time; - int icmpv6_echo_ignore_all; - int icmpv6_echo_ignore_multicast; - int icmpv6_echo_ignore_anycast; + u8 icmpv6_echo_ignore_all; + u8 icmpv6_echo_ignore_multicast; + u8 icmpv6_echo_ignore_anycast; DECLARE_BITMAP(icmpv6_ratemask, ICMPV6_MSG_MAX + 1); unsigned long *icmpv6_ratemask_ptr; - int anycast_src_echo_reply; - int ip_nonlocal_bind; - int fwmark_reflect; + u8 anycast_src_echo_reply; + u8 ip_nonlocal_bind; + u8 fwmark_reflect; + u8 flowlabel_state_ranges; int idgen_retries; int idgen_delay; - int flowlabel_state_ranges; int flowlabel_reflect; int max_dst_opts_cnt; int max_hbh_opts_cnt; @@ -51,24 +51,18 @@ struct netns_sysctl_ipv6 { int max_hbh_opts_len; int seg6_flowlabel; bool skip_notify_on_dev_down; - int fib_notify_on_flag_change; + u8 fib_notify_on_flag_change; }; struct netns_ipv6 { + /* Keep ip6_dst_ops at the beginning of netns_sysctl_ipv6 */ + struct dst_ops ip6_dst_ops; + struct netns_sysctl_ipv6 sysctl; struct ipv6_devconf *devconf_all; struct ipv6_devconf *devconf_dflt; struct inet_peer_base *peers; struct fqdir *fqdir; -#ifdef CONFIG_NETFILTER - struct xt_table *ip6table_filter; - struct xt_table *ip6table_mangle; - struct xt_table *ip6table_raw; -#ifdef CONFIG_SECURITY - struct xt_table *ip6table_security; -#endif - struct xt_table *ip6table_nat; -#endif struct fib6_info *fib6_null_entry; struct rt6_info *ip6_null_entry; struct rt6_statistics *rt6_stats; @@ -76,7 +70,6 @@ struct netns_ipv6 { struct hlist_head *fib_table_hash; struct fib6_table *fib6_main_tbl; struct list_head fib6_walkers; - struct dst_ops ip6_dst_ops; rwlock_t fib6_walker_lock; spinlock_t fib6_gc_lock; unsigned int ip6_rt_gc_expire; diff --git a/include/net/netns/mib.h b/include/net/netns/mib.h index 59b2c3a3db42..7e373664b1e7 100644 --- a/include/net/netns/mib.h +++ b/include/net/netns/mib.h @@ -5,22 +5,19 @@ #include <net/snmp.h> struct netns_mib { - DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics); DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics); +#if IS_ENABLED(CONFIG_IPV6) + 
DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics); +#endif + + DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics); DEFINE_SNMP_STAT(struct linux_mib, net_statistics); - DEFINE_SNMP_STAT(struct udp_mib, udp_statistics); - DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics); - DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics); - DEFINE_SNMP_STAT_ATOMIC(struct icmpmsg_mib, icmpmsg_statistics); + DEFINE_SNMP_STAT(struct udp_mib, udp_statistics); #if IS_ENABLED(CONFIG_IPV6) - struct proc_dir_entry *proc_net_devsnmp6; DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6); - DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6); - DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics); - DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics); - DEFINE_SNMP_STAT_ATOMIC(struct icmpv6msg_mib, icmpv6msg_statistics); #endif + #ifdef CONFIG_XFRM_STATISTICS DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics); #endif @@ -30,6 +27,19 @@ struct netns_mib { #ifdef CONFIG_MPTCP DEFINE_SNMP_STAT(struct mptcp_mib, mptcp_statistics); #endif + + DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics); +#if IS_ENABLED(CONFIG_IPV6) + DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6); +#endif + + DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics); + DEFINE_SNMP_STAT_ATOMIC(struct icmpmsg_mib, icmpmsg_statistics); +#if IS_ENABLED(CONFIG_IPV6) + DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics); + DEFINE_SNMP_STAT_ATOMIC(struct icmpv6msg_mib, icmpv6msg_statistics); + struct proc_dir_entry *proc_net_devsnmp6; +#endif }; #endif diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h index ca043342c0eb..15e2b13fb0c0 100644 --- a/include/net/netns/netfilter.h +++ b/include/net/netns/netfilter.h @@ -28,11 +28,5 @@ struct netns_nf { #if IS_ENABLED(CONFIG_DECNET) struct nf_hook_entries __rcu *hooks_decnet[NF_DN_NUMHOOKS]; #endif -#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) - bool defrag_ipv4; -#endif -#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) - bool defrag_ipv6; -#endif }; #endif diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h index 6c0806bd8d1e..8c77832d0240 100644 --- a/include/net/netns/nftables.h +++ b/include/net/netns/nftables.h @@ -5,14 +5,7 @@ #include <linux/list.h> struct netns_nftables { - struct list_head tables; - struct list_head commit_list; - struct list_head module_list; - struct list_head notify_list; - struct mutex commit_mutex; - unsigned int base_seq; u8 gencursor; - u8 validate_state; }; #endif diff --git a/include/net/netns/x_tables.h b/include/net/netns/x_tables.h index 9bc5a12fdbb0..d02316ec2906 100644 --- a/include/net/netns/x_tables.h +++ b/include/net/netns/x_tables.h @@ -5,17 +5,8 @@ #include <linux/list.h> #include <linux/netfilter_defs.h> -struct ebt_table; - struct netns_xt { - struct list_head tables[NFPROTO_NUMPROTO]; bool notrack_deprecated_warning; bool clusterip_deprecated_warning; -#if defined(CONFIG_BRIDGE_NF_EBTABLES) || \ - defined(CONFIG_BRIDGE_NF_EBTABLES_MODULE) - struct ebt_table *broute_table; - struct ebt_table *frame_filter; - struct ebt_table *frame_nat; -#endif }; #endif diff --git a/include/net/nexthop.h b/include/net/nexthop.h index a10a319d7eb2..10e1777877e6 100644 --- a/include/net/nexthop.h +++ b/include/net/nexthop.h @@ -40,6 +40,12 @@ struct nh_config { struct nlattr *nh_grp; u16 nh_grp_type; + u16 nh_grp_res_num_buckets; + unsigned long nh_grp_res_idle_timer; + unsigned long nh_grp_res_unbalanced_timer; + bool nh_grp_res_has_num_buckets; + bool nh_grp_res_has_idle_timer; + bool nh_grp_res_has_unbalanced_timer; struct nlattr 
*nh_encap; u16 nh_encap_type; @@ -63,6 +69,32 @@ struct nh_info { }; }; +struct nh_res_bucket { + struct nh_grp_entry __rcu *nh_entry; + atomic_long_t used_time; + unsigned long migrated_time; + bool occupied; + u8 nh_flags; +}; + +struct nh_res_table { + struct net *net; + u32 nhg_id; + struct delayed_work upkeep_dw; + + /* List of NHGEs that have too few buckets ("uw" for underweight). + * Reclaimed buckets will be given to entries in this list. + */ + struct list_head uw_nh_entries; + unsigned long unbalanced_since; + + u32 idle_timer; + u32 unbalanced_timer; + + u16 num_nh_buckets; + struct nh_res_bucket nh_buckets[]; +}; + struct nh_grp_entry { struct nexthop *nh; u8 weight; @@ -70,7 +102,14 @@ struct nh_grp_entry { union { struct { atomic_t upper_bound; - } mpath; + } hthr; + struct { + /* Member on uw_nh_entries. */ + struct list_head uw_nh_entry; + + u16 count_buckets; + u16 wants_buckets; + } res; }; struct list_head nh_list; @@ -80,9 +119,13 @@ struct nh_grp_entry { struct nh_group { struct nh_group *spare; /* spare group for removals */ u16 num_nh; - bool mpath; + bool is_multipath; + bool hash_threshold; + bool resilient; bool fdb_nh; bool has_v4; + + struct nh_res_table __rcu *res_table; struct nh_grp_entry nh_entries[]; }; @@ -112,11 +155,15 @@ struct nexthop { enum nexthop_event_type { NEXTHOP_EVENT_DEL, NEXTHOP_EVENT_REPLACE, + NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE, + NEXTHOP_EVENT_BUCKET_REPLACE, }; enum nh_notifier_info_type { NH_NOTIFIER_INFO_TYPE_SINGLE, NH_NOTIFIER_INFO_TYPE_GRP, + NH_NOTIFIER_INFO_TYPE_RES_TABLE, + NH_NOTIFIER_INFO_TYPE_RES_BUCKET, }; struct nh_notifier_single_info { @@ -143,6 +190,19 @@ struct nh_notifier_grp_info { struct nh_notifier_grp_entry_info nh_entries[]; }; +struct nh_notifier_res_bucket_info { + u16 bucket_index; + unsigned int idle_timer_ms; + bool force; + struct nh_notifier_single_info old_nh; + struct nh_notifier_single_info new_nh; +}; + +struct nh_notifier_res_table_info { + u16 num_nh_buckets; + struct nh_notifier_single_info nhs[]; +}; + struct nh_notifier_info { struct net *net; struct netlink_ext_ack *extack; @@ -151,6 +211,8 @@ struct nh_notifier_info { union { struct nh_notifier_single_info *nh; struct nh_notifier_grp_info *nh_grp; + struct nh_notifier_res_table_info *nh_res_table; + struct nh_notifier_res_bucket_info *nh_res_bucket; }; }; @@ -158,6 +220,10 @@ int register_nexthop_notifier(struct net *net, struct notifier_block *nb, struct netlink_ext_ack *extack); int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb); void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap); +void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index, + bool offload, bool trap); +void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets, + unsigned long *activity); /* caller is holding rcu or rtnl; no reference taken to nexthop */ struct nexthop *nexthop_find_by_id(struct net *net, u32 id); @@ -212,7 +278,7 @@ static inline bool nexthop_is_multipath(const struct nexthop *nh) struct nh_group *nh_grp; nh_grp = rcu_dereference_rtnl(nh->nh_grp); - return nh_grp->mpath; + return nh_grp->is_multipath; } return false; } @@ -227,7 +293,7 @@ static inline unsigned int nexthop_num_path(const struct nexthop *nh) struct nh_group *nh_grp; nh_grp = rcu_dereference_rtnl(nh->nh_grp); - if (nh_grp->mpath) + if (nh_grp->is_multipath) rc = nh_grp->num_nh; } @@ -308,7 +374,7 @@ struct fib_nh_common *nexthop_fib_nhc(struct nexthop *nh, int nhsel) struct nh_group *nh_grp; nh_grp = 
rcu_dereference_rtnl(nh->nh_grp); - if (nh_grp->mpath) { + if (nh_grp->is_multipath) { nh = nexthop_mpath_select(nh_grp, nhsel); if (!nh) return NULL; diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h index 43c9c5d2bedb..1df0f8074c9d 100644 --- a/include/net/nfc/nci_core.h +++ b/include/net/nfc/nci_core.h @@ -298,6 +298,7 @@ int nci_nfcc_loopback(struct nci_dev *ndev, void *data, size_t data_len, struct sk_buff **resp); struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev); +void nci_hci_deallocate(struct nci_dev *ndev); int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event, const u8 *param, size_t param_len); int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate, @@ -430,8 +431,6 @@ struct nci_uart_ops { int (*open)(struct nci_uart *nci_uart); void (*close)(struct nci_uart *nci_uart); int (*recv)(struct nci_uart *nci_uart, struct sk_buff *skb); - int (*recv_buf)(struct nci_uart *nci_uart, const u8 *data, char *flags, - int count); int (*send)(struct nci_uart *nci_uart, struct sk_buff *skb); void (*tx_start)(struct nci_uart *nci_uart); void (*tx_done)(struct nci_uart *nci_uart); diff --git a/include/net/page_pool.h b/include/net/page_pool.h index b5b195305346..b4b6de909c93 100644 --- a/include/net/page_pool.h +++ b/include/net/page_pool.h @@ -65,7 +65,7 @@ #define PP_ALLOC_CACHE_REFILL 64 struct pp_alloc_cache { u32 count; - void *cache[PP_ALLOC_CACHE_SIZE]; + struct page *cache[PP_ALLOC_CACHE_SIZE]; }; struct page_pool_params { @@ -198,7 +198,17 @@ static inline void page_pool_recycle_direct(struct page_pool *pool, static inline dma_addr_t page_pool_get_dma_addr(struct page *page) { - return page->dma_addr; + dma_addr_t ret = page->dma_addr[0]; + if (sizeof(dma_addr_t) > sizeof(unsigned long)) + ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16; + return ret; +} + +static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr) +{ + page->dma_addr[0] = addr; + if (sizeof(dma_addr_t) > sizeof(unsigned long)) + page->dma_addr[1] = upper_32_bits(addr); } static inline bool is_page_pool_compiled_in(void) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 255e4f4b521f..ec7823921bd2 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -709,6 +709,17 @@ tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common, cls_common->extack = extack; } +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) +static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb) +{ + struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT); + + if (tc_skb_ext) + memset(tc_skb_ext, 0, sizeof(*tc_skb_ext)); + return tc_skb_ext; +} +#endif + enum tc_matchall_command { TC_CLSMATCHALL_REPLACE, TC_CLSMATCHALL_DESTROY, diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 15b1b30f454e..6d7b12cba015 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -128,12 +128,7 @@ void __qdisc_run(struct Qdisc *q); static inline void qdisc_run(struct Qdisc *q) { if (qdisc_run_begin(q)) { - /* NOLOCK qdisc must check 'state' under the qdisc seqlock - * to avoid racing with dev_qdisc_reset() - */ - if (!(q->flags & TCQ_F_NOLOCK) || - likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) - __qdisc_run(q); + __qdisc_run(q); qdisc_run_end(q); } } @@ -188,4 +183,13 @@ struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload *offload); void taprio_offload_free(struct tc_taprio_qopt_offload *offload); +/* Ensure skb_mstamp_ns, which might have been populated with the txtime, is + * not mistaken for a software 
timestamp, because this will otherwise prevent + * the dispatch of hardware timestamps to the socket. + */ +static inline void skb_txtime_consumed(struct sk_buff *skb) +{ + skb->tstamp = ktime_set(0, 0); +} + #endif diff --git a/include/net/psample.h b/include/net/psample.h index 68ae16bb0a4a..e328c5127757 100644 --- a/include/net/psample.h +++ b/include/net/psample.h @@ -14,6 +14,19 @@ struct psample_group { struct rcu_head rcu; }; +struct psample_metadata { + u32 trunc_size; + int in_ifindex; + int out_ifindex; + u16 out_tc; + u64 out_tc_occ; /* bytes */ + u64 latency; /* nanoseconds */ + u8 out_tc_valid:1, + out_tc_occ_valid:1, + latency_valid:1, + unused:5; +}; + struct psample_group *psample_group_get(struct net *net, u32 group_num); void psample_group_take(struct psample_group *group); void psample_group_put(struct psample_group *group); @@ -21,15 +34,13 @@ void psample_group_put(struct psample_group *group); #if IS_ENABLED(CONFIG_PSAMPLE) void psample_sample_packet(struct psample_group *group, struct sk_buff *skb, - u32 trunc_size, int in_ifindex, int out_ifindex, - u32 sample_rate); + u32 sample_rate, const struct psample_metadata *md); #else static inline void psample_sample_packet(struct psample_group *group, - struct sk_buff *skb, u32 trunc_size, - int in_ifindex, int out_ifindex, - u32 sample_rate) + struct sk_buff *skb, u32 sample_rate, + const struct psample_metadata *md) { } diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 2d6eb60c58c8..1e625519ae96 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -36,6 +36,7 @@ struct qdisc_rate_table { enum qdisc_state_t { __QDISC_STATE_SCHED, __QDISC_STATE_DEACTIVATED, + __QDISC_STATE_MISSED, }; struct qdisc_size_table { @@ -159,8 +160,33 @@ static inline bool qdisc_is_empty(const struct Qdisc *qdisc) static inline bool qdisc_run_begin(struct Qdisc *qdisc) { if (qdisc->flags & TCQ_F_NOLOCK) { + if (spin_trylock(&qdisc->seqlock)) + goto nolock_empty; + + /* If the MISSED flag is set, it means other thread has + * set the MISSED flag before second spin_trylock(), so + * we can return false here to avoid multi cpus doing + * the set_bit() and second spin_trylock() concurrently. + */ + if (test_bit(__QDISC_STATE_MISSED, &qdisc->state)) + return false; + + /* Set the MISSED flag before the second spin_trylock(), + * if the second spin_trylock() return false, it means + * other cpu holding the lock will do dequeuing for us + * or it will see the MISSED flag set after releasing + * lock and reschedule the net_tx_action() to do the + * dequeuing. + */ + set_bit(__QDISC_STATE_MISSED, &qdisc->state); + + /* Retry again in case other CPU may not see the new flag + * after it releases the lock at the end of qdisc_run_end(). 
+ */ if (!spin_trylock(&qdisc->seqlock)) return false; + +nolock_empty: WRITE_ONCE(qdisc->empty, false); } else if (qdisc_is_running(qdisc)) { return false; @@ -176,8 +202,15 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc) static inline void qdisc_run_end(struct Qdisc *qdisc) { write_seqcount_end(&qdisc->running); - if (qdisc->flags & TCQ_F_NOLOCK) + if (qdisc->flags & TCQ_F_NOLOCK) { spin_unlock(&qdisc->seqlock); + + if (unlikely(test_bit(__QDISC_STATE_MISSED, + &qdisc->state))) { + clear_bit(__QDISC_STATE_MISSED, &qdisc->state); + __netif_schedule(qdisc); + } + } } static inline bool qdisc_may_bulk(const struct Qdisc *qdisc) @@ -1242,6 +1275,20 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res, res->linklayer = (r->linklayer & TC_LINKLAYER_MASK); } +struct psched_pktrate { + u64 rate_pkts_ps; /* packets per second */ + u32 mult; + u8 shift; +}; + +static inline u64 psched_pkt2t_ns(const struct psched_pktrate *r, + unsigned int pkt_num) +{ + return ((u64)pkt_num * r->mult) >> r->shift; +} + +void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64); + /* Mini Qdisc serves for specific needs of ingress/clsact Qdisc. * The fast path only needs to access filter list and to update stats */ diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index e8df72e1627a..5e848884ff61 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h @@ -68,7 +68,6 @@ enum sctp_verb { SCTP_CMD_ASSOC_FAILED, /* Handle association failure. */ SCTP_CMD_DISCARD_PACKET, /* Discard the whole packet. */ SCTP_CMD_GEN_SHUTDOWN, /* Generate a SHUTDOWN chunk. */ - SCTP_CMD_UPDATE_ASSOC, /* Update association information. */ SCTP_CMD_PURGE_OUTQUEUE, /* Purge all data waiting to be sent. */ SCTP_CMD_SETUP_T2, /* Hi-level, setup T2-shutdown parms. */ SCTP_CMD_RTO_PENDING, /* Set transport's rto_pending. 
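For orientation, the qdisc_run_begin()/qdisc_run_end() handshake added above can be condensed into the sketch below. It is only a model: run_begin()/run_end() are stand-in names, and the real helpers additionally handle the TCQ_F_NOLOCK check, the qdisc->empty update and the running seqcount visible in the hunks.

	/* Condensed model of the __QDISC_STATE_MISSED handshake (illustrative). */
	static bool run_begin(struct Qdisc *q)
	{
		if (spin_trylock(&q->seqlock))
			return true;			/* we own the qdisc now */

		if (test_bit(__QDISC_STATE_MISSED, &q->state))
			return false;			/* a miss is already recorded */

		set_bit(__QDISC_STATE_MISSED, &q->state);

		/* Either this retry wins the lock, or the current owner sees
		 * MISSED in run_end() and reschedules net_tx_action() for us.
		 */
		return spin_trylock(&q->seqlock);
	}

	static void run_end(struct Qdisc *q)
	{
		spin_unlock(&q->seqlock);

		if (test_bit(__QDISC_STATE_MISSED, &q->state)) {
			clear_bit(__QDISC_STATE_MISSED, &q->state);
			__netif_schedule(q);
		}
	}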
*/ diff --git a/include/net/selftests.h b/include/net/selftests.h new file mode 100644 index 000000000000..e65e8d230d33 --- /dev/null +++ b/include/net/selftests.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NET_SELFTESTS +#define _NET_SELFTESTS + +#include <linux/ethtool.h> + +#if IS_ENABLED(CONFIG_NET_SELFTESTS) + +void net_selftest(struct net_device *ndev, struct ethtool_test *etest, + u64 *buf); +int net_selftest_get_count(void); +void net_selftest_get_strings(u8 *data); + +#else + +static inline void net_selftest(struct net_device *ndev, struct ethtool_test *etest, + u64 *buf) +{ +} + +static inline int net_selftest_get_count(void) +{ + return 0; +} + +static inline void net_selftest_get_strings(u8 *data) +{ +} + +#endif +#endif /* _NET_SELFTESTS */ diff --git a/include/net/sock.h b/include/net/sock.h index 8487f58da36d..0e962d8bc73b 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1118,6 +1118,7 @@ struct inet_hashinfo; struct raw_hashinfo; struct smc_hashinfo; struct module; +struct sk_psock; /* * caches using SLAB_TYPESAFE_BY_RCU should let .next pointer from nulls nodes @@ -1188,6 +1189,11 @@ struct proto { void (*unhash)(struct sock *sk); void (*rehash)(struct sock *sk); int (*get_port)(struct sock *sk, unsigned short snum); +#ifdef CONFIG_BPF_SYSCALL + int (*psock_update_sk_prot)(struct sock *sk, + struct sk_psock *psock, + bool restore); +#endif /* Keeping track of sockets in use */ #ifdef CONFIG_PROC_FS @@ -2225,13 +2231,15 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk) sk_mem_charge(sk, skb->truesize); } -static inline void skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk) +static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk) { if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) { skb_orphan(skb); skb->destructor = sock_efree; skb->sk = sk; + return true; } + return false; } void sk_reset_timer(struct sock *sk, struct timer_list *timer, diff --git a/include/net/switchdev.h b/include/net/switchdev.h index b7fc7d0f54e2..f1a5a9a3634d 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -68,6 +68,7 @@ enum switchdev_obj_id { }; struct switchdev_obj { + struct list_head list; struct net_device *orig_dev; enum switchdev_obj_id id; u32 flags; @@ -208,6 +209,7 @@ struct switchdev_notifier_fdb_info { const unsigned char *addr; u16 vid; u8 added_by_user:1, + is_local:1, offloaded:1; }; diff --git a/include/net/tc_act/tc_police.h b/include/net/tc_act/tc_police.h index 6d1e26b709b5..72649512dcdd 100644 --- a/include/net/tc_act/tc_police.h +++ b/include/net/tc_act/tc_police.h @@ -10,10 +10,13 @@ struct tcf_police_params { s64 tcfp_burst; u32 tcfp_mtu; s64 tcfp_mtu_ptoks; + s64 tcfp_pkt_burst; struct psched_ratecfg rate; bool rate_present; struct psched_ratecfg peak; bool peak_present; + struct psched_pktrate ppsrate; + bool pps_present; struct rcu_head rcu; }; @@ -24,6 +27,7 @@ struct tcf_police { spinlock_t tcfp_lock ____cacheline_aligned_in_smp; s64 tcfp_toks; s64 tcfp_ptoks; + s64 tcfp_pkttoks; s64 tcfp_t_c; }; @@ -97,6 +101,54 @@ static inline u32 tcf_police_burst(const struct tc_action *act) return burst; } +static inline u64 tcf_police_rate_pkt_ps(const struct tc_action *act) +{ + struct tcf_police *police = to_police(act); + struct tcf_police_params *params; + + params = rcu_dereference_protected(police->params, + lockdep_is_held(&police->tcf_lock)); + return params->ppsrate.rate_pkts_ps; +} + +static inline u32 tcf_police_burst_pkt(const struct tc_action 
*act) +{ + struct tcf_police *police = to_police(act); + struct tcf_police_params *params; + u32 burst; + + params = rcu_dereference_protected(police->params, + lockdep_is_held(&police->tcf_lock)); + + /* + * "rate" pkts "burst" nanoseconds + * ------------ * ------------------- + * 1 second 2^6 ticks + * + * ------------------------------------ + * NSEC_PER_SEC nanoseconds + * ------------------------ + * 2^6 ticks + * + * "rate" pkts "burst" nanoseconds 2^6 ticks + * = ------------ * ------------------- * ------------------------ + * 1 second 2^6 ticks NSEC_PER_SEC nanoseconds + * + * "rate" * "burst" + * = ---------------- pkts/nanosecond + * NSEC_PER_SEC^2 + * + * + * "rate" * "burst" + * = ---------------- pkts/second + * NSEC_PER_SEC + */ + burst = div_u64(params->tcfp_pkt_burst * params->ppsrate.rate_pkts_ps, + NSEC_PER_SEC); + + return burst; +} + static inline u32 tcf_police_tcfp_mtu(const struct tc_action *act) { struct tcf_police *police = to_police(act); diff --git a/include/net/tcp.h b/include/net/tcp.h index 963cd86d12dd..d05193cb0d99 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -883,36 +883,11 @@ struct tcp_skb_cb { struct inet6_skb_parm h6; #endif } header; /* For incoming skbs */ - struct { - __u32 flags; - struct sock *sk_redir; - void *data_end; - } bpf; }; }; #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0])) -static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb) -{ - TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb); -} - -static inline bool tcp_skb_bpf_ingress(const struct sk_buff *skb) -{ - return TCP_SKB_CB(skb)->bpf.flags & BPF_F_INGRESS; -} - -static inline struct sock *tcp_skb_bpf_redirect_fetch(struct sk_buff *skb) -{ - return TCP_SKB_CB(skb)->bpf.sk_redir; -} - -static inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb) -{ - TCP_SKB_CB(skb)->bpf.sk_redir = NULL; -} - extern const struct inet_connection_sock_af_ops ipv4_specific; #if IS_ENABLED(CONFIG_IPV6) @@ -1060,44 +1035,56 @@ struct rate_sample { }; struct tcp_congestion_ops { - struct list_head list; - u32 key; - u32 flags; - - /* initialize private data (optional) */ - void (*init)(struct sock *sk); - /* cleanup private data (optional) */ - void (*release)(struct sock *sk); +/* fast path fields are put first to fill one cache line */ /* return slow start threshold (required) */ u32 (*ssthresh)(struct sock *sk); + /* do new cwnd calculation (required) */ void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked); + /* call before changing ca_state (optional) */ void (*set_state)(struct sock *sk, u8 new_state); + /* call when cwnd event occurs (optional) */ void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev); + /* call when ack arrives (optional) */ void (*in_ack_event)(struct sock *sk, u32 flags); - /* new value of cwnd after loss (required) */ - u32 (*undo_cwnd)(struct sock *sk); + /* hook for packet ack accounting (optional) */ void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample); + /* override sysctl_tcp_min_tso_segs */ u32 (*min_tso_segs)(struct sock *sk); - /* returns the multiplier used in tcp_sndbuf_expand (optional) */ - u32 (*sndbuf_expand)(struct sock *sk); + /* call when packets are delivered to update cwnd and pacing rate, * after all the ca_state processing. 
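A quick numeric check of the packets-per-second burst conversion derived above, with made-up values: rate_pkts_ps = 2000 pkts/s and tcfp_pkt_burst = 5,000,000 ns (a 5 ms burst window) give 2000 * 5,000,000 / 1,000,000,000 = 10 packets of burst. The same arithmetic as a standalone snippet:

	/* Illustrative only; the values are hypothetical, not from the patch. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t rate_pkts_ps = 2000;		/* packets per second */
		uint64_t pkt_burst_ns = 5000000;	/* 5 ms burst window */
		uint64_t burst = pkt_burst_ns * rate_pkts_ps / 1000000000ULL;

		printf("burst = %llu packets\n", (unsigned long long)burst);	/* 10 */
		return 0;
	}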
(optional) */ void (*cong_control)(struct sock *sk, const struct rate_sample *rs); + + + /* new value of cwnd after loss (required) */ + u32 (*undo_cwnd)(struct sock *sk); + /* returns the multiplier used in tcp_sndbuf_expand (optional) */ + u32 (*sndbuf_expand)(struct sock *sk); + +/* control/slow paths put last */ /* get info for inet_diag (optional) */ size_t (*get_info)(struct sock *sk, u32 ext, int *attr, union tcp_cc_info *info); - char name[TCP_CA_NAME_MAX]; - struct module *owner; -}; + char name[TCP_CA_NAME_MAX]; + struct module *owner; + struct list_head list; + u32 key; + u32 flags; + + /* initialize private data (optional) */ + void (*init)(struct sock *sk); + /* cleanup private data (optional) */ + void (*release)(struct sock *sk); +} ____cacheline_aligned_in_smp; int tcp_register_congestion_control(struct tcp_congestion_ops *type); void tcp_unregister_congestion_control(struct tcp_congestion_ops *type); @@ -2222,25 +2209,26 @@ void tcp_update_ulp(struct sock *sk, struct proto *p, __MODULE_INFO(alias, alias_userspace, name); \ __MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name) +#ifdef CONFIG_NET_SOCK_MSG struct sk_msg; struct sk_psock; -#ifdef CONFIG_BPF_STREAM_PARSER +#ifdef CONFIG_BPF_SYSCALL struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock); +int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore); void tcp_bpf_clone(const struct sock *sk, struct sock *newsk); -#else -static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk) -{ -} -#endif /* CONFIG_BPF_STREAM_PARSER */ +#endif /* CONFIG_BPF_SYSCALL */ -#ifdef CONFIG_NET_SOCK_MSG int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes, int flags); -int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock, - struct msghdr *msg, int len, int flags); #endif /* CONFIG_NET_SOCK_MSG */ +#if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG) +static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk) +{ +} +#endif + #ifdef CONFIG_CGROUP_BPF static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops, struct sk_buff *skb, diff --git a/include/net/udp.h b/include/net/udp.h index a132a02b2f2c..360df454356c 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -329,6 +329,8 @@ struct sock *__udp6_lib_lookup(struct net *net, struct sk_buff *skb); struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb, __be16 sport, __be16 dport); +int udp_read_sock(struct sock *sk, read_descriptor_t *desc, + sk_read_actor_t recv_actor); /* UDP uses skb->dev_scratch to cache as much information as possible and avoid * possibly multiple cache miss on dequeue() @@ -515,9 +517,33 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk, return segs; } -#ifdef CONFIG_BPF_STREAM_PARSER +static inline void udp_post_segment_fix_csum(struct sk_buff *skb) +{ + /* UDP-lite can't land here - no GRO */ + WARN_ON_ONCE(UDP_SKB_CB(skb)->partial_cov); + + /* UDP packets generated with UDP_SEGMENT and traversing: + * + * UDP tunnel(xmit) -> veth (segmentation) -> veth (gro) -> UDP tunnel (rx) + * + * can reach an UDP socket with CHECKSUM_NONE, because + * __iptunnel_pull_header() converts CHECKSUM_PARTIAL into NONE. + * SKB_GSO_UDP_L4 or SKB_GSO_FRAGLIST packets with no UDP tunnel will + * have a valid checksum, as the GRO engine validates the UDP csum + * before the aggregation and nobody strips such info in between. + * Instead of adding another check in the tunnel fastpath, we can force + * a valid csum after the segmentation. 
+ * Additionally fixup the UDP CB. + */ + UDP_SKB_CB(skb)->cscov = skb->len; + if (skb->ip_summed == CHECKSUM_NONE && !skb->csum_valid) + skb->csum_valid = 1; +} + +#ifdef CONFIG_BPF_SYSCALL struct sk_psock; struct proto *udp_bpf_get_proto(struct sock *sk, struct sk_psock *psock); -#endif /* BPF_STREAM_PARSER */ +int udp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore); +#endif #endif /* _UDP_H */ diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index cc17bc957548..9c0722c6d7ac 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -80,19 +80,6 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp); int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp); void __xsk_map_flush(void); -static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, - u32 key) -{ - struct xsk_map *m = container_of(map, struct xsk_map, map); - struct xdp_sock *xs; - - if (key >= map->max_entries) - return NULL; - - xs = READ_ONCE(m->xsk_map[key]); - return xs; -} - #else static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) @@ -109,12 +96,6 @@ static inline void __xsk_map_flush(void) { } -static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, - u32 key) -{ - return NULL; -} - #endif /* CONFIG_XDP_SOCKETS */ #endif /* _LINUX_XDP_SOCK_H */ diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h index bae29f50adff..226ae3702d8a 100644 --- a/include/rdma/ib_cache.h +++ b/include/rdma/ib_cache.h @@ -10,7 +10,7 @@ #include <rdma/ib_verbs.h> -int rdma_query_gid(struct ib_device *device, u8 port_num, int index, +int rdma_query_gid(struct ib_device *device, u32 port_num, int index, union ib_gid *gid); void *rdma_read_gid_hw_context(const struct ib_gid_attr *attr); const struct ib_gid_attr *rdma_find_gid(struct ib_device *device, @@ -20,10 +20,10 @@ const struct ib_gid_attr *rdma_find_gid(struct ib_device *device, const struct ib_gid_attr *rdma_find_gid_by_port(struct ib_device *ib_dev, const union ib_gid *gid, enum ib_gid_type gid_type, - u8 port, + u32 port, struct net_device *ndev); const struct ib_gid_attr *rdma_find_gid_by_filter( - struct ib_device *device, const union ib_gid *gid, u8 port_num, + struct ib_device *device, const union ib_gid *gid, u32 port_num, bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *, void *), void *context); @@ -43,7 +43,7 @@ struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr); * the local software cache. */ int ib_get_cached_pkey(struct ib_device *device_handle, - u8 port_num, + u32 port_num, int index, u16 *pkey); @@ -59,7 +59,7 @@ int ib_get_cached_pkey(struct ib_device *device_handle, * the local software cache. */ int ib_find_cached_pkey(struct ib_device *device, - u8 port_num, + u32 port_num, u16 pkey, u16 *index); @@ -75,7 +75,7 @@ int ib_find_cached_pkey(struct ib_device *device, * the local software cache. */ int ib_find_exact_cached_pkey(struct ib_device *device, - u8 port_num, + u32 port_num, u16 pkey, u16 *index); @@ -89,7 +89,7 @@ int ib_find_exact_cached_pkey(struct ib_device *device, * the local software cache. */ int ib_get_cached_lmc(struct ib_device *device, - u8 port_num, + u32 port_num, u8 *lmc); /** @@ -102,12 +102,12 @@ int ib_get_cached_lmc(struct ib_device *device, * the local software cache. 
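The psock_update_sk_prot() hook added to struct proto earlier in these hunks pairs with the tcp_bpf_update_proto()/udp_bpf_update_proto() declarations above. A hypothetical caller (assumed here, not shown in the hunks) would go through the socket's proto rather than a protocol-specific helper, roughly:

	/* Sketch of an assumed sockmap-side caller; only the hook and the
	 * TCP/UDP helpers are actually part of the hunks above.
	 */
	#ifdef CONFIG_BPF_SYSCALL
	static int psock_switch_proto(struct sock *sk, struct sk_psock *psock)
	{
		if (!sk->sk_prot->psock_update_sk_prot)
			return -EINVAL;

		/* restore == false: install the psock-aware proto operations */
		return sk->sk_prot->psock_update_sk_prot(sk, psock, false);
	}
	#endif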
*/ int ib_get_cached_port_state(struct ib_device *device, - u8 port_num, + u32 port_num, enum ib_port_state *port_active); bool rdma_is_zero_gid(const union ib_gid *gid); const struct ib_gid_attr *rdma_get_gid_attr(struct ib_device *device, - u8 port_num, int index); + u32 port_num, int index); void rdma_put_gid_attr(const struct ib_gid_attr *attr); void rdma_hold_gid_attr(const struct ib_gid_attr *attr); ssize_t rdma_query_gid_table(struct ib_device *device, diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index 8dfb1ddf345a..f1d34f06a68b 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h @@ -668,7 +668,7 @@ struct ib_mad_reg_req { * @registration_flags: Registration flags to set for this agent */ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, - u8 port_num, + u32 port_num, enum ib_qp_type qp_type, struct ib_mad_reg_req *mad_reg_req, u8 rmpp_version, diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h index 4c52c2fd22a1..ba3c808a3789 100644 --- a/include/rdma/ib_sa.h +++ b/include/rdma/ib_sa.h @@ -423,7 +423,7 @@ struct ib_sa_query; void ib_sa_cancel_query(int id, struct ib_sa_query *query); int ib_sa_path_rec_get(struct ib_sa_client *client, struct ib_device *device, - u8 port_num, struct sa_path_rec *rec, + u32 port_num, struct sa_path_rec *rec, ib_sa_comp_mask comp_mask, unsigned long timeout_ms, gfp_t gfp_mask, void (*callback)(int status, struct sa_path_rec *resp, @@ -431,7 +431,7 @@ int ib_sa_path_rec_get(struct ib_sa_client *client, struct ib_device *device, void *context, struct ib_sa_query **query); int ib_sa_service_rec_query(struct ib_sa_client *client, - struct ib_device *device, u8 port_num, u8 method, + struct ib_device *device, u32 port_num, u8 method, struct ib_sa_service_rec *rec, ib_sa_comp_mask comp_mask, unsigned long timeout_ms, gfp_t gfp_mask, @@ -477,7 +477,8 @@ struct ib_sa_multicast { * group, and the user must rejoin the group to continue using it. */ struct ib_sa_multicast *ib_sa_join_multicast(struct ib_sa_client *client, - struct ib_device *device, u8 port_num, + struct ib_device *device, + u32 port_num, struct ib_sa_mcmember_rec *rec, ib_sa_comp_mask comp_mask, gfp_t gfp_mask, int (*callback)(int status, @@ -506,20 +507,20 @@ void ib_sa_free_multicast(struct ib_sa_multicast *multicast); * @mgid: MGID of multicast group. * @rec: Location to copy SA multicast member record. */ -int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num, +int ib_sa_get_mcmember_rec(struct ib_device *device, u32 port_num, union ib_gid *mgid, struct ib_sa_mcmember_rec *rec); /** * ib_init_ah_from_mcmember - Initialize address handle attributes based on * an SA multicast member record. 
*/ -int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num, +int ib_init_ah_from_mcmember(struct ib_device *device, u32 port_num, struct ib_sa_mcmember_rec *rec, struct net_device *ndev, enum ib_gid_type gid_type, struct rdma_ah_attr *ah_attr); -int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num, +int ib_init_ah_attr_from_path(struct ib_device *device, u32 port_num, struct sa_path_rec *rec, struct rdma_ah_attr *ah_attr, const struct ib_gid_attr *sgid_attr); @@ -538,7 +539,7 @@ void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec); /* Support GuidInfoRecord */ int ib_sa_guid_info_rec_query(struct ib_sa_client *client, - struct ib_device *device, u8 port_num, + struct ib_device *device, u32 port_num, struct ib_sa_guidinfo_rec *rec, ib_sa_comp_mask comp_mask, u8 method, unsigned long timeout_ms, gfp_t gfp_mask, diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index ca28fca5736b..7e2f3699b898 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -152,7 +152,7 @@ struct ib_gid_attr { union ib_gid gid; enum ib_gid_type gid_type; u16 index; - u8 port_num; + u32 port_num; }; enum { @@ -736,7 +736,7 @@ struct ib_event { struct ib_qp *qp; struct ib_srq *srq; struct ib_wq *wq; - u8 port_num; + u32 port_num; } element; enum ib_event_type event; }; @@ -919,7 +919,7 @@ struct rdma_ah_attr { struct ib_global_route grh; u8 sl; u8 static_rate; - u8 port_num; + u32 port_num; u8 ah_flags; enum rdma_ah_attr_type type; union { @@ -1006,7 +1006,7 @@ struct ib_wc { u16 pkey_index; u8 sl; u8 dlid_path_bits; - u8 port_num; /* valid only for DR SMPs on switches */ + u32 port_num; /* valid only for DR SMPs on switches */ u8 smac[ETH_ALEN]; u16 vlan_id; u8 network_hdr_type; @@ -1161,7 +1161,7 @@ struct ib_qp_init_attr { /* * Only needed for special QP types, or when using the RW API. 
*/ - u8 port_num; + u32 port_num; struct ib_rwq_ind_table *rwq_ind_tbl; u32 source_qpn; }; @@ -1280,11 +1280,11 @@ struct ib_qp_attr { u8 max_rd_atomic; u8 max_dest_rd_atomic; u8 min_rnr_timer; - u8 port_num; + u32 port_num; u8 timeout; u8 retry_cnt; u8 rnr_retry; - u8 alt_port_num; + u32 alt_port_num; u8 alt_timeout; u32 rate_limit; struct net_device *xmit_slave; @@ -1401,7 +1401,7 @@ struct ib_ud_wr { u32 remote_qpn; u32 remote_qkey; u16 pkey_index; /* valid for GSI only */ - u8 port_num; /* valid for DR SMPs on switch only */ + u32 port_num; /* valid for DR SMPs on switch only */ }; static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr) @@ -1610,6 +1610,11 @@ struct ib_srq { } xrc; }; } ext; + + /* + * Implementation details of the RDMA core, don't use in drivers: + */ + struct rdma_restrack_entry res; }; enum ib_raw_packet_caps { @@ -1708,7 +1713,7 @@ struct ib_qp_security; struct ib_port_pkey { enum port_pkey_state state; u16 pkey_index; - u8 port_num; + u32 port_num; struct list_head qp_list; struct list_head to_error_list; struct ib_qp_security *sec; @@ -1769,7 +1774,7 @@ struct ib_qp { enum ib_qp_type qp_type; struct ib_rwq_ind_table *rwq_ind_tbl; struct ib_qp_security *qp_sec; - u8 port; + u32 port; bool integrity_en; /* @@ -2065,7 +2070,7 @@ struct ib_flow_attr { u16 priority; u32 flags; u8 num_of_specs; - u8 port; + u32 port; union ib_flow_spec flows[]; }; @@ -2194,7 +2199,7 @@ enum rdma_netdev_t { struct rdma_netdev { void *clnt_priv; struct ib_device *hca; - u8 port_num; + u32 port_num; int mtu; /* @@ -2215,6 +2220,8 @@ struct rdma_netdev { int set_qkey, u32 qkey); int (*detach_mcast)(struct net_device *dev, struct ib_device *hca, union ib_gid *gid, u16 mlid); + /* timeout */ + void (*tx_timeout)(struct net_device *dev, unsigned int txqueue); }; struct rdma_netdev_alloc_params { @@ -2223,7 +2230,7 @@ struct rdma_netdev_alloc_params { unsigned int rxqs; void *param; - int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num, + int (*initialize_rdma_netdev)(struct ib_device *device, u32 port_num, struct net_device *netdev, void *param); }; @@ -2301,12 +2308,11 @@ struct ib_device_ops { int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc); int (*peek_cq)(struct ib_cq *cq, int wc_cnt); int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags); - int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt); int (*post_srq_recv)(struct ib_srq *srq, const struct ib_recv_wr *recv_wr, const struct ib_recv_wr **bad_recv_wr); int (*process_mad)(struct ib_device *device, int process_mad_flags, - u8 port_num, const struct ib_wc *in_wc, + u32 port_num, const struct ib_wc *in_wc, const struct ib_grh *in_grh, const struct ib_mad *in_mad, struct ib_mad *out_mad, size_t *out_mad_size, u16 *out_mad_pkey_index); @@ -2318,9 +2324,9 @@ struct ib_device_ops { void (*get_dev_fw_str)(struct ib_device *device, char *str); const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev, int comp_vector); - int (*query_port)(struct ib_device *device, u8 port_num, + int (*query_port)(struct ib_device *device, u32 port_num, struct ib_port_attr *port_attr); - int (*modify_port)(struct ib_device *device, u8 port_num, + int (*modify_port)(struct ib_device *device, u32 port_num, int port_modify_mask, struct ib_port_modify *port_modify); /** @@ -2329,10 +2335,10 @@ struct ib_device_ops { * structure to avoid cache line misses when accessing struct ib_device * in fast paths. 
*/ - int (*get_port_immutable)(struct ib_device *device, u8 port_num, + int (*get_port_immutable)(struct ib_device *device, u32 port_num, struct ib_port_immutable *immutable); enum rdma_link_layer (*get_link_layer)(struct ib_device *device, - u8 port_num); + u32 port_num); /** * When calling get_netdev, the HW vendor's driver should return the * net device of device @device at port @port_num or NULL if such @@ -2341,7 +2347,8 @@ struct ib_device_ops { * that this function returns NULL before the net device has finished * NETDEV_UNREGISTER state. */ - struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num); + struct net_device *(*get_netdev)(struct ib_device *device, + u32 port_num); /** * rdma netdev operation * @@ -2349,11 +2356,11 @@ struct ib_device_ops { * must return -EOPNOTSUPP if it doesn't support the specified type. */ struct net_device *(*alloc_rdma_netdev)( - struct ib_device *device, u8 port_num, enum rdma_netdev_t type, + struct ib_device *device, u32 port_num, enum rdma_netdev_t type, const char *name, unsigned char name_assign_type, void (*setup)(struct net_device *)); - int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num, + int (*rdma_netdev_get_params)(struct ib_device *device, u32 port_num, enum rdma_netdev_t type, struct rdma_netdev_alloc_params *params); /** @@ -2361,7 +2368,7 @@ struct ib_device_ops { * link layer is either IB or iWarp. It is no-op if @port_num port * is RoCE link layer. */ - int (*query_gid)(struct ib_device *device, u8 port_num, int index, + int (*query_gid)(struct ib_device *device, u32 port_num, int index, union ib_gid *gid); /** * When calling add_gid, the HW vendor's driver should add the gid @@ -2386,7 +2393,7 @@ struct ib_device_ops { * This function is only called when roce_gid_table is used. */ int (*del_gid)(const struct ib_gid_attr *attr, void **context); - int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index, + int (*query_pkey)(struct ib_device *device, u32 port_num, u16 index, u16 *pkey); int (*alloc_ucontext)(struct ib_ucontext *context, struct ib_udata *udata); @@ -2475,16 +2482,16 @@ struct ib_device_ops { struct ib_flow_action *action, const struct ib_flow_action_attrs_esp *attr, struct uverbs_attr_bundle *attrs); - int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port, + int (*set_vf_link_state)(struct ib_device *device, int vf, u32 port, int state); - int (*get_vf_config)(struct ib_device *device, int vf, u8 port, + int (*get_vf_config)(struct ib_device *device, int vf, u32 port, struct ifla_vf_info *ivf); - int (*get_vf_stats)(struct ib_device *device, int vf, u8 port, + int (*get_vf_stats)(struct ib_device *device, int vf, u32 port, struct ifla_vf_stats *stats); - int (*get_vf_guid)(struct ib_device *device, int vf, u8 port, + int (*get_vf_guid)(struct ib_device *device, int vf, u32 port, struct ifla_vf_guid *node_guid, struct ifla_vf_guid *port_guid); - int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid, + int (*set_vf_guid)(struct ib_device *device, int vf, u32 port, u64 guid, int type); struct ib_wq *(*create_wq)(struct ib_pd *pd, struct ib_wq_init_attr *init_attr, @@ -2522,7 +2529,7 @@ struct ib_device_ops { * struct tells the core to set a default lifespan. */ struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device, - u8 port_num); + u32 port_num); /** * get_hw_stats - Fill in the counter value(s) in the stats struct. 
* @index - The index in the value array we wish to have updated, or @@ -2536,12 +2543,12 @@ struct ib_device_ops { * one given in index at their option */ int (*get_hw_stats)(struct ib_device *device, - struct rdma_hw_stats *stats, u8 port, int index); + struct rdma_hw_stats *stats, u32 port, int index); /* * This function is called once for each port when a ib device is * registered. */ - int (*init_port)(struct ib_device *device, u8 port_num, + int (*init_port)(struct ib_device *device, u32 port_num, struct kobject *port_sysfs); /** * Allows rdma drivers to add their own restrack attributes. @@ -2685,7 +2692,7 @@ struct ib_device { /* CQ adaptive moderation (RDMA DIM) */ u16 use_cq_dim:1; u8 node_type; - u8 phys_port_cnt; + u32 phys_port_cnt; struct ib_device_attr attrs; struct attribute_group *hw_stats_ag; struct rdma_hw_stats *hw_stats; @@ -2751,7 +2758,7 @@ struct ib_client { * netdev. */ struct net_device *(*get_net_dev_by_params)( struct ib_device *dev, - u8 port, + u32 port, u16 pkey, const union ib_gid *gid, const struct sockaddr *addr, @@ -2932,10 +2939,10 @@ void ib_unregister_event_handler(struct ib_event_handler *event_handler); void ib_dispatch_event(const struct ib_event *event); int ib_query_port(struct ib_device *device, - u8 port_num, struct ib_port_attr *port_attr); + u32 port_num, struct ib_port_attr *port_attr); enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, - u8 port_num); + u32 port_num); /** * rdma_cap_ib_switch - Check if the device is IB switch @@ -2959,7 +2966,7 @@ static inline bool rdma_cap_ib_switch(const struct ib_device *device) * * Return start port number */ -static inline u8 rdma_start_port(const struct ib_device *device) +static inline u32 rdma_start_port(const struct ib_device *device) { return rdma_cap_ib_switch(device) ? 0 : 1; } @@ -2970,9 +2977,10 @@ static inline u8 rdma_start_port(const struct ib_device *device) * @iter - The unsigned int to store the port number */ #define rdma_for_each_port(device, iter) \ - for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type( \ - unsigned int, iter))); \ - iter <= rdma_end_port(device); (iter)++) + for (iter = rdma_start_port(device + \ + BUILD_BUG_ON_ZERO(!__same_type(u32, \ + iter))); \ + iter <= rdma_end_port(device); iter++) /** * rdma_end_port - Return the last valid port number for the device @@ -2982,7 +2990,7 @@ static inline u8 rdma_start_port(const struct ib_device *device) * * Return last port number */ -static inline u8 rdma_end_port(const struct ib_device *device) +static inline u32 rdma_end_port(const struct ib_device *device) { return rdma_cap_ib_switch(device) ? 
0 : device->phys_port_cnt; } @@ -2995,55 +3003,63 @@ static inline int rdma_is_port_valid(const struct ib_device *device, } static inline bool rdma_is_grh_required(const struct ib_device *device, - u8 port_num) + u32 port_num) { return device->port_data[port_num].immutable.core_cap_flags & RDMA_CORE_PORT_IB_GRH_REQUIRED; } -static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num) +static inline bool rdma_protocol_ib(const struct ib_device *device, + u32 port_num) { return device->port_data[port_num].immutable.core_cap_flags & RDMA_CORE_CAP_PROT_IB; } -static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num) +static inline bool rdma_protocol_roce(const struct ib_device *device, + u32 port_num) { return device->port_data[port_num].immutable.core_cap_flags & (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP); } -static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num) +static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, + u32 port_num) { return device->port_data[port_num].immutable.core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP; } -static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num) +static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, + u32 port_num) { return device->port_data[port_num].immutable.core_cap_flags & RDMA_CORE_CAP_PROT_ROCE; } -static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num) +static inline bool rdma_protocol_iwarp(const struct ib_device *device, + u32 port_num) { return device->port_data[port_num].immutable.core_cap_flags & RDMA_CORE_CAP_PROT_IWARP; } -static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num) +static inline bool rdma_ib_or_roce(const struct ib_device *device, + u32 port_num) { return rdma_protocol_ib(device, port_num) || rdma_protocol_roce(device, port_num); } -static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num) +static inline bool rdma_protocol_raw_packet(const struct ib_device *device, + u32 port_num) { return device->port_data[port_num].immutable.core_cap_flags & RDMA_CORE_CAP_PROT_RAW_PACKET; } -static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num) +static inline bool rdma_protocol_usnic(const struct ib_device *device, + u32 port_num) { return device->port_data[port_num].immutable.core_cap_flags & RDMA_CORE_CAP_PROT_USNIC; @@ -3061,7 +3077,7 @@ static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_n * * Return: true if the port supports sending/receiving of MAD packets. */ -static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num) +static inline bool rdma_cap_ib_mad(const struct ib_device *device, u32 port_num) { return device->port_data[port_num].immutable.core_cap_flags & RDMA_CORE_CAP_IB_MAD; @@ -3086,7 +3102,7 @@ static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num) * * Return: true if the port supports OPA MAD packet formats. */ -static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num) +static inline bool rdma_cap_opa_mad(struct ib_device *device, u32 port_num) { return device->port_data[port_num].immutable.core_cap_flags & RDMA_CORE_CAP_OPA_MAD; @@ -3112,7 +3128,7 @@ static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num) * * Return: true if the port provides an SMI. 
*/ -static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num) +static inline bool rdma_cap_ib_smi(const struct ib_device *device, u32 port_num) { return device->port_data[port_num].immutable.core_cap_flags & RDMA_CORE_CAP_IB_SMI; @@ -3133,7 +3149,7 @@ static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num) * Return: true if the port supports an IB CM (this does not guarantee that * a CM is actually running however). */ -static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num) +static inline bool rdma_cap_ib_cm(const struct ib_device *device, u32 port_num) { return device->port_data[port_num].immutable.core_cap_flags & RDMA_CORE_CAP_IB_CM; @@ -3151,7 +3167,7 @@ static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num) * Return: true if the port supports an iWARP CM (this does not guarantee that * a CM is actually running however). */ -static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num) +static inline bool rdma_cap_iw_cm(const struct ib_device *device, u32 port_num) { return device->port_data[port_num].immutable.core_cap_flags & RDMA_CORE_CAP_IW_CM; @@ -3172,7 +3188,7 @@ static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num) * Administration interface. This does not imply that the SA service is * running locally. */ -static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num) +static inline bool rdma_cap_ib_sa(const struct ib_device *device, u32 port_num) { return device->port_data[port_num].immutable.core_cap_flags & RDMA_CORE_CAP_IB_SA; @@ -3195,7 +3211,8 @@ static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num) * overhead of registering/unregistering with the SM and tracking of the * total number of queue pairs attached to the multicast group. */ -static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num) +static inline bool rdma_cap_ib_mcast(const struct ib_device *device, + u32 port_num) { return rdma_cap_ib_sa(device, port_num); } @@ -3213,7 +3230,7 @@ static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num * Return: true if the port uses a GID address to identify devices on the * network. */ -static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num) +static inline bool rdma_cap_af_ib(const struct ib_device *device, u32 port_num) { return device->port_data[port_num].immutable.core_cap_flags & RDMA_CORE_CAP_AF_IB; @@ -3235,7 +3252,7 @@ static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num) * addition of a Global Route Header built from our Ethernet Address * Handle into our header list for connectionless packets. */ -static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num) +static inline bool rdma_cap_eth_ah(const struct ib_device *device, u32 port_num) { return device->port_data[port_num].immutable.core_cap_flags & RDMA_CORE_CAP_ETH_AH; @@ -3250,7 +3267,7 @@ static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num) * Return: true if we are running on an OPA device which supports * the extended OPA addressing. 
*/ -static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num) +static inline bool rdma_cap_opa_ah(struct ib_device *device, u32 port_num) { return (device->port_data[port_num].immutable.core_cap_flags & RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH; @@ -3268,7 +3285,8 @@ static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num) * Return the max MAD size required by the Port. Will return 0 if the port * does not support MADs */ -static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num) +static inline size_t rdma_max_mad_size(const struct ib_device *device, + u32 port_num) { return device->port_data[port_num].immutable.max_mad_size; } @@ -3287,7 +3305,7 @@ static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_n * its GIDs. */ static inline bool rdma_cap_roce_gid_table(const struct ib_device *device, - u8 port_num) + u32 port_num) { return rdma_protocol_roce(device, port_num) && device->ops.add_gid && device->ops.del_gid; @@ -3328,7 +3346,7 @@ static inline bool rdma_core_cap_opa_port(struct ib_device *device, * Return the MTU size supported by the port as an integer value. Will return * -1 if enum value of mtu is not supported. */ -static inline int rdma_mtu_enum_to_int(struct ib_device *device, u8 port, +static inline int rdma_mtu_enum_to_int(struct ib_device *device, u32 port, int mtu) { if (rdma_core_cap_opa_port(device, port)) @@ -3345,7 +3363,7 @@ static inline int rdma_mtu_enum_to_int(struct ib_device *device, u8 port, * * Return the MTU size supported by the port as an integer value. */ -static inline int rdma_mtu_from_attr(struct ib_device *device, u8 port, +static inline int rdma_mtu_from_attr(struct ib_device *device, u32 port, struct ib_port_attr *attr) { if (rdma_core_cap_opa_port(device, port)) @@ -3354,34 +3372,34 @@ static inline int rdma_mtu_from_attr(struct ib_device *device, u8 port, return ib_mtu_enum_to_int(attr->max_mtu); } -int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, +int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port, int state); -int ib_get_vf_config(struct ib_device *device, int vf, u8 port, +int ib_get_vf_config(struct ib_device *device, int vf, u32 port, struct ifla_vf_info *info); -int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, +int ib_get_vf_stats(struct ib_device *device, int vf, u32 port, struct ifla_vf_stats *stats); -int ib_get_vf_guid(struct ib_device *device, int vf, u8 port, +int ib_get_vf_guid(struct ib_device *device, int vf, u32 port, struct ifla_vf_guid *node_guid, struct ifla_vf_guid *port_guid); -int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, +int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid, int type); int ib_query_pkey(struct ib_device *device, - u8 port_num, u16 index, u16 *pkey); + u32 port_num, u16 index, u16 *pkey); int ib_modify_device(struct ib_device *device, int device_modify_mask, struct ib_device_modify *device_modify); int ib_modify_port(struct ib_device *device, - u8 port_num, int port_modify_mask, + u32 port_num, int port_modify_mask, struct ib_port_modify *port_modify); int ib_find_gid(struct ib_device *device, union ib_gid *gid, - u8 *port_num, u16 *index); + u32 *port_num, u16 *index); int ib_find_pkey(struct ib_device *device, - u8 port_num, u16 pkey, u16 *index); + u32 port_num, u16 pkey, u16 *index); enum ib_pd_flags { /* @@ -3496,7 +3514,7 @@ int ib_get_rdma_header_version(const union rdma_network_hdr *hdr); * attributes which are initialized 
using ib_init_ah_attr_from_wc(). * */ -int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num, +int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num, const struct ib_wc *wc, const struct ib_grh *grh, struct rdma_ah_attr *ah_attr); @@ -3513,7 +3531,7 @@ int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num, * in all UD QP post sends. */ struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, - const struct ib_grh *grh, u8 port_num); + const struct ib_grh *grh, u32 port_num); /** * rdma_modify_ah - Modifies the address vector associated with an address @@ -3915,20 +3933,6 @@ struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe, void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe); -/** - * ib_req_ncomp_notif - Request completion notification when there are - * at least the specified number of unreaped completions on the CQ. - * @cq: The CQ to generate an event for. - * @wc_cnt: The number of unreaped completions that should be on the - * CQ before an event is generated. - */ -static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) -{ - return cq->device->ops.req_ncomp_notif ? - cq->device->ops.req_ncomp_notif(cq, wc_cnt) : - -ENOSYS; -} - /* * Drivers that don't need a DMA mapping at the RDMA layer, set dma_device to * NULL. This causes the ib_dma* helpers to just stash the kernel virtual @@ -4272,12 +4276,12 @@ struct ib_device *ib_device_get_by_netdev(struct net_device *ndev, enum rdma_driver_id driver_id); struct ib_device *ib_device_get_by_name(const char *name, enum rdma_driver_id driver_id); -struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port, +struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port, u16 pkey, const union ib_gid *gid, const struct sockaddr *addr); int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev, unsigned int port); -struct net_device *ib_device_netdev(struct ib_device *dev, u8 port); +struct net_device *ib_device_netdev(struct ib_device *dev, u32 port); struct ib_wq *ib_create_wq(struct ib_pd *pd, struct ib_wq_init_attr *init_attr); @@ -4311,7 +4315,8 @@ void ib_drain_rq(struct ib_qp *qp); void ib_drain_sq(struct ib_qp *qp); void ib_drain_qp(struct ib_qp *qp); -int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u16 *speed, u8 *width); +int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, + u8 *width); static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr) { @@ -4379,12 +4384,12 @@ static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr) return false; } -static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num) +static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u32 port_num) { attr->port_num = port_num; } -static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr) +static inline u32 rdma_ah_get_port_num(const struct rdma_ah_attr *attr) { return attr->port_num; } @@ -4482,7 +4487,7 @@ void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src); * @port_num: Port number */ static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev, - u8 port_num) + u32 port_num) { if (rdma_protocol_roce(dev, port_num)) return RDMA_AH_ATTR_TYPE_ROCE; @@ -4554,12 +4559,12 @@ struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile); int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs); -struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num, 
+struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num, enum rdma_netdev_t type, const char *name, unsigned char name_assign_type, void (*setup)(struct net_device *)); -int rdma_init_netdev(struct ib_device *device, u8 port_num, +int rdma_init_netdev(struct ib_device *device, u32 port_num, enum rdma_netdev_t type, const char *name, unsigned char name_assign_type, void (*setup)(struct net_device *), diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h index 91975400e1b3..03abd30e6c8c 100644 --- a/include/rdma/iw_cm.h +++ b/include/rdma/iw_cm.h @@ -70,6 +70,7 @@ struct iw_cm_id { u8 tos; bool tos_set:1; bool mapped:1; + bool afonly:1; }; struct iw_cm_conn_param { diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h index 32a67af18415..d989f030fae0 100644 --- a/include/rdma/rdma_cm.h +++ b/include/rdma/rdma_cm.h @@ -107,7 +107,7 @@ struct rdma_cm_id { struct rdma_route route; enum rdma_ucm_port_space ps; enum ib_qp_type qp_type; - u8 port_num; + u32 port_num; }; struct rdma_cm_id * @@ -331,6 +331,8 @@ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse); int rdma_set_afonly(struct rdma_cm_id *id, int afonly); int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout); + +int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer); /** * rdma_get_service_id - Return the IB service ID for a specified address. * @id: Communication identifier associated with the address. diff --git a/include/rdma/rdma_counter.h b/include/rdma/rdma_counter.h index e75cf9742e04..0295b22cd1cd 100644 --- a/include/rdma/rdma_counter.h +++ b/include/rdma/rdma_counter.h @@ -40,26 +40,26 @@ struct rdma_counter { struct rdma_counter_mode mode; struct mutex lock; struct rdma_hw_stats *stats; - u8 port; + u32 port; }; void rdma_counter_init(struct ib_device *dev); void rdma_counter_release(struct ib_device *dev); -int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port, +int rdma_counter_set_auto_mode(struct ib_device *dev, u32 port, enum rdma_nl_counter_mask mask, struct netlink_ext_ack *extack); -int rdma_counter_bind_qp_auto(struct ib_qp *qp, u8 port); +int rdma_counter_bind_qp_auto(struct ib_qp *qp, u32 port); int rdma_counter_unbind_qp(struct ib_qp *qp, bool force); int rdma_counter_query_stats(struct rdma_counter *counter); -u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index); -int rdma_counter_bind_qpn(struct ib_device *dev, u8 port, +u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u32 port, u32 index); +int rdma_counter_bind_qpn(struct ib_device *dev, u32 port, u32 qp_num, u32 counter_id); -int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port, +int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u32 port, u32 qp_num, u32 *counter_id); -int rdma_counter_unbind_qpn(struct ib_device *dev, u8 port, +int rdma_counter_unbind_qpn(struct ib_device *dev, u32 port, u32 qp_num, u32 counter_id); -int rdma_counter_get_mode(struct ib_device *dev, u8 port, +int rdma_counter_get_mode(struct ib_device *dev, u32 port, enum rdma_nl_counter_mode *mode, enum rdma_nl_counter_mask *mask); diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h index 9fd217b24916..2dafd7dbe893 100644 --- a/include/rdma/rdma_vt.h +++ b/include/rdma/rdma_vt.h @@ -92,7 +92,7 @@ struct rvt_ibport { /* * The pkey table is allocated and maintained by the driver. Drivers * need to have access to this before registering with rdmav. 
However - * rdmavt will need access to it so drivers need to proviee this during + * rdmavt will need access to it so drivers need to provide this during * the attach port API call. */ u16 *pkey_table; @@ -230,7 +230,7 @@ struct rvt_driver_provided { void (*do_send)(struct rvt_qp *qp); /* - * Returns a pointer to the undelying hardware's PCI device. This is + * Returns a pointer to the underlying hardware's PCI device. This is * used to display information as to what hardware is being referenced * in an output message */ @@ -245,7 +245,7 @@ struct rvt_driver_provided { void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp); /* - * Init a struture allocated with qp_priv_alloc(). This should be + * Init a structure allocated with qp_priv_alloc(). This should be * called after all qp fields have been initialized in rdmavt. */ int (*qp_priv_init)(struct rvt_dev_info *rdi, struct rvt_qp *qp, @@ -257,7 +257,7 @@ struct rvt_driver_provided { void (*qp_priv_free)(struct rvt_dev_info *rdi, struct rvt_qp *qp); /* - * Inform the driver the particular qp in quesiton has been reset so + * Inform the driver the particular qp in question has been reset so * that it can clean up anything it needs to. */ void (*notify_qp_reset)(struct rvt_qp *qp); @@ -281,7 +281,7 @@ struct rvt_driver_provided { void (*stop_send_queue)(struct rvt_qp *qp); /* - * Have the drivr drain any in progress operations + * Have the driver drain any in progress operations */ void (*quiesce_qp)(struct rvt_qp *qp); @@ -309,16 +309,16 @@ struct rvt_driver_provided { /* * Query driver for the state of the port. */ - int (*query_port_state)(struct rvt_dev_info *rdi, u8 port_num, + int (*query_port_state)(struct rvt_dev_info *rdi, u32 port_num, struct ib_port_attr *props); /* * Tell driver to shutdown a port */ - int (*shut_down_port)(struct rvt_dev_info *rdi, u8 port_num); + int (*shut_down_port)(struct rvt_dev_info *rdi, u32 port_num); /* Tell driver to send a trap for changed port capabilities */ - void (*cap_mask_chg)(struct rvt_dev_info *rdi, u8 port_num); + void (*cap_mask_chg)(struct rvt_dev_info *rdi, u32 port_num); /* * The following functions can be safely ignored completely. 
Any use of @@ -338,7 +338,7 @@ struct rvt_driver_provided { /* Let the driver pick the next queue pair number*/ int (*alloc_qpn)(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, - enum ib_qp_type type, u8 port_num); + enum ib_qp_type type, u32 port_num); /* Determine if its safe or allowed to modify the qp */ int (*check_modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr, diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h index 05e18839eaff..79d109c47242 100644 --- a/include/rdma/restrack.h +++ b/include/rdma/restrack.h @@ -50,6 +50,10 @@ enum rdma_restrack_type { */ RDMA_RESTRACK_COUNTER, /** + * @RDMA_RESTRACK_SRQ: Shared receive queue (SRQ) + */ + RDMA_RESTRACK_SRQ, + /** * @RDMA_RESTRACK_MAX: Last entry, used for array dclarations */ RDMA_RESTRACK_MAX diff --git a/include/rdma/rw.h b/include/rdma/rw.h index 6ad9dc836c10..d606cac48233 100644 --- a/include/rdma/rw.h +++ b/include/rdma/rw.h @@ -42,29 +42,29 @@ struct rdma_rw_ctx { }; }; -int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, +int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num, struct scatterlist *sg, u32 sg_cnt, u32 sg_offset, u64 remote_addr, u32 rkey, enum dma_data_direction dir); -void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, - struct scatterlist *sg, u32 sg_cnt, - enum dma_data_direction dir); +void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, + u32 port_num, struct scatterlist *sg, u32 sg_cnt, + enum dma_data_direction dir); int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, - u8 port_num, struct scatterlist *sg, u32 sg_cnt, + u32 port_num, struct scatterlist *sg, u32 sg_cnt, struct scatterlist *prot_sg, u32 prot_sg_cnt, struct ib_sig_attrs *sig_attrs, u64 remote_addr, u32 rkey, enum dma_data_direction dir); void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp, - u8 port_num, struct scatterlist *sg, u32 sg_cnt, + u32 port_num, struct scatterlist *sg, u32 sg_cnt, struct scatterlist *prot_sg, u32 prot_sg_cnt, enum dma_data_direction dir); struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp, - u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr); -int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, + u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr); +int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr); -unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num, +unsigned int rdma_rw_mr_factor(struct ib_device *device, u32 port_num, unsigned int maxpages); void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr); int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr); diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h index 39ef204753ec..23bb404aba12 100644 --- a/include/rdma/uverbs_ioctl.h +++ b/include/rdma/uverbs_ioctl.h @@ -875,9 +875,14 @@ static inline __malloc void *uverbs_kcalloc(struct uverbs_attr_bundle *bundle, return ERR_PTR(-EOVERFLOW); return uverbs_zalloc(bundle, bytes); } -int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, - size_t idx, s64 lower_bound, u64 upper_bound, - s64 *def_val); + +int _uverbs_get_const_signed(s64 *to, + const struct uverbs_attr_bundle *attrs_bundle, + size_t idx, s64 lower_bound, u64 upper_bound, + s64 *def_val); +int _uverbs_get_const_unsigned(u64 *to, + const struct 
uverbs_attr_bundle *attrs_bundle, + size_t idx, u64 upper_bound, u64 *def_val); int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle, size_t idx, const void *from, size_t size); #else @@ -921,27 +926,77 @@ uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle, { return -EINVAL; } +static inline int +_uverbs_get_const_signed(s64 *to, + const struct uverbs_attr_bundle *attrs_bundle, + size_t idx, s64 lower_bound, u64 upper_bound, + s64 *def_val) +{ + return -EINVAL; +} +static inline int +_uverbs_get_const_unsigned(u64 *to, + const struct uverbs_attr_bundle *attrs_bundle, + size_t idx, u64 upper_bound, u64 *def_val) +{ + return -EINVAL; +} #endif -#define uverbs_get_const(_to, _attrs_bundle, _idx) \ +#define uverbs_get_const_signed(_to, _attrs_bundle, _idx) \ ({ \ s64 _val; \ - int _ret = _uverbs_get_const(&_val, _attrs_bundle, _idx, \ - type_min(typeof(*_to)), \ - type_max(typeof(*_to)), NULL); \ - (*_to) = _val; \ + int _ret = \ + _uverbs_get_const_signed(&_val, _attrs_bundle, _idx, \ + type_min(typeof(*(_to))), \ + type_max(typeof(*(_to))), NULL); \ + (*(_to)) = _val; \ _ret; \ }) -#define uverbs_get_const_default(_to, _attrs_bundle, _idx, _default) \ +#define uverbs_get_const_unsigned(_to, _attrs_bundle, _idx) \ + ({ \ + u64 _val; \ + int _ret = \ + _uverbs_get_const_unsigned(&_val, _attrs_bundle, _idx, \ + type_max(typeof(*(_to))), NULL); \ + (*(_to)) = _val; \ + _ret; \ + }) + +#define uverbs_get_const_default_signed(_to, _attrs_bundle, _idx, _default) \ ({ \ s64 _val; \ s64 _def_val = _default; \ int _ret = \ - _uverbs_get_const(&_val, _attrs_bundle, _idx, \ - type_min(typeof(*_to)), \ - type_max(typeof(*_to)), &_def_val); \ - (*_to) = _val; \ + _uverbs_get_const_signed(&_val, _attrs_bundle, _idx, \ + type_min(typeof(*(_to))), \ + type_max(typeof(*(_to))), &_def_val); \ + (*(_to)) = _val; \ _ret; \ }) + +#define uverbs_get_const_default_unsigned(_to, _attrs_bundle, _idx, _default) \ + ({ \ + u64 _val; \ + u64 _def_val = _default; \ + int _ret = \ + _uverbs_get_const_unsigned(&_val, _attrs_bundle, _idx, \ + type_max(typeof(*(_to))), &_def_val); \ + (*(_to)) = _val; \ + _ret; \ + }) + +#define uverbs_get_const(_to, _attrs_bundle, _idx) \ + (is_signed_type(typeof(*(_to))) ? \ + uverbs_get_const_signed(_to, _attrs_bundle, _idx) : \ + uverbs_get_const_unsigned(_to, _attrs_bundle, _idx)) \ + +#define uverbs_get_const_default(_to, _attrs_bundle, _idx, _default) \ + (is_signed_type(typeof(*(_to))) ? \ + uverbs_get_const_default_signed(_to, _attrs_bundle, _idx, \ + _default) : \ + uverbs_get_const_default_unsigned(_to, _attrs_bundle, _idx, \ + _default)) + #endif diff --git a/include/rdma/uverbs_named_ioctl.h b/include/rdma/uverbs_named_ioctl.h index f04f5126f61e..ee7873f872c3 100644 --- a/include/rdma/uverbs_named_ioctl.h +++ b/include/rdma/uverbs_named_ioctl.h @@ -20,7 +20,7 @@ /* These are static so they do not need to be qualified */ #define UVERBS_METHOD_ATTRS(method_id) _method_attrs_##method_id -#define UVERBS_OBJECT_METHODS(object_id) _object_methods_##object_id +#define UVERBS_OBJECT_METHODS(object_id) _UVERBS_NAME(_object_methods_##object_id, __LINE__) #define DECLARE_UVERBS_NAMED_METHOD(_method_id, ...) 
\ static const struct uverbs_attr_def *const UVERBS_METHOD_ATTRS( \ diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h index 2568cb0627ec..fac8e89aed81 100644 --- a/include/scsi/libfcoe.h +++ b/include/scsi/libfcoe.h @@ -249,7 +249,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *, struct fc_frame *); /* libfcoe funcs */ -u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int); +u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], unsigned int, unsigned int); int fcoe_libfc_config(struct fc_lport *, struct fcoe_ctlr *, const struct libfc_function_template *, int init_fcp); u32 fcoe_fc_crc(struct fc_frame *fp); diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index e75cca25338a..246ced401683 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -180,16 +180,17 @@ static inline int scsi_is_wlun(u64 lun) /* * Internal return values. */ - -#define NEEDS_RETRY 0x2001 -#define SUCCESS 0x2002 -#define FAILED 0x2003 -#define QUEUED 0x2004 -#define SOFT_ERROR 0x2005 -#define ADD_TO_MLQUEUE 0x2006 -#define TIMEOUT_ERROR 0x2007 -#define SCSI_RETURN_NOT_HANDLED 0x2008 -#define FAST_IO_FAIL 0x2009 +enum scsi_disposition { + NEEDS_RETRY = 0x2001, + SUCCESS = 0x2002, + FAILED = 0x2003, + QUEUED = 0x2004, + SOFT_ERROR = 0x2005, + ADD_TO_MLQUEUE = 0x2006, + TIMEOUT_ERROR = 0x2007, + SCSI_RETURN_NOT_HANDLED = 0x2008, + FAST_IO_FAIL = 0x2009, +}; /* * Midlevel queue return values. diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index ace15b5dc956..fed024f4c02a 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -10,6 +10,7 @@ #include <linux/timer.h> #include <linux/scatterlist.h> #include <scsi/scsi_device.h> +#include <scsi/scsi_host.h> #include <scsi/scsi_request.h> struct Scsi_Host; @@ -55,11 +56,10 @@ struct scsi_pointer { /* for scmd->flags */ #define SCMD_TAGGED (1 << 0) -#define SCMD_UNCHECKED_ISA_DMA (1 << 1) -#define SCMD_INITIALIZED (1 << 2) -#define SCMD_LAST (1 << 3) +#define SCMD_INITIALIZED (1 << 1) +#define SCMD_LAST (1 << 2) /* flags preserved across unprep / reprep */ -#define SCMD_PRESERVED_FLAGS (SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED) +#define SCMD_PRESERVED_FLAGS (SCMD_INITIALIZED) /* for scmd->state */ #define SCMD_STATE_COMPLETE 0 @@ -75,6 +75,8 @@ struct scsi_cmnd { int eh_eflags; /* Used by error handlr */ + int budget_token; + /* * This is set to jiffies as it was when the command was first * allocated. It is used to time how long the command has diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 1a5c9a3df6d6..ac6ab16abee7 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -8,6 +8,7 @@ #include <linux/blkdev.h> #include <scsi/scsi.h> #include <linux/atomic.h> +#include <linux/sbitmap.h> struct device; struct request_queue; @@ -106,7 +107,7 @@ struct scsi_device { struct list_head siblings; /* list of all devices on this host */ struct list_head same_target_siblings; /* just the devices sharing same target id */ - atomic_t device_busy; /* commands actually active on LLDD */ + struct sbitmap budget_map; atomic_t device_blocked; /* Device returned QUEUE_FULL. */ atomic_t restarts; @@ -145,7 +146,7 @@ struct scsi_device { struct scsi_vpd __rcu *vpd_pg80; struct scsi_vpd __rcu *vpd_pg89; unsigned char current_tag; /* current tag */ - struct scsi_target *sdev_target; /* used only for single_lun */ + struct scsi_target *sdev_target; blist_flags_t sdev_bflags; /* black/white flags as also found in * scsi_devinfo.[hc]. 
For now used only to @@ -590,6 +591,11 @@ static inline int scsi_device_supports_vpd(struct scsi_device *sdev) return 0; } +static inline int scsi_device_busy(struct scsi_device *sdev) +{ + return sbitmap_weight(&sdev->budget_map); +} + #define MODULE_ALIAS_SCSI_DEVICE(type) \ MODULE_ALIAS("scsi:t-" __stringify(type) "*") #define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x" diff --git a/include/scsi/scsi_dh.h b/include/scsi/scsi_dh.h index 2852e470a8ed..4df943c1b90b 100644 --- a/include/scsi/scsi_dh.h +++ b/include/scsi/scsi_dh.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ /* - * Header file for SCSI device handler infrastruture. + * Header file for SCSI device handler infrastructure. * * Modified version of patches posted by Mike Christie <michaelc@cs.wisc.edu> * @@ -52,7 +52,8 @@ struct scsi_device_handler { /* Filled by the hardware handler */ struct module *module; const char *name; - int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *); + enum scsi_disposition (*check_sense)(struct scsi_device *, + struct scsi_sense_hdr *); int (*attach)(struct scsi_device *); void (*detach)(struct scsi_device *); int (*activate)(struct scsi_device *, activate_complete, void *); diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h index 6bd5ed695a5e..468094254b3c 100644 --- a/include/scsi/scsi_eh.h +++ b/include/scsi/scsi_eh.h @@ -17,7 +17,7 @@ extern void scsi_report_device_reset(struct Scsi_Host *, int, int); extern int scsi_block_when_processing_errors(struct scsi_device *); extern bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd, struct scsi_sense_hdr *sshdr); -extern int scsi_check_sense(struct scsi_cmnd *); +extern enum scsi_disposition scsi_check_sense(struct scsi_cmnd *); static inline bool scsi_sense_is_deferred(const struct scsi_sense_hdr *sshdr) { diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index e30fd963b97d..d0bf88d77f02 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -19,7 +19,6 @@ struct scsi_device; struct scsi_host_cmd_pool; struct scsi_target; struct Scsi_Host; -struct scsi_host_cmd_pool; struct scsi_transport_template; @@ -30,40 +29,15 @@ struct scsi_transport_template; #define MODE_TARGET 0x02 struct scsi_host_template { - struct module *module; - const char *name; - /* - * The info function will return whatever useful information the - * developer sees fit. If not provided, then the name field will - * be used instead. - * - * Status: OPTIONAL + * Put fields referenced in IO submission path together in + * same cacheline */ - const char *(* info)(struct Scsi_Host *); /* - * Ioctl interface - * - * Status: OPTIONAL - */ - int (*ioctl)(struct scsi_device *dev, unsigned int cmd, - void __user *arg); - - -#ifdef CONFIG_COMPAT - /* - * Compat handler. Handle 32bit ABI. - * When unknown ioctl is passed return -ENOIOCTLCMD. - * - * Status: OPTIONAL + * Additional per-command data allocated for the driver. */ - int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd, - void __user *arg); -#endif - - int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd); - int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd); + unsigned int cmd_size; /* * The queuecommand function is used to queue up a scsi @@ -111,6 +85,41 @@ struct scsi_host_template { */ void (*commit_rqs)(struct Scsi_Host *, u16); + struct module *module; + const char *name; + + /* + * The info function will return whatever useful information the + * developer sees fit. 
If not provided, then the name field will + * be used instead. + * + * Status: OPTIONAL + */ + const char *(*info)(struct Scsi_Host *); + + /* + * Ioctl interface + * + * Status: OPTIONAL + */ + int (*ioctl)(struct scsi_device *dev, unsigned int cmd, + void __user *arg); + + +#ifdef CONFIG_COMPAT + /* + * Compat handler. Handle 32bit ABI. + * When unknown ioctl is passed return -ENOIOCTLCMD. + * + * Status: OPTIONAL + */ + int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd, + void __user *arg); +#endif + + int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd); + int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd); + /* * This is an error handling strategy routine. You don't need to * define one of these if you don't want to - there is a default @@ -271,6 +280,16 @@ struct scsi_host_template { int (* map_queues)(struct Scsi_Host *shost); /* + * SCSI interface of blk_poll - poll for IO completions. + * Only applicable if SCSI LLD exposes multiple h/w queues. + * + * Return value: Number of completed entries found. + * + * Status: OPTIONAL + */ + int (* mq_poll)(struct Scsi_Host *shost, unsigned int queue_num); + + /* * Check if scatterlists need to be padded for DMA draining. * * Status: OPTIONAL @@ -425,11 +444,6 @@ struct scsi_host_template { unsigned supported_mode:2; /* - * True if this host adapter uses unchecked DMA onto an ISA bus. - */ - unsigned unchecked_isa_dma:1; - - /* * True for emulated SCSI host adapters (e.g. ATAPI). */ unsigned emulated:1; @@ -484,10 +498,6 @@ struct scsi_host_template { */ u64 vendor_id; - /* - * Additional per-command data allocated for the driver. - */ - unsigned int cmd_size; struct scsi_host_cmd_pool *cmd_pool; /* Delay for runtime autosuspend */ @@ -616,8 +626,8 @@ struct Scsi_Host { * the total queue depth is can_queue. 
*/ unsigned nr_hw_queues; + unsigned nr_maps; unsigned active_mode:2; - unsigned unchecked_isa_dma:1; /* * Host has requested that no further requests come through for the diff --git a/include/soc/at91/pm.h b/include/soc/at91/pm.h new file mode 100644 index 000000000000..7a41e53a3ffa --- /dev/null +++ b/include/soc/at91/pm.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Atmel Power Management + * + * Copyright (C) 2020 Atmel + * + * Author: Lee Jones <lee.jones@linaro.org> + */ + +#ifndef __SOC_ATMEL_PM_H +#define __SOC_ATMEL_PM_H + +void at91_pinctrl_gpio_suspend(void); +void at91_pinctrl_gpio_resume(void); + +#endif /* __SOC_ATMEL_PM_H */ diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h index cc9cdbc66403..73ad784fca96 100644 --- a/include/soc/bcm2835/raspberrypi-firmware.h +++ b/include/soc/bcm2835/raspberrypi-firmware.h @@ -140,7 +140,10 @@ int rpi_firmware_property(struct rpi_firmware *fw, u32 tag, void *data, size_t len); int rpi_firmware_property_list(struct rpi_firmware *fw, void *data, size_t tag_size); +void rpi_firmware_put(struct rpi_firmware *fw); struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node); +struct rpi_firmware *devm_rpi_firmware_get(struct device *dev, + struct device_node *firmware_node); #else static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag, void *data, size_t len) @@ -154,10 +157,17 @@ static inline int rpi_firmware_property_list(struct rpi_firmware *fw, return -ENOSYS; } +static inline void rpi_firmware_put(struct rpi_firmware *fw) { } static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node) { return NULL; } + +static inline struct rpi_firmware *devm_rpi_firmware_get(struct device *dev, + struct device_node *firmware_node) +{ + return NULL; +} #endif #endif /* __SOC_RASPBERRY_FIRMWARE_H__ */ diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h index 4925a1b59dc9..b02e9fe69146 100644 --- a/include/soc/fsl/qe/qe.h +++ b/include/soc/fsl/qe/qe.h @@ -239,37 +239,21 @@ static inline int qe_alive_during_sleep(void) #define qe_muram_dma cpm_muram_dma #define qe_muram_free_addr cpm_muram_free_addr -#ifdef CONFIG_PPC32 -#define qe_iowrite8(val, addr) out_8(addr, val) -#define qe_iowrite16be(val, addr) out_be16(addr, val) -#define qe_iowrite32be(val, addr) out_be32(addr, val) -#define qe_ioread8(addr) in_8(addr) -#define qe_ioread16be(addr) in_be16(addr) -#define qe_ioread32be(addr) in_be32(addr) -#else -#define qe_iowrite8(val, addr) iowrite8(val, addr) -#define qe_iowrite16be(val, addr) iowrite16be(val, addr) -#define qe_iowrite32be(val, addr) iowrite32be(val, addr) -#define qe_ioread8(addr) ioread8(addr) -#define qe_ioread16be(addr) ioread16be(addr) -#define qe_ioread32be(addr) ioread32be(addr) -#endif - -#define qe_setbits_be32(_addr, _v) qe_iowrite32be(qe_ioread32be(_addr) | (_v), (_addr)) -#define qe_clrbits_be32(_addr, _v) qe_iowrite32be(qe_ioread32be(_addr) & ~(_v), (_addr)) +#define qe_setbits_be32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr)) +#define qe_clrbits_be32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr)) -#define qe_setbits_be16(_addr, _v) qe_iowrite16be(qe_ioread16be(_addr) | (_v), (_addr)) -#define qe_clrbits_be16(_addr, _v) qe_iowrite16be(qe_ioread16be(_addr) & ~(_v), (_addr)) +#define qe_setbits_be16(_addr, _v) iowrite16be(ioread16be(_addr) | (_v), (_addr)) +#define qe_clrbits_be16(_addr, _v) iowrite16be(ioread16be(_addr) & ~(_v), (_addr)) -#define qe_setbits_8(_addr, 
_v) qe_iowrite8(qe_ioread8(_addr) | (_v), (_addr)) -#define qe_clrbits_8(_addr, _v) qe_iowrite8(qe_ioread8(_addr) & ~(_v), (_addr)) +#define qe_setbits_8(_addr, _v) iowrite8(ioread8(_addr) | (_v), (_addr)) +#define qe_clrbits_8(_addr, _v) iowrite8(ioread8(_addr) & ~(_v), (_addr)) #define qe_clrsetbits_be32(addr, clear, set) \ - qe_iowrite32be((qe_ioread32be(addr) & ~(clear)) | (set), (addr)) + iowrite32be((ioread32be(addr) & ~(clear)) | (set), (addr)) #define qe_clrsetbits_be16(addr, clear, set) \ - qe_iowrite16be((qe_ioread16be(addr) & ~(clear)) | (set), (addr)) + iowrite16be((ioread16be(addr) & ~(clear)) | (set), (addr)) #define qe_clrsetbits_8(addr, clear, set) \ - qe_iowrite8((qe_ioread8(addr) & ~(clear)) | (set), (addr)) + iowrite8((ioread8(addr) & ~(clear)) | (set), (addr)) /* Structure that defines QE firmware binary files. * diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 425ff29d9389..2f5ce4d4fdbf 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -51,6 +51,7 @@ */ /* Reserve some destination PGIDs at the end of the range: + * PGID_BLACKHOLE: used for not forwarding the frames * PGID_CPU: used for whitelisting certain MAC addresses, such as the addresses * of the switch port net devices, towards the CPU port module. * PGID_UC: the flooding destinations for unknown unicast traffic. @@ -59,6 +60,7 @@ * PGID_MCIPV6: the flooding destinations for IPv6 multicast traffic. * PGID_BC: the flooding destinations for broadcast traffic. */ +#define PGID_BLACKHOLE 57 #define PGID_CPU 58 #define PGID_UC 59 #define PGID_MC 60 @@ -73,7 +75,7 @@ #define for_each_nonreserved_multicast_dest_pgid(ocelot, pgid) \ for ((pgid) = (ocelot)->num_phys_ports + 1; \ - (pgid) < PGID_CPU; \ + (pgid) < PGID_BLACKHOLE; \ (pgid)++) #define for_each_aggr_pgid(ocelot, pgid) \ @@ -611,6 +613,11 @@ struct ocelot_port { struct net_device *bond; bool lag_tx_active; + + u16 mrp_ring_id; + + struct net_device *bridge; + u8 stp_state; }; struct ocelot { @@ -630,10 +637,6 @@ struct ocelot { int num_frame_refs; int num_mact_rows; - struct net_device *hw_bridge_dev; - u16 bridge_mask; - u16 bridge_fwd_mask; - struct ocelot_port **ports; u8 base_mac[ETH_ALEN]; @@ -679,12 +682,6 @@ struct ocelot { /* Protects the PTP clock */ spinlock_t ptp_clock_lock; struct ptp_pin_desc ptp_pins[OCELOT_PTP_PINS_NUM]; - -#if IS_ENABLED(CONFIG_BRIDGE_MRP) - u16 mrp_ring_id; - struct net_device *mrp_p_port; - struct net_device *mrp_s_port; -#endif }; struct ocelot_policer { @@ -692,6 +689,15 @@ struct ocelot_policer { u32 burst; /* bytes */ }; +struct ocelot_skb_cb { + struct sk_buff *clone; + u8 ptp_cmd; + u8 ts_id; +}; + +#define OCELOT_SKB_CB(skb) \ + ((struct ocelot_skb_cb *)((skb)->cb)) + #define ocelot_read_ix(ocelot, reg, gi, ri) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri)) #define ocelot_read_gix(ocelot, reg, gi) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi)) #define ocelot_read_rix(ocelot, reg, ri) __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri)) @@ -743,15 +749,16 @@ u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target, void __ocelot_target_write_ix(struct ocelot *ocelot, enum ocelot_target target, u32 val, u32 reg, u32 offset); -/* Packet I/O */ #if IS_ENABLED(CONFIG_MSCC_OCELOT_SWITCH_LIB) +/* Packet I/O */ bool ocelot_can_inject(struct ocelot *ocelot, int grp); void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp, u32 rew_op, struct sk_buff *skb); int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff 
**skb); void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp); +u32 ocelot_ptp_rew_op(struct sk_buff *skb); #else static inline bool ocelot_can_inject(struct ocelot *ocelot, int grp) @@ -775,6 +782,10 @@ static inline void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp) { } +static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb) +{ + return 0; +} #endif /* Hardware initialization */ @@ -806,10 +817,10 @@ int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port, struct switchdev_brport_flags val); void ocelot_port_bridge_flags(struct ocelot *ocelot, int port, struct switchdev_brport_flags val); -int ocelot_port_bridge_join(struct ocelot *ocelot, int port, - struct net_device *bridge); -int ocelot_port_bridge_leave(struct ocelot *ocelot, int port, +void ocelot_port_bridge_join(struct ocelot *ocelot, int port, struct net_device *bridge); +void ocelot_port_bridge_leave(struct ocelot *ocelot, int port, + struct net_device *bridge); int ocelot_fdb_dump(struct ocelot *ocelot, int port, dsa_fdb_dump_cb_t *cb, void *data); int ocelot_fdb_add(struct ocelot *ocelot, int port, @@ -823,8 +834,9 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid); int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr); int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr); -void ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port, - struct sk_buff *clone); +int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port, + struct sk_buff *skb, + struct sk_buff **clone); void ocelot_get_txtstamp(struct ocelot *ocelot); void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu); int ocelot_get_max_mtu(struct ocelot *ocelot, int port); diff --git a/include/soc/mscc/ocelot_ptp.h b/include/soc/mscc/ocelot_ptp.h index 6a7388fa7cc5..ded497d72bdb 100644 --- a/include/soc/mscc/ocelot_ptp.h +++ b/include/soc/mscc/ocelot_ptp.h @@ -37,8 +37,6 @@ enum { #define PTP_CFG_MISC_PTP_EN BIT(2) -#define PSEC_PER_SEC 1000000000000LL - #define PTP_CFG_CLK_ADJ_CFG_ENA BIT(0) #define PTP_CFG_CLK_ADJ_CFG_DIR BIT(1) diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h index d731407e23bb..d2fbe6a8b25b 100644 --- a/include/soc/tegra/mc.h +++ b/include/soc/tegra/mc.h @@ -7,6 +7,7 @@ #define __SOC_TEGRA_MC_H__ #include <linux/bits.h> +#include <linux/debugfs.h> #include <linux/err.h> #include <linux/interconnect-provider.h> #include <linux/reset-controller.h> @@ -175,6 +176,8 @@ struct tegra_mc_soc { unsigned int num_resets; const struct tegra_mc_icc_ops *icc_ops; + + int (*init)(struct tegra_mc *mc); }; struct tegra_mc { @@ -196,6 +199,10 @@ struct tegra_mc { struct icc_provider provider; spinlock_t lock; + + struct { + struct dentry *root; + } debugfs; }; int tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate); diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index ce2fba49c95d..1f78b09bba55 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -34,6 +34,8 @@ struct target_backend_ops { int (*configure_device)(struct se_device *); void (*destroy_device)(struct se_device *); void (*free_device)(struct se_device *device); + struct se_dev_plug *(*plug_device)(struct se_device *se_dev); + void (*unplug_device)(struct se_dev_plug *se_plug); ssize_t (*set_configfs_dev_params)(struct se_device *, const char *, ssize_t); diff --git a/include/target/target_core_base.h 
b/include/target/target_core_base.h index 54dcc0eb25fa..d1f7d2a45354 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -127,25 +127,25 @@ enum transport_state_table { /* Used for struct se_cmd->se_cmd_flags */ enum se_cmd_flags_table { - SCF_SUPPORTED_SAM_OPCODE = 0x00000001, - SCF_TRANSPORT_TASK_SENSE = 0x00000002, - SCF_EMULATED_TASK_SENSE = 0x00000004, - SCF_SCSI_DATA_CDB = 0x00000008, - SCF_SCSI_TMR_CDB = 0x00000010, - SCF_FUA = 0x00000080, - SCF_SE_LUN_CMD = 0x00000100, - SCF_BIDI = 0x00000400, - SCF_SENT_CHECK_CONDITION = 0x00000800, - SCF_OVERFLOW_BIT = 0x00001000, - SCF_UNDERFLOW_BIT = 0x00002000, - SCF_ALUA_NON_OPTIMIZED = 0x00008000, - SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000, - SCF_COMPARE_AND_WRITE = 0x00080000, - SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000, - SCF_ACK_KREF = 0x00400000, - SCF_USE_CPUID = 0x00800000, - SCF_TASK_ATTR_SET = 0x01000000, - SCF_TREAT_READ_AS_NORMAL = 0x02000000, + SCF_SUPPORTED_SAM_OPCODE = (1 << 0), + SCF_TRANSPORT_TASK_SENSE = (1 << 1), + SCF_EMULATED_TASK_SENSE = (1 << 2), + SCF_SCSI_DATA_CDB = (1 << 3), + SCF_SCSI_TMR_CDB = (1 << 4), + SCF_FUA = (1 << 5), + SCF_SE_LUN_CMD = (1 << 6), + SCF_BIDI = (1 << 7), + SCF_SENT_CHECK_CONDITION = (1 << 8), + SCF_OVERFLOW_BIT = (1 << 9), + SCF_UNDERFLOW_BIT = (1 << 10), + SCF_ALUA_NON_OPTIMIZED = (1 << 11), + SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = (1 << 12), + SCF_COMPARE_AND_WRITE = (1 << 13), + SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = (1 << 14), + SCF_ACK_KREF = (1 << 15), + SCF_USE_CPUID = (1 << 16), + SCF_TASK_ATTR_SET = (1 << 17), + SCF_TREAT_READ_AS_NORMAL = (1 << 18), }; /* @@ -488,7 +488,7 @@ struct se_cmd { /* Only used for internal passthrough and legacy TCM fabric modules */ struct se_session *se_sess; struct se_tmr_req *se_tmr_req; - struct list_head se_cmd_list; + struct llist_node se_cmd_list; struct completion *free_compl; struct completion *abrt_compl; const struct target_core_fabric_ops *se_tfo; @@ -765,9 +765,19 @@ struct se_dev_stat_grps { struct config_group scsi_lu_group; }; +struct se_cmd_queue { + struct llist_head cmd_list; + struct work_struct work; +}; + +struct se_dev_plug { + struct se_device *se_dev; +}; + struct se_device_queue { struct list_head state_list; spinlock_t lock; + struct se_cmd_queue sq; }; struct se_device { @@ -934,11 +944,20 @@ static inline struct se_portal_group *param_to_tpg(struct config_item *item) tpg_param_group); } +enum { + /* Use se_cmd's cpuid for completion */ + SE_COMPL_AFFINITY_CPUID = -1, + /* Complete on current CPU */ + SE_COMPL_AFFINITY_CURR_CPU = -2, +}; + struct se_wwn { struct target_fabric_configfs *wwn_tf; void *priv; struct config_group wwn_group; struct config_group fabric_stat_group; + struct config_group param_group; + int cmd_compl_affinity; }; static inline void atomic_inc_mb(atomic_t *v) diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index d60a3eb7517a..3c5ade7a04a6 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h @@ -148,18 +148,25 @@ void transport_deregister_session_configfs(struct se_session *); void transport_deregister_session(struct se_session *); -void transport_init_se_cmd(struct se_cmd *, +void __target_init_cmd(struct se_cmd *, const struct target_core_fabric_ops *, struct se_session *, u32, int, int, unsigned char *, u64); +int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, + unsigned char *sense, u64 unpacked_lun, u32 data_length, + int task_attr, int data_dir, int 
flags); +int target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb, + struct scatterlist *sgl, u32 sgl_count, + struct scatterlist *sgl_bidi, u32 sgl_bidi_count, + struct scatterlist *sgl_prot, u32 sgl_prot_count, gfp_t gfp); +void target_submit(struct se_cmd *se_cmd); sense_reason_t transport_lookup_cmd_lun(struct se_cmd *); -sense_reason_t target_cmd_init_cdb(struct se_cmd *, unsigned char *); +sense_reason_t target_cmd_init_cdb(struct se_cmd *se_cmd, unsigned char *cdb, + gfp_t gfp); sense_reason_t target_cmd_parse_cdb(struct se_cmd *); -int target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *, - unsigned char *, unsigned char *, u64, u32, int, int, int, - struct scatterlist *, u32, struct scatterlist *, u32, - struct scatterlist *, u32); -int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *, +void target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *, unsigned char *, u64, u32, int, int, int); +void target_queue_submission(struct se_cmd *se_cmd); + int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, unsigned char *sense, u64 unpacked_lun, void *fabric_tmr_ptr, unsigned char tm_type, diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index 4a5cc8c64be3..3ccf591b2374 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -886,65 +886,52 @@ TRACE_EVENT(afs_call_done, __entry->rx_call) ); -TRACE_EVENT(afs_send_pages, - TP_PROTO(struct afs_call *call, struct msghdr *msg, - pgoff_t first, pgoff_t last, unsigned int offset), +TRACE_EVENT(afs_send_data, + TP_PROTO(struct afs_call *call, struct msghdr *msg), - TP_ARGS(call, msg, first, last, offset), + TP_ARGS(call, msg), TP_STRUCT__entry( __field(unsigned int, call ) - __field(pgoff_t, first ) - __field(pgoff_t, last ) - __field(unsigned int, nr ) - __field(unsigned int, bytes ) - __field(unsigned int, offset ) __field(unsigned int, flags ) + __field(loff_t, offset ) + __field(loff_t, count ) ), TP_fast_assign( __entry->call = call->debug_id; - __entry->first = first; - __entry->last = last; - __entry->nr = msg->msg_iter.nr_segs; - __entry->bytes = msg->msg_iter.count; - __entry->offset = offset; __entry->flags = msg->msg_flags; + __entry->offset = msg->msg_iter.xarray_start + msg->msg_iter.iov_offset; + __entry->count = iov_iter_count(&msg->msg_iter); ), - TP_printk(" c=%08x %lx-%lx-%lx b=%x o=%x f=%x", - __entry->call, - __entry->first, __entry->first + __entry->nr - 1, __entry->last, - __entry->bytes, __entry->offset, + TP_printk(" c=%08x o=%llx n=%llx f=%x", + __entry->call, __entry->offset, __entry->count, __entry->flags) ); -TRACE_EVENT(afs_sent_pages, - TP_PROTO(struct afs_call *call, pgoff_t first, pgoff_t last, - pgoff_t cursor, int ret), +TRACE_EVENT(afs_sent_data, + TP_PROTO(struct afs_call *call, struct msghdr *msg, int ret), - TP_ARGS(call, first, last, cursor, ret), + TP_ARGS(call, msg, ret), TP_STRUCT__entry( __field(unsigned int, call ) - __field(pgoff_t, first ) - __field(pgoff_t, last ) - __field(pgoff_t, cursor ) __field(int, ret ) + __field(loff_t, offset ) + __field(loff_t, count ) ), TP_fast_assign( __entry->call = call->debug_id; - __entry->first = first; - __entry->last = last; - __entry->cursor = cursor; __entry->ret = ret; + __entry->offset = msg->msg_iter.xarray_start + msg->msg_iter.iov_offset; + __entry->count = iov_iter_count(&msg->msg_iter); ), - TP_printk(" c=%08x %lx-%lx c=%lx r=%d", - __entry->call, - __entry->first, __entry->last, - __entry->cursor, __entry->ret) + TP_printk(" c=%08x o=%llx 
n=%llx r=%x", + __entry->call, __entry->offset, __entry->count, + __entry->ret) ); TRACE_EVENT(afs_dir_check_failed, @@ -969,30 +956,33 @@ TRACE_EVENT(afs_dir_check_failed, ); TRACE_EVENT(afs_page_dirty, - TP_PROTO(struct afs_vnode *vnode, const char *where, - pgoff_t page, unsigned long priv), + TP_PROTO(struct afs_vnode *vnode, const char *where, struct page *page), - TP_ARGS(vnode, where, page, priv), + TP_ARGS(vnode, where, page), TP_STRUCT__entry( __field(struct afs_vnode *, vnode ) __field(const char *, where ) __field(pgoff_t, page ) - __field(unsigned long, priv ) + __field(unsigned long, from ) + __field(unsigned long, to ) ), TP_fast_assign( __entry->vnode = vnode; __entry->where = where; - __entry->page = page; - __entry->priv = priv; + __entry->page = page->index; + __entry->from = afs_page_dirty_from(page, page->private); + __entry->to = afs_page_dirty_to(page, page->private); + __entry->to |= (afs_is_page_dirty_mmapped(page->private) ? + (1UL << (BITS_PER_LONG - 1)) : 0); ), - TP_printk("vn=%p %lx %s %zx-%zx%s", + TP_printk("vn=%p %lx %s %lx-%lx%s", __entry->vnode, __entry->page, __entry->where, - afs_page_dirty_from(__entry->priv), - afs_page_dirty_to(__entry->priv), - afs_is_page_dirty_mmapped(__entry->priv) ? " M" : "") + __entry->from, + __entry->to & ~(1UL << (BITS_PER_LONG - 1)), + __entry->to & (1UL << (BITS_PER_LONG - 1)) ? " M" : "") ); TRACE_EVENT(afs_call_state, diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 0551ea65374f..a41dd8a0c730 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -1903,6 +1903,18 @@ DEFINE_EVENT(btrfs__block_group, btrfs_add_unused_block_group, TP_ARGS(bg_cache) ); +DEFINE_EVENT(btrfs__block_group, btrfs_add_reclaim_block_group, + TP_PROTO(const struct btrfs_block_group *bg_cache), + + TP_ARGS(bg_cache) +); + +DEFINE_EVENT(btrfs__block_group, btrfs_reclaim_block_group, + TP_PROTO(const struct btrfs_block_group *bg_cache), + + TP_ARGS(bg_cache) +); + DEFINE_EVENT(btrfs__block_group, btrfs_skip_unused_block_group, TP_PROTO(const struct btrfs_block_group *bg_cache), diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h index 5017a8829270..c3d354702cb0 100644 --- a/include/trace/events/cma.h +++ b/include/trace/events/cma.h @@ -8,28 +8,31 @@ #include <linux/types.h> #include <linux/tracepoint.h> -TRACE_EVENT(cma_alloc, +DECLARE_EVENT_CLASS(cma_alloc_class, - TP_PROTO(unsigned long pfn, const struct page *page, - unsigned int count, unsigned int align), + TP_PROTO(const char *name, unsigned long pfn, const struct page *page, + unsigned long count, unsigned int align), - TP_ARGS(pfn, page, count, align), + TP_ARGS(name, pfn, page, count, align), TP_STRUCT__entry( + __string(name, name) __field(unsigned long, pfn) __field(const struct page *, page) - __field(unsigned int, count) + __field(unsigned long, count) __field(unsigned int, align) ), TP_fast_assign( + __assign_str(name, name); __entry->pfn = pfn; __entry->page = page; __entry->count = count; __entry->align = align; ), - TP_printk("pfn=%lx page=%p count=%u align=%u", + TP_printk("name=%s pfn=%lx page=%p count=%lu align=%u", + __get_str(name), __entry->pfn, __entry->page, __entry->count, @@ -38,29 +41,72 @@ TRACE_EVENT(cma_alloc, TRACE_EVENT(cma_release, - TP_PROTO(unsigned long pfn, const struct page *page, - unsigned int count), + TP_PROTO(const char *name, unsigned long pfn, const struct page *page, + unsigned long count), - TP_ARGS(pfn, page, count), + TP_ARGS(name, pfn, page, count), TP_STRUCT__entry( + 
__string(name, name) __field(unsigned long, pfn) __field(const struct page *, page) - __field(unsigned int, count) + __field(unsigned long, count) ), TP_fast_assign( + __assign_str(name, name); __entry->pfn = pfn; __entry->page = page; __entry->count = count; ), - TP_printk("pfn=%lx page=%p count=%u", + TP_printk("name=%s pfn=%lx page=%p count=%lu", + __get_str(name), __entry->pfn, __entry->page, __entry->count) ); +TRACE_EVENT(cma_alloc_start, + + TP_PROTO(const char *name, unsigned long count, unsigned int align), + + TP_ARGS(name, count, align), + + TP_STRUCT__entry( + __string(name, name) + __field(unsigned long, count) + __field(unsigned int, align) + ), + + TP_fast_assign( + __assign_str(name, name); + __entry->count = count; + __entry->align = align; + ), + + TP_printk("name=%s count=%lu align=%u", + __get_str(name), + __entry->count, + __entry->align) +); + +DEFINE_EVENT(cma_alloc_class, cma_alloc_finish, + + TP_PROTO(const char *name, unsigned long pfn, const struct page *page, + unsigned long count, unsigned int align), + + TP_ARGS(name, pfn, page, count, align) +); + +DEFINE_EVENT(cma_alloc_class, cma_alloc_busy_retry, + + TP_PROTO(const char *name, unsigned long pfn, const struct page *page, + unsigned long count, unsigned int align), + + TP_ARGS(name, pfn, page, count, align) +); + #endif /* _TRACE_CMA_H */ /* This part must be outside protection */ diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index 70ae5497b73a..0ea36b2b0662 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -1358,64 +1358,6 @@ TRACE_EVENT(ext4_read_block_bitmap_load, __entry->group, __entry->prefetch) ); -TRACE_EVENT(ext4_direct_IO_enter, - TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw), - - TP_ARGS(inode, offset, len, rw), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( ino_t, ino ) - __field( loff_t, pos ) - __field( unsigned long, len ) - __field( int, rw ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->ino = inode->i_ino; - __entry->pos = offset; - __entry->len = len; - __entry->rw = rw; - ), - - TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d", - MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long) __entry->ino, - __entry->pos, __entry->len, __entry->rw) -); - -TRACE_EVENT(ext4_direct_IO_exit, - TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, - int rw, int ret), - - TP_ARGS(inode, offset, len, rw, ret), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( ino_t, ino ) - __field( loff_t, pos ) - __field( unsigned long, len ) - __field( int, rw ) - __field( int, ret ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->ino = inode->i_ino; - __entry->pos = offset; - __entry->len = len; - __entry->rw = rw; - __entry->ret = ret; - ), - - TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d ret %d", - MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long) __entry->ino, - __entry->pos, __entry->len, - __entry->rw, __entry->ret) -); - DECLARE_EVENT_CLASS(ext4__fallocate_mode, TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode), @@ -1962,124 +1904,6 @@ TRACE_EVENT(ext4_get_implied_cluster_alloc_exit, __entry->len, show_mflags(__entry->flags), __entry->ret) ); -TRACE_EVENT(ext4_ext_put_in_cache, - TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len, - ext4_fsblk_t start), - - TP_ARGS(inode, lblk, len, start), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( ino_t, ino ) - __field( ext4_lblk_t, lblk ) - __field( 
unsigned int, len ) - __field( ext4_fsblk_t, start ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->ino = inode->i_ino; - __entry->lblk = lblk; - __entry->len = len; - __entry->start = start; - ), - - TP_printk("dev %d,%d ino %lu lblk %u len %u start %llu", - MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long) __entry->ino, - (unsigned) __entry->lblk, - __entry->len, - (unsigned long long) __entry->start) -); - -TRACE_EVENT(ext4_ext_in_cache, - TP_PROTO(struct inode *inode, ext4_lblk_t lblk, int ret), - - TP_ARGS(inode, lblk, ret), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( ino_t, ino ) - __field( ext4_lblk_t, lblk ) - __field( int, ret ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->ino = inode->i_ino; - __entry->lblk = lblk; - __entry->ret = ret; - ), - - TP_printk("dev %d,%d ino %lu lblk %u ret %d", - MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long) __entry->ino, - (unsigned) __entry->lblk, - __entry->ret) - -); - -TRACE_EVENT(ext4_find_delalloc_range, - TP_PROTO(struct inode *inode, ext4_lblk_t from, ext4_lblk_t to, - int reverse, int found, ext4_lblk_t found_blk), - - TP_ARGS(inode, from, to, reverse, found, found_blk), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( ino_t, ino ) - __field( ext4_lblk_t, from ) - __field( ext4_lblk_t, to ) - __field( int, reverse ) - __field( int, found ) - __field( ext4_lblk_t, found_blk ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->ino = inode->i_ino; - __entry->from = from; - __entry->to = to; - __entry->reverse = reverse; - __entry->found = found; - __entry->found_blk = found_blk; - ), - - TP_printk("dev %d,%d ino %lu from %u to %u reverse %d found %d " - "(blk = %u)", - MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long) __entry->ino, - (unsigned) __entry->from, (unsigned) __entry->to, - __entry->reverse, __entry->found, - (unsigned) __entry->found_blk) -); - -TRACE_EVENT(ext4_get_reserved_cluster_alloc, - TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len), - - TP_ARGS(inode, lblk, len), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( ino_t, ino ) - __field( ext4_lblk_t, lblk ) - __field( unsigned int, len ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->ino = inode->i_ino; - __entry->lblk = lblk; - __entry->len = len; - ), - - TP_printk("dev %d,%d ino %lu lblk %u len %u", - MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long) __entry->ino, - (unsigned) __entry->lblk, - __entry->len) -); - TRACE_EVENT(ext4_ext_show_extent, TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk, unsigned short len), diff --git a/include/trace/events/intel_iommu.h b/include/trace/events/intel_iommu.h index e801f4910522..d233f2916584 100644 --- a/include/trace/events/intel_iommu.h +++ b/include/trace/events/intel_iommu.h @@ -15,126 +15,6 @@ #include <linux/tracepoint.h> #include <linux/intel-iommu.h> -DECLARE_EVENT_CLASS(dma_map, - TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr, - size_t size), - - TP_ARGS(dev, dev_addr, phys_addr, size), - - TP_STRUCT__entry( - __string(dev_name, dev_name(dev)) - __field(dma_addr_t, dev_addr) - __field(phys_addr_t, phys_addr) - __field(size_t, size) - ), - - TP_fast_assign( - __assign_str(dev_name, dev_name(dev)); - __entry->dev_addr = dev_addr; - __entry->phys_addr = phys_addr; - __entry->size = size; - ), - - TP_printk("dev=%s dev_addr=0x%llx phys_addr=0x%llx size=%zu", - __get_str(dev_name), - (unsigned long 
long)__entry->dev_addr, - (unsigned long long)__entry->phys_addr, - __entry->size) -); - -DEFINE_EVENT(dma_map, map_single, - TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr, - size_t size), - TP_ARGS(dev, dev_addr, phys_addr, size) -); - -DEFINE_EVENT(dma_map, bounce_map_single, - TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr, - size_t size), - TP_ARGS(dev, dev_addr, phys_addr, size) -); - -DECLARE_EVENT_CLASS(dma_unmap, - TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size), - - TP_ARGS(dev, dev_addr, size), - - TP_STRUCT__entry( - __string(dev_name, dev_name(dev)) - __field(dma_addr_t, dev_addr) - __field(size_t, size) - ), - - TP_fast_assign( - __assign_str(dev_name, dev_name(dev)); - __entry->dev_addr = dev_addr; - __entry->size = size; - ), - - TP_printk("dev=%s dev_addr=0x%llx size=%zu", - __get_str(dev_name), - (unsigned long long)__entry->dev_addr, - __entry->size) -); - -DEFINE_EVENT(dma_unmap, unmap_single, - TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size), - TP_ARGS(dev, dev_addr, size) -); - -DEFINE_EVENT(dma_unmap, unmap_sg, - TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size), - TP_ARGS(dev, dev_addr, size) -); - -DEFINE_EVENT(dma_unmap, bounce_unmap_single, - TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size), - TP_ARGS(dev, dev_addr, size) -); - -DECLARE_EVENT_CLASS(dma_map_sg, - TP_PROTO(struct device *dev, int index, int total, - struct scatterlist *sg), - - TP_ARGS(dev, index, total, sg), - - TP_STRUCT__entry( - __string(dev_name, dev_name(dev)) - __field(dma_addr_t, dev_addr) - __field(phys_addr_t, phys_addr) - __field(size_t, size) - __field(int, index) - __field(int, total) - ), - - TP_fast_assign( - __assign_str(dev_name, dev_name(dev)); - __entry->dev_addr = sg->dma_address; - __entry->phys_addr = sg_phys(sg); - __entry->size = sg->dma_length; - __entry->index = index; - __entry->total = total; - ), - - TP_printk("dev=%s [%d/%d] dev_addr=0x%llx phys_addr=0x%llx size=%zu", - __get_str(dev_name), __entry->index, __entry->total, - (unsigned long long)__entry->dev_addr, - (unsigned long long)__entry->phys_addr, - __entry->size) -); - -DEFINE_EVENT(dma_map_sg, map_sg, - TP_PROTO(struct device *dev, int index, int total, - struct scatterlist *sg), - TP_ARGS(dev, index, total, sg) -); - -DEFINE_EVENT(dma_map_sg, bounce_map_sg, - TP_PROTO(struct device *dev, int index, int total, - struct scatterlist *sg), - TP_ARGS(dev, index, total, sg) -); - TRACE_EVENT(qi_submit, TP_PROTO(struct intel_iommu *iommu, u64 qw0, u64 qw1, u64 qw2, u64 qw3), diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h index 9f0d3b7d56b0..abb8b24744fd 100644 --- a/include/trace/events/io_uring.h +++ b/include/trace/events/io_uring.h @@ -49,7 +49,7 @@ TRACE_EVENT(io_uring_create, ); /** - * io_uring_register - called after a buffer/file/eventfd was succesfully + * io_uring_register - called after a buffer/file/eventfd was successfully * registered for a ring * * @ctx: pointer to a ring context structure @@ -290,29 +290,32 @@ TRACE_EVENT(io_uring_fail_link, * @ctx: pointer to a ring context structure * @user_data: user data associated with the request * @res: result of the request + * @cflags: completion flags * */ TRACE_EVENT(io_uring_complete, - TP_PROTO(void *ctx, u64 user_data, long res), + TP_PROTO(void *ctx, u64 user_data, long res, unsigned cflags), - TP_ARGS(ctx, user_data, res), + TP_ARGS(ctx, user_data, res, cflags), TP_STRUCT__entry ( __field( void *, ctx ) __field( 
u64, user_data ) __field( long, res ) + __field( unsigned, cflags ) ), TP_fast_assign( __entry->ctx = ctx; __entry->user_data = user_data; __entry->res = res; + __entry->cflags = cflags; ), - TP_printk("ring %p, user_data 0x%llx, result %ld", + TP_printk("ring %p, user_data 0x%llx, result %ld, cflags %x", __entry->ctx, (unsigned long long)__entry->user_data, - __entry->res) + __entry->res, __entry->cflags) ); diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index 3a60b6b6db32..829a75692cc0 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -343,6 +343,26 @@ static unsigned int __maybe_unused mm_ptr_to_hash(const void *ptr) #define __PTR_TO_HASHVAL #endif +#define TRACE_MM_PAGES \ + EM(MM_FILEPAGES) \ + EM(MM_ANONPAGES) \ + EM(MM_SWAPENTS) \ + EMe(MM_SHMEMPAGES) + +#undef EM +#undef EMe + +#define EM(a) TRACE_DEFINE_ENUM(a); +#define EMe(a) TRACE_DEFINE_ENUM(a); + +TRACE_MM_PAGES + +#undef EM +#undef EMe + +#define EM(a) { a, #a }, +#define EMe(a) { a, #a } + TRACE_EVENT(rss_stat, TP_PROTO(struct mm_struct *mm, @@ -365,10 +385,10 @@ TRACE_EVENT(rss_stat, __entry->size = (count << PAGE_SHIFT); ), - TP_printk("mm_id=%u curr=%d member=%d size=%ldB", + TP_printk("mm_id=%u curr=%d type=%s size=%ldB", __entry->mm_id, __entry->curr, - __entry->member, + __print_symbolic(__entry->member, TRACE_MM_PAGES), __entry->size) ); #endif /* _TRACE_KMEM_H */ diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index 49d7d0fe29f6..37e1e1a2d67d 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -255,30 +255,6 @@ TRACE_EVENT(kvm_fpu, TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol)) ); -TRACE_EVENT(kvm_age_page, - TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref), - TP_ARGS(gfn, level, slot, ref), - - TP_STRUCT__entry( - __field( u64, hva ) - __field( u64, gfn ) - __field( u8, level ) - __field( u8, referenced ) - ), - - TP_fast_assign( - __entry->gfn = gfn; - __entry->level = level; - __entry->hva = ((gfn - slot->base_gfn) << - PAGE_SHIFT) + slot->userspace_addr; - __entry->referenced = ref; - ), - - TP_printk("hva %llx gfn %llx level %u %s", - __entry->hva, __entry->gfn, __entry->level, - __entry->referenced ? 
"YOUNG" : "OLD") -); - #ifdef CONFIG_KVM_ASYNC_PF DECLARE_EVENT_CLASS(kvm_async_get_page_class, @@ -462,6 +438,72 @@ TRACE_EVENT(kvm_dirty_ring_exit, TP_printk("vcpu %d", __entry->vcpu_id) ); +TRACE_EVENT(kvm_unmap_hva_range, + TP_PROTO(unsigned long start, unsigned long end), + TP_ARGS(start, end), + + TP_STRUCT__entry( + __field( unsigned long, start ) + __field( unsigned long, end ) + ), + + TP_fast_assign( + __entry->start = start; + __entry->end = end; + ), + + TP_printk("mmu notifier unmap range: %#016lx -- %#016lx", + __entry->start, __entry->end) +); + +TRACE_EVENT(kvm_set_spte_hva, + TP_PROTO(unsigned long hva), + TP_ARGS(hva), + + TP_STRUCT__entry( + __field( unsigned long, hva ) + ), + + TP_fast_assign( + __entry->hva = hva; + ), + + TP_printk("mmu notifier set pte hva: %#016lx", __entry->hva) +); + +TRACE_EVENT(kvm_age_hva, + TP_PROTO(unsigned long start, unsigned long end), + TP_ARGS(start, end), + + TP_STRUCT__entry( + __field( unsigned long, start ) + __field( unsigned long, end ) + ), + + TP_fast_assign( + __entry->start = start; + __entry->end = end; + ), + + TP_printk("mmu notifier age hva: %#016lx -- %#016lx", + __entry->start, __entry->end) +); + +TRACE_EVENT(kvm_test_age_hva, + TP_PROTO(unsigned long hva), + TP_ARGS(hva), + + TP_STRUCT__entry( + __field( unsigned long, hva ) + ), + + TP_fast_assign( + __entry->hva = hva; + ), + + TP_printk("mmu notifier test age hva: %#016lx", __entry->hva) +); + #endif /* _TRACE_KVM_MAIN_H */ /* This part must be outside protection */ diff --git a/include/trace/events/kyber.h b/include/trace/events/kyber.h index c0e7d24ca256..f9802562edf6 100644 --- a/include/trace/events/kyber.h +++ b/include/trace/events/kyber.h @@ -30,7 +30,7 @@ TRACE_EVENT(kyber_latency, ), TP_fast_assign( - __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent))); + __entry->dev = disk_devt(queue_to_disk(q)); strlcpy(__entry->domain, domain, sizeof(__entry->domain)); strlcpy(__entry->type, type, sizeof(__entry->type)); __entry->percentile = percentile; @@ -59,7 +59,7 @@ TRACE_EVENT(kyber_adjust, ), TP_fast_assign( - __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent))); + __entry->dev = disk_devt(queue_to_disk(q)); strlcpy(__entry->domain, domain, sizeof(__entry->domain)); __entry->depth = depth; ), @@ -81,7 +81,7 @@ TRACE_EVENT(kyber_throttled, ), TP_fast_assign( - __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent))); + __entry->dev = disk_devt(queue_to_disk(q)); strlcpy(__entry->domain, domain, sizeof(__entry->domain)); ), diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h index 4d434398d64d..9fb2a3bbcdfb 100644 --- a/include/trace/events/migrate.h +++ b/include/trace/events/migrate.h @@ -20,7 +20,8 @@ EM( MR_SYSCALL, "syscall_or_cpuset") \ EM( MR_MEMPOLICY_MBIND, "mempolicy_mbind") \ EM( MR_NUMA_MISPLACED, "numa_misplaced") \ - EMe(MR_CONTIG_RANGE, "contig_range") + EM( MR_CONTIG_RANGE, "contig_range") \ + EMe(MR_LONGTERM_PIN, "longterm_pin") /* * First define the enums in the above macros to be exported to userspace @@ -81,6 +82,28 @@ TRACE_EVENT(mm_migrate_pages, __print_symbolic(__entry->mode, MIGRATE_MODE), __print_symbolic(__entry->reason, MIGRATE_REASON)) ); + +TRACE_EVENT(mm_migrate_pages_start, + + TP_PROTO(enum migrate_mode mode, int reason), + + TP_ARGS(mode, reason), + + TP_STRUCT__entry( + __field(enum migrate_mode, mode) + __field(int, reason) + ), + + TP_fast_assign( + __entry->mode = mode; + __entry->reason = reason; + ), + + TP_printk("mode=%s reason=%s", + 
__print_symbolic(__entry->mode, MIGRATE_MODE), + __print_symbolic(__entry->reason, MIGRATE_REASON)) +); + #endif /* _TRACE_MIGRATE_H */ /* This part must be outside protection */ diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 67018d367b9f..629c7a0eaff2 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -137,6 +137,12 @@ IF_HAVE_PG_ARCH_2(PG_arch_2, "arch_2" ) #define IF_HAVE_VM_SOFTDIRTY(flag,name) #endif +#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR +# define IF_HAVE_UFFD_MINOR(flag, name) {flag, name}, +#else +# define IF_HAVE_UFFD_MINOR(flag, name) +#endif + #define __def_vmaflag_names \ {VM_READ, "read" }, \ {VM_WRITE, "write" }, \ @@ -148,6 +154,7 @@ IF_HAVE_PG_ARCH_2(PG_arch_2, "arch_2" ) {VM_MAYSHARE, "mayshare" }, \ {VM_GROWSDOWN, "growsdown" }, \ {VM_UFFD_MISSING, "uffd_missing" }, \ +IF_HAVE_UFFD_MINOR(VM_UFFD_MINOR, "uffd_minor" ) \ {VM_PFNMAP, "pfnmap" }, \ {VM_DENYWRITE, "denywrite" }, \ {VM_UFFD_WP, "uffd_wp" }, \ diff --git a/include/trace/events/mptcp.h b/include/trace/events/mptcp.h new file mode 100644 index 000000000000..775a46d0b0f0 --- /dev/null +++ b/include/trace/events/mptcp.h @@ -0,0 +1,173 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mptcp + +#if !defined(_TRACE_MPTCP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_MPTCP_H + +#include <linux/tracepoint.h> + +#define show_mapping_status(status) \ + __print_symbolic(status, \ + { 0, "MAPPING_OK" }, \ + { 1, "MAPPING_INVALID" }, \ + { 2, "MAPPING_EMPTY" }, \ + { 3, "MAPPING_DATA_FIN" }, \ + { 4, "MAPPING_DUMMY" }) + +TRACE_EVENT(mptcp_subflow_get_send, + + TP_PROTO(struct mptcp_subflow_context *subflow), + + TP_ARGS(subflow), + + TP_STRUCT__entry( + __field(bool, active) + __field(bool, free) + __field(u32, snd_wnd) + __field(u32, pace) + __field(u8, backup) + __field(u64, ratio) + ), + + TP_fast_assign( + struct sock *ssk; + + __entry->active = mptcp_subflow_active(subflow); + __entry->backup = subflow->backup; + + if (subflow->tcp_sock && sk_fullsock(subflow->tcp_sock)) + __entry->free = sk_stream_memory_free(subflow->tcp_sock); + else + __entry->free = 0; + + ssk = mptcp_subflow_tcp_sock(subflow); + if (ssk && sk_fullsock(ssk)) { + __entry->snd_wnd = tcp_sk(ssk)->snd_wnd; + __entry->pace = ssk->sk_pacing_rate; + } else { + __entry->snd_wnd = 0; + __entry->pace = 0; + } + + if (ssk && sk_fullsock(ssk) && __entry->pace) + __entry->ratio = div_u64((u64)ssk->sk_wmem_queued << 32, __entry->pace); + else + __entry->ratio = 0; + ), + + TP_printk("active=%d free=%d snd_wnd=%u pace=%u backup=%u ratio=%llu", + __entry->active, __entry->free, + __entry->snd_wnd, __entry->pace, + __entry->backup, __entry->ratio) +); + +DECLARE_EVENT_CLASS(mptcp_dump_mpext, + + TP_PROTO(struct mptcp_ext *mpext), + + TP_ARGS(mpext), + + TP_STRUCT__entry( + __field(u64, data_ack) + __field(u64, data_seq) + __field(u32, subflow_seq) + __field(u16, data_len) + __field(u8, use_map) + __field(u8, dsn64) + __field(u8, data_fin) + __field(u8, use_ack) + __field(u8, ack64) + __field(u8, mpc_map) + __field(u8, frozen) + __field(u8, reset_transient) + __field(u8, reset_reason) + ), + + TP_fast_assign( + __entry->data_ack = mpext->ack64 ? 
mpext->data_ack : mpext->data_ack32; + __entry->data_seq = mpext->data_seq; + __entry->subflow_seq = mpext->subflow_seq; + __entry->data_len = mpext->data_len; + __entry->use_map = mpext->use_map; + __entry->dsn64 = mpext->dsn64; + __entry->data_fin = mpext->data_fin; + __entry->use_ack = mpext->use_ack; + __entry->ack64 = mpext->ack64; + __entry->mpc_map = mpext->mpc_map; + __entry->frozen = mpext->frozen; + __entry->reset_transient = mpext->reset_transient; + __entry->reset_reason = mpext->reset_reason; + ), + + TP_printk("data_ack=%llu data_seq=%llu subflow_seq=%u data_len=%u use_map=%u dsn64=%u data_fin=%u use_ack=%u ack64=%u mpc_map=%u frozen=%u reset_transient=%u reset_reason=%u", + __entry->data_ack, __entry->data_seq, + __entry->subflow_seq, __entry->data_len, + __entry->use_map, __entry->dsn64, + __entry->data_fin, __entry->use_ack, + __entry->ack64, __entry->mpc_map, + __entry->frozen, __entry->reset_transient, + __entry->reset_reason) +); + +DEFINE_EVENT(mptcp_dump_mpext, get_mapping_status, + TP_PROTO(struct mptcp_ext *mpext), + TP_ARGS(mpext)); + +TRACE_EVENT(ack_update_msk, + + TP_PROTO(u64 data_ack, u64 old_snd_una, + u64 new_snd_una, u64 new_wnd_end, + u64 msk_wnd_end), + + TP_ARGS(data_ack, old_snd_una, + new_snd_una, new_wnd_end, + msk_wnd_end), + + TP_STRUCT__entry( + __field(u64, data_ack) + __field(u64, old_snd_una) + __field(u64, new_snd_una) + __field(u64, new_wnd_end) + __field(u64, msk_wnd_end) + ), + + TP_fast_assign( + __entry->data_ack = data_ack; + __entry->old_snd_una = old_snd_una; + __entry->new_snd_una = new_snd_una; + __entry->new_wnd_end = new_wnd_end; + __entry->msk_wnd_end = msk_wnd_end; + ), + + TP_printk("data_ack=%llu old_snd_una=%llu new_snd_una=%llu new_wnd_end=%llu msk_wnd_end=%llu", + __entry->data_ack, __entry->old_snd_una, + __entry->new_snd_una, __entry->new_wnd_end, + __entry->msk_wnd_end) +); + +TRACE_EVENT(subflow_check_data_avail, + + TP_PROTO(__u8 status, struct sk_buff *skb), + + TP_ARGS(status, skb), + + TP_STRUCT__entry( + __field(u8, status) + __field(const void *, skb) + ), + + TP_fast_assign( + __entry->status = status; + __entry->skb = skb; + ), + + TP_printk("mapping_status=%s, skb=%p", + show_mapping_status(__entry->status), + __entry->skb) +); + +#endif /* _TRACE_MPTCP_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h new file mode 100644 index 000000000000..de1c64635e42 --- /dev/null +++ b/include/trace/events/netfs.h @@ -0,0 +1,261 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Network filesystem support module tracepoints + * + * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM netfs + +#if !defined(_TRACE_NETFS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_NETFS_H + +#include <linux/tracepoint.h> + +/* + * Define enums for tracing information. 
+ */ +#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY +#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY + +enum netfs_read_trace { + netfs_read_trace_expanded, + netfs_read_trace_readahead, + netfs_read_trace_readpage, + netfs_read_trace_write_begin, +}; + +enum netfs_rreq_trace { + netfs_rreq_trace_assess, + netfs_rreq_trace_done, + netfs_rreq_trace_free, + netfs_rreq_trace_resubmit, + netfs_rreq_trace_unlock, + netfs_rreq_trace_unmark, + netfs_rreq_trace_write, +}; + +enum netfs_sreq_trace { + netfs_sreq_trace_download_instead, + netfs_sreq_trace_free, + netfs_sreq_trace_prepare, + netfs_sreq_trace_resubmit_short, + netfs_sreq_trace_submit, + netfs_sreq_trace_terminated, + netfs_sreq_trace_write, + netfs_sreq_trace_write_skip, + netfs_sreq_trace_write_term, +}; + +enum netfs_failure { + netfs_fail_check_write_begin, + netfs_fail_copy_to_cache, + netfs_fail_read, + netfs_fail_short_readpage, + netfs_fail_short_write_begin, + netfs_fail_prepare_write, +}; + +#endif + +#define netfs_read_traces \ + EM(netfs_read_trace_expanded, "EXPANDED ") \ + EM(netfs_read_trace_readahead, "READAHEAD") \ + EM(netfs_read_trace_readpage, "READPAGE ") \ + E_(netfs_read_trace_write_begin, "WRITEBEGN") + +#define netfs_rreq_traces \ + EM(netfs_rreq_trace_assess, "ASSESS") \ + EM(netfs_rreq_trace_done, "DONE ") \ + EM(netfs_rreq_trace_free, "FREE ") \ + EM(netfs_rreq_trace_resubmit, "RESUBM") \ + EM(netfs_rreq_trace_unlock, "UNLOCK") \ + EM(netfs_rreq_trace_unmark, "UNMARK") \ + E_(netfs_rreq_trace_write, "WRITE ") + +#define netfs_sreq_sources \ + EM(NETFS_FILL_WITH_ZEROES, "ZERO") \ + EM(NETFS_DOWNLOAD_FROM_SERVER, "DOWN") \ + EM(NETFS_READ_FROM_CACHE, "READ") \ + E_(NETFS_INVALID_READ, "INVL") \ + +#define netfs_sreq_traces \ + EM(netfs_sreq_trace_download_instead, "RDOWN") \ + EM(netfs_sreq_trace_free, "FREE ") \ + EM(netfs_sreq_trace_prepare, "PREP ") \ + EM(netfs_sreq_trace_resubmit_short, "SHORT") \ + EM(netfs_sreq_trace_submit, "SUBMT") \ + EM(netfs_sreq_trace_terminated, "TERM ") \ + EM(netfs_sreq_trace_write, "WRITE") \ + EM(netfs_sreq_trace_write_skip, "SKIP ") \ + E_(netfs_sreq_trace_write_term, "WTERM") + +#define netfs_failures \ + EM(netfs_fail_check_write_begin, "check-write-begin") \ + EM(netfs_fail_copy_to_cache, "copy-to-cache") \ + EM(netfs_fail_read, "read") \ + EM(netfs_fail_short_readpage, "short-readpage") \ + EM(netfs_fail_short_write_begin, "short-write-begin") \ + E_(netfs_fail_prepare_write, "prep-write") + + +/* + * Export enum symbols via userspace. + */ +#undef EM +#undef E_ +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define E_(a, b) TRACE_DEFINE_ENUM(a); + +netfs_read_traces; +netfs_rreq_traces; +netfs_sreq_sources; +netfs_sreq_traces; +netfs_failures; + +/* + * Now redefine the EM() and E_() macros to map the enums to the strings that + * will be printed in the output. 
+ */ +#undef EM +#undef E_ +#define EM(a, b) { a, b }, +#define E_(a, b) { a, b } + +TRACE_EVENT(netfs_read, + TP_PROTO(struct netfs_read_request *rreq, + loff_t start, size_t len, + enum netfs_read_trace what), + + TP_ARGS(rreq, start, len, what), + + TP_STRUCT__entry( + __field(unsigned int, rreq ) + __field(unsigned int, cookie ) + __field(loff_t, start ) + __field(size_t, len ) + __field(enum netfs_read_trace, what ) + ), + + TP_fast_assign( + __entry->rreq = rreq->debug_id; + __entry->cookie = rreq->cookie_debug_id; + __entry->start = start; + __entry->len = len; + __entry->what = what; + ), + + TP_printk("R=%08x %s c=%08x s=%llx %zx", + __entry->rreq, + __print_symbolic(__entry->what, netfs_read_traces), + __entry->cookie, + __entry->start, __entry->len) + ); + +TRACE_EVENT(netfs_rreq, + TP_PROTO(struct netfs_read_request *rreq, + enum netfs_rreq_trace what), + + TP_ARGS(rreq, what), + + TP_STRUCT__entry( + __field(unsigned int, rreq ) + __field(unsigned short, flags ) + __field(enum netfs_rreq_trace, what ) + ), + + TP_fast_assign( + __entry->rreq = rreq->debug_id; + __entry->flags = rreq->flags; + __entry->what = what; + ), + + TP_printk("R=%08x %s f=%02x", + __entry->rreq, + __print_symbolic(__entry->what, netfs_rreq_traces), + __entry->flags) + ); + +TRACE_EVENT(netfs_sreq, + TP_PROTO(struct netfs_read_subrequest *sreq, + enum netfs_sreq_trace what), + + TP_ARGS(sreq, what), + + TP_STRUCT__entry( + __field(unsigned int, rreq ) + __field(unsigned short, index ) + __field(short, error ) + __field(unsigned short, flags ) + __field(enum netfs_read_source, source ) + __field(enum netfs_sreq_trace, what ) + __field(size_t, len ) + __field(size_t, transferred ) + __field(loff_t, start ) + ), + + TP_fast_assign( + __entry->rreq = sreq->rreq->debug_id; + __entry->index = sreq->debug_index; + __entry->error = sreq->error; + __entry->flags = sreq->flags; + __entry->source = sreq->source; + __entry->what = what; + __entry->len = sreq->len; + __entry->transferred = sreq->transferred; + __entry->start = sreq->start; + ), + + TP_printk("R=%08x[%u] %s %s f=%02x s=%llx %zx/%zx e=%d", + __entry->rreq, __entry->index, + __print_symbolic(__entry->what, netfs_sreq_traces), + __print_symbolic(__entry->source, netfs_sreq_sources), + __entry->flags, + __entry->start, __entry->transferred, __entry->len, + __entry->error) + ); + +TRACE_EVENT(netfs_failure, + TP_PROTO(struct netfs_read_request *rreq, + struct netfs_read_subrequest *sreq, + int error, enum netfs_failure what), + + TP_ARGS(rreq, sreq, error, what), + + TP_STRUCT__entry( + __field(unsigned int, rreq ) + __field(unsigned short, index ) + __field(short, error ) + __field(unsigned short, flags ) + __field(enum netfs_read_source, source ) + __field(enum netfs_failure, what ) + __field(size_t, len ) + __field(size_t, transferred ) + __field(loff_t, start ) + ), + + TP_fast_assign( + __entry->rreq = rreq->debug_id; + __entry->index = sreq ? sreq->debug_index : 0; + __entry->error = error; + __entry->flags = sreq ? sreq->flags : 0; + __entry->source = sreq ? sreq->source : NETFS_INVALID_READ; + __entry->what = what; + __entry->len = sreq ? sreq->len : 0; + __entry->transferred = sreq ? sreq->transferred : 0; + __entry->start = sreq ? 
sreq->start : 0; + ), + + TP_printk("R=%08x[%u] %s f=%02x s=%llx %zx/%zx %s e=%d", + __entry->rreq, __entry->index, + __print_symbolic(__entry->source, netfs_sreq_sources), + __entry->flags, + __entry->start, __entry->transferred, __entry->len, + __print_symbolic(__entry->what, netfs_failures), + __entry->error) + ); + +#endif /* _TRACE_NETFS_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/random.h b/include/trace/events/random.h index 9570a10cb949..3d7b432ca5f3 100644 --- a/include/trace/events/random.h +++ b/include/trace/events/random.h @@ -85,28 +85,6 @@ TRACE_EVENT(credit_entropy_bits, __entry->entropy_count, (void *)__entry->IP) ); -TRACE_EVENT(push_to_pool, - TP_PROTO(const char *pool_name, int pool_bits, int input_bits), - - TP_ARGS(pool_name, pool_bits, input_bits), - - TP_STRUCT__entry( - __field( const char *, pool_name ) - __field( int, pool_bits ) - __field( int, input_bits ) - ), - - TP_fast_assign( - __entry->pool_name = pool_name; - __entry->pool_bits = pool_bits; - __entry->input_bits = input_bits; - ), - - TP_printk("%s: pool_bits %d input_pool_bits %d", - __entry->pool_name, __entry->pool_bits, - __entry->input_bits) -); - TRACE_EVENT(debit_entropy, TP_PROTO(const char *pool_name, int debit_bits), @@ -161,35 +139,6 @@ TRACE_EVENT(add_disk_randomness, MINOR(__entry->dev), __entry->input_bits) ); -TRACE_EVENT(xfer_secondary_pool, - TP_PROTO(const char *pool_name, int xfer_bits, int request_bits, - int pool_entropy, int input_entropy), - - TP_ARGS(pool_name, xfer_bits, request_bits, pool_entropy, - input_entropy), - - TP_STRUCT__entry( - __field( const char *, pool_name ) - __field( int, xfer_bits ) - __field( int, request_bits ) - __field( int, pool_entropy ) - __field( int, input_entropy ) - ), - - TP_fast_assign( - __entry->pool_name = pool_name; - __entry->xfer_bits = xfer_bits; - __entry->request_bits = request_bits; - __entry->pool_entropy = pool_entropy; - __entry->input_entropy = input_entropy; - ), - - TP_printk("pool %s xfer_bits %d request_bits %d pool_entropy %d " - "input_entropy %d", __entry->pool_name, __entry->xfer_bits, - __entry->request_bits, __entry->pool_entropy, - __entry->input_entropy) -); - DECLARE_EVENT_CLASS(random__get_random_bytes, TP_PROTO(int nbytes, unsigned long IP), @@ -253,38 +202,6 @@ DEFINE_EVENT(random__extract_entropy, extract_entropy, TP_ARGS(pool_name, nbytes, entropy_count, IP) ); -DEFINE_EVENT(random__extract_entropy, extract_entropy_user, - TP_PROTO(const char *pool_name, int nbytes, int entropy_count, - unsigned long IP), - - TP_ARGS(pool_name, nbytes, entropy_count, IP) -); - -TRACE_EVENT(random_read, - TP_PROTO(int got_bits, int need_bits, int pool_left, int input_left), - - TP_ARGS(got_bits, need_bits, pool_left, input_left), - - TP_STRUCT__entry( - __field( int, got_bits ) - __field( int, need_bits ) - __field( int, pool_left ) - __field( int, input_left ) - ), - - TP_fast_assign( - __entry->got_bits = got_bits; - __entry->need_bits = need_bits; - __entry->pool_left = pool_left; - __entry->input_left = input_left; - ), - - TP_printk("got_bits %d still_needed_bits %d " - "blocking_pool_entropy_left %d input_entropy_left %d", - __entry->got_bits, __entry->got_bits, __entry->pool_left, - __entry->input_left) -); - TRACE_EVENT(urandom_read, TP_PROTO(int got_bits, int pool_left, int input_left), diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index 5fc29400e1a2..6768b64bc738 100644 --- a/include/trace/events/rcu.h +++ 
b/include/trace/events/rcu.h @@ -48,7 +48,7 @@ TRACE_EVENT(rcu_utilization, * RCU flavor, the grace-period number, and a string identifying the * grace-period-related event as follows: * - * "AccReadyCB": CPU acclerates new callbacks to RCU_NEXT_READY_TAIL. + * "AccReadyCB": CPU accelerates new callbacks to RCU_NEXT_READY_TAIL. * "AccWaitCB": CPU accelerates new callbacks to RCU_WAIT_TAIL. * "newreq": Request a new grace period. * "start": Start a grace period. @@ -432,6 +432,34 @@ TRACE_EVENT_RCU(rcu_fqs, __entry->cpu, __entry->qsevent) ); +/* + * Tracepoint for RCU stall events. Takes a string identifying the RCU flavor + * and a string identifying which function detected the RCU stall as follows: + * + * "StallDetected": Scheduler-tick detects other CPU's stalls. + * "SelfDetected": Scheduler-tick detects a current CPU's stall. + * "ExpeditedStall": Expedited grace period detects stalls. + */ +TRACE_EVENT(rcu_stall_warning, + + TP_PROTO(const char *rcuname, const char *msg), + + TP_ARGS(rcuname, msg), + + TP_STRUCT__entry( + __field(const char *, rcuname) + __field(const char *, msg) + ), + + TP_fast_assign( + __entry->rcuname = rcuname; + __entry->msg = msg; + ), + + TP_printk("%s %s", + __entry->rcuname, __entry->msg) +); + #endif /* #if defined(CONFIG_TREE_RCU) */ /* diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index c838e7ac1c2d..bd55908c1bef 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -60,6 +60,46 @@ DECLARE_EVENT_CLASS(rpcrdma_completion_class, ), \ TP_ARGS(wc, cid)) +DECLARE_EVENT_CLASS(rpcrdma_mr_completion_class, + TP_PROTO( + const struct ib_wc *wc, + const struct rpc_rdma_cid *cid + ), + + TP_ARGS(wc, cid), + + TP_STRUCT__entry( + __field(u32, cq_id) + __field(int, completion_id) + __field(unsigned long, status) + __field(unsigned int, vendor_err) + ), + + TP_fast_assign( + __entry->cq_id = cid->ci_queue_id; + __entry->completion_id = cid->ci_completion_id; + __entry->status = wc->status; + if (wc->status) + __entry->vendor_err = wc->vendor_err; + else + __entry->vendor_err = 0; + ), + + TP_printk("cq.id=%u mr.id=%d status=%s (%lu/0x%x)", + __entry->cq_id, __entry->completion_id, + rdma_show_wc_status(__entry->status), + __entry->status, __entry->vendor_err + ) +); + +#define DEFINE_MR_COMPLETION_EVENT(name) \ + DEFINE_EVENT(rpcrdma_mr_completion_class, name, \ + TP_PROTO( \ + const struct ib_wc *wc, \ + const struct rpc_rdma_cid *cid \ + ), \ + TP_ARGS(wc, cid)) + DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class, TP_PROTO( const struct ib_wc *wc, @@ -150,19 +190,17 @@ DECLARE_EVENT_CLASS(xprtrdma_rxprt, TP_ARGS(r_xprt), TP_STRUCT__entry( - __field(const void *, r_xprt) __string(addr, rpcrdma_addrstr(r_xprt)) __string(port, rpcrdma_portstr(r_xprt)) ), TP_fast_assign( - __entry->r_xprt = r_xprt; __assign_str(addr, rpcrdma_addrstr(r_xprt)); __assign_str(port, rpcrdma_portstr(r_xprt)); ), - TP_printk("peer=[%s]:%s r_xprt=%p", - __get_str(addr), __get_str(port), __entry->r_xprt + TP_printk("peer=[%s]:%s", + __get_str(addr), __get_str(port) ) ); @@ -182,7 +220,6 @@ DECLARE_EVENT_CLASS(xprtrdma_connect_class, TP_ARGS(r_xprt, rc), TP_STRUCT__entry( - __field(const void *, r_xprt) __field(int, rc) __field(int, connect_status) __string(addr, rpcrdma_addrstr(r_xprt)) @@ -190,15 +227,14 @@ DECLARE_EVENT_CLASS(xprtrdma_connect_class, ), TP_fast_assign( - __entry->r_xprt = r_xprt; __entry->rc = rc; __entry->connect_status = r_xprt->rx_ep->re_connect_status; __assign_str(addr, rpcrdma_addrstr(r_xprt)); 
__assign_str(port, rpcrdma_portstr(r_xprt)); ), - TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connection status=%d", - __get_str(addr), __get_str(port), __entry->r_xprt, + TP_printk("peer=[%s]:%s rc=%d connection status=%d", + __get_str(addr), __get_str(port), __entry->rc, __entry->connect_status ) ); @@ -343,7 +379,7 @@ DECLARE_EVENT_CLASS(xprtrdma_mr_class, __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; - __entry->mr_id = mr->frwr.fr_mr->res.id; + __entry->mr_id = mr->mr_ibmr->res.id; __entry->nents = mr->mr_nents; __entry->handle = mr->mr_handle; __entry->length = mr->mr_length; @@ -384,7 +420,7 @@ DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class, ), TP_fast_assign( - __entry->mr_id = mr->frwr.fr_mr->res.id; + __entry->mr_id = mr->mr_ibmr->res.id; __entry->nents = mr->mr_nents; __entry->handle = mr->mr_handle; __entry->length = mr->mr_length; @@ -495,22 +531,19 @@ TRACE_EVENT(xprtrdma_op_connect, TP_ARGS(r_xprt, delay), TP_STRUCT__entry( - __field(const void *, r_xprt) __field(unsigned long, delay) __string(addr, rpcrdma_addrstr(r_xprt)) __string(port, rpcrdma_portstr(r_xprt)) ), TP_fast_assign( - __entry->r_xprt = r_xprt; __entry->delay = delay; __assign_str(addr, rpcrdma_addrstr(r_xprt)); __assign_str(port, rpcrdma_portstr(r_xprt)); ), - TP_printk("peer=[%s]:%s r_xprt=%p delay=%lu", - __get_str(addr), __get_str(port), __entry->r_xprt, - __entry->delay + TP_printk("peer=[%s]:%s delay=%lu", + __get_str(addr), __get_str(port), __entry->delay ) ); @@ -525,7 +558,6 @@ TRACE_EVENT(xprtrdma_op_set_cto, TP_ARGS(r_xprt, connect, reconnect), TP_STRUCT__entry( - __field(const void *, r_xprt) __field(unsigned long, connect) __field(unsigned long, reconnect) __string(addr, rpcrdma_addrstr(r_xprt)) @@ -533,51 +565,18 @@ TRACE_EVENT(xprtrdma_op_set_cto, ), TP_fast_assign( - __entry->r_xprt = r_xprt; __entry->connect = connect; __entry->reconnect = reconnect; __assign_str(addr, rpcrdma_addrstr(r_xprt)); __assign_str(port, rpcrdma_portstr(r_xprt)); ), - TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu", - __get_str(addr), __get_str(port), __entry->r_xprt, + TP_printk("peer=[%s]:%s connect=%lu reconnect=%lu", + __get_str(addr), __get_str(port), __entry->connect / HZ, __entry->reconnect / HZ ) ); -TRACE_EVENT(xprtrdma_qp_event, - TP_PROTO( - const struct rpcrdma_ep *ep, - const struct ib_event *event - ), - - TP_ARGS(ep, event), - - TP_STRUCT__entry( - __field(unsigned long, event) - __string(name, event->device->name) - __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6)) - __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6)) - ), - - TP_fast_assign( - const struct rdma_cm_id *id = ep->re_id; - - __entry->event = event->event; - __assign_str(name, event->device->name); - memcpy(__entry->srcaddr, &id->route.addr.src_addr, - sizeof(struct sockaddr_in6)); - memcpy(__entry->dstaddr, &id->route.addr.dst_addr, - sizeof(struct sockaddr_in6)); - ), - - TP_printk("%pISpc -> %pISpc device=%s %s (%lu)", - __entry->srcaddr, __entry->dstaddr, __get_str(name), - rdma_show_ib_event(__entry->event), __entry->event - ) -); - /** ** Call events **/ @@ -591,22 +590,19 @@ TRACE_EVENT(xprtrdma_createmrs, TP_ARGS(r_xprt, count), TP_STRUCT__entry( - __field(const void *, r_xprt) __string(addr, rpcrdma_addrstr(r_xprt)) __string(port, rpcrdma_portstr(r_xprt)) __field(unsigned int, count) ), TP_fast_assign( - __entry->r_xprt = r_xprt; __entry->count = count; __assign_str(addr, rpcrdma_addrstr(r_xprt)); __assign_str(port, rpcrdma_portstr(r_xprt)); ), - 
TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs", - __get_str(addr), __get_str(port), __entry->r_xprt, - __entry->count + TP_printk("peer=[%s]:%s created %u MRs", + __get_str(addr), __get_str(port), __entry->count ) ); @@ -829,7 +825,7 @@ TRACE_EVENT(xprtrdma_post_recvs, TP_ARGS(r_xprt, count, status), TP_STRUCT__entry( - __field(const void *, r_xprt) + __field(u32, cq_id) __field(unsigned int, count) __field(int, status) __field(int, posted) @@ -838,16 +834,18 @@ TRACE_EVENT(xprtrdma_post_recvs, ), TP_fast_assign( - __entry->r_xprt = r_xprt; + const struct rpcrdma_ep *ep = r_xprt->rx_ep; + + __entry->cq_id = ep->re_attr.recv_cq->res.id; __entry->count = count; __entry->status = status; - __entry->posted = r_xprt->rx_ep->re_receive_count; + __entry->posted = ep->re_receive_count; __assign_str(addr, rpcrdma_addrstr(r_xprt)); __assign_str(port, rpcrdma_portstr(r_xprt)); ), - TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)", - __get_str(addr), __get_str(port), __entry->r_xprt, + TP_printk("peer=[%s]:%s cq.id=%d %u new recvs, %d active (rc %d)", + __get_str(addr), __get_str(port), __entry->cq_id, __entry->count, __entry->posted, __entry->status ) ); @@ -886,10 +884,10 @@ TRACE_EVENT(xprtrdma_post_linv_err, DEFINE_RECEIVE_COMPLETION_EVENT(xprtrdma_wc_receive); DEFINE_COMPLETION_EVENT(xprtrdma_wc_send); -DEFINE_COMPLETION_EVENT(xprtrdma_wc_fastreg); -DEFINE_COMPLETION_EVENT(xprtrdma_wc_li); -DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_wake); -DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_done); +DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_fastreg); +DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li); +DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_wake); +DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_done); TRACE_EVENT(xprtrdma_frwr_alloc, TP_PROTO( @@ -905,7 +903,7 @@ TRACE_EVENT(xprtrdma_frwr_alloc, ), TP_fast_assign( - __entry->mr_id = mr->frwr.fr_mr->res.id; + __entry->mr_id = mr->mr_ibmr->res.id; __entry->rc = rc; ), @@ -933,7 +931,7 @@ TRACE_EVENT(xprtrdma_frwr_dereg, ), TP_fast_assign( - __entry->mr_id = mr->frwr.fr_mr->res.id; + __entry->mr_id = mr->mr_ibmr->res.id; __entry->nents = mr->mr_nents; __entry->handle = mr->mr_handle; __entry->length = mr->mr_length; @@ -966,7 +964,7 @@ TRACE_EVENT(xprtrdma_frwr_sgerr, ), TP_fast_assign( - __entry->mr_id = mr->frwr.fr_mr->res.id; + __entry->mr_id = mr->mr_ibmr->res.id; __entry->addr = mr->mr_sg->dma_address; __entry->dir = mr->mr_dir; __entry->nents = sg_nents; @@ -996,7 +994,7 @@ TRACE_EVENT(xprtrdma_frwr_maperr, ), TP_fast_assign( - __entry->mr_id = mr->frwr.fr_mr->res.id; + __entry->mr_id = mr->mr_ibmr->res.id; __entry->addr = mr->mr_sg->dma_address; __entry->dir = mr->mr_dir; __entry->num_mapped = num_mapped; @@ -1010,11 +1008,12 @@ TRACE_EVENT(xprtrdma_frwr_maperr, ) ); +DEFINE_MR_EVENT(fastreg); DEFINE_MR_EVENT(localinv); +DEFINE_MR_EVENT(reminv); DEFINE_MR_EVENT(map); DEFINE_ANON_MR_EVENT(unmap); -DEFINE_ANON_MR_EVENT(recycle); TRACE_EVENT(xprtrdma_dma_maperr, TP_PROTO( @@ -1248,22 +1247,19 @@ TRACE_EVENT(xprtrdma_cb_setup, TP_ARGS(r_xprt, reqs), TP_STRUCT__entry( - __field(const void *, r_xprt) __field(unsigned int, reqs) __string(addr, rpcrdma_addrstr(r_xprt)) __string(port, rpcrdma_portstr(r_xprt)) ), TP_fast_assign( - __entry->r_xprt = r_xprt; __entry->reqs = reqs; __assign_str(addr, rpcrdma_addrstr(r_xprt)); __assign_str(port, rpcrdma_portstr(r_xprt)); ), - TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs", - __get_str(addr), __get_str(port), - __entry->r_xprt, __entry->reqs + TP_printk("peer=[%s]:%s %u reqs", + __get_str(addr), __get_str(port), 
__entry->reqs ) ); diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index cbe3e152d24c..1eca2305ca42 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -174,7 +174,7 @@ DEFINE_EVENT(sched_wakeup_template, sched_waking, TP_ARGS(p)); /* - * Tracepoint called when the task is actually woken; p->state == TASK_RUNNNG. + * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING. * It is not always called from the waking context. */ DEFINE_EVENT(sched_wakeup_template, sched_wakeup, diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 036eb1f5c133..d02e01a27b69 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -1079,6 +1079,46 @@ TRACE_EVENT(xprt_transmit, __entry->seqno, __entry->status) ); +TRACE_EVENT(xprt_retransmit, + TP_PROTO( + const struct rpc_rqst *rqst + ), + + TP_ARGS(rqst), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(u32, xid) + __field(int, ntrans) + __field(int, version) + __string(progname, + rqst->rq_task->tk_client->cl_program->name) + __string(procedure, + rqst->rq_task->tk_msg.rpc_proc->p_name) + ), + + TP_fast_assign( + struct rpc_task *task = rqst->rq_task; + + __entry->task_id = task->tk_pid; + __entry->client_id = task->tk_client ? + task->tk_client->cl_clid : -1; + __entry->xid = be32_to_cpu(rqst->rq_xid); + __entry->ntrans = rqst->rq_ntrans; + __assign_str(progname, + task->tk_client->cl_program->name) + __entry->version = task->tk_client->cl_vers; + __assign_str(procedure, task->tk_msg.rpc_proc->p_name) + ), + + TP_printk( + "task:%u@%u xid=0x%08x %sv%d %s ntrans=%d", + __entry->task_id, __entry->client_id, __entry->xid, + __get_str(progname), __entry->version, __get_str(procedure), + __entry->ntrans) +); + TRACE_EVENT(xprt_ping, TP_PROTO(const struct rpc_xprt *xprt, int status), @@ -1141,7 +1181,6 @@ DECLARE_EVENT_CLASS(xprt_writelock_event, DEFINE_WRITELOCK_EVENT(reserve_xprt); DEFINE_WRITELOCK_EVENT(release_xprt); -DEFINE_WRITELOCK_EVENT(transmit_queued); DECLARE_EVENT_CLASS(xprt_cong_event, TP_PROTO( @@ -1781,6 +1820,7 @@ DECLARE_EVENT_CLASS(svc_xprt_event, ), \ TP_ARGS(xprt)) +DEFINE_SVC_XPRT_EVENT(received); DEFINE_SVC_XPRT_EVENT(no_write_space); DEFINE_SVC_XPRT_EVENT(close); DEFINE_SVC_XPRT_EVENT(detach); diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index 19abb6c3eb73..6ad031c71be7 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h @@ -119,7 +119,7 @@ TRACE_EVENT(timer_expire_entry, * When used in combination with the timer_expire_entry tracepoint we can * determine the runtime of the timer callback function. * - * NOTE: Do NOT derefernce timer in TP_fast_assign. The pointer might + * NOTE: Do NOT dereference timer in TP_fast_assign. The pointer might * be invalid. We solely track the pointer. 
*/ DEFINE_EVENT(timer_class, timer_expire_exit, diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h index e151477d645c..1cb6f1afba0e 100644 --- a/include/trace/events/ufs.h +++ b/include/trace/events/ufs.h @@ -349,6 +349,27 @@ TRACE_EVENT(ufshcd_upiu, ) ); +TRACE_EVENT(ufshcd_exception_event, + + TP_PROTO(const char *dev_name, u16 status), + + TP_ARGS(dev_name, status), + + TP_STRUCT__entry( + __string(dev_name, dev_name) + __field(u16, status) + ), + + TP_fast_assign( + __assign_str(dev_name, dev_name); + __entry->status = status; + ), + + TP_printk("%s: status 0x%x", + __get_str(dev_name), __entry->status + ) +); + #endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */ /* This part must be outside protection */ diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h index 76a97176ab81..fcad3645a70b 100644 --- a/include/trace/events/xdp.h +++ b/include/trace/events/xdp.h @@ -86,19 +86,15 @@ struct _bpf_dtab_netdev { }; #endif /* __DEVMAP_OBJ_TYPE */ -#define devmap_ifindex(tgt, map) \ - (((map->map_type == BPF_MAP_TYPE_DEVMAP || \ - map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)) ? \ - ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex : 0) - DECLARE_EVENT_CLASS(xdp_redirect_template, TP_PROTO(const struct net_device *dev, const struct bpf_prog *xdp, const void *tgt, int err, - const struct bpf_map *map, u32 index), + enum bpf_map_type map_type, + u32 map_id, u32 index), - TP_ARGS(dev, xdp, tgt, err, map, index), + TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index), TP_STRUCT__entry( __field(int, prog_id) @@ -111,14 +107,22 @@ DECLARE_EVENT_CLASS(xdp_redirect_template, ), TP_fast_assign( + u32 ifindex = 0, map_index = index; + + if (map_type == BPF_MAP_TYPE_DEVMAP || map_type == BPF_MAP_TYPE_DEVMAP_HASH) { + ifindex = ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex; + } else if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) { + ifindex = index; + map_index = 0; + } + __entry->prog_id = xdp->aux->id; __entry->act = XDP_REDIRECT; __entry->ifindex = dev->ifindex; __entry->err = err; - __entry->to_ifindex = map ? devmap_ifindex(tgt, map) : - index; - __entry->map_id = map ? map->id : 0; - __entry->map_index = map ? 
index : 0; + __entry->to_ifindex = ifindex; + __entry->map_id = map_id; + __entry->map_index = map_index; ), TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d" @@ -133,45 +137,49 @@ DEFINE_EVENT(xdp_redirect_template, xdp_redirect, TP_PROTO(const struct net_device *dev, const struct bpf_prog *xdp, const void *tgt, int err, - const struct bpf_map *map, u32 index), - TP_ARGS(dev, xdp, tgt, err, map, index) + enum bpf_map_type map_type, + u32 map_id, u32 index), + TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index) ); DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err, TP_PROTO(const struct net_device *dev, const struct bpf_prog *xdp, const void *tgt, int err, - const struct bpf_map *map, u32 index), - TP_ARGS(dev, xdp, tgt, err, map, index) + enum bpf_map_type map_type, + u32 map_id, u32 index), + TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index) ); -#define _trace_xdp_redirect(dev, xdp, to) \ - trace_xdp_redirect(dev, xdp, NULL, 0, NULL, to) +#define _trace_xdp_redirect(dev, xdp, to) \ + trace_xdp_redirect(dev, xdp, NULL, 0, BPF_MAP_TYPE_UNSPEC, INT_MAX, to) -#define _trace_xdp_redirect_err(dev, xdp, to, err) \ - trace_xdp_redirect_err(dev, xdp, NULL, err, NULL, to) +#define _trace_xdp_redirect_err(dev, xdp, to, err) \ + trace_xdp_redirect_err(dev, xdp, NULL, err, BPF_MAP_TYPE_UNSPEC, INT_MAX, to) -#define _trace_xdp_redirect_map(dev, xdp, to, map, index) \ - trace_xdp_redirect(dev, xdp, to, 0, map, index) +#define _trace_xdp_redirect_map(dev, xdp, to, map_type, map_id, index) \ + trace_xdp_redirect(dev, xdp, to, 0, map_type, map_id, index) -#define _trace_xdp_redirect_map_err(dev, xdp, to, map, index, err) \ - trace_xdp_redirect_err(dev, xdp, to, err, map, index) +#define _trace_xdp_redirect_map_err(dev, xdp, to, map_type, map_id, index, err) \ + trace_xdp_redirect_err(dev, xdp, to, err, map_type, map_id, index) /* not used anymore, but kept around so as not to break old programs */ DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map, TP_PROTO(const struct net_device *dev, const struct bpf_prog *xdp, const void *tgt, int err, - const struct bpf_map *map, u32 index), - TP_ARGS(dev, xdp, tgt, err, map, index) + enum bpf_map_type map_type, + u32 map_id, u32 index), + TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index) ); DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err, TP_PROTO(const struct net_device *dev, const struct bpf_prog *xdp, const void *tgt, int err, - const struct bpf_map *map, u32 index), - TP_ARGS(dev, xdp, tgt, err, map, index) + enum bpf_map_type map_type, + u32 map_id, u32 index), + TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index) ); TRACE_EVENT(xdp_cpumap_kthread, diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h index 3b61b587e137..44a3f565264d 100644 --- a/include/trace/events/xen.h +++ b/include/trace/events/xen.h @@ -346,7 +346,7 @@ TRACE_EVENT(xen_mmu_flush_tlb_one_user, TP_printk("addr %lx", __entry->addr) ); -TRACE_EVENT(xen_mmu_flush_tlb_others, +TRACE_EVENT(xen_mmu_flush_tlb_multi, TP_PROTO(const struct cpumask *cpus, struct mm_struct *mm, unsigned long addr, unsigned long end), TP_ARGS(cpus, mm, addr, end), diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h index d2597000407a..5a3c221f4c9d 100644 --- a/include/uapi/asm-generic/siginfo.h +++ b/include/uapi/asm-generic/siginfo.h @@ -63,9 +63,6 @@ union __sifields { /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */ struct { void __user *_addr; /* faulting insn/memory ref. 
*/ -#ifdef __ARCH_SI_TRAPNO - int _trapno; /* TRAP # which caused the signal */ -#endif #ifdef __ia64__ int _imm; /* immediate value for "break" */ unsigned int _flags; /* see ia64 si_flags */ @@ -75,6 +72,8 @@ union __sifields { #define __ADDR_BND_PKEY_PAD (__alignof__(void *) < sizeof(short) ? \ sizeof(short) : __alignof__(void *)) union { + /* used on alpha and sparc */ + int _trapno; /* TRAP # which caused the signal */ /* * used when si_code=BUS_MCEERR_AR or * used when si_code=BUS_MCEERR_AO @@ -91,6 +90,11 @@ union __sifields { char _dummy_pkey[__ADDR_BND_PKEY_PAD]; __u32 _pkey; } _addr_pkey; + /* used when si_code=TRAP_PERF */ + struct { + unsigned long _data; + __u32 _type; + } _perf; }; } _sigfault; @@ -148,13 +152,13 @@ typedef struct siginfo { #define si_int _sifields._rt._sigval.sival_int #define si_ptr _sifields._rt._sigval.sival_ptr #define si_addr _sifields._sigfault._addr -#ifdef __ARCH_SI_TRAPNO #define si_trapno _sifields._sigfault._trapno -#endif #define si_addr_lsb _sifields._sigfault._addr_lsb #define si_lower _sifields._sigfault._addr_bnd._lower #define si_upper _sifields._sigfault._addr_bnd._upper #define si_pkey _sifields._sigfault._addr_pkey._pkey +#define si_perf_data _sifields._sigfault._perf._data +#define si_perf_type _sifields._sigfault._perf._type #define si_band _sifields._sigpoll._band #define si_fd _sifields._sigpoll._fd #define si_call_addr _sifields._sigsys._call_addr @@ -253,7 +257,8 @@ typedef struct siginfo { #define TRAP_BRANCH 3 /* process taken branch trap */ #define TRAP_HWBKPT 4 /* hardware breakpoint/watchpoint */ #define TRAP_UNK 5 /* undiagnosed trap */ -#define NSIGTRAP 5 +#define TRAP_PERF 6 /* perf event with sigtrap=1 */ +#define NSIGTRAP 6 /* * There is an additional set of SIGTRAP si_codes used by ptrace diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index ce58cff99b66..6de5a7fc066b 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -863,9 +863,18 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise) __SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2) #define __NR_mount_setattr 442 __SYSCALL(__NR_mount_setattr, sys_mount_setattr) +#define __NR_quotactl_path 443 +__SYSCALL(__NR_quotactl_path, sys_quotactl_path) + +#define __NR_landlock_create_ruleset 444 +__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset) +#define __NR_landlock_add_rule 445 +__SYSCALL(__NR_landlock_add_rule, sys_landlock_add_rule) +#define __NR_landlock_restrict_self 446 +__SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self) #undef __NR_syscalls -#define __NR_syscalls 443 +#define __NR_syscalls 447 /* * 32 bit systems traditionally used different diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 7fb9c09ee93f..728566542f8a 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -782,6 +782,12 @@ struct drm_amdgpu_cs_chunk_data { #define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F /* query ras mask of enabled features*/ #define AMDGPU_INFO_RAS_ENABLED_FEATURES 0x20 +/* query video encode/decode caps */ +#define AMDGPU_INFO_VIDEO_CAPS 0x21 + /* Subquery id: Decode */ + #define AMDGPU_INFO_VIDEO_CAPS_DECODE 0 + /* Subquery id: Encode */ + #define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1 /* RAS MASK: UMC (VRAM) */ #define AMDGPU_INFO_RAS_ENABLED_UMC (1 << 0) @@ -878,6 +884,10 @@ struct drm_amdgpu_info { struct { __u32 type; } sensor_info; + + struct { + __u32 type; + } video_cap; }; }; @@ -1074,6 +1084,30 @@ 
struct drm_amdgpu_info_vce_clock_table { __u32 pad; }; +/* query video encode/decode caps */ +#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2 0 +#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4 1 +#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1 2 +#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC 3 +#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC 4 +#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG 5 +#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9 6 +#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1 7 +#define AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT 8 + +struct drm_amdgpu_info_video_codec_info { + __u32 valid; + __u32 max_width; + __u32 max_height; + __u32 max_pixels_per_frame; + __u32 max_level; + __u32 pad; +}; + +struct drm_amdgpu_info_video_caps { + struct drm_amdgpu_info_video_codec_info codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT]; +}; + /* * Supported GPU families */ diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 0827037c5484..67b94bc3c885 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -625,30 +625,147 @@ struct drm_gem_open { __u64 size; }; +/** + * DRM_CAP_DUMB_BUFFER + * + * If set to 1, the driver supports creating dumb buffers via the + * &DRM_IOCTL_MODE_CREATE_DUMB ioctl. + */ #define DRM_CAP_DUMB_BUFFER 0x1 +/** + * DRM_CAP_VBLANK_HIGH_CRTC + * + * If set to 1, the kernel supports specifying a CRTC index in the high bits of + * &drm_wait_vblank_request.type. + * + * Starting kernel version 2.6.39, this capability is always set to 1. + */ #define DRM_CAP_VBLANK_HIGH_CRTC 0x2 +/** + * DRM_CAP_DUMB_PREFERRED_DEPTH + * + * The preferred bit depth for dumb buffers. + * + * The bit depth is the number of bits used to indicate the color of a single + * pixel excluding any padding. This is different from the number of bits per + * pixel. For instance, XRGB8888 has a bit depth of 24 but has 32 bits per + * pixel. + * + * Note that this preference only applies to dumb buffers, it's irrelevant for + * other types of buffers. + */ #define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3 +/** + * DRM_CAP_DUMB_PREFER_SHADOW + * + * If set to 1, the driver prefers userspace to render to a shadow buffer + * instead of directly rendering to a dumb buffer. For best speed, userspace + * should do streaming ordered memory copies into the dumb buffer and never + * read from it. + * + * Note that this preference only applies to dumb buffers, it's irrelevant for + * other types of buffers. + */ #define DRM_CAP_DUMB_PREFER_SHADOW 0x4 +/** + * DRM_CAP_PRIME + * + * Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT + * and &DRM_PRIME_CAP_EXPORT. + * + * PRIME buffers are exposed as dma-buf file descriptors. See + * Documentation/gpu/drm-mm.rst, section "PRIME Buffer Sharing". + */ #define DRM_CAP_PRIME 0x5 +/** + * DRM_PRIME_CAP_IMPORT + * + * If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME + * buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl. + */ #define DRM_PRIME_CAP_IMPORT 0x1 +/** + * DRM_PRIME_CAP_EXPORT + * + * If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME + * buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl. + */ #define DRM_PRIME_CAP_EXPORT 0x2 +/** + * DRM_CAP_TIMESTAMP_MONOTONIC + * + * If set to 0, the kernel will report timestamps with ``CLOCK_REALTIME`` in + * struct drm_event_vblank. If set to 1, the kernel will report timestamps with + * ``CLOCK_MONOTONIC``. See ``clock_gettime(2)`` for the definition of these + * clocks. 
+ * + * Starting from kernel version 2.6.39, the default value for this capability + * is 1. Starting kernel version 4.15, this capability is always set to 1. + */ #define DRM_CAP_TIMESTAMP_MONOTONIC 0x6 +/** + * DRM_CAP_ASYNC_PAGE_FLIP + * + * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC. + */ #define DRM_CAP_ASYNC_PAGE_FLIP 0x7 -/* - * The CURSOR_WIDTH and CURSOR_HEIGHT capabilities return a valid widthxheight - * combination for the hardware cursor. The intention is that a hardware - * agnostic userspace can query a cursor plane size to use. +/** + * DRM_CAP_CURSOR_WIDTH + * + * The ``CURSOR_WIDTH`` and ``CURSOR_HEIGHT`` capabilities return a valid + * width x height combination for the hardware cursor. The intention is that a + * hardware agnostic userspace can query a cursor plane size to use. * * Note that the cross-driver contract is to merely return a valid size; * drivers are free to attach another meaning on top, eg. i915 returns the * maximum plane size. */ #define DRM_CAP_CURSOR_WIDTH 0x8 +/** + * DRM_CAP_CURSOR_HEIGHT + * + * See &DRM_CAP_CURSOR_WIDTH. + */ #define DRM_CAP_CURSOR_HEIGHT 0x9 +/** + * DRM_CAP_ADDFB2_MODIFIERS + * + * If set to 1, the driver supports supplying modifiers in the + * &DRM_IOCTL_MODE_ADDFB2 ioctl. + */ #define DRM_CAP_ADDFB2_MODIFIERS 0x10 +/** + * DRM_CAP_PAGE_FLIP_TARGET + * + * If set to 1, the driver supports the &DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE and + * &DRM_MODE_PAGE_FLIP_TARGET_RELATIVE flags in + * &drm_mode_crtc_page_flip_target.flags for the &DRM_IOCTL_MODE_PAGE_FLIP + * ioctl. + */ #define DRM_CAP_PAGE_FLIP_TARGET 0x11 +/** + * DRM_CAP_CRTC_IN_VBLANK_EVENT + * + * If set to 1, the kernel supports reporting the CRTC ID in + * &drm_event_vblank.crtc_id for the &DRM_EVENT_VBLANK and + * &DRM_EVENT_FLIP_COMPLETE events. + * + * Starting kernel version 4.12, this capability is always set to 1. + */ #define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12 +/** + * DRM_CAP_SYNCOBJ + * + * If set to 1, the driver supports sync objects. See + * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects". + */ #define DRM_CAP_SYNCOBJ 0x13 +/** + * DRM_CAP_SYNCOBJ_TIMELINE + * + * If set to 1, the driver supports timeline operations on sync objects. See + * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects". + */ #define DRM_CAP_SYNCOBJ_TIMELINE 0x14 /* DRM_IOCTL_GET_CAP ioctl argument type */ diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index 1c064627e6c3..a5e76aa06ad5 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -388,6 +388,7 @@ enum drm_mode_subconnector { #define DRM_MODE_CONNECTOR_DPI 17 #define DRM_MODE_CONNECTOR_WRITEBACK 18 #define DRM_MODE_CONNECTOR_SPI 19 +#define DRM_MODE_CONNECTOR_USB 20 /** * struct drm_mode_get_connector - Get connector metadata. @@ -990,7 +991,7 @@ struct drm_format_modifier { }; /** - * struct drm_mode_create_blob - Create New block property + * struct drm_mode_create_blob - Create New blob property * * Create a new 'blob' data property, copying length bytes from data pointer, * and returning new blob ID. 
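The DRM_CAP_* values documented in the drm.h hunk above are read from userspace through the DRM_IOCTL_GET_CAP ioctl, whose argument is struct drm_get_cap. The following is a minimal illustrative sketch, not part of the patch; the device path and include location are assumptions, and libdrm's drmGetCap() wraps the same ioctl.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/drm.h>   /* include path may differ, e.g. <libdrm/drm.h> */

static int get_cap(int fd, __u64 capability, __u64 *value)
{
	/* DRM_IOCTL_GET_CAP fills drm_get_cap.value for the requested capability. */
	struct drm_get_cap cap = { .capability = capability };
	int ret = ioctl(fd, DRM_IOCTL_GET_CAP, &cap);

	if (ret == 0)
		*value = cap.value;
	return ret;
}

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR); /* device path is an assumption */
	__u64 val;

	if (fd < 0)
		return 1;
	if (!get_cap(fd, DRM_CAP_DUMB_BUFFER, &val))
		printf("dumb buffers: %llu\n", (unsigned long long)val);
	if (!get_cap(fd, DRM_CAP_PRIME, &val))
		printf("prime: import=%d export=%d\n",
		       !!(val & DRM_PRIME_CAP_IMPORT),
		       !!(val & DRM_PRIME_CAP_EXPORT));
	close(fd);
	return 0;
}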
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 1987e2ea79a3..ddc47bbf48b6 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -943,6 +943,7 @@ struct drm_i915_gem_exec_object { __u64 offset; }; +/* DRM_IOCTL_I915_GEM_EXECBUFFER was removed in Linux 5.13 */ struct drm_i915_gem_execbuffer { /** * List of buffers to be validated with their relocations to be diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index a6c1f3eb2623..5596d7c37f9e 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -76,6 +76,7 @@ struct drm_msm_timespec { #define MSM_PARAM_NR_RINGS 0x07 #define MSM_PARAM_PP_PGTABLE 0x08 /* => 1 for per-process pagetables, else 0 */ #define MSM_PARAM_FAULTS 0x09 +#define MSM_PARAM_SUSPENDS 0x0a struct drm_msm_param { __u32 pipe; /* in, MSM_PIPE_x */ diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h index ec84ad106568..20e435fe657a 100644 --- a/include/uapi/linux/android/binder.h +++ b/include/uapi/linux/android/binder.h @@ -217,6 +217,18 @@ struct binder_node_info_for_ref { __u32 reserved3; }; +struct binder_freeze_info { + __u32 pid; + __u32 enable; + __u32 timeout_ms; +}; + +struct binder_frozen_status_info { + __u32 pid; + __u32 sync_recv; + __u32 async_recv; +}; + #define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) #define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64) #define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32) @@ -227,6 +239,9 @@ struct binder_node_info_for_ref { #define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info) #define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref) #define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object) +#define BINDER_FREEZE _IOW('b', 14, struct binder_freeze_info) +#define BINDER_GET_FROZEN_INFO _IOWR('b', 15, struct binder_frozen_status_info) +#define BINDER_ENABLE_ONEWAY_SPAM_DETECTION _IOW('b', 16, __u32) /* * NOTE: Two special error codes you should check for when calling @@ -408,6 +423,19 @@ enum binder_driver_return_protocol { * The last transaction (either a bcTRANSACTION or * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters. */ + + BR_FROZEN_REPLY = _IO('r', 18), + /* + * The target of the last transaction (either a bcTRANSACTION or + * a bcATTEMPT_ACQUIRE) is frozen. No parameters. + */ + + BR_ONEWAY_SPAM_SUSPECT = _IO('r', 19), + /* + * Current process sent too many oneway calls to target, and the last + * asynchronous transaction makes the allocated async buffer size exceed + * detection threshold. No parameters. + */ }; enum binder_driver_command_protocol { diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 4ba4ef0ff63a..ec6d85a81744 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -93,7 +93,738 @@ union bpf_iter_link_info { } map; }; -/* BPF syscall commands, see bpf(2) man-page for details. */ +/* BPF syscall commands, see bpf(2) man-page for more details. */ +/** + * DOC: eBPF Syscall Preamble + * + * The operation to be performed by the **bpf**\ () system call is determined + * by the *cmd* argument. Each operation takes an accompanying argument, + * provided via *attr*, which is a pointer to a union of type *bpf_attr* (see + * below). The size argument is the size of the union pointed to by *attr*. + */ +/** + * DOC: eBPF Syscall Commands + * + * BPF_MAP_CREATE + * Description + * Create a map and return a file descriptor that refers to the + * map. 
The close-on-exec file descriptor flag (see **fcntl**\ (2)) + * is automatically enabled for the new file descriptor. + * + * Applying **close**\ (2) to the file descriptor returned by + * **BPF_MAP_CREATE** will delete the map (but see NOTES). + * + * Return + * A new file descriptor (a nonnegative integer), or -1 if an + * error occurred (in which case, *errno* is set appropriately). + * + * BPF_MAP_LOOKUP_ELEM + * Description + * Look up an element with a given *key* in the map referred to + * by the file descriptor *map_fd*. + * + * The *flags* argument may be specified as one of the + * following: + * + * **BPF_F_LOCK** + * Look up the value of a spin-locked map without + * returning the lock. This must be specified if the + * elements contain a spinlock. + * + * Return + * Returns zero on success. On error, -1 is returned and *errno* + * is set appropriately. + * + * BPF_MAP_UPDATE_ELEM + * Description + * Create or update an element (key/value pair) in a specified map. + * + * The *flags* argument should be specified as one of the + * following: + * + * **BPF_ANY** + * Create a new element or update an existing element. + * **BPF_NOEXIST** + * Create a new element only if it did not exist. + * **BPF_EXIST** + * Update an existing element. + * **BPF_F_LOCK** + * Update a spin_lock-ed map element. + * + * Return + * Returns zero on success. On error, -1 is returned and *errno* + * is set appropriately. + * + * May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**, + * **E2BIG**, **EEXIST**, or **ENOENT**. + * + * **E2BIG** + * The number of elements in the map reached the + * *max_entries* limit specified at map creation time. + * **EEXIST** + * If *flags* specifies **BPF_NOEXIST** and the element + * with *key* already exists in the map. + * **ENOENT** + * If *flags* specifies **BPF_EXIST** and the element with + * *key* does not exist in the map. + * + * BPF_MAP_DELETE_ELEM + * Description + * Look up and delete an element by key in a specified map. + * + * Return + * Returns zero on success. On error, -1 is returned and *errno* + * is set appropriately. + * + * BPF_MAP_GET_NEXT_KEY + * Description + * Look up an element by key in a specified map and return the key + * of the next element. Can be used to iterate over all elements + * in the map. + * + * Return + * Returns zero on success. On error, -1 is returned and *errno* + * is set appropriately. + * + * The following cases can be used to iterate over all elements of + * the map: + * + * * If *key* is not found, the operation returns zero and sets + * the *next_key* pointer to the key of the first element. + * * If *key* is found, the operation returns zero and sets the + * *next_key* pointer to the key of the next element. + * * If *key* is the last element, returns -1 and *errno* is set + * to **ENOENT**. + * + * May set *errno* to **ENOMEM**, **EFAULT**, **EPERM**, or + * **EINVAL** on error. + * + * BPF_PROG_LOAD + * Description + * Verify and load an eBPF program, returning a new file + * descriptor associated with the program. + * + * Applying **close**\ (2) to the file descriptor returned by + * **BPF_PROG_LOAD** will unload the eBPF program (but see NOTES). + * + * The close-on-exec file descriptor flag (see **fcntl**\ (2)) is + * automatically enabled for the new file descriptor. + * + * Return + * A new file descriptor (a nonnegative integer), or -1 if an + * error occurred (in which case, *errno* is set appropriately). 
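The map commands documented above (BPF_MAP_CREATE, BPF_MAP_UPDATE_ELEM, BPF_MAP_LOOKUP_ELEM) are driven from userspace through the raw bpf(2) syscall, for which glibc provides no wrapper. Below is a minimal illustrative sketch, not part of the patch, assuming the caller has the required privileges (for example CAP_BPF or CAP_SYS_ADMIN).

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static long sys_bpf(int cmd, union bpf_attr *attr)
{
	/* bpf(2) has no libc wrapper; invoke it via syscall(2). */
	return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
}

int main(void)
{
	union bpf_attr attr;
	__u32 key = 0;
	__u64 value = 42, out = 0;
	int map_fd;

	/* BPF_MAP_CREATE: a one-element array map. */
	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_ARRAY;
	attr.key_size = sizeof(key);
	attr.value_size = sizeof(value);
	attr.max_entries = 1;
	map_fd = sys_bpf(BPF_MAP_CREATE, &attr);
	if (map_fd < 0)
		return 1;

	/* BPF_MAP_UPDATE_ELEM with BPF_ANY, then BPF_MAP_LOOKUP_ELEM. */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = (__u64)(unsigned long)&key;
	attr.value = (__u64)(unsigned long)&value;
	attr.flags = BPF_ANY;
	if (sys_bpf(BPF_MAP_UPDATE_ELEM, &attr))
		return 1;

	attr.value = (__u64)(unsigned long)&out;
	if (sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr))
		return 1;

	printf("value: %llu\n", (unsigned long long)out);
	close(map_fd);
	return 0;
}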
+ * + * BPF_OBJ_PIN + * Description + * Pin an eBPF program or map referred by the specified *bpf_fd* + * to the provided *pathname* on the filesystem. + * + * The *pathname* argument must not contain a dot ("."). + * + * On success, *pathname* retains a reference to the eBPF object, + * preventing deallocation of the object when the original + * *bpf_fd* is closed. This allow the eBPF object to live beyond + * **close**\ (\ *bpf_fd*\ ), and hence the lifetime of the parent + * process. + * + * Applying **unlink**\ (2) or similar calls to the *pathname* + * unpins the object from the filesystem, removing the reference. + * If no other file descriptors or filesystem nodes refer to the + * same object, it will be deallocated (see NOTES). + * + * The filesystem type for the parent directory of *pathname* must + * be **BPF_FS_MAGIC**. + * + * Return + * Returns zero on success. On error, -1 is returned and *errno* + * is set appropriately. + * + * BPF_OBJ_GET + * Description + * Open a file descriptor for the eBPF object pinned to the + * specified *pathname*. + * + * Return + * A new file descriptor (a nonnegative integer), or -1 if an + * error occurred (in which case, *errno* is set appropriately). + * + * BPF_PROG_ATTACH + * Description + * Attach an eBPF program to a *target_fd* at the specified + * *attach_type* hook. + * + * The *attach_type* specifies the eBPF attachment point to + * attach the program to, and must be one of *bpf_attach_type* + * (see below). + * + * The *attach_bpf_fd* must be a valid file descriptor for a + * loaded eBPF program of a cgroup, flow dissector, LIRC, sockmap + * or sock_ops type corresponding to the specified *attach_type*. + * + * The *target_fd* must be a valid file descriptor for a kernel + * object which depends on the attach type of *attach_bpf_fd*: + * + * **BPF_PROG_TYPE_CGROUP_DEVICE**, + * **BPF_PROG_TYPE_CGROUP_SKB**, + * **BPF_PROG_TYPE_CGROUP_SOCK**, + * **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**, + * **BPF_PROG_TYPE_CGROUP_SOCKOPT**, + * **BPF_PROG_TYPE_CGROUP_SYSCTL**, + * **BPF_PROG_TYPE_SOCK_OPS** + * + * Control Group v2 hierarchy with the eBPF controller + * enabled. Requires the kernel to be compiled with + * **CONFIG_CGROUP_BPF**. + * + * **BPF_PROG_TYPE_FLOW_DISSECTOR** + * + * Network namespace (eg /proc/self/ns/net). + * + * **BPF_PROG_TYPE_LIRC_MODE2** + * + * LIRC device path (eg /dev/lircN). Requires the kernel + * to be compiled with **CONFIG_BPF_LIRC_MODE2**. + * + * **BPF_PROG_TYPE_SK_SKB**, + * **BPF_PROG_TYPE_SK_MSG** + * + * eBPF map of socket type (eg **BPF_MAP_TYPE_SOCKHASH**). + * + * Return + * Returns zero on success. On error, -1 is returned and *errno* + * is set appropriately. + * + * BPF_PROG_DETACH + * Description + * Detach the eBPF program associated with the *target_fd* at the + * hook specified by *attach_type*. The program must have been + * previously attached using **BPF_PROG_ATTACH**. + * + * Return + * Returns zero on success. On error, -1 is returned and *errno* + * is set appropriately. + * + * BPF_PROG_TEST_RUN + * Description + * Run the eBPF program associated with the *prog_fd* a *repeat* + * number of times against a provided program context *ctx_in* and + * data *data_in*, and return the modified program context + * *ctx_out*, *data_out* (for example, packet data), result of the + * execution *retval*, and *duration* of the test run. 
+ * + * The sizes of the buffers provided as input and output + * parameters *ctx_in*, *ctx_out*, *data_in*, and *data_out* must + * be provided in the corresponding variables *ctx_size_in*, + * *ctx_size_out*, *data_size_in*, and/or *data_size_out*. If any + * of these parameters are not provided (ie set to NULL), the + * corresponding size field must be zero. + * + * Some program types have particular requirements: + * + * **BPF_PROG_TYPE_SK_LOOKUP** + * *data_in* and *data_out* must be NULL. + * + * **BPF_PROG_TYPE_XDP** + * *ctx_in* and *ctx_out* must be NULL. + * + * **BPF_PROG_TYPE_RAW_TRACEPOINT**, + * **BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE** + * + * *ctx_out*, *data_in* and *data_out* must be NULL. + * *repeat* must be zero. + * + * Return + * Returns zero on success. On error, -1 is returned and *errno* + * is set appropriately. + * + * **ENOSPC** + * Either *data_size_out* or *ctx_size_out* is too small. + * **ENOTSUPP** + * This command is not supported by the program type of + * the program referred to by *prog_fd*. + * + * BPF_PROG_GET_NEXT_ID + * Description + * Fetch the next eBPF program currently loaded into the kernel. + * + * Looks for the eBPF program with an id greater than *start_id* + * and updates *next_id* on success. If no other eBPF programs + * remain with ids higher than *start_id*, returns -1 and sets + * *errno* to **ENOENT**. + * + * Return + * Returns zero on success. On error, or when no id remains, -1 + * is returned and *errno* is set appropriately. + * + * BPF_MAP_GET_NEXT_ID + * Description + * Fetch the next eBPF map currently loaded into the kernel. + * + * Looks for the eBPF map with an id greater than *start_id* + * and updates *next_id* on success. If no other eBPF maps + * remain with ids higher than *start_id*, returns -1 and sets + * *errno* to **ENOENT**. + * + * Return + * Returns zero on success. On error, or when no id remains, -1 + * is returned and *errno* is set appropriately. + * + * BPF_PROG_GET_FD_BY_ID + * Description + * Open a file descriptor for the eBPF program corresponding to + * *prog_id*. + * + * Return + * A new file descriptor (a nonnegative integer), or -1 if an + * error occurred (in which case, *errno* is set appropriately). + * + * BPF_MAP_GET_FD_BY_ID + * Description + * Open a file descriptor for the eBPF map corresponding to + * *map_id*. + * + * Return + * A new file descriptor (a nonnegative integer), or -1 if an + * error occurred (in which case, *errno* is set appropriately). + * + * BPF_OBJ_GET_INFO_BY_FD + * Description + * Obtain information about the eBPF object corresponding to + * *bpf_fd*. + * + * Populates up to *info_len* bytes of *info*, which will be in + * one of the following formats depending on the eBPF object type + * of *bpf_fd*: + * + * * **struct bpf_prog_info** + * * **struct bpf_map_info** + * * **struct bpf_btf_info** + * * **struct bpf_link_info** + * + * Return + * Returns zero on success. On error, -1 is returned and *errno* + * is set appropriately. + * + * BPF_PROG_QUERY + * Description + * Obtain information about eBPF programs associated with the + * specified *attach_type* hook. 
+ * + * The *target_fd* must be a valid file descriptor for a kernel + * object which depends on the attach type of *attach_bpf_fd*: + * + * **BPF_PROG_TYPE_CGROUP_DEVICE**, + * **BPF_PROG_TYPE_CGROUP_SKB**, + * **BPF_PROG_TYPE_CGROUP_SOCK**, + * **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**, + * **BPF_PROG_TYPE_CGROUP_SOCKOPT**, + * **BPF_PROG_TYPE_CGROUP_SYSCTL**, + * **BPF_PROG_TYPE_SOCK_OPS** + * + * Control Group v2 hierarchy with the eBPF controller + * enabled. Requires the kernel to be compiled with + * **CONFIG_CGROUP_BPF**. + * + * **BPF_PROG_TYPE_FLOW_DISSECTOR** + * + * Network namespace (eg /proc/self/ns/net). + * + * **BPF_PROG_TYPE_LIRC_MODE2** + * + * LIRC device path (eg /dev/lircN). Requires the kernel + * to be compiled with **CONFIG_BPF_LIRC_MODE2**. + * + * **BPF_PROG_QUERY** always fetches the number of programs + * attached and the *attach_flags* which were used to attach those + * programs. Additionally, if *prog_ids* is nonzero and the number + * of attached programs is less than *prog_cnt*, populates + * *prog_ids* with the eBPF program ids of the programs attached + * at *target_fd*. + * + * The following flags may alter the result: + * + * **BPF_F_QUERY_EFFECTIVE** + * Only return information regarding programs which are + * currently effective at the specified *target_fd*. + * + * Return + * Returns zero on success. On error, -1 is returned and *errno* + * is set appropriately. + * + * BPF_RAW_TRACEPOINT_OPEN + * Description + * Attach an eBPF program to a tracepoint *name* to access kernel + * internal arguments of the tracepoint in their raw form. + * + * The *prog_fd* must be a valid file descriptor associated with + * a loaded eBPF program of type **BPF_PROG_TYPE_RAW_TRACEPOINT**. + * + * No ABI guarantees are made about the content of tracepoint + * arguments exposed to the corresponding eBPF program. + * + * Applying **close**\ (2) to the file descriptor returned by + * **BPF_RAW_TRACEPOINT_OPEN** will delete the map (but see NOTES). + * + * Return + * A new file descriptor (a nonnegative integer), or -1 if an + * error occurred (in which case, *errno* is set appropriately). + * + * BPF_BTF_LOAD + * Description + * Verify and load BPF Type Format (BTF) metadata into the kernel, + * returning a new file descriptor associated with the metadata. + * BTF is described in more detail at + * https://www.kernel.org/doc/html/latest/bpf/btf.html. + * + * The *btf* parameter must point to valid memory providing + * *btf_size* bytes of BTF binary metadata. + * + * The returned file descriptor can be passed to other **bpf**\ () + * subcommands such as **BPF_PROG_LOAD** or **BPF_MAP_CREATE** to + * associate the BTF with those objects. + * + * Similar to **BPF_PROG_LOAD**, **BPF_BTF_LOAD** has optional + * parameters to specify a *btf_log_buf*, *btf_log_size* and + * *btf_log_level* which allow the kernel to return freeform log + * output regarding the BTF verification process. + * + * Return + * A new file descriptor (a nonnegative integer), or -1 if an + * error occurred (in which case, *errno* is set appropriately). + * + * BPF_BTF_GET_FD_BY_ID + * Description + * Open a file descriptor for the BPF Type Format (BTF) + * corresponding to *btf_id*. + * + * Return + * A new file descriptor (a nonnegative integer), or -1 if an + * error occurred (in which case, *errno* is set appropriately). + * + * BPF_TASK_FD_QUERY + * Description + * Obtain information about eBPF programs associated with the + * target process identified by *pid* and *fd*. 
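
As a concrete illustration of BPF_PROG_QUERY against a cgroup hook, a hedged sketch; the cgroup path and attach type are placeholders chosen only for the example, and sys_bpf() is the usual wrapper around syscall(__NR_bpf, ...):

#include <linux/bpf.h>
#include <sys/syscall.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

static void query_cgroup_ingress(const char *cgroup_path)	/* cgroup v2 dir */
{
	union bpf_attr attr;
	__u32 ids[64];
	int cg_fd = open(cgroup_path, O_RDONLY);

	if (cg_fd < 0)
		return;

	memset(&attr, 0, sizeof(attr));
	attr.query.target_fd = cg_fd;
	attr.query.attach_type = BPF_CGROUP_INET_INGRESS;
	attr.query.prog_ids = (__u64)(unsigned long)ids;
	attr.query.prog_cnt = 64;			/* capacity in, count out */
	if (!sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr)))
		printf("%u program(s) attached, attach_flags 0x%x\n",
		       attr.query.prog_cnt, attr.query.attach_flags);
	close(cg_fd);
}
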
+ * + * If the *pid* and *fd* are associated with a tracepoint, kprobe + * or uprobe perf event, then the *prog_id* and *fd_type* will + * be populated with the eBPF program id and file descriptor type + * of type **bpf_task_fd_type**. If associated with a kprobe or + * uprobe, the *probe_offset* and *probe_addr* will also be + * populated. Optionally, if *buf* is provided, then up to + * *buf_len* bytes of *buf* will be populated with the name of + * the tracepoint, kprobe or uprobe. + * + * The resulting *prog_id* may be introspected in deeper detail + * using **BPF_PROG_GET_FD_BY_ID** and **BPF_OBJ_GET_INFO_BY_FD**. + * + * Return + * Returns zero on success. On error, -1 is returned and *errno* + * is set appropriately. + * + * BPF_MAP_LOOKUP_AND_DELETE_ELEM + * Description + * Look up an element with the given *key* in the map referred to + * by the file descriptor *fd*, and if found, delete the element. + * + * The **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map types + * implement this command as a "pop" operation, deleting the top + * element rather than one corresponding to *key*. + * The *key* and *key_len* parameters should be zeroed when + * issuing this operation for these map types. + * + * This command is only valid for the following map types: + * * **BPF_MAP_TYPE_QUEUE** + * * **BPF_MAP_TYPE_STACK** + * + * Return + * Returns zero on success. On error, -1 is returned and *errno* + * is set appropriately. + * + * BPF_MAP_FREEZE + * Description + * Freeze the permissions of the specified map. + * + * Write permissions may be frozen by passing zero *flags*. + * Upon success, no future syscall invocations may alter the + * map state of *map_fd*. Write operations from eBPF programs + * are still possible for a frozen map. + * + * Not supported for maps of type **BPF_MAP_TYPE_STRUCT_OPS**. + * + * Return + * Returns zero on success. On error, -1 is returned and *errno* + * is set appropriately. + * + * BPF_BTF_GET_NEXT_ID + * Description + * Fetch the next BPF Type Format (BTF) object currently loaded + * into the kernel. + * + * Looks for the BTF object with an id greater than *start_id* + * and updates *next_id* on success. If no other BTF objects + * remain with ids higher than *start_id*, returns -1 and sets + * *errno* to **ENOENT**. + * + * Return + * Returns zero on success. On error, or when no id remains, -1 + * is returned and *errno* is set appropriately. + * + * BPF_MAP_LOOKUP_BATCH + * Description + * Iterate and fetch multiple elements in a map. + * + * Two opaque values are used to manage batch operations, + * *in_batch* and *out_batch*. Initially, *in_batch* must be set + * to NULL to begin the batched operation. After each subsequent + * **BPF_MAP_LOOKUP_BATCH**, the caller should pass the resultant + * *out_batch* as the *in_batch* for the next operation to + * continue iteration from the current point. + * + * The *keys* and *values* are output parameters which must point + * to memory large enough to hold *count* items based on the key + * and value size of the map *map_fd*. The *keys* buffer must be + * of *key_size* * *count*. The *values* buffer must be of + * *value_size* * *count*. + * + * The *elem_flags* argument may be specified as one of the + * following: + * + * **BPF_F_LOCK** + * Look up the value of a spin-locked map without + * returning the lock. This must be specified if the + * elements contain a spinlock. 
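
The queue/stack "pop" behaviour of BPF_MAP_LOOKUP_AND_DELETE_ELEM boils down to a call with a NULL key. A small sketch, assuming a BPF_MAP_TYPE_QUEUE whose values are 8 bytes wide and the same sys_bpf() wrapper as in the earlier sketches:

#include <linux/bpf.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

/* Pop one element from a BPF_MAP_TYPE_QUEUE map; returns 0 on success. */
static int queue_pop(int queue_fd, __u64 *out)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = queue_fd;
	attr.key = 0;				/* queue/stack maps take no key */
	attr.value = (__u64)(unsigned long)out;
	return sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
}

Freezing the same map afterwards with BPF_MAP_FREEZE (only map_fd set, zero flags) blocks further syscall-side writes while updates from eBPF programs keep working.
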
+ * + * On success, *count* elements from the map are copied into the + * user buffer, with the keys copied into *keys* and the values + * copied into the corresponding indices in *values*. + * + * If an error is returned and *errno* is not **EFAULT**, *count* + * is set to the number of successfully processed elements. + * + * Return + * Returns zero on success. On error, -1 is returned and *errno* + * is set appropriately. + * + * May set *errno* to **ENOSPC** to indicate that *keys* or + * *values* is too small to dump an entire bucket during + * iteration of a hash-based map type. + * + * BPF_MAP_LOOKUP_AND_DELETE_BATCH + * Description + * Iterate and delete all elements in a map. + * + * This operation has the same behavior as + * **BPF_MAP_LOOKUP_BATCH** with two exceptions: + * + * * Every element that is successfully returned is also deleted + * from the map. This is at least *count* elements. Note that + * *count* is both an input and an output parameter. + * * Upon returning with *errno* set to **EFAULT**, up to + * *count* elements may be deleted without returning the keys + * and values of the deleted elements. + * + * Return + * Returns zero on success. On error, -1 is returned and *errno* + * is set appropriately. + * + * BPF_MAP_UPDATE_BATCH + * Description + * Update multiple elements in a map by *key*. + * + * The *keys* and *values* are input parameters which must point + * to memory large enough to hold *count* items based on the key + * and value size of the map *map_fd*. The *keys* buffer must be + * of *key_size* * *count*. The *values* buffer must be of + * *value_size* * *count*. + * + * Each element specified in *keys* is sequentially updated to the + * value in the corresponding index in *values*. The *in_batch* + * and *out_batch* parameters are ignored and should be zeroed. + * + * The *elem_flags* argument should be specified as one of the + * following: + * + * **BPF_ANY** + * Create new elements or update a existing elements. + * **BPF_NOEXIST** + * Create new elements only if they do not exist. + * **BPF_EXIST** + * Update existing elements. + * **BPF_F_LOCK** + * Update spin_lock-ed map elements. This must be + * specified if the map value contains a spinlock. + * + * On success, *count* elements from the map are updated. + * + * If an error is returned and *errno* is not **EFAULT**, *count* + * is set to the number of successfully processed elements. + * + * Return + * Returns zero on success. On error, -1 is returned and *errno* + * is set appropriately. + * + * May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**, or + * **E2BIG**. **E2BIG** indicates that the number of elements in + * the map reached the *max_entries* limit specified at map + * creation time. + * + * May set *errno* to one of the following error codes under + * specific circumstances: + * + * **EEXIST** + * If *flags* specifies **BPF_NOEXIST** and the element + * with *key* already exists in the map. + * **ENOENT** + * If *flags* specifies **BPF_EXIST** and the element with + * *key* does not exist in the map. + * + * BPF_MAP_DELETE_BATCH + * Description + * Delete multiple elements in a map by *key*. + * + * The *keys* parameter is an input parameter which must point + * to memory large enough to hold *count* items based on the key + * size of the map *map_fd*, that is, *key_size* * *count*. + * + * Each element specified in *keys* is sequentially deleted. The + * *in_batch*, *out_batch*, and *values* parameters are ignored + * and should be zeroed. 
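
The in_batch/out_batch handshake used by the batch commands is easiest to see in code. A sketch for a hash map with 4-byte keys and 8-byte values; the opaque batch token is carried in a __u32 here, which matches what the generic hash-map batch implementation currently expects, and sys_bpf() is the usual syscall wrapper:

#include <linux/bpf.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>

static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

/* Dump a hash map (4-byte keys, 8-byte values) in chunks of 128 elements. */
static void dump_map(int map_fd)
{
	__u32 keys[128], token;
	__u64 vals[128];
	union bpf_attr attr;
	int err, first = 1;

	do {
		memset(&attr, 0, sizeof(attr));
		attr.batch.map_fd = map_fd;
		attr.batch.in_batch = first ? 0 : (__u64)(unsigned long)&token;
		attr.batch.out_batch = (__u64)(unsigned long)&token;
		attr.batch.keys = (__u64)(unsigned long)keys;
		attr.batch.values = (__u64)(unsigned long)vals;
		attr.batch.count = 128;

		err = sys_bpf(BPF_MAP_LOOKUP_BATCH, &attr, sizeof(attr));
		if (err && errno != ENOENT)
			break;			/* real failure */
		/* attr.batch.count now holds how many elements were copied */
		first = 0;
	} while (!err);				/* ENOENT marks the end of the map */
}

BPF_MAP_LOOKUP_AND_DELETE_BATCH and BPF_MAP_DELETE_BATCH take the same attributes; only the side effects differ.
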
+ * + * The *elem_flags* argument may be specified as one of the + * following: + * + * **BPF_F_LOCK** + * Look up the value of a spin-locked map without + * returning the lock. This must be specified if the + * elements contain a spinlock. + * + * On success, *count* elements from the map are updated. + * + * If an error is returned and *errno* is not **EFAULT**, *count* + * is set to the number of successfully processed elements. If + * *errno* is **EFAULT**, up to *count* elements may be been + * deleted. + * + * Return + * Returns zero on success. On error, -1 is returned and *errno* + * is set appropriately. + * + * BPF_LINK_CREATE + * Description + * Attach an eBPF program to a *target_fd* at the specified + * *attach_type* hook and return a file descriptor handle for + * managing the link. + * + * Return + * A new file descriptor (a nonnegative integer), or -1 if an + * error occurred (in which case, *errno* is set appropriately). + * + * BPF_LINK_UPDATE + * Description + * Update the eBPF program in the specified *link_fd* to + * *new_prog_fd*. + * + * Return + * Returns zero on success. On error, -1 is returned and *errno* + * is set appropriately. + * + * BPF_LINK_GET_FD_BY_ID + * Description + * Open a file descriptor for the eBPF Link corresponding to + * *link_id*. + * + * Return + * A new file descriptor (a nonnegative integer), or -1 if an + * error occurred (in which case, *errno* is set appropriately). + * + * BPF_LINK_GET_NEXT_ID + * Description + * Fetch the next eBPF link currently loaded into the kernel. + * + * Looks for the eBPF link with an id greater than *start_id* + * and updates *next_id* on success. If no other eBPF links + * remain with ids higher than *start_id*, returns -1 and sets + * *errno* to **ENOENT**. + * + * Return + * Returns zero on success. On error, or when no id remains, -1 + * is returned and *errno* is set appropriately. + * + * BPF_ENABLE_STATS + * Description + * Enable eBPF runtime statistics gathering. + * + * Runtime statistics gathering for the eBPF runtime is disabled + * by default to minimize the corresponding performance overhead. + * This command enables statistics globally. + * + * Multiple programs may independently enable statistics. + * After gathering the desired statistics, eBPF runtime statistics + * may be disabled again by calling **close**\ (2) for the file + * descriptor returned by this function. Statistics will only be + * disabled system-wide when all outstanding file descriptors + * returned by prior calls for this subcommand are closed. + * + * Return + * A new file descriptor (a nonnegative integer), or -1 if an + * error occurred (in which case, *errno* is set appropriately). + * + * BPF_ITER_CREATE + * Description + * Create an iterator on top of the specified *link_fd* (as + * previously created using **BPF_LINK_CREATE**) and return a + * file descriptor that can be used to trigger the iteration. + * + * If the resulting file descriptor is pinned to the filesystem + * using **BPF_OBJ_PIN**, then subsequent **read**\ (2) syscalls + * for that path will trigger the iterator to read kernel state + * using the eBPF program attached to *link_fd*. + * + * Return + * A new file descriptor (a nonnegative integer), or -1 if an + * error occurred (in which case, *errno* is set appropriately). + * + * BPF_LINK_DETACH + * Description + * Forcefully detach the specified *link_fd* from its + * corresponding attachment point. + * + * Return + * Returns zero on success. 
On error, -1 is returned and *errno* + * is set appropriately. + * + * BPF_PROG_BIND_MAP + * Description + * Bind a map to the lifetime of an eBPF program. + * + * The map identified by *map_fd* is bound to the program + * identified by *prog_fd* and only released when *prog_fd* is + * released. This may be used in cases where metadata should be + * associated with a program which otherwise does not contain any + * references to the map (for example, embedded in the eBPF + * program instructions). + * + * Return + * Returns zero on success. On error, -1 is returned and *errno* + * is set appropriately. + * + * NOTES + * eBPF objects (maps and programs) can be shared between processes. + * + * * After **fork**\ (2), the child inherits file descriptors + * referring to the same eBPF objects. + * * File descriptors referring to eBPF objects can be transferred over + * **unix**\ (7) domain sockets. + * * File descriptors referring to eBPF objects can be duplicated in the + * usual way, using **dup**\ (2) and similar calls. + * * File descriptors referring to eBPF objects can be pinned to the + * filesystem using the **BPF_OBJ_PIN** command of **bpf**\ (2). + * + * An eBPF object is deallocated only after all file descriptors referring + * to the object have been closed and no references remain pinned to the + * filesystem or attached (for example, bound to a program or device). + */ enum bpf_cmd { BPF_MAP_CREATE, BPF_MAP_LOOKUP_ELEM, @@ -247,6 +978,7 @@ enum bpf_attach_type { BPF_XDP_CPUMAP, BPF_SK_LOOKUP, BPF_XDP, + BPF_SK_SKB_VERDICT, __MAX_BPF_ATTACH_TYPE }; @@ -393,11 +1125,24 @@ enum bpf_link_type { * is struct/union. */ #define BPF_PSEUDO_BTF_ID 3 +/* insn[0].src_reg: BPF_PSEUDO_FUNC + * insn[0].imm: insn offset to the func + * insn[1].imm: 0 + * insn[0].off: 0 + * insn[1].off: 0 + * ldimm64 rewrite: address of the function + * verifier type: PTR_TO_FUNC. + */ +#define BPF_PSEUDO_FUNC 4 /* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative * offset to another bpf function */ #define BPF_PSEUDO_CALL 1 +/* when bpf_call->src_reg == BPF_PSEUDO_KFUNC_CALL, + * bpf_call->imm == btf_id of a BTF_KIND_FUNC in the running kernel + */ +#define BPF_PSEUDO_KFUNC_CALL 2 /* flags for BPF_MAP_UPDATE_ELEM command */ enum { @@ -720,7 +1465,7 @@ union bpf_attr { * parsed and used to produce a manual page. The workflow is the following, * and requires the rst2man utility: * - * $ ./scripts/bpf_helpers_doc.py \ + * $ ./scripts/bpf_doc.py \ * --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst * $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7 * $ man /tmp/bpf-helpers.7 @@ -1765,6 +2510,10 @@ union bpf_attr { * Use with ENCAP_L3/L4 flags to further specify the tunnel * type; *len* is the length of the inner MAC header. * + * * **BPF_F_ADJ_ROOM_ENCAP_L2_ETH**: + * Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the + * L2 type as Ethernet. + * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be @@ -3333,12 +4082,20 @@ union bpf_attr { * of new data availability is sent. * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification * of new data availability is sent unconditionally. + * If **0** is specified in *flags*, an adaptive notification + * of new data availability is sent. + * + * An adaptive notification is a notification sent whenever the user-space + * process has caught up and consumed all available payloads. 
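
The ring buffer notification flags are easiest to see from the program side. A minimal sketch of a BPF program emitting fixed-size events with the default (adaptive) notification behaviour; it assumes clang with -target bpf and libbpf's bpf_helpers.h for the SEC()/__uint() macros, and the tracepoint is chosen only for illustration:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct event {
	__u64 ts;
	__u32 pid;
};

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 256 * 1024);
} rb SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_execve")
int trace_execve(void *ctx)
{
	struct event *e;

	e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);	/* flags must be 0 here */
	if (!e)
		return 0;
	e->ts = bpf_ktime_get_ns();
	e->pid = bpf_get_current_pid_tgid() >> 32;
	bpf_ringbuf_submit(e, 0);	/* 0 flags: adaptive notification */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
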
In case the user-space + * process is still processing a previous payload, then no notification is needed + * as it will process the newly added payload automatically. * Return * 0 on success, or a negative error in case of failure. * * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags) * Description * Reserve *size* bytes of payload in a ring buffer *ringbuf*. + * *flags* must be 0. * Return * Valid pointer with *size* bytes of memory available; NULL, * otherwise. @@ -3350,6 +4107,10 @@ union bpf_attr { * of new data availability is sent. * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification * of new data availability is sent unconditionally. + * If **0** is specified in *flags*, an adaptive notification + * of new data availability is sent. + * + * See 'bpf_ringbuf_output()' for the definition of adaptive notification. * Return * Nothing. Always succeeds. * @@ -3360,6 +4121,10 @@ union bpf_attr { * of new data availability is sent. * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification * of new data availability is sent unconditionally. + * If **0** is specified in *flags*, an adaptive notification + * of new data availability is sent. + * + * See 'bpf_ringbuf_output()' for the definition of adaptive notification. * Return * Nothing. Always succeeds. * @@ -3915,6 +4680,61 @@ union bpf_attr { * * **BPF_MTU_CHK_RET_FRAG_NEEDED** * * **BPF_MTU_CHK_RET_SEGS_TOOBIG** * + * long bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn, void *callback_ctx, u64 flags) + * Description + * For each element in **map**, call **callback_fn** function with + * **map**, **callback_ctx** and other map-specific parameters. + * The **callback_fn** should be a static function and + * the **callback_ctx** should be a pointer to the stack. + * The **flags** is used to control certain aspects of the helper. + * Currently, the **flags** must be 0. + * + * The following are a list of supported map types and their + * respective expected callback signatures: + * + * BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_PERCPU_HASH, + * BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, + * BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PERCPU_ARRAY + * + * long (\*callback_fn)(struct bpf_map \*map, const void \*key, void \*value, void \*ctx); + * + * For per_cpu maps, the map_value is the value on the cpu where the + * bpf_prog is running. + * + * If **callback_fn** return 0, the helper will continue to the next + * element. If return value is 1, the helper will skip the rest of + * elements and return. Other return values are not used now. + * + * Return + * The number of traversed map elements for success, **-EINVAL** for + * invalid **flags**. + * + * long bpf_snprintf(char *str, u32 str_size, const char *fmt, u64 *data, u32 data_len) + * Description + * Outputs a string into the **str** buffer of size **str_size** + * based on a format string stored in a read-only map pointed by + * **fmt**. + * + * Each format specifier in **fmt** corresponds to one u64 element + * in the **data** array. For strings and pointers where pointees + * are accessed, only the pointer values are stored in the *data* + * array. The *data_len* is the size of *data* in bytes. + * + * Formats **%s** and **%p{i,I}{4,6}** require to read kernel + * memory. Reading kernel memory may fail due to either invalid + * address or valid address but requiring a major memory fault. If + * reading kernel memory fails, the string for **%s** will be an + * empty string, and the ip address for **%p{i,I}{4,6}** will be 0. 
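
Since the callback contract for bpf_for_each_map_elem() is the least obvious part of its description, a sketch of a BPF program summing an array map may help; it assumes clang with -target bpf, libbpf's bpf_helpers.h, and a tracepoint hook chosen only for illustration:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 256);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");

struct cb_ctx {
	__u64 sum;
};

struct bpf_map;

/* Callback: must be a static function; ctx points into the caller's stack. */
static long add_one(struct bpf_map *map, const void *key, void *value, void *ctx)
{
	struct cb_ctx *c = ctx;

	c->sum += *(__u64 *)value;
	return 0;			/* 0 = keep iterating, 1 = stop early */
}

SEC("tracepoint/syscalls/sys_enter_getpid")
int sum_counters(void *ctx)
{
	struct cb_ctx c = { .sum = 0 };

	bpf_for_each_map_elem(&counters, add_one, &c, 0);	/* flags must be 0 */
	bpf_printk("sum=%llu", c.sum);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
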
+ * Not returning error to bpf program is consistent with what + * **bpf_trace_printk**\ () does for now. + * + * Return + * The strictly positive length of the formatted string, including + * the trailing zero character. If the return value is greater than + * **str_size**, **str** contains a truncated string, guaranteed to + * be zero-terminated except when **str_size** is 0. + * + * Or **-EBUSY** if the per-CPU memory copy buffer is busy. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -4081,6 +4901,8 @@ union bpf_attr { FN(ima_inode_hash), \ FN(sock_from_file), \ FN(check_mtu), \ + FN(for_each_map_elem), \ + FN(snprintf), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -4174,6 +4996,7 @@ enum { BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3), BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4), BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5), + BPF_F_ADJ_ROOM_ENCAP_L2_ETH = (1ULL << 6), }; enum { @@ -4621,6 +5444,8 @@ struct bpf_link_info { } raw_tracepoint; struct { __u32 attach_type; + __u32 target_obj_id; /* prog_id for PROG_EXT, otherwise btf object id */ + __u32 target_btf_id; /* BTF type id inside the object */ } tracing; struct { __u64 cgroup_id; @@ -5211,7 +6036,10 @@ struct bpf_pidns_info { /* User accessible data for SK_LOOKUP programs. Add new fields at the end. */ struct bpf_sk_lookup { - __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */ + union { + __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */ + __u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */ + }; __u32 family; /* Protocol family (AF_INET, AF_INET6) */ __u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */ diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h index 5a667107ad2c..d27b1708efe9 100644 --- a/include/uapi/linux/btf.h +++ b/include/uapi/linux/btf.h @@ -52,7 +52,7 @@ struct btf_type { }; }; -#define BTF_INFO_KIND(info) (((info) >> 24) & 0x0f) +#define BTF_INFO_KIND(info) (((info) >> 24) & 0x1f) #define BTF_INFO_VLEN(info) ((info) & 0xffff) #define BTF_INFO_KFLAG(info) ((info) >> 31) @@ -72,7 +72,8 @@ struct btf_type { #define BTF_KIND_FUNC_PROTO 13 /* Function Proto */ #define BTF_KIND_VAR 14 /* Variable */ #define BTF_KIND_DATASEC 15 /* Section */ -#define BTF_KIND_MAX BTF_KIND_DATASEC +#define BTF_KIND_FLOAT 16 /* Floating point */ +#define BTF_KIND_MAX BTF_KIND_FLOAT #define NR_BTF_KINDS (BTF_KIND_MAX + 1) /* For some specific BTF_KIND, "struct btf_type" is immediately diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h index c6ca33034147..2ddb4226cd23 100644 --- a/include/uapi/linux/capability.h +++ b/include/uapi/linux/capability.h @@ -335,7 +335,8 @@ struct vfs_ns_cap_data { #define CAP_AUDIT_CONTROL 30 -/* Set or remove capabilities on files */ +/* Set or remove capabilities on files. + Map uid=0 into a child user namespace. */ #define CAP_SETFCAP 31 diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h index 7d1a06c52469..dc8879d179fd 100644 --- a/include/uapi/linux/cec.h +++ b/include/uapi/linux/cec.h @@ -396,6 +396,7 @@ struct cec_drm_connector_info { * associated with the CEC adapter. * @type: connector type (if any) * @drm: drm connector info + * @raw: array to pad the union */ struct cec_connector_info { __u32 type; @@ -453,7 +454,7 @@ struct cec_event_lost_msgs { * struct cec_event - CEC event structure * @ts: the timestamp of when the event was sent. * @event: the event. - * array. + * @flags: event flags. * @state_change: the event payload for CEC_EVENT_STATE_CHANGE. 
* @lost_msgs: the event payload for CEC_EVENT_LOST_MSGS. * @raw: array to pad the union. diff --git a/include/uapi/linux/cyclades.h b/include/uapi/linux/cyclades.h deleted file mode 100644 index fc0add2194a9..000000000000 --- a/include/uapi/linux/cyclades.h +++ /dev/null @@ -1,494 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* $Revision: 3.0 $$Date: 1998/11/02 14:20:59 $ - * linux/include/linux/cyclades.h - * - * This file was initially written by - * Randolph Bentson <bentson@grieg.seaslug.org> and is maintained by - * Ivan Passos <ivan@cyclades.com>. - * - * This file contains the general definitions for the cyclades.c driver - *$Log: cyclades.h,v $ - *Revision 3.1 2002/01/29 11:36:16 henrique - *added throttle field on struct cyclades_port to indicate whether the - *port is throttled or not - * - *Revision 3.1 2000/04/19 18:52:52 ivan - *converted address fields to unsigned long and added fields for physical - *addresses on cyclades_card structure; - * - *Revision 3.0 1998/11/02 14:20:59 ivan - *added nports field on cyclades_card structure; - * - *Revision 2.5 1998/08/03 16:57:01 ivan - *added cyclades_idle_stats structure; - * - *Revision 2.4 1998/06/01 12:09:53 ivan - *removed closing_wait2 from cyclades_port structure; - * - *Revision 2.3 1998/03/16 18:01:12 ivan - *changes in the cyclades_port structure to get it closer to the - *standard serial port structure; - *added constants for new ioctls; - * - *Revision 2.2 1998/02/17 16:50:00 ivan - *changes in the cyclades_port structure (addition of shutdown_wait and - *chip_rev variables); - *added constants for new ioctls and for CD1400 rev. numbers. - * - *Revision 2.1 1997/10/24 16:03:00 ivan - *added rflow (which allows enabling the CD1400 special flow control - *feature) and rtsdtr_inv (which allows DTR/RTS pin inversion) to - *cyclades_port structure; - *added Alpha support - * - *Revision 2.0 1997/06/30 10:30:00 ivan - *added some new doorbell command constants related to IOCTLW and - *UART error signaling - * - *Revision 1.8 1997/06/03 15:30:00 ivan - *added constant ZFIRM_HLT - *added constant CyPCI_Ze_win ( = 2 * Cy_PCI_Zwin) - * - *Revision 1.7 1997/03/26 10:30:00 daniel - *new entries at the end of cyclades_port struct to reallocate - *variables illegally allocated within card memory. - * - *Revision 1.6 1996/09/09 18:35:30 bentson - *fold in changes for Cyclom-Z -- including structures for - *communicating with board as well modest changes to original - *structures to support new features. - * - *Revision 1.5 1995/11/13 21:13:31 bentson - *changes suggested by Michael Chastain <mec@duracef.shout.net> - *to support use of this file in non-kernel applications - * - * - */ - -#ifndef _UAPI_LINUX_CYCLADES_H -#define _UAPI_LINUX_CYCLADES_H - -#include <linux/types.h> - -struct cyclades_monitor { - unsigned long int_count; - unsigned long char_count; - unsigned long char_max; - unsigned long char_last; -}; - -/* - * These stats all reflect activity since the device was last initialized. 
- * (i.e., since the port was opened with no other processes already having it - * open) - */ -struct cyclades_idle_stats { - __kernel_old_time_t in_use; /* Time device has been in use (secs) */ - __kernel_old_time_t recv_idle; /* Time since last char received (secs) */ - __kernel_old_time_t xmit_idle; /* Time since last char transmitted (secs) */ - unsigned long recv_bytes; /* Bytes received */ - unsigned long xmit_bytes; /* Bytes transmitted */ - unsigned long overruns; /* Input overruns */ - unsigned long frame_errs; /* Input framing errors */ - unsigned long parity_errs; /* Input parity errors */ -}; - -#define CYCLADES_MAGIC 0x4359 - -#define CYGETMON 0x435901 -#define CYGETTHRESH 0x435902 -#define CYSETTHRESH 0x435903 -#define CYGETDEFTHRESH 0x435904 -#define CYSETDEFTHRESH 0x435905 -#define CYGETTIMEOUT 0x435906 -#define CYSETTIMEOUT 0x435907 -#define CYGETDEFTIMEOUT 0x435908 -#define CYSETDEFTIMEOUT 0x435909 -#define CYSETRFLOW 0x43590a -#define CYGETRFLOW 0x43590b -#define CYSETRTSDTR_INV 0x43590c -#define CYGETRTSDTR_INV 0x43590d -#define CYZSETPOLLCYCLE 0x43590e -#define CYZGETPOLLCYCLE 0x43590f -#define CYGETCD1400VER 0x435910 -#define CYSETWAIT 0x435912 -#define CYGETWAIT 0x435913 - -/*************** CYCLOM-Z ADDITIONS ***************/ - -#define CZIOC ('M' << 8) -#define CZ_NBOARDS (CZIOC|0xfa) -#define CZ_BOOT_START (CZIOC|0xfb) -#define CZ_BOOT_DATA (CZIOC|0xfc) -#define CZ_BOOT_END (CZIOC|0xfd) -#define CZ_TEST (CZIOC|0xfe) - -#define CZ_DEF_POLL (HZ/25) - -#define MAX_BOARD 4 /* Max number of boards */ -#define MAX_DEV 256 /* Max number of ports total */ -#define CYZ_MAX_SPEED 921600 - -#define CYZ_FIFO_SIZE 16 - -#define CYZ_BOOT_NWORDS 0x100 -struct CYZ_BOOT_CTRL { - unsigned short nboard; - int status[MAX_BOARD]; - int nchannel[MAX_BOARD]; - int fw_rev[MAX_BOARD]; - unsigned long offset; - unsigned long data[CYZ_BOOT_NWORDS]; -}; - - -#ifndef DP_WINDOW_SIZE -/* - * Memory Window Sizes - */ - -#define DP_WINDOW_SIZE (0x00080000) /* window size 512 Kb */ -#define ZE_DP_WINDOW_SIZE (0x00100000) /* window size 1 Mb (Ze and - 8Zo V.2 */ -#define CTRL_WINDOW_SIZE (0x00000080) /* runtime regs 128 bytes */ - -/* - * CUSTOM_REG - Cyclom-Z/PCI Custom Registers Set. The driver - * normally will access only interested on the fpga_id, fpga_version, - * start_cpu and stop_cpu. - */ - -struct CUSTOM_REG { - __u32 fpga_id; /* FPGA Identification Register */ - __u32 fpga_version; /* FPGA Version Number Register */ - __u32 cpu_start; /* CPU start Register (write) */ - __u32 cpu_stop; /* CPU stop Register (write) */ - __u32 misc_reg; /* Miscellaneous Register */ - __u32 idt_mode; /* IDT mode Register */ - __u32 uart_irq_status; /* UART IRQ status Register */ - __u32 clear_timer0_irq; /* Clear timer interrupt Register */ - __u32 clear_timer1_irq; /* Clear timer interrupt Register */ - __u32 clear_timer2_irq; /* Clear timer interrupt Register */ - __u32 test_register; /* Test Register */ - __u32 test_count; /* Test Count Register */ - __u32 timer_select; /* Timer select register */ - __u32 pr_uart_irq_status; /* Prioritized UART IRQ stat Reg */ - __u32 ram_wait_state; /* RAM wait-state Register */ - __u32 uart_wait_state; /* UART wait-state Register */ - __u32 timer_wait_state; /* timer wait-state Register */ - __u32 ack_wait_state; /* ACK wait State Register */ -}; - -/* - * RUNTIME_9060 - PLX PCI9060ES local configuration and shared runtime - * registers. This structure can be used to access the 9060 registers - * (memory mapped). 
- */ - -struct RUNTIME_9060 { - __u32 loc_addr_range; /* 00h - Local Address Range */ - __u32 loc_addr_base; /* 04h - Local Address Base */ - __u32 loc_arbitr; /* 08h - Local Arbitration */ - __u32 endian_descr; /* 0Ch - Big/Little Endian Descriptor */ - __u32 loc_rom_range; /* 10h - Local ROM Range */ - __u32 loc_rom_base; /* 14h - Local ROM Base */ - __u32 loc_bus_descr; /* 18h - Local Bus descriptor */ - __u32 loc_range_mst; /* 1Ch - Local Range for Master to PCI */ - __u32 loc_base_mst; /* 20h - Local Base for Master PCI */ - __u32 loc_range_io; /* 24h - Local Range for Master IO */ - __u32 pci_base_mst; /* 28h - PCI Base for Master PCI */ - __u32 pci_conf_io; /* 2Ch - PCI configuration for Master IO */ - __u32 filler1; /* 30h */ - __u32 filler2; /* 34h */ - __u32 filler3; /* 38h */ - __u32 filler4; /* 3Ch */ - __u32 mail_box_0; /* 40h - Mail Box 0 */ - __u32 mail_box_1; /* 44h - Mail Box 1 */ - __u32 mail_box_2; /* 48h - Mail Box 2 */ - __u32 mail_box_3; /* 4Ch - Mail Box 3 */ - __u32 filler5; /* 50h */ - __u32 filler6; /* 54h */ - __u32 filler7; /* 58h */ - __u32 filler8; /* 5Ch */ - __u32 pci_doorbell; /* 60h - PCI to Local Doorbell */ - __u32 loc_doorbell; /* 64h - Local to PCI Doorbell */ - __u32 intr_ctrl_stat; /* 68h - Interrupt Control/Status */ - __u32 init_ctrl; /* 6Ch - EEPROM control, Init Control, etc */ -}; - -/* Values for the Local Base Address re-map register */ - -#define WIN_RAM 0x00000001L /* set the sliding window to RAM */ -#define WIN_CREG 0x14000001L /* set the window to custom Registers */ - -/* Values timer select registers */ - -#define TIMER_BY_1M 0x00 /* clock divided by 1M */ -#define TIMER_BY_256K 0x01 /* clock divided by 256k */ -#define TIMER_BY_128K 0x02 /* clock divided by 128k */ -#define TIMER_BY_32K 0x03 /* clock divided by 32k */ - -/****************** ****************** *******************/ -#endif - -#ifndef ZFIRM_ID -/* #include "zfwint.h" */ -/****************** ****************** *******************/ -/* - * This file contains the definitions for interfacing with the - * Cyclom-Z ZFIRM Firmware. - */ - -/* General Constant definitions */ - -#define MAX_CHAN 64 /* max number of channels per board */ - -/* firmware id structure (set after boot) */ - -#define ID_ADDRESS 0x00000180L /* signature/pointer address */ -#define ZFIRM_ID 0x5557465AL /* ZFIRM/U signature */ -#define ZFIRM_HLT 0x59505B5CL /* ZFIRM needs external power supply */ -#define ZFIRM_RST 0x56040674L /* RST signal (due to FW reset) */ - -#define ZF_TINACT_DEF 1000 /* default inactivity timeout - (1000 ms) */ -#define ZF_TINACT ZF_TINACT_DEF - -struct FIRM_ID { - __u32 signature; /* ZFIRM/U signature */ - __u32 zfwctrl_addr; /* pointer to ZFW_CTRL structure */ -}; - -/* Op. 
System id */ - -#define C_OS_LINUX 0x00000030 /* generic Linux system */ - -/* channel op_mode */ - -#define C_CH_DISABLE 0x00000000 /* channel is disabled */ -#define C_CH_TXENABLE 0x00000001 /* channel Tx enabled */ -#define C_CH_RXENABLE 0x00000002 /* channel Rx enabled */ -#define C_CH_ENABLE 0x00000003 /* channel Tx/Rx enabled */ -#define C_CH_LOOPBACK 0x00000004 /* Loopback mode */ - -/* comm_parity - parity */ - -#define C_PR_NONE 0x00000000 /* None */ -#define C_PR_ODD 0x00000001 /* Odd */ -#define C_PR_EVEN 0x00000002 /* Even */ -#define C_PR_MARK 0x00000004 /* Mark */ -#define C_PR_SPACE 0x00000008 /* Space */ -#define C_PR_PARITY 0x000000ff - -#define C_PR_DISCARD 0x00000100 /* discard char with frame/par error */ -#define C_PR_IGNORE 0x00000200 /* ignore frame/par error */ - -/* comm_data_l - data length and stop bits */ - -#define C_DL_CS5 0x00000001 -#define C_DL_CS6 0x00000002 -#define C_DL_CS7 0x00000004 -#define C_DL_CS8 0x00000008 -#define C_DL_CS 0x0000000f -#define C_DL_1STOP 0x00000010 -#define C_DL_15STOP 0x00000020 -#define C_DL_2STOP 0x00000040 -#define C_DL_STOP 0x000000f0 - -/* interrupt enabling/status */ - -#define C_IN_DISABLE 0x00000000 /* zero, disable interrupts */ -#define C_IN_TXBEMPTY 0x00000001 /* tx buffer empty */ -#define C_IN_TXLOWWM 0x00000002 /* tx buffer below LWM */ -#define C_IN_RXHIWM 0x00000010 /* rx buffer above HWM */ -#define C_IN_RXNNDT 0x00000020 /* rx no new data timeout */ -#define C_IN_MDCD 0x00000100 /* modem DCD change */ -#define C_IN_MDSR 0x00000200 /* modem DSR change */ -#define C_IN_MRI 0x00000400 /* modem RI change */ -#define C_IN_MCTS 0x00000800 /* modem CTS change */ -#define C_IN_RXBRK 0x00001000 /* Break received */ -#define C_IN_PR_ERROR 0x00002000 /* parity error */ -#define C_IN_FR_ERROR 0x00004000 /* frame error */ -#define C_IN_OVR_ERROR 0x00008000 /* overrun error */ -#define C_IN_RXOFL 0x00010000 /* RX buffer overflow */ -#define C_IN_IOCTLW 0x00020000 /* I/O control w/ wait */ -#define C_IN_MRTS 0x00040000 /* modem RTS drop */ -#define C_IN_ICHAR 0x00080000 - -/* flow control */ - -#define C_FL_OXX 0x00000001 /* output Xon/Xoff flow control */ -#define C_FL_IXX 0x00000002 /* output Xon/Xoff flow control */ -#define C_FL_OIXANY 0x00000004 /* output Xon/Xoff (any xon) */ -#define C_FL_SWFLOW 0x0000000f - -/* flow status */ - -#define C_FS_TXIDLE 0x00000000 /* no Tx data in the buffer or UART */ -#define C_FS_SENDING 0x00000001 /* UART is sending data */ -#define C_FS_SWFLOW 0x00000002 /* Tx is stopped by received Xoff */ - -/* rs_control/rs_status RS-232 signals */ - -#define C_RS_PARAM 0x80000000 /* Indicates presence of parameter in - IOCTLM command */ -#define C_RS_RTS 0x00000001 /* RTS */ -#define C_RS_DTR 0x00000004 /* DTR */ -#define C_RS_DCD 0x00000100 /* CD */ -#define C_RS_DSR 0x00000200 /* DSR */ -#define C_RS_RI 0x00000400 /* RI */ -#define C_RS_CTS 0x00000800 /* CTS */ - -/* commands Host <-> Board */ - -#define C_CM_RESET 0x01 /* reset/flush buffers */ -#define C_CM_IOCTL 0x02 /* re-read CH_CTRL */ -#define C_CM_IOCTLW 0x03 /* re-read CH_CTRL, intr when done */ -#define C_CM_IOCTLM 0x04 /* RS-232 outputs change */ -#define C_CM_SENDXOFF 0x10 /* send Xoff */ -#define C_CM_SENDXON 0x11 /* send Xon */ -#define C_CM_CLFLOW 0x12 /* Clear flow control (resume) */ -#define C_CM_SENDBRK 0x41 /* send break */ -#define C_CM_INTBACK 0x42 /* Interrupt back */ -#define C_CM_SET_BREAK 0x43 /* Tx break on */ -#define C_CM_CLR_BREAK 0x44 /* Tx break off */ -#define C_CM_CMD_DONE 0x45 /* Previous command done */ -#define 
C_CM_INTBACK2 0x46 /* Alternate Interrupt back */ -#define C_CM_TINACT 0x51 /* set inactivity detection */ -#define C_CM_IRQ_ENBL 0x52 /* enable generation of interrupts */ -#define C_CM_IRQ_DSBL 0x53 /* disable generation of interrupts */ -#define C_CM_ACK_ENBL 0x54 /* enable acknowledged interrupt mode */ -#define C_CM_ACK_DSBL 0x55 /* disable acknowledged intr mode */ -#define C_CM_FLUSH_RX 0x56 /* flushes Rx buffer */ -#define C_CM_FLUSH_TX 0x57 /* flushes Tx buffer */ -#define C_CM_Q_ENABLE 0x58 /* enables queue access from the - driver */ -#define C_CM_Q_DISABLE 0x59 /* disables queue access from the - driver */ - -#define C_CM_TXBEMPTY 0x60 /* Tx buffer is empty */ -#define C_CM_TXLOWWM 0x61 /* Tx buffer low water mark */ -#define C_CM_RXHIWM 0x62 /* Rx buffer high water mark */ -#define C_CM_RXNNDT 0x63 /* rx no new data timeout */ -#define C_CM_TXFEMPTY 0x64 -#define C_CM_ICHAR 0x65 -#define C_CM_MDCD 0x70 /* modem DCD change */ -#define C_CM_MDSR 0x71 /* modem DSR change */ -#define C_CM_MRI 0x72 /* modem RI change */ -#define C_CM_MCTS 0x73 /* modem CTS change */ -#define C_CM_MRTS 0x74 /* modem RTS drop */ -#define C_CM_RXBRK 0x84 /* Break received */ -#define C_CM_PR_ERROR 0x85 /* Parity error */ -#define C_CM_FR_ERROR 0x86 /* Frame error */ -#define C_CM_OVR_ERROR 0x87 /* Overrun error */ -#define C_CM_RXOFL 0x88 /* RX buffer overflow */ -#define C_CM_CMDERROR 0x90 /* command error */ -#define C_CM_FATAL 0x91 /* fatal error */ -#define C_CM_HW_RESET 0x92 /* reset board */ - -/* - * CH_CTRL - This per port structure contains all parameters - * that control an specific port. It can be seen as the - * configuration registers of a "super-serial-controller". - */ - -struct CH_CTRL { - __u32 op_mode; /* operation mode */ - __u32 intr_enable; /* interrupt masking */ - __u32 sw_flow; /* SW flow control */ - __u32 flow_status; /* output flow status */ - __u32 comm_baud; /* baud rate - numerically specified */ - __u32 comm_parity; /* parity */ - __u32 comm_data_l; /* data length/stop */ - __u32 comm_flags; /* other flags */ - __u32 hw_flow; /* HW flow control */ - __u32 rs_control; /* RS-232 outputs */ - __u32 rs_status; /* RS-232 inputs */ - __u32 flow_xon; /* xon char */ - __u32 flow_xoff; /* xoff char */ - __u32 hw_overflow; /* hw overflow counter */ - __u32 sw_overflow; /* sw overflow counter */ - __u32 comm_error; /* frame/parity error counter */ - __u32 ichar; - __u32 filler[7]; -}; - - -/* - * BUF_CTRL - This per channel structure contains - * all Tx and Rx buffer control for a given channel. - */ - -struct BUF_CTRL { - __u32 flag_dma; /* buffers are in Host memory */ - __u32 tx_bufaddr; /* address of the tx buffer */ - __u32 tx_bufsize; /* tx buffer size */ - __u32 tx_threshold; /* tx low water mark */ - __u32 tx_get; /* tail index tx buf */ - __u32 tx_put; /* head index tx buf */ - __u32 rx_bufaddr; /* address of the rx buffer */ - __u32 rx_bufsize; /* rx buffer size */ - __u32 rx_threshold; /* rx high water mark */ - __u32 rx_get; /* tail index rx buf */ - __u32 rx_put; /* head index rx buf */ - __u32 filler[5]; /* filler to align structures */ -}; - -/* - * BOARD_CTRL - This per board structure contains all global - * control fields related to the board. 
- */ - -struct BOARD_CTRL { - - /* static info provided by the on-board CPU */ - __u32 n_channel; /* number of channels */ - __u32 fw_version; /* firmware version */ - - /* static info provided by the driver */ - __u32 op_system; /* op_system id */ - __u32 dr_version; /* driver version */ - - /* board control area */ - __u32 inactivity; /* inactivity control */ - - /* host to FW commands */ - __u32 hcmd_channel; /* channel number */ - __u32 hcmd_param; /* pointer to parameters */ - - /* FW to Host commands */ - __u32 fwcmd_channel; /* channel number */ - __u32 fwcmd_param; /* pointer to parameters */ - __u32 zf_int_queue_addr; /* offset for INT_QUEUE structure */ - - /* filler so the structures are aligned */ - __u32 filler[6]; -}; - -/* Host Interrupt Queue */ - -#define QUEUE_SIZE (10*MAX_CHAN) - -struct INT_QUEUE { - unsigned char intr_code[QUEUE_SIZE]; - unsigned long channel[QUEUE_SIZE]; - unsigned long param[QUEUE_SIZE]; - unsigned long put; - unsigned long get; -}; - -/* - * ZFW_CTRL - This is the data structure that includes all other - * data structures used by the Firmware. - */ - -struct ZFW_CTRL { - struct BOARD_CTRL board_ctrl; - struct CH_CTRL ch_ctrl[MAX_CHAN]; - struct BUF_CTRL buf_ctrl[MAX_CHAN]; -}; - -/****************** ****************** *******************/ -#endif - -#endif /* _UAPI_LINUX_CYCLADES_H */ diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h index fcff6669137b..e5c6e458bdf7 100644 --- a/include/uapi/linux/dm-ioctl.h +++ b/include/uapi/linux/dm-ioctl.h @@ -193,8 +193,22 @@ struct dm_name_list { __u32 next; /* offset to the next record from the _start_ of this */ char name[0]; + + /* + * The following members can be accessed by taking a pointer that + * points immediately after the terminating zero character in "name" + * and aligning this pointer to next 8-byte boundary. + * Uuid is present if the flag DM_NAME_LIST_FLAG_HAS_UUID is set. 
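
A user-space sketch of how a DM_LIST_DEVICES consumer might walk the extended dm_name_list record layout spelled out in this comment (the event_nr/flags/uuid members follow the terminating NUL of "name" after 8-byte alignment). It assumes a dm-ioctl.h new enough to define DM_NAME_LIST_FLAG_HAS_UUID and a kernel that appends the extended members; the function name is illustrative:

#include <linux/dm-ioctl.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Print one dm_name_list record and its optional uuid, if flagged. */
static void print_entry(const struct dm_name_list *nl)
{
	const char *end = nl->name + strlen(nl->name) + 1;	/* past the NUL */
	uintptr_t p = ((uintptr_t)end + 7) & ~(uintptr_t)7;	/* 8-byte align */
	uint32_t event_nr = *(const uint32_t *)p;
	uint32_t flags = *(const uint32_t *)(p + 4);

	printf("dev %llx name %s event %u", (unsigned long long)nl->dev,
	       nl->name, event_nr);
	if (flags & DM_NAME_LIST_FLAG_HAS_UUID)
		printf(" uuid %s", (const char *)(p + 8));
	printf("\n");
}
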
+ * + * __u32 event_nr; + * __u32 flags; + * char uuid[0]; + */ }; +#define DM_NAME_LIST_FLAG_HAS_UUID 1 +#define DM_NAME_LIST_FLAG_DOESNT_HAVE_UUID 2 + /* * Used to retrieve the target versions */ @@ -272,9 +286,9 @@ enum { #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 44 +#define DM_VERSION_MINOR 45 #define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2021-02-01)" +#define DM_VERSION_EXTRA "-ioctl (2021-03-22)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ diff --git a/include/uapi/linux/dqblk_xfs.h b/include/uapi/linux/dqblk_xfs.h index c71d909addda..8cda3e62e0e7 100644 --- a/include/uapi/linux/dqblk_xfs.h +++ b/include/uapi/linux/dqblk_xfs.h @@ -219,7 +219,10 @@ struct fs_quota_statv { __s32 qs_rtbtimelimit;/* limit for rt blks timer */ __u16 qs_bwarnlimit; /* limit for num warnings */ __u16 qs_iwarnlimit; /* limit for num warnings */ - __u64 qs_pad2[8]; /* for future proofing */ + __u16 qs_rtbwarnlimit;/* limit for rt blks warnings */ + __u16 qs_pad3; + __u32 qs_pad4; + __u64 qs_pad2[7]; /* for future proofing */ }; #endif /* _LINUX_DQBLK_XFS_H */ diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index 30f68b42eeb5..61bf4774b8f2 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -426,6 +426,7 @@ typedef struct elf64_shdr { #define NT_ARM_PACA_KEYS 0x407 /* ARM pointer authentication address keys */ #define NT_ARM_PACG_KEYS 0x408 /* ARM pointer authentication generic key */ #define NT_ARM_TAGGED_ADDR_CTRL 0x409 /* arm64 tagged address control (prctl()) */ +#define NT_ARM_PAC_ENABLED_KEYS 0x40a /* arm64 ptr auth enabled keys (prctl()) */ #define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */ #define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */ #define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */ diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 5afea692a3f7..cfef6b08169a 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -669,6 +669,11 @@ enum ethtool_link_ext_substate_cable_issue { * @ETH_SS_TS_TX_TYPES: timestamping Tx types * @ETH_SS_TS_RX_FILTERS: timestamping Rx filters * @ETH_SS_UDP_TUNNEL_TYPES: UDP tunnel types + * @ETH_SS_STATS_STD: standardized stats + * @ETH_SS_STATS_ETH_PHY: names of IEEE 802.3 PHY statistics + * @ETH_SS_STATS_ETH_MAC: names of IEEE 802.3 MAC statistics + * @ETH_SS_STATS_ETH_CTRL: names of IEEE 802.3 MAC Control statistics + * @ETH_SS_STATS_RMON: names of RMON statistics * * @ETH_SS_COUNT: number of defined string sets */ @@ -689,6 +694,11 @@ enum ethtool_stringset { ETH_SS_TS_TX_TYPES, ETH_SS_TS_RX_FILTERS, ETH_SS_UDP_TUNNEL_TYPES, + ETH_SS_STATS_STD, + ETH_SS_STATS_ETH_PHY, + ETH_SS_STATS_ETH_MAC, + ETH_SS_STATS_ETH_CTRL, + ETH_SS_STATS_RMON, /* add new constants above here */ ETH_SS_COUNT @@ -1383,15 +1393,33 @@ struct ethtool_per_queue_op { }; /** - * struct ethtool_fecparam - Ethernet forward error correction(fec) parameters + * struct ethtool_fecparam - Ethernet Forward Error Correction parameters * @cmd: Command number = %ETHTOOL_GFECPARAM or %ETHTOOL_SFECPARAM - * @active_fec: FEC mode which is active on porte - * @fec: Bitmask of supported/configured FEC modes - * @rsvd: Reserved for future extensions. i.e FEC bypass feature. + * @active_fec: FEC mode which is active on the port, single bit set, GET only. + * @fec: Bitmask of configured FEC modes. 
+ * @reserved: Reserved for future extensions, ignore on GET, write 0 for SET. * - * Drivers should reject a non-zero setting of @autoneg when - * autoneogotiation is disabled (or not supported) for the link. + * Note that @reserved was never validated on input and ethtool user space + * left it uninitialized when calling SET. Hence going forward it can only be + * used to return a value to userspace with GET. + * + * FEC modes supported by the device can be read via %ETHTOOL_GLINKSETTINGS. + * FEC settings are configured by link autonegotiation whenever it's enabled. + * With autoneg on %ETHTOOL_GFECPARAM can be used to read the current mode. + * + * When autoneg is disabled %ETHTOOL_SFECPARAM controls the FEC settings. + * It is recommended that drivers only accept a single bit set in @fec. + * When multiple bits are set in @fec drivers may pick mode in an implementation + * dependent way. Drivers should reject mixing %ETHTOOL_FEC_AUTO_BIT with other + * FEC modes, because it's unclear whether in this case other modes constrain + * AUTO or are independent choices. + * Drivers must reject SET requests if they support none of the requested modes. + * + * If device does not support FEC drivers may use %ETHTOOL_FEC_NONE instead + * of returning %EOPNOTSUPP from %ETHTOOL_GFECPARAM. * + * See enum ethtool_fec_config_bits for definition of valid bits for both + * @fec and @active_fec. */ struct ethtool_fecparam { __u32 cmd; @@ -1403,11 +1431,16 @@ struct ethtool_fecparam { /** * enum ethtool_fec_config_bits - flags definition of ethtool_fec_configuration - * @ETHTOOL_FEC_NONE: FEC mode configuration is not supported - * @ETHTOOL_FEC_AUTO: Default/Best FEC mode provided by driver - * @ETHTOOL_FEC_OFF: No FEC Mode - * @ETHTOOL_FEC_RS: Reed-Solomon Forward Error Detection mode - * @ETHTOOL_FEC_BASER: Base-R/Reed-Solomon Forward Error Detection mode + * @ETHTOOL_FEC_NONE_BIT: FEC mode configuration is not supported. Should not + * be used together with other bits. GET only. + * @ETHTOOL_FEC_AUTO_BIT: Select default/best FEC mode automatically, usually + * based link mode and SFP parameters read from module's + * EEPROM. This bit does _not_ mean autonegotiation. 
+ * @ETHTOOL_FEC_OFF_BIT: No FEC Mode + * @ETHTOOL_FEC_RS_BIT: Reed-Solomon FEC Mode + * @ETHTOOL_FEC_BASER_BIT: Base-R/Reed-Solomon FEC Mode + * @ETHTOOL_FEC_LLRS_BIT: Low Latency Reed Solomon FEC Mode (25G/50G Ethernet + * Consortium) */ enum ethtool_fec_config_bits { ETHTOOL_FEC_NONE_BIT, diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index a286635ac9b8..825cfda1c5d5 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -42,6 +42,10 @@ enum { ETHTOOL_MSG_CABLE_TEST_ACT, ETHTOOL_MSG_CABLE_TEST_TDR_ACT, ETHTOOL_MSG_TUNNEL_INFO_GET, + ETHTOOL_MSG_FEC_GET, + ETHTOOL_MSG_FEC_SET, + ETHTOOL_MSG_MODULE_EEPROM_GET, + ETHTOOL_MSG_STATS_GET, /* add new constants above here */ __ETHTOOL_MSG_USER_CNT, @@ -80,6 +84,10 @@ enum { ETHTOOL_MSG_CABLE_TEST_NTF, ETHTOOL_MSG_CABLE_TEST_TDR_NTF, ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY, + ETHTOOL_MSG_FEC_GET_REPLY, + ETHTOOL_MSG_FEC_NTF, + ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY, + ETHTOOL_MSG_STATS_GET_REPLY, /* add new constants above here */ __ETHTOOL_MSG_KERNEL_CNT, @@ -629,6 +637,185 @@ enum { ETHTOOL_A_TUNNEL_INFO_MAX = (__ETHTOOL_A_TUNNEL_INFO_CNT - 1) }; +/* FEC */ + +enum { + ETHTOOL_A_FEC_UNSPEC, + ETHTOOL_A_FEC_HEADER, /* nest - _A_HEADER_* */ + ETHTOOL_A_FEC_MODES, /* bitset */ + ETHTOOL_A_FEC_AUTO, /* u8 */ + ETHTOOL_A_FEC_ACTIVE, /* u32 */ + ETHTOOL_A_FEC_STATS, /* nest - _A_FEC_STAT */ + + __ETHTOOL_A_FEC_CNT, + ETHTOOL_A_FEC_MAX = (__ETHTOOL_A_FEC_CNT - 1) +}; + +enum { + ETHTOOL_A_FEC_STAT_UNSPEC, + ETHTOOL_A_FEC_STAT_PAD, + + ETHTOOL_A_FEC_STAT_CORRECTED, /* array, u64 */ + ETHTOOL_A_FEC_STAT_UNCORR, /* array, u64 */ + ETHTOOL_A_FEC_STAT_CORR_BITS, /* array, u64 */ + + /* add new constants above here */ + __ETHTOOL_A_FEC_STAT_CNT, + ETHTOOL_A_FEC_STAT_MAX = (__ETHTOOL_A_FEC_STAT_CNT - 1) +}; + +/* MODULE EEPROM */ + +enum { + ETHTOOL_A_MODULE_EEPROM_UNSPEC, + ETHTOOL_A_MODULE_EEPROM_HEADER, /* nest - _A_HEADER_* */ + + ETHTOOL_A_MODULE_EEPROM_OFFSET, /* u32 */ + ETHTOOL_A_MODULE_EEPROM_LENGTH, /* u32 */ + ETHTOOL_A_MODULE_EEPROM_PAGE, /* u8 */ + ETHTOOL_A_MODULE_EEPROM_BANK, /* u8 */ + ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS, /* u8 */ + ETHTOOL_A_MODULE_EEPROM_DATA, /* nested */ + + __ETHTOOL_A_MODULE_EEPROM_CNT, + ETHTOOL_A_MODULE_EEPROM_MAX = (__ETHTOOL_A_MODULE_EEPROM_CNT - 1) +}; + +/* STATS */ + +enum { + ETHTOOL_A_STATS_UNSPEC, + ETHTOOL_A_STATS_PAD, + ETHTOOL_A_STATS_HEADER, /* nest - _A_HEADER_* */ + ETHTOOL_A_STATS_GROUPS, /* bitset */ + + ETHTOOL_A_STATS_GRP, /* nest - _A_STATS_GRP_* */ + + /* add new constants above here */ + __ETHTOOL_A_STATS_CNT, + ETHTOOL_A_STATS_MAX = (__ETHTOOL_A_STATS_CNT - 1) +}; + +enum { + ETHTOOL_STATS_ETH_PHY, + ETHTOOL_STATS_ETH_MAC, + ETHTOOL_STATS_ETH_CTRL, + ETHTOOL_STATS_RMON, + + /* add new constants above here */ + __ETHTOOL_STATS_CNT +}; + +enum { + ETHTOOL_A_STATS_GRP_UNSPEC, + ETHTOOL_A_STATS_GRP_PAD, + + ETHTOOL_A_STATS_GRP_ID, /* u32 */ + ETHTOOL_A_STATS_GRP_SS_ID, /* u32 */ + + ETHTOOL_A_STATS_GRP_STAT, /* nest */ + + ETHTOOL_A_STATS_GRP_HIST_RX, /* nest */ + ETHTOOL_A_STATS_GRP_HIST_TX, /* nest */ + + ETHTOOL_A_STATS_GRP_HIST_BKT_LOW, /* u32 */ + ETHTOOL_A_STATS_GRP_HIST_BKT_HI, /* u32 */ + ETHTOOL_A_STATS_GRP_HIST_VAL, /* u64 */ + + /* add new constants above here */ + __ETHTOOL_A_STATS_GRP_CNT, + ETHTOOL_A_STATS_GRP_MAX = (__ETHTOOL_A_STATS_CNT - 1) +}; + +enum { + /* 30.3.2.1.5 aSymbolErrorDuringCarrier */ + ETHTOOL_A_STATS_ETH_PHY_5_SYM_ERR, + + /* add new constants above here */ + __ETHTOOL_A_STATS_ETH_PHY_CNT, + 
ETHTOOL_A_STATS_ETH_PHY_MAX = (__ETHTOOL_A_STATS_ETH_PHY_CNT - 1) +}; + +enum { + /* 30.3.1.1.2 aFramesTransmittedOK */ + ETHTOOL_A_STATS_ETH_MAC_2_TX_PKT, + /* 30.3.1.1.3 aSingleCollisionFrames */ + ETHTOOL_A_STATS_ETH_MAC_3_SINGLE_COL, + /* 30.3.1.1.4 aMultipleCollisionFrames */ + ETHTOOL_A_STATS_ETH_MAC_4_MULTI_COL, + /* 30.3.1.1.5 aFramesReceivedOK */ + ETHTOOL_A_STATS_ETH_MAC_5_RX_PKT, + /* 30.3.1.1.6 aFrameCheckSequenceErrors */ + ETHTOOL_A_STATS_ETH_MAC_6_FCS_ERR, + /* 30.3.1.1.7 aAlignmentErrors */ + ETHTOOL_A_STATS_ETH_MAC_7_ALIGN_ERR, + /* 30.3.1.1.8 aOctetsTransmittedOK */ + ETHTOOL_A_STATS_ETH_MAC_8_TX_BYTES, + /* 30.3.1.1.9 aFramesWithDeferredXmissions */ + ETHTOOL_A_STATS_ETH_MAC_9_TX_DEFER, + /* 30.3.1.1.10 aLateCollisions */ + ETHTOOL_A_STATS_ETH_MAC_10_LATE_COL, + /* 30.3.1.1.11 aFramesAbortedDueToXSColls */ + ETHTOOL_A_STATS_ETH_MAC_11_XS_COL, + /* 30.3.1.1.12 aFramesLostDueToIntMACXmitError */ + ETHTOOL_A_STATS_ETH_MAC_12_TX_INT_ERR, + /* 30.3.1.1.13 aCarrierSenseErrors */ + ETHTOOL_A_STATS_ETH_MAC_13_CS_ERR, + /* 30.3.1.1.14 aOctetsReceivedOK */ + ETHTOOL_A_STATS_ETH_MAC_14_RX_BYTES, + /* 30.3.1.1.15 aFramesLostDueToIntMACRcvError */ + ETHTOOL_A_STATS_ETH_MAC_15_RX_INT_ERR, + + /* 30.3.1.1.18 aMulticastFramesXmittedOK */ + ETHTOOL_A_STATS_ETH_MAC_18_TX_MCAST, + /* 30.3.1.1.19 aBroadcastFramesXmittedOK */ + ETHTOOL_A_STATS_ETH_MAC_19_TX_BCAST, + /* 30.3.1.1.20 aFramesWithExcessiveDeferral */ + ETHTOOL_A_STATS_ETH_MAC_20_XS_DEFER, + /* 30.3.1.1.21 aMulticastFramesReceivedOK */ + ETHTOOL_A_STATS_ETH_MAC_21_RX_MCAST, + /* 30.3.1.1.22 aBroadcastFramesReceivedOK */ + ETHTOOL_A_STATS_ETH_MAC_22_RX_BCAST, + /* 30.3.1.1.23 aInRangeLengthErrors */ + ETHTOOL_A_STATS_ETH_MAC_23_IR_LEN_ERR, + /* 30.3.1.1.24 aOutOfRangeLengthField */ + ETHTOOL_A_STATS_ETH_MAC_24_OOR_LEN, + /* 30.3.1.1.25 aFrameTooLongErrors */ + ETHTOOL_A_STATS_ETH_MAC_25_TOO_LONG_ERR, + + /* add new constants above here */ + __ETHTOOL_A_STATS_ETH_MAC_CNT, + ETHTOOL_A_STATS_ETH_MAC_MAX = (__ETHTOOL_A_STATS_ETH_MAC_CNT - 1) +}; + +enum { + /* 30.3.3.3 aMACControlFramesTransmitted */ + ETHTOOL_A_STATS_ETH_CTRL_3_TX, + /* 30.3.3.4 aMACControlFramesReceived */ + ETHTOOL_A_STATS_ETH_CTRL_4_RX, + /* 30.3.3.5 aUnsupportedOpcodesReceived */ + ETHTOOL_A_STATS_ETH_CTRL_5_RX_UNSUP, + + /* add new constants above here */ + __ETHTOOL_A_STATS_ETH_CTRL_CNT, + ETHTOOL_A_STATS_ETH_CTRL_MAX = (__ETHTOOL_A_STATS_ETH_CTRL_CNT - 1) +}; + +enum { + /* etherStatsUndersizePkts */ + ETHTOOL_A_STATS_RMON_UNDERSIZE, + /* etherStatsOversizePkts */ + ETHTOOL_A_STATS_RMON_OVERSIZE, + /* etherStatsFragments */ + ETHTOOL_A_STATS_RMON_FRAG, + /* etherStatsJabbers */ + ETHTOOL_A_STATS_RMON_JABBER, + + /* add new constants above here */ + __ETHTOOL_A_STATS_RMON_CNT, + ETHTOOL_A_STATS_RMON_MAX = (__ETHTOOL_A_STATS_RMON_CNT - 1) +}; + /* generic netlink info */ #define ETHTOOL_GENL_NAME "ethtool" #define ETHTOOL_GENL_VERSION 1 diff --git a/include/uapi/linux/fd.h b/include/uapi/linux/fd.h index 8b80c63b971c..7022e3413dbc 100644 --- a/include/uapi/linux/fd.h +++ b/include/uapi/linux/fd.h @@ -49,11 +49,11 @@ struct floppy_struct { #define FDCLRPRM _IO(2, 0x41) /* clear user-defined parameters */ -#define FDSETPRM _IOW(2, 0x42, struct floppy_struct) +#define FDSETPRM _IOW(2, 0x42, struct floppy_struct) #define FDSETMEDIAPRM FDSETPRM /* set user-defined parameters for current media */ -#define FDDEFPRM _IOW(2, 0x43, struct floppy_struct) +#define FDDEFPRM _IOW(2, 0x43, struct floppy_struct) #define FDGETPRM _IOR(2, 0x04, struct floppy_struct) #define 
FDDEFMEDIAPRM FDDEFPRM #define FDGETMEDIAPRM FDGETPRM @@ -65,7 +65,7 @@ struct floppy_struct { /* issue/don't issue kernel messages on media type change */ -/* +/* * Formatting (obsolete) */ #define FD_FILL_BYTE 0xF6 /* format fill byte. */ @@ -126,13 +126,13 @@ typedef char floppy_drive_name[16]; */ struct floppy_drive_params { signed char cmos; /* CMOS type */ - - /* Spec2 is (HLD<<1 | ND), where HLD is head load time (1=2ms, 2=4 ms + + /* Spec2 is (HLD<<1 | ND), where HLD is head load time (1=2ms, 2=4 ms * etc) and ND is set means no DMA. Hardcoded to 6 (HLD=6ms, use DMA). */ unsigned long max_dtr; /* Step rate, usec */ unsigned long hlt; /* Head load/settle time, msec */ - unsigned long hut; /* Head unload time (remnant of + unsigned long hut; /* Head unload time (remnant of * 8" drives) */ unsigned long srt; /* Step rate, usec */ @@ -145,12 +145,12 @@ struct floppy_drive_params { unsigned char rps; /* rotations per second */ unsigned char tracks; /* maximum number of tracks */ unsigned long timeout; /* timeout for interrupt requests */ - - unsigned char interleave_sect; /* if there are more sectors, use + + unsigned char interleave_sect; /* if there are more sectors, use * interleave */ - + struct floppy_max_errors max_errors; - + char flags; /* various flags, including ftd_msg */ /* * Announce successful media type detection and media information loss after @@ -162,7 +162,7 @@ struct floppy_drive_params { #define FD_BROKEN_DCL 0x20 #define FD_DEBUG 0x02 #define FD_SILENT_DCL_CLEAR 0x4 -#define FD_INVERTED_DCL 0x80 /* must be 0x80, because of hardware +#define FD_INVERTED_DCL 0x80 /* must be 0x80, because of hardware considerations */ char read_track; /* use readtrack during probing? */ @@ -176,8 +176,8 @@ struct floppy_drive_params { #define FD_AUTODETECT_SIZE 8 short autodetect[FD_AUTODETECT_SIZE]; /* autodetected formats */ - - int checkfreq; /* how often should the drive be checked for disk + + int checkfreq; /* how often should the drive be checked for disk * changes */ int native_format; /* native format of this drive */ }; @@ -225,13 +225,13 @@ struct floppy_drive_struct { * decremented after each probe. */ int keep_data; - + /* Prevent "aliased" accesses. */ int fd_ref; int fd_device; - unsigned long last_checked; /* when was the drive last checked for a disk + unsigned long last_checked; /* when was the drive last checked for a disk * change? */ - + char *dmabuf; int bufblocks; }; @@ -255,7 +255,7 @@ enum reset_mode { /* * FDC state */ -struct floppy_fdc_state { +struct floppy_fdc_state { int spec1; /* spec1 value last used */ int spec2; /* spec2 value last used */ int dtr; @@ -302,16 +302,16 @@ struct floppy_write_errors { * to the user process are not counted. 
*/ - unsigned int write_errors; /* number of physical write errors + unsigned int write_errors; /* number of physical write errors * encountered */ - + /* position of first and last write errors */ unsigned long first_error_sector; int first_error_generation; unsigned long last_error_sector; int last_error_generation; - - unsigned int badness; /* highest retry count for a read or write + + unsigned int badness; /* highest retry count for a read or write * operation */ }; @@ -335,7 +335,7 @@ struct floppy_raw_cmd { #define FD_RAW_DISK_CHANGE 4 /* out: disk change flag was set */ #define FD_RAW_INTR 8 /* wait for an interrupt */ #define FD_RAW_SPIN 0x10 /* spin up the disk for this command */ -#define FD_RAW_NO_MOTOR_AFTER 0x20 /* switch the motor off after command +#define FD_RAW_NO_MOTOR_AFTER 0x20 /* switch the motor off after command * completion */ #define FD_RAW_NEED_DISK 0x40 /* this command needs a disk to be present */ #define FD_RAW_NEED_SEEK 0x80 /* this command uses an implied seek (soft) */ @@ -353,7 +353,7 @@ struct floppy_raw_cmd { void __user *data; char *kernel_data; /* location of data buffer in the kernel */ - struct floppy_raw_cmd *next; /* used for chaining of raw cmd's + struct floppy_raw_cmd *next; /* used for chaining of raw cmd's * within the kernel */ long length; /* in: length of dma transfer. out: remaining bytes */ long phys_length; /* physical length, if different from dma length */ diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index f44eb0a04afd..4c32e97dcdf0 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -185,7 +185,7 @@ struct fsxattr { #define BLKROTATIONAL _IO(0x12,126) #define BLKZEROOUT _IO(0x12,127) /* - * A jump here: 130-131 are reserved for zoned block devices + * A jump here: 130-136 are reserved for zoned block devices * (see uapi/linux/blkzoned.h) */ diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index 54442612c48b..271ae90a9bb7 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -179,6 +179,8 @@ * 7.33 * - add FUSE_HANDLE_KILLPRIV_V2, FUSE_WRITE_KILL_SUIDGID, FATTR_KILL_SUIDGID * - add FUSE_OPEN_KILL_SUIDGID + * - extend fuse_setxattr_in, add FUSE_SETXATTR_EXT + * - add FUSE_SETXATTR_ACL_KILL_SGID */ #ifndef _LINUX_FUSE_H @@ -330,6 +332,7 @@ struct fuse_file_lock { * does not have CAP_FSETID. Additionally upon * write/truncate sgid is killed only if file has group * execute permission. (Same as Linux VFS behavior). 
+ * FUSE_SETXATTR_EXT: Server supports extended struct fuse_setxattr_in */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -360,6 +363,7 @@ struct fuse_file_lock { #define FUSE_MAP_ALIGNMENT (1 << 26) #define FUSE_SUBMOUNTS (1 << 27) #define FUSE_HANDLE_KILLPRIV_V2 (1 << 28) +#define FUSE_SETXATTR_EXT (1 << 29) /** * CUSE INIT request/reply flags @@ -451,6 +455,12 @@ struct fuse_file_lock { */ #define FUSE_OPEN_KILL_SUIDGID (1 << 0) +/** + * setxattr flags + * FUSE_SETXATTR_ACL_KILL_SGID: Clear SGID when system.posix_acl_access is set + */ +#define FUSE_SETXATTR_ACL_KILL_SGID (1 << 0) + enum fuse_opcode { FUSE_LOOKUP = 1, FUSE_FORGET = 2, /* no reply */ @@ -681,9 +691,13 @@ struct fuse_fsync_in { uint32_t padding; }; +#define FUSE_COMPAT_SETXATTR_IN_SIZE 8 + struct fuse_setxattr_in { uint32_t size; uint32_t flags; + uint32_t setxattr_flags; + uint32_t padding; }; struct fuse_getxattr_in { diff --git a/include/uapi/linux/icmp.h b/include/uapi/linux/icmp.h index fb169a50895e..c1da8244c5e1 100644 --- a/include/uapi/linux/icmp.h +++ b/include/uapi/linux/icmp.h @@ -20,6 +20,9 @@ #include <linux/types.h> #include <asm/byteorder.h> +#include <linux/in.h> +#include <linux/if.h> +#include <linux/in6.h> #define ICMP_ECHOREPLY 0 /* Echo Reply */ #define ICMP_DEST_UNREACH 3 /* Destination Unreachable */ @@ -66,6 +69,23 @@ #define ICMP_EXC_TTL 0 /* TTL count exceeded */ #define ICMP_EXC_FRAGTIME 1 /* Fragment Reass time exceeded */ +/* Codes for EXT_ECHO (PROBE) */ +#define ICMP_EXT_ECHO 42 +#define ICMP_EXT_ECHOREPLY 43 +#define ICMP_EXT_CODE_MAL_QUERY 1 /* Malformed Query */ +#define ICMP_EXT_CODE_NO_IF 2 /* No such Interface */ +#define ICMP_EXT_CODE_NO_TABLE_ENT 3 /* No such Table Entry */ +#define ICMP_EXT_CODE_MULT_IFS 4 /* Multiple Interfaces Satisfy Query */ + +/* Constants for EXT_ECHO (PROBE) */ +#define ICMP_EXT_ECHOREPLY_ACTIVE (1 << 2)/* active bit in reply message */ +#define ICMP_EXT_ECHOREPLY_IPV4 (1 << 1)/* ipv4 bit in reply message */ +#define ICMP_EXT_ECHOREPLY_IPV6 1 /* ipv6 bit in reply message */ +#define ICMP_EXT_ECHO_CTYPE_NAME 1 +#define ICMP_EXT_ECHO_CTYPE_INDEX 2 +#define ICMP_EXT_ECHO_CTYPE_ADDR 3 +#define ICMP_AFI_IP 1 /* Address Family Identifier for ipv4 */ +#define ICMP_AFI_IP6 2 /* Address Family Identifier for ipv6 */ struct icmphdr { __u8 type; @@ -118,4 +138,26 @@ struct icmp_extobj_hdr { __u8 class_type; }; +/* RFC 8335: 2.1 Header for c-type 3 payload */ +struct icmp_ext_echo_ctype3_hdr { + __be16 afi; + __u8 addrlen; + __u8 reserved; +}; + +/* RFC 8335: 2.1 Interface Identification Object */ +struct icmp_ext_echo_iio { + struct icmp_extobj_hdr extobj_hdr; + union { + char name[IFNAMSIZ]; + __be32 ifindex; + struct { + struct icmp_ext_echo_ctype3_hdr ctype3_hdr; + union { + struct in_addr ipv4_addr; + struct in6_addr ipv6_addr; + } ip_addr; + } addr; + } ident; +}; #endif /* _UAPI_LINUX_ICMP_H */ diff --git a/include/uapi/linux/icmpv6.h b/include/uapi/linux/icmpv6.h index 0564fd7ccde4..ecaece3af38d 100644 --- a/include/uapi/linux/icmpv6.h +++ b/include/uapi/linux/icmpv6.h @@ -140,6 +140,9 @@ struct icmp6hdr { #define ICMPV6_UNK_OPTION 2 #define ICMPV6_HDR_INCOMP 3 +/* Codes for EXT_ECHO (PROBE) */ +#define ICMPV6_EXT_ECHO_REQUEST 160 +#define ICMPV6_EXT_ECHO_REPLY 161 /* * constants for (set|get)sockopt */ diff --git a/include/uapi/linux/if_bonding.h b/include/uapi/linux/if_bonding.h index e8eb4ad03cf1..d174914a837d 100644 --- a/include/uapi/linux/if_bonding.h +++ b/include/uapi/linux/if_bonding.h @@ -153,14 +153,3 @@ enum { #define 
BOND_3AD_STAT_MAX (__BOND_3AD_STAT_MAX - 1) #endif /* _LINUX_IF_BONDING_H */ - -/* - * Local variables: - * version-control: t - * kept-new-versions: 5 - * c-indent-level: 8 - * c-basic-offset: 8 - * tab-width: 8 - * End: - */ - diff --git a/include/uapi/linux/if_fddi.h b/include/uapi/linux/if_fddi.h index 7239aa9c0766..8df2d9934bcd 100644 --- a/include/uapi/linux/if_fddi.h +++ b/include/uapi/linux/if_fddi.h @@ -9,7 +9,7 @@ * Version: @(#)if_fddi.h 1.0.3 Oct 6 2018 * * Author: Lawrence V. Stefani, <stefani@yahoo.com> - * Maintainer: Maciej W. Rozycki, <macro@linux-mips.org> + * Maintainer: Maciej W. Rozycki, <macro@orcam.me.uk> * * if_fddi.h is based on previous if_ether.h and if_tr.h work by * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 91c8dda6d95d..cd5b382a4138 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -614,6 +614,7 @@ enum macvlan_macaddr_mode { }; #define MACVLAN_FLAG_NOPROMISC 1 +#define MACVLAN_FLAG_NODST 2 /* skip dst macvlan if matching src macvlan */ /* VRF section */ enum { diff --git a/include/uapi/linux/iio/buffer.h b/include/uapi/linux/iio/buffer.h new file mode 100644 index 000000000000..13939032b3f6 --- /dev/null +++ b/include/uapi/linux/iio/buffer.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* industrial I/O buffer definitions needed both in and out of kernel + */ + +#ifndef _UAPI_IIO_BUFFER_H_ +#define _UAPI_IIO_BUFFER_H_ + +#define IIO_BUFFER_GET_FD_IOCTL _IOWR('i', 0x91, int) + +#endif /* _UAPI_IIO_BUFFER_H_ */ diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 2514eb6b1cf2..e1ae46683301 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -160,6 +160,21 @@ enum { #define SPLICE_F_FD_IN_FIXED (1U << 31) /* the last bit of __u32 */ /* + * POLL_ADD flags. Note that since sqe->poll_events is the flag space, the + * command flags for POLL_ADD are stored in sqe->len. + * + * IORING_POLL_ADD_MULTI Multishot poll. Sets IORING_CQE_F_MORE if + * the poll handler will continue to report + * CQEs on behalf of the same SQE. + * + * IORING_POLL_UPDATE Update existing poll request, matching + * sqe->addr as the old user_data field. 
+ */ +#define IORING_POLL_ADD_MULTI (1U << 0) +#define IORING_POLL_UPDATE_EVENTS (1U << 1) +#define IORING_POLL_UPDATE_USER_DATA (1U << 2) + +/* * IO completion data structure (Completion Queue Entry) */ struct io_uring_cqe { @@ -172,8 +187,10 @@ struct io_uring_cqe { * cqe->flags * * IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID + * IORING_CQE_F_MORE If set, parent SQE will generate more CQE entries */ #define IORING_CQE_F_BUFFER (1U << 0) +#define IORING_CQE_F_MORE (1U << 1) enum { IORING_CQE_BUFFER_SHIFT = 16, @@ -281,6 +298,8 @@ enum { IORING_UNREGISTER_PERSONALITY = 10, IORING_REGISTER_RESTRICTIONS = 11, IORING_REGISTER_ENABLE_RINGS = 12, + IORING_REGISTER_RSRC = 13, + IORING_REGISTER_RSRC_UPDATE = 14, /* this goes last */ IORING_REGISTER_LAST @@ -293,12 +312,33 @@ struct io_uring_files_update { __aligned_u64 /* __s32 * */ fds; }; +enum { + IORING_RSRC_FILE = 0, + IORING_RSRC_BUFFER = 1, +}; + +struct io_uring_rsrc_register { + __u32 type; + __u32 nr; + __aligned_u64 data; + __aligned_u64 tags; +}; + struct io_uring_rsrc_update { __u32 offset; __u32 resv; __aligned_u64 data; }; +struct io_uring_rsrc_update2 { + __u32 offset; + __u32 resv; + __aligned_u64 data; + __aligned_u64 tags; + __u32 type; + __u32 nr; +}; + /* Skip updating fd indexes set to this value in the fd table */ #define IORING_REGISTER_FILES_SKIP (-2) diff --git a/include/uapi/linux/iommu.h b/include/uapi/linux/iommu.h index e1d9e75f2c94..59178fc229ca 100644 --- a/include/uapi/linux/iommu.h +++ b/include/uapi/linux/iommu.h @@ -288,7 +288,8 @@ struct iommu_gpasid_bind_data_vtd { #define IOMMU_SVA_VTD_GPASID_PWT (1 << 3) /* page-level write through */ #define IOMMU_SVA_VTD_GPASID_EMTE (1 << 4) /* extended mem type enable */ #define IOMMU_SVA_VTD_GPASID_CD (1 << 5) /* PASID-level cache disable */ -#define IOMMU_SVA_VTD_GPASID_LAST (1 << 6) +#define IOMMU_SVA_VTD_GPASID_WPE (1 << 6) /* Write protect enable */ +#define IOMMU_SVA_VTD_GPASID_LAST (1 << 7) __u64 flags; __u32 pat; __u32 emt; diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h index 05669c87a0af..778dc191c265 100644 --- a/include/uapi/linux/kexec.h +++ b/include/uapi/linux/kexec.h @@ -42,6 +42,7 @@ #define KEXEC_ARCH_MIPS_LE (10 << 16) #define KEXEC_ARCH_MIPS ( 8 << 16) #define KEXEC_ARCH_AARCH64 (183 << 16) +#define KEXEC_ARCH_RISCV (243 << 16) /* The artificial cap on the number of segments passed to kexec_load. */ #define KEXEC_SEGMENT_MAX 16 diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 695b606da4b1..bf5e7d7846dd 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -29,9 +29,10 @@ /* * - 1.1 - initial version * - 1.3 - Add SMI events support + * - 1.4 - Indicate new SRAM EDC bit in device properties */ #define KFD_IOCTL_MAJOR_VERSION 1 -#define KFD_IOCTL_MINOR_VERSION 3 +#define KFD_IOCTL_MINOR_VERSION 4 struct kfd_ioctl_get_version_args { __u32 major_version; /* from KFD */ @@ -351,6 +352,7 @@ struct kfd_ioctl_acquire_vm_args { #define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28) #define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27) #define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT (1 << 26) +#define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED (1 << 25) /* Allocate memory for later SVM (shared virtual memory) mapping. 
* diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index f6afee209620..3fd9a7e9d90c 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1078,6 +1078,10 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_DIRTY_LOG_RING 192 #define KVM_CAP_X86_BUS_LOCK_EXIT 193 #define KVM_CAP_PPC_DAWR1 194 +#define KVM_CAP_SET_GUEST_DEBUG2 195 +#define KVM_CAP_SGX_ATTRIBUTE 196 +#define KVM_CAP_VM_COPY_ENC_CONTEXT_FROM 197 +#define KVM_CAP_PTP_KVM 198 #ifdef KVM_CAP_IRQ_ROUTING @@ -1671,6 +1675,8 @@ enum sev_cmd_id { KVM_SEV_CERT_EXPORT, /* Attestation report */ KVM_SEV_GET_ATTESTATION_REPORT, + /* Guest Migration Extension */ + KVM_SEV_SEND_CANCEL, KVM_SEV_NR_MAX, }; @@ -1729,6 +1735,45 @@ struct kvm_sev_attestation_report { __u32 len; }; +struct kvm_sev_send_start { + __u32 policy; + __u64 pdh_cert_uaddr; + __u32 pdh_cert_len; + __u64 plat_certs_uaddr; + __u32 plat_certs_len; + __u64 amd_certs_uaddr; + __u32 amd_certs_len; + __u64 session_uaddr; + __u32 session_len; +}; + +struct kvm_sev_send_update_data { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 guest_uaddr; + __u32 guest_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + +struct kvm_sev_receive_start { + __u32 handle; + __u32 policy; + __u64 pdh_uaddr; + __u32 pdh_len; + __u64 session_uaddr; + __u32 session_len; +}; + +struct kvm_sev_receive_update_data { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 guest_uaddr; + __u32 guest_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) #define KVM_DEV_ASSIGN_MASK_INTX (1 << 2) diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h new file mode 100644 index 000000000000..b3d952067f59 --- /dev/null +++ b/include/uapi/linux/landlock.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Landlock - User space API + * + * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net> + * Copyright © 2018-2020 ANSSI + */ + +#ifndef _UAPI_LINUX_LANDLOCK_H +#define _UAPI_LINUX_LANDLOCK_H + +#include <linux/types.h> + +/** + * struct landlock_ruleset_attr - Ruleset definition + * + * Argument of sys_landlock_create_ruleset(). This structure can grow in + * future versions. + */ +struct landlock_ruleset_attr { + /** + * @handled_access_fs: Bitmask of actions (cf. `Filesystem flags`_) + * that is handled by this ruleset and should then be forbidden if no + * rule explicitly allow them. This is needed for backward + * compatibility reasons. + */ + __u64 handled_access_fs; +}; + +/* + * sys_landlock_create_ruleset() flags: + * + * - %LANDLOCK_CREATE_RULESET_VERSION: Get the highest supported Landlock ABI + * version. + */ +#define LANDLOCK_CREATE_RULESET_VERSION (1U << 0) + +/** + * enum landlock_rule_type - Landlock rule type + * + * Argument of sys_landlock_add_rule(). + */ +enum landlock_rule_type { + /** + * @LANDLOCK_RULE_PATH_BENEATH: Type of a &struct + * landlock_path_beneath_attr . + */ + LANDLOCK_RULE_PATH_BENEATH = 1, +}; + +/** + * struct landlock_path_beneath_attr - Path hierarchy definition + * + * Argument of sys_landlock_add_rule(). + */ +struct landlock_path_beneath_attr { + /** + * @allowed_access: Bitmask of allowed actions for this file hierarchy + * (cf. `Filesystem flags`_). + */ + __u64 allowed_access; + /** + * @parent_fd: File descriptor, open with ``O_PATH``, which identifies + * the parent directory of a file hierarchy, or just a file. + */ + __s32 parent_fd; + /* + * This struct is packed to avoid trailing reserved members. + * Cf. 
security/landlock/syscalls.c:build_check_abi() + */ +} __attribute__((packed)); + +/** + * DOC: fs_access + * + * A set of actions on kernel objects may be defined by an attribute (e.g. + * &struct landlock_path_beneath_attr) including a bitmask of access. + * + * Filesystem flags + * ~~~~~~~~~~~~~~~~ + * + * These flags enable to restrict a sandboxed process to a set of actions on + * files and directories. Files or directories opened before the sandboxing + * are not subject to these restrictions. + * + * A file can only receive these access rights: + * + * - %LANDLOCK_ACCESS_FS_EXECUTE: Execute a file. + * - %LANDLOCK_ACCESS_FS_WRITE_FILE: Open a file with write access. + * - %LANDLOCK_ACCESS_FS_READ_FILE: Open a file with read access. + * + * A directory can receive access rights related to files or directories. The + * following access right is applied to the directory itself, and the + * directories beneath it: + * + * - %LANDLOCK_ACCESS_FS_READ_DIR: Open a directory or list its content. + * + * However, the following access rights only apply to the content of a + * directory, not the directory itself: + * + * - %LANDLOCK_ACCESS_FS_REMOVE_DIR: Remove an empty directory or rename one. + * - %LANDLOCK_ACCESS_FS_REMOVE_FILE: Unlink (or rename) a file. + * - %LANDLOCK_ACCESS_FS_MAKE_CHAR: Create (or rename or link) a character + * device. + * - %LANDLOCK_ACCESS_FS_MAKE_DIR: Create (or rename) a directory. + * - %LANDLOCK_ACCESS_FS_MAKE_REG: Create (or rename or link) a regular file. + * - %LANDLOCK_ACCESS_FS_MAKE_SOCK: Create (or rename or link) a UNIX domain + * socket. + * - %LANDLOCK_ACCESS_FS_MAKE_FIFO: Create (or rename or link) a named pipe. + * - %LANDLOCK_ACCESS_FS_MAKE_BLOCK: Create (or rename or link) a block device. + * - %LANDLOCK_ACCESS_FS_MAKE_SYM: Create (or rename or link) a symbolic link. + * + * .. warning:: + * + * It is currently not possible to restrict some file-related actions + * accessible through these syscall families: :manpage:`chdir(2)`, + * :manpage:`truncate(2)`, :manpage:`stat(2)`, :manpage:`flock(2)`, + * :manpage:`chmod(2)`, :manpage:`chown(2)`, :manpage:`setxattr(2)`, + * :manpage:`utime(2)`, :manpage:`ioctl(2)`, :manpage:`fcntl(2)`, + * :manpage:`access(2)`. + * Future Landlock evolutions will enable to restrict them. 
+ */ +#define LANDLOCK_ACCESS_FS_EXECUTE (1ULL << 0) +#define LANDLOCK_ACCESS_FS_WRITE_FILE (1ULL << 1) +#define LANDLOCK_ACCESS_FS_READ_FILE (1ULL << 2) +#define LANDLOCK_ACCESS_FS_READ_DIR (1ULL << 3) +#define LANDLOCK_ACCESS_FS_REMOVE_DIR (1ULL << 4) +#define LANDLOCK_ACCESS_FS_REMOVE_FILE (1ULL << 5) +#define LANDLOCK_ACCESS_FS_MAKE_CHAR (1ULL << 6) +#define LANDLOCK_ACCESS_FS_MAKE_DIR (1ULL << 7) +#define LANDLOCK_ACCESS_FS_MAKE_REG (1ULL << 8) +#define LANDLOCK_ACCESS_FS_MAKE_SOCK (1ULL << 9) +#define LANDLOCK_ACCESS_FS_MAKE_FIFO (1ULL << 10) +#define LANDLOCK_ACCESS_FS_MAKE_BLOCK (1ULL << 11) +#define LANDLOCK_ACCESS_FS_MAKE_SYM (1ULL << 12) + +#endif /* _UAPI_LINUX_LANDLOCK_H */ diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h index ead2e72e5c88..2745afd9b8fa 100644 --- a/include/uapi/linux/lightnvm.h +++ b/include/uapi/linux/lightnvm.h @@ -22,7 +22,6 @@ #ifdef __KERNEL__ #include <linux/const.h> -#include <linux/ioctl.h> #else /* __KERNEL__ */ #include <stdio.h> #include <sys/ioctl.h> diff --git a/include/uapi/linux/major.h b/include/uapi/linux/major.h index 7e5fa8e15c43..4e5f2b3a3d54 100644 --- a/include/uapi/linux/major.h +++ b/include/uapi/linux/major.h @@ -34,8 +34,6 @@ #define GOLDSTAR_CDROM_MAJOR 16 #define OPTICS_CDROM_MAJOR 17 #define SANYO_CDROM_MAJOR 18 -#define CYCLADES_MAJOR 19 -#define CYCLADESAUX_MAJOR 20 #define MITSUMI_X_CDROM_MAJOR 20 #define MFM_ACORN_MAJOR 21 /* ARM Linux /dev/mfm */ #define SCSI_GENERIC_MAJOR 21 diff --git a/include/uapi/linux/map_to_7segment.h b/include/uapi/linux/map_to_7segment.h index 8b02088f96e3..04c8b55812e7 100644 --- a/include/uapi/linux/map_to_7segment.h +++ b/include/uapi/linux/map_to_7segment.h @@ -1,20 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ /* * Copyright (c) 2005 Henk Vergonet <Henk.Vergonet@gmail.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of - * the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef MAP_TO_7SEGMENT_H diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h index 3f302e2523b2..bdf77dffa5a4 100644 --- a/include/uapi/linux/mdio.h +++ b/include/uapi/linux/mdio.h @@ -120,6 +120,8 @@ #define MDIO_PMA_SPEED_100 0x0020 /* 100M capable */ #define MDIO_PMA_SPEED_10 0x0040 /* 10M capable */ #define MDIO_PCS_SPEED_10P2B 0x0002 /* 10PASS-TS/2BASE-TL capable */ +#define MDIO_PCS_SPEED_2_5G 0x0040 /* 2.5G capable */ +#define MDIO_PCS_SPEED_5G 0x0080 /* 5G capable */ /* Device present registers. 
*/ #define MDIO_DEVS_PRESENT(devad) (1 << (devad)) diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h index 8948467b3992..4832fd0b5642 100644 --- a/include/uapi/linux/mempolicy.h +++ b/include/uapi/linux/mempolicy.h @@ -64,5 +64,12 @@ enum { #define MPOL_F_MOF (1 << 3) /* this policy wants migrate on fault */ #define MPOL_F_MORON (1 << 4) /* Migrate On protnone Reference On Node */ +/* + * These bit locations are exposed in the vm.zone_reclaim_mode sysctl + * ABI. New bits are OK, but existing bits can never change. + */ +#define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */ +#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */ +#define RECLAIM_UNMAP (1<<2) /* Unmap pages during reclaim */ #endif /* _UAPI_LINUX_MEMPOLICY_H */ diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h index e1172c1ffdfd..8eb3c0844bff 100644 --- a/include/uapi/linux/mptcp.h +++ b/include/uapi/linux/mptcp.h @@ -174,10 +174,21 @@ enum mptcp_event_attr { MPTCP_ATTR_FLAGS, /* u16 */ MPTCP_ATTR_TIMEOUT, /* u32 */ MPTCP_ATTR_IF_IDX, /* s32 */ + MPTCP_ATTR_RESET_REASON,/* u32 */ + MPTCP_ATTR_RESET_FLAGS, /* u32 */ __MPTCP_ATTR_AFTER_LAST }; #define MPTCP_ATTR_MAX (__MPTCP_ATTR_AFTER_LAST - 1) +/* MPTCP Reset reason codes, rfc8684 */ +#define MPTCP_RST_EUNSPEC 0 +#define MPTCP_RST_EMPTCP 1 +#define MPTCP_RST_ERESOURCE 2 +#define MPTCP_RST_EPROHIBIT 3 +#define MPTCP_RST_EWQ2BIG 4 +#define MPTCP_RST_EBADPERF 5 +#define MPTCP_RST_EMIDDLEBOX 6 + #endif /* _UAPI_MPTCP_H */ diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 79bab7a36b30..1fb4ca18ffbb 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -398,9 +398,11 @@ enum nft_set_attributes { * enum nft_set_elem_flags - nf_tables set element flags * * @NFT_SET_ELEM_INTERVAL_END: element ends the previous interval + * @NFT_SET_ELEM_CATCHALL: special catch-all element */ enum nft_set_elem_flags { NFT_SET_ELEM_INTERVAL_END = 0x1, + NFT_SET_ELEM_CATCHALL = 0x2, }; /** @@ -1014,11 +1016,13 @@ enum nft_rt_attributes { * * @NFTA_SOCKET_KEY: socket key to match * @NFTA_SOCKET_DREG: destination register + * @NFTA_SOCKET_LEVEL: cgroups2 ancestor level (only for cgroupsv2) */ enum nft_socket_attributes { NFTA_SOCKET_UNSPEC, NFTA_SOCKET_KEY, NFTA_SOCKET_DREG, + NFTA_SOCKET_LEVEL, __NFTA_SOCKET_MAX }; #define NFTA_SOCKET_MAX (__NFTA_SOCKET_MAX - 1) @@ -1029,11 +1033,13 @@ enum nft_socket_attributes { * @NFT_SOCKET_TRANSPARENT: Value of the IP(V6)_TRANSPARENT socket option * @NFT_SOCKET_MARK: Value of the socket mark * @NFT_SOCKET_WILDCARD: Whether the socket is zero-bound (e.g. 
0.0.0.0 or ::0) + * @NFT_SOCKET_CGROUPV2: Match on cgroups version 2 */ enum nft_socket_keys { NFT_SOCKET_TRANSPARENT, NFT_SOCKET_MARK, NFT_SOCKET_WILDCARD, + NFT_SOCKET_CGROUPV2, __NFT_SOCKET_MAX }; #define NFT_SOCKET_MAX (__NFT_SOCKET_MAX - 1) diff --git a/include/uapi/linux/netfilter/xt_SECMARK.h b/include/uapi/linux/netfilter/xt_SECMARK.h index 1f2a708413f5..beb2cadba8a9 100644 --- a/include/uapi/linux/netfilter/xt_SECMARK.h +++ b/include/uapi/linux/netfilter/xt_SECMARK.h @@ -20,4 +20,10 @@ struct xt_secmark_target_info { char secctx[SECMARK_SECCTX_MAX]; }; +struct xt_secmark_target_info_v1 { + __u8 mode; + char secctx[SECMARK_SECCTX_MAX]; + __u32 secid; +}; + #endif /*_XT_SECMARK_H_target */ diff --git a/include/uapi/linux/nexthop.h b/include/uapi/linux/nexthop.h index 2d4a1e784cf0..d8ffa8c9ca78 100644 --- a/include/uapi/linux/nexthop.h +++ b/include/uapi/linux/nexthop.h @@ -21,7 +21,10 @@ struct nexthop_grp { }; enum { - NEXTHOP_GRP_TYPE_MPATH, /* default type if not specified */ + NEXTHOP_GRP_TYPE_MPATH, /* hash-threshold nexthop group + * default type if not specified + */ + NEXTHOP_GRP_TYPE_RES, /* resilient nexthop group */ __NEXTHOP_GRP_TYPE_MAX, }; @@ -52,8 +55,50 @@ enum { NHA_FDB, /* flag; nexthop belongs to a bridge fdb */ /* if NHA_FDB is added, OIF, BLACKHOLE, ENCAP cannot be set */ + /* nested; resilient nexthop group attributes */ + NHA_RES_GROUP, + /* nested; nexthop bucket attributes */ + NHA_RES_BUCKET, + __NHA_MAX, }; #define NHA_MAX (__NHA_MAX - 1) + +enum { + NHA_RES_GROUP_UNSPEC, + /* Pad attribute for 64-bit alignment. */ + NHA_RES_GROUP_PAD = NHA_RES_GROUP_UNSPEC, + + /* u16; number of nexthop buckets in a resilient nexthop group */ + NHA_RES_GROUP_BUCKETS, + /* clock_t as u32; nexthop bucket idle timer (per-group) */ + NHA_RES_GROUP_IDLE_TIMER, + /* clock_t as u32; nexthop unbalanced timer */ + NHA_RES_GROUP_UNBALANCED_TIMER, + /* clock_t as u64; nexthop unbalanced time */ + NHA_RES_GROUP_UNBALANCED_TIME, + + __NHA_RES_GROUP_MAX, +}; + +#define NHA_RES_GROUP_MAX (__NHA_RES_GROUP_MAX - 1) + +enum { + NHA_RES_BUCKET_UNSPEC, + /* Pad attribute for 64-bit alignment. */ + NHA_RES_BUCKET_PAD = NHA_RES_BUCKET_UNSPEC, + + /* u16; nexthop bucket index */ + NHA_RES_BUCKET_INDEX, + /* clock_t as u64; nexthop bucket idle time */ + NHA_RES_BUCKET_IDLE_TIME, + /* u32; nexthop id assigned to the nexthop bucket */ + NHA_RES_BUCKET_NH_ID, + + __NHA_RES_BUCKET_MAX, +}; + +#define NHA_RES_BUCKET_MAX (__NHA_RES_BUCKET_MAX - 1) + #endif diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h index ed5415e0f1c1..800bb0ffa6e6 100644 --- a/include/uapi/linux/nfs4.h +++ b/include/uapi/linux/nfs4.h @@ -178,9 +178,3 @@ #define NFS4_MAX_BACK_CHANNEL_OPS 2 #endif /* _UAPI_LINUX_NFS4_H */ - -/* - * Local variables: - * c-basic-offset: 8 - * End: - */ diff --git a/include/uapi/linux/nfsd/nfsfh.h b/include/uapi/linux/nfsd/nfsfh.h index ff0ca88b1c8f..427294dd56a1 100644 --- a/include/uapi/linux/nfsd/nfsfh.h +++ b/include/uapi/linux/nfsd/nfsfh.h @@ -64,13 +64,24 @@ struct nfs_fhbase_old { * in include/linux/exportfs.h for currently registered values. 
*/ struct nfs_fhbase_new { - __u8 fb_version; /* == 1, even => nfs_fhbase_old */ - __u8 fb_auth_type; - __u8 fb_fsid_type; - __u8 fb_fileid_type; - __u32 fb_auth[1]; -/* __u32 fb_fsid[0]; floating */ -/* __u32 fb_fileid[0]; floating */ + union { + struct { + __u8 fb_version_aux; /* == 1, even => nfs_fhbase_old */ + __u8 fb_auth_type_aux; + __u8 fb_fsid_type_aux; + __u8 fb_fileid_type_aux; + __u32 fb_auth[1]; + /* __u32 fb_fsid[0]; floating */ + /* __u32 fb_fileid[0]; floating */ + }; + struct { + __u8 fb_version; /* == 1, even => nfs_fhbase_old */ + __u8 fb_auth_type; + __u8 fb_fsid_type; + __u8 fb_fileid_type; + __u32 fb_auth_flex[]; /* flexible-array member */ + }; + }; }; struct knfsd_fh { @@ -97,7 +108,7 @@ struct knfsd_fh { #define fh_fsid_type fh_base.fh_new.fb_fsid_type #define fh_auth_type fh_base.fh_new.fb_auth_type #define fh_fileid_type fh_base.fh_new.fb_fileid_type -#define fh_fsid fh_base.fh_new.fb_auth +#define fh_fsid fh_base.fh_new.fb_auth_flex /* Do not use, provided for userspace compatiblity. */ #define fh_auth fh_base.fh_new.fb_auth diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index ac78da99fccd..f962c06e9818 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -655,6 +655,9 @@ * When a security association was established on an 802.1X network using * fast transition, this event should be followed by an * %NL80211_CMD_PORT_AUTHORIZED event. + * Following a %NL80211_CMD_ROAM event userspace can issue + * %NL80211_CMD_GET_SCAN in order to obtain the scan information for the + * new BSS the card/driver roamed to. * @NL80211_CMD_DISCONNECT: drop a given connection; also used to notify * userspace that a connection was dropped by the AP or due to other * reasons, for this the %NL80211_ATTR_DISCONNECTED_BY_AP and @@ -5937,6 +5940,16 @@ enum nl80211_feature_flags { * @NL80211_EXT_FEATURE_BEACON_RATE_HE: Driver supports beacon rate * configuration (AP/mesh) with HE rates. * + * @NL80211_EXT_FEATURE_SECURE_LTF: Device supports secure LTF measurement + * exchange protocol. + * + * @NL80211_EXT_FEATURE_SECURE_RTT: Device supports secure RTT measurement + * exchange protocol. + * + * @NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE: Device supports management + * frame protection for all management frames exchanged during the + * negotiation and range measurement procedure. + * * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. */ @@ -5998,6 +6011,9 @@ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_FILS_DISCOVERY, NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP, NL80211_EXT_FEATURE_BEACON_RATE_HE, + NL80211_EXT_FEATURE_SECURE_LTF, + NL80211_EXT_FEATURE_SECURE_RTT, + NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, @@ -6295,11 +6311,13 @@ struct nl80211_vendor_cmd_info { * @NL80211_TDLS_PEER_HT: TDLS peer is HT capable. * @NL80211_TDLS_PEER_VHT: TDLS peer is VHT capable. * @NL80211_TDLS_PEER_WMM: TDLS peer is WMM capable. + * @NL80211_TDLS_PEER_HE: TDLS peer is HE capable. */ enum nl80211_tdls_peer_capability { NL80211_TDLS_PEER_HT = 1<<0, NL80211_TDLS_PEER_VHT = 1<<1, NL80211_TDLS_PEER_WMM = 1<<2, + NL80211_TDLS_PEER_HE = 1<<3, }; /** @@ -6891,6 +6909,9 @@ enum nl80211_peer_measurement_ftm_capa { * if neither %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED nor * %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED is set, EDCA based * ranging will be used. 
+ * @NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK: negotiate for LMR feedback. Only + * valid if either %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED or + * %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED is set. * * @NUM_NL80211_PMSR_FTM_REQ_ATTR: internal * @NL80211_PMSR_FTM_REQ_ATTR_MAX: highest attribute number @@ -6909,6 +6930,7 @@ enum nl80211_peer_measurement_ftm_req { NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC, NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED, NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED, + NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK, /* keep last */ NUM_NL80211_PMSR_FTM_REQ_ATTR, diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index ad15e40d7f5d..f92880a15645 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -38,6 +38,21 @@ enum perf_type_id { }; /* + * attr.config layout for type PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE + * PERF_TYPE_HARDWARE: 0xEEEEEEEE000000AA + * AA: hardware event ID + * EEEEEEEE: PMU type ID + * PERF_TYPE_HW_CACHE: 0xEEEEEEEE00DDCCBB + * BB: hardware cache ID + * CC: hardware cache op ID + * DD: hardware cache op result ID + * EEEEEEEE: PMU type ID + * If the PMU type ID is 0, the PERF_TYPE_RAW will be applied. + */ +#define PERF_PMU_TYPE_SHIFT 32 +#define PERF_HW_EVENT_MASK 0xffffffff + +/* * Generalized performance event event_id types, used by the * attr.event_id parameter of the sys_perf_event_open() * syscall: @@ -112,6 +127,7 @@ enum perf_sw_ids { PERF_COUNT_SW_EMULATION_FAULTS = 8, PERF_COUNT_SW_DUMMY = 9, PERF_COUNT_SW_BPF_OUTPUT = 10, + PERF_COUNT_SW_CGROUP_SWITCHES = 11, PERF_COUNT_SW_MAX, /* non-ABI */ }; @@ -311,6 +327,7 @@ enum perf_event_read_format { #define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */ #define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */ #define PERF_ATTR_SIZE_VER6 120 /* add: aux_sample_size */ +#define PERF_ATTR_SIZE_VER7 128 /* add: sig_data */ /* * Hardware event_id to monitor via a performance monitoring event: @@ -389,7 +406,10 @@ struct perf_event_attr { cgroup : 1, /* include cgroup events */ text_poke : 1, /* include text poke events */ build_id : 1, /* use build id in mmap2 events */ - __reserved_1 : 29; + inherit_thread : 1, /* children only inherit if cloned with CLONE_THREAD */ + remove_on_exec : 1, /* event is removed from task on exec */ + sigtrap : 1, /* send synchronous SIGTRAP on event */ + __reserved_1 : 26; union { __u32 wakeup_events; /* wakeup every n events */ @@ -441,6 +461,12 @@ struct perf_event_attr { __u16 __reserved_2; __u32 aux_sample_size; __u32 __reserved_3; + + /* + * User provided data if sigtrap=1, passed back to user via + * siginfo_t::si_perf_data, e.g. to permit user to identify the event. 
+ */ + __u64 sig_data; }; /* @@ -1156,10 +1182,15 @@ enum perf_callchain_context { /** * PERF_RECORD_AUX::flags bits */ -#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */ -#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */ -#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */ -#define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */ +#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */ +#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */ +#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */ +#define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */ +#define PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK 0xff00 /* PMU specific trace format type */ + +/* CoreSight PMU AUX buffer formats */ +#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT 0x0000 /* Default for backward compatibility */ +#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW 0x0100 /* Raw format of the source */ #define PERF_FLAG_FD_NO_GROUP (1UL << 0) #define PERF_FLAG_FD_OUTPUT (1UL << 1) diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 7ea59cfe1fa7..025c40fef93d 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -190,6 +190,8 @@ enum { TCA_POLICE_PAD, TCA_POLICE_RATE64, TCA_POLICE_PEAKRATE64, + TCA_POLICE_PKTRATE64, + TCA_POLICE_PKTBURST64, __TCA_POLICE_MAX #define TCA_POLICE_RESULT TCA_POLICE_RESULT }; diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 667f1aed091c..18a9f59dc067 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -255,4 +255,8 @@ struct prctl_mm_map { # define SYSCALL_DISPATCH_FILTER_ALLOW 0 # define SYSCALL_DISPATCH_FILTER_BLOCK 1 +/* Set/get enabled arm64 pointer authentication keys */ +#define PR_PAC_SET_ENABLED_KEYS 60 +#define PR_PAC_GET_ENABLED_KEYS 61 + #endif /* _LINUX_PRCTL_H */ diff --git a/include/uapi/linux/psample.h b/include/uapi/linux/psample.h index bff5032c98df..e585db5bf2d2 100644 --- a/include/uapi/linux/psample.h +++ b/include/uapi/linux/psample.h @@ -13,6 +13,13 @@ enum { PSAMPLE_ATTR_GROUP_REFCOUNT, PSAMPLE_ATTR_TUNNEL, + PSAMPLE_ATTR_PAD, + PSAMPLE_ATTR_OUT_TC, /* u16 */ + PSAMPLE_ATTR_OUT_TC_OCC, /* u64, bytes */ + PSAMPLE_ATTR_LATENCY, /* u64, nanoseconds */ + PSAMPLE_ATTR_TIMESTAMP, /* u64, nanoseconds */ + PSAMPLE_ATTR_PROTO, /* u16 */ + __PSAMPLE_ATTR_MAX }; diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h index 83ee45fa634b..3747bf816f9a 100644 --- a/include/uapi/linux/ptrace.h +++ b/include/uapi/linux/ptrace.h @@ -102,6 +102,16 @@ struct ptrace_syscall_info { }; }; +#define PTRACE_GET_RSEQ_CONFIGURATION 0x420f + +struct ptrace_rseq_configuration { + __u64 rseq_abi_pointer; + __u32 rseq_abi_size; + __u32 signature; + __u32 flags; + __u32 pad; +}; + /* * These values are stored in task->ptrace_message * by tracehook_report_syscall_* to describe the current syscall-stop. 
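The ptrace.h hunk above adds PTRACE_GET_RSEQ_CONFIGURATION together with struct ptrace_rseq_configuration. Below is a minimal, illustrative sketch of how a tracer might query a stopped tracee's rseq registration with the new request; it assumes the usual ptrace(request, pid, addr, data) calling convention with addr carrying the buffer size, and the helper name and the locally mirrored struct (copied from the hunk above, renamed to avoid clashing with newer system headers) are illustrative only, not part of the diff.

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <linux/types.h>

#ifndef PTRACE_GET_RSEQ_CONFIGURATION
#define PTRACE_GET_RSEQ_CONFIGURATION	0x420f	/* value from the hunk above */
#endif

/* Local mirror of struct ptrace_rseq_configuration from the hunk above,
 * renamed so it cannot conflict with headers that already provide it. */
struct rseq_config {
	__u64 rseq_abi_pointer;
	__u32 rseq_abi_size;
	__u32 signature;
	__u32 flags;
	__u32 pad;
};

/* Dump the rseq registration of an already-attached, stopped tracee. */
static void dump_rseq_configuration(pid_t pid)
{
	struct rseq_config cfg;
	long ret;

	/* addr carries the buffer size, data points at the buffer;
	 * on success the return value is the size known to the kernel. */
	ret = ptrace(PTRACE_GET_RSEQ_CONFIGURATION, pid,
		     (void *)sizeof(cfg), &cfg);
	if (ret < 0) {
		perror("PTRACE_GET_RSEQ_CONFIGURATION");
		return;
	}

	printf("rseq abi: 0x%llx size: %u signature: 0x%x flags: 0x%x\n",
	       (unsigned long long)cfg.rseq_abi_pointer,
	       cfg.rseq_abi_size, cfg.signature, cfg.flags);
}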
diff --git a/include/uapi/linux/rkisp1-config.h b/include/uapi/linux/rkisp1-config.h index 36e3efb81b01..583ca0d9a79d 100644 --- a/include/uapi/linux/rkisp1-config.h +++ b/include/uapi/linux/rkisp1-config.h @@ -15,7 +15,7 @@ #define RKISP1_CIF_ISP_MODULE_BLS (1U << 1) /* Sensor De-gamma */ #define RKISP1_CIF_ISP_MODULE_SDG (1U << 2) -/* Histogram */ +/* Histogram statistics configuration */ #define RKISP1_CIF_ISP_MODULE_HST (1U << 3) /* Lens Shade Control */ #define RKISP1_CIF_ISP_MODULE_LSC (1U << 4) @@ -31,13 +31,13 @@ #define RKISP1_CIF_ISP_MODULE_GOC (1U << 9) /* Color Processing */ #define RKISP1_CIF_ISP_MODULE_CPROC (1U << 10) -/* Auto Focus Control */ +/* Auto Focus Control statistics configuration */ #define RKISP1_CIF_ISP_MODULE_AFC (1U << 11) -/* Auto White Balancing */ +/* Auto White Balancing statistics configuration */ #define RKISP1_CIF_ISP_MODULE_AWB (1U << 12) /* Image Effect */ #define RKISP1_CIF_ISP_MODULE_IE (1U << 13) -/* Auto Exposure Control */ +/* Auto Exposure Control statistics configuration */ #define RKISP1_CIF_ISP_MODULE_AEC (1U << 14) /* Wide Dynamic Range */ #define RKISP1_CIF_ISP_MODULE_WDR (1U << 15) @@ -411,7 +411,7 @@ struct rkisp1_cif_isp_cproc_config { }; /** - * struct rkisp1_cif_isp_awb_meas_config - Configuration used by auto white balance + * struct rkisp1_cif_isp_awb_meas_config - Configuration for the AWB statistics * * @awb_mode: the awb meas mode. From enum rkisp1_cif_isp_awb_mode_type. * @awb_wnd: white balance measurement window (in pixels) @@ -550,7 +550,7 @@ struct rkisp1_cif_isp_goc_config { }; /** - * struct rkisp1_cif_isp_hst_config - Configuration used by Histogram + * struct rkisp1_cif_isp_hst_config - Configuration for Histogram statistics * * @mode: histogram mode (from enum rkisp1_cif_isp_histogram_mode) * @histogram_predivider: process every stepsize pixel, all other pixels are @@ -575,7 +575,7 @@ struct rkisp1_cif_isp_hst_config { }; /** - * struct rkisp1_cif_isp_aec_config - Configuration used by Auto Exposure Control + * struct rkisp1_cif_isp_aec_config - Configuration for Auto Exposure statistics * * @mode: Exposure measure mode (from enum rkisp1_cif_isp_exp_meas_mode) * @autostop: stop mode (from enum rkisp1_cif_isp_exp_ctrl_autostop) @@ -588,7 +588,7 @@ struct rkisp1_cif_isp_aec_config { }; /** - * struct rkisp1_cif_isp_afc_config - Configuration used by Auto Focus Control + * struct rkisp1_cif_isp_afc_config - Configuration for the Auto Focus statistics * * @num_afm_win: max RKISP1_CIF_ISP_AFM_MAX_WINDOWS * @afm_win: coordinates of the meas window diff --git a/include/uapi/linux/rpmsg.h b/include/uapi/linux/rpmsg.h index e14c6dab4223..f5ca8740f3fb 100644 --- a/include/uapi/linux/rpmsg.h +++ b/include/uapi/linux/rpmsg.h @@ -9,11 +9,13 @@ #include <linux/ioctl.h> #include <linux/types.h> +#define RPMSG_ADDR_ANY 0xFFFFFFFF + /** * struct rpmsg_endpoint_info - endpoint info representation * @name: name of service - * @src: local address - * @dst: destination address + * @src: local address. To set to RPMSG_ADDR_ANY if not used. + * @dst: destination address. To set to RPMSG_ADDR_ANY if not used. */ struct rpmsg_endpoint_info { char name[32]; @@ -21,7 +23,14 @@ struct rpmsg_endpoint_info { __u32 dst; }; +/** + * Instantiate a new rmpsg char device endpoint. + */ #define RPMSG_CREATE_EPT_IOCTL _IOW(0xb5, 0x1, struct rpmsg_endpoint_info) + +/** + * Destroy a rpmsg char device endpoint created by the RPMSG_CREATE_EPT_IOCTL. 
+ */ #define RPMSG_DESTROY_EPT_IOCTL _IO(0xb5, 0x2) #endif diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 91e4ca064d61..5888492a5257 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -178,6 +178,13 @@ enum { RTM_GETVLAN, #define RTM_GETVLAN RTM_GETVLAN + RTM_NEWNEXTHOPBUCKET = 116, +#define RTM_NEWNEXTHOPBUCKET RTM_NEWNEXTHOPBUCKET + RTM_DELNEXTHOPBUCKET, +#define RTM_DELNEXTHOPBUCKET RTM_DELNEXTHOPBUCKET + RTM_GETNEXTHOPBUCKET, +#define RTM_GETNEXTHOPBUCKET RTM_GETNEXTHOPBUCKET + __RTM_MAX, #define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1) }; @@ -283,6 +290,7 @@ enum { #define RTPROT_MROUTED 17 /* Multicast daemon */ #define RTPROT_KEEPALIVED 18 /* Keepalived daemon */ #define RTPROT_BABEL 42 /* Babel daemon */ +#define RTPROT_OPENR 99 /* Open Routing (Open/R) Routes */ #define RTPROT_BGP 186 /* BGP Routes */ #define RTPROT_ISIS 187 /* ISIS Routes */ #define RTPROT_OSPF 188 /* OSPF Routes */ diff --git a/include/uapi/linux/seg6_local.h b/include/uapi/linux/seg6_local.h index 3b39ef1dbb46..5ae3ace84de0 100644 --- a/include/uapi/linux/seg6_local.h +++ b/include/uapi/linux/seg6_local.h @@ -27,6 +27,7 @@ enum { SEG6_LOCAL_OIF, SEG6_LOCAL_BPF, SEG6_LOCAL_VRFTABLE, + SEG6_LOCAL_COUNTERS, __SEG6_LOCAL_MAX, }; #define SEG6_LOCAL_MAX (__SEG6_LOCAL_MAX - 1) @@ -78,4 +79,33 @@ enum { #define SEG6_LOCAL_BPF_PROG_MAX (__SEG6_LOCAL_BPF_PROG_MAX - 1) +/* SRv6 Behavior counters are encoded as netlink attributes guaranteeing the + * correct alignment. + * Each counter is identified by a different attribute type (i.e. + * SEG6_LOCAL_CNT_PACKETS). + * + * - SEG6_LOCAL_CNT_PACKETS: identifies a counter that counts the number of + * packets that have been CORRECTLY processed by an SRv6 Behavior instance + * (i.e., packets that generate errors or are dropped are NOT counted). + * + * - SEG6_LOCAL_CNT_BYTES: identifies a counter that counts the total amount + * of traffic in bytes of all packets that have been CORRECTLY processed by + * an SRv6 Behavior instance (i.e., packets that generate errors or are + * dropped are NOT counted). + * + * - SEG6_LOCAL_CNT_ERRORS: identifies a counter that counts the number of + * packets that have NOT been properly processed by an SRv6 Behavior instance + * (i.e., packets that generate errors or are dropped). 
+ */ +enum { + SEG6_LOCAL_CNT_UNSPEC, + SEG6_LOCAL_CNT_PAD, /* pad for 64 bits values */ + SEG6_LOCAL_CNT_PACKETS, + SEG6_LOCAL_CNT_BYTES, + SEG6_LOCAL_CNT_ERRORS, + __SEG6_LOCAL_CNT_MAX, +}; + +#define SEG6_LOCAL_CNT_MAX (__SEG6_LOCAL_CNT_MAX - 1) + #endif diff --git a/include/uapi/linux/serial.h b/include/uapi/linux/serial.h index 93eb3c496ff1..fa6b16e5fdd8 100644 --- a/include/uapi/linux/serial.h +++ b/include/uapi/linux/serial.h @@ -52,11 +52,11 @@ struct serial_struct { #define PORT_16450 2 #define PORT_16550 3 #define PORT_16550A 4 -#define PORT_CIRRUS 5 /* usurped by cyclades.c */ +#define PORT_CIRRUS 5 #define PORT_16650 6 #define PORT_16650V2 7 #define PORT_16750 8 -#define PORT_STARTECH 9 /* usurped by cyclades.c */ +#define PORT_STARTECH 9 #define PORT_16C950 10 /* Oxford Semiconductor */ #define PORT_16654 11 #define PORT_16850 12 diff --git a/include/uapi/linux/surface_aggregator/dtx.h b/include/uapi/linux/surface_aggregator/dtx.h new file mode 100644 index 000000000000..0833aab0d819 --- /dev/null +++ b/include/uapi/linux/surface_aggregator/dtx.h @@ -0,0 +1,146 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * Surface DTX (clipboard detachment system driver) user-space interface. + * + * Definitions, structs, and IOCTLs for the /dev/surface/dtx misc device. This + * device allows user-space to control the clipboard detachment process on + * Surface Book series devices. + * + * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com> + */ + +#ifndef _UAPI_LINUX_SURFACE_AGGREGATOR_DTX_H +#define _UAPI_LINUX_SURFACE_AGGREGATOR_DTX_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +/* Status/error categories */ +#define SDTX_CATEGORY_STATUS 0x0000 +#define SDTX_CATEGORY_RUNTIME_ERROR 0x1000 +#define SDTX_CATEGORY_HARDWARE_ERROR 0x2000 +#define SDTX_CATEGORY_UNKNOWN 0xf000 + +#define SDTX_CATEGORY_MASK 0xf000 +#define SDTX_CATEGORY(value) ((value) & SDTX_CATEGORY_MASK) + +#define SDTX_STATUS(code) ((code) | SDTX_CATEGORY_STATUS) +#define SDTX_ERR_RT(code) ((code) | SDTX_CATEGORY_RUNTIME_ERROR) +#define SDTX_ERR_HW(code) ((code) | SDTX_CATEGORY_HARDWARE_ERROR) +#define SDTX_UNKNOWN(code) ((code) | SDTX_CATEGORY_UNKNOWN) + +#define SDTX_SUCCESS(value) (SDTX_CATEGORY(value) == SDTX_CATEGORY_STATUS) + +/* Latch status values */ +#define SDTX_LATCH_CLOSED SDTX_STATUS(0x00) +#define SDTX_LATCH_OPENED SDTX_STATUS(0x01) + +/* Base state values */ +#define SDTX_BASE_DETACHED SDTX_STATUS(0x00) +#define SDTX_BASE_ATTACHED SDTX_STATUS(0x01) + +/* Runtime errors (non-critical) */ +#define SDTX_DETACH_NOT_FEASIBLE SDTX_ERR_RT(0x01) +#define SDTX_DETACH_TIMEDOUT SDTX_ERR_RT(0x02) + +/* Hardware errors (critical) */ +#define SDTX_ERR_FAILED_TO_OPEN SDTX_ERR_HW(0x01) +#define SDTX_ERR_FAILED_TO_REMAIN_OPEN SDTX_ERR_HW(0x02) +#define SDTX_ERR_FAILED_TO_CLOSE SDTX_ERR_HW(0x03) + +/* Base types */ +#define SDTX_DEVICE_TYPE_HID 0x0100 +#define SDTX_DEVICE_TYPE_SSH 0x0200 + +#define SDTX_DEVICE_TYPE_MASK 0x0f00 +#define SDTX_DEVICE_TYPE(value) ((value) & SDTX_DEVICE_TYPE_MASK) + +#define SDTX_BASE_TYPE_HID(id) ((id) | SDTX_DEVICE_TYPE_HID) +#define SDTX_BASE_TYPE_SSH(id) ((id) | SDTX_DEVICE_TYPE_SSH) + +/** + * enum sdtx_device_mode - Mode describing how (and if) the clipboard is + * attached to the base of the device. + * @SDTX_DEVICE_MODE_TABLET: The clipboard is detached from the base and the + * device operates as tablet. + * @SDTX_DEVICE_MODE_LAPTOP: The clipboard is attached normally to the base + * and the device operates as laptop. 
+ * @SDTX_DEVICE_MODE_STUDIO: The clipboard is attached to the base in reverse. + * The device operates as tablet with keyboard and + * touchpad deactivated, however, the base battery + * and, if present in the specific device model, dGPU + * are available to the system. + */ +enum sdtx_device_mode { + SDTX_DEVICE_MODE_TABLET = 0x00, + SDTX_DEVICE_MODE_LAPTOP = 0x01, + SDTX_DEVICE_MODE_STUDIO = 0x02, +}; + +/** + * struct sdtx_event - Event provided by reading from the DTX device file. + * @length: Length of the event payload, in bytes. + * @code: Event code, detailing what type of event this is. + * @data: Payload of the event, containing @length bytes. + * + * See &enum sdtx_event_code for currently valid event codes. + */ +struct sdtx_event { + __u16 length; + __u16 code; + __u8 data[]; +} __attribute__((__packed__)); + +/** + * enum sdtx_event_code - Code describing the type of an event. + * @SDTX_EVENT_REQUEST: Detachment request event type. + * @SDTX_EVENT_CANCEL: Cancel detachment process event type. + * @SDTX_EVENT_BASE_CONNECTION: Base/clipboard connection change event type. + * @SDTX_EVENT_LATCH_STATUS: Latch status change event type. + * @SDTX_EVENT_DEVICE_MODE: Device mode change event type. + * + * Used in &struct sdtx_event to describe the type of the event. Further event + * codes are reserved for future use. Any event parser should be able to + * gracefully handle unknown events, i.e. by simply skipping them. + * + * Consult the DTX user-space interface documentation for details regarding + * the individual event types. + */ +enum sdtx_event_code { + SDTX_EVENT_REQUEST = 1, + SDTX_EVENT_CANCEL = 2, + SDTX_EVENT_BASE_CONNECTION = 3, + SDTX_EVENT_LATCH_STATUS = 4, + SDTX_EVENT_DEVICE_MODE = 5, +}; + +/** + * struct sdtx_base_info - Describes if and what type of base is connected. + * @state: The state of the connection. Valid values are %SDTX_BASE_DETACHED, + * %SDTX_BASE_ATTACHED, and %SDTX_DETACH_NOT_FEASIBLE (in case a base + * is attached but low clipboard battery prevents detachment). Other + * values are currently reserved. + * @base_id: The type of base connected. Zero if no base is connected. 
+ */ +struct sdtx_base_info { + __u16 state; + __u16 base_id; +} __attribute__((__packed__)); + +/* IOCTLs */ +#define SDTX_IOCTL_EVENTS_ENABLE _IO(0xa5, 0x21) +#define SDTX_IOCTL_EVENTS_DISABLE _IO(0xa5, 0x22) + +#define SDTX_IOCTL_LATCH_LOCK _IO(0xa5, 0x23) +#define SDTX_IOCTL_LATCH_UNLOCK _IO(0xa5, 0x24) + +#define SDTX_IOCTL_LATCH_REQUEST _IO(0xa5, 0x25) +#define SDTX_IOCTL_LATCH_CONFIRM _IO(0xa5, 0x26) +#define SDTX_IOCTL_LATCH_HEARTBEAT _IO(0xa5, 0x27) +#define SDTX_IOCTL_LATCH_CANCEL _IO(0xa5, 0x28) + +#define SDTX_IOCTL_GET_BASE_INFO _IOR(0xa5, 0x29, struct sdtx_base_info) +#define SDTX_IOCTL_GET_DEVICE_MODE _IOR(0xa5, 0x2a, __u16) +#define SDTX_IOCTL_GET_LATCH_STATUS _IOR(0xa5, 0x2b, __u16) + +#endif /* _UAPI_LINUX_SURFACE_AGGREGATOR_DTX_H */ diff --git a/include/uapi/linux/thermal.h b/include/uapi/linux/thermal.h index c105054cbb57..9aa2fedfa309 100644 --- a/include/uapi/linux/thermal.h +++ b/include/uapi/linux/thermal.h @@ -60,7 +60,7 @@ enum thermal_genl_event { THERMAL_GENL_EVENT_UNSPEC, THERMAL_GENL_EVENT_TZ_CREATE, /* Thermal zone creation */ THERMAL_GENL_EVENT_TZ_DELETE, /* Thermal zone deletion */ - THERMAL_GENL_EVENT_TZ_DISABLE, /* Thermal zone disabed */ + THERMAL_GENL_EVENT_TZ_DISABLE, /* Thermal zone disabled */ THERMAL_GENL_EVENT_TZ_ENABLE, /* Thermal zone enabled */ THERMAL_GENL_EVENT_TZ_TRIP_UP, /* Trip point crossed the way up */ THERMAL_GENL_EVENT_TZ_TRIP_DOWN, /* Trip point crossed the way down */ diff --git a/include/uapi/linux/tty_flags.h b/include/uapi/linux/tty_flags.h index 900a32e63424..cf25056d4b27 100644 --- a/include/uapi/linux/tty_flags.h +++ b/include/uapi/linux/tty_flags.h @@ -39,7 +39,7 @@ * WARNING: These flags are no longer used and have been superceded by the * TTY_PORT_ flags in the iflags field (and not userspace-visible) */ -#ifndef _KERNEL_ +#ifndef __KERNEL__ #define ASYNCB_INITIALIZED 31 /* Serial port was initialized */ #define ASYNCB_SUSPENDED 30 /* Serial port is suspended */ #define ASYNCB_NORMAL_ACTIVE 29 /* Normal device is active */ @@ -73,15 +73,15 @@ #define ASYNC_MAGIC_MULTIPLIER (1U << ASYNCB_MAGIC_MULTIPLIER) #define ASYNC_FLAGS ((1U << (ASYNCB_LAST_USER + 1)) - 1) -#define ASYNC_DEPRECATED (ASYNC_SESSION_LOCKOUT | ASYNC_PGRP_LOCKOUT | \ - ASYNC_CALLOUT_NOHUP | ASYNC_AUTOPROBE) +#define ASYNC_DEPRECATED (ASYNC_SPLIT_TERMIOS | ASYNC_SESSION_LOCKOUT | \ + ASYNC_PGRP_LOCKOUT | ASYNC_CALLOUT_NOHUP | ASYNC_AUTOPROBE) #define ASYNC_USR_MASK (ASYNC_SPD_MASK|ASYNC_CALLOUT_NOHUP| \ ASYNC_LOW_LATENCY) #define ASYNC_SPD_CUST (ASYNC_SPD_HI|ASYNC_SPD_VHI) #define ASYNC_SPD_WARP (ASYNC_SPD_HI|ASYNC_SPD_SHI) #define ASYNC_SPD_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI|ASYNC_SPD_SHI) -#ifndef _KERNEL_ +#ifndef __KERNEL__ /* These flags are no longer used (and were always masked from userspace) */ #define ASYNC_INITIALIZED (1U << ASYNCB_INITIALIZED) #define ASYNC_NORMAL_ACTIVE (1U << ASYNCB_NORMAL_ACTIVE) diff --git a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h index d854cb19c42c..bfdae12cdacf 100644 --- a/include/uapi/linux/usb/video.h +++ b/include/uapi/linux/usb/video.h @@ -302,9 +302,10 @@ struct uvc_processing_unit_descriptor { __u8 bControlSize; __u8 bmControls[2]; __u8 iProcessing; + __u8 bmVideoStandards; } __attribute__((__packed__)); -#define UVC_DT_PROCESSING_UNIT_SIZE(n) (9+(n)) +#define UVC_DT_PROCESSING_UNIT_SIZE(n) (10+(n)) /* 3.7.2.6. 
Extension Unit Descriptor */ struct uvc_extension_unit_descriptor { diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index 5f2d88212f7c..bafbeb1a2624 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -19,15 +19,19 @@ * means the userland is reading). */ #define UFFD_API ((__u64)0xAA) +#define UFFD_API_REGISTER_MODES (UFFDIO_REGISTER_MODE_MISSING | \ + UFFDIO_REGISTER_MODE_WP | \ + UFFDIO_REGISTER_MODE_MINOR) #define UFFD_API_FEATURES (UFFD_FEATURE_PAGEFAULT_FLAG_WP | \ UFFD_FEATURE_EVENT_FORK | \ UFFD_FEATURE_EVENT_REMAP | \ - UFFD_FEATURE_EVENT_REMOVE | \ + UFFD_FEATURE_EVENT_REMOVE | \ UFFD_FEATURE_EVENT_UNMAP | \ UFFD_FEATURE_MISSING_HUGETLBFS | \ UFFD_FEATURE_MISSING_SHMEM | \ UFFD_FEATURE_SIGBUS | \ - UFFD_FEATURE_THREAD_ID) + UFFD_FEATURE_THREAD_ID | \ + UFFD_FEATURE_MINOR_HUGETLBFS) #define UFFD_API_IOCTLS \ ((__u64)1 << _UFFDIO_REGISTER | \ (__u64)1 << _UFFDIO_UNREGISTER | \ @@ -36,10 +40,12 @@ ((__u64)1 << _UFFDIO_WAKE | \ (__u64)1 << _UFFDIO_COPY | \ (__u64)1 << _UFFDIO_ZEROPAGE | \ - (__u64)1 << _UFFDIO_WRITEPROTECT) + (__u64)1 << _UFFDIO_WRITEPROTECT | \ + (__u64)1 << _UFFDIO_CONTINUE) #define UFFD_API_RANGE_IOCTLS_BASIC \ ((__u64)1 << _UFFDIO_WAKE | \ - (__u64)1 << _UFFDIO_COPY) + (__u64)1 << _UFFDIO_COPY | \ + (__u64)1 << _UFFDIO_CONTINUE) /* * Valid ioctl command number range with this API is from 0x00 to @@ -55,6 +61,7 @@ #define _UFFDIO_COPY (0x03) #define _UFFDIO_ZEROPAGE (0x04) #define _UFFDIO_WRITEPROTECT (0x06) +#define _UFFDIO_CONTINUE (0x07) #define _UFFDIO_API (0x3F) /* userfaultfd ioctl ids */ @@ -73,6 +80,8 @@ struct uffdio_zeropage) #define UFFDIO_WRITEPROTECT _IOWR(UFFDIO, _UFFDIO_WRITEPROTECT, \ struct uffdio_writeprotect) +#define UFFDIO_CONTINUE _IOR(UFFDIO, _UFFDIO_CONTINUE, \ + struct uffdio_continue) /* read() structure */ struct uffd_msg { @@ -127,6 +136,7 @@ struct uffd_msg { /* flags for UFFD_EVENT_PAGEFAULT */ #define UFFD_PAGEFAULT_FLAG_WRITE (1<<0) /* If this was a write fault */ #define UFFD_PAGEFAULT_FLAG_WP (1<<1) /* If reason is VM_UFFD_WP */ +#define UFFD_PAGEFAULT_FLAG_MINOR (1<<2) /* If reason is VM_UFFD_MINOR */ struct uffdio_api { /* userland asks for an API number and the features to enable */ @@ -171,6 +181,10 @@ struct uffdio_api { * * UFFD_FEATURE_THREAD_ID pid of the page faulted task_struct will * be returned, if feature is not requested 0 will be returned. + * + * UFFD_FEATURE_MINOR_HUGETLBFS indicates that minor faults + * can be intercepted (via REGISTER_MODE_MINOR) for + * hugetlbfs-backed pages. */ #define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0) #define UFFD_FEATURE_EVENT_FORK (1<<1) @@ -181,6 +195,7 @@ struct uffdio_api { #define UFFD_FEATURE_EVENT_UNMAP (1<<6) #define UFFD_FEATURE_SIGBUS (1<<7) #define UFFD_FEATURE_THREAD_ID (1<<8) +#define UFFD_FEATURE_MINOR_HUGETLBFS (1<<9) __u64 features; __u64 ioctls; @@ -195,6 +210,7 @@ struct uffdio_register { struct uffdio_range range; #define UFFDIO_REGISTER_MODE_MISSING ((__u64)1<<0) #define UFFDIO_REGISTER_MODE_WP ((__u64)1<<1) +#define UFFDIO_REGISTER_MODE_MINOR ((__u64)1<<2) __u64 mode; /* @@ -257,6 +273,18 @@ struct uffdio_writeprotect { __u64 mode; }; +struct uffdio_continue { + struct uffdio_range range; +#define UFFDIO_CONTINUE_MODE_DONTWAKE ((__u64)1<<0) + __u64 mode; + + /* + * Fields below here are written by the ioctl and must be at the end: + * the copy_from_user will not read past here. + */ + __s64 mapped; +}; + /* * Flags for the userfaultfd(2) system call itself. 
*/ diff --git a/include/uapi/linux/uvcvideo.h b/include/uapi/linux/uvcvideo.h index f80f05b3c423..8288137387c0 100644 --- a/include/uapi/linux/uvcvideo.h +++ b/include/uapi/linux/uvcvideo.h @@ -76,11 +76,11 @@ struct uvc_xu_control_query { /** * struct uvc_meta_buf - metadata buffer building block - * @ns - system timestamp of the payload in nanoseconds - * @sof - USB Frame Number - * @length - length of the payload header - * @flags - payload header flags - * @buf - optional device-specific header data + * @ns: system timestamp of the payload in nanoseconds + * @sof: USB Frame Number + * @length: length of the payload header + * @flags: payload header flags + * @buf: optional device-specific header data * * UVC metadata nodes fill buffers with possibly multiple instances of this * struct. The first two fields are added by the driver, they can be used for diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index 039c0d7add1b..d43bec5f1afd 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -66,6 +66,7 @@ #define V4L2_CTRL_CLASS_RF_TUNER 0x00a20000 /* RF tuner controls */ #define V4L2_CTRL_CLASS_DETECT 0x00a30000 /* Detection controls */ #define V4L2_CTRL_CLASS_CODEC_STATELESS 0x00a40000 /* Stateless codecs controls */ +#define V4L2_CTRL_CLASS_COLORIMETRY 0x00a50000 /* Colorimetry controls */ /* User-class control IDs */ @@ -428,6 +429,11 @@ enum v4l2_mpeg_video_multi_slice_mode { #define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (V4L2_CID_CODEC_BASE+228) #define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (V4L2_CID_CODEC_BASE+229) #define V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID (V4L2_CID_CODEC_BASE+230) +#define V4L2_CID_MPEG_VIDEO_AU_DELIMITER (V4L2_CID_CODEC_BASE+231) +#define V4L2_CID_MPEG_VIDEO_LTR_COUNT (V4L2_CID_CODEC_BASE+232) +#define V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX (V4L2_CID_CODEC_BASE+233) +#define V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES (V4L2_CID_CODEC_BASE+234) +#define V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR (V4L2_CID_CODEC_BASE+235) /* CIDs for the MPEG-2 Part 2 (H.262) codec */ #define V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL (V4L2_CID_CODEC_BASE+270) @@ -797,6 +803,9 @@ enum v4l2_mpeg_video_frame_skip_mode { #define V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP (V4L2_CID_CODEC_BASE + 651) #define V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP (V4L2_CID_CODEC_BASE + 652) +#define V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY (V4L2_CID_CODEC_BASE + 653) +#define V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE (V4L2_CID_CODEC_BASE + 654) + /* MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */ #define V4L2_CID_CODEC_CX2341X_BASE (V4L2_CTRL_CLASS_CODEC | 0x1000) #define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_CODEC_CX2341X_BASE+0) @@ -1329,7 +1338,7 @@ struct v4l2_ctrl_h264_sps { * struct v4l2_ctrl_h264_pps - H264 picture parameter set * * Except where noted, all the members on this picture parameter set - * structure match the sequence parameter set syntax as specified + * structure match the picture parameter set syntax as specified * by the H264 specification. 
* * In particular, V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT flag @@ -1657,6 +1666,236 @@ struct v4l2_ctrl_fwht_params { __u32 quantization; }; +/* Stateless VP8 control */ + +#define V4L2_VP8_SEGMENT_FLAG_ENABLED 0x01 +#define V4L2_VP8_SEGMENT_FLAG_UPDATE_MAP 0x02 +#define V4L2_VP8_SEGMENT_FLAG_UPDATE_FEATURE_DATA 0x04 +#define V4L2_VP8_SEGMENT_FLAG_DELTA_VALUE_MODE 0x08 + +/** + * struct v4l2_vp8_segment - VP8 segment-based adjustments parameters + * + * @quant_update: update values for the segment quantizer. + * @lf_update: update values for the loop filter level. + * @segment_probs: branch probabilities of the segment_id decoding tree. + * @padding: padding field. Should be zeroed by applications. + * @flags: see V4L2_VP8_SEGMENT_FLAG_{}. + * + * This structure contains segment-based adjustments related parameters. + * See the 'update_segmentation()' part of the frame header syntax, + * and section '9.3. Segment-Based Adjustments' of the VP8 specification + * for more details. + */ +struct v4l2_vp8_segment { + __s8 quant_update[4]; + __s8 lf_update[4]; + __u8 segment_probs[3]; + __u8 padding; + __u32 flags; +}; + +#define V4L2_VP8_LF_ADJ_ENABLE 0x01 +#define V4L2_VP8_LF_DELTA_UPDATE 0x02 +#define V4L2_VP8_LF_FILTER_TYPE_SIMPLE 0x04 + +/** + * struct v4l2_vp8_loop_filter - VP8 loop filter parameters + * + * @ref_frm_delta: Reference frame signed delta values. + * @mb_mode_delta: MB prediction mode signed delta values. + * @sharpness_level: matches sharpness_level syntax element. + * @level: matches loop_filter_level syntax element. + * @padding: padding field. Should be zeroed by applications. + * @flags: see V4L2_VP8_LF_FLAG_{}. + * + * This structure contains loop filter related parameters. + * See the 'mb_lf_adjustments()' part of the frame header syntax, + * and section '9.4. Loop Filter Type and Levels' of the VP8 specification + * for more details. + */ +struct v4l2_vp8_loop_filter { + __s8 ref_frm_delta[4]; + __s8 mb_mode_delta[4]; + __u8 sharpness_level; + __u8 level; + __u16 padding; + __u32 flags; +}; + +/** + * struct v4l2_vp8_quantization - VP8 quantization indices + * + * @y_ac_qi: luma AC coefficient table index. + * @y_dc_delta: luma DC delta value. + * @y2_dc_delta: y2 block DC delta value. + * @y2_ac_delta: y2 block AC delta value. + * @uv_dc_delta: chroma DC delta value. + * @uv_ac_delta: chroma AC delta value. + * @padding: padding field. Should be zeroed by applications. + * + * This structure contains the quantization indices present + * in 'quant_indices()' part of the frame header syntax. + * See section '9.6. Dequantization Indices' of the VP8 specification + * for more details. + */ +struct v4l2_vp8_quantization { + __u8 y_ac_qi; + __s8 y_dc_delta; + __s8 y2_dc_delta; + __s8 y2_ac_delta; + __s8 uv_dc_delta; + __s8 uv_ac_delta; + __u16 padding; +}; + +#define V4L2_VP8_COEFF_PROB_CNT 11 +#define V4L2_VP8_MV_PROB_CNT 19 + +/** + * struct v4l2_vp8_entropy - VP8 update probabilities + * + * @coeff_probs: coefficient probability update values. + * @y_mode_probs: luma intra-prediction probabilities. + * @uv_mode_probs: chroma intra-prediction probabilities. + * @mv_probs: mv decoding probability. + * @padding: padding field. Should be zeroed by applications. + * + * This structure contains the update probabilities present in + * 'token_prob_update()' and 'mv_prob_update()' part of the frame header. + * See section '17.2. Probability Updates' of the VP8 specification + * for more details.
+ */ +struct v4l2_vp8_entropy { + __u8 coeff_probs[4][8][3][V4L2_VP8_COEFF_PROB_CNT]; + __u8 y_mode_probs[4]; + __u8 uv_mode_probs[3]; + __u8 mv_probs[2][V4L2_VP8_MV_PROB_CNT]; + __u8 padding[3]; +}; + +/** + * struct v4l2_vp8_entropy_coder_state - VP8 boolean coder state + * + * @range: coder state value for "Range" + * @value: coder state value for "Value" + * @bit_count: number of bits left in range "Value". + * @padding: padding field. Should be zeroed by applications. + * + * This structure contains the state for the boolean coder, as + * explained in section '7. Boolean Entropy Decoder' of the VP8 specification. + */ +struct v4l2_vp8_entropy_coder_state { + __u8 range; + __u8 value; + __u8 bit_count; + __u8 padding; +}; + +#define V4L2_VP8_FRAME_FLAG_KEY_FRAME 0x01 +#define V4L2_VP8_FRAME_FLAG_EXPERIMENTAL 0x02 +#define V4L2_VP8_FRAME_FLAG_SHOW_FRAME 0x04 +#define V4L2_VP8_FRAME_FLAG_MB_NO_SKIP_COEFF 0x08 +#define V4L2_VP8_FRAME_FLAG_SIGN_BIAS_GOLDEN 0x10 +#define V4L2_VP8_FRAME_FLAG_SIGN_BIAS_ALT 0x20 + +#define V4L2_VP8_FRAME_IS_KEY_FRAME(hdr) \ + (!!((hdr)->flags & V4L2_VP8_FRAME_FLAG_KEY_FRAME)) + +#define V4L2_CID_STATELESS_VP8_FRAME (V4L2_CID_CODEC_STATELESS_BASE + 200) +/** + * struct v4l2_ctrl_vp8_frame - VP8 frame parameters + * + * @segment: segmentation parameters. See &v4l2_vp8_segment for more details + * @lf: loop filter parameters. See &v4l2_vp8_loop_filter for more details + * @quant: quantization parameters. See &v4l2_vp8_quantization for more details + * @entropy: update probabilities. See &v4l2_vp8_entropy for more details + * @coder_state: boolean coder state. See &v4l2_vp8_entropy_coder_state for more details + * @width: frame width. + * @height: frame height. + * @horizontal_scale: horizontal scaling factor. + * @vertical_scale: vertical scaling factor. + * @version: bitstream version. + * @prob_skip_false: frame header syntax element. + * @prob_intra: frame header syntax element. + * @prob_last: frame header syntax element. + * @prob_gf: frame header syntax element. + * @num_dct_parts: number of DCT coefficients partitions. + * @first_part_size: size of the first partition, i.e. the control partition. + * @first_part_header_bits: size in bits of the first partition header portion. + * @dct_part_sizes: DCT coefficients sizes. + * @last_frame_ts: "last" reference buffer timestamp. + * The timestamp refers to the timestamp field in struct v4l2_buffer. + * Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64. + * @golden_frame_ts: "golden" reference buffer timestamp. + * @alt_frame_ts: "alt" reference buffer timestamp. + * @flags: see V4L2_VP8_FRAME_FLAG_{}. 
+ */ +struct v4l2_ctrl_vp8_frame { + struct v4l2_vp8_segment segment; + struct v4l2_vp8_loop_filter lf; + struct v4l2_vp8_quantization quant; + struct v4l2_vp8_entropy entropy; + struct v4l2_vp8_entropy_coder_state coder_state; + + __u16 width; + __u16 height; + + __u8 horizontal_scale; + __u8 vertical_scale; + + __u8 version; + __u8 prob_skip_false; + __u8 prob_intra; + __u8 prob_last; + __u8 prob_gf; + __u8 num_dct_parts; + + __u32 first_part_size; + __u32 first_part_header_bits; + __u32 dct_part_sizes[8]; + + __u64 last_frame_ts; + __u64 golden_frame_ts; + __u64 alt_frame_ts; + + __u64 flags; +}; + +#define V4L2_CID_COLORIMETRY_CLASS_BASE (V4L2_CTRL_CLASS_COLORIMETRY | 0x900) +#define V4L2_CID_COLORIMETRY_CLASS (V4L2_CTRL_CLASS_COLORIMETRY | 1) + +#define V4L2_CID_COLORIMETRY_HDR10_CLL_INFO (V4L2_CID_COLORIMETRY_CLASS_BASE + 0) + +struct v4l2_ctrl_hdr10_cll_info { + __u16 max_content_light_level; + __u16 max_pic_average_light_level; +}; + +#define V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY (V4L2_CID_COLORIMETRY_CLASS_BASE + 1) + +#define V4L2_HDR10_MASTERING_PRIMARIES_X_LOW 5 +#define V4L2_HDR10_MASTERING_PRIMARIES_X_HIGH 37000 +#define V4L2_HDR10_MASTERING_PRIMARIES_Y_LOW 5 +#define V4L2_HDR10_MASTERING_PRIMARIES_Y_HIGH 42000 +#define V4L2_HDR10_MASTERING_WHITE_POINT_X_LOW 5 +#define V4L2_HDR10_MASTERING_WHITE_POINT_X_HIGH 37000 +#define V4L2_HDR10_MASTERING_WHITE_POINT_Y_LOW 5 +#define V4L2_HDR10_MASTERING_WHITE_POINT_Y_HIGH 42000 +#define V4L2_HDR10_MASTERING_MAX_LUMA_LOW 50000 +#define V4L2_HDR10_MASTERING_MAX_LUMA_HIGH 100000000 +#define V4L2_HDR10_MASTERING_MIN_LUMA_LOW 1 +#define V4L2_HDR10_MASTERING_MIN_LUMA_HIGH 50000 + +struct v4l2_ctrl_hdr10_mastering_display { + __u16 display_primaries_x[3]; + __u16 display_primaries_y[3]; + __u16 white_point_x; + __u16 white_point_y; + __u32 max_display_mastering_luminance; + __u32 min_display_mastering_luminance; +}; + /* MPEG-compression definitions kept for backwards compatibility */ #ifndef __KERNEL__ #define V4L2_CTRL_CLASS_MPEG V4L2_CTRL_CLASS_CODEC diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h index a38454d9e0f5..658106f5b5dc 100644 --- a/include/uapi/linux/v4l2-subdev.h +++ b/include/uapi/linux/v4l2-subdev.h @@ -44,6 +44,7 @@ enum v4l2_subdev_format_whence { * @which: format type (from enum v4l2_subdev_format_whence) * @pad: pad number, as reported by the media API * @format: media bus format (format code and frame size) + * @reserved: drivers and applications must zero this array */ struct v4l2_subdev_format { __u32 which; @@ -57,6 +58,7 @@ struct v4l2_subdev_format { * @which: format type (from enum v4l2_subdev_format_whence) * @pad: pad number, as reported by the media API * @rect: pad crop rectangle boundaries + * @reserved: drivers and applications must zero this array */ struct v4l2_subdev_crop { __u32 which; @@ -78,6 +80,7 @@ struct v4l2_subdev_crop { * @code: format code (MEDIA_BUS_FMT_ definitions) * @which: format type (from enum v4l2_subdev_format_whence) * @flags: flags set by the driver, (V4L2_SUBDEV_MBUS_CODE_*) + * @reserved: drivers and applications must zero this array */ struct v4l2_subdev_mbus_code_enum { __u32 pad; @@ -90,10 +93,15 @@ struct v4l2_subdev_mbus_code_enum { /** * struct v4l2_subdev_frame_size_enum - Media bus format enumeration - * @pad: pad number, as reported by the media API * @index: format index during enumeration + * @pad: pad number, as reported by the media API * @code: format code (MEDIA_BUS_FMT_ definitions) + * @min_width: minimum frame width, in pixels 
+ * @max_width: maximum frame width, in pixels + * @min_height: minimum frame height, in pixels + * @max_height: maximum frame height, in pixels * @which: format type (from enum v4l2_subdev_format_whence) + * @reserved: drivers and applications must zero this array */ struct v4l2_subdev_frame_size_enum { __u32 index; @@ -111,6 +119,7 @@ struct v4l2_subdev_frame_size_enum { * struct v4l2_subdev_frame_interval - Pad-level frame rate * @pad: pad number, as reported by the media API * @interval: frame interval in seconds + * @reserved: drivers and applications must zero this array */ struct v4l2_subdev_frame_interval { __u32 pad; @@ -127,6 +136,7 @@ struct v4l2_subdev_frame_interval { * @height: frame height in pixels * @interval: frame interval in seconds * @which: format type (from enum v4l2_subdev_format_whence) + * @reserved: drivers and applications must zero this array */ struct v4l2_subdev_frame_interval_enum { __u32 index; diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 8ce36c1d53ca..ef33ea002b0b 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -335,6 +335,8 @@ struct vfio_region_info_cap_type { /* 10de vendor PCI sub-types */ /* * NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space. + * + * Deprecated, region no longer provided */ #define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM (1) @@ -342,6 +344,8 @@ struct vfio_region_info_cap_type { /* * IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU * to do TLB invalidation on a GPU. + * + * Deprecated, region no longer provided */ #define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD (1) @@ -641,6 +645,8 @@ struct vfio_device_migration_info { * Capability with compressed real address (aka SSA - small system address) * where GPU RAM is mapped on a system bus. Used by a GPU for DMA routing * and by the userspace to associate a NVLink bridge with a GPU. + * + * Deprecated, capability no longer provided */ #define VFIO_REGION_INFO_CAP_NVLINK2_SSATGT 4 @@ -655,6 +661,8 @@ struct vfio_region_info_cap_nvlink2_ssatgt { * property in the device tree. The value is fixed in the hardware * and failing to provide the correct value results in the link * not working with no indication from the driver why. 
+ * + * Deprecated, capability no longer provided */ #define VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD 5 diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 79dbde3bcf8d..311a01cc5775 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -586,6 +586,7 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_YUV444 v4l2_fourcc('Y', '4', '4', '4') /* 16 xxxxyyyy uuuuvvvv */ #define V4L2_PIX_FMT_YUV555 v4l2_fourcc('Y', 'U', 'V', 'O') /* 16 YUV-5-5-5 */ #define V4L2_PIX_FMT_YUV565 v4l2_fourcc('Y', 'U', 'V', 'P') /* 16 YUV-5-6-5 */ +#define V4L2_PIX_FMT_YUV24 v4l2_fourcc('Y', 'U', 'V', '3') /* 24 YUV-8-8-8 */ #define V4L2_PIX_FMT_YUV32 v4l2_fourcc('Y', 'U', 'V', '4') /* 32 YUV-8-8-8-8 */ #define V4L2_PIX_FMT_AYUV32 v4l2_fourcc('A', 'Y', 'U', 'V') /* 32 AYUV-8-8-8-8 */ #define V4L2_PIX_FMT_XYUV32 v4l2_fourcc('X', 'Y', 'U', 'V') /* 32 XYUV-8-8-8-8 */ @@ -694,6 +695,7 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_VC1_ANNEX_G v4l2_fourcc('V', 'C', '1', 'G') /* SMPTE 421M Annex G compliant stream */ #define V4L2_PIX_FMT_VC1_ANNEX_L v4l2_fourcc('V', 'C', '1', 'L') /* SMPTE 421M Annex L compliant stream */ #define V4L2_PIX_FMT_VP8 v4l2_fourcc('V', 'P', '8', '0') /* VP8 */ +#define V4L2_PIX_FMT_VP8_FRAME v4l2_fourcc('V', 'P', '8', 'F') /* VP8 parsed frame */ #define V4L2_PIX_FMT_VP9 v4l2_fourcc('V', 'P', '9', '0') /* VP9 */ #define V4L2_PIX_FMT_HEVC v4l2_fourcc('H', 'E', 'V', 'C') /* HEVC aka H.265 */ #define V4L2_PIX_FMT_FWHT v4l2_fourcc('F', 'W', 'H', 'T') /* Fast Walsh Hadamard Transform (vicodec) */ @@ -975,8 +977,10 @@ struct v4l2_requestbuffers { * pointing to this plane * @fd: when memory is V4L2_MEMORY_DMABUF, a userspace file * descriptor associated with this plane + * @m: union of @mem_offset, @userptr and @fd * @data_offset: offset in the plane to the start of data; usually 0, * unless there is a header in front of the data + * @reserved: drivers and applications must zero this array * * Multi-planar buffers consist of one or more planes, e.g. an YCbCr buffer * with two planes can have one plane for Y, and another for interleaved CbCr @@ -1018,10 +1022,14 @@ struct v4l2_plane { * a userspace file descriptor associated with this buffer * @planes: for multiplanar buffers; userspace pointer to the array of plane * info structs for this buffer + * @m: union of @offset, @userptr, @planes and @fd * @length: size in bytes of the buffer (NOT its payload) for single-plane * buffers (when type != *_MPLANE); number of elements in the * planes array for multi-plane buffers + * @reserved2: drivers and applications must zero this field * @request_fd: fd of the request that this buffer should use + * @reserved: for backwards compatibility with applications that do not know + * about @request_fd * * Contains data exchanged by application and driver using one of the Streaming * I/O methods. @@ -1059,7 +1067,7 @@ struct v4l2_buffer { #ifndef __KERNEL__ /** * v4l2_timeval_to_ns - Convert timeval to nanoseconds - * @ts: pointer to the timeval variable to be converted + * @tv: pointer to the timeval variable to be converted * * Returns the scalar nanosecond representation of the timeval * parameter. 
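The v4l2_ctrl_vp8_frame kerneldoc added above states that reference frames are identified by the timestamps of previously decoded CAPTURE buffers, converted with v4l2_timeval_to_ns(). As a rough userspace sketch only (not part of this diff: the helper name queue_vp8_frame() is invented, and the media-request allocation and queueing steps are omitted), the new V4L2_CID_STATELESS_VP8_FRAME compound control might be handed to a stateless decoder like this:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int queue_vp8_frame(int video_fd, int request_fd,
                           struct v4l2_ctrl_vp8_frame *frame,
                           const struct v4l2_buffer *last_ref)
{
        struct v4l2_ext_control ctrl;
        struct v4l2_ext_controls ctrls;

        /* Reference buffers are looked up by timestamp, as documented above. */
        frame->last_frame_ts = v4l2_timeval_to_ns(&last_ref->timestamp);

        memset(&ctrl, 0, sizeof(ctrl));
        ctrl.id = V4L2_CID_STATELESS_VP8_FRAME;
        ctrl.size = sizeof(*frame);
        ctrl.p_vp8_frame = frame;

        memset(&ctrls, 0, sizeof(ctrls));
        ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL;
        ctrls.request_fd = request_fd;
        ctrls.count = 1;
        ctrls.controls = &ctrl;

        /* Queueing the OUTPUT buffer and MEDIA_REQUEST_IOC_QUEUE are omitted. */
        return ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}

golden_frame_ts and alt_frame_ts would be filled the same way when the frame references the golden or altref buffer.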
@@ -1120,6 +1128,7 @@ static inline __u64 v4l2_timeval_to_ns(const struct timeval *tv) * @flags: flags for newly created file, currently only O_CLOEXEC is * supported, refer to manual of open syscall for more details * @fd: file descriptor associated with DMABUF (set by driver) + * @reserved: drivers and applications must zero this array * * Contains data used for exporting a video buffer as DMABUF file descriptor. * The buffer is identified by a 'cookie' returned by VIDIOC_QUERYBUF @@ -1737,6 +1746,7 @@ struct v4l2_ext_control { struct v4l2_ctrl_h264_slice_params __user *p_h264_slice_params; struct v4l2_ctrl_h264_decode_params __user *p_h264_decode_params; struct v4l2_ctrl_fwht_params __user *p_fwht_params; + struct v4l2_ctrl_vp8_frame __user *p_vp8_frame; void __user *ptr; }; } __attribute__ ((packed)); @@ -1784,6 +1794,9 @@ enum v4l2_ctrl_type { V4L2_CTRL_TYPE_U32 = 0x0102, V4L2_CTRL_TYPE_AREA = 0x0106, + V4L2_CTRL_TYPE_HDR10_CLL_INFO = 0x0110, + V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY = 0x0111, + V4L2_CTRL_TYPE_H264_SPS = 0x0200, V4L2_CTRL_TYPE_H264_PPS = 0x0201, V4L2_CTRL_TYPE_H264_SCALING_MATRIX = 0x0202, @@ -1792,6 +1805,8 @@ enum v4l2_ctrl_type { V4L2_CTRL_TYPE_H264_PRED_WEIGHTS = 0x0205, V4L2_CTRL_TYPE_FWHT_PARAMS = 0x0220, + + V4L2_CTRL_TYPE_VP8_FRAME = 0x0240, }; /* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */ @@ -2229,6 +2244,7 @@ struct v4l2_mpeg_vbi_fmt_ivtv { * this plane will be used * @bytesperline: distance in bytes between the leftmost pixels in two * adjacent lines + * @reserved: drivers and applications must zero this array */ struct v4l2_plane_pix_format { __u32 sizeimage; @@ -2247,8 +2263,10 @@ struct v4l2_plane_pix_format { * @num_planes: number of planes for this format * @flags: format flags (V4L2_PIX_FMT_FLAG_*) * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding + * @hsv_enc: enum v4l2_hsv_encoding, HSV encoding * @quantization: enum v4l2_quantization, colorspace quantization * @xfer_func: enum v4l2_xfer_func, colorspace transfer function + * @reserved: drivers and applications must zero this array */ struct v4l2_pix_format_mplane { __u32 width; @@ -2273,6 +2291,7 @@ struct v4l2_pix_format_mplane { * struct v4l2_sdr_format - SDR format definition * @pixelformat: little endian four character code (fourcc) * @buffersize: maximum size in bytes required for data + * @reserved: drivers and applications must zero this array */ struct v4l2_sdr_format { __u32 pixelformat; @@ -2299,6 +2318,8 @@ struct v4l2_meta_format { * @vbi: raw VBI capture or output parameters * @sliced: sliced VBI capture or output parameters * @raw_data: placeholder for future extensions and custom formats + * @fmt: union of @pix, @pix_mp, @win, @vbi, @sliced, @sdr, @meta + * and @raw_data */ struct v4l2_format { __u32 type; diff --git a/include/uapi/linux/virtio_bt.h b/include/uapi/linux/virtio_bt.h new file mode 100644 index 000000000000..a7bd48daa9a9 --- /dev/null +++ b/include/uapi/linux/virtio_bt.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ + +#ifndef _UAPI_LINUX_VIRTIO_BT_H +#define _UAPI_LINUX_VIRTIO_BT_H + +#include <linux/virtio_types.h> + +/* Feature bits */ +#define VIRTIO_BT_F_VND_HCI 0 /* Indicates vendor command support */ +#define VIRTIO_BT_F_MSFT_EXT 1 /* Indicates MSFT vendor support */ +#define VIRTIO_BT_F_AOSP_EXT 2 /* Indicates AOSP vendor support */ + +enum virtio_bt_config_type { + VIRTIO_BT_CONFIG_TYPE_PRIMARY = 0, + VIRTIO_BT_CONFIG_TYPE_AMP = 1, +}; + +enum virtio_bt_config_vendor { + VIRTIO_BT_CONFIG_VENDOR_NONE = 0, + 
VIRTIO_BT_CONFIG_VENDOR_ZEPHYR = 1, + VIRTIO_BT_CONFIG_VENDOR_INTEL = 2, + VIRTIO_BT_CONFIG_VENDOR_REALTEK = 3, +}; + +struct virtio_bt_config { + __u8 type; + __u16 vendor; + __u16 msft_opcode; +} __attribute__((packed)); + +#endif /* _UAPI_LINUX_VIRTIO_BT_H */ diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h index 029a2e07a7f9..f0c35ce8628c 100644 --- a/include/uapi/linux/virtio_ids.h +++ b/include/uapi/linux/virtio_ids.h @@ -54,6 +54,7 @@ #define VIRTIO_ID_SOUND 25 /* virtio sound */ #define VIRTIO_ID_FS 26 /* virtio filesystem */ #define VIRTIO_ID_PMEM 27 /* virtio pmem */ +#define VIRTIO_ID_BT 28 /* virtio bluetooth */ #define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */ #endif /* _LINUX_VIRTIO_IDS_H */ diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 5a86b521a450..6d2d34c9f375 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -239,6 +239,39 @@ enum gaudi_engine_id { GAUDI_ENGINE_ID_SIZE }; +/* + * ASIC specific PLL index + * + * Used to retrieve in frequency info of different IPs via + * HL_INFO_PLL_FREQUENCY under HL_IOCTL_INFO IOCTL. The enums need to be + * used as an index in struct hl_pll_frequency_info + */ + +enum hl_goya_pll_index { + HL_GOYA_CPU_PLL = 0, + HL_GOYA_IC_PLL, + HL_GOYA_MC_PLL, + HL_GOYA_MME_PLL, + HL_GOYA_PCI_PLL, + HL_GOYA_EMMC_PLL, + HL_GOYA_TPC_PLL, + HL_GOYA_PLL_MAX +}; + +enum hl_gaudi_pll_index { + HL_GAUDI_CPU_PLL = 0, + HL_GAUDI_PCI_PLL, + HL_GAUDI_SRAM_PLL, + HL_GAUDI_HBM_PLL, + HL_GAUDI_NIC_PLL, + HL_GAUDI_DMA_PLL, + HL_GAUDI_MESH_PLL, + HL_GAUDI_MME_PLL, + HL_GAUDI_TPC_PLL, + HL_GAUDI_IF_PLL, + HL_GAUDI_PLL_MAX +}; + enum hl_device_status { HL_DEVICE_STATUS_OPERATIONAL, HL_DEVICE_STATUS_IN_RESET, @@ -297,6 +330,7 @@ enum hl_device_status { #define HL_INFO_SYNC_MANAGER 14 #define HL_INFO_TOTAL_ENERGY 15 #define HL_INFO_PLL_FREQUENCY 16 +#define HL_INFO_POWER 17 #define HL_INFO_VERSION_MAX_LEN 128 #define HL_INFO_CARD_NAME_MAX_LEN 16 @@ -411,6 +445,14 @@ struct hl_pll_frequency_info { }; /** + * struct hl_power_info - power information + * @power: power consumption + */ +struct hl_power_info { + __u64 power; +}; + +/** * struct hl_info_sync_manager - sync manager information * @first_available_sync_object: first available sob * @first_available_monitor: first available monitor @@ -621,6 +663,7 @@ struct hl_cs_chunk { #define HL_CS_FLAGS_STAGED_SUBMISSION 0x40 #define HL_CS_FLAGS_STAGED_SUBMISSION_FIRST 0x80 #define HL_CS_FLAGS_STAGED_SUBMISSION_LAST 0x100 +#define HL_CS_FLAGS_CUSTOM_TIMEOUT 0x200 #define HL_CS_STATUS_SUCCESS 0 @@ -634,17 +677,10 @@ struct hl_cs_in { /* holds address of array of hl_cs_chunk for execution phase */ __u64 chunks_execute; - union { - /* this holds address of array of hl_cs_chunk for store phase - - * Currently not in use - */ - __u64 chunks_store; - - /* Sequence number of a staged submission CS - * valid only if HL_CS_FLAGS_STAGED_SUBMISSION is set - */ - __u64 seq; - }; + /* Sequence number of a staged submission CS + * valid only if HL_CS_FLAGS_STAGED_SUBMISSION is set + */ + __u64 seq; /* Number of chunks in restore phase array. 
Maximum number is * HL_MAX_JOBS_PER_CS @@ -656,8 +692,10 @@ struct hl_cs_in { */ __u32 num_chunks_execute; - /* Number of chunks in restore phase array - Currently not in use */ - __u32 num_chunks_store; + /* timeout in seconds - valid only if HL_CS_FLAGS_CUSTOM_TIMEOUT + * is set + */ + __u32 timeout; /* HL_CS_FLAGS_* */ __u32 cs_flags; @@ -682,14 +720,46 @@ union hl_cs_args { struct hl_cs_out out; }; +#define HL_WAIT_CS_FLAGS_INTERRUPT 0x2 +#define HL_WAIT_CS_FLAGS_INTERRUPT_MASK 0xFFF00000 + struct hl_wait_cs_in { - /* Command submission sequence number */ - __u64 seq; - /* Absolute timeout to wait in microseconds */ - __u64 timeout_us; + union { + struct { + /* Command submission sequence number */ + __u64 seq; + /* Absolute timeout to wait for command submission + * in microseconds + */ + __u64 timeout_us; + }; + + struct { + /* User address for completion comparison. + * upon interrupt, driver will compare the value pointed + * by this address with the supplied target value. + * in order not to perform any comparison, set address + * to all 1s. + * Relevant only when HL_WAIT_CS_FLAGS_INTERRUPT is set + */ + __u64 addr; + /* Target value for completion comparison */ + __u32 target; + /* Absolute timeout to wait for interrupt + * in microseconds + */ + __u32 interrupt_timeout_us; + }; + }; + /* Context ID - Currently not in use */ __u32 ctx_id; - __u32 pad; + /* HL_WAIT_CS_FLAGS_* + * If HL_WAIT_CS_FLAGS_INTERRUPT is set, this field should include + * interrupt id according to HL_WAIT_CS_FLAGS_INTERRUPT_MASK, in order + * not to specify an interrupt id ,set mask to all 1s. + */ + __u32 flags; }; #define HL_WAIT_CS_STATUS_COMPLETED 0 @@ -999,8 +1069,8 @@ struct hl_debug_args { * Each JOB will be enqueued on a specific queue, according to the user's input. * There can be more then one JOB per queue. * - * The CS IOCTL will receive three sets of JOBS. One set is for "restore" phase, - * a second set is for "execution" phase and a third set is for "store" phase. + * The CS IOCTL will receive two sets of JOBS. One set is for "restore" phase + * and a second set is for "execution" phase. * The JOBS on the "restore" phase are enqueued only after context-switch * (or if its the first CS for this context). The user can also order the * driver to run the "restore" phase explicitly diff --git a/include/uapi/misc/uacce/hisi_qm.h b/include/uapi/misc/uacce/hisi_qm.h index 6435f0bcb556..1faef5ff87ef 100644 --- a/include/uapi/misc/uacce/hisi_qm.h +++ b/include/uapi/misc/uacce/hisi_qm.h @@ -16,6 +16,7 @@ struct hisi_qp_ctx { #define HISI_QM_API_VER_BASE "hisi_qm_v1" #define HISI_QM_API_VER2_BASE "hisi_qm_v2" +#define HISI_QM_API_VER3_BASE "hisi_qm_v3" /* UACCE_CMD_QM_SET_QP_CTX: Set qp algorithm type */ #define UACCE_CMD_QM_SET_QP_CTX _IOWR('H', 10, struct hisi_qp_ctx) diff --git a/include/uapi/mtd/mtd-abi.h b/include/uapi/mtd/mtd-abi.h index 65b9db936557..b869990c2db2 100644 --- a/include/uapi/mtd/mtd-abi.h +++ b/include/uapi/mtd/mtd-abi.h @@ -205,6 +205,8 @@ struct otp_info { * without OOB, e.g., NOR flash. */ #define MEMWRITE _IOWR('M', 24, struct mtd_write_req) +/* Erase a given range of user data (must be in mode %MTD_FILE_MODE_OTP_USER) */ +#define OTPERASE _IOW('M', 25, struct otp_info) /* * Obsolete legacy interface. 
Keep it in order not to break userspace diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h index 90b739d05adf..42b177655560 100644 --- a/include/uapi/rdma/hns-abi.h +++ b/include/uapi/rdma/hns-abi.h @@ -86,6 +86,8 @@ struct hns_roce_ib_create_qp_resp { struct hns_roce_ib_alloc_ucontext_resp { __u32 qp_tab_size; __u32 cqe_size; + __u32 srq_tab_size; + __u32 reserved; }; struct hns_roce_ib_alloc_pd_resp { diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h index 3fd9b380a091..ca2372864b70 100644 --- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h +++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h @@ -41,6 +41,25 @@ enum mlx5_ib_create_flow_action_attrs { MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS = (1U << UVERBS_ID_NS_SHIFT), }; +enum mlx5_ib_dm_methods { + MLX5_IB_METHOD_DM_MAP_OP_ADDR = (1U << UVERBS_ID_NS_SHIFT), + MLX5_IB_METHOD_DM_QUERY, +}; + +enum mlx5_ib_dm_map_op_addr_attrs { + MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP, + MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET, + MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX, +}; + +enum mlx5_ib_query_dm_attrs { + MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET, + MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX, + MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH, +}; + enum mlx5_ib_alloc_dm_attrs { MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX, @@ -154,6 +173,7 @@ enum mlx5_ib_devx_umem_reg_attrs { MLX5_IB_ATTR_DEVX_UMEM_REG_LEN, MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, + MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP, }; enum mlx5_ib_devx_umem_dereg_attrs { @@ -300,4 +320,13 @@ enum mlx5_ib_pd_methods { }; +enum mlx5_ib_device_methods { + MLX5_IB_METHOD_QUERY_PORT = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum mlx5_ib_query_port_attrs { + MLX5_IB_ATTR_QUERY_PORT_PORT_NUM = (1U << UVERBS_ID_NS_SHIFT), + MLX5_IB_ATTR_QUERY_PORT, +}; + #endif diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h index 56b26eaea083..a21ca8ece8db 100644 --- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h +++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h @@ -83,5 +83,30 @@ enum mlx5_ib_uapi_uar_alloc_type { MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC = 0x1, }; +enum mlx5_ib_uapi_query_port_flags { + MLX5_IB_UAPI_QUERY_PORT_VPORT = 1 << 0, + MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID = 1 << 1, + MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_RX = 1 << 2, + MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_TX = 1 << 3, + MLX5_IB_UAPI_QUERY_PORT_VPORT_REG_C0 = 1 << 4, + MLX5_IB_UAPI_QUERY_PORT_ESW_OWNER_VHCA_ID = 1 << 5, +}; + +struct mlx5_ib_uapi_reg { + __u32 value; + __u32 mask; +}; + +struct mlx5_ib_uapi_query_port { + __aligned_u64 flags; + __u16 vport; + __u16 vport_vhca_id; + __u16 esw_owner_vhca_id; + __u16 rsvd0; + __aligned_u64 vport_steering_icm_rx; + __aligned_u64 vport_steering_icm_tx; + struct mlx5_ib_uapi_reg reg_c0; +}; + #endif diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h index d2f5b8396243..75a1ae2311d8 100644 --- a/include/uapi/rdma/rdma_netlink.h +++ b/include/uapi/rdma/rdma_netlink.h @@ -293,6 +293,10 @@ enum rdma_nldev_command { RDMA_NLDEV_CMD_RES_MR_GET_RAW, + RDMA_NLDEV_CMD_RES_CTX_GET, /* can dump */ + + RDMA_NLDEV_CMD_RES_SRQ_GET, /* can dump */ + RDMA_NLDEV_NUM_OPS }; @@ -533,6 +537,18 @@ enum rdma_nldev_attr { RDMA_NLDEV_ATTR_RES_RAW, /* binary */ + RDMA_NLDEV_ATTR_RES_CTX, 
/* nested table */ + RDMA_NLDEV_ATTR_RES_CTX_ENTRY, /* nested table */ + + RDMA_NLDEV_ATTR_RES_SRQ, /* nested table */ + RDMA_NLDEV_ATTR_RES_SRQ_ENTRY, /* nested table */ + RDMA_NLDEV_ATTR_RES_SRQN, /* u32 */ + + RDMA_NLDEV_ATTR_MIN_RANGE, /* u32 */ + RDMA_NLDEV_ATTR_MAX_RANGE, /* u32 */ + + RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK, /* u8 */ + /* * Always the end */ diff --git a/include/vdso/time64.h b/include/vdso/time64.h index 9d43c3f5e89d..b40cfa2aa33c 100644 --- a/include/vdso/time64.h +++ b/include/vdso/time64.h @@ -9,6 +9,7 @@ #define NSEC_PER_MSEC 1000000L #define USEC_PER_SEC 1000000L #define NSEC_PER_SEC 1000000000L +#define PSEC_PER_SEC 1000000000000LL #define FSEC_PER_SEC 1000000000000000LL #endif /* __VDSO_TIME64_H */ diff --git a/include/xen/acpi.h b/include/xen/acpi.h index 4ddd7dc4a61e..b1e11863144d 100644 --- a/include/xen/acpi.h +++ b/include/xen/acpi.h @@ -40,41 +40,6 @@ #include <xen/xen.h> #include <linux/acpi.h> -#define ACPI_MEMORY_DEVICE_CLASS "memory" -#define ACPI_MEMORY_DEVICE_HID "PNP0C80" -#define ACPI_MEMORY_DEVICE_NAME "Hotplug Mem Device" - -int xen_stub_memory_device_init(void); -void xen_stub_memory_device_exit(void); - -#define ACPI_PROCESSOR_CLASS "processor" -#define ACPI_PROCESSOR_DEVICE_HID "ACPI0007" -#define ACPI_PROCESSOR_DEVICE_NAME "Processor" - -int xen_stub_processor_init(void); -void xen_stub_processor_exit(void); - -void xen_pcpu_hotplug_sync(void); -int xen_pcpu_id(uint32_t acpi_id); - -static inline int xen_acpi_get_pxm(acpi_handle h) -{ - unsigned long long pxm; - acpi_status status; - acpi_handle handle; - acpi_handle phandle = h; - - do { - handle = phandle; - status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm); - if (ACPI_SUCCESS(status)) - return pxm; - status = acpi_get_parent(handle, &phandle); - } while (ACPI_SUCCESS(status)); - - return -ENXIO; -} - int xen_acpi_notify_hypervisor_sleep(u8 sleep_state, u32 pm1a_cnt, u32 pm1b_cnd); int xen_acpi_notify_hypervisor_extended_sleep(u8 sleep_state, diff --git a/include/xen/arm/swiotlb-xen.h b/include/xen/arm/swiotlb-xen.h new file mode 100644 index 000000000000..33336ab58afc --- /dev/null +++ b/include/xen/arm/swiotlb-xen.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_ARM_SWIOTLB_XEN_H +#define _ASM_ARM_SWIOTLB_XEN_H + +#include <xen/features.h> +#include <xen/xen.h> + +static inline int xen_swiotlb_detect(void) +{ + if (!xen_domain()) + return 0; + if (xen_feature(XENFEAT_direct_mapped)) + return 1; + /* legacy case */ + if (!xen_feature(XENFEAT_not_direct_mapped) && xen_initial_domain()) + return 1; + return 0; +} + +#endif /* _ASM_ARM_SWIOTLB_XEN_H */ diff --git a/include/xen/interface/elfnote.h b/include/xen/interface/elfnote.h index 9e9f9bf7c66d..449bd383cb76 100644 --- a/include/xen/interface/elfnote.h +++ b/include/xen/interface/elfnote.h @@ -208,13 +208,3 @@ #define XEN_ELFNOTE_MAX XEN_ELFNOTE_PHYS32_ENTRY #endif /* __XEN_PUBLIC_ELFNOTE_H__ */ - -/* - * Local variables: - * mode: C - * c-set-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h index 6d1384abfbdf..5a7bdefa06a8 100644 --- a/include/xen/interface/features.h +++ b/include/xen/interface/features.h @@ -83,6 +83,20 @@ */ #define XENFEAT_linux_rsdp_unrestricted 15 +/* + * A direct-mapped (or 1:1 mapped) domain is a domain for which its + * local pages have gfn == mfn. If a domain is direct-mapped, + * XENFEAT_direct_mapped is set; otherwise XENFEAT_not_direct_mapped + * is set. 
+ * + * If neither flag is set (e.g. older Xen releases) the assumptions are: + * - not auto_translated domains (x86 only) are always direct-mapped + * - on x86, auto_translated domains are not direct-mapped + * - on ARM, Dom0 is direct-mapped, DomUs are not + */ +#define XENFEAT_not_direct_mapped 16 +#define XENFEAT_direct_mapped 17 + #define XENFEAT_NR_SUBMAPS 1 #endif /* __XEN_PUBLIC_FEATURES_H__ */ diff --git a/include/xen/interface/hvm/hvm_vcpu.h b/include/xen/interface/hvm/hvm_vcpu.h index 32ca83edd44d..bfc2138e0bf5 100644 --- a/include/xen/interface/hvm/hvm_vcpu.h +++ b/include/xen/interface/hvm/hvm_vcpu.h @@ -131,13 +131,3 @@ struct vcpu_hvm_context { typedef struct vcpu_hvm_context vcpu_hvm_context_t; #endif /* __XEN_PUBLIC_HVM_HVM_VCPU_H__ */ - -/* - * Local variables: - * mode: C - * c-file-style: "BSD" - * c-basic-offset: 4 - * tab-width: 4 - * indent-tabs-mode: nil - * End: - */ diff --git a/include/xen/interface/io/xenbus.h b/include/xen/interface/io/xenbus.h index aaf2951b1cce..fb8716112251 100644 --- a/include/xen/interface/io/xenbus.h +++ b/include/xen/interface/io/xenbus.h @@ -39,13 +39,3 @@ enum xenbus_state }; #endif /* _XEN_PUBLIC_IO_XENBUS_H */ - -/* - * Local variables: - * c-file-style: "linux" - * indent-tabs-mode: t - * c-indent-level: 8 - * c-basic-offset: 8 - * tab-width: 8 - * End: - */ diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h index d5eaf9d682b8..b3e647f86e3e 100644 --- a/include/xen/swiotlb-xen.h +++ b/include/xen/swiotlb-xen.h @@ -3,13 +3,15 @@ #define __LINUX_SWIOTLB_XEN_H #include <linux/swiotlb.h> +#include <asm/xen/swiotlb-xen.h> void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir); void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir); -extern int xen_swiotlb_init(int verbose, bool early); +int xen_swiotlb_init(void); +void __init xen_swiotlb_init_early(void); extern const struct dma_map_ops xen_swiotlb_dma_ops; #endif /* __LINUX_SWIOTLB_XEN_H */ |
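Circling back to the userfaultfd.h changes at the top of this section, the new minor-fault machinery is used roughly as follows. This is only a sketch and not part of the diff: the helper names are invented, the UFFDIO_API handshake (which must request UFFD_FEATURE_MINOR_HUGETLBFS) is assumed to have happened already, and the uffd_msg read() loop that reports UFFD_PAGEFAULT_FLAG_MINOR events is left out:

#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

/* Register a hugetlbfs-backed range for minor fault interception. */
static int uffd_register_minor(int uffd, void *start, unsigned long len)
{
        struct uffdio_register reg = {
                .range = { .start = (unsigned long)start, .len = len },
                .mode  = UFFDIO_REGISTER_MODE_MINOR,
        };

        return ioctl(uffd, UFFDIO_REGISTER, &reg);
}

/* Resolve one minor fault by installing the page that already exists in the
 * page cache at the faulting address and waking the faulting thread.
 */
static int uffd_continue(int uffd, unsigned long fault_addr,
                         unsigned long huge_page_size)
{
        struct uffdio_continue cont = {
                .range = {
                        .start = fault_addr & ~(huge_page_size - 1),
                        .len   = huge_page_size,
                },
                .mode = 0, /* or UFFDIO_CONTINUE_MODE_DONTWAKE to wake later */
        };

        if (ioctl(uffd, UFFDIO_CONTINUE, &cont))
                return -1;

        /* On success, cont.mapped reports how many bytes were mapped. */
        return 0;
}

The DONTWAKE variant mirrors the existing UFFDIO_COPY flow: a monitor can batch several UFFDIO_CONTINUE calls and wake the faulting threads afterwards with a single UFFDIO_WAKE.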