Diffstat (limited to 'include')
103 files changed, 1465 insertions, 597 deletions
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h index fe2e3ac..12c2882 100644 --- a/include/acpi/acconfig.h +++ b/include/acpi/acconfig.h @@ -144,6 +144,10 @@ #define ACPI_ADDRESS_RANGE_MAX 2 +/* Maximum number of While() loops before abort */ + +#define ACPI_MAX_LOOP_COUNT 0xFFFF + /****************************************************************************** * * ACPI Specification constants (Do not change unless the specification changes) diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h index 34f601e..48eb4dd 100644 --- a/include/acpi/acoutput.h +++ b/include/acpi/acoutput.h @@ -366,7 +366,7 @@ ACPI_TRACE_ENTRY (name, acpi_ut_trace_u32, u32, value) #define ACPI_FUNCTION_TRACE_STR(name, string) \ - ACPI_TRACE_ENTRY (name, acpi_ut_trace_str, char *, string) + ACPI_TRACE_ENTRY (name, acpi_ut_trace_str, const char *, string) #define ACPI_FUNCTION_ENTRY() \ acpi_ut_track_stack_ptr() @@ -425,6 +425,9 @@ #define return_PTR(pointer) \ ACPI_TRACE_EXIT (acpi_ut_ptr_exit, void *, pointer) +#define return_STR(string) \ + ACPI_TRACE_EXIT (acpi_ut_str_exit, const char *, string) + #define return_VALUE(value) \ ACPI_TRACE_EXIT (acpi_ut_value_exit, u64, value) @@ -478,6 +481,7 @@ #define return_VOID return #define return_ACPI_STATUS(s) return(s) #define return_PTR(s) return(s) +#define return_STR(s) return(s) #define return_VALUE(s) return(s) #define return_UINT8(s) return(s) #define return_UINT32(s) return(s) diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h index 562603d..f3414c8 100644 --- a/include/acpi/acpiosxf.h +++ b/include/acpi/acpiosxf.h @@ -371,6 +371,12 @@ acpi_status acpi_os_wait_command_ready(void); acpi_status acpi_os_notify_command_complete(void); #endif +#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_trace_point +void +acpi_os_trace_point(acpi_trace_event_type type, + u8 begin, u8 *aml, char *pathname); +#endif + /* * Obtain ACPI table(s) */ @@ -416,41 +422,4 @@ char *acpi_os_get_next_filename(void *dir_handle); void acpi_os_close_directory(void *dir_handle); #endif -/* - * File I/O and related support - */ -#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_open_file -ACPI_FILE acpi_os_open_file(const char *path, u8 modes); -#endif - -#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_close_file -void acpi_os_close_file(ACPI_FILE file); -#endif - -#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_read_file -int -acpi_os_read_file(ACPI_FILE file, - void *buffer, acpi_size size, acpi_size count); -#endif - -#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_write_file -int -acpi_os_write_file(ACPI_FILE file, - void *buffer, acpi_size size, acpi_size count); -#endif - -#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_file_offset -long acpi_os_get_file_offset(ACPI_FILE file); -#endif - -#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_set_file_offset -acpi_status acpi_os_set_file_offset(ACPI_FILE file, long offset, u8 from); -#endif - -#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_trace_point -void -acpi_os_trace_point(acpi_trace_event_type type, - u8 begin, u8 *aml, char *pathname); -#endif - #endif /* __ACPIOSXF_H__ */ diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 1ff3a76..c7b3a13 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -46,7 +46,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20160422 +#define ACPI_CA_VERSION 0x20160831 #include <acpi/acconfig.h> #include <acpi/actypes.h> @@ -195,6 +195,13 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE); ACPI_INIT_GLOBAL(u8, 
acpi_gbl_group_module_level_code, TRUE); /* + * Optionally support module level code by parsing the entire table as + * a term_list. Default is FALSE, do not execute entire table until some + * lock order issues are fixed. + */ +ACPI_INIT_GLOBAL(u8, acpi_gbl_parse_table_as_term_list, FALSE); + +/* * Optionally use 32-bit FADT addresses if and when there is a conflict * (address mismatch) between the 32-bit and 64-bit versions of the * address. Although ACPICA adheres to the ACPI specification which @@ -416,18 +423,19 @@ ACPI_GLOBAL(u8, acpi_gbl_system_awake_and_running); /* * Initialization */ -ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION acpi_initialize_tables(struct acpi_table_desc *initial_storage, u32 initial_table_count, u8 allow_resize)) -ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_initialize_subsystem(void)) - -ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_enable_subsystem(u32 flags)) - -ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init - acpi_initialize_objects(u32 flags)) -ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_terminate(void)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_initialize_subsystem(void)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_enable_subsystem(u32 flags)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_initialize_objects(u32 flags)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_terminate(void)) /* * Miscellaneous global interfaces @@ -467,7 +475,7 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status /* * ACPI table load/unload interfaces */ -ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION acpi_install_table(acpi_physical_address address, u8 physical)) @@ -476,14 +484,17 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_unload_parent_table(acpi_handle object)) -ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_load_tables(void)) + +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_load_tables(void)) /* * ACPI table manipulation interfaces */ -ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_reallocate_root_table(void)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION + acpi_reallocate_root_table(void)) -ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init +ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION acpi_find_root_pointer(acpi_physical_address *rsdp_address)) ACPI_EXTERNAL_RETURN_STATUS(acpi_status @@ -732,6 +743,10 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status u32 gpe_number)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_mask_gpe(acpi_handle gpe_device, + u32 gpe_number, u8 is_masked)) + +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_mark_gpe_for_wake(acpi_handle gpe_device, u32 gpe_number)) @@ -935,9 +950,6 @@ ACPI_DBG_DEPENDENT_RETURN_VOID(void acpi_trace_point(acpi_trace_event_type type, u8 begin, u8 *aml, char *pathname)) -ACPI_APP_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1) - void ACPI_INTERNAL_VAR_XFACE - acpi_log_error(const char *format, ...)) acpi_status acpi_initialize_debugger(void); diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h index c19700e..1b949e0 100644 --- a/include/acpi/actbl.h +++ b/include/acpi/actbl.h @@ -230,62 +230,72 @@ struct acpi_table_facs { /* Fields common to all versions of the FADT */ struct acpi_table_fadt { - struct acpi_table_header header; /* Common ACPI table header */ - u32 facs; /* 32-bit physical address of FACS */ 
- u32 dsdt; /* 32-bit physical address of DSDT */ - u8 model; /* System Interrupt Model (ACPI 1.0) - not used in ACPI 2.0+ */ - u8 preferred_profile; /* Conveys preferred power management profile to OSPM. */ - u16 sci_interrupt; /* System vector of SCI interrupt */ - u32 smi_command; /* 32-bit Port address of SMI command port */ - u8 acpi_enable; /* Value to write to SMI_CMD to enable ACPI */ - u8 acpi_disable; /* Value to write to SMI_CMD to disable ACPI */ - u8 s4_bios_request; /* Value to write to SMI_CMD to enter S4BIOS state */ - u8 pstate_control; /* Processor performance state control */ - u32 pm1a_event_block; /* 32-bit port address of Power Mgt 1a Event Reg Blk */ - u32 pm1b_event_block; /* 32-bit port address of Power Mgt 1b Event Reg Blk */ - u32 pm1a_control_block; /* 32-bit port address of Power Mgt 1a Control Reg Blk */ - u32 pm1b_control_block; /* 32-bit port address of Power Mgt 1b Control Reg Blk */ - u32 pm2_control_block; /* 32-bit port address of Power Mgt 2 Control Reg Blk */ - u32 pm_timer_block; /* 32-bit port address of Power Mgt Timer Ctrl Reg Blk */ - u32 gpe0_block; /* 32-bit port address of General Purpose Event 0 Reg Blk */ - u32 gpe1_block; /* 32-bit port address of General Purpose Event 1 Reg Blk */ - u8 pm1_event_length; /* Byte Length of ports at pm1x_event_block */ - u8 pm1_control_length; /* Byte Length of ports at pm1x_control_block */ - u8 pm2_control_length; /* Byte Length of ports at pm2_control_block */ - u8 pm_timer_length; /* Byte Length of ports at pm_timer_block */ - u8 gpe0_block_length; /* Byte Length of ports at gpe0_block */ - u8 gpe1_block_length; /* Byte Length of ports at gpe1_block */ - u8 gpe1_base; /* Offset in GPE number space where GPE1 events start */ - u8 cst_control; /* Support for the _CST object and C-States change notification */ - u16 c2_latency; /* Worst case HW latency to enter/exit C2 state */ - u16 c3_latency; /* Worst case HW latency to enter/exit C3 state */ - u16 flush_size; /* Processor memory cache line width, in bytes */ - u16 flush_stride; /* Number of flush strides that need to be read */ - u8 duty_offset; /* Processor duty cycle index in processor P_CNT reg */ - u8 duty_width; /* Processor duty cycle value bit width in P_CNT register */ - u8 day_alarm; /* Index to day-of-month alarm in RTC CMOS RAM */ - u8 month_alarm; /* Index to month-of-year alarm in RTC CMOS RAM */ - u8 century; /* Index to century in RTC CMOS RAM */ - u16 boot_flags; /* IA-PC Boot Architecture Flags (see below for individual flags) */ - u8 reserved; /* Reserved, must be zero */ - u32 flags; /* Miscellaneous flag bits (see below for individual flags) */ - struct acpi_generic_address reset_register; /* 64-bit address of the Reset register */ - u8 reset_value; /* Value to write to the reset_register port to reset the system */ - u16 arm_boot_flags; /* ARM-Specific Boot Flags (see below for individual flags) (ACPI 5.1) */ - u8 minor_revision; /* FADT Minor Revision (ACPI 5.1) */ - u64 Xfacs; /* 64-bit physical address of FACS */ - u64 Xdsdt; /* 64-bit physical address of DSDT */ - struct acpi_generic_address xpm1a_event_block; /* 64-bit Extended Power Mgt 1a Event Reg Blk address */ - struct acpi_generic_address xpm1b_event_block; /* 64-bit Extended Power Mgt 1b Event Reg Blk address */ - struct acpi_generic_address xpm1a_control_block; /* 64-bit Extended Power Mgt 1a Control Reg Blk address */ - struct acpi_generic_address xpm1b_control_block; /* 64-bit Extended Power Mgt 1b Control Reg Blk address */ - struct acpi_generic_address 
xpm2_control_block; /* 64-bit Extended Power Mgt 2 Control Reg Blk address */ - struct acpi_generic_address xpm_timer_block; /* 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */ - struct acpi_generic_address xgpe0_block; /* 64-bit Extended General Purpose Event 0 Reg Blk address */ - struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */ - struct acpi_generic_address sleep_control; /* 64-bit Sleep Control register (ACPI 5.0) */ - struct acpi_generic_address sleep_status; /* 64-bit Sleep Status register (ACPI 5.0) */ - u64 hypervisor_id; /* Hypervisor Vendor ID (ACPI 6.0) */ + struct acpi_table_header header; /* [V1] Common ACPI table header */ + u32 facs; /* [V1] 32-bit physical address of FACS */ + u32 dsdt; /* [V1] 32-bit physical address of DSDT */ + u8 model; /* [V1] System Interrupt Model (ACPI 1.0) - not used in ACPI 2.0+ */ + u8 preferred_profile; /* [V1] Conveys preferred power management profile to OSPM. */ + u16 sci_interrupt; /* [V1] System vector of SCI interrupt */ + u32 smi_command; /* [V1] 32-bit Port address of SMI command port */ + u8 acpi_enable; /* [V1] Value to write to SMI_CMD to enable ACPI */ + u8 acpi_disable; /* [V1] Value to write to SMI_CMD to disable ACPI */ + u8 s4_bios_request; /* [V1] Value to write to SMI_CMD to enter S4BIOS state */ + u8 pstate_control; /* [V1] Processor performance state control */ + u32 pm1a_event_block; /* [V1] 32-bit port address of Power Mgt 1a Event Reg Blk */ + u32 pm1b_event_block; /* [V1] 32-bit port address of Power Mgt 1b Event Reg Blk */ + u32 pm1a_control_block; /* [V1] 32-bit port address of Power Mgt 1a Control Reg Blk */ + u32 pm1b_control_block; /* [V1] 32-bit port address of Power Mgt 1b Control Reg Blk */ + u32 pm2_control_block; /* [V1] 32-bit port address of Power Mgt 2 Control Reg Blk */ + u32 pm_timer_block; /* [V1] 32-bit port address of Power Mgt Timer Ctrl Reg Blk */ + u32 gpe0_block; /* [V1] 32-bit port address of General Purpose Event 0 Reg Blk */ + u32 gpe1_block; /* [V1] 32-bit port address of General Purpose Event 1 Reg Blk */ + u8 pm1_event_length; /* [V1] Byte Length of ports at pm1x_event_block */ + u8 pm1_control_length; /* [V1] Byte Length of ports at pm1x_control_block */ + u8 pm2_control_length; /* [V1] Byte Length of ports at pm2_control_block */ + u8 pm_timer_length; /* [V1] Byte Length of ports at pm_timer_block */ + u8 gpe0_block_length; /* [V1] Byte Length of ports at gpe0_block */ + u8 gpe1_block_length; /* [V1] Byte Length of ports at gpe1_block */ + u8 gpe1_base; /* [V1] Offset in GPE number space where GPE1 events start */ + u8 cst_control; /* [V1] Support for the _CST object and C-States change notification */ + u16 c2_latency; /* [V1] Worst case HW latency to enter/exit C2 state */ + u16 c3_latency; /* [V1] Worst case HW latency to enter/exit C3 state */ + u16 flush_size; /* [V1] Processor memory cache line width, in bytes */ + u16 flush_stride; /* [V1] Number of flush strides that need to be read */ + u8 duty_offset; /* [V1] Processor duty cycle index in processor P_CNT reg */ + u8 duty_width; /* [V1] Processor duty cycle value bit width in P_CNT register */ + u8 day_alarm; /* [V1] Index to day-of-month alarm in RTC CMOS RAM */ + u8 month_alarm; /* [V1] Index to month-of-year alarm in RTC CMOS RAM */ + u8 century; /* [V1] Index to century in RTC CMOS RAM */ + u16 boot_flags; /* [V3] IA-PC Boot Architecture Flags (see below for individual flags) */ + u8 reserved; /* [V1] Reserved, must be zero */ + u32 flags; /* [V1] Miscellaneous flag 
bits (see below for individual flags) */ + /* End of Version 1 FADT fields (ACPI 1.0) */ + + struct acpi_generic_address reset_register; /* [V3] 64-bit address of the Reset register */ + u8 reset_value; /* [V3] Value to write to the reset_register port to reset the system */ + u16 arm_boot_flags; /* [V5] ARM-Specific Boot Flags (see below for individual flags) (ACPI 5.1) */ + u8 minor_revision; /* [V5] FADT Minor Revision (ACPI 5.1) */ + u64 Xfacs; /* [V3] 64-bit physical address of FACS */ + u64 Xdsdt; /* [V3] 64-bit physical address of DSDT */ + struct acpi_generic_address xpm1a_event_block; /* [V3] 64-bit Extended Power Mgt 1a Event Reg Blk address */ + struct acpi_generic_address xpm1b_event_block; /* [V3] 64-bit Extended Power Mgt 1b Event Reg Blk address */ + struct acpi_generic_address xpm1a_control_block; /* [V3] 64-bit Extended Power Mgt 1a Control Reg Blk address */ + struct acpi_generic_address xpm1b_control_block; /* [V3] 64-bit Extended Power Mgt 1b Control Reg Blk address */ + struct acpi_generic_address xpm2_control_block; /* [V3] 64-bit Extended Power Mgt 2 Control Reg Blk address */ + struct acpi_generic_address xpm_timer_block; /* [V3] 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */ + struct acpi_generic_address xgpe0_block; /* [V3] 64-bit Extended General Purpose Event 0 Reg Blk address */ + struct acpi_generic_address xgpe1_block; /* [V3] 64-bit Extended General Purpose Event 1 Reg Blk address */ + /* End of Version 3 FADT fields (ACPI 2.0) */ + + struct acpi_generic_address sleep_control; /* [V4] 64-bit Sleep Control register (ACPI 5.0) */ + /* End of Version 4 FADT fields (ACPI 3.0 and ACPI 4.0) (Field was originally reserved in ACPI 3.0) */ + + struct acpi_generic_address sleep_status; /* [V5] 64-bit Sleep Status register (ACPI 5.0) */ + /* End of Version 5 FADT fields (ACPI 5.0) */ + + u64 hypervisor_id; /* [V6] Hypervisor Vendor ID (ACPI 6.0) */ + /* End of Version 6 FADT fields (ACPI 6.0) */ + }; /* Masks for FADT IA-PC Boot Architecture Flags (boot_flags) [Vx]=Introduced in this FADT revision */ @@ -301,8 +311,8 @@ struct acpi_table_fadt { /* Masks for FADT ARM Boot Architecture Flags (arm_boot_flags) ACPI 5.1 */ -#define ACPI_FADT_PSCI_COMPLIANT (1) /* 00: [V5+] PSCI 0.2+ is implemented */ -#define ACPI_FADT_PSCI_USE_HVC (1<<1) /* 01: [V5+] HVC must be used instead of SMC as the PSCI conduit */ +#define ACPI_FADT_PSCI_COMPLIANT (1) /* 00: [V5] PSCI 0.2+ is implemented */ +#define ACPI_FADT_PSCI_USE_HVC (1<<1) /* 01: [V5] HVC must be used instead of SMC as the PSCI conduit */ /* Masks for FADT flags */ @@ -399,20 +409,34 @@ struct acpi_table_desc { * match the expected length. In other words, the length of the * FADT is the bottom line as to what the version really is. * - * For reference, the values below are as follows: - * FADT V1 size: 0x074 - * FADT V2 size: 0x084 - * FADT V3 size: 0x0F4 - * FADT V4 size: 0x0F4 - * FADT V5 size: 0x10C - * FADT V6 size: 0x114 + * NOTE: There is no officialy released V2 of the FADT. This + * version was used only for prototyping and testing during the + * 32-bit to 64-bit transition. V3 was the first official 64-bit + * version of the FADT. + * + * Update this list of defines when a new version of the FADT is + * added to the ACPI specification. Note that the FADT version is + * only incremented when new fields are appended to the existing + * version. Therefore, the FADT version is competely independent + * from the version of the ACPI specification where it is + * defined. 
+ * + * For reference, the various FADT lengths are as follows: + * FADT V1 size: 0x074 ACPI 1.0 + * FADT V3 size: 0x0F4 ACPI 2.0 + * FADT V4 size: 0x100 ACPI 3.0 and ACPI 4.0 + * FADT V5 size: 0x10C ACPI 5.0 + * FADT V6 size: 0x114 ACPI 6.0 */ -#define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4) -#define ACPI_FADT_V2_SIZE (u32) (ACPI_FADT_OFFSET (minor_revision) + 1) -#define ACPI_FADT_V3_SIZE (u32) (ACPI_FADT_OFFSET (sleep_control)) -#define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id)) -#define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt)) +#define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4) /* ACPI 1.0 */ +#define ACPI_FADT_V3_SIZE (u32) (ACPI_FADT_OFFSET (sleep_control)) /* ACPI 2.0 */ +#define ACPI_FADT_V4_SIZE (u32) (ACPI_FADT_OFFSET (sleep_status)) /* ACPI 3.0 and ACPI 4.0 */ +#define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id)) /* ACPI 5.0 */ +#define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt)) /* ACPI 6.0 */ + +/* Update these when new FADT versions are added */ +#define ACPI_FADT_MAX_VERSION 6 #define ACPI_FADT_CONFORMANCE "ACPI 6.1 (FADT version 6)" #endif /* __ACTBL_H__ */ diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index cb389ef..1d798ab 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -732,16 +732,17 @@ typedef u32 acpi_event_type; * The encoding of acpi_event_status is illustrated below. * Note that a set bit (1) indicates the property is TRUE * (e.g. if bit 0 is set then the event is enabled). - * +-------------+-+-+-+-+-+ - * | Bits 31:5 |4|3|2|1|0| - * +-------------+-+-+-+-+-+ - * | | | | | | - * | | | | | +- Enabled? - * | | | | +--- Enabled for wake? - * | | | +----- Status bit set? - * | | +------- Enable bit set? - * | +--------- Has a handler? - * +--------------- <Reserved> + * +-------------+-+-+-+-+-+-+ + * | Bits 31:6 |5|4|3|2|1|0| + * +-------------+-+-+-+-+-+-+ + * | | | | | | | + * | | | | | | +- Enabled? + * | | | | | +--- Enabled for wake? + * | | | | +----- Status bit set? + * | | | +------- Enable bit set? + * | | +--------- Has a handler? + * | +----------- Masked? 
+ * +----------------- <Reserved> */ typedef u32 acpi_event_status; @@ -751,6 +752,7 @@ typedef u32 acpi_event_status; #define ACPI_EVENT_FLAG_STATUS_SET (acpi_event_status) 0x04 #define ACPI_EVENT_FLAG_ENABLE_SET (acpi_event_status) 0x08 #define ACPI_EVENT_FLAG_HAS_HANDLER (acpi_event_status) 0x10 +#define ACPI_EVENT_FLAG_MASKED (acpi_event_status) 0x20 #define ACPI_EVENT_FLAG_SET ACPI_EVENT_FLAG_STATUS_SET /* Actions for acpi_set_gpe, acpi_gpe_wakeup, acpi_hw_low_set_gpe */ @@ -761,14 +763,15 @@ typedef u32 acpi_event_status; /* * GPE info flags - Per GPE - * +-------+-+-+---+ - * | 7:5 |4|3|2:0| - * +-------+-+-+---+ - * | | | | - * | | | +-- Type of dispatch:to method, handler, notify, or none - * | | +----- Interrupt type: edge or level triggered - * | +------- Is a Wake GPE - * +------------ <Reserved> + * +---+-+-+-+---+ + * |7:6|5|4|3|2:0| + * +---+-+-+-+---+ + * | | | | | + * | | | | +-- Type of dispatch:to method, handler, notify, or none + * | | | +----- Interrupt type: edge or level triggered + * | | +------- Is a Wake GPE + * | +--------- Is GPE masked by the software GPE masking machanism + * +------------ <Reserved> */ #define ACPI_GPE_DISPATCH_NONE (u8) 0x00 #define ACPI_GPE_DISPATCH_METHOD (u8) 0x01 @@ -1031,12 +1034,6 @@ struct acpi_statistics { u32 method_count; }; -/* Table Event Types */ - -#define ACPI_TABLE_EVENT_LOAD 0x0 -#define ACPI_TABLE_EVENT_UNLOAD 0x1 -#define ACPI_NUM_TABLE_EVENTS 2 - /* * Types specific to the OS service interfaces */ @@ -1088,9 +1085,13 @@ acpi_status (*acpi_exception_handler) (acpi_status aml_status, typedef acpi_status (*acpi_table_handler) (u32 event, void *table, void *context); -#define ACPI_TABLE_LOAD 0x0 -#define ACPI_TABLE_UNLOAD 0x1 -#define ACPI_NUM_TABLE_EVENTS 2 +/* Table Event Types */ + +#define ACPI_TABLE_EVENT_LOAD 0x0 +#define ACPI_TABLE_EVENT_UNLOAD 0x1 +#define ACPI_TABLE_EVENT_INSTALL 0x2 +#define ACPI_TABLE_EVENT_UNINSTALL 0x3 +#define ACPI_NUM_TABLE_EVENTS 4 /* Address Spaces (For Operation Regions) */ @@ -1285,15 +1286,6 @@ typedef enum { #define ACPI_OSI_WIN_8 0x0C #define ACPI_OSI_WIN_10 0x0D -/* Definitions of file IO */ - -#define ACPI_FILE_READING 0x01 -#define ACPI_FILE_WRITING 0x02 -#define ACPI_FILE_BINARY 0x04 - -#define ACPI_FILE_BEGIN 0x01 -#define ACPI_FILE_END 0x02 - /* Definitions of getopt */ #define ACPI_OPT_END -1 diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h index 284965c..427a7c3 100644 --- a/include/acpi/cppc_acpi.h +++ b/include/acpi/cppc_acpi.h @@ -24,7 +24,9 @@ #define CPPC_NUM_ENT 21 #define CPPC_REV 2 -#define PCC_CMD_COMPLETE 1 +#define PCC_CMD_COMPLETE_MASK (1 << 0) +#define PCC_ERROR_MASK (1 << 2) + #define MAX_CPC_REG_ENT 19 /* CPPC specific PCC commands. */ @@ -49,6 +51,7 @@ struct cpc_reg { */ struct cpc_register_resource { acpi_object_type type; + u64 __iomem *sys_mem_vaddr; union { struct cpc_reg reg; u64 int_value; @@ -60,8 +63,11 @@ struct cpc_desc { int num_entries; int version; int cpu_id; + int write_cmd_status; + int write_cmd_id; struct cpc_register_resource cpc_regs[MAX_CPC_REG_ENT]; struct acpi_psd_package domain_info; + struct kobject kobj; }; /* These are indexes into the per-cpu cpc_regs[]. Order is important. 
*/ @@ -96,7 +102,6 @@ enum cppc_regs { struct cppc_perf_caps { u32 highest_perf; u32 nominal_perf; - u32 reference_perf; u32 lowest_perf; }; @@ -108,13 +113,13 @@ struct cppc_perf_ctrls { struct cppc_perf_fb_ctrs { u64 reference; - u64 prev_reference; u64 delivered; - u64 prev_delivered; + u64 reference_perf; + u64 ctr_wrap_time; }; /* Per CPU container for runtime CPPC management. */ -struct cpudata { +struct cppc_cpudata { int cpu; struct cppc_perf_caps perf_caps; struct cppc_perf_ctrls perf_ctrls; @@ -127,6 +132,7 @@ struct cpudata { extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs); extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls); extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps); -extern int acpi_get_psd_map(struct cpudata **); +extern int acpi_get_psd_map(struct cppc_cpudata **); +extern unsigned int cppc_get_transition_latency(int cpu); #endif /* _CPPC_ACPI_H*/ diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h index 86b5a84..34cce72 100644 --- a/include/acpi/platform/acenv.h +++ b/include/acpi/platform/acenv.h @@ -78,6 +78,7 @@ (defined ACPI_EXAMPLE_APP) #define ACPI_APPLICATION #define ACPI_SINGLE_THREADED +#define USE_NATIVE_ALLOCATE_ZEROED #endif /* iASL configuration */ @@ -124,7 +125,6 @@ #ifdef ACPI_DUMP_APP #define ACPI_USE_NATIVE_MEMORY_MAPPING -#define USE_NATIVE_ALLOCATE_ZEROED #endif /* acpi_names/Example configuration. Hardware disabled */ @@ -149,7 +149,6 @@ /* Common for all ACPICA applications */ #ifdef ACPI_APPLICATION -#define ACPI_USE_SYSTEM_CLIBRARY #define ACPI_USE_LOCAL_CACHE #endif @@ -167,10 +166,21 @@ /****************************************************************************** * * Host configuration files. The compiler configuration files are included - * by the host files. + * first. * *****************************************************************************/ +#if defined(__GNUC__) && !defined(__INTEL_COMPILER) +#include <acpi/platform/acgcc.h> + +#elif defined(_MSC_VER) +#include "acmsvc.h" + +#elif defined(__INTEL_COMPILER) +#include "acintel.h" + +#endif + #if defined(_LINUX) || defined(__linux__) #include <acpi/platform/aclinux.h> @@ -210,18 +220,20 @@ #elif defined(__OS2__) #include "acos2.h" -#elif defined(_AED_EFI) -#include "acefi.h" - -#elif defined(_GNU_EFI) -#include "acefi.h" - #elif defined(__HAIKU__) #include "achaiku.h" #elif defined(__QNX__) #include "acqnx.h" +/* + * EFI applications can be built with -nostdlib, in this case, it must be + * included after including all other host environmental definitions, in + * order to override the definitions. + */ +#elif defined(_AED_EFI) || defined(_GNU_EFI) || defined(_EDK2_EFI) +#include "acefi.h" + #else /* Unknown environment */ @@ -326,7 +338,8 @@ * ACPI_USE_SYSTEM_CLIBRARY - Define this if linking to an actual C library. * Otherwise, local versions of string/memory functions will be used. * ACPI_USE_STANDARD_HEADERS - Define this if linking to a C library and - * the standard header files may be used. + * the standard header files may be used. Defining this implies that + * ACPI_USE_SYSTEM_CLIBRARY has been defined. * * The ACPICA subsystem only uses low level C library functions that do not * call operating system services and may therefore be inlined in the code. @@ -334,7 +347,6 @@ * It may be necessary to tailor these include files to the target * generation environment. */ -#ifdef ACPI_USE_SYSTEM_CLIBRARY /* Use the standard C library headers. We want to keep these to a minimum. 
*/ @@ -342,57 +354,20 @@ /* Use the standard headers from the standard locations */ -#include <stdarg.h> #include <stdlib.h> #include <string.h> #include <ctype.h> +#ifdef ACPI_APPLICATION +#include <stdio.h> +#include <fcntl.h> +#include <errno.h> +#include <time.h> +#include <signal.h> +#endif #endif /* ACPI_USE_STANDARD_HEADERS */ -/* We will be linking to the standard Clib functions */ - -#else - -/****************************************************************************** - * - * Not using native C library, use local implementations - * - *****************************************************************************/ - -/* - * Use local definitions of C library macros and functions. These function - * implementations may not be as efficient as an inline or assembly code - * implementation provided by a native C library, but they are functionally - * equivalent. - */ -#ifndef va_arg - -#ifndef _VALIST -#define _VALIST -typedef char *va_list; -#endif /* _VALIST */ - -/* Storage alignment properties */ - -#define _AUPBND (sizeof (acpi_native_int) - 1) -#define _ADNBND (sizeof (acpi_native_int) - 1) - -/* Variable argument list macro definitions */ - -#define _bnd(X, bnd) (((sizeof (X)) + (bnd)) & (~(bnd))) -#define va_arg(ap, T) (*(T *)(((ap) += (_bnd (T, _AUPBND))) - (_bnd (T,_ADNBND)))) -#define va_end(ap) (ap = (va_list) NULL) -#define va_start(ap, A) (void) ((ap) = (((char *) &(A)) + (_bnd (A,_AUPBND)))) - -#endif /* va_arg */ - -/* Use the local (ACPICA) definitions of the clib functions */ - -#endif /* ACPI_USE_SYSTEM_CLIBRARY */ - -#ifndef ACPI_FILE #ifdef ACPI_APPLICATION -#include <stdio.h> #define ACPI_FILE FILE * #define ACPI_FILE_OUT stdout #define ACPI_FILE_ERR stderr @@ -401,6 +376,9 @@ typedef char *va_list; #define ACPI_FILE_OUT NULL #define ACPI_FILE_ERR NULL #endif /* ACPI_APPLICATION */ -#endif /* ACPI_FILE */ + +#ifndef ACPI_INIT_FUNCTION +#define ACPI_INIT_FUNCTION +#endif #endif /* __ACENV_H__ */ diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h index 4f15c1d..b3171b9 100644 --- a/include/acpi/platform/acenvex.h +++ b/include/acpi/platform/acenvex.h @@ -56,17 +56,24 @@ #if defined(_LINUX) || defined(__linux__) #include <acpi/platform/aclinuxex.h> -#elif defined(WIN32) -#include "acwinex.h" +#elif defined(__DragonFly__) +#include "acdragonflyex.h" -#elif defined(_AED_EFI) +/* + * EFI applications can be built with -nostdlib, in this case, it must be + * included after including all other host environmental definitions, in + * order to override the definitions. + */ +#elif defined(_AED_EFI) || defined(_GNU_EFI) || defined(_EDK2_EFI) #include "acefiex.h" -#elif defined(_GNU_EFI) -#include "acefiex.h" +#endif -#elif defined(__DragonFly__) -#include "acdragonflyex.h" +#if defined(__GNUC__) && !defined(__INTEL_COMPILER) +#include "acgccex.h" + +#elif defined(_MSC_VER) +#include "acmsvcex.h" #endif diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h index c5a216c9..8f66aaa 100644 --- a/include/acpi/platform/acgcc.h +++ b/include/acpi/platform/acgcc.h @@ -44,6 +44,12 @@ #ifndef __ACGCC_H__ #define __ACGCC_H__ +/* + * Use compiler specific <stdarg.h> is a good practice for even when + * -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS undefined. + */ +#include <stdarg.h> + #define ACPI_INLINE __inline__ /* Function name is used for debug output. Non-ANSI, compiler-dependent */ @@ -64,17 +70,6 @@ */ #define ACPI_UNUSED_VAR __attribute__ ((unused)) -/* - * Some versions of gcc implement strchr() with a buggy macro. 
So, - * undef it here. Prevents error messages of this form (usually from the - * file getopt.c): - * - * error: logical '&&' with non-zero constant will always evaluate as true - */ -#ifdef strchr -#undef strchr -#endif - /* GCC supports __VA_ARGS__ in macros */ #define COMPILER_VA_MACRO 1 diff --git a/include/acpi/platform/acgccex.h b/include/acpi/platform/acgccex.h new file mode 100644 index 0000000..46ead2c --- /dev/null +++ b/include/acpi/platform/acgccex.h @@ -0,0 +1,58 @@ +/****************************************************************************** + * + * Name: acgccex.h - Extra GCC specific defines, etc. + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2016, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#ifndef __ACGCCEX_H__ +#define __ACGCCEX_H__ + +/* + * Some versions of gcc implement strchr() with a buggy macro. So, + * undef it here. 
Prevents error messages of this form (usually from the + * file getopt.c): + * + * error: logical '&&' with non-zero constant will always evaluate as true + */ +#ifdef strchr +#undef strchr +#endif + +#endif /* __ACGCCEX_H__ */ diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h index 93b61b1..a5d98d1 100644 --- a/include/acpi/platform/aclinux.h +++ b/include/acpi/platform/aclinux.h @@ -92,6 +92,8 @@ #include <asm/acenv.h> #endif +#define ACPI_INIT_FUNCTION __init + #ifndef CONFIG_ACPI /* External globals for __KERNEL__, stubs is needed */ @@ -168,13 +170,21 @@ #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_next_filename #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_close_directory +#define ACPI_MSG_ERROR KERN_ERR "ACPI Error: " +#define ACPI_MSG_EXCEPTION KERN_ERR "ACPI Exception: " +#define ACPI_MSG_WARNING KERN_WARNING "ACPI Warning: " +#define ACPI_MSG_INFO KERN_INFO "ACPI: " + +#define ACPI_MSG_BIOS_ERROR KERN_ERR "ACPI BIOS Error (bug): " +#define ACPI_MSG_BIOS_WARNING KERN_WARNING "ACPI BIOS Warning (bug): " + #else /* !__KERNEL__ */ -#include <stdarg.h> -#include <string.h> -#include <stdlib.h> -#include <ctype.h> +#define ACPI_USE_STANDARD_HEADERS + +#ifdef ACPI_USE_STANDARD_HEADERS #include <unistd.h> +#endif /* Define/disable kernel-specific declarators */ @@ -205,8 +215,4 @@ #endif /* __KERNEL__ */ -/* Linux uses GCC */ - -#include <acpi/platform/acgcc.h> - #endif /* __ACLINUX_H__ */ diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h index f8bb0d8..a5509d8 100644 --- a/include/acpi/platform/aclinuxex.h +++ b/include/acpi/platform/aclinuxex.h @@ -71,7 +71,7 @@ /* * Overrides for in-kernel ACPICA */ -acpi_status __init acpi_os_initialize(void); +acpi_status ACPI_INIT_FUNCTION acpi_os_initialize(void); acpi_status acpi_os_terminate(void); diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h index 1bfa602..6df9b07 100644 --- a/include/asm-generic/uaccess.h +++ b/include/asm-generic/uaccess.h @@ -72,6 +72,7 @@ struct exception_table_entry /* Returns 0 if exception not found and fixup otherwise. */ extern unsigned long search_exception_table(unsigned long); + /* * architectures with an MMU should override these two */ @@ -230,14 +231,18 @@ extern int __put_user_bad(void) __attribute__((noreturn)); might_fault(); \ access_ok(VERIFY_READ, __p, sizeof(*ptr)) ? \ __get_user((x), (__typeof__(*(ptr)) *)__p) : \ - -EFAULT; \ + ((x) = (__typeof__(*(ptr)))0,-EFAULT); \ }) #ifndef __get_user_fn static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) { - size = __copy_from_user(x, ptr, size); - return size ? 
-EFAULT : size; + size_t n = __copy_from_user(x, ptr, size); + if (unlikely(n)) { + memset(x + (size - n), 0, n); + return -EFAULT; + } + return 0; } #define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k) @@ -257,11 +262,13 @@ extern int __get_user_bad(void) __attribute__((noreturn)); static inline long copy_from_user(void *to, const void __user * from, unsigned long n) { + unsigned long res = n; might_fault(); - if (access_ok(VERIFY_READ, from, n)) - return __copy_from_user(to, from, n); - else - return n; + if (likely(access_ok(VERIFY_READ, from, n))) + res = __copy_from_user(to, from, n); + if (unlikely(res)) + memset(to + (n - res), 0, res); + return res; } static inline long copy_to_user(void __user *to, diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 4d8452c..19e650c 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -85,6 +85,8 @@ static inline const char *acpi_dev_name(struct acpi_device *adev) return dev_name(&adev->dev); } +struct device *acpi_get_first_physical_node(struct acpi_device *adev); + enum acpi_irq_model_id { ACPI_IRQ_MODEL_PIC = 0, ACPI_IRQ_MODEL_IOAPIC, @@ -267,12 +269,18 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id) return phys_id == PHYS_CPUID_INVALID; } +/* Validate the processor object's proc_id */ +bool acpi_processor_validate_proc_id(int proc_id); + #ifdef CONFIG_ACPI_HOTPLUG_CPU /* Arch dependent functions for cpu hotplug support */ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu); int acpi_unmap_cpu(int cpu); +int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid); #endif /* CONFIG_ACPI_HOTPLUG_CPU */ +void acpi_set_processor_mapping(void); + #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr); #endif @@ -634,6 +642,11 @@ static inline const char *acpi_dev_name(struct acpi_device *adev) return NULL; } +static inline struct device *acpi_get_first_physical_node(struct acpi_device *adev) +{ + return NULL; +} + static inline void acpi_early_init(void) { } static inline void acpi_subsystem_init(void) { } @@ -751,6 +764,12 @@ static inline int acpi_reconfig_notifier_unregister(struct notifier_block *nb) #endif /* !CONFIG_ACPI */ +#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC +int acpi_ioapic_add(acpi_handle root); +#else +static inline int acpi_ioapic_add(acpi_handle root) { return 0; } +#endif + #ifdef CONFIG_ACPI void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state, u32 pm1a_ctrl, u32 pm1b_ctrl)); @@ -1056,7 +1075,7 @@ static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev, return NULL; } -#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, validate, data, fn) \ +#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \ static const void * __acpi_table_##name[] \ __attribute__((unused)) \ = { (void *) table_id, \ @@ -1074,4 +1093,10 @@ void acpi_table_upgrade(void); static inline void acpi_table_upgrade(void) { } #endif +#if defined(CONFIG_ACPI) && defined(CONFIG_ACPI_WATCHDOG) +extern bool acpi_has_watchdog(void); +#else +static inline bool acpi_has_watchdog(void) { return false; } +#endif + #endif /*_LINUX_ACPI_H*/ diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h new file mode 100644 index 0000000..0e32dac --- /dev/null +++ b/include/linux/acpi_iort.h @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2016, Semihalf + * Author: Tomasz Nowicki <tn@semihalf.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions 
of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. + */ + +#ifndef __ACPI_IORT_H__ +#define __ACPI_IORT_H__ + +#include <linux/acpi.h> +#include <linux/fwnode.h> +#include <linux/irqdomain.h> + +int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node); +void iort_deregister_domain_token(int trans_id); +struct fwnode_handle *iort_find_domain_token(int trans_id); +#ifdef CONFIG_ACPI_IORT +void acpi_iort_init(void); +u32 iort_msi_map_rid(struct device *dev, u32 req_id); +struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id); +#else +static inline void acpi_iort_init(void) { } +static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id) +{ return req_id; } +static inline struct irq_domain *iort_get_device_domain(struct device *dev, + u32 req_id) +{ return NULL; } +#endif + +#endif /* __ACPI_IORT_H__ */ diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 598bc99..3b77588 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -339,6 +339,24 @@ static inline int bitmap_parse(const char *buf, unsigned int buflen, return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits); } +/* + * bitmap_from_u64 - Check and swap words within u64. + * @mask: source bitmap + * @dst: destination bitmap + * + * In 32-bit Big Endian kernel, when using (u32 *)(&val)[*] + * to read u64 mask, we will get the wrong word. + * That is "(u32 *)(&val)[0]" gets the upper 32 bits, + * but we expect the lower 32-bits of u64. + */ +static inline void bitmap_from_u64(unsigned long *dst, u64 mask) +{ + dst[0] = mask & ULONG_MAX; + + if (sizeof(mask) > sizeof(unsigned long)) + dst[1] = mask >> 32; +} + #endif /* __ASSEMBLY__ */ #endif /* __LINUX_BITMAP_H */ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 5261751..5f52709 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -32,6 +32,7 @@ enum can_mode { * CAN common private data */ struct can_priv { + struct net_device *dev; struct can_device_stats can_stats; struct can_bittiming bittiming, data_bittiming; @@ -47,7 +48,7 @@ struct can_priv { u32 ctrlmode_static; /* static enabled options for driver/hardware */ int restart_ms; - struct timer_list restart_timer; + struct delayed_work restart_work; int (*do_set_bittiming)(struct net_device *dev); int (*do_set_data_bittiming)(struct net_device *dev); diff --git a/include/linux/cec-funcs.h b/include/linux/cec-funcs.h index 82c3d3b..138bbf7 100644 --- a/include/linux/cec-funcs.h +++ b/include/linux/cec-funcs.h @@ -162,10 +162,11 @@ static inline void cec_msg_standby(struct cec_msg *msg) /* One Touch Record Feature */ -static inline void cec_msg_record_off(struct cec_msg *msg) +static inline void cec_msg_record_off(struct cec_msg *msg, bool reply) { msg->len = 2; msg->msg[1] = CEC_MSG_RECORD_OFF; + msg->reply = reply ? 
CEC_MSG_RECORD_STATUS : 0; } struct cec_op_arib_data { @@ -227,7 +228,7 @@ static inline void cec_set_digital_service_id(__u8 *msg, if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) { *msg++ = (digital->channel.channel_number_fmt << 2) | (digital->channel.major >> 8); - *msg++ = digital->channel.major && 0xff; + *msg++ = digital->channel.major & 0xff; *msg++ = digital->channel.minor >> 8; *msg++ = digital->channel.minor & 0xff; *msg++ = 0; @@ -323,6 +324,7 @@ static inline void cec_msg_record_on_phys_addr(struct cec_msg *msg, } static inline void cec_msg_record_on(struct cec_msg *msg, + bool reply, const struct cec_op_record_src *rec_src) { switch (rec_src->type) { @@ -346,6 +348,7 @@ static inline void cec_msg_record_on(struct cec_msg *msg, rec_src->ext_phys_addr.phys_addr); break; } + msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0; } static inline void cec_ops_record_on(const struct cec_msg *msg, @@ -1141,6 +1144,75 @@ static inline void cec_msg_give_device_vendor_id(struct cec_msg *msg, msg->reply = reply ? CEC_MSG_DEVICE_VENDOR_ID : 0; } +static inline void cec_msg_vendor_command(struct cec_msg *msg, + __u8 size, const __u8 *vendor_cmd) +{ + if (size > 14) + size = 14; + msg->len = 2 + size; + msg->msg[1] = CEC_MSG_VENDOR_COMMAND; + memcpy(msg->msg + 2, vendor_cmd, size); +} + +static inline void cec_ops_vendor_command(const struct cec_msg *msg, + __u8 *size, + const __u8 **vendor_cmd) +{ + *size = msg->len - 2; + + if (*size > 14) + *size = 14; + *vendor_cmd = msg->msg + 2; +} + +static inline void cec_msg_vendor_command_with_id(struct cec_msg *msg, + __u32 vendor_id, __u8 size, + const __u8 *vendor_cmd) +{ + if (size > 11) + size = 11; + msg->len = 5 + size; + msg->msg[1] = CEC_MSG_VENDOR_COMMAND_WITH_ID; + msg->msg[2] = vendor_id >> 16; + msg->msg[3] = (vendor_id >> 8) & 0xff; + msg->msg[4] = vendor_id & 0xff; + memcpy(msg->msg + 5, vendor_cmd, size); +} + +static inline void cec_ops_vendor_command_with_id(const struct cec_msg *msg, + __u32 *vendor_id, __u8 *size, + const __u8 **vendor_cmd) +{ + *size = msg->len - 5; + + if (*size > 11) + *size = 11; + *vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4]; + *vendor_cmd = msg->msg + 5; +} + +static inline void cec_msg_vendor_remote_button_down(struct cec_msg *msg, + __u8 size, + const __u8 *rc_code) +{ + if (size > 14) + size = 14; + msg->len = 2 + size; + msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN; + memcpy(msg->msg + 2, rc_code, size); +} + +static inline void cec_ops_vendor_remote_button_down(const struct cec_msg *msg, + __u8 *size, + const __u8 **rc_code) +{ + *size = msg->len - 2; + + if (*size > 14) + *size = 14; + *rc_code = msg->msg + 2; +} + static inline void cec_msg_vendor_remote_button_up(struct cec_msg *msg) { msg->len = 2; @@ -1277,7 +1349,7 @@ static inline void cec_msg_user_control_pressed(struct cec_msg *msg, msg->len += 4; msg->msg[3] = (ui_cmd->channel_identifier.channel_number_fmt << 2) | (ui_cmd->channel_identifier.major >> 8); - msg->msg[4] = ui_cmd->channel_identifier.major && 0xff; + msg->msg[4] = ui_cmd->channel_identifier.major & 0xff; msg->msg[5] = ui_cmd->channel_identifier.minor >> 8; msg->msg[6] = ui_cmd->channel_identifier.minor & 0xff; break; diff --git a/include/linux/cec.h b/include/linux/cec.h index b3e2289..851968e 100644 --- a/include/linux/cec.h +++ b/include/linux/cec.h @@ -364,7 +364,7 @@ struct cec_caps { * @num_log_addrs: how many logical addresses should be claimed. Set by the * caller. * @vendor_id: the vendor ID of the device. Set by the caller. 
- * @flags: set to 0. + * @flags: flags. * @osd_name: the OSD name of the device. Set by the caller. * @primary_device_type: the primary device type for each logical address. * Set by the caller. @@ -389,6 +389,9 @@ struct cec_log_addrs { __u8 features[CEC_MAX_LOG_ADDRS][12]; }; +/* Allow a fallback to unregistered */ +#define CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK (1 << 0) + /* Events */ /* Event that occurs when the adapter state changes */ diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 8dbc892..573c5a1 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -158,7 +158,7 @@ #define __compiler_offsetof(a, b) \ __builtin_offsetof(a, b) -#if GCC_VERSION >= 40100 && GCC_VERSION < 40600 +#if GCC_VERSION >= 40100 # define __compiletime_object_size(obj) __builtin_object_size(obj, 0) #endif diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 436aa4e..6685698 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -527,13 +527,14 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s * object's lifetime is managed by something other than RCU. That * "something other" might be reference counting or simple immortality. * - * The seemingly unused size_t variable is to validate @p is indeed a pointer - * type by making sure it can be dereferenced. + * The seemingly unused variable ___typecheck_p validates that @p is + * indeed a pointer type by using a pointer to typeof(*p) as the type. + * Taking a pointer to typeof(*p) again is needed in case p is void *. */ #define lockless_dereference(p) \ ({ \ typeof(p) _________p1 = READ_ONCE(p); \ - size_t __maybe_unused __size_of_ptr = sizeof(*(p)); \ + typeof(*(p)) *___typecheck_p __maybe_unused; \ smp_read_barrier_depends(); /* Dependency order vs. p above. 
*/ \ (_________p1); \ }) diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 6bf1992..7572d9e 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -216,7 +216,11 @@ static inline void cpu_hotplug_done(void) {} #endif /* CONFIG_HOTPLUG_CPU */ #ifdef CONFIG_PM_SLEEP_SMP -extern int disable_nonboot_cpus(void); +extern int freeze_secondary_cpus(int primary); +static inline int disable_nonboot_cpus(void) +{ + return freeze_secondary_cpus(0); +} extern void enable_nonboot_cpus(void); #else /* !CONFIG_PM_SLEEP_SMP */ static inline int disable_nonboot_cpus(void) { return 0; } diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 510205e..7b6c446 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -1,6 +1,8 @@ #ifndef __CPUHOTPLUG_H #define __CPUHOTPLUG_H +#include <linux/types.h> + enum cpuhp_state { CPUHP_OFFLINE, CPUHP_CREATE_THREADS, @@ -70,6 +72,8 @@ enum cpuhp_state { CPUHP_AP_PERF_METAG_STARTING, CPUHP_AP_MIPS_OP_LOONGSON3_STARTING, CPUHP_AP_ARM_VFP_STARTING, + CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING, + CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING, CPUHP_AP_PERF_ARM_STARTING, CPUHP_AP_ARM_L2X0_STARTING, CPUHP_AP_ARM_ARCH_TIMER_STARTING, diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h index 0a83a1e..4db00b0 100644 --- a/include/linux/devfreq-event.h +++ b/include/linux/devfreq-event.h @@ -148,11 +148,6 @@ static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev) return -EINVAL; } -static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev) -{ - return ERR_PTR(-EINVAL); -} - static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle( struct device *dev, int index) { diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 66533e1..dc69df0 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -718,7 +718,7 @@ static inline int dma_mmap_wc(struct device *dev, #define dma_mmap_writecombine dma_mmap_wc #endif -#ifdef CONFIG_NEED_DMA_MAP_STATE +#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG) #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME #define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME #define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) diff --git a/include/linux/efi.h b/include/linux/efi.h index 7f5a582..2d08948 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -20,6 +20,7 @@ #include <linux/ioport.h> #include <linux/pfn.h> #include <linux/pstore.h> +#include <linux/range.h> #include <linux/reboot.h> #include <linux/uuid.h> #include <linux/screen_info.h> @@ -37,6 +38,7 @@ #define EFI_WRITE_PROTECTED ( 8 | (1UL << (BITS_PER_LONG-1))) #define EFI_OUT_OF_RESOURCES ( 9 | (1UL << (BITS_PER_LONG-1))) #define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1))) +#define EFI_ABORTED (21 | (1UL << (BITS_PER_LONG-1))) #define EFI_SECURITY_VIOLATION (26 | (1UL << (BITS_PER_LONG-1))) typedef unsigned long efi_status_t; @@ -118,6 +120,15 @@ typedef struct { u32 imagesize; } efi_capsule_header_t; +struct efi_boot_memmap { + efi_memory_desc_t **map; + unsigned long *map_size; + unsigned long *desc_size; + u32 *desc_ver; + unsigned long *key_ptr; + unsigned long *buff_size; +}; + /* * EFI capsule flags */ @@ -669,6 +680,18 @@ typedef struct { unsigned long tables; } efi_system_table_t; +/* + * Architecture independent structure for describing a memory map for the + * benefit of efi_memmap_init_early(), saving us the need to pass four + * parameters. 
+ */ +struct efi_memory_map_data { + phys_addr_t phys_map; + unsigned long size; + unsigned long desc_version; + unsigned long desc_size; +}; + struct efi_memory_map { phys_addr_t phys_map; void *map; @@ -676,6 +699,12 @@ struct efi_memory_map { int nr_map; unsigned long desc_version; unsigned long desc_size; + bool late; +}; + +struct efi_mem_range { + struct range range; + u64 attribute; }; struct efi_fdt_params { @@ -900,6 +929,16 @@ static inline efi_status_t efi_query_variable_store(u32 attributes, } #endif extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr); + +extern int __init efi_memmap_init_early(struct efi_memory_map_data *data); +extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size); +extern void __init efi_memmap_unmap(void); +extern int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map); +extern int __init efi_memmap_split_count(efi_memory_desc_t *md, + struct range *range); +extern void __init efi_memmap_insert(struct efi_memory_map *old_memmap, + void *buf, struct efi_mem_range *mem); + extern int efi_config_init(efi_config_table_type_t *arch_tables); #ifdef CONFIG_EFI_ESRT extern void __init efi_esrt_init(void); @@ -915,6 +954,7 @@ extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size); extern int __init efi_uart_console_only (void); extern u64 efi_mem_desc_end(efi_memory_desc_t *md); extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md); +extern void efi_mem_reserve(phys_addr_t addr, u64 size); extern void efi_initialize_iomem_resources(struct resource *code_resource, struct resource *data_resource, struct resource *bss_resource); extern void efi_reserve_boot_services(void); @@ -946,7 +986,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm, /* Iterate through an efi_memory_map */ #define for_each_efi_memory_desc_in_map(m, md) \ for ((md) = (m)->map; \ - ((void *)(md) + (m)->desc_size) <= (m)->map_end; \ + (md) && ((void *)(md) + (m)->desc_size) <= (m)->map_end; \ (md) = (void *)(md) + (m)->desc_size) /** @@ -1127,12 +1167,6 @@ struct efivar_operations { }; struct efivars { - /* - * ->lock protects two things: - * 1) efivarfs_list and efivars_sysfs_list - * 2) ->ops calls - */ - spinlock_t lock; struct kset *kset; struct kobject *kobject; const struct efivar_operations *ops; @@ -1273,8 +1307,8 @@ struct kobject *efivars_kobject(void); int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *), void *data, bool duplicates, struct list_head *head); -void efivar_entry_add(struct efivar_entry *entry, struct list_head *head); -void efivar_entry_remove(struct efivar_entry *entry); +int efivar_entry_add(struct efivar_entry *entry, struct list_head *head); +int efivar_entry_remove(struct efivar_entry *entry); int __efivar_entry_delete(struct efivar_entry *entry); int efivar_entry_delete(struct efivar_entry *entry); @@ -1291,7 +1325,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes, bool block, unsigned long size, void *data); -void efivar_entry_iter_begin(void); +int efivar_entry_iter_begin(void); void efivar_entry_iter_end(void); int __efivar_entry_iter(int (*func)(struct efivar_entry *, void *), @@ -1327,7 +1361,6 @@ extern int efi_capsule_update(efi_capsule_header_t *capsule, #ifdef CONFIG_EFI_RUNTIME_MAP int efi_runtime_map_init(struct kobject *); -void efi_runtime_map_setup(void *, int, u32); int efi_get_runtime_map_size(void); int 
efi_get_runtime_map_desc_size(void); int efi_runtime_map_copy(void *buf, size_t bufsz); @@ -1337,9 +1370,6 @@ static inline int efi_runtime_map_init(struct kobject *kobj) return 0; } -static inline void -efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size) {} - static inline int efi_get_runtime_map_size(void) { return 0; @@ -1371,11 +1401,7 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg, efi_loaded_image_t *image, int *cmd_line_len); efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg, - efi_memory_desc_t **map, - unsigned long *map_size, - unsigned long *desc_size, - u32 *desc_ver, - unsigned long *key_ptr); + struct efi_boot_memmap *map); efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg, unsigned long size, unsigned long align, @@ -1457,4 +1483,14 @@ extern void efi_call_virt_check_flags(unsigned long flags, const char *call); arch_efi_call_virt_teardown(); \ }) +typedef efi_status_t (*efi_exit_boot_map_processing)( + efi_system_table_t *sys_table_arg, + struct efi_boot_memmap *map, + void *priv); + +efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table, + void *handle, + struct efi_boot_memmap *map, + void *priv, + efi_exit_boot_map_processing priv_func); #endif /* _LINUX_EFI_H */ diff --git a/include/linux/fence.h b/include/linux/fence.h index 8cc719a6..2ac6fa5 100644 --- a/include/linux/fence.h +++ b/include/linux/fence.h @@ -49,8 +49,6 @@ struct fence_cb; * @timestamp: Timestamp when the fence was signaled. * @status: Optional, only valid if < 0, must be set before calling * fence_signal, indicates that the fence has completed with an error. - * @child_list: list of children fences - * @active_list: list of active fences * * the flags member must be manipulated and read using the appropriate * atomic ops (bit_*), so taking the spinlock will not be needed most diff --git a/include/linux/fs.h b/include/linux/fs.h index 3523bf6..901e25d 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -574,6 +574,7 @@ static inline void mapping_allow_writable(struct address_space *mapping) struct posix_acl; #define ACL_NOT_CACHED ((void *)(-1)) +#define ACL_DONT_CACHE ((void *)(-3)) static inline struct posix_acl * uncached_acl_sentinel(struct task_struct *task) diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h index cfa6cde..76cff18 100644 --- a/include/linux/fscrypto.h +++ b/include/linux/fscrypto.h @@ -274,8 +274,7 @@ extern void fscrypt_restore_control_page(struct page *); extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t, unsigned int); /* policy.c */ -extern int fscrypt_process_policy(struct inode *, - const struct fscrypt_policy *); +extern int fscrypt_process_policy(struct file *, const struct fscrypt_policy *); extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *); extern int fscrypt_has_permitted_context(struct inode *, struct inode *); extern int fscrypt_inherit_context(struct inode *, struct inode *, @@ -345,7 +344,7 @@ static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p, } /* policy.c */ -static inline int fscrypt_notsupp_process_policy(struct inode *i, +static inline int fscrypt_notsupp_process_policy(struct file *f, const struct fscrypt_policy *p) { return -EOPNOTSUPP; diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 58205f3..7268ed0 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -148,6 +148,7 @@ struct fsnotify_group { #define FS_PRIO_1 1 /* fanotify content based 
access control */ #define FS_PRIO_2 2 /* fanotify pre-content access */ unsigned int priority; + bool shutdown; /* group is being shut down, don't queue more events */ /* stores all fastpath marks assoc with this group so they can be cleaned on unregister */ struct mutex mark_mutex; /* protect marks_list */ @@ -179,7 +180,6 @@ struct fsnotify_group { spinlock_t access_lock; struct list_head access_list; wait_queue_head_t access_waitq; - atomic_t bypass_perm; #endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */ int f_flags; unsigned int max_marks; @@ -292,6 +292,8 @@ extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *op extern void fsnotify_get_group(struct fsnotify_group *group); /* drop reference on a group from fsnotify_alloc_group */ extern void fsnotify_put_group(struct fsnotify_group *group); +/* group destruction begins, stop queuing new events */ +extern void fsnotify_group_stop_queueing(struct fsnotify_group *group); /* destroy group */ extern void fsnotify_destroy_group(struct fsnotify_group *group); /* fasync handler function */ @@ -304,8 +306,6 @@ extern int fsnotify_add_event(struct fsnotify_group *group, struct fsnotify_event *event, int (*merge)(struct list_head *, struct fsnotify_event *)); -/* Remove passed event from groups notification queue */ -extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event); /* true if the group notification queue is empty */ extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group); /* return, but do not dequeue the first event on the notification queue */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 7d565af..6f93ac4 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -795,7 +795,12 @@ struct ftrace_ret_stack { unsigned long func; unsigned long long calltime; unsigned long long subtime; +#ifdef HAVE_FUNCTION_GRAPH_FP_TEST unsigned long fp; +#endif +#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR + unsigned long *retp; +#endif }; /* @@ -807,7 +812,10 @@ extern void return_to_handler(void); extern int ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, - unsigned long frame_pointer); + unsigned long frame_pointer, unsigned long *retp); + +unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, + unsigned long ret, unsigned long *retp); /* * Sometimes we don't want to trace a function with the function @@ -870,6 +878,13 @@ static inline int task_curr_ret_stack(struct task_struct *tsk) return -1; } +static inline unsigned long +ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret, + unsigned long *retp) +{ + return ret; +} + static inline void pause_graph_tracing(void) { } static inline void unpause_graph_tracing(void) { } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h new file mode 100644 index 0000000..3fa5ef2 --- /dev/null +++ b/include/linux/hypervisor.h @@ -0,0 +1,17 @@ +#ifndef __LINUX_HYPEVISOR_H +#define __LINUX_HYPEVISOR_H + +/* + * Generic Hypervisor support + * Juergen Gross <jgross@suse.com> + */ + +#ifdef CONFIG_HYPERVISOR_GUEST +#include <asm/hypervisor.h> +#else +static inline void hypervisor_pin_vcpu(int cpu) +{ +} +#endif + +#endif /* __LINUX_HYPEVISOR_H */ diff --git a/include/linux/iio/sw_trigger.h b/include/linux/iio/sw_trigger.h index 5198f8e..c97eab6 100644 --- a/include/linux/iio/sw_trigger.h +++ b/include/linux/iio/sw_trigger.h @@ -62,7 +62,7 @@ void iio_swt_group_init_type_name(struct 
iio_sw_trigger *t, const char *name, struct config_item_type *type) { -#ifdef CONFIG_CONFIGFS_FS +#if IS_ENABLED(CONFIG_CONFIGFS_FS) config_group_init_type_name(&t->group, name, type); #endif } diff --git a/include/linux/init_task.h b/include/linux/init_task.h index f8834f8..325f649 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -15,6 +15,8 @@ #include <net/net_namespace.h> #include <linux/sched/rt.h> +#include <asm/thread_info.h> + #ifdef CONFIG_SMP # define INIT_PUSHABLE_TASKS(tsk) \ .pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), @@ -183,12 +185,21 @@ extern struct task_group root_task_group; # define INIT_KASAN(tsk) #endif +#ifdef CONFIG_THREAD_INFO_IN_TASK +# define INIT_TASK_TI(tsk) \ + .thread_info = INIT_THREAD_INFO(tsk), \ + .stack_refcount = ATOMIC_INIT(1), +#else +# define INIT_TASK_TI(tsk) +#endif + /* * INIT_TASK is used to set up the first task table, touch at * your own risk!. Base=0, limit=0x1fffff (=2MB) */ #define INIT_TASK(tsk) \ { \ + INIT_TASK_TI(tsk) \ .state = 0, \ .stack = init_stack, \ .usage = ATOMIC_INIT(2), \ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index b6683f0..72f0721 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -278,7 +278,8 @@ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); extern int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); -struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs); +struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity, int nvec); +int irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec); #else /* CONFIG_SMP */ @@ -311,11 +312,18 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) return 0; } -static inline struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs) +static inline struct cpumask * +irq_create_affinity_masks(const struct cpumask *affinity, int nvec) { - *nr_vecs = 1; return NULL; } + +static inline int +irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec) +{ + return maxvec; +} + #endif /* CONFIG_SMP */ /* diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 3267df4..3d70ece 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -19,6 +19,11 @@ struct vm_fault; #define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */ /* + * Flags for iomap mappings: + */ +#define IOMAP_F_MERGED 0x01 /* contains multiple blocks/extents */ + +/* * Magic value for blkno: */ #define IOMAP_NULL_BLOCK -1LL /* blkno is not valid */ @@ -27,7 +32,8 @@ struct iomap { sector_t blkno; /* 1st sector of mapping, 512b units */ loff_t offset; /* file offset of mapping, bytes */ u64 length; /* length of mapping, bytes */ - int type; /* type of mapping */ + u16 type; /* type of mapping */ + u16 flags; /* flags for mapping */ struct block_device *bdev; /* block device for I/O */ }; diff --git a/include/linux/irq.h b/include/linux/irq.h index b52424e..e798755 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -916,12 +916,20 @@ void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, unsigned int clr, unsigned int set); struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq); -int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip, - int num_ct, const char *name, - irq_flow_handler_t handler, - unsigned int clr, unsigned int set, - enum irq_gc_flags flags); +int 
__irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip, + int num_ct, const char *name, + irq_flow_handler_t handler, + unsigned int clr, unsigned int set, + enum irq_gc_flags flags); + +#define irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name, \ + handler, clr, set, flags) \ +({ \ + MAYBE_BUILD_BUG_ON(irqs_per_chip > 32); \ + __irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name,\ + handler, clr, set, flags); \ +}) static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d) { @@ -945,6 +953,16 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { } static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } #endif +/* + * The irqsave variants are for usage in non interrupt code. Do not use + * them in irq_chip callbacks. Use irq_gc_lock() instead. + */ +#define irq_gc_lock_irqsave(gc, flags) \ + raw_spin_lock_irqsave(&(gc)->lock, flags) + +#define irq_gc_unlock_irqrestore(gc, flags) \ + raw_spin_unlock_irqrestore(&(gc)->lock, flags) + static inline void irq_reg_writel(struct irq_chip_generic *gc, u32 val, int reg_offset) { diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 99ac022..8361c8d 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -430,9 +430,9 @@ struct rdists { }; struct irq_domain; -struct device_node; +struct fwnode_handle; int its_cpu_init(void); -int its_init(struct device_node *node, struct rdists *rdists, +int its_init(struct fwnode_handle *handle, struct rdists *rdists, struct irq_domain *domain); static inline bool gic_enable_sre(void) diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index b51beeb..c9be579 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -2,6 +2,7 @@ #define _LINUX_IRQDESC_H #include <linux/rcupdate.h> +#include <linux/kobject.h> /* * Core internal functions to deal with irq descriptors @@ -43,6 +44,7 @@ struct pt_regs; * @force_resume_depth: number of irqactions on a irq descriptor with * IRQF_FORCE_RESUME set * @rcu: rcu head for delayed free + * @kobj: kobject used to represent this struct in sysfs * @dir: /proc/irq/ procfs entry * @name: flow handler name for /proc/interrupts output */ @@ -88,6 +90,7 @@ struct irq_desc { #endif #ifdef CONFIG_SPARSE_IRQ struct rcu_head rcu; + struct kobject kobj; #endif int parent_irq; struct module *owner; diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 661af56..a0547c5 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -21,6 +21,8 @@ * * DEFINE_STATIC_KEY_TRUE(key); * DEFINE_STATIC_KEY_FALSE(key); + * DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count); + * DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count); * static_branch_likely() * static_branch_unlikely() * @@ -267,9 +269,25 @@ struct static_key_false { #define DEFINE_STATIC_KEY_TRUE(name) \ struct static_key_true name = STATIC_KEY_TRUE_INIT +#define DECLARE_STATIC_KEY_TRUE(name) \ + extern struct static_key_true name + #define DEFINE_STATIC_KEY_FALSE(name) \ struct static_key_false name = STATIC_KEY_FALSE_INIT +#define DECLARE_STATIC_KEY_FALSE(name) \ + extern struct static_key_false name + +#define DEFINE_STATIC_KEY_ARRAY_TRUE(name, count) \ + struct static_key_true name[count] = { \ + [0 ... (count) - 1] = STATIC_KEY_TRUE_INIT, \ + } + +#define DEFINE_STATIC_KEY_ARRAY_FALSE(name, count) \ + struct static_key_false name[count] = { \ + [0 ... 
(count) - 1] = STATIC_KEY_FALSE_INIT, \ + } + extern bool ____wrong_branch_error(void); #define static_key_enabled(x) \ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index d96a611..74fd6f0 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -259,17 +259,14 @@ static inline void might_fault(void) { } extern struct atomic_notifier_head panic_notifier_list; extern long (*panic_blink)(int state); __printf(1, 2) -void panic(const char *fmt, ...) - __noreturn __cold; +void panic(const char *fmt, ...) __noreturn __cold; void nmi_panic(struct pt_regs *regs, const char *msg); extern void oops_enter(void); extern void oops_exit(void); void print_oops_end_marker(void); extern int oops_may_print(void); -void do_exit(long error_code) - __noreturn; -void complete_and_exit(struct completion *, long) - __noreturn; +void do_exit(long error_code) __noreturn; +void complete_and_exit(struct completion *, long) __noreturn; /* Internal, do not use. */ int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res); diff --git a/include/linux/ktime.h b/include/linux/ktime.h index 2b6a204..3ffc69e 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -64,6 +64,13 @@ static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs) ({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; }) /* + * Same as ktime_add(), but avoids undefined behaviour on overflow; however, + * this means that you must check the result for overflow yourself. + */ +#define ktime_add_unsafe(lhs, rhs) \ + ({ (ktime_t){ .tv64 = (u64) (lhs).tv64 + (rhs).tv64 }; }) + +/* * Add a ktime_t variable and a scalar nanosecond value. * res = kt + nsval: */ diff --git a/include/linux/lglock.h b/include/linux/lglock.h deleted file mode 100644 index c92ebd1..0000000 --- a/include/linux/lglock.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Specialised local-global spinlock. Can only be declared as global variables - * to avoid overhead and keep things simple (and we don't want to start using - * these inside dynamically allocated structures). - * - * "local/global locks" (lglocks) can be used to: - * - * - Provide fast exclusive access to per-CPU data, with exclusive access to - * another CPU's data allowed but possibly subject to contention, and to - * provide very slow exclusive access to all per-CPU data. - * - Or to provide very fast and scalable read serialisation, and to provide - * very slow exclusive serialisation of data (not necessarily per-CPU data). - * - * Brlocks are also implemented as a short-hand notation for the latter use - * case. - * - * Copyright 2009, 2010, Nick Piggin, Novell Inc. 
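The DEFINE_STATIC_KEY_ARRAY_TRUE()/DEFINE_STATIC_KEY_ARRAY_FALSE() helpers added to jump_label.h above initialize a whole array of static keys in one declaration. A minimal sketch of a consumer is below; the key name, array size and call site are hypothetical, not part of the patch.

#include <linux/jump_label.h>
#include <linux/printk.h>

/* Hypothetical: one default-false key per hardware queue. */
DEFINE_STATIC_KEY_ARRAY_FALSE(example_queue_dbg, 8);

static void example_queue_fast_path(int queue)
{
	/* Compiles to a no-op branch until the key for this queue is flipped
	 * with static_branch_enable(&example_queue_dbg[queue]). */
	if (static_branch_unlikely(&example_queue_dbg[queue]))
		pr_debug("queue %d took the debug path\n", queue);
}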
- */ -#ifndef __LINUX_LGLOCK_H -#define __LINUX_LGLOCK_H - -#include <linux/spinlock.h> -#include <linux/lockdep.h> -#include <linux/percpu.h> -#include <linux/cpu.h> -#include <linux/notifier.h> - -#ifdef CONFIG_SMP - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -#define LOCKDEP_INIT_MAP lockdep_init_map -#else -#define LOCKDEP_INIT_MAP(a, b, c, d) -#endif - -struct lglock { - arch_spinlock_t __percpu *lock; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lock_class_key lock_key; - struct lockdep_map lock_dep_map; -#endif -}; - -#define DEFINE_LGLOCK(name) \ - static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \ - = __ARCH_SPIN_LOCK_UNLOCKED; \ - struct lglock name = { .lock = &name ## _lock } - -#define DEFINE_STATIC_LGLOCK(name) \ - static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \ - = __ARCH_SPIN_LOCK_UNLOCKED; \ - static struct lglock name = { .lock = &name ## _lock } - -void lg_lock_init(struct lglock *lg, char *name); - -void lg_local_lock(struct lglock *lg); -void lg_local_unlock(struct lglock *lg); -void lg_local_lock_cpu(struct lglock *lg, int cpu); -void lg_local_unlock_cpu(struct lglock *lg, int cpu); - -void lg_double_lock(struct lglock *lg, int cpu1, int cpu2); -void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2); - -void lg_global_lock(struct lglock *lg); -void lg_global_unlock(struct lglock *lg); - -#else -/* When !CONFIG_SMP, map lglock to spinlock */ -#define lglock spinlock -#define DEFINE_LGLOCK(name) DEFINE_SPINLOCK(name) -#define DEFINE_STATIC_LGLOCK(name) static DEFINE_SPINLOCK(name) -#define lg_lock_init(lg, name) spin_lock_init(lg) -#define lg_local_lock spin_lock -#define lg_local_unlock spin_unlock -#define lg_local_lock_cpu(lg, cpu) spin_lock(lg) -#define lg_local_unlock_cpu(lg, cpu) spin_unlock(lg) -#define lg_global_lock spin_lock -#define lg_global_unlock spin_unlock -#endif - -#endif diff --git a/include/linux/list.h b/include/linux/list.h index 5183138..5809e9a2 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -381,8 +381,11 @@ static inline void list_splice_tail_init(struct list_head *list, * * Note that if the list is empty, it returns NULL. */ -#define list_first_entry_or_null(ptr, type, member) \ - (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL) +#define list_first_entry_or_null(ptr, type, member) ({ \ + struct list_head *head__ = (ptr); \ + struct list_head *pos__ = READ_ONCE(head__->next); \ + pos__ != head__ ? list_entry(pos__, type, member) : NULL; \ +}) /** * list_next_entry - get the next element in list diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 4429d25..5e5b296 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -195,6 +195,7 @@ static inline bool vma_migratable(struct vm_area_struct *vma) } extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long); +extern void mpol_put_task_policy(struct task_struct *); #else @@ -297,5 +298,8 @@ static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma, return -1; /* no node preference */ } +static inline void mpol_put_task_policy(struct task_struct *task) +{ +} #endif /* CONFIG_NUMA */ #endif diff --git a/include/linux/mfd/da8xx-cfgchip.h b/include/linux/mfd/da8xx-cfgchip.h new file mode 100644 index 0000000..304985e --- /dev/null +++ b/include/linux/mfd/da8xx-cfgchip.h @@ -0,0 +1,153 @@ +/* + * TI DaVinci DA8xx CHIPCFGx registers for syscon consumers. 
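The new da8xx-cfgchip.h header (whose copyright banner continues below) only supplies register offsets and bit masks; consumers are expected to go through the CFGCHIP syscon regmap. A hedged sketch of such a consumer follows, using the CFGCHIP() offset macro and CFGCHIP2 bits defined further down; the compatible string and the choice to force host mode are illustrative assumptions.

#include <linux/err.h>
#include <linux/mfd/da8xx-cfgchip.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

/* Hypothetical consumer: override the USB OTG mode through the CFGCHIP2 register. */
static int example_force_otg_host(void)
{
	struct regmap *cfgchip;

	/* "ti,da830-cfgchip" is an assumed compatible string for the syscon node. */
	cfgchip = syscon_regmap_lookup_by_compatible("ti,da830-cfgchip");
	if (IS_ERR(cfgchip))
		return PTR_ERR(cfgchip);

	return regmap_update_bits(cfgchip, CFGCHIP(2), CFGCHIP2_OTGMODE_MASK,
				  CFGCHIP2_OTGMODE_FORCE_HOST);
}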
+ * + * Copyright (C) 2016 David Lechner <david@lechnology.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __LINUX_MFD_DA8XX_CFGCHIP_H +#define __LINUX_MFD_DA8XX_CFGCHIP_H + +#include <linux/bitops.h> + +/* register offset (32-bit registers) */ +#define CFGCHIP(n) ((n) * 4) + +/* CFGCHIP0 (PLL0/EDMA3_0) register bits */ +#define CFGCHIP0_PLL_MASTER_LOCK BIT(4) +#define CFGCHIP0_EDMA30TC1DBS(n) ((n) << 2) +#define CFGCHIP0_EDMA30TC1DBS_MASK CFGCHIP0_EDMA30TC1DBS(0x3) +#define CFGCHIP0_EDMA30TC1DBS_16 CFGCHIP0_EDMA30TC1DBS(0x0) +#define CFGCHIP0_EDMA30TC1DBS_32 CFGCHIP0_EDMA30TC1DBS(0x1) +#define CFGCHIP0_EDMA30TC1DBS_64 CFGCHIP0_EDMA30TC1DBS(0x2) +#define CFGCHIP0_EDMA30TC0DBS(n) ((n) << 0) +#define CFGCHIP0_EDMA30TC0DBS_MASK CFGCHIP0_EDMA30TC0DBS(0x3) +#define CFGCHIP0_EDMA30TC0DBS_16 CFGCHIP0_EDMA30TC0DBS(0x0) +#define CFGCHIP0_EDMA30TC0DBS_32 CFGCHIP0_EDMA30TC0DBS(0x1) +#define CFGCHIP0_EDMA30TC0DBS_64 CFGCHIP0_EDMA30TC0DBS(0x2) + +/* CFGCHIP1 (eCAP/HPI/EDMA3_1/eHRPWM TBCLK/McASP0 AMUTEIN) register bits */ +#define CFGCHIP1_CAP2SRC(n) ((n) << 27) +#define CFGCHIP1_CAP2SRC_MASK CFGCHIP1_CAP2SRC(0x1f) +#define CFGCHIP1_CAP2SRC_ECAP_PIN CFGCHIP1_CAP2SRC(0x0) +#define CFGCHIP1_CAP2SRC_MCASP0_TX CFGCHIP1_CAP2SRC(0x1) +#define CFGCHIP1_CAP2SRC_MCASP0_RX CFGCHIP1_CAP2SRC(0x2) +#define CFGCHIP1_CAP2SRC_EMAC_C0_RX_THRESHOLD CFGCHIP1_CAP2SRC(0x7) +#define CFGCHIP1_CAP2SRC_EMAC_C0_RX CFGCHIP1_CAP2SRC(0x8) +#define CFGCHIP1_CAP2SRC_EMAC_C0_TX CFGCHIP1_CAP2SRC(0x9) +#define CFGCHIP1_CAP2SRC_EMAC_C0_MISC CFGCHIP1_CAP2SRC(0xa) +#define CFGCHIP1_CAP2SRC_EMAC_C1_RX_THRESHOLD CFGCHIP1_CAP2SRC(0xb) +#define CFGCHIP1_CAP2SRC_EMAC_C1_RX CFGCHIP1_CAP2SRC(0xc) +#define CFGCHIP1_CAP2SRC_EMAC_C1_TX CFGCHIP1_CAP2SRC(0xd) +#define CFGCHIP1_CAP2SRC_EMAC_C1_MISC CFGCHIP1_CAP2SRC(0xe) +#define CFGCHIP1_CAP2SRC_EMAC_C2_RX_THRESHOLD CFGCHIP1_CAP2SRC(0xf) +#define CFGCHIP1_CAP2SRC_EMAC_C2_RX CFGCHIP1_CAP2SRC(0x10) +#define CFGCHIP1_CAP2SRC_EMAC_C2_TX CFGCHIP1_CAP2SRC(0x11) +#define CFGCHIP1_CAP2SRC_EMAC_C2_MISC CFGCHIP1_CAP2SRC(0x12) +#define CFGCHIP1_CAP1SRC(n) ((n) << 22) +#define CFGCHIP1_CAP1SRC_MASK CFGCHIP1_CAP1SRC(0x1f) +#define CFGCHIP1_CAP1SRC_ECAP_PIN CFGCHIP1_CAP1SRC(0x0) +#define CFGCHIP1_CAP1SRC_MCASP0_TX CFGCHIP1_CAP1SRC(0x1) +#define CFGCHIP1_CAP1SRC_MCASP0_RX CFGCHIP1_CAP1SRC(0x2) +#define CFGCHIP1_CAP1SRC_EMAC_C0_RX_THRESHOLD CFGCHIP1_CAP1SRC(0x7) +#define CFGCHIP1_CAP1SRC_EMAC_C0_RX CFGCHIP1_CAP1SRC(0x8) +#define CFGCHIP1_CAP1SRC_EMAC_C0_TX CFGCHIP1_CAP1SRC(0x9) +#define CFGCHIP1_CAP1SRC_EMAC_C0_MISC CFGCHIP1_CAP1SRC(0xa) +#define CFGCHIP1_CAP1SRC_EMAC_C1_RX_THRESHOLD CFGCHIP1_CAP1SRC(0xb) +#define CFGCHIP1_CAP1SRC_EMAC_C1_RX CFGCHIP1_CAP1SRC(0xc) +#define CFGCHIP1_CAP1SRC_EMAC_C1_TX CFGCHIP1_CAP1SRC(0xd) +#define CFGCHIP1_CAP1SRC_EMAC_C1_MISC CFGCHIP1_CAP1SRC(0xe) +#define CFGCHIP1_CAP1SRC_EMAC_C2_RX_THRESHOLD CFGCHIP1_CAP1SRC(0xf) +#define CFGCHIP1_CAP1SRC_EMAC_C2_RX CFGCHIP1_CAP1SRC(0x10) +#define CFGCHIP1_CAP1SRC_EMAC_C2_TX CFGCHIP1_CAP1SRC(0x11) +#define CFGCHIP1_CAP1SRC_EMAC_C2_MISC CFGCHIP1_CAP1SRC(0x12) +#define 
CFGCHIP1_CAP0SRC(n) ((n) << 17) +#define CFGCHIP1_CAP0SRC_MASK CFGCHIP1_CAP0SRC(0x1f) +#define CFGCHIP1_CAP0SRC_ECAP_PIN CFGCHIP1_CAP0SRC(0x0) +#define CFGCHIP1_CAP0SRC_MCASP0_TX CFGCHIP1_CAP0SRC(0x1) +#define CFGCHIP1_CAP0SRC_MCASP0_RX CFGCHIP1_CAP0SRC(0x2) +#define CFGCHIP1_CAP0SRC_EMAC_C0_RX_THRESHOLD CFGCHIP1_CAP0SRC(0x7) +#define CFGCHIP1_CAP0SRC_EMAC_C0_RX CFGCHIP1_CAP0SRC(0x8) +#define CFGCHIP1_CAP0SRC_EMAC_C0_TX CFGCHIP1_CAP0SRC(0x9) +#define CFGCHIP1_CAP0SRC_EMAC_C0_MISC CFGCHIP1_CAP0SRC(0xa) +#define CFGCHIP1_CAP0SRC_EMAC_C1_RX_THRESHOLD CFGCHIP1_CAP0SRC(0xb) +#define CFGCHIP1_CAP0SRC_EMAC_C1_RX CFGCHIP1_CAP0SRC(0xc) +#define CFGCHIP1_CAP0SRC_EMAC_C1_TX CFGCHIP1_CAP0SRC(0xd) +#define CFGCHIP1_CAP0SRC_EMAC_C1_MISC CFGCHIP1_CAP0SRC(0xe) +#define CFGCHIP1_CAP0SRC_EMAC_C2_RX_THRESHOLD CFGCHIP1_CAP0SRC(0xf) +#define CFGCHIP1_CAP0SRC_EMAC_C2_RX CFGCHIP1_CAP0SRC(0x10) +#define CFGCHIP1_CAP0SRC_EMAC_C2_TX CFGCHIP1_CAP0SRC(0x11) +#define CFGCHIP1_CAP0SRC_EMAC_C2_MISC CFGCHIP1_CAP0SRC(0x12) +#define CFGCHIP1_HPIBYTEAD BIT(16) +#define CFGCHIP1_HPIENA BIT(15) +#define CFGCHIP0_EDMA31TC0DBS(n) ((n) << 13) +#define CFGCHIP0_EDMA31TC0DBS_MASK CFGCHIP0_EDMA31TC0DBS(0x3) +#define CFGCHIP0_EDMA31TC0DBS_16 CFGCHIP0_EDMA31TC0DBS(0x0) +#define CFGCHIP0_EDMA31TC0DBS_32 CFGCHIP0_EDMA31TC0DBS(0x1) +#define CFGCHIP0_EDMA31TC0DBS_64 CFGCHIP0_EDMA31TC0DBS(0x2) +#define CFGCHIP1_TBCLKSYNC BIT(12) +#define CFGCHIP1_AMUTESEL0(n) ((n) << 0) +#define CFGCHIP1_AMUTESEL0_MASK CFGCHIP1_AMUTESEL0(0xf) +#define CFGCHIP1_AMUTESEL0_LOW CFGCHIP1_AMUTESEL0(0x0) +#define CFGCHIP1_AMUTESEL0_BANK_0 CFGCHIP1_AMUTESEL0(0x1) +#define CFGCHIP1_AMUTESEL0_BANK_1 CFGCHIP1_AMUTESEL0(0x2) +#define CFGCHIP1_AMUTESEL0_BANK_2 CFGCHIP1_AMUTESEL0(0x3) +#define CFGCHIP1_AMUTESEL0_BANK_3 CFGCHIP1_AMUTESEL0(0x4) +#define CFGCHIP1_AMUTESEL0_BANK_4 CFGCHIP1_AMUTESEL0(0x5) +#define CFGCHIP1_AMUTESEL0_BANK_5 CFGCHIP1_AMUTESEL0(0x6) +#define CFGCHIP1_AMUTESEL0_BANK_6 CFGCHIP1_AMUTESEL0(0x7) +#define CFGCHIP1_AMUTESEL0_BANK_7 CFGCHIP1_AMUTESEL0(0x8) + +/* CFGCHIP2 (USB PHY) register bits */ +#define CFGCHIP2_PHYCLKGD BIT(17) +#define CFGCHIP2_VBUSSENSE BIT(16) +#define CFGCHIP2_RESET BIT(15) +#define CFGCHIP2_OTGMODE(n) ((n) << 13) +#define CFGCHIP2_OTGMODE_MASK CFGCHIP2_OTGMODE(0x3) +#define CFGCHIP2_OTGMODE_NO_OVERRIDE CFGCHIP2_OTGMODE(0x0) +#define CFGCHIP2_OTGMODE_FORCE_HOST CFGCHIP2_OTGMODE(0x1) +#define CFGCHIP2_OTGMODE_FORCE_DEVICE CFGCHIP2_OTGMODE(0x2) +#define CFGCHIP2_OTGMODE_FORCE_HOST_VBUS_LOW CFGCHIP2_OTGMODE(0x3) +#define CFGCHIP2_USB1PHYCLKMUX BIT(12) +#define CFGCHIP2_USB2PHYCLKMUX BIT(11) +#define CFGCHIP2_PHYPWRDN BIT(10) +#define CFGCHIP2_OTGPWRDN BIT(9) +#define CFGCHIP2_DATPOL BIT(8) +#define CFGCHIP2_USB1SUSPENDM BIT(7) +#define CFGCHIP2_PHY_PLLON BIT(6) +#define CFGCHIP2_SESENDEN BIT(5) +#define CFGCHIP2_VBDTCTEN BIT(4) +#define CFGCHIP2_REFFREQ(n) ((n) << 0) +#define CFGCHIP2_REFFREQ_MASK CFGCHIP2_REFFREQ(0xf) +#define CFGCHIP2_REFFREQ_12MHZ CFGCHIP2_REFFREQ(0x1) +#define CFGCHIP2_REFFREQ_24MHZ CFGCHIP2_REFFREQ(0x2) +#define CFGCHIP2_REFFREQ_48MHZ CFGCHIP2_REFFREQ(0x3) +#define CFGCHIP2_REFFREQ_19_2MHZ CFGCHIP2_REFFREQ(0x4) +#define CFGCHIP2_REFFREQ_38_4MHZ CFGCHIP2_REFFREQ(0x5) +#define CFGCHIP2_REFFREQ_13MHZ CFGCHIP2_REFFREQ(0x6) +#define CFGCHIP2_REFFREQ_26MHZ CFGCHIP2_REFFREQ(0x7) +#define CFGCHIP2_REFFREQ_20MHZ CFGCHIP2_REFFREQ(0x8) +#define CFGCHIP2_REFFREQ_40MHZ CFGCHIP2_REFFREQ(0x9) + +/* CFGCHIP3 (EMAC/uPP/PLL1/ASYNC3/PRU/DIV4.5/EMIFA) register bits */ +#define CFGCHIP3_RMII_SEL BIT(8) +#define 
CFGCHIP3_UPP_TX_CLKSRC BIT(6) +#define CFGCHIP3_PLL1_MASTER_LOCK BIT(5) +#define CFGCHIP3_ASYNC3_CLKSRC BIT(4) +#define CFGCHIP3_PRUEVTSEL BIT(3) +#define CFGCHIP3_DIV45PENA BIT(2) +#define CFGCHIP3_EMA_CLKSRC BIT(1) + +/* CFGCHIP4 (McASP0 AMUNTEIN) register bits */ +#define CFGCHIP4_AMUTECLR0 BIT(0) + +#endif /* __LINUX_MFD_DA8XX_CFGCHIP_H */ diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h index 2567a87..7f55b8b 100644 --- a/include/linux/mfd/ti_am335x_tscadc.h +++ b/include/linux/mfd/ti_am335x_tscadc.h @@ -138,16 +138,16 @@ /* * time in us for processing a single channel, calculated as follows: * - * num cycles = open delay + (sample delay + conv time) * averaging + * max num cycles = open delay + (sample delay + conv time) * averaging * - * num cycles: 152 + (1 + 13) * 16 = 376 + * max num cycles: 262143 + (255 + 13) * 16 = 266431 * * clock frequency: 26MHz / 8 = 3.25MHz * clock period: 1 / 3.25MHz = 308ns * - * processing time: 376 * 308ns = 116us + * max processing time: 266431 * 308ns = 83ms(approx) */ -#define IDLE_TIMEOUT 116 /* microsec */ +#define IDLE_TIMEOUT 83 /* milliseconds */ #define TSCADC_CELLS 2 diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 21bc455..d1f9a58 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -6710,9 +6710,10 @@ struct mlx5_ifc_pude_reg_bits { }; struct mlx5_ifc_ptys_reg_bits { - u8 an_disable_cap[0x1]; + u8 reserved_at_0[0x1]; u8 an_disable_admin[0x1]; - u8 reserved_at_2[0x6]; + u8 an_disable_cap[0x1]; + u8 reserved_at_3[0x5]; u8 local_port[0x8]; u8 reserved_at_10[0xd]; u8 proto_mask[0x3]; diff --git a/include/linux/mm.h b/include/linux/mm.h index 08ed53e..5f14534 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2014,10 +2014,13 @@ extern void mm_drop_all_locks(struct mm_struct *mm); extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); extern struct file *get_mm_exe_file(struct mm_struct *mm); +extern struct file *get_task_exe_file(struct task_struct *task); extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages); extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages); +extern bool vma_is_special_mapping(const struct vm_area_struct *vma, + const struct vm_special_mapping *sm); extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long flags, diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index d572b78..7f2ae99 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -828,9 +828,21 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); */ #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) -static inline int populated_zone(struct zone *zone) +/* + * Returns true if a zone has pages managed by the buddy allocator. + * All the reclaim decisions have to use this function rather than + * populated_zone(). If the whole zone is reserved then we can easily + * end up with populated_zone() && !managed_zone(). 
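The mmzone.h comment that concludes just below makes the key distinction: reclaim decisions should test the new managed_zone() rather than populated_zone(), because a zone whose pages are all reserved has nothing for the buddy allocator to hand out. A minimal reclaim-style sketch using the new helper; the function name is hypothetical.

#include <linux/mmzone.h>

/* Hypothetical helper: does this node have any zone the buddy allocator manages? */
static bool example_node_has_managed_zones(pg_data_t *pgdat)
{
	int i;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		/* A fully reserved zone can be populated_zone() yet fail this test. */
		if (managed_zone(&pgdat->node_zones[i]))
			return true;
	}
	return false;
}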
+ */ +static inline bool managed_zone(struct zone *zone) +{ + return zone->managed_pages; +} + +/* Returns true if a zone has memory */ +static inline bool populated_zone(struct zone *zone) { - return (!!zone->present_pages); + return zone->present_pages; } extern int movable_zone; diff --git a/include/linux/mroute.h b/include/linux/mroute.h index d351fd3..e5fb813 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -120,5 +120,5 @@ struct mfc_cache { struct rtmsg; int ipmr_get_route(struct net *net, struct sk_buff *skb, __be32 saddr, __be32 daddr, - struct rtmsg *rtm, int nowait); + struct rtmsg *rtm, int nowait, u32 portid); #endif diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h index 3987b64..19a1c0c 100644 --- a/include/linux/mroute6.h +++ b/include/linux/mroute6.h @@ -116,7 +116,7 @@ struct mfc6_cache { struct rtmsg; extern int ip6mr_get_route(struct net *net, struct sk_buff *skb, - struct rtmsg *rtm, int nowait); + struct rtmsg *rtm, int nowait, u32 portid); #ifdef CONFIG_IPV6_MROUTE extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb); diff --git a/include/linux/msi.h b/include/linux/msi.h index e8c81fb..0db320b 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -68,7 +68,7 @@ struct msi_desc { unsigned int nvec_used; struct device *dev; struct msi_msg msg; - const struct cpumask *affinity; + struct cpumask *affinity; union { /* PCI MSI/X specific data */ @@ -123,7 +123,8 @@ static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc) } #endif /* CONFIG_PCI_MSI */ -struct msi_desc *alloc_msi_entry(struct device *dev); +struct msi_desc *alloc_msi_entry(struct device *dev, int nvec, + const struct cpumask *affinity); void free_msi_entry(struct msi_desc *entry); void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3a788bf..e8d79d4 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3267,6 +3267,7 @@ static inline void napi_free_frags(struct napi_struct *napi) napi->skb = NULL; } +bool netdev_is_rx_handler_busy(struct net_device *dev); int netdev_rx_handler_register(struct net_device *dev, rx_handler_func_t *rx_handler, void *rx_handler_data); diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h index 80ca889..664da00 100644 --- a/include/linux/netfilter/nfnetlink_acct.h +++ b/include/linux/netfilter/nfnetlink_acct.h @@ -15,6 +15,6 @@ struct nf_acct; struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name); void nfnl_acct_put(struct nf_acct *acct); void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct); -extern int nfnl_acct_overquota(const struct sk_buff *skb, - struct nf_acct *nfacct); +int nfnl_acct_overquota(struct net *net, const struct sk_buff *skb, + struct nf_acct *nfacct); #endif /* _NFNL_ACCT_H */ diff --git a/include/linux/nvme.h b/include/linux/nvme.h index d8b37ba..7676557 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -794,7 +794,7 @@ struct nvmf_connect_command { }; struct nvmf_connect_data { - uuid_le hostid; + uuid_be hostid; __le16 cntlid; char resv4[238]; char subsysnqn[NVMF_NQN_FIELD_LEN]; diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 66a1260..01e8443 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -571,56 +571,57 @@ static inline int fault_in_pages_readable(const char __user 
*uaddr, int size) */ static inline int fault_in_multipages_writeable(char __user *uaddr, int size) { - int ret = 0; char __user *end = uaddr + size - 1; if (unlikely(size == 0)) - return ret; + return 0; + if (unlikely(uaddr > end)) + return -EFAULT; /* * Writing zeroes into userspace here is OK, because we know that if * the zero gets there, we'll be overwriting it. */ - while (uaddr <= end) { - ret = __put_user(0, uaddr); - if (ret != 0) - return ret; + do { + if (unlikely(__put_user(0, uaddr) != 0)) + return -EFAULT; uaddr += PAGE_SIZE; - } + } while (uaddr <= end); /* Check whether the range spilled into the next page. */ if (((unsigned long)uaddr & PAGE_MASK) == ((unsigned long)end & PAGE_MASK)) - ret = __put_user(0, end); + return __put_user(0, end); - return ret; + return 0; } static inline int fault_in_multipages_readable(const char __user *uaddr, int size) { volatile char c; - int ret = 0; const char __user *end = uaddr + size - 1; if (unlikely(size == 0)) - return ret; + return 0; - while (uaddr <= end) { - ret = __get_user(c, uaddr); - if (ret != 0) - return ret; + if (unlikely(uaddr > end)) + return -EFAULT; + + do { + if (unlikely(__get_user(c, uaddr) != 0)) + return -EFAULT; uaddr += PAGE_SIZE; - } + } while (uaddr <= end); /* Check whether the range spilled into the next page. */ if (((unsigned long)uaddr & PAGE_MASK) == ((unsigned long)end & PAGE_MASK)) { - ret = __get_user(c, end); - (void)c; + return __get_user(c, end); } - return ret; + (void)c; + return 0; } int add_to_page_cache_locked(struct page *page, struct address_space *mapping, diff --git a/include/linux/pci.h b/include/linux/pci.h index fbc1fa6..7cc0acb 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -683,15 +683,6 @@ struct pci_driver { #define to_pci_driver(drv) container_of(drv, struct pci_driver, driver) /** - * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table - * @_table: device table name - * - * This macro is deprecated and should not be used in new code. 
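With the deprecated DEFINE_PCI_DEVICE_TABLE() wrapper removed (its kernel-doc ends just below), drivers simply spell out the const array. A sketch of the open-coded form; the table name and vendor/device IDs are placeholders.

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },	/* placeholder vendor/device */
	{ }						/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_ids);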
- */ -#define DEFINE_PCI_DEVICE_TABLE(_table) \ - const struct pci_device_id _table[] - -/** * PCI_DEVICE - macro used to describe a specific pci device * @vend: the 16 bit PCI Vendor ID * @dev: the 16 bit PCI Device ID @@ -1135,6 +1126,7 @@ void pdev_enable_device(struct pci_dev *); int pci_enable_resources(struct pci_dev *, int mask); void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *), int (*)(const struct pci_dev *, u8, u8)); +struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res); #define HAVE_PCI_REQ_REGIONS 2 int __must_check pci_request_regions(struct pci_dev *, const char *); int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *); @@ -1309,6 +1301,7 @@ int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, unsigned int max_vecs, unsigned int flags); void pci_free_irq_vectors(struct pci_dev *dev); int pci_irq_vector(struct pci_dev *dev, unsigned int nr); +const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec); #else static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } @@ -1351,6 +1344,11 @@ static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr) return -EINVAL; return dev->irq; } +static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, + int vec) +{ + return cpu_possible_mask; +} #endif #ifdef CONFIG_PCIEPORTBUS @@ -1551,6 +1549,9 @@ static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) { return 0; } +static inline struct resource *pci_find_resource(struct pci_dev *dev, + struct resource *res) +{ return NULL; } static inline int pci_request_regions(struct pci_dev *dev, const char *res_name) { return -EIO; } static inline void pci_release_regions(struct pci_dev *dev) { } diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index c2fa3ec..5b2e615 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h @@ -10,32 +10,122 @@ struct percpu_rw_semaphore { struct rcu_sync rss; - unsigned int __percpu *fast_read_ctr; + unsigned int __percpu *read_count; struct rw_semaphore rw_sem; - atomic_t slow_read_ctr; - wait_queue_head_t write_waitq; + wait_queue_head_t writer; + int readers_block; }; -extern void percpu_down_read(struct percpu_rw_semaphore *); -extern int percpu_down_read_trylock(struct percpu_rw_semaphore *); -extern void percpu_up_read(struct percpu_rw_semaphore *); +#define DEFINE_STATIC_PERCPU_RWSEM(name) \ +static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name); \ +static struct percpu_rw_semaphore name = { \ + .rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC), \ + .read_count = &__percpu_rwsem_rc_##name, \ + .rw_sem = __RWSEM_INITIALIZER(name.rw_sem), \ + .writer = __WAIT_QUEUE_HEAD_INITIALIZER(name.writer), \ +} + +extern int __percpu_down_read(struct percpu_rw_semaphore *, int); +extern void __percpu_up_read(struct percpu_rw_semaphore *); + +static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem) +{ + might_sleep(); + + rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_); + + preempt_disable(); + /* + * We are in an RCU-sched read-side critical section, so the writer + * cannot both change sem->state from readers_fast and start checking + * counters while we are here. So if we see !sem->state, we know that + * the writer won't be checking until we're past the preempt_enable() + * and that one the synchronize_sched() is done, the writer will see + * anything we did within this RCU-sched read-size critical section. 
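The reworked percpu_rw_semaphore above (read_count/writer/readers_block plus the rcu_sync-gated fast path whose comment concludes just below) keeps the lock and unlock calls unchanged for users. A hedged usage sketch with hypothetical names:

#include <linux/percpu-rwsem.h>

DEFINE_STATIC_PERCPU_RWSEM(example_sem);	/* hypothetical lock */

static void example_reader(void)
{
	percpu_down_read(&example_sem);	/* fast path: a per-CPU increment, no shared atomics */
	/* ... read state protected by example_sem ... */
	percpu_up_read(&example_sem);
}

static void example_writer(void)
{
	percpu_down_write(&example_sem);	/* forces readers onto the slow path, then excludes them */
	/* ... modify state protected by example_sem ... */
	percpu_up_write(&example_sem);
}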
+ */ + __this_cpu_inc(*sem->read_count); + if (unlikely(!rcu_sync_is_idle(&sem->rss))) + __percpu_down_read(sem, false); /* Unconditional memory barrier */ + barrier(); + /* + * The barrier() prevents the compiler from + * bleeding the critical section out. + */ +} + +static inline void percpu_down_read(struct percpu_rw_semaphore *sem) +{ + percpu_down_read_preempt_disable(sem); + preempt_enable(); +} + +static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem) +{ + int ret = 1; + + preempt_disable(); + /* + * Same as in percpu_down_read(). + */ + __this_cpu_inc(*sem->read_count); + if (unlikely(!rcu_sync_is_idle(&sem->rss))) + ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */ + preempt_enable(); + /* + * The barrier() from preempt_enable() prevents the compiler from + * bleeding the critical section out. + */ + + if (ret) + rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_); + + return ret; +} + +static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem) +{ + /* + * The barrier() prevents the compiler from + * bleeding the critical section out. + */ + barrier(); + /* + * Same as in percpu_down_read(). + */ + if (likely(rcu_sync_is_idle(&sem->rss))) + __this_cpu_dec(*sem->read_count); + else + __percpu_up_read(sem); /* Unconditional memory barrier */ + preempt_enable(); + + rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_); +} + +static inline void percpu_up_read(struct percpu_rw_semaphore *sem) +{ + preempt_disable(); + percpu_up_read_preempt_enable(sem); +} extern void percpu_down_write(struct percpu_rw_semaphore *); extern void percpu_up_write(struct percpu_rw_semaphore *); extern int __percpu_init_rwsem(struct percpu_rw_semaphore *, const char *, struct lock_class_key *); + extern void percpu_free_rwsem(struct percpu_rw_semaphore *); -#define percpu_init_rwsem(brw) \ +#define percpu_init_rwsem(sem) \ ({ \ static struct lock_class_key rwsem_key; \ - __percpu_init_rwsem(brw, #brw, &rwsem_key); \ + __percpu_init_rwsem(sem, #sem, &rwsem_key); \ }) - #define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem) +#define percpu_rwsem_assert_held(sem) \ + lockdep_assert_held(&(sem)->rw_sem) + static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem, bool read, unsigned long ip) { diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index 4ad1b40..8462da2 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -14,7 +14,7 @@ #include <linux/interrupt.h> #include <linux/perf_event.h> - +#include <linux/sysfs.h> #include <asm/cputype.h> /* @@ -77,6 +77,13 @@ struct pmu_hw_events { struct arm_pmu *percpu_pmu; }; +enum armpmu_attr_groups { + ARMPMU_ATTR_GROUP_COMMON, + ARMPMU_ATTR_GROUP_EVENTS, + ARMPMU_ATTR_GROUP_FORMATS, + ARMPMU_NR_ATTR_GROUPS +}; + struct arm_pmu { struct pmu pmu; cpumask_t active_irqs; @@ -111,6 +118,8 @@ struct arm_pmu { struct pmu_hw_events __percpu *hw_events; struct hlist_node node; struct notifier_block cpu_pm_nb; + /* the attr_groups array must be NULL-terminated */ + const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1]; }; #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) @@ -151,6 +160,8 @@ int arm_pmu_device_probe(struct platform_device *pdev, const struct of_device_id *of_table, const struct pmu_probe_info *probe_table); +#define ARMV8_PMU_PDEV_NAME "armv8-pmu" + #endif /* CONFIG_ARM_PMU */ #endif /* __ARM_PMU_H__ */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 2b6b43c..5c53625 
100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -510,9 +510,15 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *, struct perf_sample_data *, struct pt_regs *regs); -enum perf_group_flag { - PERF_GROUP_SOFTWARE = 0x1, -}; +/* + * Event capabilities. For event_caps and groups caps. + * + * PERF_EV_CAP_SOFTWARE: Is a software event. + * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read + * from any CPU in the package where it is active. + */ +#define PERF_EV_CAP_SOFTWARE BIT(0) +#define PERF_EV_CAP_READ_ACTIVE_PKG BIT(1) #define SWEVENT_HLIST_BITS 8 #define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS) @@ -568,7 +574,12 @@ struct perf_event { struct hlist_node hlist_entry; struct list_head active_entry; int nr_siblings; - int group_flags; + + /* Not serialized. Only written during event initialization. */ + int event_caps; + /* The cumulative AND of all event_caps for events in this group. */ + int group_caps; + struct perf_event *group_leader; struct pmu *pmu; void *pmu_private; @@ -774,6 +785,9 @@ struct perf_cpu_context { #ifdef CONFIG_CGROUP_PERF struct perf_cgroup *cgrp; #endif + + struct list_head sched_cb_entry; + int sched_cb_usage; }; struct perf_output_handle { @@ -985,7 +999,7 @@ static inline bool is_sampling_event(struct perf_event *event) */ static inline int is_software_event(struct perf_event *event) { - return event->pmu->task_ctx_nr == perf_sw_context; + return event->event_caps & PERF_EV_CAP_SOFTWARE; } extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 31fec85..a09fe5c 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -51,6 +51,8 @@ struct generic_pm_domain { struct mutex lock; struct dev_power_governor *gov; struct work_struct power_off_work; + struct fwnode_handle *provider; /* Identity of the domain provider */ + bool has_provider; const char *name; atomic_t sd_count; /* Number of subdomains with power "on" */ enum gpd_status status; /* Current state of the domain */ @@ -116,7 +118,6 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev) return to_gpd_data(dev->power.subsys_data->domain_data); } -extern struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev); extern int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, struct gpd_timing_data *td); @@ -129,6 +130,7 @@ extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *target); extern int pm_genpd_init(struct generic_pm_domain *genpd, struct dev_power_governor *gov, bool is_off); +extern int pm_genpd_remove(struct generic_pm_domain *genpd); extern struct dev_power_governor simple_qos_governor; extern struct dev_power_governor pm_domain_always_on_gov; @@ -138,10 +140,6 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev) { return ERR_PTR(-ENOSYS); } -static inline struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev) -{ - return NULL; -} static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, struct gpd_timing_data *td) @@ -168,6 +166,10 @@ static inline int pm_genpd_init(struct generic_pm_domain *genpd, { return -ENOSYS; } +static inline int pm_genpd_remove(struct generic_pm_domain *genpd) +{ + return -ENOTSUPP; +} #endif static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, @@ -192,57 +194,57 @@ struct genpd_onecell_data { unsigned int num_domains; 
}; -typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args, - void *data); - #ifdef CONFIG_PM_GENERIC_DOMAINS_OF -int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, - void *data); +int of_genpd_add_provider_simple(struct device_node *np, + struct generic_pm_domain *genpd); +int of_genpd_add_provider_onecell(struct device_node *np, + struct genpd_onecell_data *data); void of_genpd_del_provider(struct device_node *np); -struct generic_pm_domain *of_genpd_get_from_provider( - struct of_phandle_args *genpdspec); - -struct generic_pm_domain *__of_genpd_xlate_simple( - struct of_phandle_args *genpdspec, - void *data); -struct generic_pm_domain *__of_genpd_xlate_onecell( - struct of_phandle_args *genpdspec, - void *data); +extern int of_genpd_add_device(struct of_phandle_args *args, + struct device *dev); +extern int of_genpd_add_subdomain(struct of_phandle_args *parent, + struct of_phandle_args *new_subdomain); +extern struct generic_pm_domain *of_genpd_remove_last(struct device_node *np); int genpd_dev_pm_attach(struct device *dev); #else /* !CONFIG_PM_GENERIC_DOMAINS_OF */ -static inline int __of_genpd_add_provider(struct device_node *np, - genpd_xlate_t xlate, void *data) +static inline int of_genpd_add_provider_simple(struct device_node *np, + struct generic_pm_domain *genpd) { - return 0; + return -ENOTSUPP; } -static inline void of_genpd_del_provider(struct device_node *np) {} -static inline struct generic_pm_domain *of_genpd_get_from_provider( - struct of_phandle_args *genpdspec) +static inline int of_genpd_add_provider_onecell(struct device_node *np, + struct genpd_onecell_data *data) { - return NULL; + return -ENOTSUPP; } -#define __of_genpd_xlate_simple NULL -#define __of_genpd_xlate_onecell NULL +static inline void of_genpd_del_provider(struct device_node *np) {} -static inline int genpd_dev_pm_attach(struct device *dev) +static inline int of_genpd_add_device(struct of_phandle_args *args, + struct device *dev) { return -ENODEV; } -#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ -static inline int of_genpd_add_provider_simple(struct device_node *np, - struct generic_pm_domain *genpd) +static inline int of_genpd_add_subdomain(struct of_phandle_args *parent, + struct of_phandle_args *new_subdomain) { - return __of_genpd_add_provider(np, __of_genpd_xlate_simple, genpd); + return -ENODEV; } -static inline int of_genpd_add_provider_onecell(struct device_node *np, - struct genpd_onecell_data *data) + +static inline int genpd_dev_pm_attach(struct device *dev) +{ + return -ENODEV; +} + +static inline +struct generic_pm_domain *of_genpd_remove_last(struct device_node *np) { - return __of_genpd_add_provider(np, __of_genpd_xlate_onecell, data); + return ERR_PTR(-ENOTSUPP); } +#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ #ifdef CONFIG_PM extern int dev_pm_domain_attach(struct device *dev, bool power_on); diff --git a/include/linux/property.h b/include/linux/property.h index 3a2f9ae..856e50b 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -190,7 +190,7 @@ struct property_entry { .length = ARRAY_SIZE(_val_) * sizeof(_type_), \ .is_array = true, \ .is_string = false, \ - { .pointer = { _type_##_data = _val_ } }, \ + { .pointer = { ._type_##_data = _val_ } }, \ } #define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \ diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h index a63a33e..ece7ed9 100644 --- a/include/linux/rcu_sync.h +++ b/include/linux/rcu_sync.h @@ -59,6 +59,7 @@ static inline bool rcu_sync_is_idle(struct rcu_sync *rsp) 
} extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type); +extern void rcu_sync_enter_start(struct rcu_sync *); extern void rcu_sync_enter(struct rcu_sync *); extern void rcu_sync_exit(struct rcu_sync *); extern void rcu_sync_dtor(struct rcu_sync *); diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 1aa62e1..321f9ed 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -334,6 +334,7 @@ void rcu_sched_qs(void); void rcu_bh_qs(void); void rcu_check_callbacks(int user); void rcu_report_dead(unsigned int cpu); +void rcu_cpu_starting(unsigned int cpu); #ifndef CONFIG_TINY_RCU void rcu_end_inkernel_boot(void); diff --git a/include/linux/sched.h b/include/linux/sched.h index 62c68e5..7543a47 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -448,6 +448,8 @@ static inline void io_schedule(void) io_schedule_timeout(MAX_SCHEDULE_TIMEOUT); } +void __noreturn do_task_dead(void); + struct nsproxy; struct user_namespace; @@ -1022,7 +1024,8 @@ extern void wake_up_q(struct wake_q_head *head); #define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ #define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ #define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ -#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu power */ +#define SD_ASYM_CPUCAPACITY 0x0040 /* Groups have different max cpu capacities */ +#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu capacity */ #define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */ #define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ #define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ @@ -1064,6 +1067,12 @@ extern int sched_domain_level_max; struct sched_group; +struct sched_domain_shared { + atomic_t ref; + atomic_t nr_busy_cpus; + int has_idle_cores; +}; + struct sched_domain { /* These fields must be setup */ struct sched_domain *parent; /* top domain must be null terminated */ @@ -1094,6 +1103,8 @@ struct sched_domain { u64 max_newidle_lb_cost; unsigned long next_decay_max_lb_cost; + u64 avg_scan_cost; /* select_idle_sibling */ + #ifdef CONFIG_SCHEDSTATS /* load_balance() stats */ unsigned int lb_count[CPU_MAX_IDLE_TYPES]; @@ -1132,6 +1143,7 @@ struct sched_domain { void *private; /* used during construction */ struct rcu_head rcu; /* used during destruction */ }; + struct sched_domain_shared *shared; unsigned int span_weight; /* @@ -1165,6 +1177,7 @@ typedef int (*sched_domain_flags_f)(void); struct sd_data { struct sched_domain **__percpu sd; + struct sched_domain_shared **__percpu sds; struct sched_group **__percpu sg; struct sched_group_capacity **__percpu sgc; }; @@ -1458,6 +1471,13 @@ struct tlbflush_unmap_batch { }; struct task_struct { +#ifdef CONFIG_THREAD_INFO_IN_TASK + /* + * For reasons of header soup (see current_thread_info()), this + * must be the first element of task_struct. 
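The first-member requirement stated above exists because, with CONFIG_THREAD_INFO_IN_TASK, thread_info.h (later in this diff) defines current_thread_info() as a plain cast of current. A compile-time sketch of why the rule matters; the helper name is hypothetical and assumes that config option.

#ifdef CONFIG_THREAD_INFO_IN_TASK
#include <linux/bug.h>
#include <linux/sched.h>
#include <linux/stddef.h>

/* Hypothetical illustration: current_thread_info() can be a plain cast only
 * because thread_info sits at offset 0 of task_struct. */
static inline struct thread_info *example_task_ti(struct task_struct *tsk)
{
	BUILD_BUG_ON(offsetof(struct task_struct, thread_info) != 0);
	return (struct thread_info *)tsk;
}
#endif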
+ */ + struct thread_info thread_info; +#endif volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ void *stack; atomic_t usage; @@ -1467,6 +1487,9 @@ struct task_struct { #ifdef CONFIG_SMP struct llist_node wake_entry; int on_cpu; +#ifdef CONFIG_THREAD_INFO_IN_TASK + unsigned int cpu; /* current CPU */ +#endif unsigned int wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; @@ -1923,6 +1946,13 @@ struct task_struct { #ifdef CONFIG_MMU struct task_struct *oom_reaper_list; #endif +#ifdef CONFIG_VMAP_STACK + struct vm_struct *stack_vm_area; +#endif +#ifdef CONFIG_THREAD_INFO_IN_TASK + /* A live task holds one reference. */ + atomic_t stack_refcount; +#endif /* CPU-specific state of this task */ struct thread_struct thread; /* @@ -1939,6 +1969,18 @@ extern int arch_task_struct_size __read_mostly; # define arch_task_struct_size (sizeof(struct task_struct)) #endif +#ifdef CONFIG_VMAP_STACK +static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) +{ + return t->stack_vm_area; +} +#else +static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) +{ + return NULL; +} +#endif + /* Future-safe accessor for struct task_struct's cpus_allowed. */ #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) @@ -2568,12 +2610,14 @@ static inline bool is_idle_task(const struct task_struct *p) return p->pid == 0; } extern struct task_struct *curr_task(int cpu); -extern void set_curr_task(int cpu, struct task_struct *p); +extern void ia64_set_curr_task(int cpu, struct task_struct *p); void yield(void); union thread_union { +#ifndef CONFIG_THREAD_INFO_IN_TASK struct thread_info thread_info; +#endif unsigned long stack[THREAD_SIZE/sizeof(long)]; }; @@ -3061,10 +3105,34 @@ static inline void threadgroup_change_end(struct task_struct *tsk) cgroup_threadgroup_change_end(tsk); } -#ifndef __HAVE_THREAD_FUNCTIONS +#ifdef CONFIG_THREAD_INFO_IN_TASK + +static inline struct thread_info *task_thread_info(struct task_struct *task) +{ + return &task->thread_info; +} + +/* + * When accessing the stack of a non-current task that might exit, use + * try_get_task_stack() instead. task_stack_page will return a pointer + * that could get freed out from under you. + */ +static inline void *task_stack_page(const struct task_struct *task) +{ + return task->stack; +} + +#define setup_thread_stack(new,old) do { } while(0) + +static inline unsigned long *end_of_stack(const struct task_struct *task) +{ + return task->stack; +} + +#elif !defined(__HAVE_THREAD_FUNCTIONS) #define task_thread_info(task) ((struct thread_info *)(task)->stack) -#define task_stack_page(task) ((task)->stack) +#define task_stack_page(task) ((void *)(task)->stack) static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) { @@ -3091,6 +3159,24 @@ static inline unsigned long *end_of_stack(struct task_struct *p) } #endif + +#ifdef CONFIG_THREAD_INFO_IN_TASK +static inline void *try_get_task_stack(struct task_struct *tsk) +{ + return atomic_inc_not_zero(&tsk->stack_refcount) ? 
+ task_stack_page(tsk) : NULL; +} + +extern void put_task_stack(struct task_struct *tsk); +#else +static inline void *try_get_task_stack(struct task_struct *tsk) +{ + return task_stack_page(tsk); +} + +static inline void put_task_stack(struct task_struct *tsk) {} +#endif + #define task_stack_end_corrupted(task) \ (*(end_of_stack(task)) != STACK_END_MAGIC) @@ -3206,7 +3292,11 @@ static inline int signal_pending_state(long state, struct task_struct *p) * cond_resched_lock() will drop the spinlock before scheduling, * cond_resched_softirq() will enable bhs before scheduling. */ +#ifndef CONFIG_PREEMPT extern int _cond_resched(void); +#else +static inline int _cond_resched(void) { return 0; } +#endif #define cond_resched() ({ \ ___might_sleep(__FILE__, __LINE__, 0); \ @@ -3236,6 +3326,15 @@ static inline void cond_resched_rcu(void) #endif } +static inline unsigned long get_preempt_disable_ip(struct task_struct *p) +{ +#ifdef CONFIG_DEBUG_PREEMPT + return p->preempt_disable_ip; +#else + return 0; +#endif +} + /* * Does a critical section need to be broken due to another * task waiting?: (technically does not depend on CONFIG_PREEMPT, @@ -3364,7 +3463,11 @@ static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) static inline unsigned int task_cpu(const struct task_struct *p) { +#ifdef CONFIG_THREAD_INFO_IN_TASK + return p->cpu; +#else return task_thread_info(p)->cpu; +#endif } static inline int task_node(const struct task_struct *p) @@ -3469,15 +3572,20 @@ static inline unsigned long rlimit_max(unsigned int limit) return task_rlimit_max(current, limit); } +#define SCHED_CPUFREQ_RT (1U << 0) +#define SCHED_CPUFREQ_DL (1U << 1) +#define SCHED_CPUFREQ_IOWAIT (1U << 2) + +#define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL) + #ifdef CONFIG_CPU_FREQ struct update_util_data { - void (*func)(struct update_util_data *data, - u64 time, unsigned long util, unsigned long max); + void (*func)(struct update_util_data *data, u64 time, unsigned int flags); }; void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, - void (*func)(struct update_util_data *data, u64 time, - unsigned long util, unsigned long max)); + void (*func)(struct update_util_data *data, u64 time, + unsigned int flags)); void cpufreq_remove_update_util_hook(int cpu); #endif /* CONFIG_CPU_FREQ */ diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 923266c..48ec765 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -111,7 +111,6 @@ struct uart_8250_port { * if no_console_suspend */ unsigned char probe; - struct mctrl_gpios *gpios; #define UART_PROBE_RSA (1 << 0) /* diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h index 76199b7..e302c44 100644 --- a/include/linux/smc91x.h +++ b/include/linux/smc91x.h @@ -1,6 +1,16 @@ #ifndef __SMC91X_H__ #define __SMC91X_H__ +/* + * These bits define which access sizes a platform can support, rather + * than the maximal access size. So, if your platform can do 16-bit + * and 32-bit accesses to the SMC91x device, but not 8-bit, set both + * SMC91X_USE_16BIT and SMC91X_USE_32BIT. + * + * The SMC91x driver requires at least one of SMC91X_USE_8BIT or + * SMC91X_USE_16BIT to be supported - just setting SMC91X_USE_32BIT is + * an invalid configuration. 
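The clarified smc91x.h comment (closed just below, ahead of the flag definitions) describes capability bits rather than a maximum access width. A board-file sketch under that reading; the platform-data instance name is hypothetical.

#include <linux/smc91x.h>

/* Hypothetical board: the bus can issue 16-bit and 32-bit cycles but not
 * 8-bit ones, so both capability bits are set (16-bit covers the driver's minimum). */
static struct smc91x_platdata example_smc91x_pdata = {
	.flags = SMC91X_USE_16BIT | SMC91X_USE_32BIT,
};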
+ */ #define SMC91X_USE_8BIT (1 << 0) #define SMC91X_USE_16BIT (1 << 1) #define SMC91X_USE_32BIT (1 << 2) diff --git a/include/linux/smp.h b/include/linux/smp.h index eccae469..8e0cb7a 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -196,6 +196,9 @@ extern void arch_enable_nonboot_cpus_end(void); void smp_setup_processor_id(void); +int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, + bool phys); + /* SMP core functions */ int smpcfd_prepare_cpu(unsigned int cpu); int smpcfd_dead_cpu(unsigned int cpu); diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 7693e39..d971837 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -245,6 +245,7 @@ static inline bool idle_should_freeze(void) return unlikely(suspend_freeze_state == FREEZE_STATE_ENTER); } +extern void __init pm_states_init(void); extern void freeze_set_ops(const struct platform_freeze_ops *ops); extern void freeze_wake(void); @@ -279,6 +280,7 @@ static inline bool pm_resume_via_firmware(void) { return false; } static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } static inline bool idle_should_freeze(void) { return false; } +static inline void __init pm_states_init(void) {} static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {} static inline void freeze_wake(void) {} #endif /* !CONFIG_SUSPEND */ diff --git a/include/linux/swap.h b/include/linux/swap.h index b17cc48..4a529c9 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -257,6 +257,7 @@ static inline void workingset_node_pages_inc(struct radix_tree_node *node) static inline void workingset_node_pages_dec(struct radix_tree_node *node) { + VM_BUG_ON(!workingset_node_pages(node)); node->count--; } @@ -272,6 +273,7 @@ static inline void workingset_node_shadows_inc(struct radix_tree_node *node) static inline void workingset_node_shadows_dec(struct radix_tree_node *node) { + VM_BUG_ON(!workingset_node_shadows(node)); node->count -= 1U << RADIX_TREE_COUNT_SHIFT; } diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index cbd8990..45f004e 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -13,6 +13,21 @@ struct timespec; struct compat_timespec; +#ifdef CONFIG_THREAD_INFO_IN_TASK +struct thread_info { + unsigned long flags; /* low level flags */ +}; + +#define INIT_THREAD_INFO(tsk) \ +{ \ + .flags = 0, \ +} +#endif + +#ifdef CONFIG_THREAD_INFO_IN_TASK +#define current_thread_info() ((struct thread_info *)current) +#endif + /* * System call restart block. 
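Among the hunks above, smp.h gains smp_call_on_cpu(), which runs a callback on a chosen CPU and returns its result, optionally pinning to the physical CPU (see the new linux/hypervisor.h earlier in this diff). A hedged usage sketch; the callback and wrapper names are hypothetical.

#include <linux/smp.h>

static int example_cpu_work(void *arg)		/* hypothetical callback */
{
	int *out = arg;

	*out = raw_smp_processor_id();		/* stand-in for real CPU-local work */
	return 0;
}

static int example_run_on_cpu(unsigned int cpu)
{
	int value = -1;
	int ret;

	/* Runs the callback on 'cpu' and waits; 'false' skips physical-CPU pinning. */
	ret = smp_call_on_cpu(cpu, example_cpu_work, &value, false);
	return ret ? ret : value;
}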
*/ @@ -118,10 +133,11 @@ static inline int arch_within_stack_frames(const void * const stack, extern void __check_object_size(const void *ptr, unsigned long n, bool to_user); -static inline void check_object_size(const void *ptr, unsigned long n, - bool to_user) +static __always_inline void check_object_size(const void *ptr, unsigned long n, + bool to_user) { - __check_object_size(ptr, n, to_user); + if (!__builtin_constant_p(n)) + __check_object_size(ptr, n, to_user); } #else static inline void check_object_size(const void *ptr, unsigned long n, diff --git a/include/linux/time64.h b/include/linux/time64.h index 7e5d2fa..980c71b 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h @@ -5,6 +5,7 @@ #include <linux/math64.h> typedef __s64 time64_t; +typedef __u64 timeu64_t; /* * This wants to go into uapi/linux/time.h once we agreed about the diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 816b754..09168c5 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -1,7 +1,7 @@ #ifndef _LINUX_TIMEKEEPING_H #define _LINUX_TIMEKEEPING_H -#include <asm-generic/errno-base.h> +#include <linux/errno.h> /* Included from linux/ktime.h */ diff --git a/include/linux/torture.h b/include/linux/torture.h index 6685a73..a45702e 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -43,7 +43,7 @@ #define TORTURE_FLAG "-torture:" #define TOROUT_STRING(s) \ - pr_alert("%s" TORTURE_FLAG s "\n", torture_type) + pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s) #define VERBOSE_TOROUT_STRING(s) \ do { if (verbose) pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s); } while (0) #define VERBOSE_TOROUT_ERRSTRING(s) \ diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h index d3a2bb7..650f3dd 100644 --- a/include/linux/u64_stats_sync.h +++ b/include/linux/u64_stats_sync.h @@ -103,31 +103,42 @@ static inline void u64_stats_update_end_raw(struct u64_stats_sync *syncp) #endif } -static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp) +static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) return read_seqcount_begin(&syncp->seq); #else -#if BITS_PER_LONG==32 - preempt_disable(); -#endif return 0; #endif } -static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp, +static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp) +{ +#if BITS_PER_LONG==32 && !defined(CONFIG_SMP) + preempt_disable(); +#endif + return __u64_stats_fetch_begin(syncp); +} + +static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp, unsigned int start) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) return read_seqcount_retry(&syncp->seq, start); #else -#if BITS_PER_LONG==32 - preempt_enable(); -#endif return false; #endif } +static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp, + unsigned int start) +{ +#if BITS_PER_LONG==32 && !defined(CONFIG_SMP) + preempt_enable(); +#endif + return __u64_stats_fetch_retry(syncp, start); +} + /* * In case irq handlers can update u64 counters, readers can use following helpers * - SMP 32bit arches use seqcount protection, irq safe. 
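The u64_stats split above (public wrappers handling the 32-bit UP preempt/irq dance, __-prefixed helpers doing the seqcount work) keeps the usual reader pattern unchanged. For reference, a typical lockless reader looks like this; the stats structure and field are invented for the sketch, and writers bracket updates with u64_stats_update_begin()/u64_stats_update_end():

struct rx_stats {
	u64			bytes;
	struct u64_stats_sync	syncp;
};

static u64 rx_stats_read_bytes(const struct rx_stats *stats)
{
	unsigned int start;
	u64 bytes;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		bytes = stats->bytes;
	} while (u64_stats_fetch_retry(&stats->syncp, start));

	return bytes;
}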
@@ -136,27 +147,19 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp, */ static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp) { -#if BITS_PER_LONG==32 && defined(CONFIG_SMP) - return read_seqcount_begin(&syncp->seq); -#else -#if BITS_PER_LONG==32 +#if BITS_PER_LONG==32 && !defined(CONFIG_SMP) local_irq_disable(); #endif - return 0; -#endif + return __u64_stats_fetch_begin(syncp); } static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp, - unsigned int start) + unsigned int start) { -#if BITS_PER_LONG==32 && defined(CONFIG_SMP) - return read_seqcount_retry(&syncp->seq, start); -#else -#if BITS_PER_LONG==32 +#if BITS_PER_LONG==32 && !defined(CONFIG_SMP) local_irq_enable(); #endif - return false; -#endif + return __u64_stats_fetch_retry(syncp, start); } #endif /* _LINUX_U64_STATS_SYNC_H */ diff --git a/include/linux/uio.h b/include/linux/uio.h index 1b5d1cd..75b4aaf 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -76,7 +76,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page, struct iov_iter *i, unsigned long offset, size_t bytes); void iov_iter_advance(struct iov_iter *i, size_t bytes); int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes); -int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes); +#define iov_iter_fault_in_multipages_readable iov_iter_fault_in_readable size_t iov_iter_single_seg_count(const struct iov_iter *i); size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); diff --git a/include/linux/wait.h b/include/linux/wait.h index c3ff74d..2408e8d5 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -248,6 +248,8 @@ wait_queue_head_t *bit_waitqueue(void *, int); (!__builtin_constant_p(state) || \ state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \ +extern void init_wait_entry(wait_queue_t *__wait, int flags); + /* * The below macro ___wait_event() has an explicit shadow of the __ret * variable when used from the wait_event_*() macros. @@ -266,12 +268,7 @@ wait_queue_head_t *bit_waitqueue(void *, int); wait_queue_t __wait; \ long __ret = ret; /* explicit shadow */ \ \ - INIT_LIST_HEAD(&__wait.task_list); \ - if (exclusive) \ - __wait.flags = WQ_FLAG_EXCLUSIVE; \ - else \ - __wait.flags = 0; \ - \ + init_wait_entry(&__wait, exclusive ? 
WQ_FLAG_EXCLUSIVE : 0); \ for (;;) { \ long __int = prepare_to_wait_event(&wq, &__wait, state);\ \ @@ -280,12 +277,7 @@ wait_queue_head_t *bit_waitqueue(void *, int); \ if (___wait_is_interruptible(state) && __int) { \ __ret = __int; \ - if (exclusive) { \ - abort_exclusive_wait(&wq, &__wait, \ - state, NULL); \ - goto __out; \ - } \ - break; \ + goto __out; \ } \ \ cmd; \ @@ -989,7 +981,6 @@ void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state); void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state); long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state); void finish_wait(wait_queue_head_t *q, wait_queue_t *wait); -void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key); long wait_woken(wait_queue_t *wait, unsigned mode, long timeout); int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); diff --git a/include/media/cec.h b/include/media/cec.h index dc7854b..fdb5d60 100644 --- a/include/media/cec.h +++ b/include/media/cec.h @@ -57,8 +57,8 @@ struct cec_devnode { int minor; bool registered; bool unregistered; - struct mutex fhs_lock; struct list_head fhs; + struct mutex lock; }; struct cec_adapter; diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 9b4c418..fd60ecc 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -52,7 +52,7 @@ struct unix_sock { struct sock sk; struct unix_address *addr; struct path path; - struct mutex readlock; + struct mutex iolock, bindlock; struct sock *peer; struct list_head link; atomic_long_t inflight; diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 9c23f4d3..beb7610 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1102,6 +1102,7 @@ struct station_info { struct cfg80211_tid_stats pertid[IEEE80211_NUM_TIDS + 1]; }; +#if IS_ENABLED(CONFIG_CFG80211) /** * cfg80211_get_station - retrieve information about a given station * @dev: the device where the station is supposed to be connected to @@ -1114,6 +1115,14 @@ struct station_info { */ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr, struct station_info *sinfo); +#else +static inline int cfg80211_get_station(struct net_device *dev, + const u8 *mac_addr, + struct station_info *sinfo) +{ + return -ENOENT; +} +#endif /** * enum monitor_flags - monitor flags diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 4079fc1..7d4a72e 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -111,6 +111,7 @@ struct fib_info { unsigned char fib_scope; unsigned char fib_type; __be32 fib_prefsrc; + u32 fib_tb_id; u32 fib_priority; u32 *fib_metrics; #define fib_mtu fib_metrics[RTAX_MTU-1] @@ -319,7 +320,7 @@ void fib_flush_external(struct net *net); /* Exported by fib_semantics.c */ int ip_fib_check_default(__be32 gw, struct net_device *dev); int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force); -int fib_sync_down_addr(struct net *net, __be32 local); +int fib_sync_down_addr(struct net_device *dev, __be32 local); int fib_sync_up(struct net_device *dev, unsigned int nh_flags); extern u32 fib_multipath_secret __read_mostly; diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h index 6793614..e693731 100644 --- a/include/net/netfilter/nf_conntrack_synproxy.h +++ b/include/net/netfilter/nf_conntrack_synproxy.h @@ -27,6 +27,20 @@ 
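Stepping back to the wait.h hunk above: ___wait_event() now defers wait-entry setup to the new out-of-line init_wait_entry(), which also lets the interruptible-exit path drop abort_exclusive_wait(). A plausible shape for the helper, mirroring what the macro used to open-code plus the usual autoremove wake function (the real kernel/sched/wait.c body may differ), is:

void init_wait_entry(wait_queue_t *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->task_list);
}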
static inline struct nf_conn_synproxy *nfct_synproxy_ext_add(struct nf_conn *ct) #endif } +static inline bool nf_ct_add_synproxy(struct nf_conn *ct, + const struct nf_conn *tmpl) +{ + if (tmpl && nfct_synproxy(tmpl)) { + if (!nfct_seqadj_ext_add(ct)) + return false; + + if (!nfct_synproxy_ext_add(ct)) + return false; + } + + return true; +} + struct synproxy_stats { unsigned int syn_received; unsigned int cookie_invalid; diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h index d27588c..1139cde 100644 --- a/include/net/netfilter/nft_meta.h +++ b/include/net/netfilter/nft_meta.h @@ -36,4 +36,8 @@ void nft_meta_set_eval(const struct nft_expr *expr, void nft_meta_set_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr); +int nft_meta_set_validate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data); + #endif diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h index 60fa153..02e28c5 100644 --- a/include/net/netfilter/nft_reject.h +++ b/include/net/netfilter/nft_reject.h @@ -8,6 +8,10 @@ struct nft_reject { extern const struct nla_policy nft_reject_policy[]; +int nft_reject_validate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data); + int nft_reject_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]); diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index efc0174..bafe2a0 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -382,7 +382,7 @@ enum { ADDIP_SERIAL_SIGN_BIT = (1<<31) }; -static inline int ADDIP_SERIAL_gte(__u16 s, __u16 t) +static inline int ADDIP_SERIAL_gte(__u32 s, __u32 t) { return ((s) == (t)) || (((t) - (s)) & ADDIP_SERIAL_SIGN_BIT); } diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index ce93c4b..ced0df3 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -554,6 +554,9 @@ struct sctp_chunk { atomic_t refcnt; + /* How many times this chunk have been sent, for prsctp RTX policy */ + int sent_count; + /* This is our link to the per-transport transmitted list. */ struct list_head transmitted_list; @@ -603,16 +606,6 @@ struct sctp_chunk { /* This needs to be recoverable for SCTP_SEND_FAILED events. */ struct sctp_sndrcvinfo sinfo; - /* We use this field to record param for prsctp policies, - * for TTL policy, it is the time_to_drop of this chunk, - * for RTX policy, it is the max_sent_count of this chunk, - * for PRIO policy, it is the priority of this chunk. - */ - unsigned long prsctp_param; - - /* How many times this chunk have been sent, for prsctp RTX policy */ - int sent_count; - /* Which association does this belong to? */ struct sctp_association *asoc; diff --git a/include/net/sock.h b/include/net/sock.h index ff5be7e..8741988 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1332,6 +1332,16 @@ static inline void sk_mem_uncharge(struct sock *sk, int size) if (!sk_has_account(sk)) return; sk->sk_forward_alloc += size; + + /* Avoid a possible overflow. + * TCP send queues can make this happen, if sk_mem_reclaim() + * is not called and more than 2 GBytes are released at once. + * + * If we reach 2 MBytes, reclaim 1 MBytes right now, there is + * no need to hold that much forward allocation anyway. 
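Spelling out the constants behind the reclaim described in the comment above (the macro names here are invented for readability; the code below uses the raw shifts):

#define SK_FWD_ALLOC_RECLAIM_THRESHOLD	(1 << 21)	/* 2,097,152 bytes = 2 MiB of cached forward alloc */
#define SK_FWD_ALLOC_RECLAIM_AMOUNT	(1 << 20)	/* 1,048,576 bytes = 1 MiB handed back to the protocol accounting */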
+ */ + if (unlikely(sk->sk_forward_alloc >= 1 << 21)) + __sk_mem_reclaim(sk, 1 << 20); } static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) diff --git a/include/net/tcp.h b/include/net/tcp.h index c00e7d5..7717302 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1523,6 +1523,8 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli { if (sk->sk_send_head == skb_unlinked) sk->sk_send_head = NULL; + if (tcp_sk(sk)->highest_sack == skb_unlinked) + tcp_sk(sk)->highest_sack = NULL; } static inline void tcp_init_send_head(struct sock *sk) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index adfebd6..1793431 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1540,8 +1540,10 @@ int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); void xfrm4_local_error(struct sk_buff *skb, u32 mtu); int xfrm6_extract_header(struct sk_buff *skb); int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb); -int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi); +int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi, + struct ip6_tnl *t); int xfrm6_transport_finish(struct sk_buff *skb, int async); +int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t); int xfrm6_rcv(struct sk_buff *skb); int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto); diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 0dee7af..7e4cd53 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -771,12 +771,9 @@ static inline int scsi_host_in_recovery(struct Scsi_Host *shost) shost->tmf_in_progress; } -extern bool scsi_use_blk_mq; - static inline bool shost_use_blk_mq(struct Scsi_Host *shost) { - return scsi_use_blk_mq; - + return shost->use_blk_mq; } extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *); diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h index 13c0b2b..73d8709 100644 --- a/include/scsi/scsi_transport_sas.h +++ b/include/scsi/scsi_transport_sas.h @@ -11,12 +11,12 @@ struct sas_rphy; struct request; #if !IS_ENABLED(CONFIG_SCSI_SAS_ATTRS) -static inline int is_sas_attached(struct scsi_device *sdev) +static inline int scsi_is_sas_rphy(const struct device *sdev) { return 0; } #else -extern int is_sas_attached(struct scsi_device *sdev); +extern int scsi_is_sas_rphy(const struct device *); #endif static inline int sas_protocol_ata(enum sas_protocol proto) @@ -202,7 +202,6 @@ extern int sas_rphy_add(struct sas_rphy *); extern void sas_rphy_remove(struct sas_rphy *); extern void sas_rphy_delete(struct sas_rphy *); extern void sas_rphy_unlink(struct sas_rphy *); -extern int scsi_is_sas_rphy(const struct device *); struct sas_port *sas_port_alloc(struct device *, int); struct sas_port *sas_port_alloc_num(struct device *); diff --git a/include/trace/events/mce.h b/include/trace/events/mce.h index 4cbbcef..70f0214 100644 --- a/include/trace/events/mce.h +++ b/include/trace/events/mce.h @@ -20,6 +20,8 @@ TRACE_EVENT(mce_record, __field( u64, status ) __field( u64, addr ) __field( u64, misc ) + __field( u64, synd ) + __field( u64, ipid ) __field( u64, ip ) __field( u64, tsc ) __field( u64, walltime ) @@ -38,6 +40,8 @@ TRACE_EVENT(mce_record, __entry->status = m->status; __entry->addr = m->addr; __entry->misc = m->misc; + __entry->synd = m->synd; + __entry->ipid = m->ipid; __entry->ip = m->ip; __entry->tsc = m->tsc; __entry->walltime = m->time; @@ -50,11 +54,12 @@ TRACE_EVENT(mce_record, 
__entry->cpuvendor = m->cpuvendor; ), - TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016Lx, ADDR/MISC: %016Lx/%016Lx, RIP: %02x:<%016Lx>, TSC: %llx, PROCESSOR: %u:%x, TIME: %llu, SOCKET: %u, APIC: %x", + TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016Lx, IPID: %016Lx, ADDR/MISC/SYND: %016Lx/%016Lx/%016Lx, RIP: %02x:<%016Lx>, TSC: %llx, PROCESSOR: %u:%x, TIME: %llu, SOCKET: %u, APIC: %x", __entry->cpu, __entry->mcgcap, __entry->mcgstatus, __entry->bank, __entry->status, - __entry->addr, __entry->misc, + __entry->ipid, + __entry->addr, __entry->misc, __entry->synd, __entry->cs, __entry->ip, __entry->tsc, __entry->cpuvendor, __entry->cpuid, diff --git a/include/trace/events/power.h b/include/trace/events/power.h index 19e5030..54e3aad 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h @@ -69,7 +69,8 @@ TRACE_EVENT(pstate_sample, u64 mperf, u64 aperf, u64 tsc, - u32 freq + u32 freq, + u32 io_boost ), TP_ARGS(core_busy, @@ -79,7 +80,8 @@ TRACE_EVENT(pstate_sample, mperf, aperf, tsc, - freq + freq, + io_boost ), TP_STRUCT__entry( @@ -91,6 +93,7 @@ TRACE_EVENT(pstate_sample, __field(u64, aperf) __field(u64, tsc) __field(u32, freq) + __field(u32, io_boost) ), TP_fast_assign( @@ -102,9 +105,10 @@ TRACE_EVENT(pstate_sample, __entry->aperf = aperf; __entry->tsc = tsc; __entry->freq = freq; + __entry->io_boost = io_boost; ), - TP_printk("core_busy=%lu scaled=%lu from=%lu to=%lu mperf=%llu aperf=%llu tsc=%llu freq=%lu ", + TP_printk("core_busy=%lu scaled=%lu from=%lu to=%lu mperf=%llu aperf=%llu tsc=%llu freq=%lu io_boost=%lu", (unsigned long)__entry->core_busy, (unsigned long)__entry->scaled_busy, (unsigned long)__entry->from, @@ -112,7 +116,8 @@ TRACE_EVENT(pstate_sample, (unsigned long long)__entry->mperf, (unsigned long long)__entry->aperf, (unsigned long long)__entry->tsc, - (unsigned long)__entry->freq + (unsigned long)__entry->freq, + (unsigned long)__entry->io_boost ) ); diff --git a/include/uapi/linux/atm_zatm.h b/include/uapi/linux/atm_zatm.h index 9c9c6ad..5cd4d4d 100644 --- a/include/uapi/linux/atm_zatm.h +++ b/include/uapi/linux/atm_zatm.h @@ -14,6 +14,7 @@ #include <linux/atmapi.h> #include <linux/atmioc.h> +#include <linux/time.h> #define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc) /* get pool statistics */ diff --git a/include/uapi/linux/if_pppol2tp.h b/include/uapi/linux/if_pppol2tp.h index 163e8ad..4bd1f55 100644 --- a/include/uapi/linux/if_pppol2tp.h +++ b/include/uapi/linux/if_pppol2tp.h @@ -16,7 +16,8 @@ #define _UAPI__LINUX_IF_PPPOL2TP_H #include <linux/types.h> - +#include <linux/in.h> +#include <linux/in6.h> /* Structure used to connect() the socket to a particular tunnel UDP * socket over IPv4. 
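The header-include additions in if_pppol2tp.h here, and in if_pppox.h and if_tunnel.h just below, are about making the UAPI headers self-contained. A userspace sanity check along these lines should now compile without pulling in netinet/in.h first (a sketch; exact behaviour depends on the installed libc and kernel headers):

#include <sys/socket.h>
#include <linux/if_pppol2tp.h>

int main(void)
{
	struct pppol2tp_addr addr = { 0 };

	(void)addr;
	return 0;
}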
diff --git a/include/uapi/linux/if_pppox.h b/include/uapi/linux/if_pppox.h index e128769..d37bbb1 100644 --- a/include/uapi/linux/if_pppox.h +++ b/include/uapi/linux/if_pppox.h @@ -21,8 +21,11 @@ #include <asm/byteorder.h> #include <linux/socket.h> +#include <linux/if.h> #include <linux/if_ether.h> #include <linux/if_pppol2tp.h> +#include <linux/in.h> +#include <linux/in6.h> /* For user-space programs to pick up these definitions * which they wouldn't get otherwise without defining __KERNEL__ diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h index 1046f55..777b6cd 100644 --- a/include/uapi/linux/if_tunnel.h +++ b/include/uapi/linux/if_tunnel.h @@ -2,6 +2,9 @@ #define _UAPI_IF_TUNNEL_H_ #include <linux/types.h> +#include <linux/if.h> +#include <linux/ip.h> +#include <linux/in6.h> #include <asm/byteorder.h> diff --git a/include/uapi/linux/ipx.h b/include/uapi/linux/ipx.h index 3d48014..30f031d 100644 --- a/include/uapi/linux/ipx.h +++ b/include/uapi/linux/ipx.h @@ -1,11 +1,13 @@ #ifndef _IPX_H_ #define _IPX_H_ +#include <linux/libc-compat.h> /* for compatibility with glibc netipx/ipx.h */ #include <linux/types.h> #include <linux/sockios.h> #include <linux/socket.h> #define IPX_NODE_LEN 6 #define IPX_MTU 576 +#if __UAPI_DEF_SOCKADDR_IPX struct sockaddr_ipx { __kernel_sa_family_t sipx_family; __be16 sipx_port; @@ -14,6 +16,7 @@ struct sockaddr_ipx { __u8 sipx_type; unsigned char sipx_zero; /* 16 byte fill */ }; +#endif /* __UAPI_DEF_SOCKADDR_IPX */ /* * So we can fit the extra info for SIOCSIFADDR into the address nicely @@ -23,12 +26,15 @@ struct sockaddr_ipx { #define IPX_DLTITF 0 #define IPX_CRTITF 1 +#if __UAPI_DEF_IPX_ROUTE_DEFINITION struct ipx_route_definition { __be32 ipx_network; __be32 ipx_router_network; unsigned char ipx_router_node[IPX_NODE_LEN]; }; +#endif /* __UAPI_DEF_IPX_ROUTE_DEFINITION */ +#if __UAPI_DEF_IPX_INTERFACE_DEFINITION struct ipx_interface_definition { __be32 ipx_network; unsigned char ipx_device[16]; @@ -45,16 +51,20 @@ struct ipx_interface_definition { #define IPX_INTERNAL 2 unsigned char ipx_node[IPX_NODE_LEN]; }; - +#endif /* __UAPI_DEF_IPX_INTERFACE_DEFINITION */ + +#if __UAPI_DEF_IPX_CONFIG_DATA struct ipx_config_data { unsigned char ipxcfg_auto_select_primary; unsigned char ipxcfg_auto_create_interfaces; }; +#endif /* __UAPI_DEF_IPX_CONFIG_DATA */ /* * OLD Route Definition for backward compatibility. */ +#if __UAPI_DEF_IPX_ROUTE_DEF struct ipx_route_def { __be32 ipx_network; __be32 ipx_router_network; @@ -67,6 +77,7 @@ struct ipx_route_def { #define IPX_RT_BLUEBOOK 2 #define IPX_RT_ROUTED 1 }; +#endif /* __UAPI_DEF_IPX_ROUTE_DEF */ #define SIOCAIPXITFCRT (SIOCPROTOPRIVATE) #define SIOCAIPXPRISLT (SIOCPROTOPRIVATE + 1) diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h index e4f048e..44b8a6b 100644 --- a/include/uapi/linux/libc-compat.h +++ b/include/uapi/linux/libc-compat.h @@ -139,6 +139,25 @@ #endif /* _NETINET_IN_H */ +/* Coordinate with glibc netipx/ipx.h header. 
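The coordination block that this comment opens (continued just below) keys on glibc's __NETIPX_IPX_H guard so that userspace can mix the two IPX headers without duplicate definitions. A sketch of the include order it is meant to make safe, assuming glibc's netipx/ipx.h is installed:

#include <sys/socket.h>
#include <netipx/ipx.h>	/* glibc definitions win; defines __NETIPX_IPX_H */
#include <linux/ipx.h>	/* kernel copies compiled out via __UAPI_DEF_*IPX* == 0 */

int main(void)
{
	struct sockaddr_ipx sipx = { .sipx_family = AF_IPX };

	(void)sipx;
	return 0;
}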
*/ +#if defined(__NETIPX_IPX_H) + +#define __UAPI_DEF_SOCKADDR_IPX 0 +#define __UAPI_DEF_IPX_ROUTE_DEFINITION 0 +#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 0 +#define __UAPI_DEF_IPX_CONFIG_DATA 0 +#define __UAPI_DEF_IPX_ROUTE_DEF 0 + +#else /* defined(__NETIPX_IPX_H) */ + +#define __UAPI_DEF_SOCKADDR_IPX 1 +#define __UAPI_DEF_IPX_ROUTE_DEFINITION 1 +#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 1 +#define __UAPI_DEF_IPX_CONFIG_DATA 1 +#define __UAPI_DEF_IPX_ROUTE_DEF 1 + +#endif /* defined(__NETIPX_IPX_H) */ + /* Definitions for xattr.h */ #if defined(_SYS_XATTR_H) #define __UAPI_DEF_XATTR 0 @@ -179,6 +198,13 @@ #define __UAPI_DEF_IN6_PKTINFO 1 #define __UAPI_DEF_IP6_MTUINFO 1 +/* Definitions for ipx.h */ +#define __UAPI_DEF_SOCKADDR_IPX 1 +#define __UAPI_DEF_IPX_ROUTE_DEFINITION 1 +#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 1 +#define __UAPI_DEF_IPX_CONFIG_DATA 1 +#define __UAPI_DEF_IPX_ROUTE_DEF 1 + /* Definitions for xattr.h */ #define __UAPI_DEF_XATTR 1 diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index d95a301..54c3b4f 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -583,7 +583,7 @@ enum ovs_userspace_attr { #define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1) struct ovs_action_trunc { - uint32_t max_len; /* Max packet size in bytes. */ + __u32 max_len; /* Max packet size in bytes. */ }; /** @@ -632,8 +632,8 @@ enum ovs_hash_alg { * @hash_basis: basis used for computing hash. */ struct ovs_action_hash { - uint32_t hash_alg; /* One of ovs_hash_alg. */ - uint32_t hash_basis; + __u32 hash_alg; /* One of ovs_hash_alg. */ + __u32 hash_basis; }; /** diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h index f184909..a4c4d73 100644 --- a/include/xen/interface/sched.h +++ b/include/xen/interface/sched.h @@ -3,6 +3,24 @@ * * Scheduler state interactions * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * * Copyright (c) 2005, Keir Fraser <keir@xensource.com> */ @@ -12,18 +30,30 @@ #include <xen/interface/event_channel.h> /* + * Guest Scheduler Operations + * + * The SCHEDOP interface provides mechanisms for a guest to interact + * with the scheduler, including yield, blocking and shutting itself + * down. + */ + +/* * The prototype for this hypercall is: - * long sched_op_new(int cmd, void *arg) + * long HYPERVISOR_sched_op(enum sched_op cmd, void *arg, ...) + * * @cmd == SCHEDOP_??? (scheduler operation). 
* @arg == Operation-specific extra argument(s), as described below. + * ... == Additional Operation-specific extra arguments, described below. * - * **NOTE**: - * Versions of Xen prior to 3.0.2 provide only the following legacy version + * Versions of Xen prior to 3.0.2 provided only the following legacy version * of this hypercall, supporting only the commands yield, block and shutdown: * long sched_op(int cmd, unsigned long arg) * @cmd == SCHEDOP_??? (scheduler operation). * @arg == 0 (SCHEDOP_yield and SCHEDOP_block) * == SHUTDOWN_* code (SCHEDOP_shutdown) + * + * This legacy version is available to new guests as: + * long HYPERVISOR_sched_op_compat(enum sched_op cmd, unsigned long arg) */ /* @@ -44,12 +74,17 @@ /* * Halt execution of this domain (all VCPUs) and notify the system controller. * @arg == pointer to sched_shutdown structure. + * + * If the sched_shutdown_t reason is SHUTDOWN_suspend then + * x86 PV guests must also set RDX (EDX for 32-bit guests) to the MFN + * of the guest's start info page. RDX/EDX is the third hypercall + * argument. + * + * In addition, which reason is SHUTDOWN_suspend this hypercall + * returns 1 if suspend was cancelled or the domain was merely + * checkpointed, and 0 if it is resuming in a new domain. */ #define SCHEDOP_shutdown 2 -struct sched_shutdown { - unsigned int reason; /* SHUTDOWN_* */ -}; -DEFINE_GUEST_HANDLE_STRUCT(sched_shutdown); /* * Poll a set of event-channel ports. Return when one or more are pending. An @@ -57,12 +92,6 @@ DEFINE_GUEST_HANDLE_STRUCT(sched_shutdown); * @arg == pointer to sched_poll structure. */ #define SCHEDOP_poll 3 -struct sched_poll { - GUEST_HANDLE(evtchn_port_t) ports; - unsigned int nr_ports; - uint64_t timeout; -}; -DEFINE_GUEST_HANDLE_STRUCT(sched_poll); /* * Declare a shutdown for another domain. The main use of this function is @@ -71,15 +100,11 @@ DEFINE_GUEST_HANDLE_STRUCT(sched_poll); * @arg == pointer to sched_remote_shutdown structure. */ #define SCHEDOP_remote_shutdown 4 -struct sched_remote_shutdown { - domid_t domain_id; /* Remote domain ID */ - unsigned int reason; /* SHUTDOWN_xxx reason */ -}; /* * Latch a shutdown code, so that when the domain later shuts down it * reports this code to the control tools. - * @arg == as for SCHEDOP_shutdown. + * @arg == sched_shutdown, as for SCHEDOP_shutdown. */ #define SCHEDOP_shutdown_code 5 @@ -92,10 +117,47 @@ struct sched_remote_shutdown { * With id != 0 and timeout != 0, poke watchdog timer and set new timeout. */ #define SCHEDOP_watchdog 6 + +/* + * Override the current vcpu affinity by pinning it to one physical cpu or + * undo this override restoring the previous affinity. + * @arg == pointer to sched_pin_override structure. + * + * A negative pcpu value will undo a previous pin override and restore the + * previous cpu affinity. + * This call is allowed for the hardware domain only and requires the cpu + * to be part of the domain's cpupool. 
+ */ +#define SCHEDOP_pin_override 7 + +struct sched_shutdown { + unsigned int reason; /* SHUTDOWN_* => shutdown reason */ +}; +DEFINE_GUEST_HANDLE_STRUCT(sched_shutdown); + +struct sched_poll { + GUEST_HANDLE(evtchn_port_t) ports; + unsigned int nr_ports; + uint64_t timeout; +}; +DEFINE_GUEST_HANDLE_STRUCT(sched_poll); + +struct sched_remote_shutdown { + domid_t domain_id; /* Remote domain ID */ + unsigned int reason; /* SHUTDOWN_* => shutdown reason */ +}; +DEFINE_GUEST_HANDLE_STRUCT(sched_remote_shutdown); + struct sched_watchdog { uint32_t id; /* watchdog ID */ uint32_t timeout; /* timeout */ }; +DEFINE_GUEST_HANDLE_STRUCT(sched_watchdog); + +struct sched_pin_override { + int32_t pcpu; +}; +DEFINE_GUEST_HANDLE_STRUCT(sched_pin_override); /* * Reason codes for SCHEDOP_shutdown. These may be interpreted by control @@ -107,6 +169,7 @@ struct sched_watchdog { #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */ #define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */ + /* * Domain asked to perform 'soft reset' for it. The expected behavior is to * reset internal Xen state for the domain returning it to the point where it @@ -115,5 +178,6 @@ struct sched_watchdog { * interfaces again. */ #define SHUTDOWN_soft_reset 5 +#define SHUTDOWN_MAX 5 /* Maximum valid shutdown reason. */ #endif /* __XEN_PUBLIC_SCHED_H__ */
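To round off the SCHEDOP additions above, this is roughly how a Linux guest issues these operations through its existing hypercall wrapper; a sketch only, with error handling omitted and HYPERVISOR_sched_op() assumed to be the usual (cmd, arg) wrapper from asm/xen/hypercall.h:

#include <xen/interface/sched.h>
#include <asm/xen/hypercall.h>

/* Ask Xen to power this domain off. */
static void sketch_poweroff(void)
{
	struct sched_shutdown shutdown = { .reason = SHUTDOWN_poweroff };

	HYPERVISOR_sched_op(SCHEDOP_shutdown, &shutdown);
}

/* Pin the calling vCPU to physical CPU 0, then undo the override
 * (hardware domain only, per the SCHEDOP_pin_override comment above). */
static void sketch_pin_override(void)
{
	struct sched_pin_override pin = { .pcpu = 0 };

	HYPERVISOR_sched_op(SCHEDOP_pin_override, &pin);

	pin.pcpu = -1;	/* a negative pcpu restores the previous affinity */
	HYPERVISOR_sched_op(SCHEDOP_pin_override, &pin);
}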