Diffstat (limited to 'drivers'): 725 files changed, 18986 insertions, 10208 deletions
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index ecb26b4..1567028 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -19,12 +19,12 @@ obj-y += acpi.o \ # All the builtin files are in the "acpi." module_param namespace. acpi-y += osl.o utils.o reboot.o -acpi-y += atomicio.o +acpi-y += nvs.o # sleep related files acpi-y += wakeup.o acpi-y += sleep.o -acpi-$(CONFIG_ACPI_SLEEP) += proc.o nvs.o +acpi-$(CONFIG_ACPI_SLEEP) += proc.o # diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile index 301bd2d..0ca208b 100644 --- a/drivers/acpi/acpica/Makefile +++ b/drivers/acpi/acpica/Makefile @@ -8,41 +8,151 @@ ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT # use acpi.o to put all files here into acpi.o modparam namespace obj-y += acpi.o -acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \ - dsmethod.o dsobject.o dsutils.o dswload.o dswstate.o \ - dsinit.o dsargs.o dscontrol.o dswload2.o +acpi-y := \ + dsargs.o \ + dscontrol.o \ + dsfield.o \ + dsinit.o \ + dsmethod.o \ + dsmthdat.o \ + dsobject.o \ + dsopcode.o \ + dsutils.o \ + dswexec.o \ + dswload.o \ + dswload2.o \ + dswscope.o \ + dswstate.o -acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \ - evmisc.o evrgnini.o evxface.o evxfregn.o \ - evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o evglock.o +acpi-y += \ + evevent.o \ + evgpe.o \ + evgpeblk.o \ + evgpeinit.o \ + evgpeutil.o \ + evglock.o \ + evmisc.o \ + evregion.o \ + evrgnini.o \ + evsci.o \ + evxface.o \ + evxfevnt.o \ + evxfgpe.o \ + evxfregn.o -acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\ - exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\ - excreate.o exmisc.o exoparg2.o exregion.o exstore.o exutils.o \ - exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o exdebug.o +acpi-y += \ + exconfig.o \ + exconvrt.o \ + excreate.o \ + exdebug.o \ + exdump.o \ + exfield.o \ + exfldio.o \ + exmutex.o \ + exnames.o \ + exoparg1.o \ + exoparg2.o \ + exoparg3.o \ + exoparg6.o \ + exprep.o \ + exmisc.o \ + exregion.o \ + exresnte.o \ + exresolv.o \ + exresop.o \ + exstore.o \ + exstoren.o \ + exstorob.o \ + exsystem.o \ + exutils.o -acpi-y += hwacpi.o hwgpe.o hwregs.o hwsleep.o hwxface.o hwvalid.o hwpci.o +acpi-y += \ + hwacpi.o \ + hwgpe.o \ + hwpci.o \ + hwregs.o \ + hwsleep.o \ + hwvalid.o \ + hwxface.o acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o -acpi-y += nsaccess.o nsload.o nssearch.o nsxfeval.o \ - nsalloc.o nseval.o nsnames.o nsutils.o nsxfname.o \ - nsdump.o nsinit.o nsobject.o nswalk.o nsxfobj.o \ - nsparse.o nspredef.o nsrepair.o nsrepair2.o +acpi-y += \ + nsaccess.o \ + nsalloc.o \ + nsdump.o \ + nseval.o \ + nsinit.o \ + nsload.o \ + nsnames.o \ + nsobject.o \ + nsparse.o \ + nspredef.o \ + nsrepair.o \ + nsrepair2.o \ + nssearch.o \ + nsutils.o \ + nswalk.o \ + nsxfeval.o \ + nsxfname.o \ + nsxfobj.o acpi-$(ACPI_FUTURE_USAGE) += nsdumpdv.o -acpi-y += psargs.o psparse.o psloop.o pstree.o pswalk.o \ - psopcode.o psscope.o psutils.o psxface.o +acpi-y += \ + psargs.o \ + psloop.o \ + psopcode.o \ + psparse.o \ + psscope.o \ + pstree.o \ + psutils.o \ + pswalk.o \ + psxface.o -acpi-y += rsaddr.o rscreate.o rsinfo.o rsio.o rslist.o rsmisc.o rsxface.o \ - rscalc.o rsirq.o rsmemory.o rsutils.o +acpi-y += \ + rsaddr.o \ + rscalc.o \ + rscreate.o \ + rsinfo.o \ + rsio.o \ + rsirq.o \ + rslist.o \ + rsmemory.o \ + rsmisc.o \ + rsserial.o \ + rsutils.o \ + rsxface.o acpi-$(ACPI_FUTURE_USAGE) += rsdump.o -acpi-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o +acpi-y += \ + 
tbfadt.o \ + tbfind.o \ + tbinstal.o \ + tbutils.o \ + tbxface.o \ + tbxfroot.o -acpi-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \ - utcopy.o utdelete.o utglobal.o utmath.o utobject.o \ - utstate.o utmutex.o utobject.o utresrc.o utlock.o utids.o \ - utosi.o utxferror.o utdecode.o +acpi-y += \ + utaddress.o \ + utalloc.o \ + utcopy.o \ + utdebug.o \ + utdecode.o \ + utdelete.o \ + uteval.o \ + utglobal.o \ + utids.o \ + utinit.o \ + utlock.o \ + utmath.o \ + utmisc.o \ + utmutex.o \ + utobject.o \ + utosi.o \ + utresrc.o \ + utstate.o \ + utxface.o \ + utxferror.o \ + utxfmutex.o diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h index e0ba17f..a44bd42 100644 --- a/drivers/acpi/acpica/accommon.h +++ b/drivers/acpi/acpica/accommon.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h index f895a24..1f30af6 100644 --- a/drivers/acpi/acpica/acconfig.h +++ b/drivers/acpi/acpica/acconfig.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -123,6 +123,10 @@ #define ACPI_MAX_SLEEP 2000 /* Two seconds */ +/* Address Range lists are per-space_id (Memory and I/O only) */ + +#define ACPI_ADDRESS_RANGE_MAX 2 + /****************************************************************************** * * ACPI Specification constants (Do not change unless the specification changes) @@ -202,9 +206,10 @@ #define ACPI_RSDP_CHECKSUM_LENGTH 20 #define ACPI_RSDP_XCHECKSUM_LENGTH 36 -/* SMBus and IPMI bidirectional buffer size */ +/* SMBus, GSBus and IPMI bidirectional buffer size */ #define ACPI_SMBUS_BUFFER_SIZE 34 +#define ACPI_GSBUS_BUFFER_SIZE 34 #define ACPI_IPMI_BUFFER_SIZE 66 /* _sx_d and _sx_w control methods */ diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h index eb0b1f8..deaa819 100644 --- a/drivers/acpi/acpica/acdebug.h +++ b/drivers/acpi/acpica/acdebug.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h index 2d1b7ff..5935ba6 100644 --- a/drivers/acpi/acpica/acdispat.h +++ b/drivers/acpi/acpica/acdispat.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index bea3b48..c53caa5 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -162,6 +162,7 @@ acpi_status acpi_ev_initialize_op_regions(void); acpi_status acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, + union acpi_operand_object *field_obj, u32 function, u32 region_offset, u32 bit_width, u64 *value); diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index e6652d7..2853f76 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -140,8 +140,19 @@ u32 acpi_gbl_trace_flags; acpi_name acpi_gbl_trace_method_name; u8 acpi_gbl_system_awake_and_running; +/* + * ACPI 5.0 introduces the concept of a "reduced hardware platform", meaning + * that the ACPI hardware is no longer required. A flag in the FADT indicates + * a reduced HW machine, and that flag is duplicated here for convenience. + */ +u8 acpi_gbl_reduced_hardware; + #endif +/* Do not disassemble buffers to resource descriptors */ + +ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_no_resource_disassembly, FALSE); + /***************************************************************************** * * Debug support @@ -207,7 +218,7 @@ ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock; /***************************************************************************** * - * Mutual exlusion within ACPICA subsystem + * Mutual exclusion within ACPICA subsystem * ****************************************************************************/ @@ -295,6 +306,8 @@ ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present; ACPI_EXTERN u8 acpi_gbl_events_initialized; ACPI_EXTERN u8 acpi_gbl_osi_data; ACPI_EXTERN struct acpi_interface_info *acpi_gbl_supported_interfaces; +ACPI_EXTERN struct acpi_address_range + *acpi_gbl_address_range_list[ACPI_ADDRESS_RANGE_MAX]; #ifndef DEFINE_ACPI_GLOBALS diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h index e7213be..677793e 100644 --- a/drivers/acpi/acpica/achware.h +++ b/drivers/acpi/acpica/achware.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h index 3731e1c..eb30863 100644 --- a/drivers/acpi/acpica/acinterp.h +++ b/drivers/acpi/acpica/acinterp.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -468,6 +468,8 @@ void acpi_ex_eisa_id_to_string(char *dest, u64 compressed_id); void acpi_ex_integer_to_string(char *dest, u64 value); +u8 acpi_is_valid_space_id(u8 space_id); + /* * exregion - default op_region handlers */ diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 5552125..3f24068 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. 
+ * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -53,7 +53,7 @@ typedef u32 acpi_mutex_handle; /* Total number of aml opcodes defined */ -#define AML_NUM_OPCODES 0x7F +#define AML_NUM_OPCODES 0x81 /* Forward declarations */ @@ -249,12 +249,16 @@ struct acpi_create_field_info { struct acpi_namespace_node *field_node; struct acpi_namespace_node *register_node; struct acpi_namespace_node *data_register_node; + struct acpi_namespace_node *connection_node; + u8 *resource_buffer; u32 bank_value; u32 field_bit_position; u32 field_bit_length; + u16 resource_length; u8 field_flags; u8 attribute; u8 field_type; + u8 access_length; }; typedef @@ -315,7 +319,8 @@ struct acpi_name_info { /* * Used for ACPI_PTYPE1_FIXED, ACPI_PTYPE1_VAR, ACPI_PTYPE2, - * ACPI_PTYPE2_MIN, ACPI_PTYPE2_PKG_COUNT, ACPI_PTYPE2_COUNT + * ACPI_PTYPE2_MIN, ACPI_PTYPE2_PKG_COUNT, ACPI_PTYPE2_COUNT, + * ACPI_PTYPE2_FIX_VAR */ struct acpi_package_info { u8 type; @@ -625,6 +630,15 @@ union acpi_generic_state { typedef acpi_status(*ACPI_EXECUTE_OP) (struct acpi_walk_state * walk_state); +/* Address Range info block */ + +struct acpi_address_range { + struct acpi_address_range *next; + struct acpi_namespace_node *region_node; + acpi_physical_address start_address; + acpi_physical_address end_address; +}; + /***************************************************************************** * * Parser typedefs and structs @@ -951,7 +965,7 @@ struct acpi_port_info { #define ACPI_RESOURCE_NAME_END_DEPENDENT 0x38 #define ACPI_RESOURCE_NAME_IO 0x40 #define ACPI_RESOURCE_NAME_FIXED_IO 0x48 -#define ACPI_RESOURCE_NAME_RESERVED_S1 0x50 +#define ACPI_RESOURCE_NAME_FIXED_DMA 0x50 #define ACPI_RESOURCE_NAME_RESERVED_S2 0x58 #define ACPI_RESOURCE_NAME_RESERVED_S3 0x60 #define ACPI_RESOURCE_NAME_RESERVED_S4 0x68 @@ -973,7 +987,9 @@ struct acpi_port_info { #define ACPI_RESOURCE_NAME_EXTENDED_IRQ 0x89 #define ACPI_RESOURCE_NAME_ADDRESS64 0x8A #define ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 0x8B -#define ACPI_RESOURCE_NAME_LARGE_MAX 0x8B +#define ACPI_RESOURCE_NAME_GPIO 0x8C +#define ACPI_RESOURCE_NAME_SERIAL_BUS 0x8E +#define ACPI_RESOURCE_NAME_LARGE_MAX 0x8E /***************************************************************************** * diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h index b7491ee..ef338a9 100644 --- a/drivers/acpi/acpica/acmacros.h +++ b/drivers/acpi/acpica/acmacros.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h index 79a598c..2c9e0f0 100644 --- a/drivers/acpi/acpica/acnamesp.h +++ b/drivers/acpi/acpica/acnamesp.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h index 1055769..c065078 100644 --- a/drivers/acpi/acpica/acobject.h +++ b/drivers/acpi/acpica/acobject.h @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. 
+ * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -254,6 +254,7 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO}; u32 base_byte_offset; /* Byte offset within containing object */\ u32 value; /* Value to store into the Bank or Index register */\ u8 start_field_bit_offset;/* Bit offset within first field datum (0-63) */\ + u8 access_length; /* For serial regions/fields */ struct acpi_object_field_common { /* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */ @@ -261,7 +262,9 @@ struct acpi_object_field_common { /* COMMON FIELD (for BUFFER, REGION, BANK, and }; struct acpi_object_region_field { - ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Containing op_region object */ + ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length; + union acpi_operand_object *region_obj; /* Containing op_region object */ + u8 *resource_buffer; /* resource_template for serial regions/fields */ }; struct acpi_object_bank_field { @@ -358,6 +361,7 @@ typedef enum { */ struct acpi_object_extra { ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *method_REG; /* _REG method for this region (if any) */ + struct acpi_namespace_node *scope_node; void *region_context; /* Region-specific data */ u8 *aml_start; u32 aml_length; diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h index bb2ccfa..9440d05 100644 --- a/drivers/acpi/acpica/acopcode.h +++ b/drivers/acpi/acpica/acopcode.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -93,6 +93,7 @@ #define ARGP_CONCAT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET) #define ARGP_CONCAT_RES_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET) #define ARGP_COND_REF_OF_OP ARGP_LIST2 (ARGP_SUPERNAME, ARGP_SUPERNAME) +#define ARGP_CONNECTFIELD_OP ARGP_LIST1 (ARGP_NAMESTRING) #define ARGP_CONTINUE_OP ARG_NONE #define ARGP_COPY_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_SIMPLENAME) #define ARGP_CREATE_BIT_FIELD_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_NAME) @@ -164,6 +165,7 @@ #define ARGP_RETURN_OP ARGP_LIST1 (ARGP_TERMARG) #define ARGP_REVISION_OP ARG_NONE #define ARGP_SCOPE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_NAME, ARGP_TERMLIST) +#define ARGP_SERIALFIELD_OP ARGP_LIST1 (ARGP_NAMESTRING) #define ARGP_SHIFT_LEFT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET) #define ARGP_SHIFT_RIGHT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET) #define ARGP_SIGNAL_OP ARGP_LIST1 (ARGP_SUPERNAME) @@ -223,6 +225,7 @@ #define ARGI_CONCAT_OP ARGI_LIST3 (ARGI_COMPUTEDATA,ARGI_COMPUTEDATA, ARGI_TARGETREF) #define ARGI_CONCAT_RES_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_BUFFER, ARGI_TARGETREF) #define ARGI_COND_REF_OF_OP ARGI_LIST2 (ARGI_OBJECT_REF, ARGI_TARGETREF) +#define ARGI_CONNECTFIELD_OP ARGI_INVALID_OPCODE #define ARGI_CONTINUE_OP ARGI_INVALID_OPCODE #define ARGI_COPY_OP ARGI_LIST2 (ARGI_ANYTYPE, ARGI_SIMPLE_TARGET) #define ARGI_CREATE_BIT_FIELD_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_REFERENCE) @@ -294,6 +297,7 @@ #define ARGI_RETURN_OP ARGI_INVALID_OPCODE #define ARGI_REVISION_OP ARG_NONE #define ARGI_SCOPE_OP ARGI_INVALID_OPCODE +#define ARGI_SERIALFIELD_OP ARGI_INVALID_OPCODE #define ARGI_SHIFT_LEFT_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF) 
#define ARGI_SHIFT_RIGHT_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF) #define ARGI_SIGNAL_OP ARGI_LIST1 (ARGI_EVENT) diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h index 5ea1e06..b725d78 100644 --- a/drivers/acpi/acpica/acparser.h +++ b/drivers/acpi/acpica/acparser.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h index c445cca..bbb34c9 100644 --- a/drivers/acpi/acpica/acpredef.h +++ b/drivers/acpi/acpica/acpredef.h @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -94,6 +94,14 @@ * ACPI_PTYPE2_REV_FIXED: Revision at start, each subpackage is Fixed-length * (Used for _ART, _FPS) * + * ACPI_PTYPE2_FIX_VAR: Each subpackage consists of some fixed-length elements + * followed by an optional element + * object type + * count + * object type + * count = 0 (optional) + * (Used for _DLM) + * *****************************************************************************/ enum acpi_return_package_types { @@ -105,7 +113,8 @@ enum acpi_return_package_types { ACPI_PTYPE2_PKG_COUNT = 6, ACPI_PTYPE2_FIXED = 7, ACPI_PTYPE2_MIN = 8, - ACPI_PTYPE2_REV_FIXED = 9 + ACPI_PTYPE2_REV_FIXED = 9, + ACPI_PTYPE2_FIX_VAR = 10 }; #ifdef ACPI_CREATE_PREDEFINED_TABLE @@ -154,6 +163,7 @@ static const union acpi_predefined_info predefined_names[] = {{"_AC8", 0, ACPI_RTYPE_INTEGER}}, {{"_AC9", 0, ACPI_RTYPE_INTEGER}}, {{"_ADR", 0, ACPI_RTYPE_INTEGER}}, + {{"_AEI", 0, ACPI_RTYPE_BUFFER}}, {{"_AL0", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}}, @@ -229,6 +239,13 @@ static const union acpi_predefined_info predefined_names[] = {{"_CID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Strs) */ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING, 0,0}, 0,0}}, + {{"_CLS", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (3 Int) */ + {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}}, + + {{"_CPC", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Bufs) */ + {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER, 0, 0}, 0, + 0}}, + {{"_CRS", 0, ACPI_RTYPE_BUFFER}}, {{"_CRT", 0, ACPI_RTYPE_INTEGER}}, {{"_CSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n-1 Int) */ @@ -237,12 +254,21 @@ static const union acpi_predefined_info predefined_names[] = {{"_CST", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n Pkg (1 Buf/3 Int) */ {{{ACPI_PTYPE2_PKG_COUNT,ACPI_RTYPE_BUFFER, 1, ACPI_RTYPE_INTEGER}, 3,0}}, + {{"_CWS", 1, ACPI_RTYPE_INTEGER}}, {{"_DCK", 1, ACPI_RTYPE_INTEGER}}, {{"_DCS", 0, ACPI_RTYPE_INTEGER}}, {{"_DDC", 1, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER}}, {{"_DDN", 0, ACPI_RTYPE_STRING}}, + {{"_DEP", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ + {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}}, + {{"_DGS", 0, ACPI_RTYPE_INTEGER}}, {{"_DIS", 0, 0}}, + + {{"_DLM", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (1 Ref, 0/1 Optional Buf/Ref) */ + {{{ACPI_PTYPE2_FIX_VAR, ACPI_RTYPE_REFERENCE, 1, + 
ACPI_RTYPE_REFERENCE | ACPI_RTYPE_BUFFER}, 0, 0}}, + {{"_DMA", 0, ACPI_RTYPE_BUFFER}}, {{"_DOD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints) */ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0,0}, 0,0}}, @@ -262,6 +288,7 @@ static const union acpi_predefined_info predefined_names[] = {{"_EJ3", 1, 0}}, {{"_EJ4", 1, 0}}, {{"_EJD", 0, ACPI_RTYPE_STRING}}, + {{"_EVT", 1, 0}}, {{"_FDE", 0, ACPI_RTYPE_BUFFER}}, {{"_FDI", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (16 Int) */ {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16,0}, 0,0}}, @@ -281,14 +308,17 @@ static const union acpi_predefined_info predefined_names[] = {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}}, {{"_GAI", 0, ACPI_RTYPE_INTEGER}}, + {{"_GCP", 0, ACPI_RTYPE_INTEGER}}, {{"_GHL", 0, ACPI_RTYPE_INTEGER}}, {{"_GLK", 0, ACPI_RTYPE_INTEGER}}, {{"_GPD", 0, ACPI_RTYPE_INTEGER}}, {{"_GPE", 0, ACPI_RTYPE_INTEGER}}, /* _GPE method, not _GPE scope */ + {{"_GRT", 0, ACPI_RTYPE_BUFFER}}, {{"_GSB", 0, ACPI_RTYPE_INTEGER}}, {{"_GTF", 0, ACPI_RTYPE_BUFFER}}, {{"_GTM", 0, ACPI_RTYPE_BUFFER}}, {{"_GTS", 1, 0}}, + {{"_GWS", 1, ACPI_RTYPE_INTEGER}}, {{"_HID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING}}, {{"_HOT", 0, ACPI_RTYPE_INTEGER}}, {{"_HPP", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (4 Int) */ @@ -303,6 +333,7 @@ static const union acpi_predefined_info predefined_names[] = {{"_HPX", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (var Ints) */ {{{ACPI_PTYPE2_MIN, ACPI_RTYPE_INTEGER, 5,0}, 0,0}}, + {{"_HRV", 0, ACPI_RTYPE_INTEGER}}, {{"_IFT", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */ {{"_INI", 0, 0}}, {{"_IRC", 0, 0}}, @@ -361,6 +392,9 @@ static const union acpi_predefined_info predefined_names[] = {{"_PR3", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}}, + {{"_PRE", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ + {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}}, + {{"_PRL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}}, @@ -391,6 +425,7 @@ static const union acpi_predefined_info predefined_names[] = {{"_PSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (5 Int) with count */ {{{ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER,0,0}, 0,0}}, + {{"_PSE", 1, 0}}, {{"_PSL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}}, @@ -457,6 +492,7 @@ static const union acpi_predefined_info predefined_names[] = {{"_SLI", 0, ACPI_RTYPE_BUFFER}}, {{"_SPD", 1, ACPI_RTYPE_INTEGER}}, {{"_SRS", 1, 0}}, + {{"_SRT", 1, ACPI_RTYPE_INTEGER}}, {{"_SRV", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */ {{"_SST", 1, 0}}, {{"_STA", 0, ACPI_RTYPE_INTEGER}}, @@ -464,6 +500,7 @@ static const union acpi_predefined_info predefined_names[] = {{"_STP", 2, ACPI_RTYPE_INTEGER}}, {{"_STR", 0, ACPI_RTYPE_BUFFER}}, {{"_STV", 2, ACPI_RTYPE_INTEGER}}, + {{"_SUB", 0, ACPI_RTYPE_STRING}}, {{"_SUN", 0, ACPI_RTYPE_INTEGER}}, {{"_SWS", 0, ACPI_RTYPE_INTEGER}}, {{"_TC1", 0, ACPI_RTYPE_INTEGER}}, diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h index f08b55b..0347d09 100644 --- a/drivers/acpi/acpica/acresrc.h +++ b/drivers/acpi/acpica/acresrc.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -73,28 +73,40 @@ typedef const struct acpi_rsconvert_info { /* Resource conversion opcodes */ -#define ACPI_RSC_INITGET 0 -#define ACPI_RSC_INITSET 1 -#define ACPI_RSC_FLAGINIT 2 -#define ACPI_RSC_1BITFLAG 3 -#define ACPI_RSC_2BITFLAG 4 -#define ACPI_RSC_COUNT 5 -#define ACPI_RSC_COUNT16 6 -#define ACPI_RSC_LENGTH 7 -#define ACPI_RSC_MOVE8 8 -#define ACPI_RSC_MOVE16 9 -#define ACPI_RSC_MOVE32 10 -#define ACPI_RSC_MOVE64 11 -#define ACPI_RSC_SET8 12 -#define ACPI_RSC_DATA8 13 -#define ACPI_RSC_ADDRESS 14 -#define ACPI_RSC_SOURCE 15 -#define ACPI_RSC_SOURCEX 16 -#define ACPI_RSC_BITMASK 17 -#define ACPI_RSC_BITMASK16 18 -#define ACPI_RSC_EXIT_NE 19 -#define ACPI_RSC_EXIT_LE 20 -#define ACPI_RSC_EXIT_EQ 21 +typedef enum { + ACPI_RSC_INITGET = 0, + ACPI_RSC_INITSET, + ACPI_RSC_FLAGINIT, + ACPI_RSC_1BITFLAG, + ACPI_RSC_2BITFLAG, + ACPI_RSC_3BITFLAG, + ACPI_RSC_ADDRESS, + ACPI_RSC_BITMASK, + ACPI_RSC_BITMASK16, + ACPI_RSC_COUNT, + ACPI_RSC_COUNT16, + ACPI_RSC_COUNT_GPIO_PIN, + ACPI_RSC_COUNT_GPIO_RES, + ACPI_RSC_COUNT_GPIO_VEN, + ACPI_RSC_COUNT_SERIAL_RES, + ACPI_RSC_COUNT_SERIAL_VEN, + ACPI_RSC_DATA8, + ACPI_RSC_EXIT_EQ, + ACPI_RSC_EXIT_LE, + ACPI_RSC_EXIT_NE, + ACPI_RSC_LENGTH, + ACPI_RSC_MOVE_GPIO_PIN, + ACPI_RSC_MOVE_GPIO_RES, + ACPI_RSC_MOVE_SERIAL_RES, + ACPI_RSC_MOVE_SERIAL_VEN, + ACPI_RSC_MOVE8, + ACPI_RSC_MOVE16, + ACPI_RSC_MOVE32, + ACPI_RSC_MOVE64, + ACPI_RSC_SET8, + ACPI_RSC_SOURCE, + ACPI_RSC_SOURCEX +} ACPI_RSCONVERT_OPCODES; /* Resource Conversion sub-opcodes */ @@ -106,6 +118,9 @@ typedef const struct acpi_rsconvert_info { #define ACPI_RS_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_resource,f) #define AML_OFFSET(f) (u8) ACPI_OFFSET (union aml_resource,f) +/* + * Individual entry for the resource dump tables + */ typedef const struct acpi_rsdump_info { u8 opcode; u8 offset; @@ -116,20 +131,25 @@ typedef const struct acpi_rsdump_info { /* Values for the Opcode field above */ -#define ACPI_RSD_TITLE 0 -#define ACPI_RSD_LITERAL 1 -#define ACPI_RSD_STRING 2 -#define ACPI_RSD_UINT8 3 -#define ACPI_RSD_UINT16 4 -#define ACPI_RSD_UINT32 5 -#define ACPI_RSD_UINT64 6 -#define ACPI_RSD_1BITFLAG 7 -#define ACPI_RSD_2BITFLAG 8 -#define ACPI_RSD_SHORTLIST 9 -#define ACPI_RSD_LONGLIST 10 -#define ACPI_RSD_DWORDLIST 11 -#define ACPI_RSD_ADDRESS 12 -#define ACPI_RSD_SOURCE 13 +typedef enum { + ACPI_RSD_TITLE = 0, + ACPI_RSD_1BITFLAG, + ACPI_RSD_2BITFLAG, + ACPI_RSD_3BITFLAG, + ACPI_RSD_ADDRESS, + ACPI_RSD_DWORDLIST, + ACPI_RSD_LITERAL, + ACPI_RSD_LONGLIST, + ACPI_RSD_SHORTLIST, + ACPI_RSD_SHORTLISTX, + ACPI_RSD_SOURCE, + ACPI_RSD_STRING, + ACPI_RSD_UINT8, + ACPI_RSD_UINT16, + ACPI_RSD_UINT32, + ACPI_RSD_UINT64, + ACPI_RSD_WORDLIST +} ACPI_RSDUMP_OPCODES; /* restore default alignment */ @@ -138,13 +158,18 @@ typedef const struct acpi_rsdump_info { /* Resource tables indexed by internal resource type */ extern const u8 acpi_gbl_aml_resource_sizes[]; +extern const u8 acpi_gbl_aml_resource_serial_bus_sizes[]; extern struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[]; /* Resource tables indexed by raw AML resource descriptor type */ extern const u8 acpi_gbl_resource_struct_sizes[]; +extern const u8 acpi_gbl_resource_struct_serial_bus_sizes[]; extern struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[]; +extern struct acpi_rsconvert_info + *acpi_gbl_convert_resource_serial_bus_dispatch[]; + struct acpi_vendor_walk_info { struct acpi_vendor_uuid *uuid; struct acpi_buffer *buffer; @@ -190,6 +215,10 @@ acpi_status 
acpi_rs_set_srs_method_data(struct acpi_namespace_node *node, struct acpi_buffer *ret_buffer); +acpi_status +acpi_rs_get_aei_method_data(struct acpi_namespace_node *node, + struct acpi_buffer *ret_buffer); + /* * rscalc */ @@ -293,6 +322,11 @@ extern struct acpi_rsconvert_info acpi_rs_convert_address16[]; extern struct acpi_rsconvert_info acpi_rs_convert_ext_irq[]; extern struct acpi_rsconvert_info acpi_rs_convert_address64[]; extern struct acpi_rsconvert_info acpi_rs_convert_ext_address64[]; +extern struct acpi_rsconvert_info acpi_rs_convert_gpio[]; +extern struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[]; +extern struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[]; +extern struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[]; +extern struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[]; /* These resources require separate get/set tables */ @@ -310,6 +344,7 @@ extern struct acpi_rsconvert_info acpi_rs_set_vendor[]; * rsinfo */ extern struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[]; +extern struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[]; /* * rsdump @@ -331,6 +366,12 @@ extern struct acpi_rsdump_info acpi_rs_dump_address64[]; extern struct acpi_rsdump_info acpi_rs_dump_ext_address64[]; extern struct acpi_rsdump_info acpi_rs_dump_ext_irq[]; extern struct acpi_rsdump_info acpi_rs_dump_generic_reg[]; +extern struct acpi_rsdump_info acpi_rs_dump_gpio[]; +extern struct acpi_rsdump_info acpi_rs_dump_fixed_dma[]; +extern struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[]; +extern struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[]; +extern struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[]; +extern struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[]; #endif #endif /* __ACRESRC_H__ */ diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h index 1623b24..0404df6 100644 --- a/drivers/acpi/acpica/acstruct.h +++ b/drivers/acpi/acpica/acstruct.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h index 967f081..d5bec30 100644 --- a/drivers/acpi/acpica/actables.h +++ b/drivers/acpi/acpica/actables.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h index 99c140d..925ccf2 100644 --- a/drivers/acpi/acpica/acutils.h +++ b/drivers/acpi/acpica/acutils.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -45,6 +45,7 @@ #define _ACUTILS_H extern const u8 acpi_gbl_resource_aml_sizes[]; +extern const u8 acpi_gbl_resource_aml_serial_bus_sizes[]; /* Strings used by the disassembler and debugger resource dump routines */ @@ -579,6 +580,24 @@ acpi_ut_create_list(char *list_name, #endif /* ACPI_DBG_TRACK_ALLOCATIONS */ /* + * utaddress - address range check + */ +acpi_status +acpi_ut_add_address_range(acpi_adr_space_type space_id, + acpi_physical_address address, + u32 length, struct acpi_namespace_node *region_node); + +void +acpi_ut_remove_address_range(acpi_adr_space_type space_id, + struct acpi_namespace_node *region_node); + +u32 +acpi_ut_check_address_range(acpi_adr_space_type space_id, + acpi_physical_address address, u32 length, u8 warn); + +void acpi_ut_delete_address_lists(void); + +/* * utxferror - various error/warning output functions */ void ACPI_INTERNAL_VAR_XFACE diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h index 1077f17..905280f 100644 --- a/drivers/acpi/acpica/amlcode.h +++ b/drivers/acpi/acpica/amlcode.h @@ -7,7 +7,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -189,6 +189,14 @@ #define AML_LNOTEQUAL_OP (u16) 0x9293 /* + * Opcodes for "Field" operators + */ +#define AML_FIELD_OFFSET_OP (u8) 0x00 +#define AML_FIELD_ACCESS_OP (u8) 0x01 +#define AML_FIELD_CONNECTION_OP (u8) 0x02 /* ACPI 5.0 */ +#define AML_FIELD_EXT_ACCESS_OP (u8) 0x03 /* ACPI 5.0 */ + +/* * Internal opcodes * Use only "Unknown" AML opcodes, don't attempt to use * any valid ACPI ASCII values (A-Z, 0-9, '-') @@ -202,6 +210,8 @@ #define AML_INT_METHODCALL_OP (u16) 0x0035 #define AML_INT_RETURN_VALUE_OP (u16) 0x0036 #define AML_INT_EVAL_SUBTREE_OP (u16) 0x0037 +#define AML_INT_CONNECTION_OP (u16) 0x0038 +#define AML_INT_EXTACCESSFIELD_OP (u16) 0x0039 #define ARG_NONE 0x0 @@ -456,13 +466,16 @@ typedef enum { * access_as keyword */ typedef enum { - AML_FIELD_ATTRIB_SMB_QUICK = 0x02, - AML_FIELD_ATTRIB_SMB_SEND_RCV = 0x04, - AML_FIELD_ATTRIB_SMB_BYTE = 0x06, - AML_FIELD_ATTRIB_SMB_WORD = 0x08, - AML_FIELD_ATTRIB_SMB_BLOCK = 0x0A, - AML_FIELD_ATTRIB_SMB_WORD_CALL = 0x0C, - AML_FIELD_ATTRIB_SMB_BLOCK_CALL = 0x0D + AML_FIELD_ATTRIB_QUICK = 0x02, + AML_FIELD_ATTRIB_SEND_RCV = 0x04, + AML_FIELD_ATTRIB_BYTE = 0x06, + AML_FIELD_ATTRIB_WORD = 0x08, + AML_FIELD_ATTRIB_BLOCK = 0x0A, + AML_FIELD_ATTRIB_MULTIBYTE = 0x0B, + AML_FIELD_ATTRIB_WORD_CALL = 0x0C, + AML_FIELD_ATTRIB_BLOCK_CALL = 0x0D, + AML_FIELD_ATTRIB_RAW_BYTES = 0x0E, + AML_FIELD_ATTRIB_RAW_PROCESS = 0x0F } AML_ACCESS_ATTRIBUTE; /* Bit fields in the AML method_flags byte */ diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h index 59122cd..7b2128f 100644 --- a/drivers/acpi/acpica/amlresrc.h +++ b/drivers/acpi/acpica/amlresrc.h @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -58,29 +58,48 @@ #define ACPI_RESTAG_TYPESPECIFICATTRIBUTES "_ATT" #define ACPI_RESTAG_BASEADDRESS "_BAS" #define ACPI_RESTAG_BUSMASTER "_BM_" /* Master(1), Slave(0) */ +#define ACPI_RESTAG_DEBOUNCETIME "_DBT" #define ACPI_RESTAG_DECODE "_DEC" +#define ACPI_RESTAG_DEVICEPOLARITY "_DPL" #define ACPI_RESTAG_DMA "_DMA" #define ACPI_RESTAG_DMATYPE "_TYP" /* Compatible(0), A(1), B(2), F(3) */ +#define ACPI_RESTAG_DRIVESTRENGTH "_DRS" +#define ACPI_RESTAG_ENDIANNESS "_END" +#define ACPI_RESTAG_FLOWCONTROL "_FLC" #define ACPI_RESTAG_GRANULARITY "_GRA" #define ACPI_RESTAG_INTERRUPT "_INT" #define ACPI_RESTAG_INTERRUPTLEVEL "_LL_" /* active_lo(1), active_hi(0) */ #define ACPI_RESTAG_INTERRUPTSHARE "_SHR" /* Shareable(1), no_share(0) */ #define ACPI_RESTAG_INTERRUPTTYPE "_HE_" /* Edge(1), Level(0) */ +#define ACPI_RESTAG_IORESTRICTION "_IOR" #define ACPI_RESTAG_LENGTH "_LEN" +#define ACPI_RESTAG_LINE "_LIN" #define ACPI_RESTAG_MEMATTRIBUTES "_MTP" /* Memory(0), Reserved(1), ACPI(2), NVS(3) */ #define ACPI_RESTAG_MEMTYPE "_MEM" /* non_cache(0), Cacheable(1) Cache+combine(2), Cache+prefetch(3) */ #define ACPI_RESTAG_MAXADDR "_MAX" #define ACPI_RESTAG_MINADDR "_MIN" #define ACPI_RESTAG_MAXTYPE "_MAF" #define ACPI_RESTAG_MINTYPE "_MIF" +#define ACPI_RESTAG_MODE "_MOD" +#define ACPI_RESTAG_PARITY "_PAR" +#define ACPI_RESTAG_PHASE "_PHA" +#define ACPI_RESTAG_PIN "_PIN" +#define ACPI_RESTAG_PINCONFIG "_PPI" +#define ACPI_RESTAG_POLARITY "_POL" #define ACPI_RESTAG_REGISTERBITOFFSET "_RBO" #define ACPI_RESTAG_REGISTERBITWIDTH "_RBW" #define ACPI_RESTAG_RANGETYPE "_RNG" #define ACPI_RESTAG_READWRITETYPE "_RW_" /* read_only(0), Writeable (1) */ +#define ACPI_RESTAG_LENGTH_RX "_RXL" +#define ACPI_RESTAG_LENGTH_TX "_TXL" +#define ACPI_RESTAG_SLAVEMODE "_SLV" +#define ACPI_RESTAG_SPEED "_SPE" +#define ACPI_RESTAG_STOPBITS "_STB" #define ACPI_RESTAG_TRANSLATION "_TRA" #define ACPI_RESTAG_TRANSTYPE "_TRS" /* Sparse(1), Dense(0) */ #define ACPI_RESTAG_TYPE "_TTP" /* Translation(1), Static (0) */ #define ACPI_RESTAG_XFERTYPE "_SIZ" /* 8(0), 8_and16(1), 16(2) */ +#define ACPI_RESTAG_VENDORDATA "_VEN" /* Default sizes for "small" resource descriptors */ @@ -90,6 +109,7 @@ #define ASL_RDESC_END_DEPEND_SIZE 0x00 #define ASL_RDESC_IO_SIZE 0x07 #define ASL_RDESC_FIXED_IO_SIZE 0x03 +#define ASL_RDESC_FIXED_DMA_SIZE 0x05 #define ASL_RDESC_END_TAG_SIZE 0x01 struct asl_resource_node { @@ -164,6 +184,12 @@ struct aml_resource_end_tag { AML_RESOURCE_SMALL_HEADER_COMMON u8 checksum; }; +struct aml_resource_fixed_dma { + AML_RESOURCE_SMALL_HEADER_COMMON u16 request_lines; + u16 channels; + u8 width; +}; + /* * LARGE descriptors */ @@ -263,6 +289,110 @@ struct aml_resource_generic_register { u64 address; }; +/* Common descriptor for gpio_int and gpio_io (ACPI 5.0) */ + +struct aml_resource_gpio { + AML_RESOURCE_LARGE_HEADER_COMMON u8 revision_id; + u8 connection_type; + u16 flags; + u16 int_flags; + u8 pin_config; + u16 drive_strength; + u16 debounce_timeout; + u16 pin_table_offset; + u8 res_source_index; + u16 res_source_offset; + u16 vendor_offset; + u16 vendor_length; + /* + * Optional fields follow immediately: + * 1) PIN list (Words) + * 2) Resource Source String + * 3) Vendor Data bytes + */ +}; + +#define AML_RESOURCE_GPIO_REVISION 1 /* ACPI 5.0 */ + +/* Values for connection_type above */ + +#define AML_RESOURCE_GPIO_TYPE_INT 0 +#define AML_RESOURCE_GPIO_TYPE_IO 1 +#define AML_RESOURCE_MAX_GPIOTYPE 1 + +/* Common preamble for all serial descriptors 
(ACPI 5.0) */ + +#define AML_RESOURCE_SERIAL_COMMON \ + u8 revision_id; \ + u8 res_source_index; \ + u8 type; \ + u8 flags; \ + u16 type_specific_flags; \ + u8 type_revision_id; \ + u16 type_data_length; \ + +/* Values for the type field above */ + +#define AML_RESOURCE_I2C_SERIALBUSTYPE 1 +#define AML_RESOURCE_SPI_SERIALBUSTYPE 2 +#define AML_RESOURCE_UART_SERIALBUSTYPE 3 +#define AML_RESOURCE_MAX_SERIALBUSTYPE 3 +#define AML_RESOURCE_VENDOR_SERIALBUSTYPE 192 /* Vendor defined is 0xC0-0xFF (NOT SUPPORTED) */ + +struct aml_resource_common_serialbus { +AML_RESOURCE_LARGE_HEADER_COMMON AML_RESOURCE_SERIAL_COMMON}; + +struct aml_resource_i2c_serialbus { + AML_RESOURCE_LARGE_HEADER_COMMON + AML_RESOURCE_SERIAL_COMMON u32 connection_speed; + u16 slave_address; + /* + * Optional fields follow immediately: + * 1) Vendor Data bytes + * 2) Resource Source String + */ +}; + +#define AML_RESOURCE_I2C_REVISION 1 /* ACPI 5.0 */ +#define AML_RESOURCE_I2C_TYPE_REVISION 1 /* ACPI 5.0 */ +#define AML_RESOURCE_I2C_MIN_DATA_LEN 6 + +struct aml_resource_spi_serialbus { + AML_RESOURCE_LARGE_HEADER_COMMON + AML_RESOURCE_SERIAL_COMMON u32 connection_speed; + u8 data_bit_length; + u8 clock_phase; + u8 clock_polarity; + u16 device_selection; + /* + * Optional fields follow immediately: + * 1) Vendor Data bytes + * 2) Resource Source String + */ +}; + +#define AML_RESOURCE_SPI_REVISION 1 /* ACPI 5.0 */ +#define AML_RESOURCE_SPI_TYPE_REVISION 1 /* ACPI 5.0 */ +#define AML_RESOURCE_SPI_MIN_DATA_LEN 9 + +struct aml_resource_uart_serialbus { + AML_RESOURCE_LARGE_HEADER_COMMON + AML_RESOURCE_SERIAL_COMMON u32 default_baud_rate; + u16 rx_fifo_size; + u16 tx_fifo_size; + u8 parity; + u8 lines_enabled; + /* + * Optional fields follow immediately: + * 1) Vendor Data bytes + * 2) Resource Source String + */ +}; + +#define AML_RESOURCE_UART_REVISION 1 /* ACPI 5.0 */ +#define AML_RESOURCE_UART_TYPE_REVISION 1 /* ACPI 5.0 */ +#define AML_RESOURCE_UART_MIN_DATA_LEN 10 + /* restore default alignment */ #pragma pack() @@ -284,6 +414,7 @@ union aml_resource { struct aml_resource_end_dependent end_dpf; struct aml_resource_io io; struct aml_resource_fixed_io fixed_io; + struct aml_resource_fixed_dma fixed_dma; struct aml_resource_vendor_small vendor_small; struct aml_resource_end_tag end_tag; @@ -299,6 +430,11 @@ union aml_resource { struct aml_resource_address64 address64; struct aml_resource_extended_address64 ext_address64; struct aml_resource_extended_irq extended_irq; + struct aml_resource_gpio gpio; + struct aml_resource_i2c_serialbus i2c_serial_bus; + struct aml_resource_spi_serialbus spi_serial_bus; + struct aml_resource_uart_serialbus uart_serial_bus; + struct aml_resource_common_serialbus common_serial_bus; /* Utility overlays */ diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c index 8c7b997..80eb190 100644 --- a/drivers/acpi/acpica/dsargs.c +++ b/drivers/acpi/acpica/dsargs.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -250,6 +250,13 @@ acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc) status = acpi_ds_execute_arguments(node, node->parent, extra_desc->extra.aml_length, extra_desc->extra.aml_start); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + status = acpi_ut_add_address_range(obj_desc->region.space_id, + obj_desc->region.address, + obj_desc->region.length, node); return_ACPI_STATUS(status); } @@ -384,8 +391,15 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc) /* Execute the argument AML */ - status = acpi_ds_execute_arguments(node, node->parent, + status = acpi_ds_execute_arguments(node, extra_desc->extra.scope_node, extra_desc->extra.aml_length, extra_desc->extra.aml_start); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + status = acpi_ut_add_address_range(obj_desc->region.space_id, + obj_desc->region.address, + obj_desc->region.length, node); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c index 26c49ff..effe4ca 100644 --- a/drivers/acpi/acpica/dscontrol.c +++ b/drivers/acpi/acpica/dscontrol.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c index 34be60c..cd243cf 100644 --- a/drivers/acpi/acpica/dsfield.c +++ b/drivers/acpi/acpica/dsfield.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -221,6 +221,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info, { acpi_status status; u64 position; + union acpi_parse_object *child; ACPI_FUNCTION_TRACE_PTR(ds_get_field_names, info); @@ -232,10 +233,11 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info, while (arg) { /* - * Three types of field elements are handled: - * 1) Offset - specifies a bit offset - * 2) access_as - changes the access mode - * 3) Name - Enters a new named field into the namespace + * Four types of field elements are handled: + * 1) Name - Enters a new named field into the namespace + * 2) Offset - specifies a bit offset + * 3) access_as - changes the access mode/attributes + * 4) Connection - Associate a resource template with the field */ switch (arg->common.aml_opcode) { case AML_INT_RESERVEDFIELD_OP: @@ -253,21 +255,70 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info, break; case AML_INT_ACCESSFIELD_OP: - + case AML_INT_EXTACCESSFIELD_OP: /* - * Get a new access_type and access_attribute -- to be used for all - * field units that follow, until field end or another access_as - * keyword. + * Get new access_type, access_attribute, and access_length fields + * -- to be used for all field units that follow, until the + * end-of-field or another access_as keyword is encountered. + * NOTE. These three bytes are encoded in the integer value + * of the parseop for convenience. * * In field_flags, preserve the flag bits other than the - * ACCESS_TYPE bits + * ACCESS_TYPE bits. */ + + /* access_type (byte_acc, word_acc, etc.) 
*/ + info->field_flags = (u8) ((info-> field_flags & ~(AML_FIELD_ACCESS_TYPE_MASK)) | - ((u8) ((u32) arg->common.value.integer >> 8))); + ((u8)((u32)(arg->common.value.integer & 0x07)))); + + /* access_attribute (attrib_quick, attrib_byte, etc.) */ + + info->attribute = + (u8)((arg->common.value.integer >> 8) & 0xFF); + + /* access_length (for serial/buffer protocols) */ + + info->access_length = + (u8)((arg->common.value.integer >> 16) & 0xFF); + break; + + case AML_INT_CONNECTION_OP: + /* + * Clear any previous connection. New connection is used for all + * fields that follow, similar to access_as + */ + info->resource_buffer = NULL; + info->connection_node = NULL; - info->attribute = (u8) (arg->common.value.integer); + /* + * A Connection() is either an actual resource descriptor (buffer) + * or a named reference to a resource template + */ + child = arg->common.value.arg; + if (child->common.aml_opcode == AML_INT_BYTELIST_OP) { + info->resource_buffer = child->named.data; + info->resource_length = + (u16)child->named.value.integer; + } else { + /* Lookup the Connection() namepath, it should already exist */ + + status = acpi_ns_lookup(walk_state->scope_info, + child->common.value. + name, ACPI_TYPE_ANY, + ACPI_IMODE_EXECUTE, + ACPI_NS_DONT_OPEN_SCOPE, + walk_state, + &info->connection_node); + if (ACPI_FAILURE(status)) { + ACPI_ERROR_NAMESPACE(child->common. + value.name, + status); + return_ACPI_STATUS(status); + } + } break; case AML_INT_NAMEDFIELD_OP: @@ -374,6 +425,8 @@ acpi_ds_create_field(union acpi_parse_object *op, } } + ACPI_MEMSET(&info, 0, sizeof(struct acpi_create_field_info)); + /* Second arg is the field flags */ arg = arg->common.next; @@ -386,7 +439,6 @@ acpi_ds_create_field(union acpi_parse_object *op, info.region_node = region_node; status = acpi_ds_get_field_names(&info, walk_state, arg->common.next); - return_ACPI_STATUS(status); } @@ -474,8 +526,8 @@ acpi_ds_init_field_objects(union acpi_parse_object *op, */ while (arg) { /* - * Ignore OFFSET and ACCESSAS terms here; we are only interested in the - * field names in order to enter them into the namespace. + * Ignore OFFSET/ACCESSAS/CONNECTION terms here; we are only interested + * in the field names in order to enter them into the namespace. */ if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) { status = acpi_ns_lookup(walk_state->scope_info, @@ -651,6 +703,5 @@ acpi_ds_create_index_field(union acpi_parse_object *op, info.region_node = region_node; status = acpi_ds_get_field_names(&info, walk_state, arg->common.next); - return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c index a7718bf..9e5ac7f 100644 --- a/drivers/acpi/acpica/dsinit.c +++ b/drivers/acpi/acpica/dsinit.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index 5d79775..00f5dab 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c index 905ce29..b40bd50 100644 --- a/drivers/acpi/acpica/dsmthdat.c +++ b/drivers/acpi/acpica/dsmthdat.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c index f42e17e..d7045ca 100644 --- a/drivers/acpi/acpica/dsobject.c +++ b/drivers/acpi/acpica/dsobject.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c index c627a28..e5eff75 100644 --- a/drivers/acpi/acpica/dsopcode.c +++ b/drivers/acpi/acpica/dsopcode.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c index 2c477ce..1abcda3 100644 --- a/drivers/acpi/acpica/dsutils.c +++ b/drivers/acpi/acpica/dsutils.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c index fe40e4c..642f3c0 100644 --- a/drivers/acpi/acpica/dswexec.c +++ b/drivers/acpi/acpica/dswexec.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c index 324acec..552aa3a 100644 --- a/drivers/acpi/acpica/dswload.c +++ b/drivers/acpi/acpica/dswload.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c index 9763181..ae71477 100644 --- a/drivers/acpi/acpica/dswload2.c +++ b/drivers/acpi/acpica/dswload2.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c index 76a661f..9e9490a 100644 --- a/drivers/acpi/acpica/dswscope.c +++ b/drivers/acpi/acpica/dswscope.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c index a6c374e..c9c2ac1 100644 --- a/drivers/acpi/acpica/dswstate.c +++ b/drivers/acpi/acpica/dswstate.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c index d458b041..6729ebe 100644 --- a/drivers/acpi/acpica/evevent.c +++ b/drivers/acpi/acpica/evevent.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -71,6 +71,12 @@ acpi_status acpi_ev_initialize_events(void) ACPI_FUNCTION_TRACE(ev_initialize_events); + /* If Hardware Reduced flag is set, there are no fixed events */ + + if (acpi_gbl_reduced_hardware) { + return_ACPI_STATUS(AE_OK); + } + /* * Initialize the Fixed and General Purpose Events. This is done prior to * enabling SCIs to prevent interrupts from occurring before the handlers @@ -111,6 +117,12 @@ acpi_status acpi_ev_install_xrupt_handlers(void) ACPI_FUNCTION_TRACE(ev_install_xrupt_handlers); + /* If Hardware Reduced flag is set, there is no ACPI h/w */ + + if (acpi_gbl_reduced_hardware) { + return_ACPI_STATUS(AE_OK); + } + /* Install the SCI handler */ status = acpi_ev_install_sci_handler(); diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c index 56a562a..5e5683c 100644 --- a/drivers/acpi/acpica/evglock.c +++ b/drivers/acpi/acpica/evglock.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -70,6 +70,12 @@ acpi_status acpi_ev_init_global_lock_handler(void) ACPI_FUNCTION_TRACE(ev_init_global_lock_handler); + /* If Hardware Reduced flag is set, there is no global lock */ + + if (acpi_gbl_reduced_hardware) { + return_ACPI_STATUS(AE_OK); + } + /* Attempt installation of the global lock handler */ status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL, diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index 65c79ad..9e88cb6 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c index ca2c41a..be75339 100644 --- a/drivers/acpi/acpica/evgpeblk.c +++ b/drivers/acpi/acpica/evgpeblk.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c index ce9aa9f..adf7494 100644 --- a/drivers/acpi/acpica/evgpeinit.c +++ b/drivers/acpi/acpica/evgpeinit.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c index 80a81d0..2507393 100644 --- a/drivers/acpi/acpica/evgpeutil.c +++ b/drivers/acpi/acpica/evgpeutil.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c index d0b3318..84966f4 100644 --- a/drivers/acpi/acpica/evmisc.c +++ b/drivers/acpi/acpica/evmisc.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c index f0edf5c..1b0180a 100644 --- a/drivers/acpi/acpica/evregion.c +++ b/drivers/acpi/acpica/evregion.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -329,6 +329,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function) * FUNCTION: acpi_ev_address_space_dispatch * * PARAMETERS: region_obj - Internal region object + * field_obj - Corresponding field. Can be NULL. 
* Function - Read or Write operation * region_offset - Where in the region to read or write * bit_width - Field width in bits (8, 16, 32, or 64) @@ -344,6 +345,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function) acpi_status acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, + union acpi_operand_object *field_obj, u32 function, u32 region_offset, u32 bit_width, u64 *value) { @@ -353,6 +355,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, union acpi_operand_object *handler_desc; union acpi_operand_object *region_obj2; void *region_context = NULL; + struct acpi_connection_info *context; ACPI_FUNCTION_TRACE(ev_address_space_dispatch); @@ -375,6 +378,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, return_ACPI_STATUS(AE_NOT_EXIST); } + context = handler_desc->address_space.context; + /* * It may be the case that the region has never been initialized. * Some types of regions require special init code @@ -404,8 +409,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, acpi_ex_exit_interpreter(); status = region_setup(region_obj, ACPI_REGION_ACTIVATE, - handler_desc->address_space.context, - ®ion_context); + context, ®ion_context); /* Re-enter the interpreter */ @@ -455,6 +459,25 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, acpi_ut_get_region_name(region_obj->region. space_id))); + /* + * Special handling for generic_serial_bus and general_purpose_io: + * There are three extra parameters that must be passed to the + * handler via the context: + * 1) Connection buffer, a resource template from Connection() op. + * 2) Length of the above buffer. + * 3) Actual access length from the access_as() op. + */ + if (((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) || + (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO)) && + context && field_obj) { + + /* Get the Connection (resource_template) buffer */ + + context->connection = field_obj->field.resource_buffer; + context->length = field_obj->field.resource_length; + context->access_length = field_obj->field.access_length; + } + if (!(handler_desc->address_space.handler_flags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { /* @@ -469,7 +492,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, status = handler(function, (region_obj->region.address + region_offset), - bit_width, value, handler_desc->address_space.context, + bit_width, value, context, region_obj2->extra.region_context); if (ACPI_FAILURE(status)) { diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c index 55a5d35..819c17f 100644 --- a/drivers/acpi/acpica/evrgnini.c +++ b/drivers/acpi/acpica/evrgnini.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c index 2ebd40e..26065c6 100644 --- a/drivers/acpi/acpica/evsci.c +++ b/drivers/acpi/acpica/evsci.c @@ -6,7 +6,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
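The new field_obj parameter and acpi_connection_info plumbing above exist so that a GenericSerialBus or GeneralPurposeIo address-space handler can see the Connection() resource template and AccessAs() length of the field being accessed. A sketch of a handler consuming that context, assuming ACPICA's acpi_adr_space_handler signature and the connection/length/access_length members shown above (the handler name and body are illustrative only):

static acpi_status
example_gsbus_handler(u32 function,
		      acpi_physical_address address,
		      u32 bit_width,
		      u64 *value,
		      void *handler_context, void *region_context)
{
	struct acpi_connection_info *info = handler_context;

	if (!info || !info->connection) {
		return (AE_ERROR);	/* no Connection() seen for this field */
	}

	/*
	 * info->connection/info->length describe the raw resource template
	 * from the Connection() operator; info->access_length comes from
	 * AccessAs(). The protocol value rides in the upper 16 bits of
	 * Function, and *value points at the bidirectional data buffer.
	 * A real handler would decode info->connection (for example with
	 * acpi_buffer_to_resource(), added later in this patch) and perform
	 * the bus transfer here.
	 */
	return (AE_OK);
}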
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c index f4f523b..61944e8 100644 --- a/drivers/acpi/acpica/evxface.c +++ b/drivers/acpi/acpica/evxface.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index 20516e5..1768bbe 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index f06a3ee..33388fd 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c index aee887e..6019208c 100644 --- a/drivers/acpi/acpica/evxfregn.c +++ b/drivers/acpi/acpica/evxfregn.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c index 745a42b..c86d44e 100644 --- a/drivers/acpi/acpica/exconfig.c +++ b/drivers/acpi/acpica/exconfig.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -297,9 +297,9 @@ acpi_ex_region_read(union acpi_operand_object *obj_desc, u32 length, u8 *buffer) /* Bytewise reads */ for (i = 0; i < length; i++) { - status = acpi_ev_address_space_dispatch(obj_desc, ACPI_READ, - region_offset, 8, - &value); + status = + acpi_ev_address_space_dispatch(obj_desc, NULL, ACPI_READ, + region_offset, 8, &value); if (ACPI_FAILURE(status)) { return status; } diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c index 74162a1..e385436 100644 --- a/drivers/acpi/acpica/exconvrt.c +++ b/drivers/acpi/acpica/exconvrt.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c index 110711a..3f5bc99 100644 --- a/drivers/acpi/acpica/excreate.c +++ b/drivers/acpi/acpica/excreate.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. 
+ * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -267,7 +267,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state) * * PARAMETERS: aml_start - Pointer to the region declaration AML * aml_length - Max length of the declaration AML - * region_space - space_iD for the region + * space_id - Address space ID for the region * walk_state - Current state * * RETURN: Status @@ -279,7 +279,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state) acpi_status acpi_ex_create_region(u8 * aml_start, u32 aml_length, - u8 region_space, struct acpi_walk_state *walk_state) + u8 space_id, struct acpi_walk_state *walk_state) { acpi_status status; union acpi_operand_object *obj_desc; @@ -304,16 +304,19 @@ acpi_ex_create_region(u8 * aml_start, * Space ID must be one of the predefined IDs, or in the user-defined * range */ - if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) && - (region_space < ACPI_USER_REGION_BEGIN) && - (region_space != ACPI_ADR_SPACE_DATA_TABLE)) { - ACPI_ERROR((AE_INFO, "Invalid AddressSpace type 0x%X", - region_space)); - return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID); + if (!acpi_is_valid_space_id(space_id)) { + /* + * Print an error message, but continue. We don't want to abort + * a table load for this exception. Instead, if the region is + * actually used at runtime, abort the executing method. + */ + ACPI_ERROR((AE_INFO, + "Invalid/unknown Address Space ID: 0x%2.2X", + space_id)); } ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (0x%X)\n", - acpi_ut_get_region_name(region_space), region_space)); + acpi_ut_get_region_name(space_id), space_id)); /* Create the region descriptor */ @@ -330,10 +333,16 @@ acpi_ex_create_region(u8 * aml_start, region_obj2 = obj_desc->common.next_object; region_obj2->extra.aml_start = aml_start; region_obj2->extra.aml_length = aml_length; + if (walk_state->scope_info) { + region_obj2->extra.scope_node = + walk_state->scope_info->scope.node; + } else { + region_obj2->extra.scope_node = node; + } /* Init the region from the operands */ - obj_desc->region.space_id = region_space; + obj_desc->region.space_id = space_id; obj_desc->region.address = 0; obj_desc->region.length = 0; obj_desc->region.node = node; diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c index c7a2f1e..e211e9c 100644 --- a/drivers/acpi/acpica/exdebug.c +++ b/drivers/acpi/acpica/exdebug.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c index 61b8c0e..2a6ac0a 100644 --- a/drivers/acpi/acpica/exdump.c +++ b/drivers/acpi/acpica/exdump.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -192,10 +192,13 @@ static struct acpi_exdump_info acpi_ex_dump_buffer_field[3] = { "Buffer Object"} }; -static struct acpi_exdump_info acpi_ex_dump_region_field[3] = { +static struct acpi_exdump_info acpi_ex_dump_region_field[5] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region_field), NULL}, {ACPI_EXD_FIELD, 0, NULL}, - {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.region_obj), "Region Object"} + {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(field.access_length), "AccessLength"}, + {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.region_obj), "Region Object"}, + {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.resource_buffer), + "ResourceBuffer"} }; static struct acpi_exdump_info acpi_ex_dump_bank_field[5] = { diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c index 0bde223..dc092f5 100644 --- a/drivers/acpi/acpica/exfield.c +++ b/drivers/acpi/acpica/exfield.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -100,18 +100,25 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state, (obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_SMBUS || obj_desc->field.region_obj->region.space_id == + ACPI_ADR_SPACE_GSBUS + || obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_IPMI)) { /* - * This is an SMBus or IPMI read. We must create a buffer to hold + * This is an SMBus, GSBus or IPMI read. We must create a buffer to hold * the data and then directly access the region handler. * - * Note: Smbus protocol value is passed in upper 16-bits of Function + * Note: SMBus and GSBus protocol value is passed in upper 16-bits of Function */ if (obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_SMBUS) { length = ACPI_SMBUS_BUFFER_SIZE; function = ACPI_READ | (obj_desc->field.attribute << 16); + } else if (obj_desc->field.region_obj->region.space_id == + ACPI_ADR_SPACE_GSBUS) { + length = ACPI_GSBUS_BUFFER_SIZE; + function = + ACPI_READ | (obj_desc->field.attribute << 16); } else { /* IPMI */ length = ACPI_IPMI_BUFFER_SIZE; @@ -248,21 +255,23 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, (obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_SMBUS || obj_desc->field.region_obj->region.space_id == + ACPI_ADR_SPACE_GSBUS + || obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_IPMI)) { /* - * This is an SMBus or IPMI write. We will bypass the entire field + * This is an SMBus, GSBus or IPMI write. We will bypass the entire field * mechanism and handoff the buffer directly to the handler. For * these address spaces, the buffer is bi-directional; on a write, * return data is returned in the same buffer. * * Source must be a buffer of sufficient size: - * ACPI_SMBUS_BUFFER_SIZE or ACPI_IPMI_BUFFER_SIZE. + * ACPI_SMBUS_BUFFER_SIZE, ACPI_GSBUS_BUFFER_SIZE, or ACPI_IPMI_BUFFER_SIZE. 
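The SMBus/GSBus/IPMI paths above bypass the normal bit-level field machinery and hand a fixed-size, bidirectional buffer straight to the region handler. For GenericSerialBus that buffer is laid out, to the best of my reading of ACPI 5.0, as a status byte, a length byte and the data bytes (the struct name is illustrative, not ACPICA code):

struct example_gsb_buffer {
	u8 status;	/* transfer status returned by the handler */
	u8 len;		/* number of valid bytes in data[] */
	u8 data[ACPI_GSBUS_BUFFER_SIZE - 2];
};

On a write the AML supplies such a buffer as the store source; on completion the same buffer carries any returned data back to the AML, which is why the code above insists on a Buffer operand of at least ACPI_GSBUS_BUFFER_SIZE bytes.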
* - * Note: SMBus protocol type is passed in upper 16-bits of Function + * Note: SMBus and GSBus protocol type is passed in upper 16-bits of Function */ if (source_desc->common.type != ACPI_TYPE_BUFFER) { ACPI_ERROR((AE_INFO, - "SMBus or IPMI write requires Buffer, found type %s", + "SMBus/IPMI/GenericSerialBus write requires Buffer, found type %s", acpi_ut_get_object_type_name(source_desc))); return_ACPI_STATUS(AE_AML_OPERAND_TYPE); @@ -273,6 +282,11 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, length = ACPI_SMBUS_BUFFER_SIZE; function = ACPI_WRITE | (obj_desc->field.attribute << 16); + } else if (obj_desc->field.region_obj->region.space_id == + ACPI_ADR_SPACE_GSBUS) { + length = ACPI_GSBUS_BUFFER_SIZE; + function = + ACPI_WRITE | (obj_desc->field.attribute << 16); } else { /* IPMI */ length = ACPI_IPMI_BUFFER_SIZE; @@ -281,7 +295,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, if (source_desc->buffer.length < length) { ACPI_ERROR((AE_INFO, - "SMBus or IPMI write requires Buffer of length %u, found length %u", + "SMBus/IPMI/GenericSerialBus write requires Buffer of length %u, found length %u", length, source_desc->buffer.length)); return_ACPI_STATUS(AE_AML_BUFFER_LIMIT); diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c index f915a7f..149de45 100644 --- a/drivers/acpi/acpica/exfldio.c +++ b/drivers/acpi/acpica/exfldio.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -86,6 +86,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc, { acpi_status status = AE_OK; union acpi_operand_object *rgn_desc; + u8 space_id; ACPI_FUNCTION_TRACE_U32(ex_setup_region, field_datum_byte_offset); @@ -101,6 +102,17 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc, return_ACPI_STATUS(AE_AML_OPERAND_TYPE); } + space_id = rgn_desc->region.space_id; + + /* Validate the Space ID */ + + if (!acpi_is_valid_space_id(space_id)) { + ACPI_ERROR((AE_INFO, + "Invalid/unknown Address Space ID: 0x%2.2X", + space_id)); + return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID); + } + /* * If the Region Address and Length have not been previously evaluated, * evaluate them now and save the results. @@ -119,11 +131,12 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc, } /* - * Exit now for SMBus or IPMI address space, it has a non-linear + * Exit now for SMBus, GSBus or IPMI address space, it has a non-linear * address space and the request cannot be directly validated */ - if (rgn_desc->region.space_id == ACPI_ADR_SPACE_SMBUS || - rgn_desc->region.space_id == ACPI_ADR_SPACE_IPMI) { + if (space_id == ACPI_ADR_SPACE_SMBUS || + space_id == ACPI_ADR_SPACE_GSBUS || + space_id == ACPI_ADR_SPACE_IPMI) { /* SMBus or IPMI has a non-linear address space */ @@ -271,11 +284,12 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc, /* Invoke the appropriate address_space/op_region handler */ - status = - acpi_ev_address_space_dispatch(rgn_desc, function, region_offset, - ACPI_MUL_8(obj_desc->common_field. - access_byte_width), - value); + status = acpi_ev_address_space_dispatch(rgn_desc, obj_desc, + function, region_offset, + ACPI_MUL_8(obj_desc-> + common_field. 
+ access_byte_width), + value); if (ACPI_FAILURE(status)) { if (status == AE_NOT_IMPLEMENTED) { @@ -316,6 +330,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc, static u8 acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value) { + ACPI_FUNCTION_NAME(ex_register_overflow); if (obj_desc->common_field.bit_length >= ACPI_INTEGER_BIT_SIZE) { /* @@ -330,6 +345,11 @@ acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value) * The Value is larger than the maximum value that can fit into * the register. */ + ACPI_ERROR((AE_INFO, + "Index value 0x%8.8X%8.8X overflows field width 0x%X", + ACPI_FORMAT_UINT64(value), + obj_desc->common_field.bit_length)); + return (TRUE); } diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c index 703d88e..0a08933 100644 --- a/drivers/acpi/acpica/exmisc.c +++ b/drivers/acpi/acpica/exmisc.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c index be1c56e..60933e9 100644 --- a/drivers/acpi/acpica/exmutex.c +++ b/drivers/acpi/acpica/exmutex.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c index 49ec049..fcc75fa 100644 --- a/drivers/acpi/acpica/exnames.c +++ b/drivers/acpi/acpica/exnames.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c index 236ead1..9ba8c73 100644 --- a/drivers/acpi/acpica/exoparg1.c +++ b/drivers/acpi/acpica/exoparg1.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c index 2571b4a..879e8a2 100644 --- a/drivers/acpi/acpica/exoparg2.c +++ b/drivers/acpi/acpica/exoparg2.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c index 1b48d9d..71fcc65 100644 --- a/drivers/acpi/acpica/exoparg3.c +++ b/drivers/acpi/acpica/exoparg3.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c index f4a2787..0786b86 100644 --- a/drivers/acpi/acpica/exoparg6.c +++ b/drivers/acpi/acpica/exoparg6.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index cc95e20..30157f5 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -47,6 +47,7 @@ #include "acinterp.h" #include "amlcode.h" #include "acnamesp.h" +#include "acdispat.h" #define _COMPONENT ACPI_EXECUTER ACPI_MODULE_NAME("exprep") @@ -455,6 +456,30 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info) obj_desc->field.region_obj = acpi_ns_get_attached_object(info->region_node); + /* Fields specific to generic_serial_bus fields */ + + obj_desc->field.access_length = info->access_length; + + if (info->connection_node) { + second_desc = info->connection_node->object; + if (!(second_desc->common.flags & AOPOBJ_DATA_VALID)) { + status = + acpi_ds_get_buffer_arguments(second_desc); + if (ACPI_FAILURE(status)) { + acpi_ut_delete_object_desc(obj_desc); + return_ACPI_STATUS(status); + } + } + + obj_desc->field.resource_buffer = + second_desc->buffer.pointer; + obj_desc->field.resource_length = + (u16)second_desc->buffer.length; + } else if (info->resource_buffer) { + obj_desc->field.resource_buffer = info->resource_buffer; + obj_desc->field.resource_length = info->resource_length; + } + /* Allow full data read from EC address space */ if ((obj_desc->field.region_obj->region.space_id == diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c index f0d5e14..12d51df 100644 --- a/drivers/acpi/acpica/exregion.c +++ b/drivers/acpi/acpica/exregion.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c index 55997e4..fa50e77 100644 --- a/drivers/acpi/acpica/exresnte.c +++ b/drivers/acpi/acpica/exresnte.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c index db502cd..6e335dc 100644 --- a/drivers/acpi/acpica/exresolv.c +++ b/drivers/acpi/acpica/exresolv.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c index e3bb00c..a67b1d9 100644 --- a/drivers/acpi/acpica/exresop.c +++ b/drivers/acpi/acpica/exresop.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c index c0c8842..c6cf843 100644 --- a/drivers/acpi/acpica/exstore.c +++ b/drivers/acpi/acpica/exstore.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c index a979017..b35bed5 100644 --- a/drivers/acpi/acpica/exstoren.c +++ b/drivers/acpi/acpica/exstoren.c @@ -7,7 +7,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c index dc665cc..65a45d8 100644 --- a/drivers/acpi/acpica/exstorob.c +++ b/drivers/acpi/acpica/exstorob.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c index df66e7b..191a1294 100644 --- a/drivers/acpi/acpica/exsystem.c +++ b/drivers/acpi/acpica/exsystem.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c index 8ad9314..eb6798b 100644 --- a/drivers/acpi/acpica/exutils.c +++ b/drivers/acpi/acpica/exutils.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -435,4 +435,29 @@ void acpi_ex_integer_to_string(char *out_string, u64 value) } } +/******************************************************************************* + * + * FUNCTION: acpi_is_valid_space_id + * + * PARAMETERS: space_id - ID to be validated + * + * RETURN: TRUE if valid/supported ID. + * + * DESCRIPTION: Validate an operation region space_iD. 
+ * + ******************************************************************************/ + +u8 acpi_is_valid_space_id(u8 space_id) +{ + + if ((space_id >= ACPI_NUM_PREDEFINED_REGIONS) && + (space_id < ACPI_USER_REGION_BEGIN) && + (space_id != ACPI_ADR_SPACE_DATA_TABLE) && + (space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { + return (FALSE); + } + + return (TRUE); +} + #endif diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c index fc380d3..d21ec5f 100644 --- a/drivers/acpi/acpica/hwacpi.c +++ b/drivers/acpi/acpica/hwacpi.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index f610d88..1a6894a 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c index 050fd22..1455ddc 100644 --- a/drivers/acpi/acpica/hwpci.c +++ b/drivers/acpi/acpica/hwpci.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c index cc70f3f..4ea4eeb5 100644 --- a/drivers/acpi/acpica/hwregs.c +++ b/drivers/acpi/acpica/hwregs.c @@ -7,7 +7,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c index d52da30..3c4a922 100644 --- a/drivers/acpi/acpica/hwsleep.c +++ b/drivers/acpi/acpica/hwsleep.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c index 50d21c4..d4973d9 100644 --- a/drivers/acpi/acpica/hwtimer.c +++ b/drivers/acpi/acpica/hwtimer.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c index 5f16058..6e5c43a 100644 --- a/drivers/acpi/acpica/hwvalid.c +++ b/drivers/acpi/acpica/hwvalid.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -134,6 +134,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width) /* Supported widths are 8/16/32 */ if ((bit_width != 8) && (bit_width != 16) && (bit_width != 32)) { + ACPI_ERROR((AE_INFO, + "Bad BitWidth parameter: %8.8X", bit_width)); return AE_BAD_PARAMETER; } diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c index d707756..9d38eb6 100644 --- a/drivers/acpi/acpica/hwxface.c +++ b/drivers/acpi/acpica/hwxface.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c index d93172f..61623f3 100644 --- a/drivers/acpi/acpica/nsaccess.c +++ b/drivers/acpi/acpica/nsaccess.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c index 1d0ef15..7c3d3ceb 100644 --- a/drivers/acpi/acpica/nsalloc.c +++ b/drivers/acpi/acpica/nsalloc.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c index b683cc2..b7f2b3b 100644 --- a/drivers/acpi/acpica/nsdump.c +++ b/drivers/acpi/acpica/nsdump.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c index 2ed294b..30ea5bc 100644 --- a/drivers/acpi/acpica/nsdumpdv.c +++ b/drivers/acpi/acpica/nsdumpdv.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c index c1bd02b..f375cb8 100644 --- a/drivers/acpi/acpica/nseval.c +++ b/drivers/acpi/acpica/nseval.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c index fd7c638..9d84ec2 100644 --- a/drivers/acpi/acpica/nsinit.c +++ b/drivers/acpi/acpica/nsinit.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c index 5f7dc69..5cbf15f 100644 --- a/drivers/acpi/acpica/nsload.c +++ b/drivers/acpi/acpica/nsload.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c index d5fa520..b20e7c8 100644 --- a/drivers/acpi/acpica/nsnames.c +++ b/drivers/acpi/acpica/nsnames.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c index 3bb8bf1..dd77a3c 100644 --- a/drivers/acpi/acpica/nsobject.c +++ b/drivers/acpi/acpica/nsobject.c @@ -6,7 +6,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c index b3234fa..ec7ba2d 100644 --- a/drivers/acpi/acpica/nsparse.c +++ b/drivers/acpi/acpica/nsparse.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c index c845c80..bbe46a4 100644 --- a/drivers/acpi/acpica/nspredef.c +++ b/drivers/acpi/acpica/nspredef.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -620,6 +620,7 @@ acpi_ns_check_package(struct acpi_predefined_data *data, case ACPI_PTYPE2_FIXED: case ACPI_PTYPE2_MIN: case ACPI_PTYPE2_COUNT: + case ACPI_PTYPE2_FIX_VAR: /* * These types all return a single Package that consists of a @@ -759,6 +760,34 @@ acpi_ns_check_package_list(struct acpi_predefined_data *data, } break; + case ACPI_PTYPE2_FIX_VAR: + /* + * Each subpackage has a fixed number of elements and an + * optional element + */ + expected_count = + package->ret_info.count1 + package->ret_info.count2; + if (sub_package->package.count < expected_count) { + goto package_too_small; + } + + status = + acpi_ns_check_package_elements(data, sub_elements, + package->ret_info. + object_type1, + package->ret_info. + count1, + package->ret_info. + object_type2, + sub_package->package. + count - + package->ret_info. 
+ count1, 0); + if (ACPI_FAILURE(status)) { + return (status); + } + break; + case ACPI_PTYPE2_FIXED: /* Each sub-package has a fixed length */ diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c index ac7b854..9c35d20 100644 --- a/drivers/acpi/acpica/nsrepair.c +++ b/drivers/acpi/acpica/nsrepair.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -634,6 +634,7 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data, case ACPI_PTYPE2_FIXED: case ACPI_PTYPE2_MIN: case ACPI_PTYPE2_REV_FIXED: + case ACPI_PTYPE2_FIX_VAR: break; default: diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c index 024c4f2..726bc8e 100644 --- a/drivers/acpi/acpica/nsrepair2.c +++ b/drivers/acpi/acpica/nsrepair2.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -467,11 +467,12 @@ acpi_ns_repair_HID(struct acpi_predefined_data *data, } /* - * Copy and uppercase the string. From the ACPI specification: + * Copy and uppercase the string. From the ACPI 5.0 specification: * * A valid PNP ID must be of the form "AAA####" where A is an uppercase * letter and # is a hex digit. A valid ACPI ID must be of the form - * "ACPI####" where # is a hex digit. + * "NNNN####" where N is an uppercase letter or decimal digit, and + * # is a hex digit. */ for (dest = new_string->string.pointer; *source; dest++, source++) { *dest = (char)ACPI_TOUPPER(*source); diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c index 28b0d7a..507043d 100644 --- a/drivers/acpi/acpica/nssearch.c +++ b/drivers/acpi/acpica/nssearch.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c index cb1b104..a535b7a 100644 --- a/drivers/acpi/acpica/nsutils.c +++ b/drivers/acpi/acpica/nsutils.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c index 345f0c3..f69895a 100644 --- a/drivers/acpi/acpica/nswalk.c +++ b/drivers/acpi/acpica/nswalk.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c index e7f016d..71d15f6 100644 --- a/drivers/acpi/acpica/nsxfeval.c +++ b/drivers/acpi/acpica/nsxfeval.c @@ -6,7 +6,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. 
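The _HID repair comment above now cites the ACPI 5.0 wording: a PNP ID is "AAA####" (three uppercase letters, four hex digits) and an ACPI ID is "NNNN####" (four uppercase letters or decimal digits, four hex digits). A standalone restatement of that rule as C, purely illustrative and not ACPICA code:

#include <ctype.h>
#include <string.h>

static int example_is_valid_hid(const char *hid)
{
	size_t len = strlen(hid);
	size_t i;

	if (len != 7 && len != 8) {	/* PNP ID or ACPI ID lengths only */
		return 0;
	}

	/* Prefix: uppercase letters for PNP, letters or digits for ACPI */
	for (i = 0; i < len - 4; i++) {
		if (!isupper((unsigned char)hid[i]) &&
		    !(len == 8 && isdigit((unsigned char)hid[i]))) {
			return 0;
		}
	}

	/* The last four characters must be hex digits */
	for (; i < len; i++) {
		if (!isxdigit((unsigned char)hid[i])) {
			return 0;
		}
	}

	return 1;
}

For example, "PNP0C0A"-style IDs pass, while lowercase or short strings are rejected; the repair code above additionally uppercases the string before this form is ever checked.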
+ * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c index 83bf930..af401c9 100644 --- a/drivers/acpi/acpica/nsxfname.c +++ b/drivers/acpi/acpica/nsxfname.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c index 57e6d82..880a605 100644 --- a/drivers/acpi/acpica/nsxfobj.c +++ b/drivers/acpi/acpica/nsxfobj.c @@ -6,7 +6,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c index e1fad0e..5ac36ab 100644 --- a/drivers/acpi/acpica/psargs.c +++ b/drivers/acpi/acpica/psargs.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -484,34 +484,54 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state, static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state *parser_state) { - u32 aml_offset = (u32) - ACPI_PTR_DIFF(parser_state->aml, - parser_state->aml_start); + u32 aml_offset; union acpi_parse_object *field; + union acpi_parse_object *arg = NULL; u16 opcode; u32 name; + u8 access_type; + u8 access_attribute; + u8 access_length; + u32 pkg_length; + u8 *pkg_end; + u32 buffer_length; ACPI_FUNCTION_TRACE(ps_get_next_field); + aml_offset = + (u32)ACPI_PTR_DIFF(parser_state->aml, parser_state->aml_start); + /* Determine field type */ switch (ACPI_GET8(parser_state->aml)) { - default: + case AML_FIELD_OFFSET_OP: - opcode = AML_INT_NAMEDFIELD_OP; + opcode = AML_INT_RESERVEDFIELD_OP; + parser_state->aml++; break; - case 0x00: + case AML_FIELD_ACCESS_OP: - opcode = AML_INT_RESERVEDFIELD_OP; + opcode = AML_INT_ACCESSFIELD_OP; parser_state->aml++; break; - case 0x01: + case AML_FIELD_CONNECTION_OP: - opcode = AML_INT_ACCESSFIELD_OP; + opcode = AML_INT_CONNECTION_OP; + parser_state->aml++; + break; + + case AML_FIELD_EXT_ACCESS_OP: + + opcode = AML_INT_EXTACCESSFIELD_OP; parser_state->aml++; break; + + default: + + opcode = AML_INT_NAMEDFIELD_OP; + break; } /* Allocate a new field op */ @@ -549,16 +569,111 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state break; case AML_INT_ACCESSFIELD_OP: + case AML_INT_EXTACCESSFIELD_OP: /* * Get access_type and access_attrib and merge into the field Op - * access_type is first operand, access_attribute is second + * access_type is first operand, access_attribute is second. stuff + * these bytes into the node integer value for convenience. 
*/ - field->common.value.integer = - (((u32) ACPI_GET8(parser_state->aml) << 8)); + + /* Get the two bytes (Type/Attribute) */ + + access_type = ACPI_GET8(parser_state->aml); parser_state->aml++; - field->common.value.integer |= ACPI_GET8(parser_state->aml); + access_attribute = ACPI_GET8(parser_state->aml); parser_state->aml++; + + field->common.value.integer = (u8)access_type; + field->common.value.integer |= (u16)(access_attribute << 8); + + /* This opcode has a third byte, access_length */ + + if (opcode == AML_INT_EXTACCESSFIELD_OP) { + access_length = ACPI_GET8(parser_state->aml); + parser_state->aml++; + + field->common.value.integer |= + (u32)(access_length << 16); + } + break; + + case AML_INT_CONNECTION_OP: + + /* + * Argument for Connection operator can be either a Buffer + * (resource descriptor), or a name_string. + */ + if (ACPI_GET8(parser_state->aml) == AML_BUFFER_OP) { + parser_state->aml++; + + pkg_end = parser_state->aml; + pkg_length = + acpi_ps_get_next_package_length(parser_state); + pkg_end += pkg_length; + + if (parser_state->aml < pkg_end) { + + /* Non-empty list */ + + arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP); + if (!arg) { + return_PTR(NULL); + } + + /* Get the actual buffer length argument */ + + opcode = ACPI_GET8(parser_state->aml); + parser_state->aml++; + + switch (opcode) { + case AML_BYTE_OP: /* AML_BYTEDATA_ARG */ + buffer_length = + ACPI_GET8(parser_state->aml); + parser_state->aml += 1; + break; + + case AML_WORD_OP: /* AML_WORDDATA_ARG */ + buffer_length = + ACPI_GET16(parser_state->aml); + parser_state->aml += 2; + break; + + case AML_DWORD_OP: /* AML_DWORDATA_ARG */ + buffer_length = + ACPI_GET32(parser_state->aml); + parser_state->aml += 4; + break; + + default: + buffer_length = 0; + break; + } + + /* Fill in bytelist data */ + + arg->named.value.size = buffer_length; + arg->named.data = parser_state->aml; + } + + /* Skip to End of byte data */ + + parser_state->aml = pkg_end; + } else { + arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP); + if (!arg) { + return_PTR(NULL); + } + + /* Get the Namestring argument */ + + arg->common.value.name = + acpi_ps_get_next_namestring(parser_state); + } + + /* Link the buffer/namestring to parent (CONNECTION_OP) */ + + acpi_ps_append_arg(field, arg); break; default: diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c index 01dd70d..9547ad8 100644 --- a/drivers/acpi/acpica/psloop.c +++ b/drivers/acpi/acpica/psloop.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c index bed08de..a0226fd 100644 --- a/drivers/acpi/acpica/psopcode.c +++ b/drivers/acpi/acpica/psopcode.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
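The AccessField/ExtAccessField parsing above packs up to three bytes into the parse node's integer value: AccessType in byte 0, AccessAttribute in byte 1 and, for ExtAccessField only, AccessLength in byte 2. A small helper showing that layout explicitly (hypothetical name, same encoding as the code above):

static u32 example_pack_field_access(u8 access_type, u8 access_attribute,
				     u8 access_length)
{
	/* byte 0 = type, byte 1 = attribute, byte 2 = extended access length */
	return (u32)access_type |
	       ((u32)access_attribute << 8) |
	       ((u32)access_length << 16);
}

The dispatcher/field-prep code is then expected to unpack the same three bytes when it builds the field object.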
* * Redistribution and use in source and binary forms, with or without @@ -638,7 +638,16 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { /* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R, - AML_FLAGS_EXEC_0A_0T_1R) + AML_FLAGS_EXEC_0A_0T_1R), + +/* ACPI 5.0 opcodes */ + +/* 7F */ ACPI_OP("-ConnectField-", ARGP_CONNECTFIELD_OP, + ARGI_CONNECTFIELD_OP, ACPI_TYPE_ANY, + AML_CLASS_INTERNAL, AML_TYPE_BOGUS, AML_HAS_ARGS), +/* 80 */ ACPI_OP("-ExtAccessField-", ARGP_CONNECTFIELD_OP, + ARGI_CONNECTFIELD_OP, ACPI_TYPE_ANY, + AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0) /*! [End] no source code translation !*/ }; @@ -657,7 +666,7 @@ static const u8 acpi_gbl_short_op_index[256] = { /* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX, /* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, 0x7D, -/* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, +/* 0x38 */ 0x7F, 0x80, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, /* 0x40 */ _UNK, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, /* 0x48 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, /* 0x50 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c index 9bb0cbd..2ff9c35 100644 --- a/drivers/acpi/acpica/psparse.c +++ b/drivers/acpi/acpica/psparse.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c index a5faa13..c872aa4 100644 --- a/drivers/acpi/acpica/psscope.c +++ b/drivers/acpi/acpica/psscope.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c index f1464c0..2b03cdb 100644 --- a/drivers/acpi/acpica/pstree.c +++ b/drivers/acpi/acpica/pstree.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -74,6 +74,12 @@ union acpi_parse_object *acpi_ps_get_arg(union acpi_parse_object *op, u32 argn) ACPI_FUNCTION_ENTRY(); +/* + if (Op->Common.aml_opcode == AML_INT_CONNECTION_OP) + { + return (Op->Common.Value.Arg); + } +*/ /* Get the info structure for this opcode */ op_info = acpi_ps_get_opcode_info(op->common.aml_opcode); diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c index 7eda785..13bb131 100644 --- a/drivers/acpi/acpica/psutils.c +++ b/drivers/acpi/acpica/psutils.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c index 3312d63..ab96cf4 100644 --- a/drivers/acpi/acpica/pswalk.c +++ b/drivers/acpi/acpica/pswalk.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c index 8086805..9d98c5f 100644 --- a/drivers/acpi/acpica/psxface.c +++ b/drivers/acpi/acpica/psxface.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c index 9e66f90..a030565 100644 --- a/drivers/acpi/acpica/rsaddr.c +++ b/drivers/acpi/acpica/rsaddr.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c index 3a8a89e..3c6df4b 100644 --- a/drivers/acpi/acpica/rscalc.c +++ b/drivers/acpi/acpica/rscalc.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -313,6 +313,38 @@ acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed) resource_source)); break; + case ACPI_RESOURCE_TYPE_GPIO: + + total_size = + (acpi_rs_length) (total_size + + (resource->data.gpio. + pin_table_length * 2) + + resource->data.gpio. + resource_source.string_length + + resource->data.gpio. + vendor_length); + + break; + + case ACPI_RESOURCE_TYPE_SERIAL_BUS: + + total_size = + acpi_gbl_aml_resource_serial_bus_sizes[resource-> + data. + common_serial_bus. + type]; + + total_size = (acpi_rs_length) (total_size + + resource->data. + i2c_serial_bus. + resource_source. + string_length + + resource->data. + i2c_serial_bus. + vendor_length); + + break; + default: break; } @@ -362,10 +394,11 @@ acpi_rs_get_list_length(u8 * aml_buffer, u32 extra_struct_bytes; u8 resource_index; u8 minimum_aml_resource_length; + union aml_resource *aml_resource; ACPI_FUNCTION_TRACE(rs_get_list_length); - *size_needed = 0; + *size_needed = ACPI_RS_SIZE_MIN; /* Minimum size is one end_tag */ end_aml = aml_buffer + aml_buffer_length; /* Walk the list of AML resource descriptors */ @@ -376,9 +409,15 @@ acpi_rs_get_list_length(u8 * aml_buffer, status = acpi_ut_validate_resource(aml_buffer, &resource_index); if (ACPI_FAILURE(status)) { + /* + * Exit on failure. Cannot continue because the descriptor length + * may be bogus also. 
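The ACPI_RESOURCE_TYPE_GPIO case added to acpi_rs_get_aml_length() above sums the variable-length parts of a GpioIo()/GpioInt() descriptor on top of its fixed AML size: two bytes per pin-table entry, the ResourceSource string, and any vendor data. A small arithmetic sketch, with illustrative names and numbers:

static u32 example_gpio_extra_aml_bytes(u16 pin_count,
					u16 source_string_length,
					u16 vendor_length)
{
	/* each pin-table entry is a 16-bit pin number */
	return ((u32)pin_count * 2) + source_string_length + vendor_length;
}

/* e.g. 2 pins, a 10-byte ResourceSource and no vendor data add
 * 4 + 10 + 0 = 14 bytes beyond the fixed descriptor size. */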
+ */ return_ACPI_STATUS(status); } + aml_resource = (void *)aml_buffer; + /* Get the resource length and base (minimum) AML size */ resource_length = acpi_ut_get_resource_length(aml_buffer); @@ -422,10 +461,8 @@ acpi_rs_get_list_length(u8 * aml_buffer, case ACPI_RESOURCE_NAME_END_TAG: /* - * End Tag: - * This is the normal exit, add size of end_tag + * End Tag: This is the normal exit */ - *size_needed += ACPI_RS_SIZE_MIN; return_ACPI_STATUS(AE_OK); case ACPI_RESOURCE_NAME_ADDRESS32: @@ -457,6 +494,33 @@ acpi_rs_get_list_length(u8 * aml_buffer, minimum_aml_resource_length); break; + case ACPI_RESOURCE_NAME_GPIO: + + /* Vendor data is optional */ + + if (aml_resource->gpio.vendor_length) { + extra_struct_bytes += + aml_resource->gpio.vendor_offset - + aml_resource->gpio.pin_table_offset + + aml_resource->gpio.vendor_length; + } else { + extra_struct_bytes += + aml_resource->large_header.resource_length + + sizeof(struct aml_resource_large_header) - + aml_resource->gpio.pin_table_offset; + } + break; + + case ACPI_RESOURCE_NAME_SERIAL_BUS: + + minimum_aml_resource_length = + acpi_gbl_resource_aml_serial_bus_sizes + [aml_resource->common_serial_bus.type]; + extra_struct_bytes += + aml_resource->common_serial_bus.resource_length - + minimum_aml_resource_length; + break; + default: break; } @@ -467,9 +531,18 @@ acpi_rs_get_list_length(u8 * aml_buffer, * Important: Round the size up for the appropriate alignment. This * is a requirement on IA64. */ - buffer_size = acpi_gbl_resource_struct_sizes[resource_index] + - extra_struct_bytes; - buffer_size = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size); + if (acpi_ut_get_resource_type(aml_buffer) == + ACPI_RESOURCE_NAME_SERIAL_BUS) { + buffer_size = + acpi_gbl_resource_struct_serial_bus_sizes + [aml_resource->common_serial_bus.type] + + extra_struct_bytes; + } else { + buffer_size = + acpi_gbl_resource_struct_sizes[resource_index] + + extra_struct_bytes; + } + buffer_size = (u32)ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size); *size_needed += buffer_size; diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c index 4ce6e11..46d6eb3 100644 --- a/drivers/acpi/acpica/rscreate.c +++ b/drivers/acpi/acpica/rscreate.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -51,6 +51,70 @@ ACPI_MODULE_NAME("rscreate") /******************************************************************************* * + * FUNCTION: acpi_buffer_to_resource + * + * PARAMETERS: aml_buffer - Pointer to the resource byte stream + * aml_buffer_length - Length of the aml_buffer + * resource_ptr - Where the converted resource is returned + * + * RETURN: Status + * + * DESCRIPTION: Convert a raw AML buffer to a resource list + * + ******************************************************************************/ +acpi_status +acpi_buffer_to_resource(u8 *aml_buffer, + u16 aml_buffer_length, + struct acpi_resource **resource_ptr) +{ + acpi_status status; + acpi_size list_size_needed; + void *resource; + void *current_resource_ptr; + + /* + * Note: we allow AE_AML_NO_RESOURCE_END_TAG, since an end tag + * is not required here. 
+ */ + + /* Get the required length for the converted resource */ + + status = acpi_rs_get_list_length(aml_buffer, aml_buffer_length, + &list_size_needed); + if (status == AE_AML_NO_RESOURCE_END_TAG) { + status = AE_OK; + } + if (ACPI_FAILURE(status)) { + return (status); + } + + /* Allocate a buffer for the converted resource */ + + resource = ACPI_ALLOCATE_ZEROED(list_size_needed); + current_resource_ptr = resource; + if (!resource) { + return (AE_NO_MEMORY); + } + + /* Perform the AML-to-Resource conversion */ + + status = acpi_ut_walk_aml_resources(aml_buffer, aml_buffer_length, + acpi_rs_convert_aml_to_resources, + ¤t_resource_ptr); + if (status == AE_AML_NO_RESOURCE_END_TAG) { + status = AE_OK; + } + if (ACPI_FAILURE(status)) { + ACPI_FREE(resource); + } else { + *resource_ptr = resource; + } + + return (status); +} + +/******************************************************************************* + * * FUNCTION: acpi_rs_create_resource_list * * PARAMETERS: aml_buffer - Pointer to the resource byte stream @@ -66,9 +130,10 @@ ACPI_MODULE_NAME("rscreate") * of device resources. * ******************************************************************************/ + acpi_status acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer, - struct acpi_buffer *output_buffer) + struct acpi_buffer * output_buffer) { acpi_status status; diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c index 33db752..b4c5811 100644 --- a/drivers/acpi/acpica/rsdump.c +++ b/drivers/acpi/acpica/rsdump.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -61,11 +61,13 @@ static void acpi_rs_out_integer64(char *title, u64 value); static void acpi_rs_out_title(char *title); -static void acpi_rs_dump_byte_list(u16 length, u8 * data); +static void acpi_rs_dump_byte_list(u16 length, u8 *data); -static void acpi_rs_dump_dword_list(u8 length, u32 * data); +static void acpi_rs_dump_word_list(u16 length, u16 *data); -static void acpi_rs_dump_short_byte_list(u8 length, u8 * data); +static void acpi_rs_dump_dword_list(u8 length, u32 *data); + +static void acpi_rs_dump_short_byte_list(u8 length, u8 *data); static void acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source); @@ -309,6 +311,125 @@ struct acpi_rsdump_info acpi_rs_dump_generic_reg[6] = { {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(generic_reg.address), "Address", NULL} }; +struct acpi_rsdump_info acpi_rs_dump_gpio[16] = { + {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_gpio), "GPIO", NULL}, + {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.revision_id), "RevisionId", NULL}, + {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.connection_type), + "ConnectionType", acpi_gbl_ct_decode}, + {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.producer_consumer), + "ProducerConsumer", acpi_gbl_consume_decode}, + {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.pin_config), "PinConfig", + acpi_gbl_ppc_decode}, + {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.sharable), "Sharable", + acpi_gbl_shr_decode}, + {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.io_restriction), + "IoRestriction", acpi_gbl_ior_decode}, + {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.triggering), "Triggering", + acpi_gbl_he_decode}, + {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.polarity), "Polarity", + acpi_gbl_ll_decode}, + {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.drive_strength), 
"DriveStrength", + NULL}, + {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.debounce_timeout), + "DebounceTimeout", NULL}, + {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(gpio.resource_source), + "ResourceSource", NULL}, + {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.pin_table_length), + "PinTableLength", NULL}, + {ACPI_RSD_WORDLIST, ACPI_RSD_OFFSET(gpio.pin_table), "PinTable", NULL}, + {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.vendor_length), "VendorLength", + NULL}, + {ACPI_RSD_SHORTLISTX, ACPI_RSD_OFFSET(gpio.vendor_data), "VendorData", + NULL}, +}; + +struct acpi_rsdump_info acpi_rs_dump_fixed_dma[4] = { + {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_dma), + "FixedDma", NULL}, + {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.request_lines), + "RequestLines", NULL}, + {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.channels), "Channels", + NULL}, + {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(fixed_dma.width), "TransferWidth", + acpi_gbl_dts_decode}, +}; + +#define ACPI_RS_DUMP_COMMON_SERIAL_BUS \ + {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.revision_id), "RevisionId", NULL}, \ + {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type), "Type", acpi_gbl_sbt_decode}, \ + {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.producer_consumer), "ProducerConsumer", acpi_gbl_consume_decode}, \ + {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.slave_mode), "SlaveMode", acpi_gbl_sm_decode}, \ + {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type_revision_id), "TypeRevisionId", NULL}, \ + {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.type_data_length), "TypeDataLength", NULL}, \ + {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET (common_serial_bus.resource_source), "ResourceSource", NULL}, \ + {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.vendor_length), "VendorLength", NULL}, \ + {ACPI_RSD_SHORTLISTX,ACPI_RSD_OFFSET (common_serial_bus.vendor_data), "VendorData", NULL}, + +struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[10] = { + {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_common_serial_bus), + "Common Serial Bus", NULL}, + ACPI_RS_DUMP_COMMON_SERIAL_BUS +}; + +struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[13] = { + {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_i2c_serial_bus), + "I2C Serial Bus", NULL}, + ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG, + ACPI_RSD_OFFSET(i2c_serial_bus. + access_mode), + "AccessMode", acpi_gbl_am_decode}, + {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(i2c_serial_bus.connection_speed), + "ConnectionSpeed", NULL}, + {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(i2c_serial_bus.slave_address), + "SlaveAddress", NULL}, +}; + +struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[17] = { + {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_spi_serial_bus), + "Spi Serial Bus", NULL}, + ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG, + ACPI_RSD_OFFSET(spi_serial_bus. 
+ wire_mode), "WireMode", + acpi_gbl_wm_decode}, + {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(spi_serial_bus.device_polarity), + "DevicePolarity", acpi_gbl_dp_decode}, + {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.data_bit_length), + "DataBitLength", NULL}, + {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_phase), + "ClockPhase", acpi_gbl_cph_decode}, + {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_polarity), + "ClockPolarity", acpi_gbl_cpo_decode}, + {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(spi_serial_bus.device_selection), + "DeviceSelection", NULL}, + {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(spi_serial_bus.connection_speed), + "ConnectionSpeed", NULL}, +}; + +struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[19] = { + {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_uart_serial_bus), + "Uart Serial Bus", NULL}, + ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_2BITFLAG, + ACPI_RSD_OFFSET(uart_serial_bus. + flow_control), + "FlowControl", acpi_gbl_fc_decode}, + {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.stop_bits), + "StopBits", acpi_gbl_sb_decode}, + {ACPI_RSD_3BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.data_bits), + "DataBits", acpi_gbl_bpb_decode}, + {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.endian), "Endian", + acpi_gbl_ed_decode}, + {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.parity), "Parity", + acpi_gbl_pt_decode}, + {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.lines_enabled), + "LinesEnabled", NULL}, + {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.rx_fifo_size), + "RxFifoSize", NULL}, + {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.tx_fifo_size), + "TxFifoSize", NULL}, + {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(uart_serial_bus.default_baud_rate), + "ConnectionSpeed", NULL}, +}; + /* * Tables used for common address descriptor flag fields */ @@ -413,7 +534,14 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table) /* Data items, 8/16/32/64 bit */ case ACPI_RSD_UINT8: - acpi_rs_out_integer8(name, ACPI_GET8(target)); + if (table->pointer) { + acpi_rs_out_string(name, ACPI_CAST_PTR(char, + table-> + pointer + [*target])); + } else { + acpi_rs_out_integer8(name, ACPI_GET8(target)); + } break; case ACPI_RSD_UINT16: @@ -444,6 +572,13 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table) 0x03])); break; + case ACPI_RSD_3BITFLAG: + acpi_rs_out_string(name, ACPI_CAST_PTR(char, + table-> + pointer[*target & + 0x07])); + break; + case ACPI_RSD_SHORTLIST: /* * Short byte list (single line output) for DMA and IRQ resources @@ -456,6 +591,20 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table) } break; + case ACPI_RSD_SHORTLISTX: + /* + * Short byte list (single line output) for GPIO vendor data + * Note: The list length is obtained from the previous table entry + */ + if (previous_target) { + acpi_rs_out_title(name); + acpi_rs_dump_short_byte_list(*previous_target, + * + (ACPI_CAST_INDIRECT_PTR + (u8, target))); + } + break; + case ACPI_RSD_LONGLIST: /* * Long byte list for Vendor resource data @@ -480,6 +629,18 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table) } break; + case ACPI_RSD_WORDLIST: + /* + * Word list for GPIO Pin Table + * Note: The list length is obtained from the previous table entry + */ + if (previous_target) { + acpi_rs_dump_word_list(*previous_target, + *(ACPI_CAST_INDIRECT_PTR + (u16, target))); + } + break; + case ACPI_RSD_ADDRESS: /* * Common flags for all Address resources @@ -627,14 +788,20 @@ void acpi_rs_dump_resource_list(struct acpi_resource *resource_list) 
/* Dump the resource descriptor */ - acpi_rs_dump_descriptor(&resource_list->data, - acpi_gbl_dump_resource_dispatch[type]); + if (type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { + acpi_rs_dump_descriptor(&resource_list->data, + acpi_gbl_dump_serial_bus_dispatch + [resource_list->data. + common_serial_bus.type]); + } else { + acpi_rs_dump_descriptor(&resource_list->data, + acpi_gbl_dump_resource_dispatch + [type]); + } /* Point to the next resource structure */ - resource_list = - ACPI_ADD_PTR(struct acpi_resource, resource_list, - resource_list->length); + resource_list = ACPI_NEXT_RESOURCE(resource_list); /* Exit when END_TAG descriptor is reached */ @@ -768,4 +935,13 @@ static void acpi_rs_dump_dword_list(u8 length, u32 * data) } } +static void acpi_rs_dump_word_list(u16 length, u16 *data) +{ + u16 i; + + for (i = 0; i < length; i++) { + acpi_os_printf("%25s%2.2X : %4.4X\n", "Word", i, data[i]); + } +} + #endif diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c index f9ea608..a9fa515 100644 --- a/drivers/acpi/acpica/rsinfo.c +++ b/drivers/acpi/acpica/rsinfo.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -76,7 +76,10 @@ struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[] = { acpi_rs_convert_address64, /* 0x0D, ACPI_RESOURCE_TYPE_ADDRESS64 */ acpi_rs_convert_ext_address64, /* 0x0E, ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */ acpi_rs_convert_ext_irq, /* 0x0F, ACPI_RESOURCE_TYPE_EXTENDED_IRQ */ - acpi_rs_convert_generic_reg /* 0x10, ACPI_RESOURCE_TYPE_GENERIC_REGISTER */ + acpi_rs_convert_generic_reg, /* 0x10, ACPI_RESOURCE_TYPE_GENERIC_REGISTER */ + acpi_rs_convert_gpio, /* 0x11, ACPI_RESOURCE_TYPE_GPIO */ + acpi_rs_convert_fixed_dma, /* 0x12, ACPI_RESOURCE_TYPE_FIXED_DMA */ + NULL, /* 0x13, ACPI_RESOURCE_TYPE_SERIAL_BUS - Use subtype table below */ }; /* Dispatch tables for AML-to-resource (Get Resource) conversion functions */ @@ -94,7 +97,7 @@ struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[] = { acpi_rs_convert_end_dpf, /* 0x07, ACPI_RESOURCE_NAME_END_DEPENDENT */ acpi_rs_convert_io, /* 0x08, ACPI_RESOURCE_NAME_IO */ acpi_rs_convert_fixed_io, /* 0x09, ACPI_RESOURCE_NAME_FIXED_IO */ - NULL, /* 0x0A, Reserved */ + acpi_rs_convert_fixed_dma, /* 0x0A, ACPI_RESOURCE_NAME_FIXED_DMA */ NULL, /* 0x0B, Reserved */ NULL, /* 0x0C, Reserved */ NULL, /* 0x0D, Reserved */ @@ -114,7 +117,19 @@ struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[] = { acpi_rs_convert_address16, /* 0x08, ACPI_RESOURCE_NAME_ADDRESS16 */ acpi_rs_convert_ext_irq, /* 0x09, ACPI_RESOURCE_NAME_EXTENDED_IRQ */ acpi_rs_convert_address64, /* 0x0A, ACPI_RESOURCE_NAME_ADDRESS64 */ - acpi_rs_convert_ext_address64 /* 0x0B, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 */ + acpi_rs_convert_ext_address64, /* 0x0B, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 */ + acpi_rs_convert_gpio, /* 0x0C, ACPI_RESOURCE_NAME_GPIO */ + NULL, /* 0x0D, Reserved */ + NULL, /* 0x0E, ACPI_RESOURCE_NAME_SERIAL_BUS - Use subtype table below */ +}; + +/* Subtype table for serial_bus -- I2C, SPI, and UART */ + +struct acpi_rsconvert_info *acpi_gbl_convert_resource_serial_bus_dispatch[] = { + NULL, + acpi_rs_convert_i2c_serial_bus, + acpi_rs_convert_spi_serial_bus, + acpi_rs_convert_uart_serial_bus, }; #ifdef ACPI_FUTURE_USAGE @@ -140,6 +155,16 @@ struct acpi_rsdump_info 
*acpi_gbl_dump_resource_dispatch[] = { acpi_rs_dump_ext_address64, /* ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */ acpi_rs_dump_ext_irq, /* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */ acpi_rs_dump_generic_reg, /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */ + acpi_rs_dump_gpio, /* ACPI_RESOURCE_TYPE_GPIO */ + acpi_rs_dump_fixed_dma, /* ACPI_RESOURCE_TYPE_FIXED_DMA */ + NULL, /* ACPI_RESOURCE_TYPE_SERIAL_BUS */ +}; + +struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[] = { + NULL, + acpi_rs_dump_i2c_serial_bus, /* AML_RESOURCE_I2C_BUS_TYPE */ + acpi_rs_dump_spi_serial_bus, /* AML_RESOURCE_SPI_BUS_TYPE */ + acpi_rs_dump_uart_serial_bus, /* AML_RESOURCE_UART_BUS_TYPE */ }; #endif @@ -166,7 +191,10 @@ const u8 acpi_gbl_aml_resource_sizes[] = { sizeof(struct aml_resource_address64), /* ACPI_RESOURCE_TYPE_ADDRESS64 */ sizeof(struct aml_resource_extended_address64), /*ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */ sizeof(struct aml_resource_extended_irq), /* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */ - sizeof(struct aml_resource_generic_register) /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */ + sizeof(struct aml_resource_generic_register), /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */ + sizeof(struct aml_resource_gpio), /* ACPI_RESOURCE_TYPE_GPIO */ + sizeof(struct aml_resource_fixed_dma), /* ACPI_RESOURCE_TYPE_FIXED_DMA */ + sizeof(struct aml_resource_common_serialbus), /* ACPI_RESOURCE_TYPE_SERIAL_BUS */ }; const u8 acpi_gbl_resource_struct_sizes[] = { @@ -182,7 +210,7 @@ const u8 acpi_gbl_resource_struct_sizes[] = { ACPI_RS_SIZE_MIN, ACPI_RS_SIZE(struct acpi_resource_io), ACPI_RS_SIZE(struct acpi_resource_fixed_io), - 0, + ACPI_RS_SIZE(struct acpi_resource_fixed_dma), 0, 0, 0, @@ -202,5 +230,21 @@ const u8 acpi_gbl_resource_struct_sizes[] = { ACPI_RS_SIZE(struct acpi_resource_address16), ACPI_RS_SIZE(struct acpi_resource_extended_irq), ACPI_RS_SIZE(struct acpi_resource_address64), - ACPI_RS_SIZE(struct acpi_resource_extended_address64) + ACPI_RS_SIZE(struct acpi_resource_extended_address64), + ACPI_RS_SIZE(struct acpi_resource_gpio), + ACPI_RS_SIZE(struct acpi_resource_common_serialbus) +}; + +const u8 acpi_gbl_aml_resource_serial_bus_sizes[] = { + 0, + sizeof(struct aml_resource_i2c_serialbus), + sizeof(struct aml_resource_spi_serialbus), + sizeof(struct aml_resource_uart_serialbus), +}; + +const u8 acpi_gbl_resource_struct_serial_bus_sizes[] = { + 0, + ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus), + ACPI_RS_SIZE(struct acpi_resource_spi_serialbus), + ACPI_RS_SIZE(struct acpi_resource_uart_serialbus), }; diff --git a/drivers/acpi/acpica/rsio.c b/drivers/acpi/acpica/rsio.c index 0c7efef..f6a0810 100644 --- a/drivers/acpi/acpica/rsio.c +++ b/drivers/acpi/acpica/rsio.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c index 50b8ad2..e23a9ec 100644 --- a/drivers/acpi/acpica/rsirq.c +++ b/drivers/acpi/acpica/rsirq.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
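As an illustration of the new acpi_buffer_to_resource() interface added in rscreate.c above, a minimal caller might look like the following sketch: it converts a raw AML resource template into a struct acpi_resource list and walks it with the ACPI_NEXT_RESOURCE() helper used elsewhere in this series. Error handling is abbreviated, the example_* name is hypothetical, and the walk assumes the template is terminated by an EndTag descriptor.

#include <acpi/acpi.h>

static acpi_status example_walk_template(u8 *aml, u16 aml_length)
{
	struct acpi_resource *resource;
	struct acpi_resource *entry;
	acpi_status status;

	/* Convert the raw byte stream; a missing EndTag is tolerated */

	status = acpi_buffer_to_resource(aml, aml_length, &resource);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	for (entry = resource; entry->type != ACPI_RESOURCE_TYPE_END_TAG;
	     entry = ACPI_NEXT_RESOURCE(entry)) {
		/* entry->type selects which member of entry->data is valid */
	}

	ACPI_FREE(resource);
	return (AE_OK);
}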
* * Redistribution and use in source and binary forms, with or without @@ -264,3 +264,34 @@ struct acpi_rsconvert_info acpi_rs_convert_dma[6] = { AML_OFFSET(dma.dma_channel_mask), ACPI_RS_OFFSET(data.dma.channel_count)} }; + +/******************************************************************************* + * + * acpi_rs_convert_fixed_dma + * + ******************************************************************************/ + +struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[4] = { + {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_FIXED_DMA, + ACPI_RS_SIZE(struct acpi_resource_fixed_dma), + ACPI_RSC_TABLE_SIZE(acpi_rs_convert_fixed_dma)}, + + {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_FIXED_DMA, + sizeof(struct aml_resource_fixed_dma), + 0}, + + /* + * These fields are contiguous in both the source and destination: + * request_lines + * Channels + */ + + {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.fixed_dma.request_lines), + AML_OFFSET(fixed_dma.request_lines), + 2}, + + {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.fixed_dma.width), + AML_OFFSET(fixed_dma.width), + 1}, + +}; diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c index 1bfcef7..9be129f 100644 --- a/drivers/acpi/acpica/rslist.c +++ b/drivers/acpi/acpica/rslist.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -70,6 +70,8 @@ acpi_rs_convert_aml_to_resources(u8 * aml, struct acpi_resource **resource_ptr = ACPI_CAST_INDIRECT_PTR(struct acpi_resource, context); struct acpi_resource *resource; + union aml_resource *aml_resource; + struct acpi_rsconvert_info *conversion_table; acpi_status status; ACPI_FUNCTION_TRACE(rs_convert_aml_to_resources); @@ -84,14 +86,37 @@ acpi_rs_convert_aml_to_resources(u8 * aml, "Misaligned resource pointer %p", resource)); } + /* Get the appropriate conversion info table */ + + aml_resource = ACPI_CAST_PTR(union aml_resource, aml); + if (acpi_ut_get_resource_type(aml) == ACPI_RESOURCE_NAME_SERIAL_BUS) { + if (aml_resource->common_serial_bus.type > + AML_RESOURCE_MAX_SERIALBUSTYPE) { + conversion_table = NULL; + } else { + /* This is an I2C, SPI, or UART serial_bus descriptor */ + + conversion_table = + acpi_gbl_convert_resource_serial_bus_dispatch + [aml_resource->common_serial_bus.type]; + } + } else { + conversion_table = + acpi_gbl_get_resource_dispatch[resource_index]; + } + + if (!conversion_table) { + ACPI_ERROR((AE_INFO, + "Invalid/unsupported resource descriptor: Type 0x%2.2X", + resource_index)); + return (AE_AML_INVALID_RESOURCE_TYPE); + } + /* Convert the AML byte stream resource to a local resource struct */ status = - acpi_rs_convert_aml_to_resource(resource, - ACPI_CAST_PTR(union aml_resource, - aml), - acpi_gbl_get_resource_dispatch - [resource_index]); + acpi_rs_convert_aml_to_resource(resource, aml_resource, + conversion_table); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Could not convert AML resource (Type 0x%X)", @@ -106,7 +131,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml, /* Point to the next structure in the output buffer */ - *resource_ptr = ACPI_ADD_PTR(void, resource, resource->length); + *resource_ptr = ACPI_NEXT_RESOURCE(resource); return_ACPI_STATUS(AE_OK); } @@ -135,6 +160,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource, { u8 *aml = output_buffer; u8 *end_aml = output_buffer + aml_size_needed; + struct 
acpi_rsconvert_info *conversion_table; acpi_status status; ACPI_FUNCTION_TRACE(rs_convert_resources_to_aml); @@ -154,11 +180,34 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource, /* Perform the conversion */ - status = acpi_rs_convert_resource_to_aml(resource, ACPI_CAST_PTR(union - aml_resource, - aml), - acpi_gbl_set_resource_dispatch - [resource->type]); + if (resource->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { + if (resource->data.common_serial_bus.type > + AML_RESOURCE_MAX_SERIALBUSTYPE) { + conversion_table = NULL; + } else { + /* This is an I2C, SPI, or UART serial_bus descriptor */ + + conversion_table = + acpi_gbl_convert_resource_serial_bus_dispatch + [resource->data.common_serial_bus.type]; + } + } else { + conversion_table = + acpi_gbl_set_resource_dispatch[resource->type]; + } + + if (!conversion_table) { + ACPI_ERROR((AE_INFO, + "Invalid/unsupported resource descriptor: Type 0x%2.2X", + resource->type)); + return (AE_AML_INVALID_RESOURCE_TYPE); + } + + status = acpi_rs_convert_resource_to_aml(resource, + ACPI_CAST_PTR(union + aml_resource, + aml), + conversion_table); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Could not convert resource (type 0x%X) to AML", @@ -192,9 +241,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource, /* Point to the next input resource descriptor */ - resource = - ACPI_ADD_PTR(struct acpi_resource, resource, - resource->length); + resource = ACPI_NEXT_RESOURCE(resource); } /* Completed buffer, but did not find an end_tag resource descriptor */ diff --git a/drivers/acpi/acpica/rsmemory.c b/drivers/acpi/acpica/rsmemory.c index 7cc6d86..4fd611a 100644 --- a/drivers/acpi/acpica/rsmemory.c +++ b/drivers/acpi/acpica/rsmemory.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c index 410264b..8073b37 100644 --- a/drivers/acpi/acpica/rsmisc.c +++ b/drivers/acpi/acpica/rsmisc.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -83,6 +83,10 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource, ACPI_FUNCTION_TRACE(rs_convert_aml_to_resource); + if (!info) { + return_ACPI_STATUS(AE_BAD_PARAMETER); + } + if (((acpi_size) resource) & 0x3) { /* Each internal resource struct is expected to be 32-bit aligned */ @@ -101,7 +105,6 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource, * table length (# of table entries) */ count = INIT_TABLE_LENGTH(info); - while (count) { /* * Source is the external AML byte stream buffer, @@ -145,6 +148,14 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource, ((ACPI_GET8(source) >> info->value) & 0x03); break; + case ACPI_RSC_3BITFLAG: + /* + * Mask and shift the flag bits + */ + ACPI_SET8(destination) = (u8) + ((ACPI_GET8(source) >> info->value) & 0x07); + break; + case ACPI_RSC_COUNT: item_count = ACPI_GET8(source); @@ -163,6 +174,69 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource, (info->value * (item_count - 1)); break; + case ACPI_RSC_COUNT_GPIO_PIN: + + target = ACPI_ADD_PTR(void, aml, info->value); + item_count = ACPI_GET16(target) - ACPI_GET16(source); + + resource->length = resource->length + item_count; + item_count = item_count / 2; + ACPI_SET16(destination) = item_count; + break; + + case ACPI_RSC_COUNT_GPIO_VEN: + + item_count = ACPI_GET8(source); + ACPI_SET8(destination) = (u8)item_count; + + resource->length = resource->length + + (info->value * item_count); + break; + + case ACPI_RSC_COUNT_GPIO_RES: + + /* + * Vendor data is optional (length/offset may both be zero) + * Examine vendor data length field first + */ + target = ACPI_ADD_PTR(void, aml, (info->value + 2)); + if (ACPI_GET16(target)) { + + /* Use vendor offset to get resource source length */ + + target = ACPI_ADD_PTR(void, aml, info->value); + item_count = + ACPI_GET16(target) - ACPI_GET16(source); + } else { + /* No vendor data to worry about */ + + item_count = aml->large_header.resource_length + + sizeof(struct aml_resource_large_header) - + ACPI_GET16(source); + } + + resource->length = resource->length + item_count; + ACPI_SET16(destination) = item_count; + break; + + case ACPI_RSC_COUNT_SERIAL_VEN: + + item_count = ACPI_GET16(source) - info->value; + + resource->length = resource->length + item_count; + ACPI_SET16(destination) = item_count; + break; + + case ACPI_RSC_COUNT_SERIAL_RES: + + item_count = (aml_resource_length + + sizeof(struct aml_resource_large_header)) + - ACPI_GET16(source) - info->value; + + resource->length = resource->length + item_count; + ACPI_SET16(destination) = item_count; + break; + case ACPI_RSC_LENGTH: resource->length = resource->length + info->value; @@ -183,6 +257,72 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource, info->opcode); break; + case ACPI_RSC_MOVE_GPIO_PIN: + + /* Generate and set the PIN data pointer */ + + target = (char *)ACPI_ADD_PTR(void, resource, + (resource->length - + item_count * 2)); + *(u16 **)destination = ACPI_CAST_PTR(u16, target); + + /* Copy the PIN data */ + + source = ACPI_ADD_PTR(void, aml, ACPI_GET16(source)); + acpi_rs_move_data(target, source, item_count, + info->opcode); + break; + + case ACPI_RSC_MOVE_GPIO_RES: + + /* Generate and set the resource_source string pointer */ + + target = (char *)ACPI_ADD_PTR(void, resource, + (resource->length - + item_count)); + *(u8 **)destination = ACPI_CAST_PTR(u8, target); + + /* Copy the resource_source string */ + + source = ACPI_ADD_PTR(void, aml, 
ACPI_GET16(source)); + acpi_rs_move_data(target, source, item_count, + info->opcode); + break; + + case ACPI_RSC_MOVE_SERIAL_VEN: + + /* Generate and set the Vendor Data pointer */ + + target = (char *)ACPI_ADD_PTR(void, resource, + (resource->length - + item_count)); + *(u8 **)destination = ACPI_CAST_PTR(u8, target); + + /* Copy the Vendor Data */ + + source = ACPI_ADD_PTR(void, aml, info->value); + acpi_rs_move_data(target, source, item_count, + info->opcode); + break; + + case ACPI_RSC_MOVE_SERIAL_RES: + + /* Generate and set the resource_source string pointer */ + + target = (char *)ACPI_ADD_PTR(void, resource, + (resource->length - + item_count)); + *(u8 **)destination = ACPI_CAST_PTR(u8, target); + + /* Copy the resource_source string */ + + source = + ACPI_ADD_PTR(void, aml, + (ACPI_GET16(source) + info->value)); + acpi_rs_move_data(target, source, item_count, + info->opcode); + break; + case ACPI_RSC_SET8: ACPI_MEMSET(destination, info->aml_offset, info->value); @@ -219,13 +359,18 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource, * Optional resource_source (Index and String). This is the more * complicated case used by the Interrupt() macro */ - target = - ACPI_ADD_PTR(char, resource, - info->aml_offset + (item_count * 4)); + target = ACPI_ADD_PTR(char, resource, + info->aml_offset + + (item_count * 4)); resource->length += acpi_rs_get_resource_source(aml_resource_length, - (acpi_rs_length) (((item_count - 1) * sizeof(u32)) + info->value), destination, aml, target); + (acpi_rs_length) + (((item_count - + 1) * sizeof(u32)) + + info->value), + destination, aml, + target); break; case ACPI_RSC_BITMASK: @@ -327,6 +472,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource, { void *source = NULL; void *destination; + char *target; acpi_rsdesc_size aml_length = 0; u8 count; u16 temp16 = 0; @@ -334,6 +480,10 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource, ACPI_FUNCTION_TRACE(rs_convert_resource_to_aml); + if (!info) { + return_ACPI_STATUS(AE_BAD_PARAMETER); + } + /* * First table entry must be ACPI_RSC_INITxxx and must contain the * table length (# of table entries) @@ -383,6 +533,14 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource, ((ACPI_GET8(source) & 0x03) << info->value); break; + case ACPI_RSC_3BITFLAG: + /* + * Mask and shift the flag bits + */ + ACPI_SET8(destination) |= (u8) + ((ACPI_GET8(source) & 0x07) << info->value); + break; + case ACPI_RSC_COUNT: item_count = ACPI_GET8(source); @@ -400,6 +558,63 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource, acpi_rs_set_resource_length(aml_length, aml); break; + case ACPI_RSC_COUNT_GPIO_PIN: + + item_count = ACPI_GET16(source); + ACPI_SET16(destination) = (u16)aml_length; + + aml_length = (u16)(aml_length + item_count * 2); + target = ACPI_ADD_PTR(void, aml, info->value); + ACPI_SET16(target) = (u16)aml_length; + acpi_rs_set_resource_length(aml_length, aml); + break; + + case ACPI_RSC_COUNT_GPIO_VEN: + + item_count = ACPI_GET16(source); + ACPI_SET16(destination) = (u16)item_count; + + aml_length = + (u16)(aml_length + (info->value * item_count)); + acpi_rs_set_resource_length(aml_length, aml); + break; + + case ACPI_RSC_COUNT_GPIO_RES: + + /* Set resource source string length */ + + item_count = ACPI_GET16(source); + ACPI_SET16(destination) = (u16)aml_length; + + /* Compute offset for the Vendor Data */ + + aml_length = (u16)(aml_length + item_count); + target = ACPI_ADD_PTR(void, aml, info->value); + + /* Set vendor offset only if there is vendor data */ 
+ + if (resource->data.gpio.vendor_length) { + ACPI_SET16(target) = (u16)aml_length; + } + + acpi_rs_set_resource_length(aml_length, aml); + break; + + case ACPI_RSC_COUNT_SERIAL_VEN: + + item_count = ACPI_GET16(source); + ACPI_SET16(destination) = item_count + info->value; + aml_length = (u16)(aml_length + item_count); + acpi_rs_set_resource_length(aml_length, aml); + break; + + case ACPI_RSC_COUNT_SERIAL_RES: + + item_count = ACPI_GET16(source); + aml_length = (u16)(aml_length + item_count); + acpi_rs_set_resource_length(aml_length, aml); + break; + case ACPI_RSC_LENGTH: acpi_rs_set_resource_length(info->value, aml); @@ -417,6 +632,48 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource, info->opcode); break; + case ACPI_RSC_MOVE_GPIO_PIN: + + destination = (char *)ACPI_ADD_PTR(void, aml, + ACPI_GET16 + (destination)); + source = *(u16 **)source; + acpi_rs_move_data(destination, source, item_count, + info->opcode); + break; + + case ACPI_RSC_MOVE_GPIO_RES: + + /* Used for both resource_source string and vendor_data */ + + destination = (char *)ACPI_ADD_PTR(void, aml, + ACPI_GET16 + (destination)); + source = *(u8 **)source; + acpi_rs_move_data(destination, source, item_count, + info->opcode); + break; + + case ACPI_RSC_MOVE_SERIAL_VEN: + + destination = (char *)ACPI_ADD_PTR(void, aml, + (aml_length - + item_count)); + source = *(u8 **)source; + acpi_rs_move_data(destination, source, item_count, + info->opcode); + break; + + case ACPI_RSC_MOVE_SERIAL_RES: + + destination = (char *)ACPI_ADD_PTR(void, aml, + (aml_length - + item_count)); + source = *(u8 **)source; + acpi_rs_move_data(destination, source, item_count, + info->opcode); + break; + case ACPI_RSC_ADDRESS: /* Set the Resource Type, General Flags, and Type-Specific Flags */ diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c new file mode 100644 index 0000000..9aa5e68 --- /dev/null +++ b/drivers/acpi/acpica/rsserial.c @@ -0,0 +1,441 @@ +/******************************************************************************* + * + * Module Name: rsserial - GPIO/serial_bus resource descriptors + * + ******************************************************************************/ + +/* + * Copyright (C) 2000 - 2012, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
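The variable-length bookkeeping handled by the new ACPI_RSC_COUNT_GPIO_* and ACPI_RSC_COUNT_SERIAL_* cases above reduces to offset arithmetic: the GPIO pin table occupies the bytes between pin_table_offset and res_source_offset (two bytes per pin), the ResourceSource string runs up to vendor_offset when vendor data is present, and a SerialBus descriptor's vendor bytes are whatever its TypeDataLength carries beyond the subtype minimum. The standalone sketch below works through one hypothetical descriptor of each kind; every numeric value in it is illustrative, not taken from real AML.

#include <stdio.h>

int main(void)
{
	/* Hypothetical GpioInt descriptor offsets, in bytes from the start */
	unsigned int pin_table_offset = 23;
	unsigned int res_source_offset = 31;
	unsigned int vendor_offset = 43;
	unsigned int vendor_length = 4;

	unsigned int pin_count = (res_source_offset - pin_table_offset) / 2;
	unsigned int source_len = vendor_offset - res_source_offset;

	/* Hypothetical I2cSerialBus: TypeDataLength minus the subtype
	 * minimum (6 bytes for I2C, per AML_RESOURCE_I2C_MIN_DATA_LEN) */
	unsigned int type_data_length = 10;
	unsigned int serial_vendor_len = type_data_length - 6;

	printf("GPIO: %u pins, %u-byte ResourceSource, %u vendor bytes\n",
	       pin_count, source_len, vendor_length);
	printf("I2C: %u vendor data bytes\n", serial_vendor_len);
	return 0;
}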
+ * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include <acpi/acpi.h> +#include "accommon.h" +#include "acresrc.h" + +#define _COMPONENT ACPI_RESOURCES +ACPI_MODULE_NAME("rsserial") + +/******************************************************************************* + * + * acpi_rs_convert_gpio + * + ******************************************************************************/ +struct acpi_rsconvert_info acpi_rs_convert_gpio[17] = { + {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_GPIO, + ACPI_RS_SIZE(struct acpi_resource_gpio), + ACPI_RSC_TABLE_SIZE(acpi_rs_convert_gpio)}, + + {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_GPIO, + sizeof(struct aml_resource_gpio), + 0}, + + /* + * These fields are contiguous in both the source and destination: + * revision_id + * connection_type + */ + {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.revision_id), + AML_OFFSET(gpio.revision_id), + 2}, + + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.producer_consumer), + AML_OFFSET(gpio.flags), + 0}, + + {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.sharable), + AML_OFFSET(gpio.int_flags), + 3}, + + {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.io_restriction), + AML_OFFSET(gpio.int_flags), + 0}, + + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.triggering), + AML_OFFSET(gpio.int_flags), + 0}, + + {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.polarity), + AML_OFFSET(gpio.int_flags), + 1}, + + {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.pin_config), + AML_OFFSET(gpio.pin_config), + 1}, + + /* + * These fields are contiguous in both the source and destination: + * drive_strength + * debounce_timeout + */ + {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.gpio.drive_strength), + AML_OFFSET(gpio.drive_strength), + 2}, + + /* Pin Table */ + + {ACPI_RSC_COUNT_GPIO_PIN, ACPI_RS_OFFSET(data.gpio.pin_table_length), + AML_OFFSET(gpio.pin_table_offset), + AML_OFFSET(gpio.res_source_offset)}, + + {ACPI_RSC_MOVE_GPIO_PIN, ACPI_RS_OFFSET(data.gpio.pin_table), + AML_OFFSET(gpio.pin_table_offset), + 0}, + + /* Resource Source */ + + {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.resource_source.index), + AML_OFFSET(gpio.res_source_index), + 1}, + + {ACPI_RSC_COUNT_GPIO_RES, + ACPI_RS_OFFSET(data.gpio.resource_source.string_length), + AML_OFFSET(gpio.res_source_offset), + AML_OFFSET(gpio.vendor_offset)}, + + {ACPI_RSC_MOVE_GPIO_RES, + ACPI_RS_OFFSET(data.gpio.resource_source.string_ptr), + AML_OFFSET(gpio.res_source_offset), + 0}, + + /* Vendor Data */ + + {ACPI_RSC_COUNT_GPIO_VEN, ACPI_RS_OFFSET(data.gpio.vendor_length), + AML_OFFSET(gpio.vendor_length), + 1}, + + {ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.gpio.vendor_data), + AML_OFFSET(gpio.vendor_offset), + 0}, +}; + +/******************************************************************************* + * + * acpi_rs_convert_i2c_serial_bus + * + 
******************************************************************************/ + +struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[16] = { + {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS, + ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus), + ACPI_RSC_TABLE_SIZE(acpi_rs_convert_i2c_serial_bus)}, + + {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS, + sizeof(struct aml_resource_i2c_serialbus), + 0}, + + {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id), + AML_OFFSET(common_serial_bus.revision_id), + 1}, + + {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type), + AML_OFFSET(common_serial_bus.type), + 1}, + + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode), + AML_OFFSET(common_serial_bus.flags), + 0}, + + {ACPI_RSC_1BITFLAG, + ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer), + AML_OFFSET(common_serial_bus.flags), + 1}, + + {ACPI_RSC_MOVE8, + ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id), + AML_OFFSET(common_serial_bus.type_revision_id), + 1}, + + {ACPI_RSC_MOVE16, + ACPI_RS_OFFSET(data.common_serial_bus.type_data_length), + AML_OFFSET(common_serial_bus.type_data_length), + 1}, + + /* Vendor data */ + + {ACPI_RSC_COUNT_SERIAL_VEN, + ACPI_RS_OFFSET(data.common_serial_bus.vendor_length), + AML_OFFSET(common_serial_bus.type_data_length), + AML_RESOURCE_I2C_MIN_DATA_LEN}, + + {ACPI_RSC_MOVE_SERIAL_VEN, + ACPI_RS_OFFSET(data.common_serial_bus.vendor_data), + 0, + sizeof(struct aml_resource_i2c_serialbus)}, + + /* Resource Source */ + + {ACPI_RSC_MOVE8, + ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index), + AML_OFFSET(common_serial_bus.res_source_index), + 1}, + + {ACPI_RSC_COUNT_SERIAL_RES, + ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length), + AML_OFFSET(common_serial_bus.type_data_length), + sizeof(struct aml_resource_common_serialbus)}, + + {ACPI_RSC_MOVE_SERIAL_RES, + ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr), + AML_OFFSET(common_serial_bus.type_data_length), + sizeof(struct aml_resource_common_serialbus)}, + + /* I2C bus type specific */ + + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.i2c_serial_bus.access_mode), + AML_OFFSET(i2c_serial_bus.type_specific_flags), + 0}, + + {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.i2c_serial_bus.connection_speed), + AML_OFFSET(i2c_serial_bus.connection_speed), + 1}, + + {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.i2c_serial_bus.slave_address), + AML_OFFSET(i2c_serial_bus.slave_address), + 1}, +}; + +/******************************************************************************* + * + * acpi_rs_convert_spi_serial_bus + * + ******************************************************************************/ + +struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[20] = { + {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS, + ACPI_RS_SIZE(struct acpi_resource_spi_serialbus), + ACPI_RSC_TABLE_SIZE(acpi_rs_convert_spi_serial_bus)}, + + {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS, + sizeof(struct aml_resource_spi_serialbus), + 0}, + + {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id), + AML_OFFSET(common_serial_bus.revision_id), + 1}, + + {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type), + AML_OFFSET(common_serial_bus.type), + 1}, + + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode), + AML_OFFSET(common_serial_bus.flags), + 0}, + + {ACPI_RSC_1BITFLAG, + ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer), + AML_OFFSET(common_serial_bus.flags), + 1}, + + {ACPI_RSC_MOVE8, + 
ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id), + AML_OFFSET(common_serial_bus.type_revision_id), + 1}, + + {ACPI_RSC_MOVE16, + ACPI_RS_OFFSET(data.common_serial_bus.type_data_length), + AML_OFFSET(common_serial_bus.type_data_length), + 1}, + + /* Vendor data */ + + {ACPI_RSC_COUNT_SERIAL_VEN, + ACPI_RS_OFFSET(data.common_serial_bus.vendor_length), + AML_OFFSET(common_serial_bus.type_data_length), + AML_RESOURCE_SPI_MIN_DATA_LEN}, + + {ACPI_RSC_MOVE_SERIAL_VEN, + ACPI_RS_OFFSET(data.common_serial_bus.vendor_data), + 0, + sizeof(struct aml_resource_spi_serialbus)}, + + /* Resource Source */ + + {ACPI_RSC_MOVE8, + ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index), + AML_OFFSET(common_serial_bus.res_source_index), + 1}, + + {ACPI_RSC_COUNT_SERIAL_RES, + ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length), + AML_OFFSET(common_serial_bus.type_data_length), + sizeof(struct aml_resource_common_serialbus)}, + + {ACPI_RSC_MOVE_SERIAL_RES, + ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr), + AML_OFFSET(common_serial_bus.type_data_length), + sizeof(struct aml_resource_common_serialbus)}, + + /* Spi bus type specific */ + + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.spi_serial_bus.wire_mode), + AML_OFFSET(spi_serial_bus.type_specific_flags), + 0}, + + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.spi_serial_bus.device_polarity), + AML_OFFSET(spi_serial_bus.type_specific_flags), + 1}, + + {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.data_bit_length), + AML_OFFSET(spi_serial_bus.data_bit_length), + 1}, + + {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.clock_phase), + AML_OFFSET(spi_serial_bus.clock_phase), + 1}, + + {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.clock_polarity), + AML_OFFSET(spi_serial_bus.clock_polarity), + 1}, + + {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.spi_serial_bus.device_selection), + AML_OFFSET(spi_serial_bus.device_selection), + 1}, + + {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.spi_serial_bus.connection_speed), + AML_OFFSET(spi_serial_bus.connection_speed), + 1}, +}; + +/******************************************************************************* + * + * acpi_rs_convert_uart_serial_bus + * + ******************************************************************************/ + +struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[22] = { + {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS, + ACPI_RS_SIZE(struct acpi_resource_uart_serialbus), + ACPI_RSC_TABLE_SIZE(acpi_rs_convert_uart_serial_bus)}, + + {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS, + sizeof(struct aml_resource_uart_serialbus), + 0}, + + {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id), + AML_OFFSET(common_serial_bus.revision_id), + 1}, + + {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type), + AML_OFFSET(common_serial_bus.type), + 1}, + + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode), + AML_OFFSET(common_serial_bus.flags), + 0}, + + {ACPI_RSC_1BITFLAG, + ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer), + AML_OFFSET(common_serial_bus.flags), + 1}, + + {ACPI_RSC_MOVE8, + ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id), + AML_OFFSET(common_serial_bus.type_revision_id), + 1}, + + {ACPI_RSC_MOVE16, + ACPI_RS_OFFSET(data.common_serial_bus.type_data_length), + AML_OFFSET(common_serial_bus.type_data_length), + 1}, + + /* Vendor data */ + + {ACPI_RSC_COUNT_SERIAL_VEN, + ACPI_RS_OFFSET(data.common_serial_bus.vendor_length), + AML_OFFSET(common_serial_bus.type_data_length), + 
AML_RESOURCE_UART_MIN_DATA_LEN}, + + {ACPI_RSC_MOVE_SERIAL_VEN, + ACPI_RS_OFFSET(data.common_serial_bus.vendor_data), + 0, + sizeof(struct aml_resource_uart_serialbus)}, + + /* Resource Source */ + + {ACPI_RSC_MOVE8, + ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index), + AML_OFFSET(common_serial_bus.res_source_index), + 1}, + + {ACPI_RSC_COUNT_SERIAL_RES, + ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length), + AML_OFFSET(common_serial_bus.type_data_length), + sizeof(struct aml_resource_common_serialbus)}, + + {ACPI_RSC_MOVE_SERIAL_RES, + ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr), + AML_OFFSET(common_serial_bus.type_data_length), + sizeof(struct aml_resource_common_serialbus)}, + + /* Uart bus type specific */ + + {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.flow_control), + AML_OFFSET(uart_serial_bus.type_specific_flags), + 0}, + + {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.stop_bits), + AML_OFFSET(uart_serial_bus.type_specific_flags), + 2}, + + {ACPI_RSC_3BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.data_bits), + AML_OFFSET(uart_serial_bus.type_specific_flags), + 4}, + + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.endian), + AML_OFFSET(uart_serial_bus.type_specific_flags), + 7}, + + {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.uart_serial_bus.parity), + AML_OFFSET(uart_serial_bus.parity), + 1}, + + {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.uart_serial_bus.lines_enabled), + AML_OFFSET(uart_serial_bus.lines_enabled), + 1}, + + {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.uart_serial_bus.rx_fifo_size), + AML_OFFSET(uart_serial_bus.rx_fifo_size), + 1}, + + {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.uart_serial_bus.tx_fifo_size), + AML_OFFSET(uart_serial_bus.tx_fifo_size), + 1}, + + {ACPI_RSC_MOVE32, + ACPI_RS_OFFSET(data.uart_serial_bus.default_baud_rate), + AML_OFFSET(uart_serial_bus.default_baud_rate), + 1}, +}; diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c index 231811e..433a375 100644 --- a/drivers/acpi/acpica/rsutils.c +++ b/drivers/acpi/acpica/rsutils.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -144,6 +144,9 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type) * since there are no alignment or endian issues */ case ACPI_RSC_MOVE8: + case ACPI_RSC_MOVE_GPIO_RES: + case ACPI_RSC_MOVE_SERIAL_VEN: + case ACPI_RSC_MOVE_SERIAL_RES: ACPI_MEMCPY(destination, source, item_count); return; @@ -153,6 +156,7 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type) * misaligned memory transfers */ case ACPI_RSC_MOVE16: + case ACPI_RSC_MOVE_GPIO_PIN: ACPI_MOVE_16_TO_16(&ACPI_CAST_PTR(u16, destination)[i], &ACPI_CAST_PTR(u16, source)[i]); break; @@ -590,6 +594,56 @@ acpi_rs_get_prs_method_data(struct acpi_namespace_node *node, /******************************************************************************* * + * FUNCTION: acpi_rs_get_aei_method_data + * + * PARAMETERS: Node - Device node + * ret_buffer - Pointer to a buffer structure for the + * results + * + * RETURN: Status + * + * DESCRIPTION: This function is called to get the _AEI value of an object + * contained in an object specified by the handle passed in + * + * If the function fails an appropriate status will be returned + * and the contents of the callers buffer is undefined. + * + ******************************************************************************/ + +acpi_status +acpi_rs_get_aei_method_data(struct acpi_namespace_node *node, + struct acpi_buffer *ret_buffer) +{ + union acpi_operand_object *obj_desc; + acpi_status status; + + ACPI_FUNCTION_TRACE(rs_get_aei_method_data); + + /* Parameters guaranteed valid by caller */ + + /* Execute the method, no parameters */ + + status = acpi_ut_evaluate_object(node, METHOD_NAME__AEI, + ACPI_BTYPE_BUFFER, &obj_desc); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + /* + * Make the call to create a resource linked list from the + * byte stream buffer that comes back from the _CRS method + * execution. + */ + status = acpi_rs_create_resource_list(obj_desc, ret_buffer); + + /* On exit, we must delete the object returned by evaluate_object */ + + acpi_ut_remove_reference(obj_desc); + return_ACPI_STATUS(status); +} + +/******************************************************************************* + * * FUNCTION: acpi_rs_get_method_data * * PARAMETERS: Handle - Handle to the containing object diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c index fe86b37..f58c098 100644 --- a/drivers/acpi/acpica/rsxface.c +++ b/drivers/acpi/acpica/rsxface.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -307,6 +307,46 @@ acpi_set_current_resources(acpi_handle device_handle, ACPI_EXPORT_SYMBOL(acpi_set_current_resources) +/******************************************************************************* + * + * FUNCTION: acpi_get_event_resources + * + * PARAMETERS: device_handle - Handle to the device object for the + * device we are getting resources + * in_buffer - Pointer to a buffer containing the + * resources to be set for the device + * + * RETURN: Status + * + * DESCRIPTION: This function is called to get the event resources for a + * specific device. The caller must first acquire a handle for + * the desired device. 
The resource data is passed to the routine + * the buffer pointed to by the in_buffer variable. Uses the + * _AEI method. + * + ******************************************************************************/ +acpi_status +acpi_get_event_resources(acpi_handle device_handle, + struct acpi_buffer *ret_buffer) +{ + acpi_status status; + struct acpi_namespace_node *node; + + ACPI_FUNCTION_TRACE(acpi_get_event_resources); + + /* Validate parameters then dispatch to internal routine */ + + status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + status = acpi_rs_get_aei_method_data(node, ret_buffer); + return_ACPI_STATUS(status); +} + +ACPI_EXPORT_SYMBOL(acpi_get_event_resources) + /****************************************************************************** * * FUNCTION: acpi_resource_to_address64 @@ -486,8 +526,9 @@ acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context) * * PARAMETERS: device_handle - Handle to the device object for the * device we are querying - * Name - Method name of the resources we want - * (METHOD_NAME__CRS or METHOD_NAME__PRS) + * Name - Method name of the resources we want. + * (METHOD_NAME__CRS, METHOD_NAME__PRS, or + * METHOD_NAME__AEI) * user_function - Called for each resource * Context - Passed to user_function * @@ -514,11 +555,12 @@ acpi_walk_resources(acpi_handle device_handle, if (!device_handle || !user_function || !name || (!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) && - !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS))) { + !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS) && + !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI))) { return_ACPI_STATUS(AE_BAD_PARAMETER); } - /* Get the _CRS or _PRS resource list */ + /* Get the _CRS/_PRS/_AEI resource list */ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; status = acpi_rs_get_method_data(device_handle, name, &buffer); diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c index 6f5588e..c5d8704 100644 --- a/drivers/acpi/acpica/tbfadt.c +++ b/drivers/acpi/acpica/tbfadt.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
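Given the acpi_get_event_resources() export above, a caller that wants a device's _AEI list (the GpioInt descriptors that describe its GPIO-signaled ACPI events) could follow the same ACPI_ALLOCATE_LOCAL_BUFFER pattern acpi_walk_resources() uses in the hunk above. The sketch below is illustrative; the example_* name is hypothetical and error handling is abbreviated.

#include <acpi/acpi.h>

static acpi_status example_get_aei(acpi_handle device)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_LOCAL_BUFFER, NULL };
	acpi_status status;

	/* ACPICA sizes and allocates the output buffer for us */

	status = acpi_get_event_resources(device, &buffer);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* buffer.pointer is a struct acpi_resource list ending in an EndTag */

	ACPI_FREE(buffer.pointer);
	return (AE_OK);
}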
* * Redistribution and use in source and binary forms, with or without @@ -63,14 +63,15 @@ static void acpi_tb_setup_fadt_registers(void); typedef struct acpi_fadt_info { char *name; - u8 address64; - u8 address32; - u8 length; + u16 address64; + u16 address32; + u16 length; u8 default_length; u8 type; } acpi_fadt_info; +#define ACPI_FADT_OPTIONAL 0 #define ACPI_FADT_REQUIRED 1 #define ACPI_FADT_SEPARATE_LENGTH 2 @@ -87,7 +88,7 @@ static struct acpi_fadt_info fadt_info_table[] = { ACPI_FADT_OFFSET(pm1b_event_block), ACPI_FADT_OFFSET(pm1_event_length), ACPI_PM1_REGISTER_WIDTH * 2, /* Enable + Status register */ - 0}, + ACPI_FADT_OPTIONAL}, {"Pm1aControlBlock", ACPI_FADT_OFFSET(xpm1a_control_block), @@ -101,7 +102,7 @@ static struct acpi_fadt_info fadt_info_table[] = { ACPI_FADT_OFFSET(pm1b_control_block), ACPI_FADT_OFFSET(pm1_control_length), ACPI_PM1_REGISTER_WIDTH, - 0}, + ACPI_FADT_OPTIONAL}, {"Pm2ControlBlock", ACPI_FADT_OFFSET(xpm2_control_block), @@ -139,7 +140,7 @@ static struct acpi_fadt_info fadt_info_table[] = { typedef struct acpi_fadt_pm_info { struct acpi_generic_address *target; - u8 source; + u16 source; u8 register_num; } acpi_fadt_pm_info; @@ -253,8 +254,13 @@ void acpi_tb_parse_fadt(u32 table_index) acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt, ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT); - acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xfacs, - ACPI_SIG_FACS, ACPI_TABLE_INDEX_FACS); + /* If Hardware Reduced flag is set, there is no FACS */ + + if (!acpi_gbl_reduced_hardware) { + acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT. + Xfacs, ACPI_SIG_FACS, + ACPI_TABLE_INDEX_FACS); + } } /******************************************************************************* @@ -277,12 +283,12 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length) { /* * Check if the FADT is larger than the largest table that we expect - * (the ACPI 2.0/3.0 version). If so, truncate the table, and issue + * (the ACPI 5.0 version). If so, truncate the table, and issue * a warning. */ if (length > sizeof(struct acpi_table_fadt)) { ACPI_WARNING((AE_INFO, - "FADT (revision %u) is longer than ACPI 2.0 version, " + "FADT (revision %u) is longer than ACPI 5.0 version, " "truncating length %u to %u", table->revision, length, (u32)sizeof(struct acpi_table_fadt))); @@ -297,6 +303,13 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length) ACPI_MEMCPY(&acpi_gbl_FADT, table, ACPI_MIN(length, sizeof(struct acpi_table_fadt))); + /* Take a copy of the Hardware Reduced flag */ + + acpi_gbl_reduced_hardware = FALSE; + if (acpi_gbl_FADT.flags & ACPI_FADT_HW_REDUCED) { + acpi_gbl_reduced_hardware = TRUE; + } + /* Convert the local copy of the FADT to the common internal format */ acpi_tb_convert_fadt(); @@ -502,6 +515,12 @@ static void acpi_tb_validate_fadt(void) acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt; } + /* If Hardware Reduced flag is set, we are all done */ + + if (acpi_gbl_reduced_hardware) { + return; + } + /* Examine all of the 64-bit extended address fields (X fields) */ for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) { diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c index a55cb2b..4903e36 100644 --- a/drivers/acpi/acpica/tbfind.c +++ b/drivers/acpi/acpica/tbfind.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
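The Hardware-Reduced handling added to tbfadt.c above keys off a single FADT flag: when ACPI_FADT_HW_REDUCED is set, ACPICA records it in acpi_gbl_reduced_hardware and then skips the FACS and the legacy X-register validation. A minimal sketch of that test, using the acpi_gbl_FADT global referenced in the hunks above (the example_* name is hypothetical):

#include <acpi/acpi.h>

static u8 example_is_hw_reduced(void)
{
	/* ACPI 5.0 hardware-reduced platforms have no FACS and no
	 * fixed-hardware (PM1x/PM2/PM timer/GPE) register blocks */

	return (acpi_gbl_FADT.flags & ACPI_FADT_HW_REDUCED) ? TRUE : FALSE;
}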
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c index 62365f6..1aecf7b 100644 --- a/drivers/acpi/acpica/tbinstal.c +++ b/drivers/acpi/acpica/tbinstal.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index 0f2d395..09ca39e 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -135,6 +135,13 @@ acpi_status acpi_tb_initialize_facs(void) { acpi_status status; + /* If Hardware Reduced flag is set, there is no FACS */ + + if (acpi_gbl_reduced_hardware) { + acpi_gbl_FACS = NULL; + return (AE_OK); + } + status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS, ACPI_CAST_INDIRECT_PTR(struct acpi_table_header, diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c index e7d13f5..abcc641 100644 --- a/drivers/acpi/acpica/tbxface.c +++ b/drivers/acpi/acpica/tbxface.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c index 7eb6c6c..4258f64 100644 --- a/drivers/acpi/acpica/tbxfroot.c +++ b/drivers/acpi/acpica/tbxfroot.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c new file mode 100644 index 0000000..67932ae --- /dev/null +++ b/drivers/acpi/acpica/utaddress.c @@ -0,0 +1,294 @@ +/****************************************************************************** + * + * Module Name: utaddress - op_region address range check + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2012, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. 
Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include <acpi/acpi.h> +#include "accommon.h" +#include "acnamesp.h" + +#define _COMPONENT ACPI_UTILITIES +ACPI_MODULE_NAME("utaddress") + +/******************************************************************************* + * + * FUNCTION: acpi_ut_add_address_range + * + * PARAMETERS: space_id - Address space ID + * Address - op_region start address + * Length - op_region length + * region_node - op_region namespace node + * + * RETURN: Status + * + * DESCRIPTION: Add the Operation Region address range to the global list. + * The only supported Space IDs are Memory and I/O. Called when + * the op_region address/length operands are fully evaluated. + * + * MUTEX: Locks the namespace + * + * NOTE: Because this interface is only called when an op_region argument + * list is evaluated, there cannot be any duplicate region_nodes. + * Duplicate Address/Length values are allowed, however, so that multiple + * address conflicts can be detected. 
+ * + ******************************************************************************/ +acpi_status +acpi_ut_add_address_range(acpi_adr_space_type space_id, + acpi_physical_address address, + u32 length, struct acpi_namespace_node *region_node) +{ + struct acpi_address_range *range_info; + acpi_status status; + + ACPI_FUNCTION_TRACE(ut_add_address_range); + + if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) && + (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) { + return_ACPI_STATUS(AE_OK); + } + + /* Allocate/init a new info block, add it to the appropriate list */ + + range_info = ACPI_ALLOCATE(sizeof(struct acpi_address_range)); + if (!range_info) { + return_ACPI_STATUS(AE_NO_MEMORY); + } + + range_info->start_address = address; + range_info->end_address = (address + length - 1); + range_info->region_node = region_node; + + status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE(status)) { + ACPI_FREE(range_info); + return_ACPI_STATUS(status); + } + + range_info->next = acpi_gbl_address_range_list[space_id]; + acpi_gbl_address_range_list[space_id] = range_info; + + ACPI_DEBUG_PRINT((ACPI_DB_NAMES, + "\nAdded [%4.4s] address range: 0x%p-0x%p\n", + acpi_ut_get_node_name(range_info->region_node), + ACPI_CAST_PTR(void, address), + ACPI_CAST_PTR(void, range_info->end_address))); + + (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); + return_ACPI_STATUS(AE_OK); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_remove_address_range + * + * PARAMETERS: space_id - Address space ID + * region_node - op_region namespace node + * + * RETURN: None + * + * DESCRIPTION: Remove the Operation Region from the global list. The only + * supported Space IDs are Memory and I/O. Called when an + * op_region is deleted. + * + * MUTEX: Assumes the namespace is locked + * + ******************************************************************************/ + +void +acpi_ut_remove_address_range(acpi_adr_space_type space_id, + struct acpi_namespace_node *region_node) +{ + struct acpi_address_range *range_info; + struct acpi_address_range *prev; + + ACPI_FUNCTION_TRACE(ut_remove_address_range); + + if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) && + (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) { + return_VOID; + } + + /* Get the appropriate list head and check the list */ + + range_info = prev = acpi_gbl_address_range_list[space_id]; + while (range_info) { + if (range_info->region_node == region_node) { + if (range_info == prev) { /* Found at list head */ + acpi_gbl_address_range_list[space_id] = + range_info->next; + } else { + prev->next = range_info->next; + } + + ACPI_DEBUG_PRINT((ACPI_DB_NAMES, + "\nRemoved [%4.4s] address range: 0x%p-0x%p\n", + acpi_ut_get_node_name(range_info-> + region_node), + ACPI_CAST_PTR(void, + range_info-> + start_address), + ACPI_CAST_PTR(void, + range_info-> + end_address))); + + ACPI_FREE(range_info); + return_VOID; + } + + prev = range_info; + range_info = range_info->next; + } + + return_VOID; +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_check_address_range + * + * PARAMETERS: space_id - Address space ID + * Address - Start address + * Length - Length of address range + * Warn - TRUE if warning on overlap desired + * + * RETURN: Count of the number of conflicts detected. Zero is always + * returned for Space IDs other than Memory or I/O. + * + * DESCRIPTION: Check if the input address range overlaps any of the + * ASL operation region address ranges. 
The only supported + * Space IDs are Memory and I/O. + * + * MUTEX: Assumes the namespace is locked. + * + ******************************************************************************/ + +u32 +acpi_ut_check_address_range(acpi_adr_space_type space_id, + acpi_physical_address address, u32 length, u8 warn) +{ + struct acpi_address_range *range_info; + acpi_physical_address end_address; + char *pathname; + u32 overlap_count = 0; + + ACPI_FUNCTION_TRACE(ut_check_address_range); + + if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) && + (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) { + return_UINT32(0); + } + + range_info = acpi_gbl_address_range_list[space_id]; + end_address = address + length - 1; + + /* Check entire list for all possible conflicts */ + + while (range_info) { + /* + * Check if the requested Address/Length overlaps this address_range. + * Four cases to consider: + * + * 1) Input address/length is contained completely in the address range + * 2) Input address/length overlaps range at the range start + * 3) Input address/length overlaps range at the range end + * 4) Input address/length completely encompasses the range + */ + if ((address <= range_info->end_address) && + (end_address >= range_info->start_address)) { + + /* Found an address range overlap */ + + overlap_count++; + if (warn) { /* Optional warning message */ + pathname = + acpi_ns_get_external_pathname(range_info-> + region_node); + + ACPI_WARNING((AE_INFO, + "0x%p-0x%p %s conflicts with Region %s %d", + ACPI_CAST_PTR(void, address), + ACPI_CAST_PTR(void, end_address), + acpi_ut_get_region_name(space_id), + pathname, overlap_count)); + ACPI_FREE(pathname); + } + } + + range_info = range_info->next; + } + + return_UINT32(overlap_count); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_delete_address_lists + * + * PARAMETERS: None + * + * RETURN: None + * + * DESCRIPTION: Delete all global address range lists (called during + * subsystem shutdown). + * + ******************************************************************************/ + +void acpi_ut_delete_address_lists(void) +{ + struct acpi_address_range *next; + struct acpi_address_range *range_info; + int i; + + /* Delete all elements in all address range lists */ + + for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) { + next = acpi_gbl_address_range_list[i]; + + while (next) { + range_info = next; + next = range_info->next; + ACPI_FREE(range_info); + } + + acpi_gbl_address_range_list[i] = NULL; + } +} diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c index 0a69735..9982d2ea 100644 --- a/drivers/acpi/acpica/utalloc.c +++ b/drivers/acpi/acpica/utalloc.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c index aded299..3317c0a 100644 --- a/drivers/acpi/acpica/utcopy.c +++ b/drivers/acpi/acpica/utcopy.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
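The conflict test in acpi_ut_check_address_range() above is the standard closed-interval intersection check: two ranges overlap exactly when each one starts at or before the other one ends. Drivers reach it through the acpi_check_address_range() wrapper added later in this patch, which takes the namespace lock itself. A minimal standalone sketch of the predicate, with worked values in the comments; the helper name and the addresses are illustrative, not part of the patch:

#include <assert.h>

typedef unsigned long long u64;	/* stand-in for the kernel's u64 */

/*
 * Same predicate acpi_ut_check_address_range() applies to each list entry:
 * with inclusive end addresses, the ranges overlap iff
 * a_start <= b_end && b_start <= a_end.
 */
static int ranges_overlap(u64 a_start, u64 a_len, u64 b_start, u64 b_len)
{
	u64 a_end = a_start + a_len - 1;	/* inclusive, like range_info->end_address */
	u64 b_end = b_start + b_len - 1;

	return a_start <= b_end && b_start <= a_end;
}

int main(void)
{
	/* OpRegion 0xFED40000..0xFED4007F vs. request at 0xFED40040, length 0x10: conflict */
	assert(ranges_overlap(0xFED40000ULL, 0x80, 0xFED40040ULL, 0x10));

	/* Request starting just past the region's last byte: no conflict */
	assert(!ranges_overlap(0xFED40000ULL, 0x80, 0xFED40080ULL, 0x10));
	return 0;
}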
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c index a1f8d75..a0998a8 100644 --- a/drivers/acpi/acpica/utdebug.c +++ b/drivers/acpi/acpica/utdebug.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c index 8b087e2..d42ede52 100644 --- a/drivers/acpi/acpica/utdecode.c +++ b/drivers/acpi/acpica/utdecode.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -171,7 +171,9 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = { "SMBus", "SystemCMOS", "PCIBARTarget", - "IPMI" + "IPMI", + "GeneralPurposeIo", + "GenericSerialBus" }; char *acpi_ut_get_region_name(u8 space_id) diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c index 31f5a78..2a6c3e1 100644 --- a/drivers/acpi/acpica/utdelete.c +++ b/drivers/acpi/acpica/utdelete.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -215,11 +215,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object) ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "***** Region %p\n", object)); - /* Invalidate the region address/length via the host OS */ - - acpi_os_invalidate_address(object->region.space_id, - object->region.address, - (acpi_size) object->region.length); + /* + * Update address_range list. However, only permanent regions + * are installed in this list. (Not created within a method) + */ + if (!(object->region.node->flags & ANOBJ_TEMPORARY)) { + acpi_ut_remove_address_range(object->region.space_id, + object->region.node); + } second_desc = acpi_ns_get_secondary_object(object); if (second_desc) { diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c index 18f73c9..479f32b 100644 --- a/drivers/acpi/acpica/uteval.c +++ b/drivers/acpi/acpica/uteval.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c index ffba0a3..4153584 100644 --- a/drivers/acpi/acpica/utglobal.c +++ b/drivers/acpi/acpica/utglobal.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -264,6 +264,12 @@ acpi_status acpi_ut_init_globals(void) return_ACPI_STATUS(status); } + /* Address Range lists */ + + for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) { + acpi_gbl_address_range_list[i] = NULL; + } + /* Mutex locked flags */ for (i = 0; i < ACPI_NUM_MUTEX; i++) { diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c index b679ea6..c92eb1d 100644 --- a/drivers/acpi/acpica/utids.c +++ b/drivers/acpi/acpica/utids.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c index 191b682..8359c0c 100644 --- a/drivers/acpi/acpica/utinit.c +++ b/drivers/acpi/acpica/utinit.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -92,6 +92,7 @@ static void acpi_ut_terminate(void) gpe_xrupt_info = next_gpe_xrupt_info; } + acpi_ut_delete_address_lists(); return_VOID; } diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c index f6bb75c6..155fd78 100644 --- a/drivers/acpi/acpica/utlock.c +++ b/drivers/acpi/acpica/utlock.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c index ce481da..2491a55 100644 --- a/drivers/acpi/acpica/utmath.c +++ b/drivers/acpi/acpica/utmath.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c index c33a852..86f19db 100644 --- a/drivers/acpi/acpica/utmisc.c +++ b/drivers/acpi/acpica/utmisc.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c index 7d797e2..43174df 100644 --- a/drivers/acpi/acpica/utmutex.c +++ b/drivers/acpi/acpica/utmutex.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -293,14 +293,10 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id) acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id) { - acpi_thread_id this_thread_id; - ACPI_FUNCTION_NAME(ut_release_mutex); - this_thread_id = acpi_os_get_thread_id(); - ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %u releasing Mutex [%s]\n", - (u32)this_thread_id, + (u32)acpi_os_get_thread_id(), acpi_ut_get_mutex_name(mutex_id))); if (mutex_id > ACPI_MAX_MUTEX) { @@ -329,7 +325,8 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id) * the ACPI subsystem code. */ for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) { - if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) { + if (acpi_gbl_mutex_info[i].thread_id == + acpi_os_get_thread_id()) { if (i == mutex_id) { continue; } diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c index 188340a..b112744 100644 --- a/drivers/acpi/acpica/utobject.c +++ b/drivers/acpi/acpica/utobject.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c index 1fb10cb..2360cf7 100644 --- a/drivers/acpi/acpica/utosi.c +++ b/drivers/acpi/acpica/utosi.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c index 6ffd3a8..9d441ea 100644 --- a/drivers/acpi/acpica/utresrc.c +++ b/drivers/acpi/acpica/utresrc.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -43,7 +43,7 @@ #include <acpi/acpi.h> #include "accommon.h" -#include "amlresrc.h" +#include "acresrc.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utresrc") @@ -154,6 +154,138 @@ const char *acpi_gbl_typ_decode[] = { "TypeF" }; +const char *acpi_gbl_ppc_decode[] = { + "PullDefault", + "PullUp", + "PullDown", + "PullNone" +}; + +const char *acpi_gbl_ior_decode[] = { + "IoRestrictionNone", + "IoRestrictionInputOnly", + "IoRestrictionOutputOnly", + "IoRestrictionNoneAndPreserve" +}; + +const char *acpi_gbl_dts_decode[] = { + "Width8bit", + "Width16bit", + "Width32bit", + "Width64bit", + "Width128bit", + "Width256bit", +}; + +/* GPIO connection type */ + +const char *acpi_gbl_ct_decode[] = { + "Interrupt", + "I/O" +}; + +/* Serial bus type */ + +const char *acpi_gbl_sbt_decode[] = { + "/* UNKNOWN serial bus type */", + "I2C", + "SPI", + "UART" +}; + +/* I2C serial bus access mode */ + +const char *acpi_gbl_am_decode[] = { + "AddressingMode7Bit", + "AddressingMode10Bit" +}; + +/* I2C serial bus slave mode */ + +const char *acpi_gbl_sm_decode[] = { + "ControllerInitiated", + "DeviceInitiated" +}; + +/* SPI serial bus wire mode */ + +const char *acpi_gbl_wm_decode[] = { + "FourWireMode", + "ThreeWireMode" +}; + +/* SPI serial clock phase */ + +const char *acpi_gbl_cph_decode[] = { + "ClockPhaseFirst", + "ClockPhaseSecond" +}; + +/* SPI serial bus clock polarity */ + +const char *acpi_gbl_cpo_decode[] = { + "ClockPolarityLow", + "ClockPolarityHigh" +}; + +/* SPI serial bus device polarity */ + +const char *acpi_gbl_dp_decode[] = { + "PolarityLow", + "PolarityHigh" +}; + +/* UART serial bus endian */ + +const char *acpi_gbl_ed_decode[] = { + "LittleEndian", + "BigEndian" +}; + +/* UART serial bus bits per byte */ + +const char *acpi_gbl_bpb_decode[] = { + "DataBitsFive", + "DataBitsSix", + "DataBitsSeven", + "DataBitsEight", + "DataBitsNine", + "/* UNKNOWN Bits per byte */", + "/* UNKNOWN Bits per byte */", + "/* UNKNOWN Bits per byte */" +}; + +/* UART serial bus stop bits */ + +const char *acpi_gbl_sb_decode[] = { + "StopBitsNone", + "StopBitsOne", + "StopBitsOnePlusHalf", + "StopBitsTwo" +}; + +/* UART serial bus flow control */ + +const char *acpi_gbl_fc_decode[] = { + "FlowControlNone", + "FlowControlHardware", + "FlowControlXON", + "/* UNKNOWN flow control keyword */" +}; + +/* UART serial bus parity type */ + +const char *acpi_gbl_pt_decode[] = { + "ParityTypeNone", + "ParityTypeEven", + "ParityTypeOdd", + "ParityTypeMark", + "ParityTypeSpace", + "/* UNKNOWN parity keyword */", + "/* UNKNOWN parity keyword */", + "/* UNKNOWN parity keyword */" +}; + #endif /* @@ -173,7 +305,7 @@ const u8 acpi_gbl_resource_aml_sizes[] = { ACPI_AML_SIZE_SMALL(struct aml_resource_end_dependent), ACPI_AML_SIZE_SMALL(struct aml_resource_io), ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_io), - 0, + ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_dma), 0, 0, 0, @@ -193,7 +325,17 @@ const u8 acpi_gbl_resource_aml_sizes[] = { ACPI_AML_SIZE_LARGE(struct aml_resource_address16), ACPI_AML_SIZE_LARGE(struct aml_resource_extended_irq), ACPI_AML_SIZE_LARGE(struct aml_resource_address64), - ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64) + ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64), + ACPI_AML_SIZE_LARGE(struct aml_resource_gpio), + 0, + ACPI_AML_SIZE_LARGE(struct aml_resource_common_serialbus), +}; + +const u8 acpi_gbl_resource_aml_serial_bus_sizes[] = { + 0, + ACPI_AML_SIZE_LARGE(struct 
aml_resource_i2c_serialbus), + ACPI_AML_SIZE_LARGE(struct aml_resource_spi_serialbus), + ACPI_AML_SIZE_LARGE(struct aml_resource_uart_serialbus), }; /* @@ -209,35 +351,49 @@ static const u8 acpi_gbl_resource_types[] = { 0, 0, 0, - ACPI_SMALL_VARIABLE_LENGTH, - ACPI_FIXED_LENGTH, - ACPI_SMALL_VARIABLE_LENGTH, - ACPI_FIXED_LENGTH, - ACPI_FIXED_LENGTH, - ACPI_FIXED_LENGTH, - 0, + ACPI_SMALL_VARIABLE_LENGTH, /* 04 IRQ */ + ACPI_FIXED_LENGTH, /* 05 DMA */ + ACPI_SMALL_VARIABLE_LENGTH, /* 06 start_dependent_functions */ + ACPI_FIXED_LENGTH, /* 07 end_dependent_functions */ + ACPI_FIXED_LENGTH, /* 08 IO */ + ACPI_FIXED_LENGTH, /* 09 fixed_iO */ + ACPI_FIXED_LENGTH, /* 0_a fixed_dMA */ 0, 0, 0, - ACPI_VARIABLE_LENGTH, - ACPI_FIXED_LENGTH, + ACPI_VARIABLE_LENGTH, /* 0_e vendor_short */ + ACPI_FIXED_LENGTH, /* 0_f end_tag */ /* Large descriptors */ 0, - ACPI_FIXED_LENGTH, - ACPI_FIXED_LENGTH, + ACPI_FIXED_LENGTH, /* 01 Memory24 */ + ACPI_FIXED_LENGTH, /* 02 generic_register */ 0, - ACPI_VARIABLE_LENGTH, - ACPI_FIXED_LENGTH, - ACPI_FIXED_LENGTH, - ACPI_VARIABLE_LENGTH, - ACPI_VARIABLE_LENGTH, - ACPI_VARIABLE_LENGTH, - ACPI_VARIABLE_LENGTH, - ACPI_FIXED_LENGTH + ACPI_VARIABLE_LENGTH, /* 04 vendor_long */ + ACPI_FIXED_LENGTH, /* 05 Memory32 */ + ACPI_FIXED_LENGTH, /* 06 memory32_fixed */ + ACPI_VARIABLE_LENGTH, /* 07 Dword* address */ + ACPI_VARIABLE_LENGTH, /* 08 Word* address */ + ACPI_VARIABLE_LENGTH, /* 09 extended_iRQ */ + ACPI_VARIABLE_LENGTH, /* 0_a Qword* address */ + ACPI_FIXED_LENGTH, /* 0_b Extended* address */ + ACPI_VARIABLE_LENGTH, /* 0_c Gpio* */ + 0, + ACPI_VARIABLE_LENGTH /* 0_e *serial_bus */ }; +/* + * For the i_aSL compiler/disassembler, we don't want any error messages + * because the disassembler uses the resource validation code to determine + * if Buffer objects are actually Resource Templates. + */ +#ifdef ACPI_ASL_COMPILER +#define ACPI_RESOURCE_ERROR(plist) +#else +#define ACPI_RESOURCE_ERROR(plist) ACPI_ERROR(plist) +#endif + /******************************************************************************* * * FUNCTION: acpi_ut_walk_aml_resources @@ -265,6 +421,7 @@ acpi_ut_walk_aml_resources(u8 * aml, u8 resource_index; u32 length; u32 offset = 0; + u8 end_tag[2] = { 0x79, 0x00 }; ACPI_FUNCTION_TRACE(ut_walk_aml_resources); @@ -286,6 +443,10 @@ acpi_ut_walk_aml_resources(u8 * aml, status = acpi_ut_validate_resource(aml, &resource_index); if (ACPI_FAILURE(status)) { + /* + * Exit on failure. Cannot continue because the descriptor length + * may be bogus also. + */ return_ACPI_STATUS(status); } @@ -300,7 +461,7 @@ acpi_ut_walk_aml_resources(u8 * aml, user_function(aml, length, offset, resource_index, context); if (ACPI_FAILURE(status)) { - return (status); + return_ACPI_STATUS(status); } } @@ -333,7 +494,19 @@ acpi_ut_walk_aml_resources(u8 * aml, /* Did not find an end_tag descriptor */ - return (AE_AML_NO_RESOURCE_END_TAG); + if (user_function) { + + /* Insert an end_tag anyway. 
acpi_rs_get_list_length always leaves room */ + + (void)acpi_ut_validate_resource(end_tag, &resource_index); + status = + user_function(end_tag, 2, offset, resource_index, context); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + } + + return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG); } /******************************************************************************* @@ -354,6 +527,7 @@ acpi_ut_walk_aml_resources(u8 * aml, acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index) { + union aml_resource *aml_resource; u8 resource_type; u8 resource_index; acpi_rs_length resource_length; @@ -375,7 +549,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index) /* Verify the large resource type (name) against the max */ if (resource_type > ACPI_RESOURCE_NAME_LARGE_MAX) { - return (AE_AML_INVALID_RESOURCE_TYPE); + goto invalid_resource; } /* @@ -392,15 +566,17 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index) ((resource_type & ACPI_RESOURCE_NAME_SMALL_MASK) >> 3); } - /* Check validity of the resource type, zero indicates name is invalid */ - + /* + * Check validity of the resource type, via acpi_gbl_resource_types. Zero + * indicates an invalid resource. + */ if (!acpi_gbl_resource_types[resource_index]) { - return (AE_AML_INVALID_RESOURCE_TYPE); + goto invalid_resource; } /* - * 2) Validate the resource_length field. This ensures that the length - * is at least reasonable, and guarantees that it is non-zero. + * Validate the resource_length field. This ensures that the length + * is at least reasonable, and guarantees that it is non-zero. */ resource_length = acpi_ut_get_resource_length(aml); minimum_resource_length = acpi_gbl_resource_aml_sizes[resource_index]; @@ -413,7 +589,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index) /* Fixed length resource, length must match exactly */ if (resource_length != minimum_resource_length) { - return (AE_AML_BAD_RESOURCE_LENGTH); + goto bad_resource_length; } break; @@ -422,7 +598,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index) /* Variable length resource, length must be at least the minimum */ if (resource_length < minimum_resource_length) { - return (AE_AML_BAD_RESOURCE_LENGTH); + goto bad_resource_length; } break; @@ -432,7 +608,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index) if ((resource_length > minimum_resource_length) || (resource_length < (minimum_resource_length - 1))) { - return (AE_AML_BAD_RESOURCE_LENGTH); + goto bad_resource_length; } break; @@ -440,7 +616,23 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index) /* Shouldn't happen (because of validation earlier), but be sure */ - return (AE_AML_INVALID_RESOURCE_TYPE); + goto invalid_resource; + } + + aml_resource = ACPI_CAST_PTR(union aml_resource, aml); + if (resource_type == ACPI_RESOURCE_NAME_SERIAL_BUS) { + + /* Validate the bus_type field */ + + if ((aml_resource->common_serial_bus.type == 0) || + (aml_resource->common_serial_bus.type > + AML_RESOURCE_MAX_SERIALBUSTYPE)) { + ACPI_RESOURCE_ERROR((AE_INFO, + "Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X", + aml_resource->common_serial_bus. 
+ type)); + return (AE_AML_INVALID_RESOURCE_TYPE); + } } /* Optionally return the resource table index */ @@ -450,6 +642,22 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index) } return (AE_OK); + + invalid_resource: + + ACPI_RESOURCE_ERROR((AE_INFO, + "Invalid/unsupported resource descriptor: Type 0x%2.2X", + resource_type)); + return (AE_AML_INVALID_RESOURCE_TYPE); + + bad_resource_length: + + ACPI_RESOURCE_ERROR((AE_INFO, + "Invalid resource descriptor length: Type " + "0x%2.2X, Length 0x%4.4X, MinLength 0x%4.4X", + resource_type, resource_length, + minimum_resource_length)); + return (AE_AML_BAD_RESOURCE_LENGTH); } /******************************************************************************* diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c index 30c21e1..4267477 100644 --- a/drivers/acpi/acpica/utstate.c +++ b/drivers/acpi/acpica/utstate.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c index 420ebfe..644e8c8 100644 --- a/drivers/acpi/acpica/utxface.c +++ b/drivers/acpi/acpica/utxface.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -48,6 +48,7 @@ #include "acnamesp.h" #include "acdebug.h" #include "actables.h" +#include "acinterp.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utxface") @@ -640,4 +641,41 @@ acpi_status acpi_install_interface_handler(acpi_interface_handler handler) } ACPI_EXPORT_SYMBOL(acpi_install_interface_handler) + +/***************************************************************************** + * + * FUNCTION: acpi_check_address_range + * + * PARAMETERS: space_id - Address space ID + * Address - Start address + * Length - Length + * Warn - TRUE if warning on overlap desired + * + * RETURN: Count of the number of conflicts detected. + * + * DESCRIPTION: Check if the input address range overlaps any of the + * ASL operation region address ranges. + * + ****************************************************************************/ +u32 +acpi_check_address_range(acpi_adr_space_type space_id, + acpi_physical_address address, + acpi_size length, u8 warn) +{ + u32 overlaps; + acpi_status status; + + status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE(status)) { + return (0); + } + + overlaps = acpi_ut_check_address_range(space_id, address, + (u32)length, warn); + + (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); + return (overlaps); +} + +ACPI_EXPORT_SYMBOL(acpi_check_address_range) #endif /* !ACPI_ASL_COMPILER */ diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c index 8d0245e..52b568a 100644 --- a/drivers/acpi/acpica/utxferror.c +++ b/drivers/acpi/acpica/utxferror.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2011, Intel Corp. + * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utxfmutex.c b/drivers/acpi/acpica/utxfmutex.c new file mode 100644 index 0000000..1427d19 --- /dev/null +++ b/drivers/acpi/acpica/utxfmutex.c @@ -0,0 +1,187 @@ +/******************************************************************************* + * + * Module Name: utxfmutex - external AML mutex access functions + * + ******************************************************************************/ + +/* + * Copyright (C) 2000 - 2012, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include <acpi/acpi.h> +#include "accommon.h" +#include "acnamesp.h" + +#define _COMPONENT ACPI_UTILITIES +ACPI_MODULE_NAME("utxfmutex") + +/* Local prototypes */ +static acpi_status +acpi_ut_get_mutex_object(acpi_handle handle, + acpi_string pathname, + union acpi_operand_object **ret_obj); + +/******************************************************************************* + * + * FUNCTION: acpi_ut_get_mutex_object + * + * PARAMETERS: Handle - Mutex or prefix handle (optional) + * Pathname - Mutex pathname (optional) + * ret_obj - Where the mutex object is returned + * + * RETURN: Status + * + * DESCRIPTION: Get an AML mutex object. The mutex node is pointed to by + * Handle:Pathname. Either Handle or Pathname can be NULL, but + * not both. 
+ * + ******************************************************************************/ + +static acpi_status +acpi_ut_get_mutex_object(acpi_handle handle, + acpi_string pathname, + union acpi_operand_object **ret_obj) +{ + struct acpi_namespace_node *mutex_node; + union acpi_operand_object *mutex_obj; + acpi_status status; + + /* Parameter validation */ + + if (!ret_obj || (!handle && !pathname)) { + return (AE_BAD_PARAMETER); + } + + /* Get a the namespace node for the mutex */ + + mutex_node = handle; + if (pathname != NULL) { + status = acpi_get_handle(handle, pathname, + ACPI_CAST_PTR(acpi_handle, + &mutex_node)); + if (ACPI_FAILURE(status)) { + return (status); + } + } + + /* Ensure that we actually have a Mutex object */ + + if (!mutex_node || (mutex_node->type != ACPI_TYPE_MUTEX)) { + return (AE_TYPE); + } + + /* Get the low-level mutex object */ + + mutex_obj = acpi_ns_get_attached_object(mutex_node); + if (!mutex_obj) { + return (AE_NULL_OBJECT); + } + + *ret_obj = mutex_obj; + return (AE_OK); +} + +/******************************************************************************* + * + * FUNCTION: acpi_acquire_mutex + * + * PARAMETERS: Handle - Mutex or prefix handle (optional) + * Pathname - Mutex pathname (optional) + * Timeout - Max time to wait for the lock (millisec) + * + * RETURN: Status + * + * DESCRIPTION: Acquire an AML mutex. This is a device driver interface to + * AML mutex objects, and allows for transaction locking between + * drivers and AML code. The mutex node is pointed to by + * Handle:Pathname. Either Handle or Pathname can be NULL, but + * not both. + * + ******************************************************************************/ + +acpi_status +acpi_acquire_mutex(acpi_handle handle, acpi_string pathname, u16 timeout) +{ + acpi_status status; + union acpi_operand_object *mutex_obj; + + /* Get the low-level mutex associated with Handle:Pathname */ + + status = acpi_ut_get_mutex_object(handle, pathname, &mutex_obj); + if (ACPI_FAILURE(status)) { + return (status); + } + + /* Acquire the OS mutex */ + + status = acpi_os_acquire_mutex(mutex_obj->mutex.os_mutex, timeout); + return (status); +} + +/******************************************************************************* + * + * FUNCTION: acpi_release_mutex + * + * PARAMETERS: Handle - Mutex or prefix handle (optional) + * Pathname - Mutex pathname (optional) + * + * RETURN: Status + * + * DESCRIPTION: Release an AML mutex. This is a device driver interface to + * AML mutex objects, and allows for transaction locking between + * drivers and AML code. The mutex node is pointed to by + * Handle:Pathname. Either Handle or Pathname can be NULL, but + * not both. 
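A usage sketch of the acquire/release pair documented above, as a driver might serialize a hardware access against AML that guards the same resource with a named Mutex object. The mutex pathname, the timeout value and the function name are placeholders, not taken from the patch:

/*
 * Hypothetical driver-side helper: hold the AML Mutex named by the absolute
 * pathname around a register access that firmware AML also protects.
 */
#include <acpi/acpi.h>

static acpi_status example_locked_access(void)
{
	acpi_status status;

	/* NULL prefix handle plus an absolute pathname selects the mutex */
	status = acpi_acquire_mutex(NULL, "\\_SB.PCI0.LPCB.MUT0", 1000);	/* 1000 ms */
	if (ACPI_FAILURE(status))
		return status;

	/* ... touch the shared hardware here ... */

	(void)acpi_release_mutex(NULL, "\\_SB.PCI0.LPCB.MUT0");
	return AE_OK;
}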
+ * + ******************************************************************************/ + +acpi_status acpi_release_mutex(acpi_handle handle, acpi_string pathname) +{ + acpi_status status; + union acpi_operand_object *mutex_obj; + + /* Get the low-level mutex associated with Handle:Pathname */ + + status = acpi_ut_get_mutex_object(handle, pathname, &mutex_obj); + if (ACPI_FAILURE(status)) { + return (status); + } + + /* Release the OS mutex */ + + acpi_os_release_mutex(mutex_obj->mutex.os_mutex); + return (AE_OK); +} diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c index 6154036..e5d53b7 100644 --- a/drivers/acpi/apei/apei-base.c +++ b/drivers/acpi/apei/apei-base.c @@ -34,13 +34,13 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/acpi.h> +#include <linux/acpi_io.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/kref.h> #include <linux/rculist.h> #include <linux/interrupt.h> #include <linux/debugfs.h> -#include <acpi/atomicio.h> #include "apei-internal.h" @@ -70,7 +70,7 @@ int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val) { int rc; - rc = acpi_atomic_read(val, &entry->register_region); + rc = apei_read(val, &entry->register_region); if (rc) return rc; *val >>= entry->register_region.bit_offset; @@ -116,13 +116,13 @@ int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val) val <<= entry->register_region.bit_offset; if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) { u64 valr = 0; - rc = acpi_atomic_read(&valr, &entry->register_region); + rc = apei_read(&valr, &entry->register_region); if (rc) return rc; valr &= ~(entry->mask << entry->register_region.bit_offset); val |= valr; } - rc = acpi_atomic_write(val, &entry->register_region); + rc = apei_write(val, &entry->register_region); return rc; } @@ -243,7 +243,7 @@ static int pre_map_gar_callback(struct apei_exec_context *ctx, u8 ins = entry->instruction; if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER) - return acpi_pre_map_gar(&entry->register_region); + return acpi_os_map_generic_address(&entry->register_region); return 0; } @@ -276,7 +276,7 @@ static int post_unmap_gar_callback(struct apei_exec_context *ctx, u8 ins = entry->instruction; if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER) - acpi_post_unmap_gar(&entry->register_region); + acpi_os_unmap_generic_address(&entry->register_region); return 0; } @@ -421,6 +421,17 @@ static int apei_resources_merge(struct apei_resources *resources1, return 0; } +int apei_resources_add(struct apei_resources *resources, + unsigned long start, unsigned long size, + bool iomem) +{ + if (iomem) + return apei_res_add(&resources->iomem, start, size); + else + return apei_res_add(&resources->ioport, start, size); +} +EXPORT_SYMBOL_GPL(apei_resources_add); + /* * EINJ has two groups of GARs (EINJ table entry and trigger table * entry), so common resources are subtracted from the trigger table @@ -438,8 +449,19 @@ int apei_resources_sub(struct apei_resources *resources1, } EXPORT_SYMBOL_GPL(apei_resources_sub); +static int apei_get_nvs_callback(__u64 start, __u64 size, void *data) +{ + struct apei_resources *resources = data; + return apei_res_add(&resources->iomem, start, size); +} + +static int apei_get_nvs_resources(struct apei_resources *resources) +{ + return acpi_nvs_for_each_region(apei_get_nvs_callback, resources); +} + /* - * IO memory/port rersource management mechanism is used to check + * IO memory/port resource management mechanism is used to check * whether memory/port area used 
by GARs conflicts with normal memory * or IO memory/port of devices. */ @@ -448,21 +470,35 @@ int apei_resources_request(struct apei_resources *resources, { struct apei_res *res, *res_bak = NULL; struct resource *r; + struct apei_resources nvs_resources; int rc; rc = apei_resources_sub(resources, &apei_resources_all); if (rc) return rc; + /* + * Some firmware uses ACPI NVS region, that has been marked as + * busy, so exclude it from APEI resources to avoid false + * conflict. + */ + apei_resources_init(&nvs_resources); + rc = apei_get_nvs_resources(&nvs_resources); + if (rc) + goto res_fini; + rc = apei_resources_sub(resources, &nvs_resources); + if (rc) + goto res_fini; + rc = -EINVAL; list_for_each_entry(res, &resources->iomem, list) { r = request_mem_region(res->start, res->end - res->start, desc); if (!r) { pr_err(APEI_PFX - "Can not request iomem region <%016llx-%016llx> for GARs.\n", + "Can not request [mem %#010llx-%#010llx] for %s registers\n", (unsigned long long)res->start, - (unsigned long long)res->end); + (unsigned long long)res->end - 1, desc); res_bak = res; goto err_unmap_iomem; } @@ -472,9 +508,9 @@ int apei_resources_request(struct apei_resources *resources, r = request_region(res->start, res->end - res->start, desc); if (!r) { pr_err(APEI_PFX - "Can not request ioport region <%016llx-%016llx> for GARs.\n", + "Can not request [io %#06llx-%#06llx] for %s registers\n", (unsigned long long)res->start, - (unsigned long long)res->end); + (unsigned long long)res->end - 1, desc); res_bak = res; goto err_unmap_ioport; } @@ -500,6 +536,8 @@ err_unmap_iomem: break; release_mem_region(res->start, res->end - res->start); } +res_fini: + apei_resources_fini(&nvs_resources); return rc; } EXPORT_SYMBOL_GPL(apei_resources_request); @@ -553,6 +591,69 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr) return 0; } +/* read GAR in interrupt (including NMI) or process context */ +int apei_read(u64 *val, struct acpi_generic_address *reg) +{ + int rc; + u64 address; + acpi_status status; + + rc = apei_check_gar(reg, &address); + if (rc) + return rc; + + *val = 0; + switch(reg->space_id) { + case ACPI_ADR_SPACE_SYSTEM_MEMORY: + status = acpi_os_read_memory64((acpi_physical_address) + address, val, reg->bit_width); + if (ACPI_FAILURE(status)) + return -EIO; + break; + case ACPI_ADR_SPACE_SYSTEM_IO: + status = acpi_os_read_port(address, (u32 *)val, reg->bit_width); + if (ACPI_FAILURE(status)) + return -EIO; + break; + default: + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(apei_read); + +/* write GAR in interrupt (including NMI) or process context */ +int apei_write(u64 val, struct acpi_generic_address *reg) +{ + int rc; + u64 address; + acpi_status status; + + rc = apei_check_gar(reg, &address); + if (rc) + return rc; + + switch (reg->space_id) { + case ACPI_ADR_SPACE_SYSTEM_MEMORY: + status = acpi_os_write_memory64((acpi_physical_address) + address, val, reg->bit_width); + if (ACPI_FAILURE(status)) + return -EIO; + break; + case ACPI_ADR_SPACE_SYSTEM_IO: + status = acpi_os_write_port(address, val, reg->bit_width); + if (ACPI_FAILURE(status)) + return -EIO; + break; + default: + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(apei_write); + static int collect_res_callback(struct apei_exec_context *ctx, struct acpi_whea_header *entry, void *data) diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h index f57050e..cca240a 100644 --- a/drivers/acpi/apei/apei-internal.h +++ b/drivers/acpi/apei/apei-internal.h @@ -68,6 +68,9 @@ 
static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 actio /* IP has been set in instruction function */ #define APEI_EXEC_SET_IP 1 +int apei_read(u64 *val, struct acpi_generic_address *reg); +int apei_write(u64 val, struct acpi_generic_address *reg); + int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val); int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val); int apei_exec_read_register(struct apei_exec_context *ctx, @@ -95,6 +98,9 @@ static inline void apei_resources_init(struct apei_resources *resources) } void apei_resources_fini(struct apei_resources *resources); +int apei_resources_add(struct apei_resources *resources, + unsigned long start, unsigned long size, + bool iomem); int apei_resources_sub(struct apei_resources *resources1, struct apei_resources *resources2); int apei_resources_request(struct apei_resources *resources, diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c index 589b96c..4ca087d 100644 --- a/drivers/acpi/apei/einj.c +++ b/drivers/acpi/apei/einj.c @@ -43,6 +43,42 @@ #define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC) /* + * ACPI version 5 provides a SET_ERROR_TYPE_WITH_ADDRESS action. + */ +static int acpi5; + +struct set_error_type_with_address { + u32 type; + u32 vendor_extension; + u32 flags; + u32 apicid; + u64 memory_address; + u64 memory_address_range; + u32 pcie_sbdf; +}; +enum { + SETWA_FLAGS_APICID = 1, + SETWA_FLAGS_MEM = 2, + SETWA_FLAGS_PCIE_SBDF = 4, +}; + +/* + * Vendor extensions for platform specific operations + */ +struct vendor_error_type_extension { + u32 length; + u32 pcie_sbdf; + u16 vendor_id; + u16 device_id; + u8 rev_id; + u8 reserved[3]; +}; + +static u32 vendor_flags; +static struct debugfs_blob_wrapper vendor_blob; +static char vendor_dev[64]; + +/* * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the * EINJ table through an unpublished extension. 
Use with caution as * most will ignore the parameter and make their own choice of address @@ -103,15 +139,7 @@ static struct apei_exec_ins_type einj_ins_type[] = { */ static DEFINE_MUTEX(einj_mutex); -static struct einj_parameter *einj_param; - -#ifndef writeq -static inline void writeq(__u64 val, volatile void __iomem *addr) -{ - writel(val, addr); - writel(val >> 32, addr+4); -} -#endif +static void *einj_param; static void einj_exec_ctx_init(struct apei_exec_context *ctx) { @@ -158,10 +186,30 @@ static int einj_timedout(u64 *t) return 0; } -static u64 einj_get_parameter_address(void) +static void check_vendor_extension(u64 paddr, + struct set_error_type_with_address *v5param) +{ + int offset = v5param->vendor_extension; + struct vendor_error_type_extension *v; + u32 sbdf; + + if (!offset) + return; + v = acpi_os_map_memory(paddr + offset, sizeof(*v)); + if (!v) + return; + sbdf = v->pcie_sbdf; + sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n", + sbdf >> 24, (sbdf >> 16) & 0xff, + (sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7, + v->vendor_id, v->device_id, v->rev_id); + acpi_os_unmap_memory(v, sizeof(*v)); +} + +static void *einj_get_parameter_address(void) { int i; - u64 paddr = 0; + u64 paddrv4 = 0, paddrv5 = 0; struct acpi_whea_header *entry; entry = EINJ_TAB_ENTRY(einj_tab); @@ -170,12 +218,40 @@ static u64 einj_get_parameter_address(void) entry->instruction == ACPI_EINJ_WRITE_REGISTER && entry->register_region.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) - memcpy(&paddr, &entry->register_region.address, - sizeof(paddr)); + memcpy(&paddrv4, &entry->register_region.address, + sizeof(paddrv4)); + if (entry->action == ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS && + entry->instruction == ACPI_EINJ_WRITE_REGISTER && + entry->register_region.space_id == + ACPI_ADR_SPACE_SYSTEM_MEMORY) + memcpy(&paddrv5, &entry->register_region.address, + sizeof(paddrv5)); entry++; } + if (paddrv5) { + struct set_error_type_with_address *v5param; + + v5param = acpi_os_map_memory(paddrv5, sizeof(*v5param)); + if (v5param) { + acpi5 = 1; + check_vendor_extension(paddrv5, v5param); + return v5param; + } + } + if (paddrv4) { + struct einj_parameter *v4param; + + v4param = acpi_os_map_memory(paddrv4, sizeof(*v4param)); + if (!v4param) + return NULL; + if (v4param->reserved1 || v4param->reserved2) { + acpi_os_unmap_memory(v4param, sizeof(*v4param)); + return NULL; + } + return v4param; + } - return paddr; + return NULL; } /* do sanity check to trigger table */ @@ -184,7 +260,7 @@ static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab) if (trigger_tab->header_size != sizeof(struct acpi_einj_trigger)) return -EINVAL; if (trigger_tab->table_size > PAGE_SIZE || - trigger_tab->table_size <= trigger_tab->header_size) + trigger_tab->table_size < trigger_tab->header_size) return -EINVAL; if (trigger_tab->entry_count != (trigger_tab->table_size - trigger_tab->header_size) / @@ -194,8 +270,29 @@ static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab) return 0; } +static struct acpi_generic_address *einj_get_trigger_parameter_region( + struct acpi_einj_trigger *trigger_tab, u64 param1, u64 param2) +{ + int i; + struct acpi_whea_header *entry; + + entry = (struct acpi_whea_header *) + ((char *)trigger_tab + sizeof(struct acpi_einj_trigger)); + for (i = 0; i < trigger_tab->entry_count; i++) { + if (entry->action == ACPI_EINJ_TRIGGER_ERROR && + entry->instruction == ACPI_EINJ_WRITE_REGISTER_VALUE && + entry->register_region.space_id == + ACPI_ADR_SPACE_SYSTEM_MEMORY && + 
(entry->register_region.address & param2) == (param1 & param2)) + return &entry->register_region; + entry++; + } + + return NULL; +} /* Execute instructions in trigger error action table */ -static int __einj_error_trigger(u64 trigger_paddr) +static int __einj_error_trigger(u64 trigger_paddr, u32 type, + u64 param1, u64 param2) { struct acpi_einj_trigger *trigger_tab = NULL; struct apei_exec_context trigger_ctx; @@ -204,14 +301,16 @@ static int __einj_error_trigger(u64 trigger_paddr) struct resource *r; u32 table_size; int rc = -EIO; + struct acpi_generic_address *trigger_param_region = NULL; r = request_mem_region(trigger_paddr, sizeof(*trigger_tab), "APEI EINJ Trigger Table"); if (!r) { pr_err(EINJ_PFX - "Can not request iomem region <%016llx-%016llx> for Trigger table.\n", + "Can not request [mem %#010llx-%#010llx] for Trigger table\n", (unsigned long long)trigger_paddr, - (unsigned long long)trigger_paddr+sizeof(*trigger_tab)); + (unsigned long long)trigger_paddr + + sizeof(*trigger_tab) - 1); goto out; } trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab)); @@ -225,6 +324,11 @@ static int __einj_error_trigger(u64 trigger_paddr) "The trigger error action table is invalid\n"); goto out_rel_header; } + + /* No action structures in the TRIGGER_ERROR table, nothing to do */ + if (!trigger_tab->entry_count) + goto out_rel_header; + rc = -EIO; table_size = trigger_tab->table_size; r = request_mem_region(trigger_paddr + sizeof(*trigger_tab), @@ -232,9 +336,9 @@ static int __einj_error_trigger(u64 trigger_paddr) "APEI EINJ Trigger Table"); if (!r) { pr_err(EINJ_PFX -"Can not request iomem region <%016llx-%016llx> for Trigger Table Entry.\n", - (unsigned long long)trigger_paddr+sizeof(*trigger_tab), - (unsigned long long)trigger_paddr + table_size); +"Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n", + (unsigned long long)trigger_paddr + sizeof(*trigger_tab), + (unsigned long long)trigger_paddr + table_size - 1); goto out_rel_header; } iounmap(trigger_tab); @@ -255,6 +359,30 @@ static int __einj_error_trigger(u64 trigger_paddr) rc = apei_resources_sub(&trigger_resources, &einj_resources); if (rc) goto out_fini; + /* + * Some firmware will access target address specified in + * param1 to trigger the error when injecting memory error. + * This will cause resource conflict with regular memory. So + * remove it from trigger table resources. 
+ */ + if (param_extension && (type & 0x0038) && param2) { + struct apei_resources addr_resources; + apei_resources_init(&addr_resources); + trigger_param_region = einj_get_trigger_parameter_region( + trigger_tab, param1, param2); + if (trigger_param_region) { + rc = apei_resources_add(&addr_resources, + trigger_param_region->address, + trigger_param_region->bit_width/8, true); + if (rc) + goto out_fini; + rc = apei_resources_sub(&trigger_resources, + &addr_resources); + } + apei_resources_fini(&addr_resources); + if (rc) + goto out_fini; + } rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger"); if (rc) goto out_fini; @@ -293,12 +421,56 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2) if (rc) return rc; apei_exec_ctx_set_input(&ctx, type); - rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE); - if (rc) - return rc; - if (einj_param) { - writeq(param1, &einj_param->param1); - writeq(param2, &einj_param->param2); + if (acpi5) { + struct set_error_type_with_address *v5param = einj_param; + + v5param->type = type; + if (type & 0x80000000) { + switch (vendor_flags) { + case SETWA_FLAGS_APICID: + v5param->apicid = param1; + break; + case SETWA_FLAGS_MEM: + v5param->memory_address = param1; + v5param->memory_address_range = param2; + break; + case SETWA_FLAGS_PCIE_SBDF: + v5param->pcie_sbdf = param1; + break; + } + v5param->flags = vendor_flags; + } else { + switch (type) { + case ACPI_EINJ_PROCESSOR_CORRECTABLE: + case ACPI_EINJ_PROCESSOR_UNCORRECTABLE: + case ACPI_EINJ_PROCESSOR_FATAL: + v5param->apicid = param1; + v5param->flags = SETWA_FLAGS_APICID; + break; + case ACPI_EINJ_MEMORY_CORRECTABLE: + case ACPI_EINJ_MEMORY_UNCORRECTABLE: + case ACPI_EINJ_MEMORY_FATAL: + v5param->memory_address = param1; + v5param->memory_address_range = param2; + v5param->flags = SETWA_FLAGS_MEM; + break; + case ACPI_EINJ_PCIX_CORRECTABLE: + case ACPI_EINJ_PCIX_UNCORRECTABLE: + case ACPI_EINJ_PCIX_FATAL: + v5param->pcie_sbdf = param1; + v5param->flags = SETWA_FLAGS_PCIE_SBDF; + break; + } + } + } else { + rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE); + if (rc) + return rc; + if (einj_param) { + struct einj_parameter *v4param = einj_param; + v4param->param1 = param1; + v4param->param2 = param2; + } } rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION); if (rc) @@ -324,7 +496,7 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2) if (rc) return rc; trigger_paddr = apei_exec_ctx_get_output(&ctx); - rc = __einj_error_trigger(trigger_paddr); + rc = __einj_error_trigger(trigger_paddr, type, param1, param2); if (rc) return rc; rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION); @@ -408,15 +580,25 @@ static int error_type_set(void *data, u64 val) { int rc; u32 available_error_type = 0; + u32 tval, vendor; + + /* + * Vendor defined types have 0x80000000 bit set, and + * are not enumerated by ACPI_EINJ_GET_ERROR_TYPE + */ + vendor = val & 0x80000000; + tval = val & 0x7fffffff; /* Only one error type can be specified */ - if (val & (val - 1)) - return -EINVAL; - rc = einj_get_available_error_type(&available_error_type); - if (rc) - return rc; - if (!(val & available_error_type)) + if (tval & (tval - 1)) return -EINVAL; + if (!vendor) { + rc = einj_get_available_error_type(&available_error_type); + if (rc) + return rc; + if (!(val & available_error_type)) + return -EINVAL; + } error_type = val; return 0; @@ -455,7 +637,6 @@ static int einj_check_table(struct acpi_table_einj *einj_tab) static int __init einj_init(void) { int rc; - u64 param_paddr; acpi_status 
status; struct dentry *fentry; struct apei_exec_context ctx; @@ -465,10 +646,9 @@ static int __init einj_init(void) status = acpi_get_table(ACPI_SIG_EINJ, 0, (struct acpi_table_header **)&einj_tab); - if (status == AE_NOT_FOUND) { - pr_info(EINJ_PFX "Table is not found!\n"); + if (status == AE_NOT_FOUND) return -ENODEV; - } else if (ACPI_FAILURE(status)) { + else if (ACPI_FAILURE(status)) { const char *msg = acpi_format_exception(status); pr_err(EINJ_PFX "Failed to get table, %s\n", msg); return -EINVAL; @@ -509,23 +689,30 @@ static int __init einj_init(void) rc = apei_exec_pre_map_gars(&ctx); if (rc) goto err_release; - if (param_extension) { - param_paddr = einj_get_parameter_address(); - if (param_paddr) { - einj_param = ioremap(param_paddr, sizeof(*einj_param)); - rc = -ENOMEM; - if (!einj_param) - goto err_unmap; - fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR, - einj_debug_dir, &error_param1); - if (!fentry) - goto err_unmap; - fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR, - einj_debug_dir, &error_param2); - if (!fentry) - goto err_unmap; - } else - pr_warn(EINJ_PFX "Parameter extension is not supported.\n"); + + einj_param = einj_get_parameter_address(); + if ((param_extension || acpi5) && einj_param) { + fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR, + einj_debug_dir, &error_param1); + if (!fentry) + goto err_unmap; + fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR, + einj_debug_dir, &error_param2); + if (!fentry) + goto err_unmap; + } + + if (vendor_dev[0]) { + vendor_blob.data = vendor_dev; + vendor_blob.size = strlen(vendor_dev); + fentry = debugfs_create_blob("vendor", S_IRUSR, + einj_debug_dir, &vendor_blob); + if (!fentry) + goto err_unmap; + fentry = debugfs_create_x32("vendor_flags", S_IRUSR | S_IWUSR, + einj_debug_dir, &vendor_flags); + if (!fentry) + goto err_unmap; } pr_info(EINJ_PFX "Error INJection is initialized.\n"); @@ -533,8 +720,13 @@ static int __init einj_init(void) return 0; err_unmap: - if (einj_param) - iounmap(einj_param); + if (einj_param) { + acpi_size size = (acpi5) ? + sizeof(struct set_error_type_with_address) : + sizeof(struct einj_parameter); + + acpi_os_unmap_memory(einj_param, size); + } apei_exec_post_unmap_gars(&ctx); err_release: apei_resources_release(&einj_resources); @@ -550,8 +742,13 @@ static void __exit einj_exit(void) { struct apei_exec_context ctx; - if (einj_param) - iounmap(einj_param); + if (einj_param) { + acpi_size size = (acpi5) ? 
+ sizeof(struct set_error_type_with_address) : + sizeof(struct einj_parameter); + + acpi_os_unmap_memory(einj_param, size); + } einj_exec_ctx_init(&ctx); apei_exec_post_unmap_gars(&ctx); apei_resources_release(&einj_resources); diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 6a9e3ba..eb9fab5 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -1127,10 +1127,9 @@ static int __init erst_init(void) status = acpi_get_table(ACPI_SIG_ERST, 0, (struct acpi_table_header **)&erst_tab); - if (status == AE_NOT_FOUND) { - pr_info(ERST_PFX "Table is not found!\n"); + if (status == AE_NOT_FOUND) goto err; - } else if (ACPI_FAILURE(status)) { + else if (ACPI_FAILURE(status)) { const char *msg = acpi_format_exception(status); pr_err(ERST_PFX "Failed to get table, %s\n", msg); rc = -EINVAL; diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index ebaf037..9b3cac0 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -33,6 +33,7 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/acpi.h> +#include <linux/acpi_io.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/timer.h> @@ -45,8 +46,9 @@ #include <linux/irq_work.h> #include <linux/llist.h> #include <linux/genalloc.h> +#include <linux/pci.h> +#include <linux/aer.h> #include <acpi/apei.h> -#include <acpi/atomicio.h> #include <acpi/hed.h> #include <asm/mce.h> #include <asm/tlbflush.h> @@ -299,7 +301,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic) if (!ghes) return ERR_PTR(-ENOMEM); ghes->generic = generic; - rc = acpi_pre_map_gar(&generic->error_status_address); + rc = acpi_os_map_generic_address(&generic->error_status_address); if (rc) goto err_free; error_block_length = generic->error_block_length; @@ -319,7 +321,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic) return ghes; err_unmap: - acpi_post_unmap_gar(&generic->error_status_address); + acpi_os_unmap_generic_address(&generic->error_status_address); err_free: kfree(ghes); return ERR_PTR(rc); @@ -328,7 +330,7 @@ err_free: static void ghes_fini(struct ghes *ghes) { kfree(ghes->estatus); - acpi_post_unmap_gar(&ghes->generic->error_status_address); + acpi_os_unmap_generic_address(&ghes->generic->error_status_address); } enum { @@ -399,7 +401,7 @@ static int ghes_read_estatus(struct ghes *ghes, int silent) u32 len; int rc; - rc = acpi_atomic_read(&buf_paddr, &g->error_status_address); + rc = apei_read(&buf_paddr, &g->error_status_address); if (rc) { if (!silent && printk_ratelimit()) pr_warning(FW_WARN GHES_PFX @@ -476,6 +478,27 @@ static void ghes_do_proc(const struct acpi_hest_generic_status *estatus) } #endif } +#ifdef CONFIG_ACPI_APEI_PCIEAER + else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type, + CPER_SEC_PCIE)) { + struct cper_sec_pcie *pcie_err; + pcie_err = (struct cper_sec_pcie *)(gdata+1); + if (sev == GHES_SEV_RECOVERABLE && + sec_sev == GHES_SEV_RECOVERABLE && + pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID && + pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) { + unsigned int devfn; + int aer_severity; + devfn = PCI_DEVFN(pcie_err->device_id.device, + pcie_err->device_id.function); + aer_severity = cper_severity_to_aer(sev); + aer_recover_queue(pcie_err->device_id.segment, + pcie_err->device_id.bus, + devfn, aer_severity); + } + + } +#endif } } @@ -483,16 +506,22 @@ static void __ghes_print_estatus(const char *pfx, const struct acpi_hest_generic *generic, const struct acpi_hest_generic_status *estatus) { + static atomic_t seqno; + 
unsigned int curr_seqno; + char pfx_seq[64]; + if (pfx == NULL) { if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED) - pfx = KERN_WARNING HW_ERR; + pfx = KERN_WARNING; else - pfx = KERN_ERR HW_ERR; + pfx = KERN_ERR; } + curr_seqno = atomic_inc_return(&seqno); + snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno); printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n", - pfx, generic->header.source_id); - apei_estatus_print(pfx, estatus); + pfx_seq, generic->header.source_id); + apei_estatus_print(pfx_seq, estatus); } static int ghes_print_estatus(const char *pfx, @@ -711,26 +740,34 @@ static int ghes_notify_sci(struct notifier_block *this, return ret; } +static struct llist_node *llist_nodes_reverse(struct llist_node *llnode) +{ + struct llist_node *next, *tail = NULL; + + while (llnode) { + next = llnode->next; + llnode->next = tail; + tail = llnode; + llnode = next; + } + + return tail; +} + static void ghes_proc_in_irq(struct irq_work *irq_work) { - struct llist_node *llnode, *next, *tail = NULL; + struct llist_node *llnode, *next; struct ghes_estatus_node *estatus_node; struct acpi_hest_generic *generic; struct acpi_hest_generic_status *estatus; u32 len, node_len; + llnode = llist_del_all(&ghes_estatus_llist); /* * Because the time order of estatus in list is reversed, * revert it back to proper order. */ - llnode = llist_del_all(&ghes_estatus_llist); - while (llnode) { - next = llnode->next; - llnode->next = tail; - tail = llnode; - llnode = next; - } - llnode = tail; + llnode = llist_nodes_reverse(llnode); while (llnode) { next = llnode->next; estatus_node = llist_entry(llnode, struct ghes_estatus_node, @@ -750,6 +787,32 @@ static void ghes_proc_in_irq(struct irq_work *irq_work) } } +static void ghes_print_queued_estatus(void) +{ + struct llist_node *llnode; + struct ghes_estatus_node *estatus_node; + struct acpi_hest_generic *generic; + struct acpi_hest_generic_status *estatus; + u32 len, node_len; + + llnode = llist_del_all(&ghes_estatus_llist); + /* + * Because the time order of estatus in list is reversed, + * revert it back to proper order. + */ + llnode = llist_nodes_reverse(llnode); + while (llnode) { + estatus_node = llist_entry(llnode, struct ghes_estatus_node, + llnode); + estatus = GHES_ESTATUS_FROM_NODE(estatus_node); + len = apei_estatus_len(estatus); + node_len = GHES_ESTATUS_NODE_LEN(len); + generic = estatus_node->generic; + ghes_print_estatus(NULL, generic, estatus); + llnode = llnode->next; + } +} + static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs) { struct ghes *ghes, *ghes_global = NULL; @@ -775,7 +838,8 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs) if (sev_global >= GHES_SEV_PANIC) { oops_begin(); - __ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global->generic, + ghes_print_queued_estatus(); + __ghes_print_estatus(KERN_EMERG, ghes_global->generic, ghes_global->estatus); /* reboot to log the error! 
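llist_del_all() hands back the lock-less list in LIFO order, so the patch factors the in-place pointer reversal into llist_nodes_reverse() and reuses it from both ghes_proc_in_irq() and the new ghes_print_queued_estatus(). A standalone sketch of the same reversal loop on a private node type (not the kernel's struct llist_node):

#include <stdio.h>
#include <stddef.h>

struct node {
	struct node *next;
	int seq;
};

/* Reverse a singly linked list in place; entries were pushed LIFO,
 * so reversing restores chronological order. */
static struct node *nodes_reverse(struct node *head)
{
	struct node *next, *tail = NULL;

	while (head) {
		next = head->next;
		head->next = tail;
		tail = head;
		head = next;
	}
	return tail;
}

int main(void)
{
	struct node n[3] = { { NULL, 0 } };
	struct node *head = NULL;
	int i;

	for (i = 0; i < 3; i++) {	/* push 0, 1, 2 (LIFO) */
		n[i].seq = i;
		n[i].next = head;
		head = &n[i];
	}
	for (head = nodes_reverse(head); head; head = head->next)
		printf("%d ", head->seq);	/* prints 0 1 2 */
	printf("\n");
	return 0;
}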
*/ if (panic_timeout == 0) diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c index ee7fddc..7f00cf3 100644 --- a/drivers/acpi/apei/hest.c +++ b/drivers/acpi/apei/hest.c @@ -221,10 +221,9 @@ void __init acpi_hest_init(void) status = acpi_get_table(ACPI_SIG_HEST, 0, (struct acpi_table_header **)&hest_tab); - if (status == AE_NOT_FOUND) { - pr_info(HEST_PFX "Table not found.\n"); + if (status == AE_NOT_FOUND) goto err; - } else if (ACPI_FAILURE(status)) { + else if (ACPI_FAILURE(status)) { const char *msg = acpi_format_exception(status); pr_err(HEST_PFX "Failed to get table, %s\n", msg); rc = -EINVAL; diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c deleted file mode 100644 index cfc0cc1..0000000 --- a/drivers/acpi/atomicio.c +++ /dev/null @@ -1,365 +0,0 @@ -/* - * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then - * accessing in atomic context. - * - * This is used for NMI handler to access IO memory area, because - * ioremap/iounmap can not be used in NMI handler. The IO memory area - * is pre-mapped in process context and accessed in NMI handler. - * - * Copyright (C) 2009-2010, Intel Corp. - * Author: Huang Ying <ying.huang@intel.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include <linux/kernel.h> -#include <linux/export.h> -#include <linux/init.h> -#include <linux/acpi.h> -#include <linux/io.h> -#include <linux/kref.h> -#include <linux/rculist.h> -#include <linux/interrupt.h> -#include <linux/slab.h> -#include <acpi/atomicio.h> - -#define ACPI_PFX "ACPI: " - -static LIST_HEAD(acpi_iomaps); -/* - * Used for mutual exclusion between writers of acpi_iomaps list, for - * synchronization between readers and writer, RCU is used. - */ -static DEFINE_SPINLOCK(acpi_iomaps_lock); - -struct acpi_iomap { - struct list_head list; - void __iomem *vaddr; - unsigned long size; - phys_addr_t paddr; - struct kref ref; -}; - -/* acpi_iomaps_lock or RCU read lock must be held before calling */ -static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr, - unsigned long size) -{ - struct acpi_iomap *map; - - list_for_each_entry_rcu(map, &acpi_iomaps, list) { - if (map->paddr + map->size >= paddr + size && - map->paddr <= paddr) - return map; - } - return NULL; -} - -/* - * Atomic "ioremap" used by NMI handler, if the specified IO memory - * area is not pre-mapped, NULL will be returned. 
- * - * acpi_iomaps_lock or RCU read lock must be held before calling - */ -static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr, - unsigned long size) -{ - struct acpi_iomap *map; - - map = __acpi_find_iomap(paddr, size/8); - if (map) - return map->vaddr + (paddr - map->paddr); - else - return NULL; -} - -/* acpi_iomaps_lock must be held before calling */ -static void __iomem *__acpi_try_ioremap(phys_addr_t paddr, - unsigned long size) -{ - struct acpi_iomap *map; - - map = __acpi_find_iomap(paddr, size); - if (map) { - kref_get(&map->ref); - return map->vaddr + (paddr - map->paddr); - } else - return NULL; -} - -/* - * Used to pre-map the specified IO memory area. First try to find - * whether the area is already pre-mapped, if it is, increase the - * reference count (in __acpi_try_ioremap) and return; otherwise, do - * the real ioremap, and add the mapping into acpi_iomaps list. - */ -static void __iomem *acpi_pre_map(phys_addr_t paddr, - unsigned long size) -{ - void __iomem *vaddr; - struct acpi_iomap *map; - unsigned long pg_sz, flags; - phys_addr_t pg_off; - - spin_lock_irqsave(&acpi_iomaps_lock, flags); - vaddr = __acpi_try_ioremap(paddr, size); - spin_unlock_irqrestore(&acpi_iomaps_lock, flags); - if (vaddr) - return vaddr; - - pg_off = paddr & PAGE_MASK; - pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off; - vaddr = ioremap(pg_off, pg_sz); - if (!vaddr) - return NULL; - map = kmalloc(sizeof(*map), GFP_KERNEL); - if (!map) - goto err_unmap; - INIT_LIST_HEAD(&map->list); - map->paddr = pg_off; - map->size = pg_sz; - map->vaddr = vaddr; - kref_init(&map->ref); - - spin_lock_irqsave(&acpi_iomaps_lock, flags); - vaddr = __acpi_try_ioremap(paddr, size); - if (vaddr) { - spin_unlock_irqrestore(&acpi_iomaps_lock, flags); - iounmap(map->vaddr); - kfree(map); - return vaddr; - } - list_add_tail_rcu(&map->list, &acpi_iomaps); - spin_unlock_irqrestore(&acpi_iomaps_lock, flags); - - return map->vaddr + (paddr - map->paddr); -err_unmap: - iounmap(vaddr); - return NULL; -} - -/* acpi_iomaps_lock must be held before calling */ -static void __acpi_kref_del_iomap(struct kref *ref) -{ - struct acpi_iomap *map; - - map = container_of(ref, struct acpi_iomap, ref); - list_del_rcu(&map->list); -} - -/* - * Used to post-unmap the specified IO memory area. The iounmap is - * done only if the reference count goes zero. 
- */ -static void acpi_post_unmap(phys_addr_t paddr, unsigned long size) -{ - struct acpi_iomap *map; - unsigned long flags; - int del; - - spin_lock_irqsave(&acpi_iomaps_lock, flags); - map = __acpi_find_iomap(paddr, size); - BUG_ON(!map); - del = kref_put(&map->ref, __acpi_kref_del_iomap); - spin_unlock_irqrestore(&acpi_iomaps_lock, flags); - - if (!del) - return; - - synchronize_rcu(); - iounmap(map->vaddr); - kfree(map); -} - -/* In NMI handler, should set silent = 1 */ -static int acpi_check_gar(struct acpi_generic_address *reg, - u64 *paddr, int silent) -{ - u32 width, space_id; - - width = reg->bit_width; - space_id = reg->space_id; - /* Handle possible alignment issues */ - memcpy(paddr, ®->address, sizeof(*paddr)); - if (!*paddr) { - if (!silent) - pr_warning(FW_BUG ACPI_PFX - "Invalid physical address in GAR [0x%llx/%u/%u]\n", - *paddr, width, space_id); - return -EINVAL; - } - - if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) { - if (!silent) - pr_warning(FW_BUG ACPI_PFX - "Invalid bit width in GAR [0x%llx/%u/%u]\n", - *paddr, width, space_id); - return -EINVAL; - } - - if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY && - space_id != ACPI_ADR_SPACE_SYSTEM_IO) { - if (!silent) - pr_warning(FW_BUG ACPI_PFX - "Invalid address space type in GAR [0x%llx/%u/%u]\n", - *paddr, width, space_id); - return -EINVAL; - } - - return 0; -} - -/* Pre-map, working on GAR */ -int acpi_pre_map_gar(struct acpi_generic_address *reg) -{ - u64 paddr; - void __iomem *vaddr; - int rc; - - if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) - return 0; - - rc = acpi_check_gar(reg, &paddr, 0); - if (rc) - return rc; - - vaddr = acpi_pre_map(paddr, reg->bit_width / 8); - if (!vaddr) - return -EIO; - - return 0; -} -EXPORT_SYMBOL_GPL(acpi_pre_map_gar); - -/* Post-unmap, working on GAR */ -int acpi_post_unmap_gar(struct acpi_generic_address *reg) -{ - u64 paddr; - int rc; - - if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) - return 0; - - rc = acpi_check_gar(reg, &paddr, 0); - if (rc) - return rc; - - acpi_post_unmap(paddr, reg->bit_width / 8); - - return 0; -} -EXPORT_SYMBOL_GPL(acpi_post_unmap_gar); - -/* - * Can be used in atomic (including NMI) or process context. RCU read - * lock can only be released after the IO memory area accessing. 
- */ -static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width) -{ - void __iomem *addr; - - rcu_read_lock(); - addr = __acpi_ioremap_fast(paddr, width); - switch (width) { - case 8: - *val = readb(addr); - break; - case 16: - *val = readw(addr); - break; - case 32: - *val = readl(addr); - break; -#ifdef readq - case 64: - *val = readq(addr); - break; -#endif - default: - return -EINVAL; - } - rcu_read_unlock(); - - return 0; -} - -static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width) -{ - void __iomem *addr; - - rcu_read_lock(); - addr = __acpi_ioremap_fast(paddr, width); - switch (width) { - case 8: - writeb(val, addr); - break; - case 16: - writew(val, addr); - break; - case 32: - writel(val, addr); - break; -#ifdef writeq - case 64: - writeq(val, addr); - break; -#endif - default: - return -EINVAL; - } - rcu_read_unlock(); - - return 0; -} - -/* GAR accessing in atomic (including NMI) or process context */ -int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg) -{ - u64 paddr; - int rc; - - rc = acpi_check_gar(reg, &paddr, 1); - if (rc) - return rc; - - *val = 0; - switch (reg->space_id) { - case ACPI_ADR_SPACE_SYSTEM_MEMORY: - return acpi_atomic_read_mem(paddr, val, reg->bit_width); - case ACPI_ADR_SPACE_SYSTEM_IO: - return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width); - default: - return -EINVAL; - } -} -EXPORT_SYMBOL_GPL(acpi_atomic_read); - -int acpi_atomic_write(u64 val, struct acpi_generic_address *reg) -{ - u64 paddr; - int rc; - - rc = acpi_check_gar(reg, &paddr, 1); - if (rc) - return rc; - - switch (reg->space_id) { - case ACPI_ADR_SPACE_SYSTEM_MEMORY: - return acpi_atomic_write_mem(paddr, val, reg->bit_width); - case ACPI_ADR_SPACE_SYSTEM_IO: - return acpi_os_write_port(paddr, val, reg->bit_width); - default: - return -EINVAL; - } -} -EXPORT_SYMBOL_GPL(acpi_atomic_write); diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index 3b5c318..e56f3be 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -45,6 +45,8 @@ static int pxm_to_node_map[MAX_PXM_DOMAINS] static int node_to_pxm_map[MAX_NUMNODES] = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL }; +unsigned char acpi_srat_revision __initdata; + int pxm_to_node(int pxm) { if (pxm < 0) @@ -255,9 +257,13 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header, static int __init acpi_parse_srat(struct acpi_table_header *table) { + struct acpi_table_srat *srat; if (!table) return -EINVAL; + srat = (struct acpi_table_srat *)table; + acpi_srat_revision = srat->header.revision; + /* Real work done in acpi_table_parse_srat below. 
*/ return 0; diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c index 096787b..7a2035f 100644 --- a/drivers/acpi/nvs.c +++ b/drivers/acpi/nvs.c @@ -15,6 +15,56 @@ #include <linux/acpi_io.h> #include <acpi/acpiosxf.h> +/* ACPI NVS regions, APEI may use it */ + +struct nvs_region { + __u64 phys_start; + __u64 size; + struct list_head node; +}; + +static LIST_HEAD(nvs_region_list); + +#ifdef CONFIG_ACPI_SLEEP +static int suspend_nvs_register(unsigned long start, unsigned long size); +#else +static inline int suspend_nvs_register(unsigned long a, unsigned long b) +{ + return 0; +} +#endif + +int acpi_nvs_register(__u64 start, __u64 size) +{ + struct nvs_region *region; + + region = kmalloc(sizeof(*region), GFP_KERNEL); + if (!region) + return -ENOMEM; + region->phys_start = start; + region->size = size; + list_add_tail(®ion->node, &nvs_region_list); + + return suspend_nvs_register(start, size); +} + +int acpi_nvs_for_each_region(int (*func)(__u64 start, __u64 size, void *data), + void *data) +{ + int rc; + struct nvs_region *region; + + list_for_each_entry(region, &nvs_region_list, node) { + rc = func(region->phys_start, region->size, data); + if (rc) + return rc; + } + + return 0; +} + + +#ifdef CONFIG_ACPI_SLEEP /* * Platforms, like ACPI, may want us to save some memory used by them during * suspend and to restore the contents of this memory during the subsequent @@ -41,7 +91,7 @@ static LIST_HEAD(nvs_list); * things so that the data from page-aligned addresses in this region will * be copied into separate RAM pages. */ -int suspend_nvs_register(unsigned long start, unsigned long size) +static int suspend_nvs_register(unsigned long start, unsigned long size) { struct nvs_page *entry, *next; @@ -159,3 +209,4 @@ void suspend_nvs_restore(void) if (entry->data) memcpy(entry->kaddr, entry->data, entry->size); } +#endif diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index f31c5c5..412a1e0 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -31,6 +31,7 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/mm.h> +#include <linux/highmem.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/kmod.h> @@ -83,19 +84,6 @@ static struct workqueue_struct *kacpi_notify_wq; struct workqueue_struct *kacpi_hotplug_wq; EXPORT_SYMBOL(kacpi_hotplug_wq); -struct acpi_res_list { - resource_size_t start; - resource_size_t end; - acpi_adr_space_type resource_type; /* IO port, System memory, ...*/ - char name[5]; /* only can have a length of 4 chars, make use of this - one instead of res->name, no need to kalloc then */ - struct list_head resource_list; - int count; -}; - -static LIST_HEAD(resource_list_head); -static DEFINE_SPINLOCK(acpi_res_lock); - /* * This list of permanent mappings is for memory that may be accessed from * interrupt context, where we can't do the ioremap(). 
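acpi_nvs_register() and acpi_nvs_for_each_region() above follow a common registry-plus-callback shape: record each region on a list, then let other code walk the list with a callback that can abort the walk by returning non-zero. A userspace sketch of that shape with illustrative names and a plain singly linked list (the kernel version adds to the tail and uses struct list_head):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct region {
	uint64_t phys_start;
	uint64_t size;
	struct region *next;
};

static struct region *region_list;

/* Record a region so later code can walk all of them. */
static int region_register(uint64_t start, uint64_t size)
{
	struct region *r = malloc(sizeof(*r));

	if (!r)
		return -1;
	r->phys_start = start;
	r->size = size;
	r->next = region_list;
	region_list = r;
	return 0;
}

/* Invoke func for every registered region; stop on the first non-zero return. */
static int region_for_each(int (*func)(uint64_t start, uint64_t size, void *data),
			   void *data)
{
	struct region *r;
	int rc;

	for (r = region_list; r; r = r->next) {
		rc = func(r->phys_start, r->size, data);
		if (rc)
			return rc;
	}
	return 0;
}

static int print_region(uint64_t start, uint64_t size, void *data)
{
	(void)data;
	printf("region 0x%llx + 0x%llx\n",
	       (unsigned long long)start, (unsigned long long)size);
	return 0;
}

int main(void)
{
	region_register(0x7f000000, 0x10000);
	region_register(0x7f100000, 0x8000);
	return region_for_each(print_region, NULL);
}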
@@ -166,17 +154,21 @@ static u32 acpi_osi_handler(acpi_string interface, u32 supported) return supported; } -static void __init acpi_request_region (struct acpi_generic_address *addr, +static void __init acpi_request_region (struct acpi_generic_address *gas, unsigned int length, char *desc) { - if (!addr->address || !length) + u64 addr; + + /* Handle possible alignment issues */ + memcpy(&addr, &gas->address, sizeof(addr)); + if (!addr || !length) return; /* Resources are never freed */ - if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO) - request_region(addr->address, length, desc); - else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) - request_mem_region(addr->address, length, desc); + if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) + request_region(addr, length, desc); + else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) + request_mem_region(addr, length, desc); } static int __init acpi_reserve_resources(void) @@ -330,6 +322,37 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size) return NULL; } +#ifndef CONFIG_IA64 +#define should_use_kmap(pfn) page_is_ram(pfn) +#else +/* ioremap will take care of cache attributes */ +#define should_use_kmap(pfn) 0 +#endif + +static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz) +{ + unsigned long pfn; + + pfn = pg_off >> PAGE_SHIFT; + if (should_use_kmap(pfn)) { + if (pg_sz > PAGE_SIZE) + return NULL; + return (void __iomem __force *)kmap(pfn_to_page(pfn)); + } else + return acpi_os_ioremap(pg_off, pg_sz); +} + +static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr) +{ + unsigned long pfn; + + pfn = pg_off >> PAGE_SHIFT; + if (page_is_ram(pfn)) + kunmap(pfn_to_page(pfn)); + else + iounmap(vaddr); +} + void __iomem *__init_refok acpi_os_map_memory(acpi_physical_address phys, acpi_size size) { @@ -362,7 +385,7 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size) pg_off = round_down(phys, PAGE_SIZE); pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off; - virt = acpi_os_ioremap(pg_off, pg_sz); + virt = acpi_map(pg_off, pg_sz); if (!virt) { mutex_unlock(&acpi_ioremap_lock); kfree(map); @@ -393,7 +416,7 @@ static void acpi_os_map_cleanup(struct acpi_ioremap *map) { if (!map->refcount) { synchronize_rcu(); - iounmap(map->virt); + acpi_unmap(map->phys, map->virt); kfree(map); } } @@ -427,35 +450,42 @@ void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size) __acpi_unmap_table(virt, size); } -static int acpi_os_map_generic_address(struct acpi_generic_address *addr) +int acpi_os_map_generic_address(struct acpi_generic_address *gas) { + u64 addr; void __iomem *virt; - if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) + if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) return 0; - if (!addr->address || !addr->bit_width) + /* Handle possible alignment issues */ + memcpy(&addr, &gas->address, sizeof(addr)); + if (!addr || !gas->bit_width) return -EINVAL; - virt = acpi_os_map_memory(addr->address, addr->bit_width / 8); + virt = acpi_os_map_memory(addr, gas->bit_width / 8); if (!virt) return -EIO; return 0; } +EXPORT_SYMBOL(acpi_os_map_generic_address); -static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr) +void acpi_os_unmap_generic_address(struct acpi_generic_address *gas) { + u64 addr; struct acpi_ioremap *map; - if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) + if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) return; - if (!addr->address || !addr->bit_width) + /* Handle possible alignment issues */ + memcpy(&addr, &gas->address, sizeof(addr)); 
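Several hunks above replace direct use of gas->address with memcpy(&addr, &gas->address, sizeof(addr)), because the 64-bit address field of a GAS may not be naturally aligned and a direct load can fault on strict-alignment machines. A minimal sketch of that unaligned-safe read, using an arbitrary byte offset to stand in for the misaligned field:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* memcpy lets the compiler emit whatever (possibly byte-wise) loads are safe
 * for the target, independent of the source pointer's alignment. */
static uint64_t get_unaligned_u64(const void *p)
{
	uint64_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}

int main(void)
{
	unsigned char buf[16] = { 0 };
	uint64_t addr = 0xfed40000ull;

	memcpy(buf + 3, &addr, sizeof(addr));	/* deliberately misaligned */
	printf("0x%llx\n", (unsigned long long)get_unaligned_u64(buf + 3));
	return 0;
}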
+ if (!addr || !gas->bit_width) return; mutex_lock(&acpi_ioremap_lock); - map = acpi_map_lookup(addr->address, addr->bit_width / 8); + map = acpi_map_lookup(addr, gas->bit_width / 8); if (!map) { mutex_unlock(&acpi_ioremap_lock); return; @@ -465,6 +495,7 @@ static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr) acpi_os_map_cleanup(map); } +EXPORT_SYMBOL(acpi_os_unmap_generic_address); #ifdef ACPI_FUTURE_USAGE acpi_status @@ -711,6 +742,67 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width) return AE_OK; } +#ifdef readq +static inline u64 read64(const volatile void __iomem *addr) +{ + return readq(addr); +} +#else +static inline u64 read64(const volatile void __iomem *addr) +{ + u64 l, h; + l = readl(addr); + h = readl(addr+4); + return l | (h << 32); +} +#endif + +acpi_status +acpi_os_read_memory64(acpi_physical_address phys_addr, u64 *value, u32 width) +{ + void __iomem *virt_addr; + unsigned int size = width / 8; + bool unmap = false; + u64 dummy; + + rcu_read_lock(); + virt_addr = acpi_map_vaddr_lookup(phys_addr, size); + if (!virt_addr) { + rcu_read_unlock(); + virt_addr = acpi_os_ioremap(phys_addr, size); + if (!virt_addr) + return AE_BAD_ADDRESS; + unmap = true; + } + + if (!value) + value = &dummy; + + switch (width) { + case 8: + *(u8 *) value = readb(virt_addr); + break; + case 16: + *(u16 *) value = readw(virt_addr); + break; + case 32: + *(u32 *) value = readl(virt_addr); + break; + case 64: + *(u64 *) value = read64(virt_addr); + break; + default: + BUG(); + } + + if (unmap) + iounmap(virt_addr); + else + rcu_read_unlock(); + + return AE_OK; +} + acpi_status acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) { @@ -750,6 +842,61 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) return AE_OK; } +#ifdef writeq +static inline void write64(u64 val, volatile void __iomem *addr) +{ + writeq(val, addr); +} +#else +static inline void write64(u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val>>32, addr+4); +} +#endif + +acpi_status +acpi_os_write_memory64(acpi_physical_address phys_addr, u64 value, u32 width) +{ + void __iomem *virt_addr; + unsigned int size = width / 8; + bool unmap = false; + + rcu_read_lock(); + virt_addr = acpi_map_vaddr_lookup(phys_addr, size); + if (!virt_addr) { + rcu_read_unlock(); + virt_addr = acpi_os_ioremap(phys_addr, size); + if (!virt_addr) + return AE_BAD_ADDRESS; + unmap = true; + } + + switch (width) { + case 8: + writeb(value, virt_addr); + break; + case 16: + writew(value, virt_addr); + break; + case 32: + writel(value, virt_addr); + break; + case 64: + write64(value, virt_addr); + break; + default: + BUG(); + } + + if (unmap) + iounmap(virt_addr); + else + rcu_read_unlock(); + + return AE_OK; +} + acpi_status acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg, u64 *value, u32 width) @@ -1278,44 +1425,28 @@ __setup("acpi_enforce_resources=", acpi_enforce_resources_setup); * drivers */ int acpi_check_resource_conflict(const struct resource *res) { - struct acpi_res_list *res_list_elem; - int ioport = 0, clash = 0; + acpi_adr_space_type space_id; + acpi_size length; + u8 warn = 0; + int clash = 0; if (acpi_enforce_resources == ENFORCE_RESOURCES_NO) return 0; if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM)) return 0; - ioport = res->flags & IORESOURCE_IO; - - spin_lock(&acpi_res_lock); - list_for_each_entry(res_list_elem, &resource_list_head, - resource_list) { - if (ioport && 
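acpi_os_read_memory64() and acpi_os_write_memory64() above fall back to two 32-bit accesses when readq/writeq are not available, widening both halves to 64 bits before combining them. A standalone sketch of that split access (note that, unlike a native 64-bit access, the two halves are not atomic with respect to the device):

#include <stdint.h>
#include <stdio.h>

/* Fallback for hardware paths that only allow 32-bit accesses: read the low
 * and high halves separately and combine them.  The halves are widened to
 * 64 bits first, otherwise (hi << 32) would overflow a 32-bit type. */
static uint64_t read64_split(const volatile uint32_t *lo, const volatile uint32_t *hi)
{
	uint64_t l = *lo;
	uint64_t h = *hi;

	return l | (h << 32);
}

static void write64_split(volatile uint32_t *lo, volatile uint32_t *hi, uint64_t val)
{
	*lo = (uint32_t)val;
	*hi = (uint32_t)(val >> 32);
}

int main(void)
{
	uint32_t reg[2];	/* stand-in for a little-endian 64-bit register pair */

	write64_split(&reg[0], &reg[1], 0x0123456789abcdefull);
	printf("0x%016llx\n", (unsigned long long)read64_split(&reg[0], &reg[1]));
	return 0;
}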
(res_list_elem->resource_type - != ACPI_ADR_SPACE_SYSTEM_IO)) - continue; - if (!ioport && (res_list_elem->resource_type - != ACPI_ADR_SPACE_SYSTEM_MEMORY)) - continue; + if (res->flags & IORESOURCE_IO) + space_id = ACPI_ADR_SPACE_SYSTEM_IO; + else + space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY; - if (res->end < res_list_elem->start - || res_list_elem->end < res->start) - continue; - clash = 1; - break; - } - spin_unlock(&acpi_res_lock); + length = res->end - res->start + 1; + if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) + warn = 1; + clash = acpi_check_address_range(space_id, res->start, length, warn); if (clash) { if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) { - printk(KERN_WARNING "ACPI: resource %s %pR" - " conflicts with ACPI region %s " - "[%s 0x%zx-0x%zx]\n", - res->name, res, res_list_elem->name, - (res_list_elem->resource_type == - ACPI_ADR_SPACE_SYSTEM_IO) ? "io" : "mem", - (size_t) res_list_elem->start, - (size_t) res_list_elem->end); if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX) printk(KERN_NOTICE "ACPI: This conflict may" " cause random problems and system" @@ -1467,155 +1598,6 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object) kmem_cache_free(cache, object); return (AE_OK); } - -static inline int acpi_res_list_add(struct acpi_res_list *res) -{ - struct acpi_res_list *res_list_elem; - - list_for_each_entry(res_list_elem, &resource_list_head, - resource_list) { - - if (res->resource_type == res_list_elem->resource_type && - res->start == res_list_elem->start && - res->end == res_list_elem->end) { - - /* - * The Region(addr,len) already exist in the list, - * just increase the count - */ - - res_list_elem->count++; - return 0; - } - } - - res->count = 1; - list_add(&res->resource_list, &resource_list_head); - return 1; -} - -static inline void acpi_res_list_del(struct acpi_res_list *res) -{ - struct acpi_res_list *res_list_elem; - - list_for_each_entry(res_list_elem, &resource_list_head, - resource_list) { - - if (res->resource_type == res_list_elem->resource_type && - res->start == res_list_elem->start && - res->end == res_list_elem->end) { - - /* - * If the res count is decreased to 0, - * remove and free it - */ - - if (--res_list_elem->count == 0) { - list_del(&res_list_elem->resource_list); - kfree(res_list_elem); - } - return; - } - } -} - -acpi_status -acpi_os_invalidate_address( - u8 space_id, - acpi_physical_address address, - acpi_size length) -{ - struct acpi_res_list res; - - switch (space_id) { - case ACPI_ADR_SPACE_SYSTEM_IO: - case ACPI_ADR_SPACE_SYSTEM_MEMORY: - /* Only interference checks against SystemIO and SystemMemory - are needed */ - res.start = address; - res.end = address + length - 1; - res.resource_type = space_id; - spin_lock(&acpi_res_lock); - acpi_res_list_del(&res); - spin_unlock(&acpi_res_lock); - break; - case ACPI_ADR_SPACE_PCI_CONFIG: - case ACPI_ADR_SPACE_EC: - case ACPI_ADR_SPACE_SMBUS: - case ACPI_ADR_SPACE_CMOS: - case ACPI_ADR_SPACE_PCI_BAR_TARGET: - case ACPI_ADR_SPACE_DATA_TABLE: - case ACPI_ADR_SPACE_FIXED_HARDWARE: - break; - } - return AE_OK; -} - -/****************************************************************************** - * - * FUNCTION: acpi_os_validate_address - * - * PARAMETERS: space_id - ACPI space ID - * address - Physical address - * length - Address length - * - * RETURN: AE_OK if address/length is valid for the space_id. Otherwise, - * should return AE_AML_ILLEGAL_ADDRESS. - * - * DESCRIPTION: Validate a system address via the host OS. 
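The removed acpi_res_list walk above detected a clash with the standard interval-overlap test: two ranges conflict unless one ends before the other starts. The patch now delegates the bookkeeping to ACPICA's acpi_check_address_range(), but the underlying comparison is worth spelling out; a minimal sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Two inclusive ranges [a_start, a_end] and [b_start, b_end] conflict unless
 * one ends before the other begins. */
static bool ranges_overlap(uint64_t a_start, uint64_t a_end,
			   uint64_t b_start, uint64_t b_end)
{
	return !(a_end < b_start || b_end < a_start);
}

int main(void)
{
	/* e.g. a driver claiming I/O ports 0x60-0x64 vs an ACPI region 0x62-0x66 */
	printf("%s\n", ranges_overlap(0x60, 0x64, 0x62, 0x66) ? "conflict" : "ok");
	return 0;
}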
Used to validate - * the addresses accessed by AML operation regions. - * - *****************************************************************************/ - -acpi_status -acpi_os_validate_address ( - u8 space_id, - acpi_physical_address address, - acpi_size length, - char *name) -{ - struct acpi_res_list *res; - int added; - if (acpi_enforce_resources == ENFORCE_RESOURCES_NO) - return AE_OK; - - switch (space_id) { - case ACPI_ADR_SPACE_SYSTEM_IO: - case ACPI_ADR_SPACE_SYSTEM_MEMORY: - /* Only interference checks against SystemIO and SystemMemory - are needed */ - res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL); - if (!res) - return AE_OK; - /* ACPI names are fixed to 4 bytes, still better use strlcpy */ - strlcpy(res->name, name, 5); - res->start = address; - res->end = address + length - 1; - res->resource_type = space_id; - spin_lock(&acpi_res_lock); - added = acpi_res_list_add(res); - spin_unlock(&acpi_res_lock); - pr_debug("%s %s resource: start: 0x%llx, end: 0x%llx, " - "name: %s\n", added ? "Added" : "Already exist", - (space_id == ACPI_ADR_SPACE_SYSTEM_IO) - ? "SystemIO" : "System Memory", - (unsigned long long)res->start, - (unsigned long long)res->end, - res->name); - if (!added) - kfree(res); - break; - case ACPI_ADR_SPACE_PCI_CONFIG: - case ACPI_ADR_SPACE_EC: - case ACPI_ADR_SPACE_SMBUS: - case ACPI_ADR_SPACE_CMOS: - case ACPI_ADR_SPACE_PCI_BAR_TARGET: - case ACPI_ADR_SPACE_DATA_TABLE: - case ACPI_ADR_SPACE_FIXED_HARDWARE: - break; - } - return AE_OK; -} #endif acpi_status __init acpi_os_initialize(void) diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 3a0428e..c850de4 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -173,8 +173,30 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) apic_id = map_mat_entry(handle, type, acpi_id); if (apic_id == -1) apic_id = map_madt_entry(type, acpi_id); - if (apic_id == -1) - return apic_id; + if (apic_id == -1) { + /* + * On UP processor, there is no _MAT or MADT table. + * So above apic_id is always set to -1. + * + * BIOS may define multiple CPU handles even for UP processor. + * For example, + * + * Scope (_PR) + * { + * Processor (CPU0, 0x00, 0x00000410, 0x06) {} + * Processor (CPU1, 0x01, 0x00000410, 0x06) {} + * Processor (CPU2, 0x02, 0x00000410, 0x06) {} + * Processor (CPU3, 0x03, 0x00000410, 0x06) {} + * } + * + * Ignores apic_id and always return 0 for CPU0's handle. + * Return -1 for other CPU's handle. + */ + if (acpi_id == 0) + return acpi_id; + else + return apic_id; + } #ifdef CONFIG_SMP for_each_possible_cpu(i) { diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 20a68ca..8ae05ce 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -82,9 +82,9 @@ MODULE_LICENSE("GPL"); static int acpi_processor_add(struct acpi_device *device); static int acpi_processor_remove(struct acpi_device *device, int type); static void acpi_processor_notify(struct acpi_device *device, u32 event); -static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu); +static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr); static int acpi_processor_handle_eject(struct acpi_processor *pr); - +static int acpi_processor_start(struct acpi_processor *pr); static const struct acpi_device_id processor_device_ids[] = { {ACPI_PROCESSOR_OBJECT_HID, 0}, @@ -324,10 +324,8 @@ static int acpi_processor_get_info(struct acpi_device *device) * they are physically not present. 
*/ if (pr->id == -1) { - if (ACPI_FAILURE - (acpi_processor_hotadd_init(pr->handle, &pr->id))) { + if (ACPI_FAILURE(acpi_processor_hotadd_init(pr))) return -ENODEV; - } } /* * On some boxes several processors use the same processor bus id. @@ -425,10 +423,29 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, struct acpi_processor *pr = per_cpu(processors, cpu); if (action == CPU_ONLINE && pr) { - acpi_processor_ppc_has_changed(pr, 0); - acpi_processor_hotplug(pr); - acpi_processor_reevaluate_tstate(pr, action); - acpi_processor_tstate_has_changed(pr); + /* CPU got physically hotplugged and onlined the first time: + * Initialize missing things + */ + if (pr->flags.need_hotplug_init) { + struct cpuidle_driver *idle_driver = + cpuidle_get_driver(); + + printk(KERN_INFO "Will online and init hotplugged " + "CPU: %d\n", pr->id); + WARN(acpi_processor_start(pr), "Failed to start CPU:" + " %d\n", pr->id); + pr->flags.need_hotplug_init = 0; + if (idle_driver && !strcmp(idle_driver->name, + "intel_idle")) { + intel_idle_cpu_init(pr->id); + } + /* Normal CPU soft online event */ + } else { + acpi_processor_ppc_has_changed(pr, 0); + acpi_processor_cst_has_changed(pr); + acpi_processor_reevaluate_tstate(pr, action); + acpi_processor_tstate_has_changed(pr); + } } if (action == CPU_DEAD && pr) { /* invalidate the flag.throttling after one CPU is offline */ @@ -442,6 +459,71 @@ static struct notifier_block acpi_cpu_notifier = .notifier_call = acpi_cpu_soft_notify, }; +/* + * acpi_processor_start() is called by the cpu_hotplug_notifier func: + * acpi_cpu_soft_notify(). Getting it __cpuinit{data} is difficult, the + * root cause seem to be that acpi_processor_uninstall_hotplug_notify() + * is in the module_exit (__exit) func. Allowing acpi_processor_start() + * to not be in __cpuinit section, but being called from __cpuinit funcs + * via __ref looks like the right thing to do here. + */ +static __ref int acpi_processor_start(struct acpi_processor *pr) +{ + struct acpi_device *device = per_cpu(processor_device_array, pr->id); + int result = 0; + +#ifdef CONFIG_CPU_FREQ + acpi_processor_ppc_has_changed(pr, 0); +#endif + acpi_processor_get_throttling_info(pr); + acpi_processor_get_limit_info(pr); + + if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver) + acpi_processor_power_init(pr, device); + + pr->cdev = thermal_cooling_device_register("Processor", device, + &processor_cooling_ops); + if (IS_ERR(pr->cdev)) { + result = PTR_ERR(pr->cdev); + goto err_power_exit; + } + + dev_dbg(&device->dev, "registered as cooling_device%d\n", + pr->cdev->id); + + result = sysfs_create_link(&device->dev.kobj, + &pr->cdev->device.kobj, + "thermal_cooling"); + if (result) { + printk(KERN_ERR PREFIX "Create sysfs link\n"); + goto err_thermal_unregister; + } + result = sysfs_create_link(&pr->cdev->device.kobj, + &device->dev.kobj, + "device"); + if (result) { + printk(KERN_ERR PREFIX "Create sysfs link\n"); + goto err_remove_sysfs_thermal; + } + + return 0; + +err_remove_sysfs_thermal: + sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); +err_thermal_unregister: + thermal_cooling_device_unregister(pr->cdev); +err_power_exit: + acpi_processor_power_exit(pr, device); + + return result; +} + +/* + * Do not put anything in here which needs the core to be online. + * For example MSR access or setting up things which check for cpuinfo_x86 + * (cpu_data(cpu)) values, like CPU feature flags, family, model, etc. 
+ * Such things have to be put in and set up above in acpi_processor_start() + */ static int __cpuinit acpi_processor_add(struct acpi_device *device) { struct acpi_processor *pr = NULL; @@ -497,48 +579,21 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device) goto err_free_cpumask; } -#ifdef CONFIG_CPU_FREQ - acpi_processor_ppc_has_changed(pr, 0); -#endif - acpi_processor_get_throttling_info(pr); - acpi_processor_get_limit_info(pr); - - if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver) - acpi_processor_power_init(pr, device); - - pr->cdev = thermal_cooling_device_register("Processor", device, - &processor_cooling_ops); - if (IS_ERR(pr->cdev)) { - result = PTR_ERR(pr->cdev); - goto err_power_exit; - } - - dev_dbg(&device->dev, "registered as cooling_device%d\n", - pr->cdev->id); + /* + * Do not start hotplugged CPUs now, but when they + * are onlined the first time + */ + if (pr->flags.need_hotplug_init) + return 0; - result = sysfs_create_link(&device->dev.kobj, - &pr->cdev->device.kobj, - "thermal_cooling"); - if (result) { - printk(KERN_ERR PREFIX "Create sysfs link\n"); - goto err_thermal_unregister; - } - result = sysfs_create_link(&pr->cdev->device.kobj, - &device->dev.kobj, - "device"); - if (result) { - printk(KERN_ERR PREFIX "Create sysfs link\n"); + result = acpi_processor_start(pr); + if (result) goto err_remove_sysfs; - } return 0; err_remove_sysfs: - sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); -err_thermal_unregister: - thermal_cooling_device_unregister(pr->cdev); -err_power_exit: - acpi_processor_power_exit(pr, device); + sysfs_remove_link(&device->dev.kobj, "sysdev"); err_free_cpumask: free_cpumask_var(pr->throttling.shared_cpu_map); @@ -720,21 +775,33 @@ processor_walk_namespace_cb(acpi_handle handle, return (AE_OK); } -static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu) +static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr) { + acpi_handle handle = pr->handle; if (!is_processor_present(handle)) { return AE_ERROR; } - if (acpi_map_lsapic(handle, p_cpu)) + if (acpi_map_lsapic(handle, &pr->id)) return AE_ERROR; - if (arch_register_cpu(*p_cpu)) { - acpi_unmap_lsapic(*p_cpu); + if (arch_register_cpu(pr->id)) { + acpi_unmap_lsapic(pr->id); return AE_ERROR; } + /* CPU got hot-plugged, but cpu_data is not initialized yet + * Set flag to delay cpu_idle/throttling initialization + * in: + * acpi_processor_add() + * acpi_processor_get_info() + * and do it when the CPU gets online the first time + * TBD: Cleanup above functions and try to do this more elegant. 
+ */ + printk(KERN_INFO "CPU %d got hotplugged\n", pr->id); + pr->flags.need_hotplug_init = 1; + return AE_OK; } @@ -748,7 +815,7 @@ static int acpi_processor_handle_eject(struct acpi_processor *pr) return (0); } #else -static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu) +static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr) { return AE_ERROR; } @@ -827,8 +894,6 @@ static void __exit acpi_processor_exit(void) acpi_bus_unregister_driver(&acpi_processor_driver); - cpuidle_unregister_driver(&acpi_idle_driver); - return; } diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 0a7ed69..ca191ff 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -438,6 +438,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { }, { .callback = init_nvs_nosave, + .ident = "Sony Vaio VPCCW29FX", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"), + }, + }, + { + .callback = init_nvs_nosave, .ident = "Averatec AV1020-ED2", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"), diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 69ac373c7..fdf27b9 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -1117,6 +1117,13 @@ static int piix_broken_suspend(void) }, }, { + .ident = "Satellite Pro A120", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "Satellite Pro A120"), + }, + }, + { .ident = "Portege M500", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 11c9aea..c06e0ec 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4125,6 +4125,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { * device and controller are SATA. 
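acpi_processor_hotadd_init() above now only maps and registers the hot-added CPU and sets flags.need_hotplug_init; the heavy per-CPU setup is left to the CPU-online notifier, which calls acpi_processor_start() the first time the CPU comes up and then clears the flag. A standalone sketch of that two-phase, deferred-initialization pattern (all names here are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct cpu_ctx {
	int id;
	bool need_hotplug_init;	/* set at hot-add time, cleared on first online */
	bool started;
};

static void cpu_start(struct cpu_ctx *c)
{
	/* heavy setup that requires the CPU to be online (illustrative) */
	c->started = true;
	printf("CPU %d: full init done\n", c->id);
}

static void on_cpu_online(struct cpu_ctx *c)
{
	if (c->need_hotplug_init) {
		cpu_start(c);			/* first online after hot-add */
		c->need_hotplug_init = false;
	} else {
		printf("CPU %d: routine online work\n", c->id);
	}
}

int main(void)
{
	struct cpu_ctx c = { .id = 1, .need_hotplug_init = true };

	on_cpu_online(&c);	/* performs the deferred init */
	on_cpu_online(&c);	/* subsequent onlines take the normal path */
	return 0;
}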
*/ { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER }, + { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER }, + { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER }, { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c index 9a7f0ea..74aaee3 100644 --- a/drivers/ata/libata-transport.c +++ b/drivers/ata/libata-transport.c @@ -291,6 +291,7 @@ int ata_tport_add(struct device *parent, goto tport_err; } + device_enable_async_suspend(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c index d6a4677..1e65842 100644 --- a/drivers/ata/pata_bf54x.c +++ b/drivers/ata/pata_bf54x.c @@ -251,6 +251,8 @@ static const u32 udma_tenvmin = 20; static const u32 udma_tackmin = 20; static const u32 udma_tssmin = 50; +#define BFIN_MAX_SG_SEGMENTS 4 + /** * * Function: num_clocks_min @@ -829,79 +831,61 @@ static void bfin_set_devctl(struct ata_port *ap, u8 ctl) static void bfin_bmdma_setup(struct ata_queued_cmd *qc) { - unsigned short config = WDSIZE_16; + struct ata_port *ap = qc->ap; + struct dma_desc_array *dma_desc_cpu = (struct dma_desc_array *)ap->bmdma_prd; + void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; + unsigned short config = DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_16 | DMAEN; struct scatterlist *sg; unsigned int si; + unsigned int channel; + unsigned int dir; + unsigned int size = 0; dev_dbg(qc->ap->dev, "in atapi dma setup\n"); /* Program the ATA_CTRL register with dir */ if (qc->tf.flags & ATA_TFLAG_WRITE) { - /* fill the ATAPI DMA controller */ - set_dma_config(CH_ATAPI_TX, config); - set_dma_x_modify(CH_ATAPI_TX, 2); - for_each_sg(qc->sg, sg, qc->n_elem, si) { - set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg)); - set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1); - } + channel = CH_ATAPI_TX; + dir = DMA_TO_DEVICE; } else { + channel = CH_ATAPI_RX; + dir = DMA_FROM_DEVICE; config |= WNR; - /* fill the ATAPI DMA controller */ - set_dma_config(CH_ATAPI_RX, config); - set_dma_x_modify(CH_ATAPI_RX, 2); - for_each_sg(qc->sg, sg, qc->n_elem, si) { - set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg)); - set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1); - } } -} -/** - * bfin_bmdma_start - Start an IDE DMA transaction - * @qc: Info associated with this ATA transaction. - * - * Note: Original code is ata_bmdma_start(). - */ + dma_map_sg(ap->dev, qc->sg, qc->n_elem, dir); -static void bfin_bmdma_start(struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; - struct scatterlist *sg; - unsigned int si; + /* fill the ATAPI DMA controller */ + for_each_sg(qc->sg, sg, qc->n_elem, si) { + dma_desc_cpu[si].start_addr = sg_dma_address(sg); + dma_desc_cpu[si].cfg = config; + dma_desc_cpu[si].x_count = sg_dma_len(sg) >> 1; + dma_desc_cpu[si].x_modify = 2; + size += sg_dma_len(sg); + } - dev_dbg(qc->ap->dev, "in atapi dma start\n"); - if (!(ap->udma_mask || ap->mwdma_mask)) - return; + /* Set the last descriptor to stop mode */ + dma_desc_cpu[qc->n_elem - 1].cfg &= ~(DMAFLOW | NDSIZE); - /* start ATAPI DMA controller*/ - if (qc->tf.flags & ATA_TFLAG_WRITE) { - /* - * On blackfin arch, uncacheable memory is not - * allocated with flag GFP_DMA. DMA buffer from - * common kenel code should be flushed if WB - * data cache is enabled. Otherwise, this loop - * is an empty loop and optimized out. 
- */ - for_each_sg(qc->sg, sg, qc->n_elem, si) { - flush_dcache_range(sg_dma_address(sg), - sg_dma_address(sg) + sg_dma_len(sg)); - } - enable_dma(CH_ATAPI_TX); - dev_dbg(qc->ap->dev, "enable udma write\n"); + flush_dcache_range((unsigned int)dma_desc_cpu, + (unsigned int)dma_desc_cpu + + qc->n_elem * sizeof(struct dma_desc_array)); - /* Send ATA DMA write command */ - bfin_exec_command(ap, &qc->tf); + /* Enable ATA DMA operation*/ + set_dma_curr_desc_addr(channel, (unsigned long *)ap->bmdma_prd_dma); + set_dma_x_count(channel, 0); + set_dma_x_modify(channel, 0); + set_dma_config(channel, config); + + SSYNC(); + + /* Send ATA DMA command */ + bfin_exec_command(ap, &qc->tf); + if (qc->tf.flags & ATA_TFLAG_WRITE) { /* set ATA DMA write direction */ ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) | XFER_DIR)); } else { - enable_dma(CH_ATAPI_RX); - dev_dbg(qc->ap->dev, "enable udma read\n"); - - /* Send ATA DMA read command */ - bfin_exec_command(ap, &qc->tf); - /* set ATA DMA read direction */ ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base) & ~XFER_DIR)); @@ -913,12 +897,28 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc) /* Set ATAPI state machine contorl in terminate sequence */ ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | END_ON_TERM); - /* Set transfer length to buffer len */ - for_each_sg(qc->sg, sg, qc->n_elem, si) { - ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1)); - } + /* Set transfer length to the total size of sg buffers */ + ATAPI_SET_XFER_LEN(base, size >> 1); +} - /* Enable ATA DMA operation*/ +/** + * bfin_bmdma_start - Start an IDE DMA transaction + * @qc: Info associated with this ATA transaction. + * + * Note: Original code is ata_bmdma_start(). + */ + +static void bfin_bmdma_start(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; + + dev_dbg(qc->ap->dev, "in atapi dma start\n"); + + if (!(ap->udma_mask || ap->mwdma_mask)) + return; + + /* start ATAPI transfer*/ if (ap->udma_mask) ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | ULTRA_START); @@ -935,34 +935,23 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc) static void bfin_bmdma_stop(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; - struct scatterlist *sg; - unsigned int si; + unsigned int dir; dev_dbg(qc->ap->dev, "in atapi dma stop\n"); + if (!(ap->udma_mask || ap->mwdma_mask)) return; /* stop ATAPI DMA controller*/ - if (qc->tf.flags & ATA_TFLAG_WRITE) + if (qc->tf.flags & ATA_TFLAG_WRITE) { + dir = DMA_TO_DEVICE; disable_dma(CH_ATAPI_TX); - else { + } else { + dir = DMA_FROM_DEVICE; disable_dma(CH_ATAPI_RX); - if (ap->hsm_task_state & HSM_ST_LAST) { - /* - * On blackfin arch, uncacheable memory is not - * allocated with flag GFP_DMA. DMA buffer from - * common kenel code should be invalidated if - * data cache is enabled. Otherwise, this loop - * is an empty loop and optimized out. 
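The reworked bfin_bmdma_setup() above converts the scatter/gather list into an array of DMA descriptors up front: each entry chains to the next, the last entry has its flow/next-descriptor bits cleared so the engine stops after it, and the array is flushed from the data cache before the channel is programmed. A simplified sketch of that descriptor fill (field and flag names are illustrative, not the Blackfin register layout):

#include <stdint.h>
#include <stdio.h>

#define CFG_ENABLE	(1u << 0)	/* illustrative "channel enable" bit */
#define FLOW_ARRAY	(1u << 4)	/* illustrative "fetch next descriptor" bits */

struct dma_desc {
	uint32_t start_addr;
	uint32_t cfg;
	uint32_t x_count;
};

struct seg { uint32_t addr; uint32_t len; };

/* Build a descriptor array from a scatter list; every descriptor chains to
 * the next, and the last one is patched to stop the engine. */
static uint32_t fill_desc_array(struct dma_desc *d, const struct seg *sg, int n)
{
	uint32_t total = 0;
	int i;

	for (i = 0; i < n; i++) {
		d[i].start_addr = sg[i].addr;
		d[i].cfg = CFG_ENABLE | FLOW_ARRAY;
		d[i].x_count = sg[i].len / 2;	/* 16-bit transfer units */
		total += sg[i].len;
	}
	d[n - 1].cfg &= ~FLOW_ARRAY;	/* last descriptor: stop instead of chaining */
	return total;
}

int main(void)
{
	struct seg sg[3] = { { 0x1000, 512 }, { 0x2000, 1024 }, { 0x4000, 256 } };
	struct dma_desc d[3];
	uint32_t total = fill_desc_array(d, sg, 3);

	printf("%u bytes queued, last cfg=0x%x\n", total, d[2].cfg);
	return 0;
}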
- */ - for_each_sg(qc->sg, sg, qc->n_elem, si) { - invalidate_dcache_range( - sg_dma_address(sg), - sg_dma_address(sg) - + sg_dma_len(sg)); - } - } } + + dma_unmap_sg(ap->dev, qc->sg, qc->n_elem, dir); } /** @@ -1260,6 +1249,11 @@ static void bfin_port_stop(struct ata_port *ap) { dev_dbg(ap->dev, "in atapi port stop\n"); if (ap->udma_mask != 0 || ap->mwdma_mask != 0) { + dma_free_coherent(ap->dev, + BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array), + ap->bmdma_prd, + ap->bmdma_prd_dma); + free_dma(CH_ATAPI_RX); free_dma(CH_ATAPI_TX); } @@ -1271,14 +1265,29 @@ static int bfin_port_start(struct ata_port *ap) if (!(ap->udma_mask || ap->mwdma_mask)) return 0; + ap->bmdma_prd = dma_alloc_coherent(ap->dev, + BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array), + &ap->bmdma_prd_dma, + GFP_KERNEL); + + if (ap->bmdma_prd == NULL) { + dev_info(ap->dev, "Unable to allocate DMA descriptor array.\n"); + goto out; + } + if (request_dma(CH_ATAPI_RX, "BFIN ATAPI RX DMA") >= 0) { if (request_dma(CH_ATAPI_TX, "BFIN ATAPI TX DMA") >= 0) return 0; free_dma(CH_ATAPI_RX); + dma_free_coherent(ap->dev, + BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array), + ap->bmdma_prd, + ap->bmdma_prd_dma); } +out: ap->udma_mask = 0; ap->mwdma_mask = 0; dev_err(ap->dev, "Unable to request ATAPI DMA!" @@ -1400,7 +1409,7 @@ static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance) static struct scsi_host_template bfin_sht = { ATA_BASE_SHT(DRV_NAME), - .sg_tablesize = SG_NONE, + .sg_tablesize = BFIN_MAX_SG_SEGMENTS, .dma_boundary = ATA_DMA_BOUNDARY, }; diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index 5a2c95b..0120b0d 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -140,6 +140,7 @@ enum { */ HCONTROL_ONLINE_PHY_RST = (1 << 31), HCONTROL_FORCE_OFFLINE = (1 << 30), + HCONTROL_LEGACY = (1 << 28), HCONTROL_PARITY_PROT_MOD = (1 << 14), HCONTROL_DPATH_PARITY = (1 << 12), HCONTROL_SNOOP_ENABLE = (1 << 10), @@ -1223,6 +1224,10 @@ static int sata_fsl_init_controller(struct ata_host *host) * part of the port_start() callback */ + /* sata controller to operate in enterprise mode */ + temp = ioread32(hcr_base + HCONTROL); + iowrite32(temp & ~HCONTROL_LEGACY, hcr_base + HCONTROL); + /* ack. any pending IRQs for this controller/port */ temp = ioread32(hcr_base + HSTATUS); if (temp & 0x3F) @@ -1421,6 +1426,12 @@ static int sata_fsl_resume(struct platform_device *op) /* Recovery the CHBA register in host controller cmd register set */ iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA); + iowrite32((ioread32(hcr_base + HCONTROL) + | HCONTROL_ONLINE_PHY_RST + | HCONTROL_SNOOP_ENABLE + | HCONTROL_PMP_ATTACHED), + hcr_base + HCONTROL); + ata_host_resume(host); return 0; } diff --git a/drivers/base/Makefile b/drivers/base/Makefile index 2c8272d..610f999 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -1,6 +1,6 @@ # Makefile for the Linux device tree -obj-y := core.o sys.o bus.o dd.o syscore.o \ +obj-y := core.o bus.o dd.o syscore.o \ driver.o class.o platform.o \ cpu.o firmware.o init.o map.o devres.o \ attribute_container.o transport_class.o \ diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 99dc592..40fb122 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -915,9 +915,10 @@ static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store); /** * __bus_register - register a driver-core subsystem - * @bus: bus. 
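The sata_fsl change above takes the controller out of legacy mode with a read-modify-write of HCONTROL: read the register, clear bit 28, write it back, leaving the other bits untouched; the resume path does the mirror-image operation and ORs the wanted bits back in. A trivial standalone sketch of that idiom (the register here is a plain variable, not real MMIO):

#include <stdint.h>
#include <stdio.h>

#define HCTRL_LEGACY	(1u << 28)	/* bit position taken from the hunk above */

int main(void)
{
	uint32_t hcontrol = 0x90000400u;	/* arbitrary example value, bit 28 set */

	/* clear the legacy bit, preserving everything else */
	hcontrol &= ~HCTRL_LEGACY;
	printf("after clear: 0x%08x\n", hcontrol);

	/* resume path: OR selected bits back in, e.g. ONLINE_PHY_RST | SNOOP_ENABLE */
	hcontrol |= (1u << 31) | (1u << 10);
	printf("after set:   0x%08x\n", hcontrol);
	return 0;
}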
+ * @bus: bus to register + * @key: lockdep class key * - * Once we have that, we registered the bus with the kobject + * Once we have that, we register the bus with the kobject * infrastructure, then register the children subsystems it has: * the devices and drivers that belong to the subsystem. */ @@ -1220,8 +1221,8 @@ static void system_root_device_release(struct device *dev) } /** * subsys_system_register - register a subsystem at /sys/devices/system/ - * @subsys - system subsystem - * @groups - default attributes for the root device + * @subsys: system subsystem + * @groups: default attributes for the root device * * All 'system' subsystems have a /sys/devices/system/<name> root device * with the name of the subsystem. The root device can carry subsystem- diff --git a/drivers/base/core.c b/drivers/base/core.c index 4a67cc0..74dda4f 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -632,6 +632,11 @@ static void klist_children_put(struct klist_node *n) * may be used for reference counting of @dev after calling this * function. * + * All fields in @dev must be initialized by the caller to 0, except + * for those explicitly set to some other value. The simplest + * approach is to use kzalloc() to allocate the structure containing + * @dev. + * * NOTE: Use put_device() to give up your reference instead of freeing * @dev directly once you have called this function. */ @@ -930,6 +935,13 @@ int device_private_init(struct device *dev) * to the global and sibling lists for the device, then * adds it to the other relevant subsystems of the driver model. * + * Do not call this routine or device_register() more than once for + * any device structure. The driver model core is not designed to work + * with devices that get unregistered and then spring back to life. + * (Among other things, it's very hard to guarantee that all references + * to the previous incarnation of @dev have been dropped.) Allocate + * and register a fresh new struct device instead. + * * NOTE: _Never_ directly free @dev after calling this function, even * if it returned an error! Always use put_device() to give up your * reference instead. @@ -1022,7 +1034,7 @@ int device_add(struct device *dev) device_pm_add(dev); /* Notify clients of device addition. This call must come - * after dpm_sysf_add() and before kobject_uevent(). + * after dpm_sysfs_add() and before kobject_uevent(). */ if (dev->bus) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, @@ -1090,6 +1102,9 @@ name_error: * have a clearly defined need to use and refcount the device * before it is added to the hierarchy. * + * For more information, see the kerneldoc for device_initialize() + * and device_add(). + * * NOTE: _Never_ directly free @dev after calling this function, even * if it returned an error! Always use put_device() to give up the * reference initialized in this function instead. 
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 26ab358..6c9387d 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -525,8 +525,7 @@ static int _request_firmware(const struct firmware **firmware_p, if (!firmware) { dev_err(device, "%s: kmalloc(struct firmware) failed\n", __func__); - retval = -ENOMEM; - goto out; + return -ENOMEM; } if (fw_get_builtin_firmware(firmware, name)) { diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 92e6a90..978bbf7 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -1429,6 +1429,8 @@ static int pm_genpd_default_restore_state(struct device *dev) return 0; } +#ifdef CONFIG_PM_SLEEP + /** * pm_genpd_default_suspend - Default "device suspend" for PM domians. * @dev: Device to handle. @@ -1517,6 +1519,19 @@ static int pm_genpd_default_thaw(struct device *dev) return cb ? cb(dev) : pm_generic_thaw(dev); } +#else /* !CONFIG_PM_SLEEP */ + +#define pm_genpd_default_suspend NULL +#define pm_genpd_default_suspend_late NULL +#define pm_genpd_default_resume_early NULL +#define pm_genpd_default_resume NULL +#define pm_genpd_default_freeze NULL +#define pm_genpd_default_freeze_late NULL +#define pm_genpd_default_thaw_early NULL +#define pm_genpd_default_thaw NULL + +#endif /* !CONFIG_PM_SLEEP */ + /** * pm_genpd_init - Initialize a generic I/O PM domain object. * @genpd: PM domain object to initialize. diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index 51527ee..66a265b 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c @@ -12,6 +12,8 @@ #include <linux/pm_qos.h> #include <linux/hrtimer.h> +#ifdef CONFIG_PM_RUNTIME + /** * default_stop_ok - Default PM domain governor routine for stopping devices. * @dev: Device to check. @@ -137,16 +139,28 @@ static bool default_power_down_ok(struct dev_pm_domain *pd) return true; } -struct dev_power_governor simple_qos_governor = { - .stop_ok = default_stop_ok, - .power_down_ok = default_power_down_ok, -}; - static bool always_on_power_down_ok(struct dev_pm_domain *domain) { return false; } +#else /* !CONFIG_PM_RUNTIME */ + +bool default_stop_ok(struct device *dev) +{ + return false; +} + +#define default_power_down_ok NULL +#define always_on_power_down_ok NULL + +#endif /* !CONFIG_PM_RUNTIME */ + +struct dev_power_governor simple_qos_governor = { + .stop_ok = default_stop_ok, + .power_down_ok = default_power_down_ok, +}; + /** * pm_genpd_gov_always_on - A governor implementing an always-on policy */ diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index be10a4f..6555803 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -284,6 +284,9 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config) map->precious_reg = config->precious_reg; map->cache_type = config->cache_type; + map->cache_bypass = false; + map->cache_only = false; + ret = regcache_init(map, config); mutex_unlock(&map->lock); diff --git a/drivers/base/sys.c b/drivers/base/sys.c deleted file mode 100644 index 409f5ce..0000000 --- a/drivers/base/sys.c +++ /dev/null @@ -1,383 +0,0 @@ -/* - * sys.c - pseudo-bus for system 'devices' (cpus, PICs, timers, etc) - * - * Copyright (c) 2002-3 Patrick Mochel - * 2002-3 Open Source Development Lab - * - * This file is released under the GPLv2 - * - * This exports a 'system' bus type. - * By default, a 'sys' bus gets added to the root of the system. 
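domain_governor.c above now builds the real governor callbacks only under CONFIG_PM_RUNTIME and supplies stubs otherwise (a default_stop_ok() that returns false, NULL callback macros), so simple_qos_governor can be defined unconditionally; domain.c does the same for CONFIG_PM_SLEEP. A minimal standalone illustration of that config-stub pattern, using a made-up option name:

#include <stdbool.h>
#include <stdio.h>

/* Compile-time switch standing in for a kernel Kconfig option. */
/* #define CONFIG_RUNTIME_GOVERNOR 1 */

#ifdef CONFIG_RUNTIME_GOVERNOR
/* Real policy: allow stopping only if the device can meet its latency. */
static bool default_stop_ok(int dev_latency_us)
{
	return dev_latency_us < 100;
}
#else
/* Option disabled: provide a conservative stub so callers still compile and link. */
static bool default_stop_ok(int dev_latency_us)
{
	(void)dev_latency_us;
	return false;
}
#endif

int main(void)
{
	printf("stop_ok: %d\n", default_stop_ok(42));
	return 0;
}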
There will - * always be core system devices. Devices can use sysdev_register() to - * add themselves as children of the system bus. - */ - -#include <linux/sysdev.h> -#include <linux/err.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/string.h> -#include <linux/pm.h> -#include <linux/device.h> -#include <linux/mutex.h> -#include <linux/interrupt.h> - -#include "base.h" - -#define to_sysdev(k) container_of(k, struct sys_device, kobj) -#define to_sysdev_attr(a) container_of(a, struct sysdev_attribute, attr) - - -static ssize_t -sysdev_show(struct kobject *kobj, struct attribute *attr, char *buffer) -{ - struct sys_device *sysdev = to_sysdev(kobj); - struct sysdev_attribute *sysdev_attr = to_sysdev_attr(attr); - - if (sysdev_attr->show) - return sysdev_attr->show(sysdev, sysdev_attr, buffer); - return -EIO; -} - - -static ssize_t -sysdev_store(struct kobject *kobj, struct attribute *attr, - const char *buffer, size_t count) -{ - struct sys_device *sysdev = to_sysdev(kobj); - struct sysdev_attribute *sysdev_attr = to_sysdev_attr(attr); - - if (sysdev_attr->store) - return sysdev_attr->store(sysdev, sysdev_attr, buffer, count); - return -EIO; -} - -static const struct sysfs_ops sysfs_ops = { - .show = sysdev_show, - .store = sysdev_store, -}; - -static struct kobj_type ktype_sysdev = { - .sysfs_ops = &sysfs_ops, -}; - - -int sysdev_create_file(struct sys_device *s, struct sysdev_attribute *a) -{ - return sysfs_create_file(&s->kobj, &a->attr); -} - - -void sysdev_remove_file(struct sys_device *s, struct sysdev_attribute *a) -{ - sysfs_remove_file(&s->kobj, &a->attr); -} - -EXPORT_SYMBOL_GPL(sysdev_create_file); -EXPORT_SYMBOL_GPL(sysdev_remove_file); - -#define to_sysdev_class(k) container_of(k, struct sysdev_class, kset.kobj) -#define to_sysdev_class_attr(a) container_of(a, \ - struct sysdev_class_attribute, attr) - -static ssize_t sysdev_class_show(struct kobject *kobj, struct attribute *attr, - char *buffer) -{ - struct sysdev_class *class = to_sysdev_class(kobj); - struct sysdev_class_attribute *class_attr = to_sysdev_class_attr(attr); - - if (class_attr->show) - return class_attr->show(class, class_attr, buffer); - return -EIO; -} - -static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr, - const char *buffer, size_t count) -{ - struct sysdev_class *class = to_sysdev_class(kobj); - struct sysdev_class_attribute *class_attr = to_sysdev_class_attr(attr); - - if (class_attr->store) - return class_attr->store(class, class_attr, buffer, count); - return -EIO; -} - -static const struct sysfs_ops sysfs_class_ops = { - .show = sysdev_class_show, - .store = sysdev_class_store, -}; - -static struct kobj_type ktype_sysdev_class = { - .sysfs_ops = &sysfs_class_ops, -}; - -int sysdev_class_create_file(struct sysdev_class *c, - struct sysdev_class_attribute *a) -{ - return sysfs_create_file(&c->kset.kobj, &a->attr); -} -EXPORT_SYMBOL_GPL(sysdev_class_create_file); - -void sysdev_class_remove_file(struct sysdev_class *c, - struct sysdev_class_attribute *a) -{ - sysfs_remove_file(&c->kset.kobj, &a->attr); -} -EXPORT_SYMBOL_GPL(sysdev_class_remove_file); - -extern struct kset *system_kset; - -int sysdev_class_register(struct sysdev_class *cls) -{ - int retval; - - pr_debug("Registering sysdev class '%s'\n", cls->name); - - INIT_LIST_HEAD(&cls->drivers); - memset(&cls->kset.kobj, 0x00, sizeof(struct kobject)); - cls->kset.kobj.parent = &system_kset->kobj; - cls->kset.kobj.ktype = &ktype_sysdev_class; - cls->kset.kobj.kset = 
system_kset; - - retval = kobject_set_name(&cls->kset.kobj, "%s", cls->name); - if (retval) - return retval; - - retval = kset_register(&cls->kset); - if (!retval && cls->attrs) - retval = sysfs_create_files(&cls->kset.kobj, - (const struct attribute **)cls->attrs); - return retval; -} - -void sysdev_class_unregister(struct sysdev_class *cls) -{ - pr_debug("Unregistering sysdev class '%s'\n", - kobject_name(&cls->kset.kobj)); - if (cls->attrs) - sysfs_remove_files(&cls->kset.kobj, - (const struct attribute **)cls->attrs); - kset_unregister(&cls->kset); -} - -EXPORT_SYMBOL_GPL(sysdev_class_register); -EXPORT_SYMBOL_GPL(sysdev_class_unregister); - -static DEFINE_MUTEX(sysdev_drivers_lock); - -/* - * @dev != NULL means that we're unwinding because some drv->add() - * failed for some reason. You need to grab sysdev_drivers_lock before - * calling this. - */ -static void __sysdev_driver_remove(struct sysdev_class *cls, - struct sysdev_driver *drv, - struct sys_device *from_dev) -{ - struct sys_device *dev = from_dev; - - list_del_init(&drv->entry); - if (!cls) - return; - - if (!drv->remove) - goto kset_put; - - if (dev) - list_for_each_entry_continue_reverse(dev, &cls->kset.list, - kobj.entry) - drv->remove(dev); - else - list_for_each_entry(dev, &cls->kset.list, kobj.entry) - drv->remove(dev); - -kset_put: - kset_put(&cls->kset); -} - -/** - * sysdev_driver_register - Register auxiliary driver - * @cls: Device class driver belongs to. - * @drv: Driver. - * - * @drv is inserted into @cls->drivers to be - * called on each operation on devices of that class. The refcount - * of @cls is incremented. - */ -int sysdev_driver_register(struct sysdev_class *cls, struct sysdev_driver *drv) -{ - struct sys_device *dev = NULL; - int err = 0; - - if (!cls) { - WARN(1, KERN_WARNING "sysdev: invalid class passed to %s!\n", - __func__); - return -EINVAL; - } - - /* Check whether this driver has already been added to a class. */ - if (drv->entry.next && !list_empty(&drv->entry)) - WARN(1, KERN_WARNING "sysdev: class %s: driver (%p) has already" - " been registered to a class, something is wrong, but " - "will forge on!\n", cls->name, drv); - - mutex_lock(&sysdev_drivers_lock); - if (cls && kset_get(&cls->kset)) { - list_add_tail(&drv->entry, &cls->drivers); - - /* If devices of this class already exist, tell the driver */ - if (drv->add) { - list_for_each_entry(dev, &cls->kset.list, kobj.entry) { - err = drv->add(dev); - if (err) - goto unwind; - } - } - } else { - err = -EINVAL; - WARN(1, KERN_ERR "%s: invalid device class\n", __func__); - } - - goto unlock; - -unwind: - __sysdev_driver_remove(cls, drv, dev); - -unlock: - mutex_unlock(&sysdev_drivers_lock); - return err; -} - -/** - * sysdev_driver_unregister - Remove an auxiliary driver. - * @cls: Class driver belongs to. - * @drv: Driver. 
- */ -void sysdev_driver_unregister(struct sysdev_class *cls, - struct sysdev_driver *drv) -{ - mutex_lock(&sysdev_drivers_lock); - __sysdev_driver_remove(cls, drv, NULL); - mutex_unlock(&sysdev_drivers_lock); -} -EXPORT_SYMBOL_GPL(sysdev_driver_register); -EXPORT_SYMBOL_GPL(sysdev_driver_unregister); - -/** - * sysdev_register - add a system device to the tree - * @sysdev: device in question - * - */ -int sysdev_register(struct sys_device *sysdev) -{ - int error; - struct sysdev_class *cls = sysdev->cls; - - if (!cls) - return -EINVAL; - - pr_debug("Registering sys device of class '%s'\n", - kobject_name(&cls->kset.kobj)); - - /* initialize the kobject to 0, in case it had previously been used */ - memset(&sysdev->kobj, 0x00, sizeof(struct kobject)); - - /* Make sure the kset is set */ - sysdev->kobj.kset = &cls->kset; - - /* Register the object */ - error = kobject_init_and_add(&sysdev->kobj, &ktype_sysdev, NULL, - "%s%d", kobject_name(&cls->kset.kobj), - sysdev->id); - - if (!error) { - struct sysdev_driver *drv; - - pr_debug("Registering sys device '%s'\n", - kobject_name(&sysdev->kobj)); - - mutex_lock(&sysdev_drivers_lock); - /* Generic notification is implicit, because it's that - * code that should have called us. - */ - - /* Notify class auxiliary drivers */ - list_for_each_entry(drv, &cls->drivers, entry) { - if (drv->add) - drv->add(sysdev); - } - mutex_unlock(&sysdev_drivers_lock); - kobject_uevent(&sysdev->kobj, KOBJ_ADD); - } - - return error; -} - -void sysdev_unregister(struct sys_device *sysdev) -{ - struct sysdev_driver *drv; - - mutex_lock(&sysdev_drivers_lock); - list_for_each_entry(drv, &sysdev->cls->drivers, entry) { - if (drv->remove) - drv->remove(sysdev); - } - mutex_unlock(&sysdev_drivers_lock); - - kobject_put(&sysdev->kobj); -} - -EXPORT_SYMBOL_GPL(sysdev_register); -EXPORT_SYMBOL_GPL(sysdev_unregister); - -#define to_ext_attr(x) container_of(x, struct sysdev_ext_attribute, attr) - -ssize_t sysdev_store_ulong(struct sys_device *sysdev, - struct sysdev_attribute *attr, - const char *buf, size_t size) -{ - struct sysdev_ext_attribute *ea = to_ext_attr(attr); - char *end; - unsigned long new = simple_strtoul(buf, &end, 0); - if (end == buf) - return -EINVAL; - *(unsigned long *)(ea->var) = new; - /* Always return full write size even if we didn't consume all */ - return size; -} -EXPORT_SYMBOL_GPL(sysdev_store_ulong); - -ssize_t sysdev_show_ulong(struct sys_device *sysdev, - struct sysdev_attribute *attr, - char *buf) -{ - struct sysdev_ext_attribute *ea = to_ext_attr(attr); - return snprintf(buf, PAGE_SIZE, "%lx\n", *(unsigned long *)(ea->var)); -} -EXPORT_SYMBOL_GPL(sysdev_show_ulong); - -ssize_t sysdev_store_int(struct sys_device *sysdev, - struct sysdev_attribute *attr, - const char *buf, size_t size) -{ - struct sysdev_ext_attribute *ea = to_ext_attr(attr); - char *end; - long new = simple_strtol(buf, &end, 0); - if (end == buf || new > INT_MAX || new < INT_MIN) - return -EINVAL; - *(int *)(ea->var) = new; - /* Always return full write size even if we didn't consume all */ - return size; -} -EXPORT_SYMBOL_GPL(sysdev_store_int); - -ssize_t sysdev_show_int(struct sys_device *sysdev, - struct sysdev_attribute *attr, - char *buf) -{ - struct sysdev_ext_attribute *ea = to_ext_attr(attr); - return snprintf(buf, PAGE_SIZE, "%d\n", *(int *)(ea->var)); -} -EXPORT_SYMBOL_GPL(sysdev_show_int); - diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h index fda56bd..0def898 100644 --- a/drivers/bcma/bcma_private.h +++ b/drivers/bcma/bcma_private.h @@ -19,6 
+19,7 @@ int __init bcma_bus_early_register(struct bcma_bus *bus, struct bcma_device *core_cc, struct bcma_device *core_mips); #ifdef CONFIG_PM +int bcma_bus_suspend(struct bcma_bus *bus); int bcma_bus_resume(struct bcma_bus *bus); #endif diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c index 443b83a..f59244e 100644 --- a/drivers/bcma/host_pci.c +++ b/drivers/bcma/host_pci.c @@ -235,38 +235,32 @@ static void bcma_host_pci_remove(struct pci_dev *dev) } #ifdef CONFIG_PM -static int bcma_host_pci_suspend(struct pci_dev *dev, pm_message_t state) +static int bcma_host_pci_suspend(struct device *dev) { - /* Host specific */ - pci_save_state(dev); - pci_disable_device(dev); - pci_set_power_state(dev, pci_choose_state(dev, state)); + struct pci_dev *pdev = to_pci_dev(dev); + struct bcma_bus *bus = pci_get_drvdata(pdev); - return 0; + bus->mapped_core = NULL; + + return bcma_bus_suspend(bus); } -static int bcma_host_pci_resume(struct pci_dev *dev) +static int bcma_host_pci_resume(struct device *dev) { - struct bcma_bus *bus = pci_get_drvdata(dev); - int err; + struct pci_dev *pdev = to_pci_dev(dev); + struct bcma_bus *bus = pci_get_drvdata(pdev); - /* Host specific */ - pci_set_power_state(dev, 0); - err = pci_enable_device(dev); - if (err) - return err; - pci_restore_state(dev); + return bcma_bus_resume(bus); +} - /* Bus specific */ - err = bcma_bus_resume(bus); - if (err) - return err; +static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend, + bcma_host_pci_resume); +#define BCMA_PM_OPS (&bcma_pm_ops) - return 0; -} #else /* CONFIG_PM */ -# define bcma_host_pci_suspend NULL -# define bcma_host_pci_resume NULL + +#define BCMA_PM_OPS NULL + #endif /* CONFIG_PM */ static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = { @@ -284,8 +278,7 @@ static struct pci_driver bcma_pci_bridge_driver = { .id_table = bcma_pci_bridge_tbl, .probe = bcma_host_pci_probe, .remove = bcma_host_pci_remove, - .suspend = bcma_host_pci_suspend, - .resume = bcma_host_pci_resume, + .driver.pm = BCMA_PM_OPS, }; int __init bcma_host_pci_init(void) diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 10f92b3..febbc0a 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -241,6 +241,21 @@ int __init bcma_bus_early_register(struct bcma_bus *bus, } #ifdef CONFIG_PM +int bcma_bus_suspend(struct bcma_bus *bus) +{ + struct bcma_device *core; + + list_for_each_entry(core, &bus->cores, list) { + struct device_driver *drv = core->dev.driver; + if (drv) { + struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv); + if (adrv->suspend) + adrv->suspend(core); + } + } + return 0; +} + int bcma_bus_resume(struct bcma_bus *bus) { struct bcma_device *core; @@ -252,6 +267,15 @@ int bcma_bus_resume(struct bcma_bus *bus) bcma_core_chipcommon_init(&bus->drv_cc); } + list_for_each_entry(core, &bus->cores, list) { + struct device_driver *drv = core->dev.driver; + if (drv) { + struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv); + if (adrv->resume) + adrv->resume(core); + } + } + return 0; } #endif diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index a30aa10..4e4c8a4 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -317,6 +317,17 @@ config BLK_DEV_NBD If unsure, say N. +config BLK_DEV_NVME + tristate "NVM Express block device" + depends on PCI + ---help--- + The NVM Express driver is for solid state drives directly + connected to the PCI or PCI Express bus. If you know you + don't have one of these, it is safe to answer N. 
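The bcma host_pci hunk above converts the driver from the legacy pci_driver .suspend/.resume hooks to dev_pm_ops. A minimal sketch of that conversion pattern, with hypothetical foo names and only the PM-related fields filled in:

#include <linux/pci.h>
#include <linux/pm.h>

#ifdef CONFIG_PM
static int foo_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* device-specific quiesce only; the PCI core now saves state itself */
	dev_dbg(&pdev->dev, "suspending\n");
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* device-specific re-initialisation */
	dev_dbg(&pdev->dev, "resuming\n");
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
#define FOO_PM_OPS	(&foo_pm_ops)
#else
#define FOO_PM_OPS	NULL
#endif

static struct pci_driver foo_driver = {
	.name		= "foo",
	/* .id_table, .probe and .remove omitted in this sketch */
	.driver.pm	= FOO_PM_OPS,
};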
+ + To compile this driver as a module, choose M here: the + module will be called nvme. + config BLK_DEV_OSD tristate "OSD object-as-blkdev support" depends on SCSI_OSD_ULD diff --git a/drivers/block/Makefile b/drivers/block/Makefile index ad7b74a..5b79505 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_XILINX_SYSACE) += xsysace.o obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o obj-$(CONFIG_MG_DISK) += mg_disk.o obj-$(CONFIG_SUNVDC) += sunvdc.o +obj-$(CONFIG_BLK_DEV_NVME) += nvme.o obj-$(CONFIG_BLK_DEV_OSD) += osdblk.o obj-$(CONFIG_BLK_DEV_UMEM) += umem.o diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c new file mode 100644 index 0000000..c1dc4d8 --- /dev/null +++ b/drivers/block/nvme.c @@ -0,0 +1,1739 @@ +/* + * NVM Express device driver + * Copyright (c) 2011, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <linux/nvme.h> +#include <linux/bio.h> +#include <linux/bitops.h> +#include <linux/blkdev.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/fs.h> +#include <linux/genhd.h> +#include <linux/idr.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kdev_t.h> +#include <linux/kthread.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/pci.h> +#include <linux/poison.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/version.h> + +#define NVME_Q_DEPTH 1024 +#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command)) +#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion)) +#define NVME_MINORS 64 +#define NVME_IO_TIMEOUT (5 * HZ) +#define ADMIN_TIMEOUT (60 * HZ) + +static int nvme_major; +module_param(nvme_major, int, 0); + +static int use_threaded_interrupts; +module_param(use_threaded_interrupts, int, 0); + +static DEFINE_SPINLOCK(dev_list_lock); +static LIST_HEAD(dev_list); +static struct task_struct *nvme_thread; + +/* + * Represents an NVM Express device. Each nvme_dev is a PCI function. + */ +struct nvme_dev { + struct list_head node; + struct nvme_queue **queues; + u32 __iomem *dbs; + struct pci_dev *pci_dev; + struct dma_pool *prp_page_pool; + struct dma_pool *prp_small_pool; + int instance; + int queue_count; + int db_stride; + u32 ctrl_config; + struct msix_entry *entry; + struct nvme_bar __iomem *bar; + struct list_head namespaces; + char serial[20]; + char model[40]; + char firmware_rev[8]; +}; + +/* + * An NVM Express namespace is equivalent to a SCSI LUN + */ +struct nvme_ns { + struct list_head list; + + struct nvme_dev *dev; + struct request_queue *queue; + struct gendisk *disk; + + int ns_id; + int lba_shift; +}; + +/* + * An NVM Express queue. Each device has at least two (one for admin + * commands and one for I/O commands). 
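The admin queue carries controller management commands (queue create/delete, identify, get/set features); the I/O queues carry reads, writes and flushes.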
+ */ +struct nvme_queue { + struct device *q_dmadev; + struct nvme_dev *dev; + spinlock_t q_lock; + struct nvme_command *sq_cmds; + volatile struct nvme_completion *cqes; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + wait_queue_head_t sq_full; + wait_queue_t sq_cong_wait; + struct bio_list sq_cong; + u32 __iomem *q_db; + u16 q_depth; + u16 cq_vector; + u16 sq_head; + u16 sq_tail; + u16 cq_head; + u16 cq_phase; + unsigned long cmdid_data[]; +}; + +/* + * Check we didin't inadvertently grow the command struct + */ +static inline void _nvme_check_size(void) +{ + BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64); + BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64); + BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64); + BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64); + BUILD_BUG_ON(sizeof(struct nvme_features) != 64); + BUILD_BUG_ON(sizeof(struct nvme_command) != 64); + BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096); + BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096); + BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); +} + +typedef void (*nvme_completion_fn)(struct nvme_dev *, void *, + struct nvme_completion *); + +struct nvme_cmd_info { + nvme_completion_fn fn; + void *ctx; + unsigned long timeout; +}; + +static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq) +{ + return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)]; +} + +/** + * alloc_cmdid() - Allocate a Command ID + * @nvmeq: The queue that will be used for this command + * @ctx: A pointer that will be passed to the handler + * @handler: The function to call on completion + * + * Allocate a Command ID for a queue. The data passed in will + * be passed to the completion handler. This is implemented by using + * the bottom two bits of the ctx pointer to store the handler ID. + * Passing in a pointer that's not 4-byte aligned will cause a BUG. + * We can change this if it becomes a problem. + * + * May be called with local interrupts disabled and the q_lock held, + * or with interrupts enabled and no locks held. + */ +static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, + nvme_completion_fn handler, unsigned timeout) +{ + int depth = nvmeq->q_depth - 1; + struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); + int cmdid; + + do { + cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth); + if (cmdid >= depth) + return -EBUSY; + } while (test_and_set_bit(cmdid, nvmeq->cmdid_data)); + + info[cmdid].fn = handler; + info[cmdid].ctx = ctx; + info[cmdid].timeout = jiffies + timeout; + return cmdid; +} + +static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx, + nvme_completion_fn handler, unsigned timeout) +{ + int cmdid; + wait_event_killable(nvmeq->sq_full, + (cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0); + return (cmdid < 0) ? 
-EINTR : cmdid; +} + +/* Special values must be less than 0x1000 */ +#define CMD_CTX_BASE ((void *)POISON_POINTER_DELTA) +#define CMD_CTX_CANCELLED (0x30C + CMD_CTX_BASE) +#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE) +#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE) +#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE) + +static void special_completion(struct nvme_dev *dev, void *ctx, + struct nvme_completion *cqe) +{ + if (ctx == CMD_CTX_CANCELLED) + return; + if (ctx == CMD_CTX_FLUSH) + return; + if (ctx == CMD_CTX_COMPLETED) { + dev_warn(&dev->pci_dev->dev, + "completed id %d twice on queue %d\n", + cqe->command_id, le16_to_cpup(&cqe->sq_id)); + return; + } + if (ctx == CMD_CTX_INVALID) { + dev_warn(&dev->pci_dev->dev, + "invalid id %d completed on queue %d\n", + cqe->command_id, le16_to_cpup(&cqe->sq_id)); + return; + } + + dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx); +} + +/* + * Called with local interrupts disabled and the q_lock held. May not sleep. + */ +static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid, + nvme_completion_fn *fn) +{ + void *ctx; + struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); + + if (cmdid >= nvmeq->q_depth) { + *fn = special_completion; + return CMD_CTX_INVALID; + } + *fn = info[cmdid].fn; + ctx = info[cmdid].ctx; + info[cmdid].fn = special_completion; + info[cmdid].ctx = CMD_CTX_COMPLETED; + clear_bit(cmdid, nvmeq->cmdid_data); + wake_up(&nvmeq->sq_full); + return ctx; +} + +static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid, + nvme_completion_fn *fn) +{ + void *ctx; + struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); + if (fn) + *fn = info[cmdid].fn; + ctx = info[cmdid].ctx; + info[cmdid].fn = special_completion; + info[cmdid].ctx = CMD_CTX_CANCELLED; + return ctx; +} + +static struct nvme_queue *get_nvmeq(struct nvme_dev *dev) +{ + return dev->queues[get_cpu() + 1]; +} + +static void put_nvmeq(struct nvme_queue *nvmeq) +{ + put_cpu(); +} + +/** + * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell + * @nvmeq: The queue to use + * @cmd: The command to send + * + * Safe to use from interrupt context + */ +static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd) +{ + unsigned long flags; + u16 tail; + spin_lock_irqsave(&nvmeq->q_lock, flags); + tail = nvmeq->sq_tail; + memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd)); + if (++tail == nvmeq->q_depth) + tail = 0; + writel(tail, nvmeq->q_db); + nvmeq->sq_tail = tail; + spin_unlock_irqrestore(&nvmeq->q_lock, flags); + + return 0; +} + +/* + * The nvme_iod describes the data in an I/O, including the list of PRP + * entries. You can't see it in this data structure because C doesn't let + * me express that. Use nvme_alloc_iod to ensure there's enough space + * allocated to store the PRP list. + */ +struct nvme_iod { + void *private; /* For the use of the submitter of the I/O */ + int npages; /* In the PRP list. 0 means small pool in use */ + int offset; /* Of PRP list */ + int nents; /* Used in scatterlist */ + int length; /* Of data, in bytes */ + dma_addr_t first_dma; + struct scatterlist sg[0]; +}; + +static __le64 **iod_list(struct nvme_iod *iod) +{ + return ((void *)iod) + iod->offset; +} + +/* + * Will slightly overestimate the number of pages needed. This is OK + * as it only leads to a small amount of wasted memory for the lifetime of + * the I/O. 
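For example, assuming 4 KiB pages, a 16 KiB transfer that starts part-way into a page touches at most five pages, so budgeting DIV_ROUND_UP(16K + 4K, 4K) = 5 PRP entries (a single PRP list page) always suffices; the only cost is a slightly oversized iod allocation.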
+ */ +static int nvme_npages(unsigned size) +{ + unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE); + return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8); +} + +static struct nvme_iod * +nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp) +{ + struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) + + sizeof(__le64 *) * nvme_npages(nbytes) + + sizeof(struct scatterlist) * nseg, gfp); + + if (iod) { + iod->offset = offsetof(struct nvme_iod, sg[nseg]); + iod->npages = -1; + iod->length = nbytes; + } + + return iod; +} + +static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod) +{ + const int last_prp = PAGE_SIZE / 8 - 1; + int i; + __le64 **list = iod_list(iod); + dma_addr_t prp_dma = iod->first_dma; + + if (iod->npages == 0) + dma_pool_free(dev->prp_small_pool, list[0], prp_dma); + for (i = 0; i < iod->npages; i++) { + __le64 *prp_list = list[i]; + dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]); + dma_pool_free(dev->prp_page_pool, prp_list, prp_dma); + prp_dma = next_prp_dma; + } + kfree(iod); +} + +static void requeue_bio(struct nvme_dev *dev, struct bio *bio) +{ + struct nvme_queue *nvmeq = get_nvmeq(dev); + if (bio_list_empty(&nvmeq->sq_cong)) + add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); + bio_list_add(&nvmeq->sq_cong, bio); + put_nvmeq(nvmeq); + wake_up_process(nvme_thread); +} + +static void bio_completion(struct nvme_dev *dev, void *ctx, + struct nvme_completion *cqe) +{ + struct nvme_iod *iod = ctx; + struct bio *bio = iod->private; + u16 status = le16_to_cpup(&cqe->status) >> 1; + + dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents, + bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); + nvme_free_iod(dev, iod); + if (status) { + bio_endio(bio, -EIO); + } else if (bio->bi_vcnt > bio->bi_idx) { + requeue_bio(dev, bio); + } else { + bio_endio(bio, 0); + } +} + +/* length is in bytes. gfp flags indicates whether we may sleep. 
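PRP1 always points at the first, possibly page-unaligned, chunk of the buffer; PRP2 is either the address of the second page or, when more than two entries are needed, the DMA address of a PRP list taken from one of the two pools created in nvme_setup_prp_pools().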
*/ +static int nvme_setup_prps(struct nvme_dev *dev, + struct nvme_common_command *cmd, struct nvme_iod *iod, + int total_len, gfp_t gfp) +{ + struct dma_pool *pool; + int length = total_len; + struct scatterlist *sg = iod->sg; + int dma_len = sg_dma_len(sg); + u64 dma_addr = sg_dma_address(sg); + int offset = offset_in_page(dma_addr); + __le64 *prp_list; + __le64 **list = iod_list(iod); + dma_addr_t prp_dma; + int nprps, i; + + cmd->prp1 = cpu_to_le64(dma_addr); + length -= (PAGE_SIZE - offset); + if (length <= 0) + return total_len; + + dma_len -= (PAGE_SIZE - offset); + if (dma_len) { + dma_addr += (PAGE_SIZE - offset); + } else { + sg = sg_next(sg); + dma_addr = sg_dma_address(sg); + dma_len = sg_dma_len(sg); + } + + if (length <= PAGE_SIZE) { + cmd->prp2 = cpu_to_le64(dma_addr); + return total_len; + } + + nprps = DIV_ROUND_UP(length, PAGE_SIZE); + if (nprps <= (256 / 8)) { + pool = dev->prp_small_pool; + iod->npages = 0; + } else { + pool = dev->prp_page_pool; + iod->npages = 1; + } + + prp_list = dma_pool_alloc(pool, gfp, &prp_dma); + if (!prp_list) { + cmd->prp2 = cpu_to_le64(dma_addr); + iod->npages = -1; + return (total_len - length) + PAGE_SIZE; + } + list[0] = prp_list; + iod->first_dma = prp_dma; + cmd->prp2 = cpu_to_le64(prp_dma); + i = 0; + for (;;) { + if (i == PAGE_SIZE / 8) { + __le64 *old_prp_list = prp_list; + prp_list = dma_pool_alloc(pool, gfp, &prp_dma); + if (!prp_list) + return total_len - length; + list[iod->npages++] = prp_list; + prp_list[0] = old_prp_list[i - 1]; + old_prp_list[i - 1] = cpu_to_le64(prp_dma); + i = 1; + } + prp_list[i++] = cpu_to_le64(dma_addr); + dma_len -= PAGE_SIZE; + dma_addr += PAGE_SIZE; + length -= PAGE_SIZE; + if (length <= 0) + break; + if (dma_len > 0) + continue; + BUG_ON(dma_len < 0); + sg = sg_next(sg); + dma_addr = sg_dma_address(sg); + dma_len = sg_dma_len(sg); + } + + return total_len; +} + +/* NVMe scatterlists require no holes in the virtual address */ +#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2) ((vec2)->bv_offset || \ + (((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE)) + +static int nvme_map_bio(struct device *dev, struct nvme_iod *iod, + struct bio *bio, enum dma_data_direction dma_dir, int psegs) +{ + struct bio_vec *bvec, *bvprv = NULL; + struct scatterlist *sg = NULL; + int i, old_idx, length = 0, nsegs = 0; + + sg_init_table(iod->sg, psegs); + old_idx = bio->bi_idx; + bio_for_each_segment(bvec, bio, i) { + if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) { + sg->length += bvec->bv_len; + } else { + if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec)) + break; + sg = sg ? 
sg + 1 : iod->sg; + sg_set_page(sg, bvec->bv_page, bvec->bv_len, + bvec->bv_offset); + nsegs++; + } + length += bvec->bv_len; + bvprv = bvec; + } + bio->bi_idx = i; + iod->nents = nsegs; + sg_mark_end(sg); + if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) { + bio->bi_idx = old_idx; + return -ENOMEM; + } + return length; +} + +static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns, + int cmdid) +{ + struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; + + memset(cmnd, 0, sizeof(*cmnd)); + cmnd->common.opcode = nvme_cmd_flush; + cmnd->common.command_id = cmdid; + cmnd->common.nsid = cpu_to_le32(ns->ns_id); + + if (++nvmeq->sq_tail == nvmeq->q_depth) + nvmeq->sq_tail = 0; + writel(nvmeq->sq_tail, nvmeq->q_db); + + return 0; +} + +static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns) +{ + int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH, + special_completion, NVME_IO_TIMEOUT); + if (unlikely(cmdid < 0)) + return cmdid; + + return nvme_submit_flush(nvmeq, ns, cmdid); +} + +/* + * Called with local interrupts disabled and the q_lock held. May not sleep. + */ +static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, + struct bio *bio) +{ + struct nvme_command *cmnd; + struct nvme_iod *iod; + enum dma_data_direction dma_dir; + int cmdid, length, result = -ENOMEM; + u16 control; + u32 dsmgmt; + int psegs = bio_phys_segments(ns->queue, bio); + + if ((bio->bi_rw & REQ_FLUSH) && psegs) { + result = nvme_submit_flush_data(nvmeq, ns); + if (result) + return result; + } + + iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC); + if (!iod) + goto nomem; + iod->private = bio; + + result = -EBUSY; + cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT); + if (unlikely(cmdid < 0)) + goto free_iod; + + if ((bio->bi_rw & REQ_FLUSH) && !psegs) + return nvme_submit_flush(nvmeq, ns, cmdid); + + control = 0; + if (bio->bi_rw & REQ_FUA) + control |= NVME_RW_FUA; + if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD)) + control |= NVME_RW_LR; + + dsmgmt = 0; + if (bio->bi_rw & REQ_RAHEAD) + dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; + + cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; + + memset(cmnd, 0, sizeof(*cmnd)); + if (bio_data_dir(bio)) { + cmnd->rw.opcode = nvme_cmd_write; + dma_dir = DMA_TO_DEVICE; + } else { + cmnd->rw.opcode = nvme_cmd_read; + dma_dir = DMA_FROM_DEVICE; + } + + result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs); + if (result < 0) + goto free_iod; + length = result; + + cmnd->rw.command_id = cmdid; + cmnd->rw.nsid = cpu_to_le32(ns->ns_id); + length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length, + GFP_ATOMIC); + cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9)); + cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1); + cmnd->rw.control = cpu_to_le16(control); + cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); + + bio->bi_sector += length >> 9; + + if (++nvmeq->sq_tail == nvmeq->q_depth) + nvmeq->sq_tail = 0; + writel(nvmeq->sq_tail, nvmeq->q_db); + + return 0; + + free_iod: + nvme_free_iod(nvmeq->dev, iod); + nomem: + return result; +} + +static void nvme_make_request(struct request_queue *q, struct bio *bio) +{ + struct nvme_ns *ns = q->queuedata; + struct nvme_queue *nvmeq = get_nvmeq(ns->dev); + int result = -EBUSY; + + spin_lock_irq(&nvmeq->q_lock); + if (bio_list_empty(&nvmeq->sq_cong)) + result = nvme_submit_bio_queue(nvmeq, ns, bio); + if (unlikely(result)) { + if (bio_list_empty(&nvmeq->sq_cong)) + add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); + 
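/* Submission failed or the queue was already congested: park the bio on the per-queue congestion list so nvme_resubmit_bios() can retry it from the nvme kthread. */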
bio_list_add(&nvmeq->sq_cong, bio); + } + + spin_unlock_irq(&nvmeq->q_lock); + put_nvmeq(nvmeq); +} + +static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq) +{ + u16 head, phase; + + head = nvmeq->cq_head; + phase = nvmeq->cq_phase; + + for (;;) { + void *ctx; + nvme_completion_fn fn; + struct nvme_completion cqe = nvmeq->cqes[head]; + if ((le16_to_cpu(cqe.status) & 1) != phase) + break; + nvmeq->sq_head = le16_to_cpu(cqe.sq_head); + if (++head == nvmeq->q_depth) { + head = 0; + phase = !phase; + } + + ctx = free_cmdid(nvmeq, cqe.command_id, &fn); + fn(nvmeq->dev, ctx, &cqe); + } + + /* If the controller ignores the cq head doorbell and continuously + * writes to the queue, it is theoretically possible to wrap around + * the queue twice and mistakenly return IRQ_NONE. Linux only + * requires that 0.1% of your interrupts are handled, so this isn't + * a big problem. + */ + if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) + return IRQ_NONE; + + writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride)); + nvmeq->cq_head = head; + nvmeq->cq_phase = phase; + + return IRQ_HANDLED; +} + +static irqreturn_t nvme_irq(int irq, void *data) +{ + irqreturn_t result; + struct nvme_queue *nvmeq = data; + spin_lock(&nvmeq->q_lock); + result = nvme_process_cq(nvmeq); + spin_unlock(&nvmeq->q_lock); + return result; +} + +static irqreturn_t nvme_irq_check(int irq, void *data) +{ + struct nvme_queue *nvmeq = data; + struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head]; + if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase) + return IRQ_NONE; + return IRQ_WAKE_THREAD; +} + +static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid) +{ + spin_lock_irq(&nvmeq->q_lock); + cancel_cmdid(nvmeq, cmdid, NULL); + spin_unlock_irq(&nvmeq->q_lock); +} + +struct sync_cmd_info { + struct task_struct *task; + u32 result; + int status; +}; + +static void sync_completion(struct nvme_dev *dev, void *ctx, + struct nvme_completion *cqe) +{ + struct sync_cmd_info *cmdinfo = ctx; + cmdinfo->result = le32_to_cpup(&cqe->result); + cmdinfo->status = le16_to_cpup(&cqe->status) >> 1; + wake_up_process(cmdinfo->task); +} + +/* + * Returns 0 on success. 
If the result is negative, it's a Linux error code; + * if the result is positive, it's an NVM Express status code + */ +static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, + struct nvme_command *cmd, u32 *result, unsigned timeout) +{ + int cmdid; + struct sync_cmd_info cmdinfo; + + cmdinfo.task = current; + cmdinfo.status = -EINTR; + + cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion, + timeout); + if (cmdid < 0) + return cmdid; + cmd->common.command_id = cmdid; + + set_current_state(TASK_KILLABLE); + nvme_submit_cmd(nvmeq, cmd); + schedule(); + + if (cmdinfo.status == -EINTR) { + nvme_abort_command(nvmeq, cmdid); + return -EINTR; + } + + if (result) + *result = cmdinfo.result; + + return cmdinfo.status; +} + +static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd, + u32 *result) +{ + return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT); +} + +static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) +{ + int status; + struct nvme_command c; + + memset(&c, 0, sizeof(c)); + c.delete_queue.opcode = opcode; + c.delete_queue.qid = cpu_to_le16(id); + + status = nvme_submit_admin_cmd(dev, &c, NULL); + if (status) + return -EIO; + return 0; +} + +static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, + struct nvme_queue *nvmeq) +{ + int status; + struct nvme_command c; + int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED; + + memset(&c, 0, sizeof(c)); + c.create_cq.opcode = nvme_admin_create_cq; + c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); + c.create_cq.cqid = cpu_to_le16(qid); + c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); + c.create_cq.cq_flags = cpu_to_le16(flags); + c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector); + + status = nvme_submit_admin_cmd(dev, &c, NULL); + if (status) + return -EIO; + return 0; +} + +static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, + struct nvme_queue *nvmeq) +{ + int status; + struct nvme_command c; + int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM; + + memset(&c, 0, sizeof(c)); + c.create_sq.opcode = nvme_admin_create_sq; + c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); + c.create_sq.sqid = cpu_to_le16(qid); + c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); + c.create_sq.sq_flags = cpu_to_le16(flags); + c.create_sq.cqid = cpu_to_le16(qid); + + status = nvme_submit_admin_cmd(dev, &c, NULL); + if (status) + return -EIO; + return 0; +} + +static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid) +{ + return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid); +} + +static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid) +{ + return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid); +} + +static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns, + dma_addr_t dma_addr) +{ + struct nvme_command c; + + memset(&c, 0, sizeof(c)); + c.identify.opcode = nvme_admin_identify; + c.identify.nsid = cpu_to_le32(nsid); + c.identify.prp1 = cpu_to_le64(dma_addr); + c.identify.cns = cpu_to_le32(cns); + + return nvme_submit_admin_cmd(dev, &c, NULL); +} + +static int nvme_get_features(struct nvme_dev *dev, unsigned fid, + unsigned dword11, dma_addr_t dma_addr) +{ + struct nvme_command c; + + memset(&c, 0, sizeof(c)); + c.features.opcode = nvme_admin_get_features; + c.features.prp1 = cpu_to_le64(dma_addr); + c.features.fid = cpu_to_le32(fid); + c.features.dword11 = cpu_to_le32(dword11); + + return nvme_submit_admin_cmd(dev, &c, NULL); +} + +static int nvme_set_features(struct nvme_dev *dev, unsigned fid, + unsigned 
dword11, dma_addr_t dma_addr, u32 *result) +{ + struct nvme_command c; + + memset(&c, 0, sizeof(c)); + c.features.opcode = nvme_admin_set_features; + c.features.prp1 = cpu_to_le64(dma_addr); + c.features.fid = cpu_to_le32(fid); + c.features.dword11 = cpu_to_le32(dword11); + + return nvme_submit_admin_cmd(dev, &c, result); +} + +static void nvme_free_queue(struct nvme_dev *dev, int qid) +{ + struct nvme_queue *nvmeq = dev->queues[qid]; + int vector = dev->entry[nvmeq->cq_vector].vector; + + irq_set_affinity_hint(vector, NULL); + free_irq(vector, nvmeq); + + /* Don't tell the adapter to delete the admin queue */ + if (qid) { + adapter_delete_sq(dev, qid); + adapter_delete_cq(dev, qid); + } + + dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), + (void *)nvmeq->cqes, nvmeq->cq_dma_addr); + dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), + nvmeq->sq_cmds, nvmeq->sq_dma_addr); + kfree(nvmeq); +} + +static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, + int depth, int vector) +{ + struct device *dmadev = &dev->pci_dev->dev; + unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info)); + struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL); + if (!nvmeq) + return NULL; + + nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth), + &nvmeq->cq_dma_addr, GFP_KERNEL); + if (!nvmeq->cqes) + goto free_nvmeq; + memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth)); + + nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth), + &nvmeq->sq_dma_addr, GFP_KERNEL); + if (!nvmeq->sq_cmds) + goto free_cqdma; + + nvmeq->q_dmadev = dmadev; + nvmeq->dev = dev; + spin_lock_init(&nvmeq->q_lock); + nvmeq->cq_head = 0; + nvmeq->cq_phase = 1; + init_waitqueue_head(&nvmeq->sq_full); + init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread); + bio_list_init(&nvmeq->sq_cong); + nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)]; + nvmeq->q_depth = depth; + nvmeq->cq_vector = vector; + + return nvmeq; + + free_cqdma: + dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes, + nvmeq->cq_dma_addr); + free_nvmeq: + kfree(nvmeq); + return NULL; +} + +static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq, + const char *name) +{ + if (use_threaded_interrupts) + return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector, + nvme_irq_check, nvme_irq, + IRQF_DISABLED | IRQF_SHARED, + name, nvmeq); + return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq, + IRQF_DISABLED | IRQF_SHARED, name, nvmeq); +} + +static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, + int qid, int cq_size, int vector) +{ + int result; + struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector); + + if (!nvmeq) + return ERR_PTR(-ENOMEM); + + result = adapter_alloc_cq(dev, qid, nvmeq); + if (result < 0) + goto free_nvmeq; + + result = adapter_alloc_sq(dev, qid, nvmeq); + if (result < 0) + goto release_cq; + + result = queue_request_irq(dev, nvmeq, "nvme"); + if (result < 0) + goto release_sq; + + return nvmeq; + + release_sq: + adapter_delete_sq(dev, qid); + release_cq: + adapter_delete_cq(dev, qid); + free_nvmeq: + dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), + (void *)nvmeq->cqes, nvmeq->cq_dma_addr); + dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), + nvmeq->sq_cmds, nvmeq->sq_dma_addr); + kfree(nvmeq); + return ERR_PTR(result); +} + +static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev) +{ + int result; + u32 aqa; + u64 cap; + unsigned long timeout; + struct 
nvme_queue *nvmeq; + + dev->dbs = ((void __iomem *)dev->bar) + 4096; + + nvmeq = nvme_alloc_queue(dev, 0, 64, 0); + if (!nvmeq) + return -ENOMEM; + + aqa = nvmeq->q_depth - 1; + aqa |= aqa << 16; + + dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM; + dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; + dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE; + dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; + + writel(0, &dev->bar->cc); + writel(aqa, &dev->bar->aqa); + writeq(nvmeq->sq_dma_addr, &dev->bar->asq); + writeq(nvmeq->cq_dma_addr, &dev->bar->acq); + writel(dev->ctrl_config, &dev->bar->cc); + + cap = readq(&dev->bar->cap); + timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies; + dev->db_stride = NVME_CAP_STRIDE(cap); + + while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) { + msleep(100); + if (fatal_signal_pending(current)) + return -EINTR; + if (time_after(jiffies, timeout)) { + dev_err(&dev->pci_dev->dev, + "Device not ready; aborting initialisation\n"); + return -ENODEV; + } + } + + result = queue_request_irq(dev, nvmeq, "nvme admin"); + dev->queues[0] = nvmeq; + return result; +} + +static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, + unsigned long addr, unsigned length) +{ + int i, err, count, nents, offset; + struct scatterlist *sg; + struct page **pages; + struct nvme_iod *iod; + + if (addr & 3) + return ERR_PTR(-EINVAL); + if (!length) + return ERR_PTR(-EINVAL); + + offset = offset_in_page(addr); + count = DIV_ROUND_UP(offset + length, PAGE_SIZE); + pages = kcalloc(count, sizeof(*pages), GFP_KERNEL); + + err = get_user_pages_fast(addr, count, 1, pages); + if (err < count) { + count = err; + err = -EFAULT; + goto put_pages; + } + + iod = nvme_alloc_iod(count, length, GFP_KERNEL); + sg = iod->sg; + sg_init_table(sg, count); + for (i = 0; i < count; i++) { + sg_set_page(&sg[i], pages[i], + min_t(int, length, PAGE_SIZE - offset), offset); + length -= (PAGE_SIZE - offset); + offset = 0; + } + sg_mark_end(&sg[i - 1]); + iod->nents = count; + + err = -ENOMEM; + nents = dma_map_sg(&dev->pci_dev->dev, sg, count, + write ? DMA_TO_DEVICE : DMA_FROM_DEVICE); + if (!nents) + goto free_iod; + + kfree(pages); + return iod; + + free_iod: + kfree(iod); + put_pages: + for (i = 0; i < count; i++) + put_page(pages[i]); + kfree(pages); + return ERR_PTR(err); +} + +static void nvme_unmap_user_pages(struct nvme_dev *dev, int write, + struct nvme_iod *iod) +{ + int i; + + dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents, + write ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); + + for (i = 0; i < iod->nents; i++) + put_page(sg_page(&iod->sg[i])); +} + +static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) +{ + struct nvme_dev *dev = ns->dev; + struct nvme_queue *nvmeq; + struct nvme_user_io io; + struct nvme_command c; + unsigned length; + int status; + struct nvme_iod *iod; + + if (copy_from_user(&io, uio, sizeof(io))) + return -EFAULT; + length = (io.nblocks + 1) << ns->lba_shift; + + switch (io.opcode) { + case nvme_cmd_write: + case nvme_cmd_read: + case nvme_cmd_compare: + iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length); + break; + default: + return -EINVAL; + } + + if (IS_ERR(iod)) + return PTR_ERR(iod); + + memset(&c, 0, sizeof(c)); + c.rw.opcode = io.opcode; + c.rw.flags = io.flags; + c.rw.nsid = cpu_to_le32(ns->ns_id); + c.rw.slba = cpu_to_le64(io.slba); + c.rw.length = cpu_to_le16(io.nblocks); + c.rw.control = cpu_to_le16(io.control); + c.rw.dsmgmt = cpu_to_le16(io.dsmgmt); + c.rw.reftag = io.reftag; + c.rw.apptag = io.apptag; + c.rw.appmask = io.appmask; + /* XXX: metadata */ + length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL); + + nvmeq = get_nvmeq(dev); + /* + * Since nvme_submit_sync_cmd sleeps, we can't keep preemption + * disabled. We may be preempted at any point, and be rescheduled + * to a different CPU. That will cause cacheline bouncing, but no + * additional races since q_lock already protects against other CPUs. + */ + put_nvmeq(nvmeq); + if (length != (io.nblocks + 1) << ns->lba_shift) + status = -ENOMEM; + else + status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT); + + nvme_unmap_user_pages(dev, io.opcode & 1, iod); + nvme_free_iod(dev, iod); + return status; +} + +static int nvme_user_admin_cmd(struct nvme_ns *ns, + struct nvme_admin_cmd __user *ucmd) +{ + struct nvme_dev *dev = ns->dev; + struct nvme_admin_cmd cmd; + struct nvme_command c; + int status, length; + struct nvme_iod *iod; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + if (copy_from_user(&cmd, ucmd, sizeof(cmd))) + return -EFAULT; + + memset(&c, 0, sizeof(c)); + c.common.opcode = cmd.opcode; + c.common.flags = cmd.flags; + c.common.nsid = cpu_to_le32(cmd.nsid); + c.common.cdw2[0] = cpu_to_le32(cmd.cdw2); + c.common.cdw2[1] = cpu_to_le32(cmd.cdw3); + c.common.cdw10[0] = cpu_to_le32(cmd.cdw10); + c.common.cdw10[1] = cpu_to_le32(cmd.cdw11); + c.common.cdw10[2] = cpu_to_le32(cmd.cdw12); + c.common.cdw10[3] = cpu_to_le32(cmd.cdw13); + c.common.cdw10[4] = cpu_to_le32(cmd.cdw14); + c.common.cdw10[5] = cpu_to_le32(cmd.cdw15); + + length = cmd.data_len; + if (cmd.data_len) { + iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr, + length); + if (IS_ERR(iod)) + return PTR_ERR(iod); + length = nvme_setup_prps(dev, &c.common, iod, length, + GFP_KERNEL); + } + + if (length != cmd.data_len) + status = -ENOMEM; + else + status = nvme_submit_admin_cmd(dev, &c, NULL); + + if (cmd.data_len) { + nvme_unmap_user_pages(dev, cmd.opcode & 1, iod); + nvme_free_iod(dev, iod); + } + return status; +} + +static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, + unsigned long arg) +{ + struct nvme_ns *ns = bdev->bd_disk->private_data; + + switch (cmd) { + case NVME_IOCTL_ID: + return ns->ns_id; + case NVME_IOCTL_ADMIN_CMD: + return nvme_user_admin_cmd(ns, (void __user *)arg); + case NVME_IOCTL_SUBMIT_IO: + return nvme_submit_io(ns, (void __user *)arg); + default: + return -ENOTTY; + } +} + +static const struct block_device_operations nvme_fops = { + .owner = 
THIS_MODULE, + .ioctl = nvme_ioctl, + .compat_ioctl = nvme_ioctl, +}; + +static void nvme_timeout_ios(struct nvme_queue *nvmeq) +{ + int depth = nvmeq->q_depth - 1; + struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); + unsigned long now = jiffies; + int cmdid; + + for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) { + void *ctx; + nvme_completion_fn fn; + static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, }; + + if (!time_after(now, info[cmdid].timeout)) + continue; + dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid); + ctx = cancel_cmdid(nvmeq, cmdid, &fn); + fn(nvmeq->dev, ctx, &cqe); + } +} + +static void nvme_resubmit_bios(struct nvme_queue *nvmeq) +{ + while (bio_list_peek(&nvmeq->sq_cong)) { + struct bio *bio = bio_list_pop(&nvmeq->sq_cong); + struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data; + if (nvme_submit_bio_queue(nvmeq, ns, bio)) { + bio_list_add_head(&nvmeq->sq_cong, bio); + break; + } + if (bio_list_empty(&nvmeq->sq_cong)) + remove_wait_queue(&nvmeq->sq_full, + &nvmeq->sq_cong_wait); + } +} + +static int nvme_kthread(void *data) +{ + struct nvme_dev *dev; + + while (!kthread_should_stop()) { + __set_current_state(TASK_RUNNING); + spin_lock(&dev_list_lock); + list_for_each_entry(dev, &dev_list, node) { + int i; + for (i = 0; i < dev->queue_count; i++) { + struct nvme_queue *nvmeq = dev->queues[i]; + if (!nvmeq) + continue; + spin_lock_irq(&nvmeq->q_lock); + if (nvme_process_cq(nvmeq)) + printk("process_cq did something\n"); + nvme_timeout_ios(nvmeq); + nvme_resubmit_bios(nvmeq); + spin_unlock_irq(&nvmeq->q_lock); + } + } + spin_unlock(&dev_list_lock); + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ); + } + return 0; +} + +static DEFINE_IDA(nvme_index_ida); + +static int nvme_get_ns_idx(void) +{ + int index, error; + + do { + if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL)) + return -1; + + spin_lock(&dev_list_lock); + error = ida_get_new(&nvme_index_ida, &index); + spin_unlock(&dev_list_lock); + } while (error == -EAGAIN); + + if (error) + index = -1; + return index; +} + +static void nvme_put_ns_idx(int index) +{ + spin_lock(&dev_list_lock); + ida_remove(&nvme_index_ida, index); + spin_unlock(&dev_list_lock); +} + +static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid, + struct nvme_id_ns *id, struct nvme_lba_range_type *rt) +{ + struct nvme_ns *ns; + struct gendisk *disk; + int lbaf; + + if (rt->attributes & NVME_LBART_ATTRIB_HIDE) + return NULL; + + ns = kzalloc(sizeof(*ns), GFP_KERNEL); + if (!ns) + return NULL; + ns->queue = blk_alloc_queue(GFP_KERNEL); + if (!ns->queue) + goto out_free_ns; + ns->queue->queue_flags = QUEUE_FLAG_DEFAULT; + queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue); + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue); +/* queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */ + blk_queue_make_request(ns->queue, nvme_make_request); + ns->dev = dev; + ns->queue->queuedata = ns; + + disk = alloc_disk(NVME_MINORS); + if (!disk) + goto out_free_queue; + ns->ns_id = nsid; + ns->disk = disk; + lbaf = id->flbas & 0xf; + ns->lba_shift = id->lbaf[lbaf].ds; + + disk->major = nvme_major; + disk->minors = NVME_MINORS; + disk->first_minor = NVME_MINORS * nvme_get_ns_idx(); + disk->fops = &nvme_fops; + disk->private_data = ns; + disk->queue = ns->queue; + disk->driverfs_dev = &dev->pci_dev->dev; + sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid); + set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9)); + + return ns; + + out_free_queue: + 
blk_cleanup_queue(ns->queue); + out_free_ns: + kfree(ns); + return NULL; +} + +static void nvme_ns_free(struct nvme_ns *ns) +{ + int index = ns->disk->first_minor / NVME_MINORS; + put_disk(ns->disk); + nvme_put_ns_idx(index); + blk_cleanup_queue(ns->queue); + kfree(ns); +} + +static int set_queue_count(struct nvme_dev *dev, int count) +{ + int status; + u32 result; + u32 q_count = (count - 1) | ((count - 1) << 16); + + status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0, + &result); + if (status) + return -EIO; + return min(result & 0xffff, result >> 16) + 1; +} + +static int __devinit nvme_setup_io_queues(struct nvme_dev *dev) +{ + int result, cpu, i, nr_io_queues, db_bar_size; + + nr_io_queues = num_online_cpus(); + result = set_queue_count(dev, nr_io_queues); + if (result < 0) + return result; + if (result < nr_io_queues) + nr_io_queues = result; + + /* Deregister the admin queue's interrupt */ + free_irq(dev->entry[0].vector, dev->queues[0]); + + db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3)); + if (db_bar_size > 8192) { + iounmap(dev->bar); + dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0), + db_bar_size); + dev->dbs = ((void __iomem *)dev->bar) + 4096; + dev->queues[0]->q_db = dev->dbs; + } + + for (i = 0; i < nr_io_queues; i++) + dev->entry[i].entry = i; + for (;;) { + result = pci_enable_msix(dev->pci_dev, dev->entry, + nr_io_queues); + if (result == 0) { + break; + } else if (result > 0) { + nr_io_queues = result; + continue; + } else { + nr_io_queues = 1; + break; + } + } + + result = queue_request_irq(dev, dev->queues[0], "nvme admin"); + /* XXX: handle failure here */ + + cpu = cpumask_first(cpu_online_mask); + for (i = 0; i < nr_io_queues; i++) { + irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu)); + cpu = cpumask_next(cpu, cpu_online_mask); + } + + for (i = 0; i < nr_io_queues; i++) { + dev->queues[i + 1] = nvme_create_queue(dev, i + 1, + NVME_Q_DEPTH, i); + if (IS_ERR(dev->queues[i + 1])) + return PTR_ERR(dev->queues[i + 1]); + dev->queue_count++; + } + + for (; i < num_possible_cpus(); i++) { + int target = i % rounddown_pow_of_two(dev->queue_count - 1); + dev->queues[i + 1] = dev->queues[target + 1]; + } + + return 0; +} + +static void nvme_free_queues(struct nvme_dev *dev) +{ + int i; + + for (i = dev->queue_count - 1; i >= 0; i--) + nvme_free_queue(dev, i); +} + +static int __devinit nvme_dev_add(struct nvme_dev *dev) +{ + int res, nn, i; + struct nvme_ns *ns, *next; + struct nvme_id_ctrl *ctrl; + struct nvme_id_ns *id_ns; + void *mem; + dma_addr_t dma_addr; + + res = nvme_setup_io_queues(dev); + if (res) + return res; + + mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr, + GFP_KERNEL); + + res = nvme_identify(dev, 0, 1, dma_addr); + if (res) { + res = -EIO; + goto out_free; + } + + ctrl = mem; + nn = le32_to_cpup(&ctrl->nn); + memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn)); + memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn)); + memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr)); + + id_ns = mem; + for (i = 1; i <= nn; i++) { + res = nvme_identify(dev, i, 0, dma_addr); + if (res) + continue; + + if (id_ns->ncap == 0) + continue; + + res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i, + dma_addr + 4096); + if (res) + continue; + + ns = nvme_alloc_ns(dev, i, mem, mem + 4096); + if (ns) + list_add_tail(&ns->list, &dev->namespaces); + } + list_for_each_entry(ns, &dev->namespaces, list) + add_disk(ns->disk); + + goto out; + + out_free: + list_for_each_entry_safe(ns, next, &dev->namespaces, list) { + 
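/* error path: undo any namespace allocations made before the failure */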
list_del(&ns->list); + nvme_ns_free(ns); + } + + out: + dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr); + return res; +} + +static int nvme_dev_remove(struct nvme_dev *dev) +{ + struct nvme_ns *ns, *next; + + spin_lock(&dev_list_lock); + list_del(&dev->node); + spin_unlock(&dev_list_lock); + + /* TODO: wait all I/O finished or cancel them */ + + list_for_each_entry_safe(ns, next, &dev->namespaces, list) { + list_del(&ns->list); + del_gendisk(ns->disk); + nvme_ns_free(ns); + } + + nvme_free_queues(dev); + + return 0; +} + +static int nvme_setup_prp_pools(struct nvme_dev *dev) +{ + struct device *dmadev = &dev->pci_dev->dev; + dev->prp_page_pool = dma_pool_create("prp list page", dmadev, + PAGE_SIZE, PAGE_SIZE, 0); + if (!dev->prp_page_pool) + return -ENOMEM; + + /* Optimisation for I/Os between 4k and 128k */ + dev->prp_small_pool = dma_pool_create("prp list 256", dmadev, + 256, 256, 0); + if (!dev->prp_small_pool) { + dma_pool_destroy(dev->prp_page_pool); + return -ENOMEM; + } + return 0; +} + +static void nvme_release_prp_pools(struct nvme_dev *dev) +{ + dma_pool_destroy(dev->prp_page_pool); + dma_pool_destroy(dev->prp_small_pool); +} + +/* XXX: Use an ida or something to let remove / add work correctly */ +static void nvme_set_instance(struct nvme_dev *dev) +{ + static int instance; + dev->instance = instance++; +} + +static void nvme_release_instance(struct nvme_dev *dev) +{ +} + +static int __devinit nvme_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int bars, result = -ENOMEM; + struct nvme_dev *dev; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry), + GFP_KERNEL); + if (!dev->entry) + goto free; + dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *), + GFP_KERNEL); + if (!dev->queues) + goto free; + + if (pci_enable_device_mem(pdev)) + goto free; + pci_set_master(pdev); + bars = pci_select_bars(pdev, IORESOURCE_MEM); + if (pci_request_selected_regions(pdev, bars, "nvme")) + goto disable; + + INIT_LIST_HEAD(&dev->namespaces); + dev->pci_dev = pdev; + pci_set_drvdata(pdev, dev); + dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + nvme_set_instance(dev); + dev->entry[0].vector = pdev->irq; + + result = nvme_setup_prp_pools(dev); + if (result) + goto disable_msix; + + dev->bar = ioremap(pci_resource_start(pdev, 0), 8192); + if (!dev->bar) { + result = -ENOMEM; + goto disable_msix; + } + + result = nvme_configure_admin_queue(dev); + if (result) + goto unmap; + dev->queue_count++; + + spin_lock(&dev_list_lock); + list_add(&dev->node, &dev_list); + spin_unlock(&dev_list_lock); + + result = nvme_dev_add(dev); + if (result) + goto delete; + + return 0; + + delete: + spin_lock(&dev_list_lock); + list_del(&dev->node); + spin_unlock(&dev_list_lock); + + nvme_free_queues(dev); + unmap: + iounmap(dev->bar); + disable_msix: + pci_disable_msix(pdev); + nvme_release_instance(dev); + nvme_release_prp_pools(dev); + disable: + pci_disable_device(pdev); + pci_release_regions(pdev); + free: + kfree(dev->queues); + kfree(dev->entry); + kfree(dev); + return result; +} + +static void __devexit nvme_remove(struct pci_dev *pdev) +{ + struct nvme_dev *dev = pci_get_drvdata(pdev); + nvme_dev_remove(dev); + pci_disable_msix(pdev); + iounmap(dev->bar); + nvme_release_instance(dev); + nvme_release_prp_pools(dev); + pci_disable_device(pdev); + pci_release_regions(pdev); + kfree(dev->queues); + kfree(dev->entry); + kfree(dev); +} + +/* 
These functions are yet to be implemented */ +#define nvme_error_detected NULL +#define nvme_dump_registers NULL +#define nvme_link_reset NULL +#define nvme_slot_reset NULL +#define nvme_error_resume NULL +#define nvme_suspend NULL +#define nvme_resume NULL + +static struct pci_error_handlers nvme_err_handler = { + .error_detected = nvme_error_detected, + .mmio_enabled = nvme_dump_registers, + .link_reset = nvme_link_reset, + .slot_reset = nvme_slot_reset, + .resume = nvme_error_resume, +}; + +/* Move to pci_ids.h later */ +#define PCI_CLASS_STORAGE_EXPRESS 0x010802 + +static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = { + { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, nvme_id_table); + +static struct pci_driver nvme_driver = { + .name = "nvme", + .id_table = nvme_id_table, + .probe = nvme_probe, + .remove = __devexit_p(nvme_remove), + .suspend = nvme_suspend, + .resume = nvme_resume, + .err_handler = &nvme_err_handler, +}; + +static int __init nvme_init(void) +{ + int result = -EBUSY; + + nvme_thread = kthread_run(nvme_kthread, NULL, "nvme"); + if (IS_ERR(nvme_thread)) + return PTR_ERR(nvme_thread); + + nvme_major = register_blkdev(nvme_major, "nvme"); + if (nvme_major <= 0) + goto kill_kthread; + + result = pci_register_driver(&nvme_driver); + if (result) + goto unregister_blkdev; + return 0; + + unregister_blkdev: + unregister_blkdev(nvme_major, "nvme"); + kill_kthread: + kthread_stop(nvme_thread); + return result; +} + +static void __exit nvme_exit(void) +{ + pci_unregister_driver(&nvme_driver); + unregister_blkdev(nvme_major, "nvme"); + kthread_stop(nvme_thread); +} + +MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("0.8"); +module_init(nvme_init); +module_exit(nvme_exit); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 3fd31de..a6278e7 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -380,6 +380,7 @@ static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr, rbdc = __rbd_client_find(opt); if (rbdc) { ceph_destroy_options(opt); + kfree(rbd_opts); /* using an existing client */ kref_get(&rbdc->kref); @@ -406,15 +407,15 @@ done_err: /* * Destroy ceph client + * + * Caller must hold node_lock. 
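rbd_put_client() now takes node_lock around the final kref_put(), so the list_del() below runs with the client list protected.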
*/ static void rbd_client_release(struct kref *kref) { struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref); dout("rbd_release_client %p\n", rbdc); - spin_lock(&node_lock); list_del(&rbdc->node); - spin_unlock(&node_lock); ceph_destroy_client(rbdc->client); kfree(rbdc->rbd_opts); @@ -427,7 +428,9 @@ static void rbd_client_release(struct kref *kref) */ static void rbd_put_client(struct rbd_device *rbd_dev) { + spin_lock(&node_lock); kref_put(&rbd_dev->rbd_client->kref, rbd_client_release); + spin_unlock(&node_lock); rbd_dev->rbd_client = NULL; rbd_dev->client = NULL; } diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c index 4b71647..317c28c 100644 --- a/drivers/char/agp/backend.c +++ b/drivers/char/agp/backend.c @@ -194,10 +194,10 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge) err_out: if (bridge->driver->needs_scratch_page) { - void *va = page_address(bridge->scratch_page_page); + struct page *page = bridge->scratch_page_page; - bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP); - bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE); + bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP); + bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE); } if (got_gatt) bridge->driver->free_gatt_table(bridge); @@ -221,10 +221,10 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge) if (bridge->driver->agp_destroy_page && bridge->driver->needs_scratch_page) { - void *va = page_address(bridge->scratch_page_page); + struct page *page = bridge->scratch_page_page; - bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP); - bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE); + bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP); + bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE); } } diff --git a/drivers/char/random.c b/drivers/char/random.c index 732215b..54ca8b2 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -965,6 +965,7 @@ EXPORT_SYMBOL(get_random_bytes); */ static void init_std_data(struct entropy_store *r) { + int i; ktime_t now; unsigned long flags; @@ -974,6 +975,11 @@ static void init_std_data(struct entropy_store *r) now = ktime_get_real(); mix_pool_bytes(r, &now, sizeof(now)); + for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof flags) { + if (!arch_get_random_long(&flags)) + break; + mix_pool_bytes(r, &flags, sizeof(flags)); + } mix_pool_bytes(r, utsname(), sizeof(*(utsname()))); } diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index 6a8771f..32362cf 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c @@ -846,6 +846,15 @@ int tpm_do_selftest(struct tpm_chip *chip) do { rc = __tpm_pcr_read(chip, 0, digest); + if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) { + dev_info(chip->dev, + "TPM is disabled/deactivated (0x%X)\n", rc); + /* TPM is disabled and/or deactivated; driver can + * proceed and TPM does handle commands for + * suspend/resume correctly + */ + return 0; + } if (rc != TPM_WARN_DOING_SELFTEST) return rc; msleep(delay_msec); diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 8c1df30..0105471 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -39,6 +39,9 @@ enum tpm_addr { }; #define TPM_WARN_DOING_SELFTEST 0x802 +#define TPM_ERR_DEACTIVATED 0x6 +#define TPM_ERR_DISABLED 0x7 + #define TPM_HEADER_SIZE 10 extern ssize_t tpm_show_pubek(struct device *, struct device_attribute *attr, char *); diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 
5a99bb3..f1a2749 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -124,7 +124,7 @@ config MV_XOR config MX3_IPU bool "MX3x Image Processing Unit support" - depends on SOC_IMX31 ||Â SOC_IMX35 + depends on ARCH_MXC select DMA_ENGINE default y help @@ -187,6 +187,13 @@ config TIMB_DMA help Enable support for the Timberdale FPGA DMA engine. +config SIRF_DMA + tristate "CSR SiRFprimaII DMA support" + depends on ARCH_PRIMA2 + select DMA_ENGINE + help + Enable support for the CSR SiRFprimaII DMA engine. + config ARCH_HAS_ASYNC_TX_FIND_CHANNEL bool @@ -201,26 +208,26 @@ config PL330_DMA platform_data for a dma-pl330 device. config PCH_DMA - tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support" + tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA" depends on PCI && X86 select DMA_ENGINE help Enable support for Intel EG20T PCH DMA engine. - This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ - Output Hub), ML7213 and ML7223. - ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is - for MP(Media Phone) use. - ML7213/ML7223 is companion chip for Intel Atom E6xx series. - ML7213/ML7223 is completely compatible for Intel EG20T PCH. + This driver also can be used for LAPIS Semiconductor IOH(Input/ + Output Hub), ML7213, ML7223 and ML7831. + ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is + for MP(Media Phone) use and ML7831 IOH is for general purpose use. + ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series. + ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH. config IMX_SDMA tristate "i.MX SDMA support" - depends on ARCH_MX25 || SOC_IMX31 ||Â SOC_IMX35 || ARCH_MX5 + depends on ARCH_MXC select DMA_ENGINE help Support the i.MX SDMA engine. This engine is integrated into - Freescale i.MX25/31/35/51 chips. + Freescale i.MX25/31/35/51/53 chips. 
config IMX_DMA tristate "i.MX DMA support" diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 30cf3b1..009a222 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_IMX_SDMA) += imx-sdma.o obj-$(CONFIG_IMX_DMA) += imx-dma.o obj-$(CONFIG_MXS_DMA) += mxs-dma.o obj-$(CONFIG_TIMB_DMA) += timb_dma.o +obj-$(CONFIG_SIRF_DMA) += sirf-dma.o obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o obj-$(CONFIG_PL330_DMA) += pl330.o obj-$(CONFIG_PCH_DMA) += pch_dma.o diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 0698695..8a28158 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -854,8 +854,10 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan, int ret; /* Check if we already have a channel */ - if (plchan->phychan) - return 0; + if (plchan->phychan) { + ch = plchan->phychan; + goto got_channel; + } ch = pl08x_get_phy_channel(pl08x, plchan); if (!ch) { @@ -880,21 +882,22 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan, return -EBUSY; } ch->signal = ret; - - /* Assign the flow control signal to this channel */ - if (txd->direction == DMA_TO_DEVICE) - txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT; - else if (txd->direction == DMA_FROM_DEVICE) - txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT; } + plchan->phychan = ch; dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n", ch->id, ch->signal, plchan->name); +got_channel: + /* Assign the flow control signal to this channel */ + if (txd->direction == DMA_MEM_TO_DEV) + txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT; + else if (txd->direction == DMA_DEV_TO_MEM) + txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT; + plchan->phychan_hold++; - plchan->phychan = ch; return 0; } @@ -1102,10 +1105,10 @@ static int dma_set_runtime_config(struct dma_chan *chan, /* Transfer direction */ plchan->runtime_direction = config->direction; - if (config->direction == DMA_TO_DEVICE) { + if (config->direction == DMA_MEM_TO_DEV) { addr_width = config->dst_addr_width; maxburst = config->dst_maxburst; - } else if (config->direction == DMA_FROM_DEVICE) { + } else if (config->direction == DMA_DEV_TO_MEM) { addr_width = config->src_addr_width; maxburst = config->src_maxburst; } else { @@ -1136,7 +1139,7 @@ static int dma_set_runtime_config(struct dma_chan *chan, cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; - if (plchan->runtime_direction == DMA_FROM_DEVICE) { + if (plchan->runtime_direction == DMA_DEV_TO_MEM) { plchan->src_addr = config->src_addr; plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR | pl08x_select_bus(plchan->cd->periph_buses, @@ -1152,7 +1155,7 @@ static int dma_set_runtime_config(struct dma_chan *chan, "configured channel %s (%s) for %s, data width %d, " "maxburst %d words, LE, CCTL=0x%08x\n", dma_chan_name(chan), plchan->name, - (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", + (config->direction == DMA_DEV_TO_MEM) ? 
"RX" : "TX", addr_width, maxburst, cctl); @@ -1322,7 +1325,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, - unsigned int sg_len, enum dma_data_direction direction, + unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags) { struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); @@ -1354,10 +1357,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( */ txd->direction = direction; - if (direction == DMA_TO_DEVICE) { + if (direction == DMA_MEM_TO_DEV) { txd->cctl = plchan->dst_cctl; slave_addr = plchan->dst_addr; - } else if (direction == DMA_FROM_DEVICE) { + } else if (direction == DMA_DEV_TO_MEM) { txd->cctl = plchan->src_cctl; slave_addr = plchan->src_addr; } else { @@ -1368,10 +1371,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( } if (plchan->cd->device_fc) - tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER : + tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER : PL080_FLOW_PER2MEM_PER; else - tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER : + tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER : PL080_FLOW_PER2MEM; txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT; @@ -1387,7 +1390,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( list_add_tail(&dsg->node, &txd->dsg_list); dsg->len = sg_dma_len(sg); - if (direction == DMA_TO_DEVICE) { + if (direction == DMA_MEM_TO_DEV) { dsg->src_addr = sg_phys(sg); dsg->dst_addr = slave_addr; } else { diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index fcfa0a8..f4aed5f 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -23,6 +23,8 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_device.h> #include "at_hdmac_regs.h" @@ -660,7 +662,7 @@ err_desc_get: */ static struct dma_async_tx_descriptor * atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, - unsigned int sg_len, enum dma_data_direction direction, + unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags) { struct at_dma_chan *atchan = to_at_dma_chan(chan); @@ -678,7 +680,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n", sg_len, - direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE", + direction == DMA_MEM_TO_DEV ? 
"TO DEVICE" : "FROM DEVICE", flags); if (unlikely(!atslave || !sg_len)) { @@ -692,7 +694,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ctrlb = ATC_IEN; switch (direction) { - case DMA_TO_DEVICE: + case DMA_MEM_TO_DEV: ctrla |= ATC_DST_WIDTH(reg_width); ctrlb |= ATC_DST_ADDR_MODE_FIXED | ATC_SRC_ADDR_MODE_INCR @@ -725,7 +727,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, total_len += len; } break; - case DMA_FROM_DEVICE: + case DMA_DEV_TO_MEM: ctrla |= ATC_SRC_WIDTH(reg_width); ctrlb |= ATC_DST_ADDR_MODE_INCR | ATC_SRC_ADDR_MODE_FIXED @@ -787,7 +789,7 @@ err_desc_get: */ static int atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr, - size_t period_len, enum dma_data_direction direction) + size_t period_len, enum dma_transfer_direction direction) { if (period_len > (ATC_BTSIZE_MAX << reg_width)) goto err_out; @@ -795,7 +797,7 @@ atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr, goto err_out; if (unlikely(buf_addr & ((1 << reg_width) - 1))) goto err_out; - if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE)))) + if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV)))) goto err_out; return 0; @@ -810,7 +812,7 @@ err_out: static int atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc, unsigned int period_index, dma_addr_t buf_addr, - size_t period_len, enum dma_data_direction direction) + size_t period_len, enum dma_transfer_direction direction) { u32 ctrla; unsigned int reg_width = atslave->reg_width; @@ -822,7 +824,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc, | period_len >> reg_width; switch (direction) { - case DMA_TO_DEVICE: + case DMA_MEM_TO_DEV: desc->lli.saddr = buf_addr + (period_len * period_index); desc->lli.daddr = atslave->tx_reg; desc->lli.ctrla = ctrla; @@ -833,7 +835,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc, | ATC_DIF(AT_DMA_PER_IF); break; - case DMA_FROM_DEVICE: + case DMA_DEV_TO_MEM: desc->lli.saddr = atslave->rx_reg; desc->lli.daddr = buf_addr + (period_len * period_index); desc->lli.ctrla = ctrla; @@ -861,7 +863,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc, */ static struct dma_async_tx_descriptor * atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, - size_t period_len, enum dma_data_direction direction) + size_t period_len, enum dma_transfer_direction direction) { struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma_slave *atslave = chan->private; @@ -872,7 +874,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, unsigned int i; dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n", - direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE", + direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", buf_addr, periods, buf_len, period_len); @@ -1175,6 +1177,56 @@ static void atc_free_chan_resources(struct dma_chan *chan) /*-- Module Management -----------------------------------------------*/ +/* cap_mask is a multi-u32 bitfield, fill it with proper C code. 
*/ +static struct at_dma_platform_data at91sam9rl_config = { + .nr_channels = 2, +}; +static struct at_dma_platform_data at91sam9g45_config = { + .nr_channels = 8, +}; + +#if defined(CONFIG_OF) +static const struct of_device_id atmel_dma_dt_ids[] = { + { + .compatible = "atmel,at91sam9rl-dma", + .data = &at91sam9rl_config, + }, { + .compatible = "atmel,at91sam9g45-dma", + .data = &at91sam9g45_config, + }, { + /* sentinel */ + } +}; + +MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids); +#endif + +static const struct platform_device_id atdma_devtypes[] = { + { + .name = "at91sam9rl_dma", + .driver_data = (unsigned long) &at91sam9rl_config, + }, { + .name = "at91sam9g45_dma", + .driver_data = (unsigned long) &at91sam9g45_config, + }, { + /* sentinel */ + } +}; + +static inline struct at_dma_platform_data * __init at_dma_get_driver_data( + struct platform_device *pdev) +{ + if (pdev->dev.of_node) { + const struct of_device_id *match; + match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node); + if (match == NULL) + return NULL; + return match->data; + } + return (struct at_dma_platform_data *) + platform_get_device_id(pdev)->driver_data; +} + /** * at_dma_off - disable DMA controller * @atdma: the Atmel HDAMC device @@ -1193,18 +1245,23 @@ static void at_dma_off(struct at_dma *atdma) static int __init at_dma_probe(struct platform_device *pdev) { - struct at_dma_platform_data *pdata; struct resource *io; struct at_dma *atdma; size_t size; int irq; int err; int i; + struct at_dma_platform_data *plat_dat; - /* get DMA Controller parameters from platform */ - pdata = pdev->dev.platform_data; - if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS) - return -EINVAL; + /* setup platform data for each SoC */ + dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask); + dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask); + dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask); + + /* get DMA parameters from controller type */ + plat_dat = at_dma_get_driver_data(pdev); + if (!plat_dat) + return -ENODEV; io = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!io) @@ -1215,14 +1272,14 @@ static int __init at_dma_probe(struct platform_device *pdev) return irq; size = sizeof(struct at_dma); - size += pdata->nr_channels * sizeof(struct at_dma_chan); + size += plat_dat->nr_channels * sizeof(struct at_dma_chan); atdma = kzalloc(size, GFP_KERNEL); if (!atdma) return -ENOMEM; - /* discover transaction capabilites from the platform data */ - atdma->dma_common.cap_mask = pdata->cap_mask; - atdma->all_chan_mask = (1 << pdata->nr_channels) - 1; + /* discover transaction capabilities */ + atdma->dma_common.cap_mask = plat_dat->cap_mask; + atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1; size = resource_size(io); if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { @@ -1268,7 +1325,7 @@ static int __init at_dma_probe(struct platform_device *pdev) /* initialize channels related values */ INIT_LIST_HEAD(&atdma->dma_common.channels); - for (i = 0; i < pdata->nr_channels; i++) { + for (i = 0; i < plat_dat->nr_channels; i++) { struct at_dma_chan *atchan = &atdma->chan[i]; atchan->chan_common.device = &atdma->dma_common; @@ -1286,7 +1343,7 @@ static int __init at_dma_probe(struct platform_device *pdev) tasklet_init(&atchan->tasklet, atc_tasklet, (unsigned long)atchan); - atc_enable_irq(atchan); + atc_enable_chan_irq(atdma, i); } /* set base routines */ @@ -1313,7 +1370,7 @@ static int __init at_dma_probe(struct platform_device *pdev) dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n", 
dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "", dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "", - pdata->nr_channels); + plat_dat->nr_channels); dma_async_device_register(&atdma->dma_common); @@ -1353,7 +1410,7 @@ static int __exit at_dma_remove(struct platform_device *pdev) struct at_dma_chan *atchan = to_at_dma_chan(chan); /* Disable interrupts */ - atc_disable_irq(atchan); + atc_disable_chan_irq(atdma, chan->chan_id); tasklet_disable(&atchan->tasklet); tasklet_kill(&atchan->tasklet); @@ -1495,9 +1552,11 @@ static const struct dev_pm_ops at_dma_dev_pm_ops = { static struct platform_driver at_dma_driver = { .remove = __exit_p(at_dma_remove), .shutdown = at_dma_shutdown, + .id_table = atdma_devtypes, .driver = { .name = "at_hdmac", .pm = &at_dma_dev_pm_ops, + .of_match_table = of_match_ptr(atmel_dma_dt_ids), }, }; diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index aa4c9ae..a8d3277 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -251,6 +251,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan) /** * struct at_dma - internal representation of an Atmel HDMA Controller * @chan_common: common dmaengine dma_device object members + * @atdma_devtype: identifier of DMA controller compatibility * @ch_regs: memory mapped register base * @clk: dma controller clock * @save_imr: interrupt mask register that is saved on suspend/resume cycle @@ -326,28 +327,27 @@ static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) } -static void atc_setup_irq(struct at_dma_chan *atchan, int on) +static void atc_setup_irq(struct at_dma *atdma, int chan_id, int on) { - struct at_dma *atdma = to_at_dma(atchan->chan_common.device); - u32 ebci; + u32 ebci; /* enable interrupts on buffer transfer completion & error */ - ebci = AT_DMA_BTC(atchan->chan_common.chan_id) - | AT_DMA_ERR(atchan->chan_common.chan_id); + ebci = AT_DMA_BTC(chan_id) + | AT_DMA_ERR(chan_id); if (on) dma_writel(atdma, EBCIER, ebci); else dma_writel(atdma, EBCIDR, ebci); } -static inline void atc_enable_irq(struct at_dma_chan *atchan) +static void atc_enable_chan_irq(struct at_dma *atdma, int chan_id) { - atc_setup_irq(atchan, 1); + atc_setup_irq(atdma, chan_id, 1); } -static inline void atc_disable_irq(struct at_dma_chan *atchan) +static void atc_disable_chan_irq(struct at_dma *atdma, int chan_id) { - atc_setup_irq(atchan, 0); + atc_setup_irq(atdma, chan_id, 0); } diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 4234f41..d65a718 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -39,7 +39,7 @@ struct coh901318_desc { struct scatterlist *sg; unsigned int sg_len; struct coh901318_lli *lli; - enum dma_data_direction dir; + enum dma_transfer_direction dir; unsigned long flags; u32 head_config; u32 head_ctrl; @@ -1034,7 +1034,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, static struct dma_async_tx_descriptor * coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, - unsigned int sg_len, enum dma_data_direction direction, + unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags) { struct coh901318_chan *cohc = to_coh901318_chan(chan); @@ -1077,7 +1077,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ctrl_last |= cohc->runtime_ctrl; ctrl |= cohc->runtime_ctrl; - if (direction == DMA_TO_DEVICE) { + if (direction == DMA_MEM_TO_DEV) { u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE | 
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE; @@ -1085,7 +1085,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ctrl_chained |= tx_flags; ctrl_last |= tx_flags; ctrl |= tx_flags; - } else if (direction == DMA_FROM_DEVICE) { + } else if (direction == DMA_DEV_TO_MEM) { u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE; @@ -1274,11 +1274,11 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan, int i = 0; /* We only support mem to per or per to mem transfers */ - if (config->direction == DMA_FROM_DEVICE) { + if (config->direction == DMA_DEV_TO_MEM) { addr = config->src_addr; addr_width = config->src_addr_width; maxburst = config->src_maxburst; - } else if (config->direction == DMA_TO_DEVICE) { + } else if (config->direction == DMA_MEM_TO_DEV) { addr = config->dst_addr; addr_width = config->dst_addr_width; maxburst = config->dst_maxburst; diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c index 9f7e0e6a..6c0e2d4 100644 --- a/drivers/dma/coh901318_lli.c +++ b/drivers/dma/coh901318_lli.c @@ -7,11 +7,10 @@ * Author: Per Friden <per.friden@stericsson.com> */ -#include <linux/dma-mapping.h> #include <linux/spinlock.h> -#include <linux/dmapool.h> #include <linux/memory.h> #include <linux/gfp.h> +#include <linux/dmapool.h> #include <mach/coh901318.h> #include "coh901318_lli.h" @@ -177,18 +176,18 @@ coh901318_lli_fill_single(struct coh901318_pool *pool, struct coh901318_lli *lli, dma_addr_t buf, unsigned int size, dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom, - enum dma_data_direction dir) + enum dma_transfer_direction dir) { int s = size; dma_addr_t src; dma_addr_t dst; - if (dir == DMA_TO_DEVICE) { + if (dir == DMA_MEM_TO_DEV) { src = buf; dst = dev_addr; - } else if (dir == DMA_FROM_DEVICE) { + } else if (dir == DMA_DEV_TO_MEM) { src = dev_addr; dst = buf; @@ -215,9 +214,9 @@ coh901318_lli_fill_single(struct coh901318_pool *pool, lli = coh901318_lli_next(lli); - if (dir == DMA_TO_DEVICE) + if (dir == DMA_MEM_TO_DEV) src += block_size; - else if (dir == DMA_FROM_DEVICE) + else if (dir == DMA_DEV_TO_MEM) dst += block_size; } @@ -234,7 +233,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool, struct scatterlist *sgl, unsigned int nents, dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl, u32 ctrl_last, - enum dma_data_direction dir, u32 ctrl_irq_mask) + enum dma_transfer_direction dir, u32 ctrl_irq_mask) { int i; struct scatterlist *sg; @@ -249,9 +248,9 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool, spin_lock(&pool->lock); - if (dir == DMA_TO_DEVICE) + if (dir == DMA_MEM_TO_DEV) dst = dev_addr; - else if (dir == DMA_FROM_DEVICE) + else if (dir == DMA_DEV_TO_MEM) src = dev_addr; else goto err; @@ -269,7 +268,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool, ctrl_sg = ctrl ? 
ctrl : ctrl_last; - if (dir == DMA_TO_DEVICE) + if (dir == DMA_MEM_TO_DEV) /* increment source address */ src = sg_phys(sg); else @@ -293,7 +292,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool, lli->src_addr = src; lli->dst_addr = dst; - if (dir == DMA_FROM_DEVICE) + if (dir == DMA_DEV_TO_MEM) dst += elem_size; else src += elem_size; diff --git a/drivers/dma/coh901318_lli.h b/drivers/dma/coh901318_lli.h index 7a5c809..abff371 100644 --- a/drivers/dma/coh901318_lli.h +++ b/drivers/dma/coh901318_lli.h @@ -97,7 +97,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool, struct coh901318_lli *lli, dma_addr_t buf, unsigned int size, dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last, - enum dma_data_direction dir); + enum dma_transfer_direction dir); /** * coh901318_lli_fill_single() - Prepares the lli:s for dma scatter list transfer @@ -119,6 +119,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool, struct scatterlist *sg, unsigned int nents, dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl, u32 ctrl_last, - enum dma_data_direction dir, u32 ctrl_irq_mask); + enum dma_transfer_direction dir, u32 ctrl_irq_mask); #endif /* COH901318_LLI_H */ diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index b48967b..a6c6051 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -693,12 +693,12 @@ int dma_async_device_register(struct dma_device *device) !device->device_prep_dma_interrupt); BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) && !device->device_prep_dma_sg); - BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && - !device->device_prep_slave_sg); BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic); BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && !device->device_control); + BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && + !device->device_prep_interleaved_dma); BUG_ON(!device->device_alloc_chan_resources); BUG_ON(!device->device_free_chan_resources); diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 2b8661b..24225f0 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -599,7 +599,7 @@ static int dmatest_add_channel(struct dma_chan *chan) } if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { cnt = dmatest_add_threads(dtc, DMA_PQ); - thread_count += cnt > 0 ?: 0; + thread_count += cnt > 0 ? cnt : 0; } pr_info("dmatest: Started %u threads using %s\n", diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 9bfd6d3..9b592b0 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -166,6 +166,38 @@ dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc) return cookie; } +static void dwc_initialize(struct dw_dma_chan *dwc) +{ + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + struct dw_dma_slave *dws = dwc->chan.private; + u32 cfghi = DWC_CFGH_FIFO_MODE; + u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); + + if (dwc->initialized == true) + return; + + if (dws) { + /* + * We need controller-specific data to set up slave + * transfers. 
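 * (dwc_initialize() now programs CFG_LO/CFG_HI and unmasks the channel
 *  interrupts once, at first start; the initialized flag is cleared again
 *  in dwc_free_chan_resources() and dw_dma_off(), so a re-acquired channel
 *  gets reprogrammed.)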
+ */ + BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); + + cfghi = dws->cfg_hi; + cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; + } + + channel_writel(dwc, CFG_LO, cfglo); + channel_writel(dwc, CFG_HI, cfghi); + + /* Enable interrupts */ + channel_set_bit(dw, MASK.XFER, dwc->mask); + channel_set_bit(dw, MASK.BLOCK, dwc->mask); + channel_set_bit(dw, MASK.ERROR, dwc->mask); + + dwc->initialized = true; +} + /*----------------------------------------------------------------------*/ /* Called with dwc->lock held and bh disabled */ @@ -189,6 +221,8 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) return; } + dwc_initialize(dwc); + channel_writel(dwc, LLP, first->txd.phys); channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); @@ -696,7 +730,7 @@ err_desc_get: static struct dma_async_tx_descriptor * dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, - unsigned int sg_len, enum dma_data_direction direction, + unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); @@ -720,7 +754,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, prev = first = NULL; switch (direction) { - case DMA_TO_DEVICE: + case DMA_MEM_TO_DEV: ctllo = (DWC_DEFAULT_CTLLO(chan->private) | DWC_CTLL_DST_WIDTH(reg_width) | DWC_CTLL_DST_FIX @@ -777,7 +811,7 @@ slave_sg_todev_fill_desc: goto slave_sg_todev_fill_desc; } break; - case DMA_FROM_DEVICE: + case DMA_DEV_TO_MEM: ctllo = (DWC_DEFAULT_CTLLO(chan->private) | DWC_CTLL_SRC_WIDTH(reg_width) | DWC_CTLL_DST_INC @@ -959,10 +993,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dw_dma *dw = to_dw_dma(chan->device); struct dw_desc *desc; - struct dw_dma_slave *dws; int i; - u32 cfghi; - u32 cfglo; unsigned long flags; dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); @@ -975,26 +1006,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) dwc->completed = chan->cookie = 1; - cfghi = DWC_CFGH_FIFO_MODE; - cfglo = 0; - - dws = chan->private; - if (dws) { - /* - * We need controller-specific data to set up slave - * transfers. 
- */ - BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); - - cfghi = dws->cfg_hi; - cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; - } - - cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority); - - channel_writel(dwc, CFG_LO, cfglo); - channel_writel(dwc, CFG_HI, cfghi); - /* * NOTE: some controllers may have additional features that we * need to initialize here, like "scatter-gather" (which @@ -1026,11 +1037,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) i = ++dwc->descs_allocated; } - /* Enable interrupts */ - channel_set_bit(dw, MASK.XFER, dwc->mask); - channel_set_bit(dw, MASK.BLOCK, dwc->mask); - channel_set_bit(dw, MASK.ERROR, dwc->mask); - spin_unlock_irqrestore(&dwc->lock, flags); dev_dbg(chan2dev(chan), @@ -1058,6 +1064,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan) spin_lock_irqsave(&dwc->lock, flags); list_splice_init(&dwc->free_list, &list); dwc->descs_allocated = 0; + dwc->initialized = false; /* Disable interrupts */ channel_clear_bit(dw, MASK.XFER, dwc->mask); @@ -1165,7 +1172,7 @@ EXPORT_SYMBOL(dw_dma_cyclic_stop); */ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, size_t period_len, - enum dma_data_direction direction) + enum dma_transfer_direction direction) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dw_cyclic_desc *cdesc; @@ -1206,7 +1213,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, goto out_err; if (unlikely(buf_addr & ((1 << reg_width) - 1))) goto out_err; - if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE)))) + if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM)))) goto out_err; retval = ERR_PTR(-ENOMEM); @@ -1228,7 +1235,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, goto out_err_desc_get; switch (direction) { - case DMA_TO_DEVICE: + case DMA_MEM_TO_DEV: desc->lli.dar = dws->tx_reg; desc->lli.sar = buf_addr + (period_len * i); desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private) @@ -1239,7 +1246,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | DWC_CTLL_FC(dws->fc) | DWC_CTLL_INT_EN); break; - case DMA_FROM_DEVICE: + case DMA_DEV_TO_MEM: desc->lli.dar = buf_addr + (period_len * i); desc->lli.sar = dws->rx_reg; desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private) @@ -1335,6 +1342,8 @@ EXPORT_SYMBOL(dw_dma_cyclic_free); static void dw_dma_off(struct dw_dma *dw) { + int i; + dma_writel(dw, CFG, 0); channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); @@ -1345,6 +1354,9 @@ static void dw_dma_off(struct dw_dma *dw) while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) cpu_relax(); + + for (i = 0; i < dw->dma.chancnt; i++) + dw->chan[i].initialized = false; } static int __init dw_probe(struct platform_device *pdev) @@ -1533,6 +1545,7 @@ static int dw_suspend_noirq(struct device *dev) dw_dma_off(platform_get_drvdata(pdev)); clk_disable(dw->clk); + return 0; } diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index c341951..5eef694 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h @@ -140,6 +140,7 @@ struct dw_dma_chan { u8 mask; u8 priority; bool paused; + bool initialized; spinlock_t lock; diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index b47e2b8..59e7a96 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -246,6 +246,9 @@ static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac, static struct ep93xx_dma_desc * ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac) { + if (list_empty(&edmac->active)) + return NULL; + 
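	/*
	 * ep93xx_dma_get_active() may now return NULL, e.g. after
	 * DMA_TERMINATE_ALL has emptied the active list, so the callers
	 * below (m2p_fill_desc(), m2m_fill_desc(), the tasklet and the
	 * interrupt handler) re-check the returned descriptor before
	 * dereferencing it.
	 */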
return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node); } @@ -263,16 +266,22 @@ ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac) */ static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac) { + struct ep93xx_dma_desc *desc; + list_rotate_left(&edmac->active); if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) return true; + desc = ep93xx_dma_get_active(edmac); + if (!desc) + return false; + /* * If txd.cookie is set it means that we are back in the first * descriptor in the chain and hence done with it. */ - return !ep93xx_dma_get_active(edmac)->txd.cookie; + return !desc->txd.cookie; } /* @@ -327,10 +336,16 @@ static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac) static void m2p_fill_desc(struct ep93xx_dma_chan *edmac) { - struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac); + struct ep93xx_dma_desc *desc; u32 bus_addr; - if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE) + desc = ep93xx_dma_get_active(edmac); + if (!desc) { + dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n"); + return; + } + + if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV) bus_addr = desc->src_addr; else bus_addr = desc->dst_addr; @@ -443,7 +458,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac) control = (5 << M2M_CONTROL_PWSC_SHIFT); control |= M2M_CONTROL_NO_HDSK; - if (data->direction == DMA_TO_DEVICE) { + if (data->direction == DMA_MEM_TO_DEV) { control |= M2M_CONTROL_DAH; control |= M2M_CONTROL_TM_TX; control |= M2M_CONTROL_RSS_SSPTX; @@ -459,11 +474,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac) * This IDE part is totally untested. Values below are taken * from the EP93xx Users's Guide and might not be correct. */ - control |= M2M_CONTROL_NO_HDSK; - control |= M2M_CONTROL_RSS_IDE; - control |= M2M_CONTROL_PW_16; - - if (data->direction == DMA_TO_DEVICE) { + if (data->direction == DMA_MEM_TO_DEV) { /* Worst case from the UG */ control = (3 << M2M_CONTROL_PWSC_SHIFT); control |= M2M_CONTROL_DAH; @@ -473,6 +484,10 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac) control |= M2M_CONTROL_SAH; control |= M2M_CONTROL_TM_RX; } + + control |= M2M_CONTROL_NO_HDSK; + control |= M2M_CONTROL_RSS_IDE; + control |= M2M_CONTROL_PW_16; break; default: @@ -491,7 +506,13 @@ static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac) static void m2m_fill_desc(struct ep93xx_dma_chan *edmac) { - struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac); + struct ep93xx_dma_desc *desc; + + desc = ep93xx_dma_get_active(edmac); + if (!desc) { + dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n"); + return; + } if (edmac->buffer == 0) { writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0); @@ -669,24 +690,30 @@ static void ep93xx_dma_tasklet(unsigned long data) { struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data; struct ep93xx_dma_desc *desc, *d; - dma_async_tx_callback callback; - void *callback_param; + dma_async_tx_callback callback = NULL; + void *callback_param = NULL; LIST_HEAD(list); spin_lock_irq(&edmac->lock); + /* + * If dma_terminate_all() was called before we get to run, the active + * list has become empty. If that happens we aren't supposed to do + * anything more than call ep93xx_dma_advance_work(). 
+ */ desc = ep93xx_dma_get_active(edmac); - if (desc->complete) { - edmac->last_completed = desc->txd.cookie; - list_splice_init(&edmac->active, &list); + if (desc) { + if (desc->complete) { + edmac->last_completed = desc->txd.cookie; + list_splice_init(&edmac->active, &list); + } + callback = desc->txd.callback; + callback_param = desc->txd.callback_param; } spin_unlock_irq(&edmac->lock); /* Pick up the next descriptor from the queue */ ep93xx_dma_advance_work(edmac); - callback = desc->txd.callback; - callback_param = desc->txd.callback_param; - /* Now we can release all the chained descriptors */ list_for_each_entry_safe(desc, d, &list, node) { /* @@ -706,13 +733,22 @@ static void ep93xx_dma_tasklet(unsigned long data) static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id) { struct ep93xx_dma_chan *edmac = dev_id; + struct ep93xx_dma_desc *desc; irqreturn_t ret = IRQ_HANDLED; spin_lock(&edmac->lock); + desc = ep93xx_dma_get_active(edmac); + if (!desc) { + dev_warn(chan2dev(edmac), + "got interrupt while active list is empty\n"); + spin_unlock(&edmac->lock); + return IRQ_NONE; + } + switch (edmac->edma->hw_interrupt(edmac)) { case INTERRUPT_DONE: - ep93xx_dma_get_active(edmac)->complete = true; + desc->complete = true; tasklet_schedule(&edmac->tasklet); break; @@ -803,8 +839,8 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan) switch (data->port) { case EP93XX_DMA_SSP: case EP93XX_DMA_IDE: - if (data->direction != DMA_TO_DEVICE && - data->direction != DMA_FROM_DEVICE) + if (data->direction != DMA_MEM_TO_DEV && + data->direction != DMA_DEV_TO_MEM) return -EINVAL; break; default: @@ -952,7 +988,7 @@ fail: */ static struct dma_async_tx_descriptor * ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, - unsigned int sg_len, enum dma_data_direction dir, + unsigned int sg_len, enum dma_transfer_direction dir, unsigned long flags) { struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); @@ -988,7 +1024,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, goto fail; } - if (dir == DMA_TO_DEVICE) { + if (dir == DMA_MEM_TO_DEV) { desc->src_addr = sg_dma_address(sg); desc->dst_addr = edmac->runtime_addr; } else { @@ -1032,7 +1068,7 @@ fail: static struct dma_async_tx_descriptor * ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, size_t period_len, - enum dma_data_direction dir) + enum dma_transfer_direction dir) { struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); struct ep93xx_dma_desc *desc, *first; @@ -1065,7 +1101,7 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, goto fail; } - if (dir == DMA_TO_DEVICE) { + if (dir == DMA_MEM_TO_DEV) { desc->src_addr = dma_addr + offset; desc->dst_addr = edmac->runtime_addr; } else { @@ -1133,12 +1169,12 @@ static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac, return -EINVAL; switch (config->direction) { - case DMA_FROM_DEVICE: + case DMA_DEV_TO_MEM: width = config->src_addr_width; addr = config->src_addr; break; - case DMA_TO_DEVICE: + case DMA_MEM_TO_DEV: width = config->dst_addr_width; addr = config->dst_addr; break; diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 8a78154..b98070c 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -772,7 +772,7 @@ fail: */ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, - enum dma_data_direction direction, unsigned long flags) + enum dma_transfer_direction 
direction, unsigned long flags) { /* * This operation is not supported on the Freescale DMA controller @@ -819,7 +819,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan, return -ENXIO; /* we set the controller burst size depending on direction */ - if (config->direction == DMA_TO_DEVICE) + if (config->direction == DMA_MEM_TO_DEV) size = config->dst_addr_width * config->dst_maxburst; else size = config->src_addr_width * config->src_maxburst; diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 4be55f9..e4383ee 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -107,7 +107,7 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, imx_dma_disable(imxdmac->imxdma_channel); return 0; case DMA_SLAVE_CONFIG: - if (dmaengine_cfg->direction == DMA_FROM_DEVICE) { + if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { imxdmac->per_address = dmaengine_cfg->src_addr; imxdmac->watermark_level = dmaengine_cfg->src_maxburst; imxdmac->word_size = dmaengine_cfg->src_addr_width; @@ -224,7 +224,7 @@ static void imxdma_free_chan_resources(struct dma_chan *chan) static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, - unsigned int sg_len, enum dma_data_direction direction, + unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags) { struct imxdma_channel *imxdmac = to_imxdma_chan(chan); @@ -241,7 +241,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( dma_length += sg->length; } - if (direction == DMA_FROM_DEVICE) + if (direction == DMA_DEV_TO_MEM) dmamode = DMA_MODE_READ; else dmamode = DMA_MODE_WRITE; @@ -271,7 +271,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, - size_t period_len, enum dma_data_direction direction) + size_t period_len, enum dma_transfer_direction direction) { struct imxdma_channel *imxdmac = to_imxdma_chan(chan); struct imxdma_engine *imxdma = imxdmac->imxdma; @@ -317,7 +317,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( imxdmac->sg_list[periods].page_link = ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02; - if (direction == DMA_FROM_DEVICE) + if (direction == DMA_DEV_TO_MEM) dmamode = DMA_MODE_READ; else dmamode = DMA_MODE_WRITE; diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index f993955..8bc5acf 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -247,7 +247,7 @@ struct sdma_engine; struct sdma_channel { struct sdma_engine *sdma; unsigned int channel; - enum dma_data_direction direction; + enum dma_transfer_direction direction; enum sdma_peripheral_type peripheral_type; unsigned int event_id0; unsigned int event_id1; @@ -268,6 +268,8 @@ struct sdma_channel { struct dma_async_tx_descriptor desc; dma_cookie_t last_completed; enum dma_status status; + unsigned int chn_count; + unsigned int chn_real_count; }; #define IMX_DMA_SG_LOOP (1 << 0) @@ -503,6 +505,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) struct sdma_buffer_descriptor *bd; int i, error = 0; + sdmac->chn_real_count = 0; /* * non loop mode. 
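 * (The new chn_real_count field accumulates bd->mode.count for each
 *  completed buffer descriptor, so that sdma_tx_status() can report
 *  chn_count - chn_real_count as the transfer residue.)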
Iterate over all descriptors, collect * errors and call callback function @@ -512,6 +515,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) if (bd->mode.status & (BD_DONE | BD_RROR)) error = -EIO; + sdmac->chn_real_count += bd->mode.count; } if (error) @@ -519,9 +523,9 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) else sdmac->status = DMA_SUCCESS; + sdmac->last_completed = sdmac->desc.cookie; if (sdmac->desc.callback) sdmac->desc.callback(sdmac->desc.callback_param); - sdmac->last_completed = sdmac->desc.cookie; } static void mxc_sdma_handle_channel(struct sdma_channel *sdmac) @@ -650,7 +654,7 @@ static int sdma_load_context(struct sdma_channel *sdmac) struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; int ret; - if (sdmac->direction == DMA_FROM_DEVICE) { + if (sdmac->direction == DMA_DEV_TO_MEM) { load_address = sdmac->pc_from_device; } else { load_address = sdmac->pc_to_device; @@ -832,17 +836,18 @@ static struct sdma_channel *to_sdma_chan(struct dma_chan *chan) static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) { + unsigned long flags; struct sdma_channel *sdmac = to_sdma_chan(tx->chan); struct sdma_engine *sdma = sdmac->sdma; dma_cookie_t cookie; - spin_lock_irq(&sdmac->lock); + spin_lock_irqsave(&sdmac->lock, flags); cookie = sdma_assign_cookie(sdmac); sdma_enable_channel(sdma, sdmac->channel); - spin_unlock_irq(&sdmac->lock); + spin_unlock_irqrestore(&sdmac->lock, flags); return cookie; } @@ -911,7 +916,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan) static struct dma_async_tx_descriptor *sdma_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, - unsigned int sg_len, enum dma_data_direction direction, + unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags) { struct sdma_channel *sdmac = to_sdma_chan(chan); @@ -941,6 +946,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( goto err_out; } + sdmac->chn_count = 0; for_each_sg(sgl, sg, sg_len, i) { struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; int param; @@ -957,6 +963,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( } bd->mode.count = count; + sdmac->chn_count += count; if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) { ret = -EINVAL; @@ -1008,7 +1015,7 @@ err_out: static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, - size_t period_len, enum dma_data_direction direction) + size_t period_len, enum dma_transfer_direction direction) { struct sdma_channel *sdmac = to_sdma_chan(chan); struct sdma_engine *sdma = sdmac->sdma; @@ -1093,15 +1100,18 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, sdma_disable_channel(sdmac); return 0; case DMA_SLAVE_CONFIG: - if (dmaengine_cfg->direction == DMA_FROM_DEVICE) { + if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { sdmac->per_address = dmaengine_cfg->src_addr; - sdmac->watermark_level = dmaengine_cfg->src_maxburst; + sdmac->watermark_level = dmaengine_cfg->src_maxburst * + dmaengine_cfg->src_addr_width; sdmac->word_size = dmaengine_cfg->src_addr_width; } else { sdmac->per_address = dmaengine_cfg->dst_addr; - sdmac->watermark_level = dmaengine_cfg->dst_maxburst; + sdmac->watermark_level = dmaengine_cfg->dst_maxburst * + dmaengine_cfg->dst_addr_width; sdmac->word_size = dmaengine_cfg->dst_addr_width; } + sdmac->direction = dmaengine_cfg->direction; return sdma_config_channel(sdmac); default: return -ENOSYS; @@ -1119,7 +1129,8 @@ static enum 
dma_status sdma_tx_status(struct dma_chan *chan, last_used = chan->cookie; - dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0); + dma_set_tx_state(txstate, sdmac->last_completed, last_used, + sdmac->chn_count - sdmac->chn_real_count); return sdmac->status; } diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index 19a0c64..74f70aa 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c @@ -280,7 +280,8 @@ static void midc_dostart(struct intel_mid_dma_chan *midc, * callbacks but must be called with the lock held. */ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, - struct intel_mid_dma_desc *desc) + struct intel_mid_dma_desc *desc) + __releases(&midc->lock) __acquires(&midc->lock) { struct dma_async_tx_descriptor *txd = &desc->txd; dma_async_tx_callback callback_txd = NULL; @@ -311,6 +312,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, pci_pool_free(desc->lli_pool, desc->lli, desc->lli_phys); pci_pool_destroy(desc->lli_pool); + desc->lli = NULL; } list_move(&desc->desc_node, &midc->free_list); midc->busy = false; @@ -395,10 +397,10 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc, midc->dma->block_size); /*Populate SAR and DAR values*/ sg_phy_addr = sg_phys(sg); - if (desc->dirn == DMA_TO_DEVICE) { + if (desc->dirn == DMA_MEM_TO_DEV) { lli_bloc_desc->sar = sg_phy_addr; lli_bloc_desc->dar = mids->dma_slave.dst_addr; - } else if (desc->dirn == DMA_FROM_DEVICE) { + } else if (desc->dirn == DMA_DEV_TO_MEM) { lli_bloc_desc->sar = mids->dma_slave.src_addr; lli_bloc_desc->dar = sg_phy_addr; } @@ -490,7 +492,9 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, ret = dma_async_is_complete(cookie, last_complete, last_used); if (ret != DMA_SUCCESS) { + spin_lock_bh(&midc->lock); midc_scan_descriptors(to_middma_device(chan->device), midc); + spin_unlock_bh(&midc->lock); last_complete = midc->completed; last_used = chan->cookie; @@ -566,6 +570,7 @@ static int intel_mid_dma_device_control(struct dma_chan *chan, pci_pool_free(desc->lli_pool, desc->lli, desc->lli_phys); pci_pool_destroy(desc->lli_pool); + desc->lli = NULL; } list_move(&desc->desc_node, &midc->free_list); } @@ -632,13 +637,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( if (midc->dma->pimr_mask) { cfg_hi.cfgx.protctl = 0x0; /*default value*/ cfg_hi.cfgx.fifo_mode = 1; - if (mids->dma_slave.direction == DMA_TO_DEVICE) { + if (mids->dma_slave.direction == DMA_MEM_TO_DEV) { cfg_hi.cfgx.src_per = 0; if (mids->device_instance == 0) cfg_hi.cfgx.dst_per = 3; if (mids->device_instance == 1) cfg_hi.cfgx.dst_per = 1; - } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) { + } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) { if (mids->device_instance == 0) cfg_hi.cfgx.src_per = 2; if (mids->device_instance == 1) @@ -682,11 +687,11 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( ctl_lo.ctlx.sinc = 0; ctl_lo.ctlx.dinc = 0; } else { - if (mids->dma_slave.direction == DMA_TO_DEVICE) { + if (mids->dma_slave.direction == DMA_MEM_TO_DEV) { ctl_lo.ctlx.sinc = 0; ctl_lo.ctlx.dinc = 2; ctl_lo.ctlx.tt_fc = 1; - } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) { + } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) { ctl_lo.ctlx.sinc = 2; ctl_lo.ctlx.dinc = 0; ctl_lo.ctlx.tt_fc = 2; @@ -732,7 +737,7 @@ err_desc_get: */ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, - unsigned int sg_len, enum 
dma_data_direction direction, + unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags) { struct intel_mid_dma_chan *midc = NULL; @@ -868,7 +873,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) pm_runtime_get_sync(&mid->pdev->dev); if (mid->state == SUSPENDED) { - if (dma_resume(mid->pdev)) { + if (dma_resume(&mid->pdev->dev)) { pr_err("ERR_MDMA: resume failed"); return -EFAULT; } @@ -1099,7 +1104,8 @@ static int mid_setup_dma(struct pci_dev *pdev) LNW_PERIPHRAL_MASK_SIZE); if (dma->mask_reg == NULL) { pr_err("ERR_MDMA:Can't map periphral intr space !!\n"); - return -ENOMEM; + err = -ENOMEM; + goto err_ioremap; } } else dma->mask_reg = NULL; @@ -1196,6 +1202,9 @@ static int mid_setup_dma(struct pci_dev *pdev) err_engine: free_irq(pdev->irq, dma); err_irq: + if (dma->mask_reg) + iounmap(dma->mask_reg); +err_ioremap: pci_pool_destroy(dma->dma_pool); err_dma_pool: pr_err("ERR_MDMA:setup_dma failed: %d\n", err); @@ -1337,8 +1346,9 @@ static void __devexit intel_mid_dma_remove(struct pci_dev *pdev) * * This function is called by OS when a power event occurs */ -int dma_suspend(struct pci_dev *pci, pm_message_t state) +static int dma_suspend(struct device *dev) { + struct pci_dev *pci = to_pci_dev(dev); int i; struct middma_device *device = pci_get_drvdata(pci); pr_debug("MDMA: dma_suspend called\n"); @@ -1362,8 +1372,9 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state) * * This function is called by OS when a power event occurs */ -int dma_resume(struct pci_dev *pci) +int dma_resume(struct device *dev) { + struct pci_dev *pci = to_pci_dev(dev); int ret; struct middma_device *device = pci_get_drvdata(pci); @@ -1429,6 +1440,8 @@ static const struct dev_pm_ops intel_mid_dma_pm = { .runtime_suspend = dma_runtime_suspend, .runtime_resume = dma_runtime_resume, .runtime_idle = dma_runtime_idle, + .suspend = dma_suspend, + .resume = dma_resume, }; static struct pci_driver intel_mid_dma_pci_driver = { @@ -1437,8 +1450,6 @@ static struct pci_driver intel_mid_dma_pci_driver = { .probe = intel_mid_dma_probe, .remove = __devexit_p(intel_mid_dma_remove), #ifdef CONFIG_PM - .suspend = dma_suspend, - .resume = dma_resume, .driver = { .pm = &intel_mid_dma_pm, }, diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h index aea5ee8..c83d35b 100644 --- a/drivers/dma/intel_mid_dma_regs.h +++ b/drivers/dma/intel_mid_dma_regs.h @@ -262,7 +262,7 @@ struct intel_mid_dma_desc { unsigned int lli_length; unsigned int current_lli; dma_addr_t next; - enum dma_data_direction dirn; + enum dma_transfer_direction dirn; enum dma_status status; enum dma_slave_buswidth width; /*width of DMA txn*/ enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ @@ -296,6 +296,6 @@ static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave } -int dma_resume(struct pci_dev *pci); +int dma_resume(struct device *dev); #endif /*__INTEL_MID_DMAC_REGS_H__*/ diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index e03f811..04be90b 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -1735,8 +1735,6 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan) spin_unlock_bh(&iop_chan->lock); } -MODULE_ALIAS("platform:iop-adma"); - static struct platform_driver iop_adma_driver = { .probe = iop_adma_probe, .remove = __devexit_p(iop_adma_remove), @@ -1746,19 +1744,9 @@ static struct platform_driver iop_adma_driver = { }, }; -static int __init iop_adma_init (void) -{ - return platform_driver_register(&iop_adma_driver); -} - 
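For reference: module_platform_driver(), used below, is the <linux/platform_device.h> helper that generates an init/exit pair roughly equivalent to the open-coded functions being deleted here; approximately:

static int __init iop_adma_driver_init(void)
{
	return platform_driver_register(&iop_adma_driver);
}
module_init(iop_adma_driver_init);

static void __exit iop_adma_driver_exit(void)
{
	platform_driver_unregister(&iop_adma_driver);
}
module_exit(iop_adma_driver_exit);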
-static void __exit iop_adma_exit (void) -{ - platform_driver_unregister(&iop_adma_driver); - return; -} -module_exit(iop_adma_exit); -module_init(iop_adma_init); +module_platform_driver(iop_adma_driver); MODULE_AUTHOR("Intel Corporation"); MODULE_DESCRIPTION("IOP ADMA Engine Driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:iop-adma"); diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 0e5ef33..6212b16 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -312,7 +312,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params, case IPU_PIX_FMT_RGB565: params->ip.bpp = 2; params->ip.pfs = 4; - params->ip.npb = 7; + params->ip.npb = 15; params->ip.sat = 2; /* SAT = 32-bit access */ params->ip.ofs0 = 0; /* Red bit offset */ params->ip.ofs1 = 5; /* Green bit offset */ @@ -422,12 +422,6 @@ static void ipu_ch_param_set_size(union chan_param_mem *params, params->pp.nsb = 1; } -static void ipu_ch_param_set_burst_size(union chan_param_mem *params, - uint16_t burst_pixels) -{ - params->pp.npb = burst_pixels - 1; -} - static void ipu_ch_param_set_buffer(union chan_param_mem *params, dma_addr_t buf0, dma_addr_t buf1) { @@ -690,23 +684,6 @@ static int ipu_init_channel_buffer(struct idmac_channel *ichan, ipu_ch_param_set_size(&params, pixel_fmt, width, height, stride_bytes); ipu_ch_param_set_buffer(&params, phyaddr_0, phyaddr_1); ipu_ch_param_set_rotation(&params, rot_mode); - /* Some channels (rotation) have restriction on burst length */ - switch (channel) { - case IDMAC_IC_7: /* Hangs with burst 8, 16, other values - invalid - Table 44-30 */ -/* - ipu_ch_param_set_burst_size(&params, 8); - */ - break; - case IDMAC_SDC_0: - case IDMAC_SDC_1: - /* In original code only IPU_PIX_FMT_RGB565 was setting burst */ - ipu_ch_param_set_burst_size(&params, 16); - break; - case IDMAC_IC_0: - default: - break; - } spin_lock_irqsave(&ipu->lock, flags); @@ -1364,7 +1341,7 @@ static void ipu_gc_tasklet(unsigned long arg) /* Allocate and initialise a transfer descriptor.
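 * (The direction argument below is now the dmaengine-specific
 *  enum dma_transfer_direction, i.e. DMA_MEM_TO_DEV / DMA_DEV_TO_MEM,
 *  rather than the DMA-mapping enum dma_data_direction.)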
*/ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, - enum dma_data_direction direction, unsigned long tx_flags) + enum dma_transfer_direction direction, unsigned long tx_flags) { struct idmac_channel *ichan = to_idmac_chan(chan); struct idmac_tx_desc *desc = NULL; @@ -1376,7 +1353,7 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan chan->chan_id != IDMAC_IC_7) return NULL; - if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) { + if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) { dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction); return NULL; } diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index 8ba4edc..4d6d4cf 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c @@ -835,17 +835,7 @@ static struct platform_driver mpc_dma_driver = { }, }; -static int __init mpc_dma_init(void) -{ - return platform_driver_register(&mpc_dma_driver); -} -module_init(mpc_dma_init); - -static void __exit mpc_dma_exit(void) -{ - platform_driver_unregister(&mpc_dma_driver); -} -module_exit(mpc_dma_exit); +module_platform_driver(mpc_dma_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>"); diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index fc903c0..b06cd4c 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -44,7 +44,6 @@ #define HW_APBHX_CTRL0 0x000 #define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29) #define BM_APBH_CTRL0_APB_BURST_EN (1 << 28) -#define BP_APBH_CTRL0_CLKGATE_CHANNEL 8 #define BP_APBH_CTRL0_RESET_CHANNEL 16 #define HW_APBHX_CTRL1 0x010 #define HW_APBHX_CTRL2 0x020 @@ -111,6 +110,7 @@ struct mxs_dma_chan { int chan_irq; struct mxs_dma_ccw *ccw; dma_addr_t ccw_phys; + int desc_count; dma_cookie_t last_completed; enum dma_status status; unsigned int flags; @@ -130,23 +130,6 @@ struct mxs_dma_engine { struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS]; }; -static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable) -{ - struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; - int chan_id = mxs_chan->chan.chan_id; - int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR; - - /* enable apbh channel clock */ - if (dma_is_apbh()) { - if (apbh_is_old()) - writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL), - mxs_dma->base + HW_APBHX_CTRL0 + set_clr); - else - writel(1 << chan_id, - mxs_dma->base + HW_APBHX_CTRL0 + set_clr); - } -} - static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) { struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; @@ -165,9 +148,6 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; int chan_id = mxs_chan->chan.chan_id; - /* clkgate needs to be enabled before writing other registers */ - mxs_dma_clkgate(mxs_chan, 1); - /* set cmd_addr up */ writel(mxs_chan->ccw_phys, mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id)); @@ -178,9 +158,6 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) { - /* disable apbh channel clock */ - mxs_dma_clkgate(mxs_chan, 0); - mxs_chan->status = DMA_SUCCESS; } @@ -268,7 +245,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) /* * When both completion and error of termination bits set at the * same time, we do not take it as an error. 
IOW, it only becomes - * an error we need to handler here in case of ether it's (1) an bus + * an error we need to handle here in case of either it's (1) a bus * error or (2) a termination error with no completion. */ stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */ @@ -338,10 +315,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) if (ret) goto err_clk; - /* clkgate needs to be enabled for reset to finish */ - mxs_dma_clkgate(mxs_chan, 1); mxs_dma_reset_chan(mxs_chan); - mxs_dma_clkgate(mxs_chan, 0); dma_async_tx_descriptor_init(&mxs_chan->desc, chan); mxs_chan->desc.tx_submit = mxs_dma_tx_submit; @@ -377,7 +351,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan) static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, - unsigned int sg_len, enum dma_data_direction direction, + unsigned int sg_len, enum dma_transfer_direction direction, unsigned long append) { struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); @@ -386,7 +360,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( struct scatterlist *sg; int i, j; u32 *pio; - static int idx; + int idx = append ? mxs_chan->desc_count : 0; if (mxs_chan->status == DMA_IN_PROGRESS && !append) return NULL; @@ -417,7 +391,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( idx = 0; } - if (direction == DMA_NONE) { + if (direction == DMA_TRANS_NONE) { ccw = &mxs_chan->ccw[idx++]; pio = (u32 *) sgl; @@ -450,7 +424,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( ccw->bits |= CCW_CHAIN; ccw->bits |= CCW_HALT_ON_TERM; ccw->bits |= CCW_TERM_FLUSH; - ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ? + ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ? MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); @@ -462,6 +436,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( } } } + mxs_chan->desc_count = idx; return &mxs_chan->desc; @@ -472,7 +447,7 @@ err_out: static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, - size_t period_len, enum dma_data_direction direction) + size_t period_len, enum dma_transfer_direction direction) { struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; @@ -515,7 +490,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( ccw->bits |= CCW_IRQ; ccw->bits |= CCW_HALT_ON_TERM; ccw->bits |= CCW_TERM_FLUSH; - ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ? + ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ? 
MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); dma_addr += period_len; @@ -523,6 +498,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( i++; } + mxs_chan->desc_count = i; return &mxs_chan->desc; @@ -539,8 +515,8 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, switch (cmd) { case DMA_TERMINATE_ALL: - mxs_dma_disable_chan(mxs_chan); mxs_dma_reset_chan(mxs_chan); + mxs_dma_disable_chan(mxs_chan); break; case DMA_PAUSE: mxs_dma_pause_chan(mxs_chan); @@ -580,7 +556,7 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) ret = clk_prepare_enable(mxs_dma->clk); if (ret) - goto err_out; + return ret; ret = mxs_reset_block(mxs_dma->base); if (ret) @@ -604,11 +580,8 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS, mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR); - clk_disable_unprepare(mxs_dma->clk); - - return 0; - err_out: + clk_disable_unprepare(mxs_dma->clk); return ret; } diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index a6d0e3d..823f581 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -1,7 +1,7 @@ /* * Topcliff PCH DMA controller driver * Copyright (c) 2010 Intel Corporation - * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD. + * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -99,7 +99,7 @@ struct pch_dma_desc { struct pch_dma_chan { struct dma_chan chan; void __iomem *membase; - enum dma_data_direction dir; + enum dma_transfer_direction dir; struct tasklet_struct tasklet; unsigned long err_status; @@ -224,7 +224,7 @@ static void pdc_set_dir(struct dma_chan *chan) mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS << (DMA_CTL0_BITS_PER_CH * chan->chan_id)); val &= mask_mode; - if (pd_chan->dir == DMA_TO_DEVICE) + if (pd_chan->dir == DMA_MEM_TO_DEV) val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + DMA_CTL0_DIR_SHIFT_BITS); else @@ -242,7 +242,7 @@ static void pdc_set_dir(struct dma_chan *chan) mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS << (DMA_CTL0_BITS_PER_CH * ch)); val &= mask_mode; - if (pd_chan->dir == DMA_TO_DEVICE) + if (pd_chan->dir == DMA_MEM_TO_DEV) val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch + DMA_CTL0_DIR_SHIFT_BITS); else @@ -607,7 +607,7 @@ static void pd_issue_pending(struct dma_chan *chan) static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, - enum dma_data_direction direction, unsigned long flags) + enum dma_transfer_direction direction, unsigned long flags) { struct pch_dma_chan *pd_chan = to_pd_chan(chan); struct pch_dma_slave *pd_slave = chan->private; @@ -623,9 +623,9 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, return NULL; } - if (direction == DMA_FROM_DEVICE) + if (direction == DMA_DEV_TO_MEM) reg = pd_slave->rx_reg; - else if (direction == DMA_TO_DEVICE) + else if (direction == DMA_MEM_TO_DEV) reg = pd_slave->tx_reg; else return NULL; @@ -1018,6 +1018,8 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev) #define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E #define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017 #define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B +#define PCI_DEVICE_ID_ML7831_DMA1_8CH 0x8810 +#define PCI_DEVICE_ID_ML7831_DMA2_4CH 0x8815 DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, 
@@ -1030,6 +1032,8 @@ DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = { { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */ + { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */ + { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */ { 0, }, }; @@ -1057,7 +1061,7 @@ static void __exit pch_dma_exit(void) module_init(pch_dma_init); module_exit(pch_dma_exit); -MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH " +MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH " "DMA controller driver"); MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 09adcfc..b8ec03e 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -350,14 +350,14 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned case DMA_SLAVE_CONFIG: slave_config = (struct dma_slave_config *)arg; - if (slave_config->direction == DMA_TO_DEVICE) { + if (slave_config->direction == DMA_MEM_TO_DEV) { if (slave_config->dst_addr) pch->fifo_addr = slave_config->dst_addr; if (slave_config->dst_addr_width) pch->burst_sz = __ffs(slave_config->dst_addr_width); if (slave_config->dst_maxburst) pch->burst_len = slave_config->dst_maxburst; - } else if (slave_config->direction == DMA_FROM_DEVICE) { + } else if (slave_config->direction == DMA_DEV_TO_MEM) { if (slave_config->src_addr) pch->fifo_addr = slave_config->src_addr; if (slave_config->src_addr_width) @@ -621,7 +621,7 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len) static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t dma_addr, size_t len, - size_t period_len, enum dma_data_direction direction) + size_t period_len, enum dma_transfer_direction direction) { struct dma_pl330_desc *desc; struct dma_pl330_chan *pch = to_pchan(chan); @@ -636,14 +636,14 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( } switch (direction) { - case DMA_TO_DEVICE: + case DMA_MEM_TO_DEV: desc->rqcfg.src_inc = 1; desc->rqcfg.dst_inc = 0; desc->req.rqtype = MEMTODEV; src = dma_addr; dst = pch->fifo_addr; break; - case DMA_FROM_DEVICE: + case DMA_DEV_TO_MEM: desc->rqcfg.src_inc = 0; desc->rqcfg.dst_inc = 1; desc->req.rqtype = DEVTOMEM; @@ -710,7 +710,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, static struct dma_async_tx_descriptor * pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, - unsigned int sg_len, enum dma_data_direction direction, + unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flg) { struct dma_pl330_desc *first, *desc = NULL; @@ -759,7 +759,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, else list_add_tail(&desc->node, &first->node); - if (direction == DMA_TO_DEVICE) { + if (direction == DMA_MEM_TO_DEV) { desc->rqcfg.src_inc = 1; desc->rqcfg.dst_inc = 0; desc->req.rqtype = MEMTODEV; @@ -834,17 +834,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) amba_set_drvdata(adev, pdmac); -#ifdef CONFIG_PM_RUNTIME - /* to use the runtime PM helper functions */ - pm_runtime_enable(&adev->dev); - - /* enable the power domain */ - if (pm_runtime_get_sync(&adev->dev)) { - dev_err(&adev->dev, "failed to get runtime pm\n"); - ret = -ENODEV; - goto probe_err1; - } -#else +#ifndef CONFIG_PM_RUNTIME /* 
enable dma clk */ clk_enable(pdmac->clk); #endif @@ -977,10 +967,7 @@ static int __devexit pl330_remove(struct amba_device *adev) res = &adev->res; release_mem_region(res->start, resource_size(res)); -#ifdef CONFIG_PM_RUNTIME - pm_runtime_put(&adev->dev); - pm_runtime_disable(&adev->dev); -#else +#ifndef CONFIG_PM_RUNTIME clk_disable(pdmac->clk); #endif diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index 81809c2..812fd76 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c @@ -23,7 +23,6 @@ #include <linux/interrupt.h> #include <linux/dmaengine.h> #include <linux/delay.h> -#include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/sh_dma.h> @@ -57,6 +56,15 @@ static LIST_HEAD(sh_dmae_devices); static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)]; static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); +static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan); + +static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data) +{ + struct sh_dmae_device *shdev = to_sh_dev(sh_dc); + + __raw_writel(data, shdev->chan_reg + + shdev->pdata->channel[sh_dc->id].chclr_offset); +} static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) { @@ -129,6 +137,15 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev) dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME); + if (shdev->pdata->chclr_present) { + int i; + for (i = 0; i < shdev->pdata->channel_num; i++) { + struct sh_dmae_chan *sh_chan = shdev->chan[i]; + if (sh_chan) + chclr_write(sh_chan, 0); + } + } + dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init); dmaor = dmaor_read(shdev); @@ -139,6 +156,10 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev) dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n"); return -EIO; } + if (shdev->pdata->dmaor_init & ~dmaor) + dev_warn(shdev->common.dev, + "DMAOR=0x%x hasn't latched the initial value 0x%x.\n", + dmaor, shdev->pdata->dmaor_init); return 0; } @@ -259,8 +280,6 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) return 0; } -static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan); - static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) { struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c; @@ -340,6 +359,8 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) sh_chan_xfer_ld_queue(sh_chan); sh_chan->pm_state = DMAE_PM_ESTABLISHED; } + } else { + sh_chan->pm_state = DMAE_PM_PENDING; } spin_unlock_irq(&sh_chan->desc_lock); @@ -479,19 +500,19 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan) * @sh_chan: DMA channel * @flags: DMA transfer flags * @dest: destination DMA address, incremented when direction equals - * DMA_FROM_DEVICE or DMA_BIDIRECTIONAL + * DMA_DEV_TO_MEM * @src: source DMA address, incremented when direction equals - * DMA_TO_DEVICE or DMA_BIDIRECTIONAL + * DMA_MEM_TO_DEV * @len: DMA transfer length * @first: if NULL, set to the current descriptor and cookie set to -EBUSY * @direction: needed for slave DMA to decide which address to keep constant, - * equals DMA_BIDIRECTIONAL for MEMCPY + * equals DMA_MEM_TO_MEM for MEMCPY * Returns 0 or an error * Locks: called with desc_lock held */ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len, - struct sh_desc **first, enum dma_data_direction direction) + struct sh_desc **first, enum dma_transfer_direction 
direction) { struct sh_desc *new; size_t copy_size; @@ -531,9 +552,9 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, new->direction = direction; *len -= copy_size; - if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE) + if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV) *src += copy_size; - if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE) + if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM) *dest += copy_size; return new; @@ -546,12 +567,12 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, * converted to scatter-gather to guarantee consistent locking and a correct * list manipulation. For slave DMA direction carries the usual meaning, and, * logically, the SG list is RAM and the addr variable contains slave address, - * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL + * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM * and the SG list contains only one element and points at the source buffer. */ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan, struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, - enum dma_data_direction direction, unsigned long flags) + enum dma_transfer_direction direction, unsigned long flags) { struct scatterlist *sg; struct sh_desc *first = NULL, *new = NULL /* compiler... */; @@ -592,7 +613,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n", i, sg, len, (unsigned long long)sg_addr); - if (direction == DMA_FROM_DEVICE) + if (direction == DMA_DEV_TO_MEM) new = sh_dmae_add_desc(sh_chan, flags, &sg_addr, addr, &len, &first, direction); @@ -646,13 +667,13 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( sg_dma_address(&sg) = dma_src; sg_dma_len(&sg) = len; - return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL, + return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, flags); } static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, - enum dma_data_direction direction, unsigned long flags) + enum dma_transfer_direction direction, unsigned long flags) { struct sh_dmae_slave *param; struct sh_dmae_chan *sh_chan; @@ -996,7 +1017,7 @@ static void dmae_do_tasklet(unsigned long data) spin_lock_irq(&sh_chan->desc_lock); list_for_each_entry(desc, &sh_chan->ld_queue, node) { if (desc->mark == DESC_SUBMITTED && - ((desc->direction == DMA_FROM_DEVICE && + ((desc->direction == DMA_DEV_TO_MEM && (desc->hw.dar + desc->hw.tcr) == dar_buf) || (desc->hw.sar + desc->hw.tcr) == sar_buf)) { dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n", @@ -1225,6 +1246,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev) platform_set_drvdata(pdev, shdev); + shdev->common.dev = &pdev->dev; + pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); @@ -1239,7 +1262,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev) INIT_LIST_HEAD(&shdev->common.channels); - dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); + if (!pdata->slave_only) + dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); if (pdata->slave && pdata->slave_num) dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); @@ -1254,7 +1278,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev) shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg; shdev->common.device_control = sh_dmae_control; - 
shdev->common.dev = &pdev->dev; /* Default transfer size of 32 bytes requires 32-byte alignment */ shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE; @@ -1435,22 +1458,17 @@ static int sh_dmae_runtime_resume(struct device *dev) #ifdef CONFIG_PM static int sh_dmae_suspend(struct device *dev) { - struct sh_dmae_device *shdev = dev_get_drvdata(dev); - int i; - - for (i = 0; i < shdev->pdata->channel_num; i++) { - struct sh_dmae_chan *sh_chan = shdev->chan[i]; - if (sh_chan->descs_allocated) - sh_chan->pm_error = pm_runtime_put_sync(dev); - } - return 0; } static int sh_dmae_resume(struct device *dev) { struct sh_dmae_device *shdev = dev_get_drvdata(dev); - int i; + int i, ret; + + ret = sh_dmae_rst(shdev); + if (ret < 0) + dev_err(dev, "Failed to reset!\n"); for (i = 0; i < shdev->pdata->channel_num; i++) { struct sh_dmae_chan *sh_chan = shdev->chan[i]; @@ -1459,9 +1477,6 @@ static int sh_dmae_resume(struct device *dev) if (!sh_chan->descs_allocated) continue; - if (!sh_chan->pm_error) - pm_runtime_get_sync(dev); - if (param) { const struct sh_dmae_slave_config *cfg = param->config; dmae_set_dmars(sh_chan, cfg->mid_rid); diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c new file mode 100644 index 0000000..2333810 --- /dev/null +++ b/drivers/dma/sirf-dma.c @@ -0,0 +1,707 @@ +/* + * DMA controller driver for CSR SiRFprimaII + * + * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. + * + * Licensed under GPLv2 or later. + */ + +#include <linux/module.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_platform.h> +#include <linux/sirfsoc_dma.h> + +#define SIRFSOC_DMA_DESCRIPTORS 16 +#define SIRFSOC_DMA_CHANNELS 16 + +#define SIRFSOC_DMA_CH_ADDR 0x00 +#define SIRFSOC_DMA_CH_XLEN 0x04 +#define SIRFSOC_DMA_CH_YLEN 0x08 +#define SIRFSOC_DMA_CH_CTRL 0x0C + +#define SIRFSOC_DMA_WIDTH_0 0x100 +#define SIRFSOC_DMA_CH_VALID 0x140 +#define SIRFSOC_DMA_CH_INT 0x144 +#define SIRFSOC_DMA_INT_EN 0x148 +#define SIRFSOC_DMA_CH_LOOP_CTRL 0x150 + +#define SIRFSOC_DMA_MODE_CTRL_BIT 4 +#define SIRFSOC_DMA_DIR_CTRL_BIT 5 + +/* xlen and dma_width register is in 4 bytes boundary */ +#define SIRFSOC_DMA_WORD_LEN 4 + +struct sirfsoc_dma_desc { + struct dma_async_tx_descriptor desc; + struct list_head node; + + /* SiRFprimaII 2D-DMA parameters */ + + int xlen; /* DMA xlen */ + int ylen; /* DMA ylen */ + int width; /* DMA width */ + int dir; + bool cyclic; /* is loop DMA? 
*/ + u32 addr; /* DMA buffer address */ +}; + +struct sirfsoc_dma_chan { + struct dma_chan chan; + struct list_head free; + struct list_head prepared; + struct list_head queued; + struct list_head active; + struct list_head completed; + dma_cookie_t completed_cookie; + unsigned long happened_cyclic; + unsigned long completed_cyclic; + + /* Lock for this structure */ + spinlock_t lock; + + int mode; +}; + +struct sirfsoc_dma { + struct dma_device dma; + struct tasklet_struct tasklet; + struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS]; + void __iomem *base; + int irq; +}; + +#define DRV_NAME "sirfsoc_dma" + +/* Convert struct dma_chan to struct sirfsoc_dma_chan */ +static inline +struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c) +{ + return container_of(c, struct sirfsoc_dma_chan, chan); +} + +/* Convert struct dma_chan to struct sirfsoc_dma */ +static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c) +{ + struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c); + return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]); +} + +/* Execute all queued DMA descriptors */ +static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan) +{ + struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); + int cid = schan->chan.chan_id; + struct sirfsoc_dma_desc *sdesc = NULL; + + /* + * lock has been held by functions calling this, so we don't hold + * lock again + */ + + sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc, + node); + /* Move the first queued descriptor to active list */ + list_move_tail(&schan->queued, &schan->active); + + /* Start the DMA transfer */ + writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 + + cid * 4); + writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) | + (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT), + sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL); + writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 + + SIRFSOC_DMA_CH_XLEN); + writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 + + SIRFSOC_DMA_CH_YLEN); + writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) | + (1 << cid), sdma->base + SIRFSOC_DMA_INT_EN); + + /* + * writel has an implict memory write barrier to make sure data is + * flushed into memory before starting DMA + */ + writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR); + + if (sdesc->cyclic) { + writel((1 << cid) | 1 << (cid + 16) | + readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL), + sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); + schan->happened_cyclic = schan->completed_cyclic = 0; + } +} + +/* Interrupt handler */ +static irqreturn_t sirfsoc_dma_irq(int irq, void *data) +{ + struct sirfsoc_dma *sdma = data; + struct sirfsoc_dma_chan *schan; + struct sirfsoc_dma_desc *sdesc = NULL; + u32 is; + int ch; + + is = readl(sdma->base + SIRFSOC_DMA_CH_INT); + while ((ch = fls(is) - 1) >= 0) { + is &= ~(1 << ch); + writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT); + schan = &sdma->channels[ch]; + + spin_lock(&schan->lock); + + sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, + node); + if (!sdesc->cyclic) { + /* Execute queued descriptors */ + list_splice_tail_init(&schan->active, &schan->completed); + if (!list_empty(&schan->queued)) + sirfsoc_dma_execute(schan); + } else + schan->happened_cyclic++; + + spin_unlock(&schan->lock); + } + + /* Schedule tasklet */ + tasklet_schedule(&sdma->tasklet); + + return IRQ_HANDLED; +} + +/* process completed descriptors */ +static void 
sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma) +{ + dma_cookie_t last_cookie = 0; + struct sirfsoc_dma_chan *schan; + struct sirfsoc_dma_desc *sdesc; + struct dma_async_tx_descriptor *desc; + unsigned long flags; + unsigned long happened_cyclic; + LIST_HEAD(list); + int i; + + for (i = 0; i < sdma->dma.chancnt; i++) { + schan = &sdma->channels[i]; + + /* Get all completed descriptors */ + spin_lock_irqsave(&schan->lock, flags); + if (!list_empty(&schan->completed)) { + list_splice_tail_init(&schan->completed, &list); + spin_unlock_irqrestore(&schan->lock, flags); + + /* Execute callbacks and run dependencies */ + list_for_each_entry(sdesc, &list, node) { + desc = &sdesc->desc; + + if (desc->callback) + desc->callback(desc->callback_param); + + last_cookie = desc->cookie; + dma_run_dependencies(desc); + } + + /* Free descriptors */ + spin_lock_irqsave(&schan->lock, flags); + list_splice_tail_init(&list, &schan->free); + schan->completed_cookie = last_cookie; + spin_unlock_irqrestore(&schan->lock, flags); + } else { + /* for cyclic channel, desc is always in active list */ + sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, + node); + + if (!sdesc || (sdesc && !sdesc->cyclic)) { + /* without active cyclic DMA */ + spin_unlock_irqrestore(&schan->lock, flags); + continue; + } + + /* cyclic DMA */ + happened_cyclic = schan->happened_cyclic; + spin_unlock_irqrestore(&schan->lock, flags); + + desc = &sdesc->desc; + while (happened_cyclic != schan->completed_cyclic) { + if (desc->callback) + desc->callback(desc->callback_param); + schan->completed_cyclic++; + } + } + } +} + +/* DMA Tasklet */ +static void sirfsoc_dma_tasklet(unsigned long data) +{ + struct sirfsoc_dma *sdma = (void *)data; + + sirfsoc_dma_process_completed(sdma); +} + +/* Submit descriptor to hardware */ +static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd) +{ + struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan); + struct sirfsoc_dma_desc *sdesc; + unsigned long flags; + dma_cookie_t cookie; + + sdesc = container_of(txd, struct sirfsoc_dma_desc, desc); + + spin_lock_irqsave(&schan->lock, flags); + + /* Move descriptor to queue */ + list_move_tail(&sdesc->node, &schan->queued); + + /* Update cookie */ + cookie = schan->chan.cookie + 1; + if (cookie <= 0) + cookie = 1; + + schan->chan.cookie = cookie; + sdesc->desc.cookie = cookie; + + spin_unlock_irqrestore(&schan->lock, flags); + + return cookie; +} + +static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan, + struct dma_slave_config *config) +{ + unsigned long flags; + + if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) || + (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)) + return -EINVAL; + + spin_lock_irqsave(&schan->lock, flags); + schan->mode = (config->src_maxburst == 4 ? 
1 : 0); + spin_unlock_irqrestore(&schan->lock, flags); + + return 0; +} + +static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan) +{ + struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); + int cid = schan->chan.chan_id; + unsigned long flags; + + writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) & + ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN); + writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID); + + writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL) + & ~((1 << cid) | 1 << (cid + 16)), + sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); + + spin_lock_irqsave(&schan->lock, flags); + list_splice_tail_init(&schan->active, &schan->free); + list_splice_tail_init(&schan->queued, &schan->free); + spin_unlock_irqrestore(&schan->lock, flags); + + return 0; +} + +static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct dma_slave_config *config; + struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); + + switch (cmd) { + case DMA_TERMINATE_ALL: + return sirfsoc_dma_terminate_all(schan); + case DMA_SLAVE_CONFIG: + config = (struct dma_slave_config *)arg; + return sirfsoc_dma_slave_config(schan, config); + + default: + break; + } + + return -ENOSYS; +} + +/* Alloc channel resources */ +static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan); + struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); + struct sirfsoc_dma_desc *sdesc; + unsigned long flags; + LIST_HEAD(descs); + int i; + + /* Alloc descriptors for this channel */ + for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) { + sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL); + if (!sdesc) { + dev_notice(sdma->dma.dev, "Memory allocation error. 
" + "Allocated only %u descriptors\n", i); + break; + } + + dma_async_tx_descriptor_init(&sdesc->desc, chan); + sdesc->desc.flags = DMA_CTRL_ACK; + sdesc->desc.tx_submit = sirfsoc_dma_tx_submit; + + list_add_tail(&sdesc->node, &descs); + } + + /* Return error only if no descriptors were allocated */ + if (i == 0) + return -ENOMEM; + + spin_lock_irqsave(&schan->lock, flags); + + list_splice_tail_init(&descs, &schan->free); + spin_unlock_irqrestore(&schan->lock, flags); + + return i; +} + +/* Free channel resources */ +static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan) +{ + struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); + struct sirfsoc_dma_desc *sdesc, *tmp; + unsigned long flags; + LIST_HEAD(descs); + + spin_lock_irqsave(&schan->lock, flags); + + /* Channel must be idle */ + BUG_ON(!list_empty(&schan->prepared)); + BUG_ON(!list_empty(&schan->queued)); + BUG_ON(!list_empty(&schan->active)); + BUG_ON(!list_empty(&schan->completed)); + + /* Move data */ + list_splice_tail_init(&schan->free, &descs); + + spin_unlock_irqrestore(&schan->lock, flags); + + /* Free descriptors */ + list_for_each_entry_safe(sdesc, tmp, &descs, node) + kfree(sdesc); +} + +/* Send pending descriptor to hardware */ +static void sirfsoc_dma_issue_pending(struct dma_chan *chan) +{ + struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&schan->lock, flags); + + if (list_empty(&schan->active) && !list_empty(&schan->queued)) + sirfsoc_dma_execute(schan); + + spin_unlock_irqrestore(&schan->lock, flags); +} + +/* Check request completion status */ +static enum dma_status +sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); + unsigned long flags; + dma_cookie_t last_used; + dma_cookie_t last_complete; + + spin_lock_irqsave(&schan->lock, flags); + last_used = schan->chan.cookie; + last_complete = schan->completed_cookie; + spin_unlock_irqrestore(&schan->lock, flags); + + dma_set_tx_state(txstate, last_complete, last_used, 0); + return dma_async_is_complete(cookie, last_complete, last_used); +} + +static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved( + struct dma_chan *chan, struct dma_interleaved_template *xt, + unsigned long flags) +{ + struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan); + struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); + struct sirfsoc_dma_desc *sdesc = NULL; + unsigned long iflags; + int ret; + + if ((xt->dir != DMA_MEM_TO_DEV) || (xt->dir != DMA_DEV_TO_MEM)) { + ret = -EINVAL; + goto err_dir; + } + + /* Get free descriptor */ + spin_lock_irqsave(&schan->lock, iflags); + if (!list_empty(&schan->free)) { + sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc, + node); + list_del(&sdesc->node); + } + spin_unlock_irqrestore(&schan->lock, iflags); + + if (!sdesc) { + /* try to free completed descriptors */ + sirfsoc_dma_process_completed(sdma); + ret = 0; + goto no_desc; + } + + /* Place descriptor in prepared list */ + spin_lock_irqsave(&schan->lock, iflags); + + /* + * Number of chunks in a frame can only be 1 for prima2 + * and ylen (number of frame - 1) must be at least 0 + */ + if ((xt->frame_size == 1) && (xt->numf > 0)) { + sdesc->cyclic = 0; + sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN; + sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) / + SIRFSOC_DMA_WORD_LEN; + sdesc->ylen = xt->numf - 1; + if (xt->dir == DMA_MEM_TO_DEV) { + 
sdesc->addr = xt->src_start; + sdesc->dir = 1; + } else { + sdesc->addr = xt->dst_start; + sdesc->dir = 0; + } + + list_add_tail(&sdesc->node, &schan->prepared); + } else { + pr_err("sirfsoc DMA Invalid xfer\n"); + ret = -EINVAL; + goto err_xfer; + } + spin_unlock_irqrestore(&schan->lock, iflags); + + return &sdesc->desc; +err_xfer: + spin_unlock_irqrestore(&schan->lock, iflags); +no_desc: +err_dir: + return ERR_PTR(ret); +} + +static struct dma_async_tx_descriptor * +sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr, + size_t buf_len, size_t period_len, + enum dma_transfer_direction direction) +{ + struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); + struct sirfsoc_dma_desc *sdesc = NULL; + unsigned long iflags; + + /* + * we only support cycle transfer with 2 period + * If the X-length is set to 0, it would be the loop mode. + * The DMA address keeps increasing until reaching the end of a loop + * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then + * the DMA address goes back to the beginning of this area. + * In loop mode, the DMA data region is divided into two parts, BUFA + * and BUFB. DMA controller generates interrupts twice in each loop: + * when the DMA address reaches the end of BUFA or the end of the + * BUFB + */ + if (buf_len != 2 * period_len) + return ERR_PTR(-EINVAL); + + /* Get free descriptor */ + spin_lock_irqsave(&schan->lock, iflags); + if (!list_empty(&schan->free)) { + sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc, + node); + list_del(&sdesc->node); + } + spin_unlock_irqrestore(&schan->lock, iflags); + + if (!sdesc) + return 0; + + /* Place descriptor in prepared list */ + spin_lock_irqsave(&schan->lock, iflags); + sdesc->addr = addr; + sdesc->cyclic = 1; + sdesc->xlen = 0; + sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1; + sdesc->width = 1; + list_add_tail(&sdesc->node, &schan->prepared); + spin_unlock_irqrestore(&schan->lock, iflags); + + return &sdesc->desc; +} + +/* + * The DMA controller consists of 16 independent DMA channels. 
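[Editor's note — a small, compile-checkable sketch (not part of the patch) of the 2D-parameter arithmetic used by sirfsoc_dma_prep_interleaved() and sirfsoc_dma_prep_cyclic() above: one frame of xt->sgl[0].size bytes maps to xlen/width words with numf frames giving ylen, and the cyclic case requires exactly two periods (the BUFA/BUFB halves described in the comment) with X length 0 selecting loop mode. The helper names are hypothetical; only the arithmetic mirrors the driver.]

#include <stddef.h>

#define SIRFSOC_DMA_WORD_LEN	4	/* xlen/width registers count 4-byte words */

struct sirfsoc_2d_params {
	int xlen;	/* words per line (0 selects loop mode) */
	int ylen;	/* number of lines minus one */
	int width;	/* line-to-line stride in words */
};

/* The frame_size == 1, numf > 0 case handled by sirfsoc_dma_prep_interleaved(). */
static int sirfsoc_fill_interleaved(size_t chunk_size, size_t icg, size_t numf,
				    struct sirfsoc_2d_params *p)
{
	if (numf < 1)
		return -1;
	p->xlen  = chunk_size / SIRFSOC_DMA_WORD_LEN;
	p->width = (chunk_size + icg) / SIRFSOC_DMA_WORD_LEN;
	p->ylen  = numf - 1;
	return 0;
}

/* The loop-mode case handled by sirfsoc_dma_prep_cyclic(). */
static int sirfsoc_fill_cyclic(size_t buf_len, size_t period_len,
			       struct sirfsoc_2d_params *p)
{
	if (buf_len != 2 * period_len)
		return -1;	/* hardware loops over exactly two halves (BUFA/BUFB) */
	p->xlen  = 0;		/* X length 0 selects loop mode */
	p->ylen  = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
	p->width = 1;
	return 0;
}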
+ * Each channel is allocated to a different function + */ +bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id) +{ + unsigned int ch_nr = (unsigned int) chan_id; + + if (ch_nr == chan->chan_id + + chan->device->dev_id * SIRFSOC_DMA_CHANNELS) + return true; + + return false; +} +EXPORT_SYMBOL(sirfsoc_dma_filter_id); + +static int __devinit sirfsoc_dma_probe(struct platform_device *op) +{ + struct device_node *dn = op->dev.of_node; + struct device *dev = &op->dev; + struct dma_device *dma; + struct sirfsoc_dma *sdma; + struct sirfsoc_dma_chan *schan; + struct resource res; + ulong regs_start, regs_size; + u32 id; + int ret, i; + + sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL); + if (!sdma) { + dev_err(dev, "Memory exhausted!\n"); + return -ENOMEM; + } + + if (of_property_read_u32(dn, "cell-index", &id)) { + dev_err(dev, "Fail to get DMAC index\n"); + ret = -ENODEV; + goto free_mem; + } + + sdma->irq = irq_of_parse_and_map(dn, 0); + if (sdma->irq == NO_IRQ) { + dev_err(dev, "Error mapping IRQ!\n"); + ret = -EINVAL; + goto free_mem; + } + + ret = of_address_to_resource(dn, 0, &res); + if (ret) { + dev_err(dev, "Error parsing memory region!\n"); + goto free_mem; + } + + regs_start = res.start; + regs_size = resource_size(&res); + + sdma->base = devm_ioremap(dev, regs_start, regs_size); + if (!sdma->base) { + dev_err(dev, "Error mapping memory region!\n"); + ret = -ENOMEM; + goto irq_dispose; + } + + ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, + sdma); + if (ret) { + dev_err(dev, "Error requesting IRQ!\n"); + ret = -EINVAL; + goto unmap_mem; + } + + dma = &sdma->dma; + dma->dev = dev; + dma->chancnt = SIRFSOC_DMA_CHANNELS; + + dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources; + dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources; + dma->device_issue_pending = sirfsoc_dma_issue_pending; + dma->device_control = sirfsoc_dma_control; + dma->device_tx_status = sirfsoc_dma_tx_status; + dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved; + dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic; + + INIT_LIST_HEAD(&dma->channels); + dma_cap_set(DMA_SLAVE, dma->cap_mask); + dma_cap_set(DMA_CYCLIC, dma->cap_mask); + dma_cap_set(DMA_INTERLEAVE, dma->cap_mask); + dma_cap_set(DMA_PRIVATE, dma->cap_mask); + + for (i = 0; i < dma->chancnt; i++) { + schan = &sdma->channels[i]; + + schan->chan.device = dma; + schan->chan.cookie = 1; + schan->completed_cookie = schan->chan.cookie; + + INIT_LIST_HEAD(&schan->free); + INIT_LIST_HEAD(&schan->prepared); + INIT_LIST_HEAD(&schan->queued); + INIT_LIST_HEAD(&schan->active); + INIT_LIST_HEAD(&schan->completed); + + spin_lock_init(&schan->lock); + list_add_tail(&schan->chan.device_node, &dma->channels); + } + + tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma); + + /* Register DMA engine */ + dev_set_drvdata(dev, sdma); + ret = dma_async_device_register(dma); + if (ret) + goto free_irq; + + dev_info(dev, "initialized SIRFSOC DMAC driver\n"); + + return 0; + +free_irq: + devm_free_irq(dev, sdma->irq, sdma); +irq_dispose: + irq_dispose_mapping(sdma->irq); +unmap_mem: + iounmap(sdma->base); +free_mem: + devm_kfree(dev, sdma); + return ret; +} + +static int __devexit sirfsoc_dma_remove(struct platform_device *op) +{ + struct device *dev = &op->dev; + struct sirfsoc_dma *sdma = dev_get_drvdata(dev); + + dma_async_device_unregister(&sdma->dma); + devm_free_irq(dev, sdma->irq, sdma); + irq_dispose_mapping(sdma->irq); + iounmap(sdma->base); + devm_kfree(dev, sdma); + 
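[Editor's note — a hedged client-side sketch, not part of the patch, showing how a SiRFprimaII peripheral driver would be expected to claim one of the 16 channels through the filter exported above, via the generic dma_request_channel() helper. It assumes the prototype of sirfsoc_dma_filter_id() is made available through the <linux/sirfsoc_dma.h> header this file includes; chan_id is the platform-defined channel number the filter compares against, and the function name here is hypothetical.]

#include <linux/dmaengine.h>
#include <linux/sirfsoc_dma.h>

static struct dma_chan *sirfsoc_client_request_chan(unsigned int chan_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* chan_id == chan->chan_id + dev_id * SIRFSOC_DMA_CHANNELS, see filter above */
	return dma_request_channel(mask, sirfsoc_dma_filter_id,
				   (void *)(unsigned long)chan_id);
}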
return 0; +} + +static struct of_device_id sirfsoc_dma_match[] = { + { .compatible = "sirf,prima2-dmac", }, + {}, +}; + +static struct platform_driver sirfsoc_dma_driver = { + .probe = sirfsoc_dma_probe, + .remove = __devexit_p(sirfsoc_dma_remove), + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = sirfsoc_dma_match, + }, +}; + +module_platform_driver(sirfsoc_dma_driver); + +MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, " + "Barry Song <baohua.song@csr.com>"); +MODULE_DESCRIPTION("SIRFSOC DMA control driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 13259ca..cc5ecbc 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -14,6 +14,8 @@ #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/delay.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> #include <linux/err.h> #include <linux/amba/bus.h> @@ -32,6 +34,9 @@ /* Maximum iterations taken before giving up suspending a channel */ #define D40_SUSPEND_MAX_IT 500 +/* Milliseconds */ +#define DMA40_AUTOSUSPEND_DELAY 100 + /* Hardware requirement on LCLA alignment */ #define LCLA_ALIGNMENT 0x40000 @@ -62,6 +67,55 @@ enum d40_command { D40_DMA_SUSPENDED = 3 }; +/* + * These are the registers that has to be saved and later restored + * when the DMA hw is powered off. + * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works. + */ +static u32 d40_backup_regs[] = { + D40_DREG_LCPA, + D40_DREG_LCLA, + D40_DREG_PRMSE, + D40_DREG_PRMSO, + D40_DREG_PRMOE, + D40_DREG_PRMOO, +}; + +#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs) + +/* TODO: Check if all these registers have to be saved/restored on dma40 v3 */ +static u32 d40_backup_regs_v3[] = { + D40_DREG_PSEG1, + D40_DREG_PSEG2, + D40_DREG_PSEG3, + D40_DREG_PSEG4, + D40_DREG_PCEG1, + D40_DREG_PCEG2, + D40_DREG_PCEG3, + D40_DREG_PCEG4, + D40_DREG_RSEG1, + D40_DREG_RSEG2, + D40_DREG_RSEG3, + D40_DREG_RSEG4, + D40_DREG_RCEG1, + D40_DREG_RCEG2, + D40_DREG_RCEG3, + D40_DREG_RCEG4, +}; + +#define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3) + +static u32 d40_backup_regs_chan[] = { + D40_CHAN_REG_SSCFG, + D40_CHAN_REG_SSELT, + D40_CHAN_REG_SSPTR, + D40_CHAN_REG_SSLNK, + D40_CHAN_REG_SDCFG, + D40_CHAN_REG_SDELT, + D40_CHAN_REG_SDPTR, + D40_CHAN_REG_SDLNK, +}; + /** * struct d40_lli_pool - Structure for keeping LLIs in memory * @@ -96,7 +150,7 @@ struct d40_lli_pool { * during a transfer. * @node: List entry. * @is_in_client_list: true if the client owns this descriptor. - * the previous one. + * @cyclic: true if this is a cyclic job * * This descriptor is used for both logical and physical transfers. */ @@ -143,6 +197,7 @@ struct d40_lcla_pool { * channels. * * @lock: A lock protection this entity. + * @reserved: True if used by secure world or otherwise. * @num: The physical channel number of this entity. * @allocated_src: Bit mapped to show which src event line's are mapped to * this physical channel. Can also be free or physically allocated. @@ -152,6 +207,7 @@ struct d40_lcla_pool { */ struct d40_phy_res { spinlock_t lock; + bool reserved; int num; u32 allocated_src; u32 allocated_dst; @@ -185,7 +241,6 @@ struct d40_base; * @src_def_cfg: Default cfg register setting for src. * @dst_def_cfg: Default cfg register setting for dst. * @log_def: Default logical channel settings. - * @lcla: Space for one dst src pair for logical channel transfers. * @lcpa: Pointer to dst and src lcpa settings. * @runtime_addr: runtime configured address. 
* @runtime_direction: runtime configured direction. @@ -217,7 +272,7 @@ struct d40_chan { struct d40_log_lli_full *lcpa; /* Runtime reconfiguration */ dma_addr_t runtime_addr; - enum dma_data_direction runtime_direction; + enum dma_transfer_direction runtime_direction; }; /** @@ -241,6 +296,7 @@ struct d40_chan { * @dma_both: dma_device channels that can do both memcpy and slave transfers. * @dma_slave: dma_device channels that can do only do slave transfers. * @dma_memcpy: dma_device channels that can do only do memcpy transfers. + * @phy_chans: Room for all possible physical channels in system. * @log_chans: Room for all possible logical channels in system. * @lookup_log_chans: Used to map interrupt number to logical channel. Points * to log_chans entries. @@ -248,12 +304,20 @@ struct d40_chan { * to phy_chans entries. * @plat_data: Pointer to provided platform_data which is the driver * configuration. + * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla. * @phy_res: Vector containing all physical channels. * @lcla_pool: lcla pool settings and data. * @lcpa_base: The virtual mapped address of LCPA. * @phy_lcpa: The physical address of the LCPA. * @lcpa_size: The size of the LCPA area. * @desc_slab: cache for descriptors. + * @reg_val_backup: Here the values of some hardware registers are stored + * before the DMA is powered off. They are restored when the power is back on. + * @reg_val_backup_v3: Backup of registers that only exits on dma40 v3 and + * later. + * @reg_val_backup_chan: Backup data for standard channel parameter registers. + * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off. + * @initialized: true if the dma has been initialized */ struct d40_base { spinlock_t interrupt_lock; @@ -275,6 +339,7 @@ struct d40_base { struct d40_chan **lookup_log_chans; struct d40_chan **lookup_phy_chans; struct stedma40_platform_data *plat_data; + struct regulator *lcpa_regulator; /* Physical half channels */ struct d40_phy_res *phy_res; struct d40_lcla_pool lcla_pool; @@ -282,6 +347,11 @@ struct d40_base { dma_addr_t phy_lcpa; resource_size_t lcpa_size; struct kmem_cache *desc_slab; + u32 reg_val_backup[BACKUP_REGS_SZ]; + u32 reg_val_backup_v3[BACKUP_REGS_SZ_V3]; + u32 *reg_val_backup_chan; + u16 gcc_pwr_off_mask; + bool initialized; }; /** @@ -479,13 +549,14 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c) struct d40_desc *d; struct d40_desc *_d; - list_for_each_entry_safe(d, _d, &d40c->client, node) + list_for_each_entry_safe(d, _d, &d40c->client, node) { if (async_tx_test_ack(&d->txd)) { d40_desc_remove(d); desc = d; memset(desc, 0, sizeof(*desc)); break; } + } } if (!desc) @@ -536,6 +607,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) bool cyclic = desc->cyclic; int curr_lcla = -EINVAL; int first_lcla = 0; + bool use_esram_lcla = chan->base->plat_data->use_esram_lcla; bool linkback; /* @@ -608,11 +680,16 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) &lli->src[lli_current], next_lcla, flags); - dma_sync_single_range_for_device(chan->base->dev, - pool->dma_addr, lcla_offset, - 2 * sizeof(struct d40_log_lli), - DMA_TO_DEVICE); - + /* + * Cache maintenance is not needed if lcla is + * mapped in esram + */ + if (!use_esram_lcla) { + dma_sync_single_range_for_device(chan->base->dev, + pool->dma_addr, lcla_offset, + 2 * sizeof(struct d40_log_lli), + DMA_TO_DEVICE); + } curr_lcla = next_lcla; if (curr_lcla == -EINVAL || curr_lcla == first_lcla) { @@ -740,7 +817,61 
@@ static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len, return len; } -/* Support functions for logical channels */ + +#ifdef CONFIG_PM +static void dma40_backup(void __iomem *baseaddr, u32 *backup, + u32 *regaddr, int num, bool save) +{ + int i; + + for (i = 0; i < num; i++) { + void __iomem *addr = baseaddr + regaddr[i]; + + if (save) + backup[i] = readl_relaxed(addr); + else + writel_relaxed(backup[i], addr); + } +} + +static void d40_save_restore_registers(struct d40_base *base, bool save) +{ + int i; + + /* Save/Restore channel specific registers */ + for (i = 0; i < base->num_phy_chans; i++) { + void __iomem *addr; + int idx; + + if (base->phy_res[i].reserved) + continue; + + addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA; + idx = i * ARRAY_SIZE(d40_backup_regs_chan); + + dma40_backup(addr, &base->reg_val_backup_chan[idx], + d40_backup_regs_chan, + ARRAY_SIZE(d40_backup_regs_chan), + save); + } + + /* Save/Restore global registers */ + dma40_backup(base->virtbase, base->reg_val_backup, + d40_backup_regs, ARRAY_SIZE(d40_backup_regs), + save); + + /* Save/Restore registers only existing on dma40 v3 and later */ + if (base->rev >= 3) + dma40_backup(base->virtbase, base->reg_val_backup_v3, + d40_backup_regs_v3, + ARRAY_SIZE(d40_backup_regs_v3), + save); +} +#else +static void d40_save_restore_registers(struct d40_base *base, bool save) +{ +} +#endif static int d40_channel_execute_command(struct d40_chan *d40c, enum d40_command command) @@ -973,6 +1104,10 @@ static void d40_config_write(struct d40_chan *d40c) /* Set LIDX for lcla */ writel(lidx, chanbase + D40_CHAN_REG_SSELT); writel(lidx, chanbase + D40_CHAN_REG_SDELT); + + /* Clear LNK which will be used by d40_chan_has_events() */ + writel(0, chanbase + D40_CHAN_REG_SSLNK); + writel(0, chanbase + D40_CHAN_REG_SDLNK); } } @@ -1013,6 +1148,7 @@ static int d40_pause(struct d40_chan *d40c) if (!d40c->busy) return 0; + pm_runtime_get_sync(d40c->base->dev); spin_lock_irqsave(&d40c->lock, flags); res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); @@ -1025,7 +1161,8 @@ static int d40_pause(struct d40_chan *d40c) D40_DMA_RUN); } } - + pm_runtime_mark_last_busy(d40c->base->dev); + pm_runtime_put_autosuspend(d40c->base->dev); spin_unlock_irqrestore(&d40c->lock, flags); return res; } @@ -1039,7 +1176,7 @@ static int d40_resume(struct d40_chan *d40c) return 0; spin_lock_irqsave(&d40c->lock, flags); - + pm_runtime_get_sync(d40c->base->dev); if (d40c->base->rev == 0) if (chan_is_logical(d40c)) { res = d40_channel_execute_command(d40c, @@ -1057,6 +1194,8 @@ static int d40_resume(struct d40_chan *d40c) } no_suspend: + pm_runtime_mark_last_busy(d40c->base->dev); + pm_runtime_put_autosuspend(d40c->base->dev); spin_unlock_irqrestore(&d40c->lock, flags); return res; } @@ -1129,7 +1268,10 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c) d40d = d40_first_queued(d40c); if (d40d != NULL) { - d40c->busy = true; + if (!d40c->busy) + d40c->busy = true; + + pm_runtime_get_sync(d40c->base->dev); /* Remove from queue */ d40_desc_remove(d40d); @@ -1190,6 +1332,8 @@ static void dma_tc_handle(struct d40_chan *d40c) if (d40_queue_start(d40c) == NULL) d40c->busy = false; + pm_runtime_mark_last_busy(d40c->base->dev); + pm_runtime_put_autosuspend(d40c->base->dev); } d40c->pending_tx++; @@ -1405,11 +1549,16 @@ static int d40_validate_conf(struct d40_chan *d40c, return res; } -static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src, - int log_event_line, bool is_log) +static bool d40_alloc_mask_set(struct 
d40_phy_res *phy, + bool is_src, int log_event_line, bool is_log, + bool *first_user) { unsigned long flags; spin_lock_irqsave(&phy->lock, flags); + + *first_user = ((phy->allocated_src | phy->allocated_dst) + == D40_ALLOC_FREE); + if (!is_log) { /* Physical interrupts are masked per physical full channel */ if (phy->allocated_src == D40_ALLOC_FREE && @@ -1490,7 +1639,7 @@ out: return is_free; } -static int d40_allocate_channel(struct d40_chan *d40c) +static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) { int dev_type; int event_group; @@ -1526,7 +1675,8 @@ static int d40_allocate_channel(struct d40_chan *d40c) for (i = 0; i < d40c->base->num_phy_chans; i++) { if (d40_alloc_mask_set(&phys[i], is_src, - 0, is_log)) + 0, is_log, + first_phy_user)) goto found_phy; } } else @@ -1536,7 +1686,8 @@ static int d40_allocate_channel(struct d40_chan *d40c) if (d40_alloc_mask_set(&phys[i], is_src, 0, - is_log)) + is_log, + first_phy_user)) goto found_phy; } } @@ -1552,6 +1703,25 @@ found_phy: /* Find logical channel */ for (j = 0; j < d40c->base->num_phy_chans; j += 8) { int phy_num = j + event_group * 2; + + if (d40c->dma_cfg.use_fixed_channel) { + i = d40c->dma_cfg.phy_channel; + + if ((i != phy_num) && (i != phy_num + 1)) { + dev_err(chan2dev(d40c), + "invalid fixed phy channel %d\n", i); + return -EINVAL; + } + + if (d40_alloc_mask_set(&phys[i], is_src, event_line, + is_log, first_phy_user)) + goto found_log; + + dev_err(chan2dev(d40c), + "could not allocate fixed phy channel %d\n", i); + return -EINVAL; + } + /* * Spread logical channels across all available physical rather * than pack every logical channel at the first available phy @@ -1560,13 +1730,15 @@ found_phy: if (is_src) { for (i = phy_num; i < phy_num + 2; i++) { if (d40_alloc_mask_set(&phys[i], is_src, - event_line, is_log)) + event_line, is_log, + first_phy_user)) goto found_log; } } else { for (i = phy_num + 1; i >= phy_num; i--) { if (d40_alloc_mask_set(&phys[i], is_src, - event_line, is_log)) + event_line, is_log, + first_phy_user)) goto found_log; } } @@ -1643,10 +1815,11 @@ static int d40_free_dma(struct d40_chan *d40c) return -EINVAL; } + pm_runtime_get_sync(d40c->base->dev); res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); if (res) { chan_err(d40c, "suspend failed\n"); - return res; + goto out; } if (chan_is_logical(d40c)) { @@ -1664,13 +1837,11 @@ static int d40_free_dma(struct d40_chan *d40c) if (d40_chan_has_events(d40c)) { res = d40_channel_execute_command(d40c, D40_DMA_RUN); - if (res) { + if (res) chan_err(d40c, "Executing RUN command\n"); - return res; - } } - return 0; + goto out; } } else { (void) d40_alloc_mask_free(phy, is_src, 0); @@ -1680,13 +1851,23 @@ static int d40_free_dma(struct d40_chan *d40c) res = d40_channel_execute_command(d40c, D40_DMA_STOP); if (res) { chan_err(d40c, "Failed to stop channel\n"); - return res; + goto out; } + + if (d40c->busy) { + pm_runtime_mark_last_busy(d40c->base->dev); + pm_runtime_put_autosuspend(d40c->base->dev); + } + + d40c->busy = false; d40c->phy_chan = NULL; d40c->configured = false; d40c->base->lookup_phy_chans[phy->num] = NULL; +out: - return 0; + pm_runtime_mark_last_busy(d40c->base->dev); + pm_runtime_put_autosuspend(d40c->base->dev); + return res; } static bool d40_is_paused(struct d40_chan *d40c) @@ -1855,7 +2036,7 @@ err: } static dma_addr_t -d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) +d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction) { struct stedma40_platform_data *plat 
= chan->base->plat_data; struct stedma40_chan_cfg *cfg = &chan->dma_cfg; @@ -1864,9 +2045,9 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) if (chan->runtime_addr) return chan->runtime_addr; - if (direction == DMA_FROM_DEVICE) + if (direction == DMA_DEV_TO_MEM) addr = plat->dev_rx[cfg->src_dev_type]; - else if (direction == DMA_TO_DEVICE) + else if (direction == DMA_MEM_TO_DEV) addr = plat->dev_tx[cfg->dst_dev_type]; return addr; @@ -1875,7 +2056,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) static struct dma_async_tx_descriptor * d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, struct scatterlist *sg_dst, unsigned int sg_len, - enum dma_data_direction direction, unsigned long dma_flags) + enum dma_transfer_direction direction, unsigned long dma_flags) { struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); dma_addr_t src_dev_addr = 0; @@ -1902,9 +2083,9 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, if (direction != DMA_NONE) { dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); - if (direction == DMA_FROM_DEVICE) + if (direction == DMA_DEV_TO_MEM) src_dev_addr = dev_addr; - else if (direction == DMA_TO_DEVICE) + else if (direction == DMA_MEM_TO_DEV) dst_dev_addr = dev_addr; } @@ -2011,14 +2192,15 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) goto fail; } } - is_free_phy = (d40c->phy_chan == NULL); - err = d40_allocate_channel(d40c); + err = d40_allocate_channel(d40c, &is_free_phy); if (err) { chan_err(d40c, "Failed to allocate channel\n"); + d40c->configured = false; goto fail; } + pm_runtime_get_sync(d40c->base->dev); /* Fill in basic CFG register values */ d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg, chan_is_logical(d40c)); @@ -2038,6 +2220,12 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; } + dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n", + chan_is_logical(d40c) ? "logical" : "physical", + d40c->phy_chan->num, + d40c->dma_cfg.use_fixed_channel ? ", fixed" : ""); + + /* * Only write channel configuration to the DMA if the physical * resource is free. 
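[Editor's note — the ste_dma40 hunks in this file wrap every operation that touches the hardware (pause, resume, channel allocation and free, transfer-complete handling) in the same runtime-PM bracket. A minimal sketch of that bracket, mirroring the calls the patch adds; the function name is hypothetical and the register work in the middle is elided.]

#include <linux/pm_runtime.h>

static void d40_pm_bracketed_access(struct device *dev)
{
	/* Make sure the DMA block is powered and clocked before any MMIO. */
	pm_runtime_get_sync(dev);

	/* ... channel register accesses go here ... */

	/* Arm the autosuspend timer rather than powering down immediately. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}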
In case of multiple logical channels @@ -2046,6 +2234,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) if (is_free_phy) d40_config_write(d40c); fail: + pm_runtime_mark_last_busy(d40c->base->dev); + pm_runtime_put_autosuspend(d40c->base->dev); spin_unlock_irqrestore(&d40c->lock, flags); return err; } @@ -2108,10 +2298,10 @@ d40_prep_memcpy_sg(struct dma_chan *chan, static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, - enum dma_data_direction direction, + enum dma_transfer_direction direction, unsigned long dma_flags) { - if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) + if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) return NULL; return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); @@ -2120,7 +2310,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, static struct dma_async_tx_descriptor * dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, size_t period_len, - enum dma_data_direction direction) + enum dma_transfer_direction direction) { unsigned int periods = buf_len / period_len; struct dma_async_tx_descriptor *txd; @@ -2269,7 +2459,7 @@ static int d40_set_runtime_config(struct dma_chan *chan, dst_addr_width = config->dst_addr_width; dst_maxburst = config->dst_maxburst; - if (config->direction == DMA_FROM_DEVICE) { + if (config->direction == DMA_DEV_TO_MEM) { dma_addr_t dev_addr_rx = d40c->base->plat_data->dev_rx[cfg->src_dev_type]; @@ -2292,7 +2482,7 @@ static int d40_set_runtime_config(struct dma_chan *chan, if (dst_maxburst == 0) dst_maxburst = src_maxburst; - } else if (config->direction == DMA_TO_DEVICE) { + } else if (config->direction == DMA_MEM_TO_DEV) { dma_addr_t dev_addr_tx = d40c->base->plat_data->dev_tx[cfg->dst_dev_type]; @@ -2357,7 +2547,7 @@ static int d40_set_runtime_config(struct dma_chan *chan, "configured channel %s for %s, data width %d/%d, " "maxburst %d/%d elements, LE, no flow control\n", dma_chan_name(chan), - (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", + (config->direction == DMA_DEV_TO_MEM) ? 
"RX" : "TX", src_addr_width, dst_addr_width, src_maxburst, dst_maxburst); @@ -2519,6 +2709,72 @@ failure1: return err; } +/* Suspend resume functionality */ +#ifdef CONFIG_PM +static int dma40_pm_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct d40_base *base = platform_get_drvdata(pdev); + int ret = 0; + if (!pm_runtime_suspended(dev)) + return -EBUSY; + + if (base->lcpa_regulator) + ret = regulator_disable(base->lcpa_regulator); + return ret; +} + +static int dma40_runtime_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct d40_base *base = platform_get_drvdata(pdev); + + d40_save_restore_registers(base, true); + + /* Don't disable/enable clocks for v1 due to HW bugs */ + if (base->rev != 1) + writel_relaxed(base->gcc_pwr_off_mask, + base->virtbase + D40_DREG_GCC); + + return 0; +} + +static int dma40_runtime_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct d40_base *base = platform_get_drvdata(pdev); + + if (base->initialized) + d40_save_restore_registers(base, false); + + writel_relaxed(D40_DREG_GCC_ENABLE_ALL, + base->virtbase + D40_DREG_GCC); + return 0; +} + +static int dma40_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct d40_base *base = platform_get_drvdata(pdev); + int ret = 0; + + if (base->lcpa_regulator) + ret = regulator_enable(base->lcpa_regulator); + + return ret; +} + +static const struct dev_pm_ops dma40_pm_ops = { + .suspend = dma40_pm_suspend, + .runtime_suspend = dma40_runtime_suspend, + .runtime_resume = dma40_runtime_resume, + .resume = dma40_resume, +}; +#define DMA40_PM_OPS (&dma40_pm_ops) +#else +#define DMA40_PM_OPS NULL +#endif + /* Initialization functions. */ static int __init d40_phy_res_init(struct d40_base *base) @@ -2527,6 +2783,7 @@ static int __init d40_phy_res_init(struct d40_base *base) int num_phy_chans_avail = 0; u32 val[2]; int odd_even_bit = -2; + int gcc = D40_DREG_GCC_ENA; val[0] = readl(base->virtbase + D40_DREG_PRSME); val[1] = readl(base->virtbase + D40_DREG_PRSMO); @@ -2538,9 +2795,17 @@ static int __init d40_phy_res_init(struct d40_base *base) /* Mark security only channels as occupied */ base->phy_res[i].allocated_src = D40_ALLOC_PHY; base->phy_res[i].allocated_dst = D40_ALLOC_PHY; + base->phy_res[i].reserved = true; + gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i), + D40_DREG_GCC_SRC); + gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i), + D40_DREG_GCC_DST); + + } else { base->phy_res[i].allocated_src = D40_ALLOC_FREE; base->phy_res[i].allocated_dst = D40_ALLOC_FREE; + base->phy_res[i].reserved = false; num_phy_chans_avail++; } spin_lock_init(&base->phy_res[i].lock); @@ -2552,6 +2817,11 @@ static int __init d40_phy_res_init(struct d40_base *base) base->phy_res[chan].allocated_src = D40_ALLOC_PHY; base->phy_res[chan].allocated_dst = D40_ALLOC_PHY; + base->phy_res[chan].reserved = true; + gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan), + D40_DREG_GCC_SRC); + gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan), + D40_DREG_GCC_DST); num_phy_chans_avail--; } @@ -2572,6 +2842,15 @@ static int __init d40_phy_res_init(struct d40_base *base) val[0] = val[0] >> 2; } + /* + * To keep things simple, Enable all clocks initially. + * The clocks will get managed later post channel allocation. + * The clocks for the event lines on which reserved channels exists + * are not managed here. 
+ */ + writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); + base->gcc_pwr_off_mask = gcc; + return num_phy_chans_avail; } @@ -2699,10 +2978,15 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) goto failure; } - base->lcla_pool.alloc_map = kzalloc(num_phy_chans * - sizeof(struct d40_desc *) * - D40_LCLA_LINK_PER_EVENT_GRP, + base->reg_val_backup_chan = kmalloc(base->num_phy_chans * + sizeof(d40_backup_regs_chan), GFP_KERNEL); + if (!base->reg_val_backup_chan) + goto failure; + + base->lcla_pool.alloc_map = + kzalloc(num_phy_chans * sizeof(struct d40_desc *) + * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL); if (!base->lcla_pool.alloc_map) goto failure; @@ -2741,9 +3025,9 @@ failure: static void __init d40_hw_init(struct d40_base *base) { - static const struct d40_reg_val dma_init_reg[] = { + static struct d40_reg_val dma_init_reg[] = { /* Clock every part of the DMA block from start */ - { .reg = D40_DREG_GCC, .val = 0x0000ff01}, + { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL}, /* Interrupts on all logical channels */ { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, @@ -2943,11 +3227,31 @@ static int __init d40_probe(struct platform_device *pdev) d40_err(&pdev->dev, "Failed to ioremap LCPA region\n"); goto failure; } + /* If lcla has to be located in ESRAM we don't need to allocate */ + if (base->plat_data->use_esram_lcla) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "lcla_esram"); + if (!res) { + ret = -ENOENT; + d40_err(&pdev->dev, + "No \"lcla_esram\" memory resource\n"); + goto failure; + } + base->lcla_pool.base = ioremap(res->start, + resource_size(res)); + if (!base->lcla_pool.base) { + ret = -ENOMEM; + d40_err(&pdev->dev, "Failed to ioremap LCLA region\n"); + goto failure; + } + writel(res->start, base->virtbase + D40_DREG_LCLA); - ret = d40_lcla_allocate(base); - if (ret) { - d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); - goto failure; + } else { + ret = d40_lcla_allocate(base); + if (ret) { + d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); + goto failure; + } } spin_lock_init(&base->lcla_pool.lock); @@ -2960,6 +3264,32 @@ static int __init d40_probe(struct platform_device *pdev) goto failure; } + pm_runtime_irq_safe(base->dev); + pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(base->dev); + pm_runtime_enable(base->dev); + pm_runtime_resume(base->dev); + + if (base->plat_data->use_esram_lcla) { + + base->lcpa_regulator = regulator_get(base->dev, "lcla_esram"); + if (IS_ERR(base->lcpa_regulator)) { + d40_err(&pdev->dev, "Failed to get lcpa_regulator\n"); + base->lcpa_regulator = NULL; + goto failure; + } + + ret = regulator_enable(base->lcpa_regulator); + if (ret) { + d40_err(&pdev->dev, + "Failed to enable lcpa_regulator\n"); + regulator_put(base->lcpa_regulator); + base->lcpa_regulator = NULL; + goto failure; + } + } + + base->initialized = true; err = d40_dmaengine_init(base, num_reserved_chans); if (err) goto failure; @@ -2976,6 +3306,11 @@ failure: if (base->virtbase) iounmap(base->virtbase); + if (base->lcla_pool.base && base->plat_data->use_esram_lcla) { + iounmap(base->lcla_pool.base); + base->lcla_pool.base = NULL; + } + if (base->lcla_pool.dma_addr) dma_unmap_single(base->dev, base->lcla_pool.dma_addr, SZ_1K * base->num_phy_chans, @@ -2998,6 +3333,11 @@ failure: clk_put(base->clk); } + if (base->lcpa_regulator) { + regulator_disable(base->lcpa_regulator); + regulator_put(base->lcpa_regulator); + } + kfree(base->lcla_pool.alloc_map); 
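[Editor's note — for orientation only: the dev_pm_ops defined above (dma40_pm_ops) are attached to the driver through the .pm field added to d40_driver in the hunk just below, so system sleep goes through .suspend/.resume while the autosuspend machinery configured in d40_probe() invokes .runtime_suspend/.runtime_resume. A skeletal sketch of that wiring with hypothetical foo_* names:]

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)         { return 0; }	/* system sleep */
static int foo_resume(struct device *dev)          { return 0; }
static int foo_runtime_suspend(struct device *dev) { return 0; }	/* autosuspend  */
static int foo_runtime_resume(struct device *dev)  { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	.suspend	 = foo_suspend,
	.resume		 = foo_resume,
	.runtime_suspend = foo_runtime_suspend,
	.runtime_resume	 = foo_runtime_resume,
};

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= &foo_pm_ops,
	},
};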
kfree(base->lookup_log_chans); kfree(base->lookup_phy_chans); @@ -3013,6 +3353,7 @@ static struct platform_driver d40_driver = { .driver = { .owner = THIS_MODULE, .name = D40_NAME, + .pm = DMA40_PM_OPS, }, }; diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index b44c4551..8d3d490 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h @@ -16,6 +16,8 @@ #define D40_TYPE_TO_GROUP(type) (type / 16) #define D40_TYPE_TO_EVENT(type) (type % 16) +#define D40_GROUP_SIZE 8 +#define D40_PHYS_TO_GROUP(phys) ((phys & (D40_GROUP_SIZE - 1)) / 2) /* Most bits of the CFG register are the same in log as in phy mode */ #define D40_SREG_CFG_MST_POS 15 @@ -123,6 +125,15 @@ /* DMA Register Offsets */ #define D40_DREG_GCC 0x000 +#define D40_DREG_GCC_ENA 0x1 +/* This assumes that there are only 4 event groups */ +#define D40_DREG_GCC_ENABLE_ALL 0xff01 +#define D40_DREG_GCC_EVTGRP_POS 8 +#define D40_DREG_GCC_SRC 0 +#define D40_DREG_GCC_DST 1 +#define D40_DREG_GCC_EVTGRP_ENA(x, y) \ + (1 << (D40_DREG_GCC_EVTGRP_POS + 2 * x + y)) + #define D40_DREG_PRTYP 0x004 #define D40_DREG_PRSME 0x008 #define D40_DREG_PRSMO 0x00C diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index a4a398f..a6f9c16 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -90,7 +90,7 @@ struct timb_dma_chan { struct list_head queue; struct list_head free_list; unsigned int bytes_per_line; - enum dma_data_direction direction; + enum dma_transfer_direction direction; unsigned int descs; /* Descriptors to allocate */ unsigned int desc_elems; /* number of elems per descriptor */ }; @@ -166,10 +166,10 @@ static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc, if (single) dma_unmap_single(chan2dev(&td_chan->chan), addr, len, - td_chan->direction); + DMA_TO_DEVICE); else dma_unmap_page(chan2dev(&td_chan->chan), addr, len, - td_chan->direction); + DMA_TO_DEVICE); } static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single) @@ -235,7 +235,7 @@ static void __td_start_dma(struct timb_dma_chan *td_chan) "td_chan: %p, chan: %d, membase: %p\n", td_chan, td_chan->chan.chan_id, td_chan->membase); - if (td_chan->direction == DMA_FROM_DEVICE) { + if (td_chan->direction == DMA_DEV_TO_MEM) { /* descriptor address */ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR); @@ -278,7 +278,7 @@ static void __td_finish(struct timb_dma_chan *td_chan) txd->cookie); /* make sure to stop the transfer */ - if (td_chan->direction == DMA_FROM_DEVICE) + if (td_chan->direction == DMA_DEV_TO_MEM) iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER); /* Currently no support for stopping DMA transfers else @@ -558,7 +558,7 @@ static void td_issue_pending(struct dma_chan *chan) static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, - enum dma_data_direction direction, unsigned long flags) + enum dma_transfer_direction direction, unsigned long flags) { struct timb_dma_chan *td_chan = container_of(chan, struct timb_dma_chan, chan); @@ -606,7 +606,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, } dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys, - td_desc->desc_list_len, DMA_TO_DEVICE); + td_desc->desc_list_len, DMA_MEM_TO_DEV); return &td_desc->txd; } @@ -775,8 +775,8 @@ static int __devinit td_probe(struct platform_device *pdev) td_chan->descs = pchan->descriptors; td_chan->desc_elems = pchan->descriptor_elements; td_chan->bytes_per_line = pchan->bytes_per_line; - 
td_chan->direction = pchan->rx ? DMA_FROM_DEVICE : - DMA_TO_DEVICE; + td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM : + DMA_MEM_TO_DEV; td_chan->membase = td->membase + (i / 2) * TIMBDMA_INSTANCE_OFFSET + @@ -841,17 +841,7 @@ static struct platform_driver td_driver = { .remove = __exit_p(td_remove), }; -static int __init td_init(void) -{ - return platform_driver_register(&td_driver); -} -module_init(td_init); - -static void __exit td_exit(void) -{ - platform_driver_unregister(&td_driver); -} -module_exit(td_exit); +module_platform_driver(td_driver); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Timberdale DMA controller driver"); diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index cbd83e36..6122c36 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c @@ -845,7 +845,7 @@ txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, static struct dma_async_tx_descriptor * txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, - unsigned int sg_len, enum dma_data_direction direction, + unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags) { struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); @@ -860,9 +860,9 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, BUG_ON(!ds || !ds->reg_width); if (ds->tx_reg) - BUG_ON(direction != DMA_TO_DEVICE); + BUG_ON(direction != DMA_MEM_TO_DEV); else - BUG_ON(direction != DMA_FROM_DEVICE); + BUG_ON(direction != DMA_DEV_TO_MEM); if (unlikely(!sg_len)) return NULL; @@ -882,7 +882,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, mem = sg_dma_address(sg); if (__is_dmac64(ddev)) { - if (direction == DMA_TO_DEVICE) { + if (direction == DMA_MEM_TO_DEV) { desc->hwdesc.SAR = mem; desc->hwdesc.DAR = ds->tx_reg; } else { @@ -891,7 +891,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, } desc->hwdesc.CNTR = sg_dma_len(sg); } else { - if (direction == DMA_TO_DEVICE) { + if (direction == DMA_MEM_TO_DEV) { desc->hwdesc32.SAR = mem; desc->hwdesc32.DAR = ds->tx_reg; } else { @@ -900,7 +900,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, } desc->hwdesc32.CNTR = sg_dma_len(sg); } - if (direction == DMA_TO_DEVICE) { + if (direction == DMA_MEM_TO_DEV) { sai = ds->reg_width; dai = 0; } else { diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 6628fea..7f5f0da 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -263,6 +263,7 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card) static char ohci_driver_name[] = KBUILD_MODNAME; #define PCI_DEVICE_ID_AGERE_FW643 0x5901 +#define PCI_DEVICE_ID_CREATIVE_SB1394 0x4001 #define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380 #define PCI_DEVICE_ID_TI_TSB12LV22 0x8009 #define PCI_DEVICE_ID_TI_TSB12LV26 0x8020 @@ -289,6 +290,9 @@ static const struct { {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6, QUIRK_NO_MSI}, + {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID, + QUIRK_RESET_PACKET}, + {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID, QUIRK_NO_MSI}, @@ -299,7 +303,7 @@ static const struct { QUIRK_NO_MSI}, {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID, - QUIRK_CYCLE_TIMER}, + QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID, QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A}, diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 37c4bd1..d0c4118 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -87,6 +87,7 
@@ config GPIO_GENERIC_PLATFORM config GPIO_IT8761E tristate "IT8761E GPIO support" + depends on X86 # unconditional access to IO space. help Say yes here to support GPIO functionality of IT8761E super I/O chip. diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c index 5b69480..ddfacc5 100644 --- a/drivers/gpio/gpio-lpc32xx.c +++ b/drivers/gpio/gpio-lpc32xx.c @@ -96,7 +96,7 @@ static const char *gpio_p2_names[LPC32XX_GPIO_P2_MAX] = { }; static const char *gpio_p3_names[LPC32XX_GPIO_P3_MAX] = { - "gpi000", "gpio01", "gpio02", "gpio03", + "gpio00", "gpio01", "gpio02", "gpio03", "gpio04", "gpio05" }; diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c index 461958f..f0febe5 100644 --- a/drivers/gpio/gpio-ml-ioh.c +++ b/drivers/gpio/gpio-ml-ioh.c @@ -248,7 +248,7 @@ static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port) static int ioh_irq_type(struct irq_data *d, unsigned int type) { u32 im; - u32 *im_reg; + void __iomem *im_reg; u32 ien; u32 im_pos; int ch; @@ -412,7 +412,7 @@ static int __devinit ioh_gpio_probe(struct pci_dev *pdev, int i, j; struct ioh_gpio *chip; void __iomem *base; - void __iomem *chip_save; + void *chip_save; int irq_base; ret = pci_enable_device(pdev); @@ -428,7 +428,7 @@ static int __devinit ioh_gpio_probe(struct pci_dev *pdev, } base = pci_iomap(pdev, 1, 0); - if (base == 0) { + if (!base) { dev_err(&pdev->dev, "%s : pci_iomap failed", __func__); ret = -ENOMEM; goto err_iomap; @@ -448,6 +448,7 @@ static int __devinit ioh_gpio_probe(struct pci_dev *pdev, chip->reg = chip->base; chip->ch = i; mutex_init(&chip->lock); + spin_lock_init(&chip->spinlock); ioh_gpio_setup(chip, num_ports[i]); ret = gpiochip_add(&chip->gpio); if (ret) { @@ -521,7 +522,7 @@ static void __devexit ioh_gpio_remove(struct pci_dev *pdev) int err; int i; struct ioh_gpio *chip = pci_get_drvdata(pdev); - void __iomem *chip_save; + void *chip_save; chip_save = chip; diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c index f060329..e8729cc 100644 --- a/drivers/gpio/gpio-pch.c +++ b/drivers/gpio/gpio-pch.c @@ -231,7 +231,7 @@ static void pch_gpio_setup(struct pch_gpio *chip) static int pch_irq_type(struct irq_data *d, unsigned int type) { u32 im; - u32 *im_reg; + u32 __iomem *im_reg; u32 ien; u32 im_pos; int ch; @@ -376,7 +376,7 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev, } chip->base = pci_iomap(pdev, 1, 0); - if (chip->base == 0) { + if (!chip->base) { dev_err(&pdev->dev, "%s : pci_iomap FAILED", __func__); ret = -ENOMEM; goto err_iomap; @@ -392,6 +392,7 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev, chip->reg = chip->base; pci_set_drvdata(pdev, chip); mutex_init(&chip->lock); + spin_lock_init(&chip->spinlock); pch_gpio_setup(chip); ret = gpiochip_add(&chip->gpio); if (ret) { diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c index a766177..0a79a11 100644 --- a/drivers/gpio/gpio-samsung.c +++ b/drivers/gpio/gpio-samsung.c @@ -2387,27 +2387,30 @@ static struct samsung_gpio_chip exynos4_gpios_3[] = { }; #if defined(CONFIG_ARCH_EXYNOS4) && defined(CONFIG_OF) -static int exynos4_gpio_xlate(struct gpio_chip *gc, struct device_node *np, - const void *gpio_spec, u32 *flags) +static int exynos4_gpio_xlate(struct gpio_chip *gc, + const struct of_phandle_args *gpiospec, u32 *flags) { - const __be32 *gpio = gpio_spec; - const u32 n = be32_to_cpup(gpio); - unsigned int pin = gc->base + be32_to_cpu(gpio[0]); + unsigned int pin; if (WARN_ON(gc->of_gpio_n_cells < 4)) return -EINVAL; - if (n > gc->ngpio) + if 
(WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells)) return -EINVAL; - if (s3c_gpio_cfgpin(pin, S3C_GPIO_SFN(be32_to_cpu(gpio[1])))) + if (gpiospec->args[0] > gc->ngpio) + return -EINVAL; + + pin = gc->base + gpiospec->args[0]; + + if (s3c_gpio_cfgpin(pin, S3C_GPIO_SFN(gpiospec->args[1]))) pr_warn("gpio_xlate: failed to set pin function\n"); - if (s3c_gpio_setpull(pin, be32_to_cpu(gpio[2]))) + if (s3c_gpio_setpull(pin, gpiospec->args[2])) pr_warn("gpio_xlate: failed to set pin pull up/down\n"); - if (s5p_gpio_set_drvstr(pin, be32_to_cpu(gpio[3]))) + if (s5p_gpio_set_drvstr(pin, gpiospec->args[3])) pr_warn("gpio_xlate: failed to set pin drive strength\n"); - return n; + return gpiospec->args[0]; } static const struct of_device_id exynos4_gpio_dt_match[] __initdata = { diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c index b9c1c29..91f45b9 100644 --- a/drivers/gpio/gpio-tps65910.c +++ b/drivers/gpio/gpio-tps65910.c @@ -52,7 +52,7 @@ static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset, struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio); /* Set the initial value */ - tps65910_gpio_set(gc, 0, value); + tps65910_gpio_set(gc, offset, value); return tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset, GPIO_CFG_MASK); diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c index 3f46772..ba23790 100644 --- a/drivers/gpu/drm/drm_auth.c +++ b/drivers/gpu/drm/drm_auth.c @@ -101,7 +101,7 @@ static int drm_add_magic(struct drm_master *master, struct drm_file *priv, * Searches and unlinks the entry in drm_device::magiclist with the magic * number hash key, while holding the drm_device::struct_mutex lock. */ -static int drm_remove_magic(struct drm_master *master, drm_magic_t magic) +int drm_remove_magic(struct drm_master *master, drm_magic_t magic) { struct drm_magic_entry *pt; struct drm_hash_item *hash; @@ -136,6 +136,8 @@ static int drm_remove_magic(struct drm_master *master, drm_magic_t magic) * If there is a magic number in drm_file::magic then use it, otherwise * searches an unique non-zero magic number and add it associating it with \p * file_priv. + * This ioctl needs protection by the drm_global_mutex, which protects + * struct drm_file::magic and struct drm_magic_entry::priv. */ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -173,6 +175,8 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) * \return zero if authentication successed, or a negative number otherwise. * * Checks if \p file_priv is associated with the magic number passed in \arg. + * This ioctl needs protection by the drm_global_mutex, which protects + * struct drm_file::magic and struct drm_magic_entry::priv. 
*/ int drm_authmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index c00cf15..6263b01 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -487,6 +487,11 @@ int drm_release(struct inode *inode, struct file *filp) (long)old_encode_dev(file_priv->minor->device), dev->open_count); + /* Release any auth tokens that might point to this file_priv, + (do that under the drm_global_mutex) */ + if (file_priv->magic) + (void) drm_remove_magic(file_priv->master, file_priv->magic); + /* if the master has gone away we can't do anything with the lock */ if (file_priv->minor->master) drm_master_release(dev, filp); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 396e60c..f8625e2 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -140,7 +140,7 @@ int drm_gem_object_init(struct drm_device *dev, obj->dev = dev; obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); if (IS_ERR(obj->filp)) - return -ENOMEM; + return PTR_ERR(obj->filp); kref_init(&obj->refcount); atomic_set(&obj->handle_count, 0); diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index f9aaa56..b9e5266c 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -13,7 +13,7 @@ config DRM_EXYNOS config DRM_EXYNOS_FIMD tristate "Exynos DRM FIMD" - depends on DRM_EXYNOS + depends on DRM_EXYNOS && !FB_S3C default n help Choose this option if you want to use Exynos FIMD for DRM. @@ -21,7 +21,7 @@ config DRM_EXYNOS_FIMD config DRM_EXYNOS_HDMI tristate "Exynos DRM HDMI" - depends on DRM_EXYNOS + depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_TV help Choose this option if you want to use Exynos HDMI for DRM. If M is selected, the module will be called exynos_drm_hdmi diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index ca83139..b6a737d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -158,7 +158,8 @@ static void fimd_dpms(struct device *subdrv_dev, int mode) case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - pm_runtime_put_sync(subdrv_dev); + if (!ctx->suspended) + pm_runtime_put_sync(subdrv_dev); break; default: DRM_DEBUG_KMS("unspecified mode %d\n", mode); @@ -734,6 +735,46 @@ static void fimd_clear_win(struct fimd_context *ctx, int win) writel(val, ctx->regs + SHADOWCON); } +static int fimd_power_on(struct fimd_context *ctx, bool enable) +{ + struct exynos_drm_subdrv *subdrv = &ctx->subdrv; + struct device *dev = subdrv->manager.dev; + + DRM_DEBUG_KMS("%s\n", __FILE__); + + if (enable != false && enable != true) + return -EINVAL; + + if (enable) { + int ret; + + ret = clk_enable(ctx->bus_clk); + if (ret < 0) + return ret; + + ret = clk_enable(ctx->lcd_clk); + if (ret < 0) { + clk_disable(ctx->bus_clk); + return ret; + } + + ctx->suspended = false; + + /* if vblank was enabled status, enable it again. 
*/ + if (test_and_clear_bit(0, &ctx->irq_flags)) + fimd_enable_vblank(dev); + + fimd_apply(dev); + } else { + clk_disable(ctx->lcd_clk); + clk_disable(ctx->bus_clk); + + ctx->suspended = true; + } + + return 0; +} + static int __devinit fimd_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -911,39 +952,30 @@ out: #ifdef CONFIG_PM_SLEEP static int fimd_suspend(struct device *dev) { - int ret; + struct fimd_context *ctx = get_fimd_context(dev); if (pm_runtime_suspended(dev)) return 0; - ret = pm_runtime_suspend(dev); - if (ret < 0) - return ret; - - return 0; + /* + * do not use pm_runtime_suspend(). if pm_runtime_suspend() is + * called here, an error would be returned by that interface + * because the usage_count of pm runtime is more than 1. + */ + return fimd_power_on(ctx, false); } static int fimd_resume(struct device *dev) { - int ret; - - ret = pm_runtime_resume(dev); - if (ret < 0) { - DRM_ERROR("failed to resume runtime pm.\n"); - return ret; - } - - pm_runtime_disable(dev); - - ret = pm_runtime_set_active(dev); - if (ret < 0) { - DRM_ERROR("failed to active runtime pm.\n"); - pm_runtime_enable(dev); - pm_runtime_suspend(dev); - return ret; - } + struct fimd_context *ctx = get_fimd_context(dev); - pm_runtime_enable(dev); + /* + * if entered to sleep when lcd panel was on, the usage_count + * of pm runtime would still be 1 so in this case, fimd driver + * should be on directly not drawing on pm runtime interface. + */ + if (!pm_runtime_suspended(dev)) + return fimd_power_on(ctx, true); return 0; } @@ -956,39 +988,16 @@ static int fimd_runtime_suspend(struct device *dev) DRM_DEBUG_KMS("%s\n", __FILE__); - clk_disable(ctx->lcd_clk); - clk_disable(ctx->bus_clk); - - ctx->suspended = true; - return 0; + return fimd_power_on(ctx, false); } static int fimd_runtime_resume(struct device *dev) { struct fimd_context *ctx = get_fimd_context(dev); - int ret; DRM_DEBUG_KMS("%s\n", __FILE__); - ret = clk_enable(ctx->bus_clk); - if (ret < 0) - return ret; - - ret = clk_enable(ctx->lcd_clk); - if (ret < 0) { - clk_disable(ctx->bus_clk); - return ret; - } - - ctx->suspended = false; - - /* if vblank was enabled status, enable it again. 
*/ - if (test_and_clear_bit(0, &ctx->irq_flags)) - fimd_enable_vblank(dev); - - fimd_apply(dev); - - return 0; + return fimd_power_on(ctx, true); } #endif diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index f48f7ce..3429d3f 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1116,8 +1116,8 @@ err_ddc: err_iomap: iounmap(hdata->regs); err_req_region: - release_resource(hdata->regs_res); - kfree(hdata->regs_res); + release_mem_region(hdata->regs_res->start, + resource_size(hdata->regs_res)); err_resource: hdmi_resources_cleanup(hdata); err_data: @@ -1145,8 +1145,8 @@ static int __devexit hdmi_remove(struct platform_device *pdev) iounmap(hdata->regs); - release_resource(hdata->regs_res); - kfree(hdata->regs_res); + release_mem_region(hdata->regs_res->start, + resource_size(hdata->regs_res)); /* hdmiphy i2c driver */ i2c_del_driver(&hdmiphy_driver); diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index 791c0ef..830dfdd6b 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -113,12 +113,12 @@ static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info) void psbfb_suspend(struct drm_device *dev) { - struct drm_framebuffer *fb = 0; - struct psb_framebuffer *psbfb = to_psb_fb(fb); + struct drm_framebuffer *fb; console_lock(); mutex_lock(&dev->mode_config.mutex); list_for_each_entry(fb, &dev->mode_config.fb_list, head) { + struct psb_framebuffer *psbfb = to_psb_fb(fb); struct fb_info *info = psbfb->fbdev; fb_set_suspend(info, 1); drm_fb_helper_blank(FB_BLANK_POWERDOWN, info); @@ -129,12 +129,12 @@ void psbfb_suspend(struct drm_device *dev) void psbfb_resume(struct drm_device *dev) { - struct drm_framebuffer *fb = 0; - struct psb_framebuffer *psbfb = to_psb_fb(fb); + struct drm_framebuffer *fb; console_lock(); mutex_lock(&dev->mode_config.mutex); list_for_each_entry(fb, &dev->mode_config.fb_list, head) { + struct psb_framebuffer *psbfb = to_psb_fb(fb); struct fb_info *info = psbfb->fbdev; fb_set_suspend(info, 0); drm_fb_helper_blank(FB_BLANK_UNBLANK, info); diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c index e770bd1..5d5330f 100644 --- a/drivers/gpu/drm/gma500/gtt.c +++ b/drivers/gpu/drm/gma500/gtt.c @@ -20,6 +20,7 @@ */ #include <drm/drmP.h> +#include <linux/shmem_fs.h> #include "psb_drv.h" @@ -203,9 +204,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt) gt->npage = pages; for (i = 0; i < pages; i++) { - /* FIXME: needs updating as per mail from Hugh Dickins */ - p = read_cache_page_gfp(mapping, i, - __GFP_COLD | GFP_KERNEL); + p = shmem_read_mapping_page(mapping, i); if (IS_ERR(p)) goto err; gt->pages[i] = p; diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index f7c17b2..7f4b4e1 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c @@ -886,7 +886,7 @@ static int i810_flush_queue(struct drm_device *dev) } /* Must be called with the lock held */ -void i810_driver_reclaim_buffers(struct drm_device *dev, +static void i810_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; @@ -1223,17 +1223,12 @@ void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv) if (dev_priv->page_flipping) i810_do_cleanup_pageflip(dev); } +} - if (file_priv->master && file_priv->master->lock.hw_lock) { - drm_idlelock_take(&file_priv->master->lock); - 
i810_driver_reclaim_buffers(dev, file_priv); - drm_idlelock_release(&file_priv->master->lock); - } else { - /* master disappeared, clean up stuff anyway and hope nothing - * goes wrong */ - i810_driver_reclaim_buffers(dev, file_priv); - } - +void i810_driver_reclaim_buffers_locked(struct drm_device *dev, + struct drm_file *file_priv) +{ + i810_reclaim_buffers(dev, file_priv); } int i810_driver_dma_quiescent(struct drm_device *dev) diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c index 053f1ee..ec12f7d 100644 --- a/drivers/gpu/drm/i810/i810_drv.c +++ b/drivers/gpu/drm/i810/i810_drv.c @@ -63,6 +63,7 @@ static struct drm_driver driver = { .lastclose = i810_driver_lastclose, .preclose = i810_driver_preclose, .device_is_agp = i810_driver_device_is_agp, + .reclaim_buffers_locked = i810_driver_reclaim_buffers_locked, .dma_quiescent = i810_driver_dma_quiescent, .ioctls = i810_ioctls, .fops = &i810_driver_fops, diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h index 6e0acad..c9339f4 100644 --- a/drivers/gpu/drm/i810/i810_drv.h +++ b/drivers/gpu/drm/i810/i810_drv.h @@ -116,12 +116,14 @@ typedef struct drm_i810_private { /* i810_dma.c */ extern int i810_driver_dma_quiescent(struct drm_device *dev); -void i810_driver_reclaim_buffers(struct drm_device *dev, - struct drm_file *file_priv); +extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev, + struct drm_file *file_priv); extern int i810_driver_load(struct drm_device *, unsigned long flags); extern void i810_driver_lastclose(struct drm_device *dev); extern void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv); +extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev, + struct drm_file *file_priv); extern int i810_driver_device_is_agp(struct drm_device *dev); extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 1180798..deaa657 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -121,11 +121,11 @@ static const char *cache_level_str(int type) static void describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) { - seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s", + seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s", &obj->base, get_pin_flag(obj), get_tiling_flag(obj), - obj->base.size, + obj->base.size / 1024, obj->base.read_domains, obj->base.write_domain, obj->last_rendering_seqno, @@ -653,7 +653,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) seq_printf(m, " Size : %08x\n", ring->size); seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring)); seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring)); - if (IS_GEN6(dev)) { + if (IS_GEN6(dev) || IS_GEN7(dev)) { seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring)); seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring)); } @@ -1075,6 +1075,7 @@ static int gen6_drpc_info(struct seq_file *m) struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 rpmodectl1, gt_core_status, rcctl1; + unsigned forcewake_count; int count=0, ret; @@ -1082,9 +1083,13 @@ static int gen6_drpc_info(struct seq_file *m) if (ret) return ret; - if (atomic_read(&dev_priv->forcewake_count)) { - seq_printf(m, "RC information inaccurate because userspace " - "holds a reference \n"); + spin_lock_irq(&dev_priv->gt_lock); + forcewake_count = dev_priv->forcewake_count; + 
spin_unlock_irq(&dev_priv->gt_lock); + + if (forcewake_count) { + seq_printf(m, "RC information inaccurate because somebody " + "holds a forcewake reference \n"); } else { /* NB: we cannot use forcewake, else we read the wrong values */ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) @@ -1106,7 +1111,7 @@ static int gen6_drpc_info(struct seq_file *m) seq_printf(m, "SW control enabled: %s\n", yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE)); - seq_printf(m, "RC6 Enabled: %s\n", + seq_printf(m, "RC1e Enabled: %s\n", yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); seq_printf(m, "RC6 Enabled: %s\n", yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); @@ -1398,9 +1403,13 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; + unsigned forcewake_count; + + spin_lock_irq(&dev_priv->gt_lock); + forcewake_count = dev_priv->forcewake_count; + spin_unlock_irq(&dev_priv->gt_lock); - seq_printf(m, "forcewake count = %d\n", - atomic_read(&dev_priv->forcewake_count)); + seq_printf(m, "forcewake count = %u\n", forcewake_count); return 0; } @@ -1665,7 +1674,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file) struct drm_i915_private *dev_priv = dev->dev_private; int ret; - if (!IS_GEN6(dev)) + if (INTEL_INFO(dev)->gen < 6) return 0; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -1682,7 +1691,7 @@ int i915_forcewake_release(struct inode *inode, struct file *file) struct drm_device *dev = inode->i_private; struct drm_i915_private *dev_priv = dev->dev_private; - if (!IS_GEN6(dev)) + if (INTEL_INFO(dev)->gen < 6) return 0; /* diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 5f4d589..ddfe3d9 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -2045,6 +2045,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (!IS_I945G(dev) && !IS_I945GM(dev)) pci_enable_msi(dev->pdev); + spin_lock_init(&dev_priv->gt_lock); spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->error_lock); spin_lock_init(&dev_priv->rps_lock); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 8f71879..308f819 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -368,11 +368,12 @@ void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) */ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) { - WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); + unsigned long irqflags; - /* Forcewake is atomic in case we get in here without the lock */ - if (atomic_add_return(1, &dev_priv->forcewake_count) == 1) + spin_lock_irqsave(&dev_priv->gt_lock, irqflags); + if (dev_priv->forcewake_count++ == 0) dev_priv->display.force_wake_get(dev_priv); + spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); } void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) @@ -392,10 +393,12 @@ void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) */ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) { - WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); + unsigned long irqflags; - if (atomic_dec_and_test(&dev_priv->forcewake_count)) + spin_lock_irqsave(&dev_priv->gt_lock, irqflags); + if (--dev_priv->forcewake_count == 0) dev_priv->display.force_wake_put(dev_priv); + spin_unlock_irqrestore(&dev_priv->gt_lock, 
irqflags); } void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) @@ -597,9 +600,36 @@ static int ironlake_do_reset(struct drm_device *dev, u8 flags) static int gen6_do_reset(struct drm_device *dev, u8 flags) { struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + unsigned long irqflags; - I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL); - return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); + /* Hold gt_lock across reset to prevent any register access + * with forcewake not set correctly + */ + spin_lock_irqsave(&dev_priv->gt_lock, irqflags); + + /* Reset the chip */ + + /* GEN6_GDRST is not in the gt power well, no need to check + * for fifo space for the write or forcewake the chip for + * the read + */ + I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL); + + /* Spin waiting for the device to ack the reset request */ + ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); + + /* If reset with a user forcewake, try to restore, otherwise turn it off */ + if (dev_priv->forcewake_count) + dev_priv->display.force_wake_get(dev_priv); + else + dev_priv->display.force_wake_put(dev_priv); + + /* Restore fifo count */ + dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); + + spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); + return ret; } /** @@ -643,9 +673,6 @@ int i915_reset(struct drm_device *dev, u8 flags) case 7: case 6: ret = gen6_do_reset(dev, flags); - /* If reset with a user forcewake, try to restore */ - if (atomic_read(&dev_priv->forcewake_count)) - __gen6_gt_force_wake_get(dev_priv); break; case 5: ret = ironlake_do_reset(dev, flags); @@ -927,9 +954,14 @@ MODULE_LICENSE("GPL and additional rights"); u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ u##x val = 0; \ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ - gen6_gt_force_wake_get(dev_priv); \ + unsigned long irqflags; \ + spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ + if (dev_priv->forcewake_count == 0) \ + dev_priv->display.force_wake_get(dev_priv); \ val = read##y(dev_priv->regs + reg); \ - gen6_gt_force_wake_put(dev_priv); \ + if (dev_priv->forcewake_count == 0) \ + dev_priv->display.force_wake_put(dev_priv); \ + spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ } else { \ val = read##y(dev_priv->regs + reg); \ } \ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 602bc80..9689ca3 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -288,7 +288,13 @@ typedef struct drm_i915_private { int relative_constants_mode; void __iomem *regs; - u32 gt_fifo_count; + /** gt_fifo_count and the subsequent register write are synchronized + * with dev->struct_mutex. */ + unsigned gt_fifo_count; + /** forcewake_count is protected by gt_lock */ + unsigned forcewake_count; + /** gt_lock is also taken in irq contexts. 
*/ + struct spinlock gt_lock; struct intel_gmbus { struct i2c_adapter adapter; @@ -741,8 +747,6 @@ typedef struct drm_i915_private { struct drm_property *broadcast_rgb_property; struct drm_property *force_audio_property; - - atomic_t forcewake_count; } drm_i915_private_t; enum i915_cache_level { diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 5d433fc..5bd4361 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1751,7 +1751,8 @@ static void ironlake_irq_preinstall(struct drm_device *dev) INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work); I915_WRITE(HWSTAM, 0xeffe); - if (IS_GEN6(dev) || IS_GEN7(dev)) { + + if (IS_GEN6(dev)) { /* Workaround stalls observed on Sandy Bridge GPUs by * making the blitter command streamer generate a * write to the Hardware Status Page for diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 7886e4f..2b5eb22 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -28,14 +28,19 @@ #include "drm.h" #include "i915_drm.h" #include "intel_drv.h" +#include "i915_reg.h" static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) { struct drm_i915_private *dev_priv = dev->dev_private; u32 dpll_reg; + /* On IVB, 3rd pipe shares PLL with another one */ + if (pipe > 1) + return false; + if (HAS_PCH_SPLIT(dev)) - dpll_reg = (pipe == PIPE_A) ? _PCH_DPLL_A : _PCH_DPLL_B; + dpll_reg = PCH_DPLL(pipe); else dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B; @@ -822,7 +827,7 @@ int i915_save_state(struct drm_device *dev) if (IS_IRONLAKE_M(dev)) ironlake_disable_drps(dev); - if (IS_GEN6(dev)) + if (INTEL_INFO(dev)->gen >= 6) gen6_disable_rps(dev); /* Cache mode state */ @@ -881,7 +886,7 @@ int i915_restore_state(struct drm_device *dev) intel_init_emon(dev); } - if (IS_GEN6(dev)) { + if (INTEL_INFO(dev)->gen >= 6) { gen6_enable_rps(dev_priv); gen6_update_ring_freq(dev_priv); } diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 8af3735..dbda6e3 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -467,8 +467,12 @@ struct edp_link_params { struct bdb_edp { struct edp_power_seq power_seqs[16]; u32 color_depth; - u32 sdrrs_msa_timing_delay; struct edp_link_params link_params[16]; + u32 sdrrs_msa_timing_delay; + + /* ith bit indicates enabled/disabled for (i+1)th panel */ + u16 edp_s3d_feature; + u16 edp_t3_optimization; } __attribute__ ((packed)); void intel_setup_bios(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index fee0ad0..dd729d4 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -24,6 +24,7 @@ * Eric Anholt <eric@anholt.net> */ +#include <linux/dmi.h> #include <linux/i2c.h> #include <linux/slab.h> #include "drmP.h" @@ -540,6 +541,24 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = { .destroy = intel_encoder_destroy, }; +static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id) +{ + DRM_DEBUG_KMS("Skipping CRT initialization for %s\n", id->ident); + return 1; +} + +static const struct dmi_system_id intel_no_crt[] = { + { + .callback = intel_no_crt_dmi_callback, + .ident = "ACER ZGB", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ACER"), + DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"), + }, + }, + { } +}; + void intel_crt_init(struct drm_device *dev) { struct drm_connector *connector; @@ -547,6 +566,10 @@ void intel_crt_init(struct 
drm_device *dev) struct intel_connector *intel_connector; struct drm_i915_private *dev_priv = dev->dev_private; + /* Skip machines without VGA that falsely report hotplug events */ + if (dmi_check_system(intel_no_crt)) + return; + crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL); if (!crt) return; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 2a3f707..b3b51c4 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5808,12 +5808,15 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, if (is_lvds) { temp = I915_READ(PCH_LVDS); temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; - if (HAS_PCH_CPT(dev)) + if (HAS_PCH_CPT(dev)) { + temp &= ~PORT_TRANS_SEL_MASK; temp |= PORT_TRANS_SEL_CPT(pipe); - else if (pipe == 1) - temp |= LVDS_PIPEB_SELECT; - else - temp &= ~LVDS_PIPEB_SELECT; + } else { + if (pipe == 1) + temp |= LVDS_PIPEB_SELECT; + else + temp &= ~LVDS_PIPEB_SELECT; + } /* set the corresponsding LVDS_BORDER bit */ temp |= dev_priv->lvds_border_bits; @@ -9025,12 +9028,9 @@ void intel_modeset_init(struct drm_device *dev) for (i = 0; i < dev_priv->num_pipe; i++) { intel_crtc_init(dev, i); - if (HAS_PCH_SPLIT(dev)) { - ret = intel_plane_init(dev, i); - if (ret) - DRM_ERROR("plane %d init failed: %d\n", - i, ret); - } + ret = intel_plane_init(dev, i); + if (ret) + DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret); } /* Just disable it once at startup */ diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index e441911..798f6e1 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -708,6 +708,14 @@ static const struct dmi_system_id intel_no_lvds[] = { }, }, { + .callback = intel_no_lvds_dmi_callback, + .ident = "Clientron E830", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Clientron"), + DMI_MATCH(DMI_PRODUCT_NAME, "E830"), + }, + }, + { .callback = intel_no_lvds_dmi_callback, .ident = "Asus EeeBox PC EB1007", .matches = { diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 77e729d..1ab842c 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -636,6 +636,19 @@ render_ring_add_request(struct intel_ring_buffer *ring, } static u32 +gen6_ring_get_seqno(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + + /* Workaround to force correct ordering between irq and seqno writes on + * ivb (and maybe also on snb) by reading from a CS register (like + * ACTHD) before reading the status page. */ + if (IS_GEN7(dev)) + intel_ring_get_active_head(ring); + return intel_read_status_page(ring, I915_GEM_HWS_INDEX); +} + +static u32 ring_get_seqno(struct intel_ring_buffer *ring) { return intel_read_status_page(ring, I915_GEM_HWS_INDEX); @@ -792,17 +805,6 @@ ring_add_request(struct intel_ring_buffer *ring, } static bool -gen7_blt_ring_get_irq(struct intel_ring_buffer *ring) -{ - /* The BLT ring on IVB appears to have broken synchronization - * between the seqno write and the interrupt, so that the - * interrupt appears first. Returning false here makes - * i915_wait_request() do a polling loop, instead. 
- */ - return false; -} - -static bool gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) { struct drm_device *dev = ring->dev; @@ -811,6 +813,12 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) if (!dev->irq_enabled) return false; + /* It looks like we need to prevent the gt from suspending while waiting + * for an notifiy irq, otherwise irqs seem to get lost on at least the + * blt/bsd rings on ivb. */ + if (IS_GEN7(dev)) + gen6_gt_force_wake_get(dev_priv); + spin_lock(&ring->irq_lock); if (ring->irq_refcount++ == 0) { ring->irq_mask &= ~rflag; @@ -835,6 +843,9 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) ironlake_disable_irq(dev_priv, gflag); } spin_unlock(&ring->irq_lock); + + if (IS_GEN7(dev)) + gen6_gt_force_wake_put(dev_priv); } static bool @@ -1341,7 +1352,7 @@ static const struct intel_ring_buffer gen6_bsd_ring = { .write_tail = gen6_bsd_ring_write_tail, .flush = gen6_ring_flush, .add_request = gen6_add_request, - .get_seqno = ring_get_seqno, + .get_seqno = gen6_ring_get_seqno, .irq_get = gen6_bsd_ring_get_irq, .irq_put = gen6_bsd_ring_put_irq, .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, @@ -1476,7 +1487,7 @@ static const struct intel_ring_buffer gen6_blt_ring = { .write_tail = ring_write_tail, .flush = blt_ring_flush, .add_request = gen6_add_request, - .get_seqno = ring_get_seqno, + .get_seqno = gen6_ring_get_seqno, .irq_get = blt_ring_get_irq, .irq_put = blt_ring_put_irq, .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, @@ -1499,6 +1510,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) ring->flush = gen6_render_ring_flush; ring->irq_get = gen6_render_ring_get_irq; ring->irq_put = gen6_render_ring_put_irq; + ring->get_seqno = gen6_ring_get_seqno; } else if (IS_GEN5(dev)) { ring->add_request = pc_render_add_request; ring->get_seqno = pc_render_get_seqno; @@ -1577,8 +1589,5 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) *ring = gen6_blt_ring; - if (IS_GEN7(dev)) - ring->irq_get = gen7_blt_ring_get_irq; - return intel_init_ring_buffer(dev, ring); } diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index f7b9268..e334ec3 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1066,15 +1066,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, /* Set the SDVO control regs. */ if (INTEL_INFO(dev)->gen >= 4) { - sdvox = 0; + /* The real mode polarity is set by the SDVO commands, using + * struct intel_sdvo_dtd. 
*/ + sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH; if (intel_sdvo->is_hdmi) sdvox |= intel_sdvo->color_range; if (INTEL_INFO(dev)->gen < 5) sdvox |= SDVO_BORDER_ENABLE; - if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) - sdvox |= SDVO_VSYNC_ACTIVE_HIGH; - if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) - sdvox |= SDVO_HSYNC_ACTIVE_HIGH; } else { sdvox = I915_READ(intel_sdvo->sdvo_reg); switch (intel_sdvo->sdvo_reg) { diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index d13989f..2288abf 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -466,10 +466,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, mutex_lock(&dev->struct_mutex); ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); - if (ret) { - DRM_ERROR("failed to pin object\n"); + if (ret) goto out_unlock; - } intel_plane->obj = obj; @@ -632,10 +630,8 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe) unsigned long possible_crtcs; int ret; - if (!(IS_GEN6(dev) || IS_GEN7(dev))) { - DRM_ERROR("new plane code only for SNB+\n"); + if (!(IS_GEN6(dev) || IS_GEN7(dev))) return -ENODEV; - } intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL); if (!intel_plane) diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index f3c6a9a..1571be3 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -417,7 +417,7 @@ static const struct tv_mode tv_modes[] = { { .name = "NTSC-M", .clock = 108000, - .refresh = 29970, + .refresh = 59940, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */ @@ -460,7 +460,7 @@ static const struct tv_mode tv_modes[] = { { .name = "NTSC-443", .clock = 108000, - .refresh = 29970, + .refresh = 59940, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */ @@ -502,7 +502,7 @@ static const struct tv_mode tv_modes[] = { { .name = "NTSC-J", .clock = 108000, - .refresh = 29970, + .refresh = 59940, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, @@ -545,7 +545,7 @@ static const struct tv_mode tv_modes[] = { { .name = "PAL-M", .clock = 108000, - .refresh = 29970, + .refresh = 59940, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, @@ -589,7 +589,7 @@ static const struct tv_mode tv_modes[] = { /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ .name = "PAL-N", .clock = 108000, - .refresh = 25000, + .refresh = 50000, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, @@ -634,7 +634,7 @@ static const struct tv_mode tv_modes[] = { /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ .name = "PAL", .clock = 108000, - .refresh = 25000, + .refresh = 50000, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, @@ -674,78 +674,6 @@ static const struct tv_mode tv_modes[] = { .filter_table = filter_table, }, { - .name = "480p@59.94Hz", - .clock = 107520, - .refresh = 59940, - .oversample = TV_OVERSAMPLE_4X, - .component_only = 1, - - .hsync_end = 64, .hblank_end = 122, - .hblank_start = 842, .htotal = 857, - - .progressive = true, .trilevel_sync = false, - - .vsync_start_f1 = 12, .vsync_start_f2 = 12, - .vsync_len = 12, - - .veq_ena = false, - - .vi_end_f1 = 44, .vi_end_f2 = 44, - .nbr_end = 479, - - .burst_ena = false, - - .filter_table = filter_table, - }, - { - .name = "480p@60Hz", - .clock = 107520, - .refresh = 60000, - .oversample = TV_OVERSAMPLE_4X, - .component_only = 1, - - .hsync_end = 64, 
.hblank_end = 122, - .hblank_start = 842, .htotal = 856, - - .progressive = true, .trilevel_sync = false, - - .vsync_start_f1 = 12, .vsync_start_f2 = 12, - .vsync_len = 12, - - .veq_ena = false, - - .vi_end_f1 = 44, .vi_end_f2 = 44, - .nbr_end = 479, - - .burst_ena = false, - - .filter_table = filter_table, - }, - { - .name = "576p", - .clock = 107520, - .refresh = 50000, - .oversample = TV_OVERSAMPLE_4X, - .component_only = 1, - - .hsync_end = 64, .hblank_end = 139, - .hblank_start = 859, .htotal = 863, - - .progressive = true, .trilevel_sync = false, - - .vsync_start_f1 = 10, .vsync_start_f2 = 10, - .vsync_len = 10, - - .veq_ena = false, - - .vi_end_f1 = 48, .vi_end_f2 = 48, - .nbr_end = 575, - - .burst_ena = false, - - .filter_table = filter_table, - }, - { .name = "720p@60Hz", .clock = 148800, .refresh = 60000, @@ -770,30 +698,6 @@ static const struct tv_mode tv_modes[] = { .filter_table = filter_table, }, { - .name = "720p@59.94Hz", - .clock = 148800, - .refresh = 59940, - .oversample = TV_OVERSAMPLE_2X, - .component_only = 1, - - .hsync_end = 80, .hblank_end = 300, - .hblank_start = 1580, .htotal = 1651, - - .progressive = true, .trilevel_sync = true, - - .vsync_start_f1 = 10, .vsync_start_f2 = 10, - .vsync_len = 10, - - .veq_ena = false, - - .vi_end_f1 = 29, .vi_end_f2 = 29, - .nbr_end = 719, - - .burst_ena = false, - - .filter_table = filter_table, - }, - { .name = "720p@50Hz", .clock = 148800, .refresh = 50000, @@ -821,7 +725,7 @@ static const struct tv_mode tv_modes[] = { { .name = "1080i@50Hz", .clock = 148800, - .refresh = 25000, + .refresh = 50000, .oversample = TV_OVERSAMPLE_2X, .component_only = 1, @@ -847,7 +751,7 @@ static const struct tv_mode tv_modes[] = { { .name = "1080i@60Hz", .clock = 148800, - .refresh = 30000, + .refresh = 60000, .oversample = TV_OVERSAMPLE_2X, .component_only = 1, @@ -870,32 +774,6 @@ static const struct tv_mode tv_modes[] = { .filter_table = filter_table, }, - { - .name = "1080i@59.94Hz", - .clock = 148800, - .refresh = 29970, - .oversample = TV_OVERSAMPLE_2X, - .component_only = 1, - - .hsync_end = 88, .hblank_end = 235, - .hblank_start = 2155, .htotal = 2201, - - .progressive = false, .trilevel_sync = true, - - .vsync_start_f1 = 4, .vsync_start_f2 = 5, - .vsync_len = 10, - - .veq_ena = true, .veq_start_f1 = 4, - .veq_start_f2 = 4, .veq_len = 10, - - - .vi_end_f1 = 21, .vi_end_f2 = 22, - .nbr_end = 539, - - .burst_ena = false, - - .filter_table = filter_table, - }, }; static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h index 1e382ad..a37c31e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.h +++ b/drivers/gpu/drm/nouveau/nouveau_bios.h @@ -54,9 +54,10 @@ struct bit_entry { int bit_table(struct drm_device *, u8 id, struct bit_entry *); enum dcb_gpio_tag { - DCB_GPIO_TVDAC0 = 0xc, + DCB_GPIO_PANEL_POWER = 0x01, + DCB_GPIO_TVDAC0 = 0x0c, DCB_GPIO_TVDAC1 = 0x2d, - DCB_GPIO_PWM_FAN = 0x9, + DCB_GPIO_PWM_FAN = 0x09, DCB_GPIO_FAN_SENSE = 0x3d, DCB_GPIO_UNUSED = 0xff }; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 724b41a..ec54364 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -812,6 +812,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) struct nouveau_bo *nvbo = nouveau_bo(bo); struct nouveau_vma *vma; + /* ttm can now (stupidly) pass the driver bos it didn't create... 
*/ + if (bo->destroy != nouveau_bo_del_ttm) + return; + list_for_each_entry(vma, &nvbo->vma_list, head) { if (new_mem && new_mem->mem_type == TTM_PL_VRAM) { nouveau_vm_map(vma, new_mem->mm_node); diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 3cb52bc5..795a9e3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -219,6 +219,16 @@ nouveau_display_init(struct drm_device *dev) if (ret) return ret; + /* power on internal panel if it's not already. the init tables of + * some vbios default this to off for some reason, causing the + * panel to not work after resume + */ + if (nouveau_gpio_func_get(dev, DCB_GPIO_PANEL_POWER) == 0) { + nouveau_gpio_func_set(dev, DCB_GPIO_PANEL_POWER, true); + msleep(300); + } + + /* enable polling for external displays */ drm_kms_helper_poll_enable(dev); /* enable hotplug interrupts */ diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index e4a7cfe..81d7962 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -124,7 +124,7 @@ MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)\n"); int nouveau_ctxfw; module_param_named(ctxfw, nouveau_ctxfw, int, 0400); -MODULE_PARM_DESC(ctxfw, "Santise DCB table according to MXM-SIS\n"); +MODULE_PARM_DESC(mxmdcb, "Santise DCB table according to MXM-SIS\n"); int nouveau_mxmdcb = 1; module_param_named(mxmdcb, nouveau_mxmdcb, int, 0400); diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 5f0bc57..7ce3fde 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -380,6 +380,25 @@ retry: } static int +validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo) +{ + struct nouveau_fence *fence = NULL; + int ret = 0; + + spin_lock(&nvbo->bo.bdev->fence_lock); + if (nvbo->bo.sync_obj) + fence = nouveau_fence_ref(nvbo->bo.sync_obj); + spin_unlock(&nvbo->bo.bdev->fence_lock); + + if (fence) { + ret = nouveau_fence_sync(fence, chan); + nouveau_fence_unref(&fence); + } + + return ret; +} + +static int validate_list(struct nouveau_channel *chan, struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr) { @@ -393,7 +412,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, list_for_each_entry(nvbo, list, entry) { struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; - ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan); + ret = validate_sync(chan, nvbo); if (unlikely(ret)) { NV_ERROR(dev, "fail pre-validate sync\n"); return ret; @@ -416,7 +435,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, return ret; } - ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan); + ret = validate_sync(chan, nvbo); if (unlikely(ret)) { NV_ERROR(dev, "fail post-validate sync\n"); return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_mxm.c b/drivers/gpu/drm/nouveau/nouveau_mxm.c index 8bccddf..e5a64f0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mxm.c +++ b/drivers/gpu/drm/nouveau/nouveau_mxm.c @@ -656,7 +656,16 @@ nouveau_mxm_init(struct drm_device *dev) if (mxm_shadow(dev, mxm[0])) { MXM_MSG(dev, "failed to locate valid SIS\n"); +#if 0 + /* we should, perhaps, fall back to some kind of limited + * mode here if the x86 vbios hasn't already done the + * work for us (so we prevent loading with completely + * whacked vbios tables). 
+ */ return -EINVAL; +#else + return 0; +#endif } MXM_MSG(dev, "MXMS Version %d.%d\n", diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c index 0393721..ec5481d 100644 --- a/drivers/gpu/drm/nouveau/nv50_pm.c +++ b/drivers/gpu/drm/nouveau/nv50_pm.c @@ -495,9 +495,9 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv50_pm_state *info; struct pll_lims pll; - int ret = -EINVAL; + int clk, ret = -EINVAL; int N, M, P1, P2; - u32 clk, out; + u32 out; if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 0fda830..742f17f 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -355,15 +355,12 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc, atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } -static void atombios_disable_ss(struct drm_crtc *crtc) +static void atombios_disable_ss(struct radeon_device *rdev, int pll_id) { - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - struct drm_device *dev = crtc->dev; - struct radeon_device *rdev = dev->dev_private; u32 ss_cntl; if (ASIC_IS_DCE4(rdev)) { - switch (radeon_crtc->pll_id) { + switch (pll_id) { case ATOM_PPLL1: ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL); ss_cntl &= ~EVERGREEN_PxPLL_SS_EN; @@ -379,7 +376,7 @@ static void atombios_disable_ss(struct drm_crtc *crtc) return; } } else if (ASIC_IS_AVIVO(rdev)) { - switch (radeon_crtc->pll_id) { + switch (pll_id) { case ATOM_PPLL1: ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL); ss_cntl &= ~1; @@ -406,13 +403,11 @@ union atom_enable_ss { ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3; }; -static void atombios_crtc_program_ss(struct drm_crtc *crtc, +static void atombios_crtc_program_ss(struct radeon_device *rdev, int enable, int pll_id, struct radeon_atom_ss *ss) { - struct drm_device *dev = crtc->dev; - struct radeon_device *rdev = dev->dev_private; int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL); union atom_enable_ss args; @@ -479,7 +474,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc, } else if (ASIC_IS_AVIVO(rdev)) { if ((enable == ATOM_DISABLE) || (ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK)) { - atombios_disable_ss(crtc); + atombios_disable_ss(rdev, pll_id); return; } args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); @@ -491,7 +486,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc, } else { if ((enable == ATOM_DISABLE) || (ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK)) { - atombios_disable_ss(crtc); + atombios_disable_ss(rdev, pll_id); return; } args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); @@ -523,6 +518,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, int encoder_mode = 0; u32 dp_clock = mode->clock; int bpc = 8; + bool is_duallink = false; /* reset the pll flags */ pll->flags = 0; @@ -557,6 +553,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, if (connector && connector->display_info.bpc) bpc = connector->display_info.bpc; encoder_mode = atombios_get_encoder_mode(encoder); + is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock); if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) { if (connector) { @@ -652,7 +649,7 @@ static 
u32 atombios_adjust_pll(struct drm_crtc *crtc, if (dig->coherent_mode) args.v3.sInput.ucDispPllConfig |= DISPPLL_CONFIG_COHERENT_MODE; - if (mode->clock > 165000) + if (is_duallink) args.v3.sInput.ucDispPllConfig |= DISPPLL_CONFIG_DUAL_LINK; } @@ -702,11 +699,9 @@ union set_pixel_clock { /* on DCE5, make sure the voltage is high enough to support the * required disp clk. */ -static void atombios_crtc_set_dcpll(struct drm_crtc *crtc, +static void atombios_crtc_set_dcpll(struct radeon_device *rdev, u32 dispclk) { - struct drm_device *dev = crtc->dev; - struct radeon_device *rdev = dev->dev_private; u8 frev, crev; int index; union set_pixel_clock args; @@ -996,7 +991,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, &ref_div, &post_div); - atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss); + atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, &ss); atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, encoder_mode, radeon_encoder->encoder_id, mode->clock, @@ -1019,7 +1014,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode ss.step = step_size; } - atombios_crtc_program_ss(crtc, ATOM_ENABLE, radeon_crtc->pll_id, &ss); + atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, &ss); } } @@ -1189,7 +1184,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1); WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset, - crtc->mode.vdisplay); + target_fb->height); x &= ~3; y &= ~1; WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset, @@ -1358,7 +1353,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1); WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset, - crtc->mode.vdisplay); + target_fb->height); x &= ~3; y &= ~1; WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset, @@ -1494,6 +1489,24 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) } +void radeon_atom_dcpll_init(struct radeon_device *rdev) +{ + /* always set DCPLL */ + if (ASIC_IS_DCE4(rdev)) { + struct radeon_atom_ss ss; + bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss, + ASIC_INTERNAL_SS_ON_DCPLL, + rdev->clock.default_dispclk); + if (ss_enabled) + atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, &ss); + /* XXX: DCE5, make sure voltage, dispclk is high enough */ + atombios_crtc_set_dcpll(rdev, rdev->clock.default_dispclk); + if (ss_enabled) + atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, &ss); + } + +} + int atombios_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, @@ -1515,19 +1528,6 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, } } - /* always set DCPLL */ - if (ASIC_IS_DCE4(rdev)) { - struct radeon_atom_ss ss; - bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss, - ASIC_INTERNAL_SS_ON_DCPLL, - rdev->clock.default_dispclk); - if (ss_enabled) - atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss); - /* XXX: DCE5, make sure voltage, dispclk is high enough */ - atombios_crtc_set_dcpll(crtc, rdev->clock.default_dispclk); - if (ss_enabled) - atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss); - } atombios_crtc_set_pll(crtc, adjusted_mode); if (ASIC_IS_DCE4(rdev)) diff --git a/drivers/gpu/drm/radeon/atombios_dp.c 
b/drivers/gpu/drm/radeon/atombios_dp.c index 6fb335a..552b436 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -549,8 +549,8 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector) return false; } -static void radeon_dp_set_panel_mode(struct drm_encoder *encoder, - struct drm_connector *connector) +int radeon_dp_get_panel_mode(struct drm_encoder *encoder, + struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; @@ -558,28 +558,33 @@ static void radeon_dp_set_panel_mode(struct drm_encoder *encoder, int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; if (!ASIC_IS_DCE4(rdev)) - return; + return panel_mode; if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == ENCODER_OBJECT_ID_NUTMEG) panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == - ENCODER_OBJECT_ID_TRAVIS) - panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; - else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { + ENCODER_OBJECT_ID_TRAVIS) { + u8 id[6]; + int i; + for (i = 0; i < 6; i++) + id[i] = radeon_read_dpcd_reg(radeon_connector, 0x503 + i); + if (id[0] == 0x73 && + id[1] == 0x69 && + id[2] == 0x76 && + id[3] == 0x61 && + id[4] == 0x72 && + id[5] == 0x54) + panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; + else + panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; + } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); if (tmp & 1) panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; } - atombios_dig_encoder_setup(encoder, - ATOM_ENCODER_CMD_SETUP_PANEL_MODE, - panel_mode); - - if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) && - (panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) { - radeon_write_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_SET, 1); - } + return panel_mode; } void radeon_dp_set_link_config(struct drm_connector *connector, @@ -717,6 +722,8 @@ static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp) static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info) { + struct radeon_encoder *radeon_encoder = to_radeon_encoder(dp_info->encoder); + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; u8 tmp; /* power up the sink */ @@ -732,7 +739,10 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info) radeon_write_dpcd_reg(dp_info->radeon_connector, DP_DOWNSPREAD_CTRL, 0); - radeon_dp_set_panel_mode(dp_info->encoder, dp_info->connector); + if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) && + (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) { + radeon_write_dpcd_reg(dp_info->radeon_connector, DP_EDP_CONFIGURATION_SET, 1); + } /* set the lane count on the sink */ tmp = dp_info->dp_lane_count; diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index f1f06ca..b88c460 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -57,22 +57,6 @@ static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder) } } -static struct drm_connector * -radeon_get_connector_for_encoder_init(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct drm_connector *connector; - struct radeon_connector *radeon_connector; - - 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - radeon_connector = to_radeon_connector(connector); - if (radeon_encoder->devices & radeon_connector->devices) - return connector; - } - return NULL; -} - static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -253,7 +237,7 @@ atombios_dvo_setup(struct drm_encoder *encoder, int action) /* R4xx, R5xx */ args.ext_tmds.sXTmdsEncoder.ucEnable = action; - if (radeon_encoder->pixel_clock > 165000) + if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL; args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB; @@ -265,7 +249,7 @@ atombios_dvo_setup(struct drm_encoder *encoder, int action) /* DFP1, CRT1, TV1 depending on the type of port */ args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX; - if (radeon_encoder->pixel_clock > 165000) + if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL; break; case 3: @@ -349,7 +333,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) } else { if (dig->linkb) args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; - if (radeon_encoder->pixel_clock > 165000) + if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; /*if (pScrn->rgbBits == 8) */ args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB; @@ -388,7 +372,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) } else { if (dig->linkb) args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; - if (radeon_encoder->pixel_clock > 165000) + if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; } break; @@ -432,7 +416,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) switch (connector->connector_type) { case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ - if (drm_detect_monitor_audio(radeon_connector->edid) && + if (drm_detect_hdmi_monitor(radeon_connector->edid) && radeon_audio) return ATOM_ENCODER_MODE_HDMI; else if (radeon_connector->use_digital) @@ -443,7 +427,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) case DRM_MODE_CONNECTOR_DVID: case DRM_MODE_CONNECTOR_HDMIA: default: - if (drm_detect_monitor_audio(radeon_connector->edid) && + if (drm_detect_hdmi_monitor(radeon_connector->edid) && radeon_audio) return ATOM_ENCODER_MODE_HDMI; else @@ -457,7 +441,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) return ATOM_ENCODER_MODE_DP; - else if (drm_detect_monitor_audio(radeon_connector->edid) && + else if (drm_detect_hdmi_monitor(radeon_connector->edid) && radeon_audio) return ATOM_ENCODER_MODE_HDMI; else @@ -587,7 +571,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) args.v1.ucLaneNum = dp_lane_count; - else if (radeon_encoder->pixel_clock > 165000) + else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v1.ucLaneNum = 8; else args.v1.ucLaneNum = 4; @@ -622,7 +606,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) 
args.v3.ucLaneNum = dp_lane_count; - else if (radeon_encoder->pixel_clock > 165000) + else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v3.ucLaneNum = 8; else args.v3.ucLaneNum = 4; @@ -662,7 +646,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) args.v4.ucLaneNum = dp_lane_count; - else if (radeon_encoder->pixel_clock > 165000) + else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v4.ucLaneNum = 8; else args.v4.ucLaneNum = 4; @@ -806,7 +790,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t if (is_dp) args.v1.usPixelClock = cpu_to_le16(dp_clock / 10); - else if (radeon_encoder->pixel_clock > 165000) + else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); else args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); @@ -821,7 +805,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t if ((rdev->flags & RADEON_IS_IGP) && (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) { - if (is_dp || (radeon_encoder->pixel_clock <= 165000)) { + if (is_dp || + !radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) { if (igp_lane_info & 0x1) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; else if (igp_lane_info & 0x2) @@ -848,7 +833,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { if (dig->coherent_mode) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; - if (radeon_encoder->pixel_clock > 165000) + if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK; } break; @@ -863,7 +848,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t if (is_dp) args.v2.usPixelClock = cpu_to_le16(dp_clock / 10); - else if (radeon_encoder->pixel_clock > 165000) + else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); else args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); @@ -891,7 +876,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { if (dig->coherent_mode) args.v2.acConfig.fCoherentMode = 1; - if (radeon_encoder->pixel_clock > 165000) + if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v2.acConfig.fDualLinkConnector = 1; } break; @@ -906,7 +891,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t if (is_dp) args.v3.usPixelClock = cpu_to_le16(dp_clock / 10); - else if (radeon_encoder->pixel_clock > 165000) + else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v3.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); else args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); @@ -914,7 +899,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t if (is_dp) args.v3.ucLaneNum = dp_lane_count; - else if (radeon_encoder->pixel_clock > 165000) + else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v3.ucLaneNum = 8; else args.v3.ucLaneNum = 4; @@ -951,7 +936,7 @@ 
atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { if (dig->coherent_mode) args.v3.acConfig.fCoherentMode = 1; - if (radeon_encoder->pixel_clock > 165000) + if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v3.acConfig.fDualLinkConnector = 1; } break; @@ -966,7 +951,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t if (is_dp) args.v4.usPixelClock = cpu_to_le16(dp_clock / 10); - else if (radeon_encoder->pixel_clock > 165000) + else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v4.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); else args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); @@ -974,7 +959,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t if (is_dp) args.v4.ucLaneNum = dp_lane_count; - else if (radeon_encoder->pixel_clock > 165000) + else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v4.ucLaneNum = 8; else args.v4.ucLaneNum = 4; @@ -1014,7 +999,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { if (dig->coherent_mode) args.v4.acConfig.fCoherentMode = 1; - if (radeon_encoder->pixel_clock > 165000) + if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v4.acConfig.fDualLinkConnector = 1; } break; @@ -1137,7 +1122,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder, if (dp_clock == 270000) args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; args.v1.sDigEncoder.ucLaneNum = dp_lane_count; - } else if (radeon_encoder->pixel_clock > 165000) + } else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v1.sDigEncoder.ucLaneNum = 8; else args.v1.sDigEncoder.ucLaneNum = 4; @@ -1156,7 +1141,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder, else if (dp_clock == 540000) args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ; args.v3.sExtEncoder.ucLaneNum = dp_lane_count; - } else if (radeon_encoder->pixel_clock > 165000) + } else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v3.sExtEncoder.ucLaneNum = 8; else args.v3.sExtEncoder.ucLaneNum = 4; @@ -1341,7 +1326,8 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) switch (mode) { case DRM_MODE_DPMS_ON: /* some early dce3.2 boards have a bug in their transmitter control table */ - if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730)) + if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730) || + ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); else atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); @@ -1351,8 +1337,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) ATOM_TRANSMITTER_ACTION_POWER_ON); radeon_dig_connector->edp_on = true; } - if (ASIC_IS_DCE4(rdev)) - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); radeon_dp_link_train(encoder, connector); if (ASIC_IS_DCE4(rdev)) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); @@ -1363,7 +1347,10 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - 
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); + else + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { if (ASIC_IS_DCE4(rdev)) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); @@ -1810,7 +1797,21 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - if (ASIC_IS_DCE4(rdev)) { + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + + if (!connector) + dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; + else + dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector); + + /* setup and enable the encoder */ + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); + atombios_dig_encoder_setup(encoder, + ATOM_ENCODER_CMD_SETUP_PANEL_MODE, + dig->panel_mode); + } else if (ASIC_IS_DCE4(rdev)) { /* disable the transmitter */ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); /* setup and enable the encoder */ diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 636660f..ae09fe8 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1455,6 +1455,7 @@ int evergreen_cp_resume(struct radeon_device *rdev) #endif WREG32(CP_RB_CNTL, tmp); WREG32(CP_SEM_WAIT_TIMER, 0x0); + WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); /* Set the write pointer delay */ WREG32(CP_RB_WPTR_DELAY, 0); diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index b502216..74713d4 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -108,6 +108,7 @@ #define CP_RB_WPTR_ADDR_HI 0xC11C #define CP_RB_WPTR_DELAY 0x8704 #define CP_SEM_WAIT_TIMER 0x85BC +#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x85C8 #define CP_DEBUG 0xC1FC diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 3211372..db09065 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -1219,6 +1219,7 @@ int cayman_cp_resume(struct radeon_device *rdev) RREG32(GRBM_SOFT_RESET); WREG32(CP_SEM_WAIT_TIMER, 0x0); + WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); /* Set the write pointer delay */ WREG32(CP_RB_WPTR_DELAY, 0); diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h index f9df2a6..9a7f3b6 100644 --- a/drivers/gpu/drm/radeon/nid.h +++ b/drivers/gpu/drm/radeon/nid.h @@ -222,6 +222,7 @@ #define SCRATCH_UMSK 0x8540 #define SCRATCH_ADDR 0x8544 #define CP_SEM_WAIT_TIMER 0x85BC +#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x85C8 #define CP_COHER_CNTL2 0x85E8 #define CP_ME_CNTL 0x86D8 #define CP_ME_HALT (1 << 28) diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index d996f43..accc032 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -468,27 +468,42 @@ set_default_state(struct radeon_device *rdev) radeon_ring_write(ring, sq_stack_resource_mgmt_2); } +#define I2F_MAX_BITS 15 +#define I2F_MAX_INPUT ((1 << I2F_MAX_BITS) - 1) +#define I2F_SHIFT (24 - I2F_MAX_BITS) + +/* + * 
Converts unsigned integer into 32-bit IEEE floating point representation. + * Conversion is not universal and only works for the range from 0 + * to 2^I2F_MAX_BITS-1. Currently we only use it with inputs between + * 0 and 16384 (inclusive), so I2F_MAX_BITS=15 is enough. If necessary, + * I2F_MAX_BITS can be increased, but that will add to the loop iterations + * and slow us down. Conversion is done by shifting the input and counting + * down until the first 1 reaches bit position 23. The resulting counter + * and the shifted input are, respectively, the exponent and the fraction. + * The sign is always zero. + */ static uint32_t i2f(uint32_t input) { u32 result, i, exponent, fraction; - if ((input & 0x3fff) == 0) - result = 0; /* 0 is a special case */ + WARN_ON_ONCE(input > I2F_MAX_INPUT); + + if ((input & I2F_MAX_INPUT) == 0) + result = 0; else { - exponent = 140; /* exponent biased by 127; */ - fraction = (input & 0x3fff) << 10; /* cheat and only - handle numbers below 2^^15 */ - for (i = 0; i < 14; i++) { + exponent = 126 + I2F_MAX_BITS; + fraction = (input & I2F_MAX_INPUT) << I2F_SHIFT; + + for (i = 0; i < I2F_MAX_BITS; i++) { if (fraction & 0x800000) break; else { - fraction = fraction << 1; /* keep - shifting left until top bit = 1 */ + fraction = fraction << 1; exponent = exponent - 1; } } - result = exponent << 23 | (fraction & 0x7fffff); /* mask - off top bit; assumed 1 */ + result = exponent << 23 | (fraction & 0x7fffff); } return result; } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 73e05cb..1668ec1 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -157,6 +157,47 @@ bool radeon_get_bios(struct radeon_device *rdev); /* + * Mutex which allows recursive locking from the same process. + */ +struct radeon_mutex { + struct mutex mutex; + struct task_struct *owner; + int level; +}; + +static inline void radeon_mutex_init(struct radeon_mutex *mutex) +{ + mutex_init(&mutex->mutex); + mutex->owner = NULL; + mutex->level = 0; +} + +static inline void radeon_mutex_lock(struct radeon_mutex *mutex) +{ + if (mutex_trylock(&mutex->mutex)) { + /* The mutex was unlocked before, so it's ours now */ + mutex->owner = current; + } else if (mutex->owner != current) { + /* Another process locked the mutex, take it */ + mutex_lock(&mutex->mutex); + mutex->owner = current; + } + /* Otherwise the mutex was already locked by this process */ + + mutex->level++; +} + +static inline void radeon_mutex_unlock(struct radeon_mutex *mutex) +{ + if (--mutex->level > 0) + return; + + mutex->owner = NULL; + mutex_unlock(&mutex->mutex); +} + + +/* * Dummy page */ struct radeon_dummy_page { @@ -598,7 +639,7 @@ struct radeon_ib { * mutex protects scheduled_ibs, ready, alloc_bm */ struct radeon_ib_pool { - struct mutex mutex; + struct radeon_mutex mutex; struct radeon_sa_manager sa_manager; struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; bool ready; @@ -1355,47 +1396,6 @@ struct r600_vram_scratch { /* - * Mutex which allows recursive locking from the same process. 
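A stand-alone check of the reworked i2f() helper in r600_blit_kms.c above: the comment added by the patch describes the method (shift the masked input until bit 23 is set while decrementing a biased exponent that starts at 126 + I2F_MAX_BITS), and the harness below — an illustration for this write-up, not part of the patch — compares that method against the CPU's own IEEE-754 single-precision encoding over the whole supported input range.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define I2F_MAX_BITS 15
#define I2F_MAX_INPUT ((1 << I2F_MAX_BITS) - 1)
#define I2F_SHIFT (24 - I2F_MAX_BITS)

/* Same algorithm as the patched kernel helper, minus WARN_ON_ONCE(). */
static uint32_t i2f(uint32_t input)
{
	uint32_t i, exponent, fraction;

	if ((input & I2F_MAX_INPUT) == 0)
		return 0;
	exponent = 126 + I2F_MAX_BITS;
	fraction = (input & I2F_MAX_INPUT) << I2F_SHIFT;
	for (i = 0; i < I2F_MAX_BITS; i++) {
		if (fraction & 0x800000)
			break;
		fraction <<= 1;		/* shift until the leading 1 sits in bit 23 */
		exponent--;
	}
	return exponent << 23 | (fraction & 0x7fffff);	/* implicit 1 is dropped */
}

int main(void)
{
	uint32_t v, bits;
	float f;

	for (v = 0; v <= I2F_MAX_INPUT; v++) {
		f = (float)v;
		memcpy(&bits, &f, sizeof(bits));	/* the CPU's own encoding */
		if (i2f(v) != bits) {
			printf("mismatch at %u: 0x%08x vs 0x%08x\n",
			       (unsigned)v, (unsigned)i2f(v), (unsigned)bits);
			return 1;
		}
	}
	printf("i2f() matches IEEE-754 binary32 for 0..%d\n", I2F_MAX_INPUT);
	return 0;
}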
- */ -struct radeon_mutex { - struct mutex mutex; - struct task_struct *owner; - int level; -}; - -static inline void radeon_mutex_init(struct radeon_mutex *mutex) -{ - mutex_init(&mutex->mutex); - mutex->owner = NULL; - mutex->level = 0; -} - -static inline void radeon_mutex_lock(struct radeon_mutex *mutex) -{ - if (mutex_trylock(&mutex->mutex)) { - /* The mutex was unlocked before, so it's ours now */ - mutex->owner = current; - } else if (mutex->owner != current) { - /* Another process locked the mutex, take it */ - mutex_lock(&mutex->mutex); - mutex->owner = current; - } - /* Otherwise the mutex was already locked by this process */ - - mutex->level++; -} - -static inline void radeon_mutex_unlock(struct radeon_mutex *mutex) -{ - if (--mutex->level > 0) - return; - - mutex->owner = NULL; - mutex_unlock(&mutex->mutex); -} - - -/* * Core structure, functions and helpers. */ typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t); diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 9d95792..98724fc 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -58,7 +58,8 @@ static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios, } obj = (union acpi_object *)buffer.pointer; - memcpy(bios+offset, obj->buffer.pointer, len); + memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length); + len = obj->buffer.length; kfree(buffer.pointer); return len; } diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 229a20f..501f488 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -120,7 +120,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) ret = radeon_atrm_get_bios_chunk(rdev->bios, (i * ATRM_BIOS_PAGE), ATRM_BIOS_PAGE); - if (ret <= 0) + if (ret < ATRM_BIOS_PAGE) break; } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 0afb13b..49f7cb7 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -720,7 +720,7 @@ int radeon_device_init(struct radeon_device *rdev, /* mutex initialization are all done here so we * can recall function without having locking issues */ radeon_mutex_init(&rdev->cs_mutex); - mutex_init(&rdev->ib_pool.mutex); + radeon_mutex_init(&rdev->ib_pool.mutex); for (i = 0; i < RADEON_NUM_RINGS; ++i) mutex_init(&rdev->ring[i].mutex); mutex_init(&rdev->dc_hw_i2c_mutex); @@ -883,6 +883,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; + drm_kms_helper_poll_disable(dev); + /* turn off display hw */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); @@ -959,9 +961,11 @@ int radeon_resume_kms(struct drm_device *dev) radeon_fbdev_set_suspend(rdev, 0); console_unlock(); - /* init dig PHYs */ - if (rdev->is_atom_bios) + /* init dig PHYs, disp eng pll */ + if (rdev->is_atom_bios) { radeon_atom_encoder_init(rdev); + radeon_atom_dcpll_init(rdev); + } /* reset hpd state */ radeon_hpd_init(rdev); /* blat the mode back in */ @@ -970,6 +974,8 @@ int radeon_resume_kms(struct drm_device *dev) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); } + + drm_kms_helper_poll_enable(dev); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_display.c 
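The radeon_mutex definition relocated to the top of radeon.h above (and now also used for ib_pool.mutex) is a hand-rolled recursive lock: a task that already owns it may lock again, and only the outermost unlock releases the underlying mutex. A user-space analogue of the same logic, using pthreads purely for illustration (this is not the kernel code; the lockless owner check mirrors the kernel version's trylock fast path), shows the intended nesting behaviour:

#include <pthread.h>
#include <stdio.h>

struct recursive_lock {
	pthread_mutex_t mutex;
	pthread_t owner;
	int owned;
	int level;
};

static void rlock_lock(struct recursive_lock *l)
{
	if (pthread_mutex_trylock(&l->mutex) == 0) {
		/* The lock was free, so it's ours now */
		l->owner = pthread_self();
		l->owned = 1;
	} else if (!l->owned || !pthread_equal(l->owner, pthread_self())) {
		/* Another thread holds it, wait for it */
		pthread_mutex_lock(&l->mutex);
		l->owner = pthread_self();
		l->owned = 1;
	}
	/* Otherwise we already hold it: just go one level deeper */
	l->level++;
}

static void rlock_unlock(struct recursive_lock *l)
{
	if (--l->level > 0)
		return;
	l->owned = 0;
	pthread_mutex_unlock(&l->mutex);
}

int main(void)
{
	struct recursive_lock l;

	pthread_mutex_init(&l.mutex, NULL);
	l.owned = 0;
	l.level = 0;

	rlock_lock(&l);
	rlock_lock(&l);		/* nested acquisition by the same thread: no deadlock */
	rlock_unlock(&l);
	rlock_unlock(&l);	/* the mutex is actually released only here */
	printf("nested lock/unlock completed\n");
	return 0;
}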
b/drivers/gpu/drm/radeon/radeon_display.c index d3ffc18..8c49fef 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1305,9 +1305,11 @@ int radeon_modeset_init(struct radeon_device *rdev) return ret; } - /* init dig PHYs */ - if (rdev->is_atom_bios) + /* init dig PHYs, disp eng pll */ + if (rdev->is_atom_bios) { radeon_atom_encoder_init(rdev); + radeon_atom_dcpll_init(rdev); + } /* initialize hpd */ radeon_hpd_init(rdev); diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 4b27efa..9419c51 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -202,6 +202,22 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder) return NULL; } +struct drm_connector * +radeon_get_connector_for_encoder_init(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct drm_connector *connector; + struct radeon_connector *radeon_connector; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + radeon_connector = to_radeon_connector(connector); + if (radeon_encoder->devices & radeon_connector->devices) + return connector; + } + return NULL; +} + struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; @@ -288,3 +304,64 @@ void radeon_panel_mode_fixup(struct drm_encoder *encoder, } +bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder, + u32 pixel_clock) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct drm_connector *connector; + struct radeon_connector *radeon_connector; + struct radeon_connector_atom_dig *dig_connector; + + connector = radeon_get_connector_for_encoder(encoder); + /* if we don't have an active device yet, just use one of + * the connectors tied to the encoder. 
+ */ + if (!connector) + connector = radeon_get_connector_for_encoder_init(encoder); + radeon_connector = to_radeon_connector(connector); + + switch (connector->connector_type) { + case DRM_MODE_CONNECTOR_DVII: + case DRM_MODE_CONNECTOR_HDMIB: + if (radeon_connector->use_digital) { + /* HDMI 1.3 supports up to 340 Mhz over single link */ + if (ASIC_IS_DCE3(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) { + if (pixel_clock > 340000) + return true; + else + return false; + } else { + if (pixel_clock > 165000) + return true; + else + return false; + } + } else + return false; + case DRM_MODE_CONNECTOR_DVID: + case DRM_MODE_CONNECTOR_HDMIA: + case DRM_MODE_CONNECTOR_DisplayPort: + dig_connector = radeon_connector->con_priv; + if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || + (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) + return false; + else { + /* HDMI 1.3 supports up to 340 Mhz over single link */ + if (ASIC_IS_DCE3(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) { + if (pixel_clock > 340000) + return true; + else + return false; + } else { + if (pixel_clock > 165000) + return true; + else + return false; + } + } + default: + return false; + } +} + diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 7bb1b07..98a8ad6 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c @@ -897,6 +897,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, i2c->rec = *rec; i2c->adapter.owner = THIS_MODULE; i2c->adapter.class = I2C_CLASS_DDC; + i2c->adapter.dev.parent = &dev->pdev->dev; i2c->dev = dev; i2c_set_adapdata(&i2c->adapter, i2c); if (rec->mm_i2c || @@ -957,6 +958,7 @@ struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev, i2c->rec = *rec; i2c->adapter.owner = THIS_MODULE; i2c->adapter.class = I2C_CLASS_DDC; + i2c->adapter.dev.parent = &dev->pdev->dev; i2c->dev = dev; snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), "Radeon aux bus %s", name); diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index be38921..66d5fe1 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -138,6 +138,12 @@ static bool radeon_msi_ok(struct radeon_device *rdev) /* Dell RS690 only seems to work with MSIs. */ if ((rdev->pdev->device == 0x791f) && (rdev->pdev->subsystem_vendor == 0x1028) && + (rdev->pdev->subsystem_device == 0x01fc)) + return true; + + /* Dell RS690 only seems to work with MSIs. 
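Several "pixel_clock > 165000" checks elsewhere in this series are replaced by calls to the new radeon_dig_monitor_is_duallink() helper shown above. Its rule can be restated and exercised outside the driver as follows (an illustrative sketch; needs_dual_link() is an invented name, not a radeon function): DVI sinks need the second TMDS link above 165 MHz, an HDMI 1.3 capable sink driven by a DCE3 or newer part may stay single-link up to 340 MHz, and DisplayPort never uses dual link.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-alone restatement of the radeon_dig_monitor_is_duallink() rule;
 * pixel clocks are in kHz, as in the driver. */
static bool needs_dual_link(unsigned int pixel_clock, bool hdmi_13_sink, bool is_dp)
{
	if (is_dp)
		return false;			/* DP scales via lane count/link rate instead */
	if (hdmi_13_sink)
		return pixel_clock > 340000;	/* HDMI 1.3: up to 340 MHz on a single link */
	return pixel_clock > 165000;		/* plain DVI TMDS limit */
}

int main(void)
{
	assert(!needs_dual_link(154000, false, false));	/* 1920x1200@60 RB over DVI */
	assert(needs_dual_link(268500, false, false));	/* 2560x1600@60 over DVI */
	assert(!needs_dual_link(268500, true, false));	/* same mode, HDMI 1.3 sink on DCE3+ */
	assert(!needs_dual_link(268500, false, true));	/* DisplayPort */
	printf("dual-link rule checks passed\n");
	return 0;
}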
*/ + if ((rdev->pdev->device == 0x791f) && + (rdev->pdev->subsystem_vendor == 0x1028) && (rdev->pdev->subsystem_device == 0x01fd)) return true; diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 08ff857..4330e32 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -362,6 +362,7 @@ struct radeon_encoder_atom_dig { struct backlight_device *bl_dev; int dpms_mode; uint8_t backlight_level; + int panel_mode; }; struct radeon_encoder_atom_dac { @@ -466,6 +467,10 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev); extern struct drm_connector * radeon_get_connector_for_encoder(struct drm_encoder *encoder); +extern struct drm_connector * +radeon_get_connector_for_encoder_init(struct drm_encoder *encoder); +extern bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder, + u32 pixel_clock); extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder); extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector); @@ -482,8 +487,11 @@ extern void radeon_dp_link_train(struct drm_encoder *encoder, extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector); extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); +extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder, + struct drm_connector *connector); extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode); extern void radeon_atom_encoder_init(struct radeon_device *rdev); +extern void radeon_atom_dcpll_init(struct radeon_device *rdev); extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set); diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index e8bc709..30a4c50 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -109,12 +109,12 @@ int radeon_ib_get(struct radeon_device *rdev, int ring, return r; } - mutex_lock(&rdev->ib_pool.mutex); + radeon_mutex_lock(&rdev->ib_pool.mutex); idx = rdev->ib_pool.head_id; retry: if (cretry > 5) { dev_err(rdev->dev, "failed to get an ib after 5 retry\n"); - mutex_unlock(&rdev->ib_pool.mutex); + radeon_mutex_unlock(&rdev->ib_pool.mutex); radeon_fence_unref(&fence); return -ENOMEM; } @@ -139,7 +139,7 @@ retry: */ rdev->ib_pool.head_id = (1 + idx); rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1); - mutex_unlock(&rdev->ib_pool.mutex); + radeon_mutex_unlock(&rdev->ib_pool.mutex); return 0; } } @@ -158,7 +158,7 @@ retry: } idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1); } - mutex_unlock(&rdev->ib_pool.mutex); + radeon_mutex_unlock(&rdev->ib_pool.mutex); radeon_fence_unref(&fence); return r; } @@ -171,12 +171,12 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) if (tmp == NULL) { return; } - mutex_lock(&rdev->ib_pool.mutex); + radeon_mutex_lock(&rdev->ib_pool.mutex); if (tmp->fence && !tmp->fence->emitted) { radeon_sa_bo_free(rdev, &tmp->sa_bo); radeon_fence_unref(&tmp->fence); } - mutex_unlock(&rdev->ib_pool.mutex); + radeon_mutex_unlock(&rdev->ib_pool.mutex); } int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) @@ -204,22 +204,25 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) int radeon_ib_pool_init(struct radeon_device *rdev) { + struct radeon_sa_manager tmp; int i, r; - 
mutex_lock(&rdev->ib_pool.mutex); - if (rdev->ib_pool.ready) { - mutex_unlock(&rdev->ib_pool.mutex); - return 0; - } - - r = radeon_sa_bo_manager_init(rdev, &rdev->ib_pool.sa_manager, + r = radeon_sa_bo_manager_init(rdev, &tmp, RADEON_IB_POOL_SIZE*64*1024, RADEON_GEM_DOMAIN_GTT); if (r) { - mutex_unlock(&rdev->ib_pool.mutex); return r; } + radeon_mutex_lock(&rdev->ib_pool.mutex); + if (rdev->ib_pool.ready) { + radeon_mutex_unlock(&rdev->ib_pool.mutex); + radeon_sa_bo_manager_fini(rdev, &tmp); + return 0; + } + + rdev->ib_pool.sa_manager = tmp; + INIT_LIST_HEAD(&rdev->ib_pool.sa_manager.sa_bo); for (i = 0; i < RADEON_IB_POOL_SIZE; i++) { rdev->ib_pool.ibs[i].fence = NULL; rdev->ib_pool.ibs[i].idx = i; @@ -236,7 +239,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev) if (radeon_debugfs_ring_init(rdev)) { DRM_ERROR("Failed to register debugfs file for rings !\n"); } - mutex_unlock(&rdev->ib_pool.mutex); + radeon_mutex_unlock(&rdev->ib_pool.mutex); return 0; } @@ -244,7 +247,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev) { unsigned i; - mutex_lock(&rdev->ib_pool.mutex); + radeon_mutex_lock(&rdev->ib_pool.mutex); if (rdev->ib_pool.ready) { for (i = 0; i < RADEON_IB_POOL_SIZE; i++) { radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo); @@ -253,7 +256,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev) radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager); rdev->ib_pool.ready = false; } - mutex_unlock(&rdev->ib_pool.mutex); + radeon_mutex_unlock(&rdev->ib_pool.mutex); } int radeon_ib_pool_start(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c index 06da063..573220c 100644 --- a/drivers/gpu/drm/sis/sis_drv.c +++ b/drivers/gpu/drm/sis/sis_drv.c @@ -40,7 +40,6 @@ static struct pci_device_id pciidlist[] = { static int sis_driver_load(struct drm_device *dev, unsigned long chipset) { drm_sis_private_t *dev_priv; - int ret; dev_priv = kzalloc(sizeof(drm_sis_private_t), GFP_KERNEL); if (dev_priv == NULL) @@ -50,7 +49,7 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->chipset = chipset; idr_init(&dev->object_name_idr); - return ret; + return 0; } static int sis_driver_unload(struct drm_device *dev) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 2f0eab6..7c3a57d 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -404,6 +404,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, } } + if (bdev->driver->move_notify) + bdev->driver->move_notify(bo, mem); + if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem); @@ -413,11 +416,17 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, else ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem); - if (ret) - goto out_err; + if (ret) { + if (bdev->driver->move_notify) { + struct ttm_mem_reg tmp_mem = *mem; + *mem = bo->mem; + bo->mem = tmp_mem; + bdev->driver->move_notify(bo, mem); + bo->mem = *mem; + } - if (bdev->driver->move_notify) - bdev->driver->move_notify(bo, mem); + goto out_err; + } moved: if (bo->evicted) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 0af6ebd..b66ef0e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -378,7 +378,7 @@ int vmw_framebuffer_create_handle(struct drm_framebuffer *fb, unsigned int *handle) { if (handle) - 
handle = 0; + *handle = 0; return 0; } diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c index 0c33ae9..4066324 100644 --- a/drivers/hid/hid-hyperv.c +++ b/drivers/hid/hid-hyperv.c @@ -548,6 +548,7 @@ static int mousevsc_remove(struct hv_device *dev) struct mousevsc_dev *input_dev = hv_get_drvdata(dev); vmbus_close(dev->channel); + hid_hw_stop(input_dev->hid_device); hid_destroy_device(input_dev->hid_device); mousevsc_free_device(input_dev); diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c index b47e58b..acab74c 100644 --- a/drivers/hid/hid-wacom.c +++ b/drivers/hid/hid-wacom.c @@ -531,7 +531,6 @@ static int wacom_probe(struct hid_device *hdev, wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY; wdata->battery.use_for_apm = 0; - power_supply_powers(&wdata->battery, &hdev->dev); ret = power_supply_register(&hdev->dev, &wdata->battery); if (ret) { @@ -540,6 +539,8 @@ static int wacom_probe(struct hid_device *hdev, goto err_battery; } + power_supply_powers(&wdata->battery, &hdev->dev); + wdata->ac.properties = wacom_ac_props; wdata->ac.num_properties = ARRAY_SIZE(wacom_ac_props); wdata->ac.get_property = wacom_ac_get_property; @@ -547,14 +548,14 @@ static int wacom_probe(struct hid_device *hdev, wdata->ac.type = POWER_SUPPLY_TYPE_MAINS; wdata->ac.use_for_apm = 0; - power_supply_powers(&wdata->battery, &hdev->dev); - ret = power_supply_register(&hdev->dev, &wdata->ac); if (ret) { hid_warn(hdev, "can't create ac battery attribute, err: %d\n", ret); goto err_ac; } + + power_supply_powers(&wdata->ac, &hdev->dev); #endif return 0; diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c index fc253b4..cac3589 100644 --- a/drivers/hid/hid-wiimote-core.c +++ b/drivers/hid/hid-wiimote-core.c @@ -1226,14 +1226,14 @@ static int wiimote_hid_probe(struct hid_device *hdev, wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY; wdata->battery.use_for_apm = 0; - power_supply_powers(&wdata->battery, &hdev->dev); - ret = power_supply_register(&wdata->hdev->dev, &wdata->battery); if (ret) { hid_err(hdev, "Cannot register battery device\n"); goto err_battery; } + power_supply_powers(&wdata->battery, &hdev->dev); + ret = wiimote_leds_create(wdata); if (ret) goto err_free; diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 7c297d3..b1ec0e2 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -922,11 +922,11 @@ void hiddev_disconnect(struct hid_device *hid) struct hiddev *hiddev = hid->hiddev; struct usbhid_device *usbhid = hid->driver_data; + usb_deregister_dev(usbhid->intf, &hiddev_class); + mutex_lock(&hiddev->existancelock); hiddev->exist = 0; - usb_deregister_dev(usbhid->intf, &hiddev_class); - if (hiddev->open) { mutex_unlock(&hiddev->existancelock); usbhid_close(hiddev->hid); diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index cb351d3..0226040 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -474,8 +474,8 @@ config SENSORS_IT87 select HWMON_VID help If you say yes here you get support for ITE IT8705F, IT8712F, - IT8716F, IT8718F, IT8720F, IT8721F, IT8726F and IT8758E sensor - chips, and the SiS960 clone. + IT8716F, IT8718F, IT8720F, IT8721F, IT8726F, IT8728F and IT8758E + sensor chips, and the SiS960 clone. This driver can also be built as a module. If so, the module will be called it87. @@ -515,11 +515,11 @@ config SENSORS_LINEAGE will be called lineage-pem. 
config SENSORS_LM63 - tristate "National Semiconductor LM63 and LM64" + tristate "National Semiconductor LM63 and compatibles" depends on I2C help If you say yes here you get support for the National - Semiconductor LM63 and LM64 remote diode digital temperature + Semiconductor LM63, LM64, and LM96163 remote diode digital temperature sensors with integrated fan control. Such chips are found on the Tyan S4882 (Thunder K8QS Pro) motherboard, among others. diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c index e6291da..97e2cfb 100644 --- a/drivers/hwmon/adm1031.c +++ b/drivers/hwmon/adm1031.c @@ -155,7 +155,8 @@ adm1031_write_value(struct i2c_client *client, u8 reg, unsigned int value) #define TEMP_OFFSET_FROM_REG(val) TEMP_FROM_REG((val) < 0 ? \ (val) | 0x70 : (val)) -#define FAN_FROM_REG(reg, div) ((reg) ? (11250 * 60) / ((reg) * (div)) : 0) +#define FAN_FROM_REG(reg, div) ((reg) ? \ + (11250 * 60) / ((reg) * (div)) : 0) static int FAN_TO_REG(int reg, int div) { @@ -174,8 +175,8 @@ static int FAN_TO_REG(int reg, int div) (((reg) & 0x1F) | (((val) << 5) & 0xe0)) #define AUTO_TEMP_MIN_TO_REG(val, reg) \ - ((((val)/500) & 0xf8)|((reg) & 0x7)) -#define AUTO_TEMP_RANGE_FROM_REG(reg) (5000 * (1<< ((reg)&0x7))) + ((((val) / 500) & 0xf8) | ((reg) & 0x7)) +#define AUTO_TEMP_RANGE_FROM_REG(reg) (5000 * (1 << ((reg) & 0x7))) #define AUTO_TEMP_MIN_FROM_REG(reg) (1000 * ((((reg) >> 3) & 0x1f) << 2)) #define AUTO_TEMP_MIN_FROM_REG_DEG(reg) ((((reg) >> 3) & 0x1f) << 2) @@ -202,7 +203,7 @@ static int AUTO_TEMP_MAX_TO_REG(int val, int reg, int pwm) /* FAN auto control */ #define GET_FAN_AUTO_BITFIELD(data, idx) \ - (*(data)->chan_select_table)[FAN_CHAN_FROM_REG((data)->conf1)][idx%2] + (*(data)->chan_select_table)[FAN_CHAN_FROM_REG((data)->conf1)][idx % 2] /* The tables below contains the possible values for the auto fan * control bitfields. the index in the table is the register value. 
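As a worked example of the FAN_FROM_REG() macro reformatted near the top of the adm1031.c hunk above: the driver derives RPM from the tach count and clock divider as (11250 * 60) / (count * div), so the same fan speed can be reported under different divider settings. The check below is an illustration for this write-up, not driver code.

#include <assert.h>
#include <stdio.h>

#define FAN_FROM_REG(reg, div) ((reg) ? \
		(11250 * 60) / ((reg) * (div)) : 0)

int main(void)
{
	assert(FAN_FROM_REG(0, 2) == 0);	/* a zero count means no valid reading */
	assert(FAN_FROM_REG(150, 1) == 4500);	/* 675000 / 150 */
	assert(FAN_FROM_REG(75, 2) == 4500);	/* same speed at divider 2 */
	printf("FAN_FROM_REG examples hold\n");
	return 0;
}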
@@ -230,7 +231,7 @@ static const auto_chan_table_t auto_channel_select_table_adm1030 = { */ static int get_fan_auto_nearest(struct adm1031_data *data, - int chan, u8 val, u8 reg, u8 * new_reg) + int chan, u8 val, u8 reg, u8 *new_reg) { int i; int first_match = -1, exact_match = -1; @@ -258,13 +259,13 @@ get_fan_auto_nearest(struct adm1031_data *data, } } - if (exact_match >= 0) { + if (exact_match >= 0) *new_reg = exact_match; - } else if (first_match >= 0) { + else if (first_match >= 0) *new_reg = first_match; - } else { + else return -EINVAL; - } + return 0; } @@ -283,23 +284,28 @@ set_fan_auto_channel(struct device *dev, struct device_attribute *attr, struct i2c_client *client = to_i2c_client(dev); struct adm1031_data *data = i2c_get_clientdata(client); int nr = to_sensor_dev_attr(attr)->index; - int val = simple_strtol(buf, NULL, 10); + long val; u8 reg; int ret; u8 old_fan_mode; + ret = kstrtol(buf, 10, &val); + if (ret) + return ret; + old_fan_mode = data->conf1; mutex_lock(&data->update_lock); - if ((ret = get_fan_auto_nearest(data, nr, val, data->conf1, ®))) { + ret = get_fan_auto_nearest(data, nr, val, data->conf1, ®); + if (ret) { mutex_unlock(&data->update_lock); return ret; } data->conf1 = FAN_CHAN_TO_REG(reg, data->conf1); if ((data->conf1 & ADM1031_CONF1_AUTO_MODE) ^ (old_fan_mode & ADM1031_CONF1_AUTO_MODE)) { - if (data->conf1 & ADM1031_CONF1_AUTO_MODE){ + if (data->conf1 & ADM1031_CONF1_AUTO_MODE) { /* Switch to Auto Fan Mode * Save PWM registers * Set PWM registers to 33% Both */ @@ -350,7 +356,12 @@ set_auto_temp_min(struct device *dev, struct device_attribute *attr, struct i2c_client *client = to_i2c_client(dev); struct adm1031_data *data = i2c_get_clientdata(client); int nr = to_sensor_dev_attr(attr)->index; - int val = simple_strtol(buf, NULL, 10); + long val; + int ret; + + ret = kstrtol(buf, 10, &val); + if (ret) + return ret; mutex_lock(&data->update_lock); data->auto_temp[nr] = AUTO_TEMP_MIN_TO_REG(val, data->auto_temp[nr]); @@ -374,10 +385,16 @@ set_auto_temp_max(struct device *dev, struct device_attribute *attr, struct i2c_client *client = to_i2c_client(dev); struct adm1031_data *data = i2c_get_clientdata(client); int nr = to_sensor_dev_attr(attr)->index; - int val = simple_strtol(buf, NULL, 10); + long val; + int ret; + + ret = kstrtol(buf, 10, &val); + if (ret) + return ret; mutex_lock(&data->update_lock); - data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr], data->pwm[nr]); + data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr], + data->pwm[nr]); adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr), data->temp_max[nr]); mutex_unlock(&data->update_lock); @@ -410,8 +427,12 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr, struct i2c_client *client = to_i2c_client(dev); struct adm1031_data *data = i2c_get_clientdata(client); int nr = to_sensor_dev_attr(attr)->index; - int val = simple_strtol(buf, NULL, 10); - int reg; + long val; + int ret, reg; + + ret = kstrtol(buf, 10, &val); + if (ret) + return ret; mutex_lock(&data->update_lock); if ((data->conf1 & ADM1031_CONF1_AUTO_MODE) && @@ -449,9 +470,13 @@ static int trust_fan_readings(struct adm1031_data *data, int chan) if (data->conf1 & ADM1031_CONF1_AUTO_MODE) { switch (data->conf1 & 0x60) { - case 0x00: /* remote temp1 controls fan1 remote temp2 controls fan2 */ + case 0x00: + /* + * remote temp1 controls fan1, + * remote temp2 controls fan2 + */ res = data->temp[chan+1] >= - AUTO_TEMP_MIN_FROM_REG_DEG(data->auto_temp[chan+1]); + 
AUTO_TEMP_MIN_FROM_REG_DEG(data->auto_temp[chan+1]); break; case 0x20: /* remote temp1 controls both fans */ res = @@ -515,7 +540,12 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr, struct i2c_client *client = to_i2c_client(dev); struct adm1031_data *data = i2c_get_clientdata(client); int nr = to_sensor_dev_attr(attr)->index; - int val = simple_strtol(buf, NULL, 10); + long val; + int ret; + + ret = kstrtol(buf, 10, &val); + if (ret) + return ret; mutex_lock(&data->update_lock); if (val) { @@ -534,10 +564,15 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, struct i2c_client *client = to_i2c_client(dev); struct adm1031_data *data = i2c_get_clientdata(client); int nr = to_sensor_dev_attr(attr)->index; - int val = simple_strtol(buf, NULL, 10); + long val; u8 tmp; int old_div; int new_min; + int ret; + + ret = kstrtol(buf, 10, &val); + if (ret) + return ret; tmp = val == 8 ? 0xc0 : val == 4 ? 0x80 : @@ -631,9 +666,13 @@ static ssize_t set_temp_offset(struct device *dev, struct i2c_client *client = to_i2c_client(dev); struct adm1031_data *data = i2c_get_clientdata(client); int nr = to_sensor_dev_attr(attr)->index; - int val; + long val; + int ret; + + ret = kstrtol(buf, 10, &val); + if (ret) + return ret; - val = simple_strtol(buf, NULL, 10); val = SENSORS_LIMIT(val, -15000, 15000); mutex_lock(&data->update_lock); data->temp_offset[nr] = TEMP_OFFSET_TO_REG(val); @@ -648,9 +687,13 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr, struct i2c_client *client = to_i2c_client(dev); struct adm1031_data *data = i2c_get_clientdata(client); int nr = to_sensor_dev_attr(attr)->index; - int val; + long val; + int ret; + + ret = kstrtol(buf, 10, &val); + if (ret) + return ret; - val = simple_strtol(buf, NULL, 10); val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875); mutex_lock(&data->update_lock); data->temp_min[nr] = TEMP_TO_REG(val); @@ -665,9 +708,13 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr, struct i2c_client *client = to_i2c_client(dev); struct adm1031_data *data = i2c_get_clientdata(client); int nr = to_sensor_dev_attr(attr)->index; - int val; + long val; + int ret; + + ret = kstrtol(buf, 10, &val); + if (ret) + return ret; - val = simple_strtol(buf, NULL, 10); val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875); mutex_lock(&data->update_lock); data->temp_max[nr] = TEMP_TO_REG(val); @@ -682,9 +729,13 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr, struct i2c_client *client = to_i2c_client(dev); struct adm1031_data *data = i2c_get_clientdata(client); int nr = to_sensor_dev_attr(attr)->index; - int val; + long val; + int ret; + + ret = kstrtol(buf, 10, &val); + if (ret) + return ret; - val = simple_strtol(buf, NULL, 10); val = SENSORS_LIMIT(val, -55000, nr == 0 ? 
127750 : 127875); mutex_lock(&data->update_lock); data->temp_crit[nr] = TEMP_TO_REG(val); @@ -711,7 +762,8 @@ temp_reg(2); temp_reg(3); /* Alarms */ -static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, + char *buf) { struct adm1031_data *data = adm1031_update_device(dev); return sprintf(buf, "%d\n", data->alarm); @@ -919,12 +971,13 @@ static int adm1031_probe(struct i2c_client *client, adm1031_init_client(client); /* Register sysfs hooks */ - if ((err = sysfs_create_group(&client->dev.kobj, &adm1031_group))) + err = sysfs_create_group(&client->dev.kobj, &adm1031_group); + if (err) goto exit_free; if (data->chip_type == adm1031) { - if ((err = sysfs_create_group(&client->dev.kobj, - &adm1031_group_opt))) + err = sysfs_create_group(&client->dev.kobj, &adm1031_group_opt); + if (err) goto exit_remove; } @@ -970,14 +1023,13 @@ static void adm1031_init_client(struct i2c_client *client) } /* Initialize the ADM1031 chip (enables fan speed reading ) */ read_val = adm1031_read_value(client, ADM1031_REG_CONF2); - if ((read_val | mask) != read_val) { - adm1031_write_value(client, ADM1031_REG_CONF2, read_val | mask); - } + if ((read_val | mask) != read_val) + adm1031_write_value(client, ADM1031_REG_CONF2, read_val | mask); read_val = adm1031_read_value(client, ADM1031_REG_CONF1); if ((read_val | ADM1031_CONF1_MONITOR_ENABLE) != read_val) { - adm1031_write_value(client, ADM1031_REG_CONF1, read_val | - ADM1031_CONF1_MONITOR_ENABLE); + adm1031_write_value(client, ADM1031_REG_CONF1, + read_val | ADM1031_CONF1_MONITOR_ENABLE); } /* Read the chip's update rate */ @@ -1024,8 +1076,7 @@ static struct adm1031_data *adm1031_update_device(struct device *dev) /* oldh is actually newer */ if (newh != oldh) dev_warn(&client->dev, - "Remote temperature may be " - "wrong.\n"); + "Remote temperature may be wrong.\n"); #endif } data->temp[chan] = newh; @@ -1052,22 +1103,24 @@ static struct adm1031_data *adm1031_update_device(struct device *dev) data->conf2 = adm1031_read_value(client, ADM1031_REG_CONF2); data->alarm = adm1031_read_value(client, ADM1031_REG_STATUS(0)) - | (adm1031_read_value(client, ADM1031_REG_STATUS(1)) - << 8); - if (data->chip_type == adm1030) { + | (adm1031_read_value(client, ADM1031_REG_STATUS(1)) << 8); + if (data->chip_type == adm1030) data->alarm &= 0xc0ff; - } - for (chan=0; chan<(data->chip_type == adm1030 ? 1 : 2); chan++) { + for (chan = 0; chan < (data->chip_type == adm1030 ? 1 : 2); + chan++) { data->fan_div[chan] = - adm1031_read_value(client, ADM1031_REG_FAN_DIV(chan)); + adm1031_read_value(client, + ADM1031_REG_FAN_DIV(chan)); data->fan_min[chan] = - adm1031_read_value(client, ADM1031_REG_FAN_MIN(chan)); + adm1031_read_value(client, + ADM1031_REG_FAN_MIN(chan)); data->fan[chan] = - adm1031_read_value(client, ADM1031_REG_FAN_SPEED(chan)); + adm1031_read_value(client, + ADM1031_REG_FAN_SPEED(chan)); data->pwm[chan] = - 0xf & (adm1031_read_value(client, ADM1031_REG_PWM) >> - (4*chan)); + (adm1031_read_value(client, + ADM1031_REG_PWM) >> (4 * chan)) & 0x0f; } data->last_updated = jiffies; data->valid = 1; diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 1fdef88..a6c6ec3 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -190,7 +190,8 @@ static ssize_t show_temp(struct device *dev, return tdata->valid ? 
sprintf(buf, "%d\n", tdata->temp) : -EAGAIN; } -static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) +static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, + struct device *dev) { /* The 100C is default for both mobile and non mobile CPUs */ @@ -284,7 +285,8 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) return tjmax; } -static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) +static int __cpuinit get_tjmax(struct cpuinfo_x86 *c, u32 id, + struct device *dev) { int err; u32 eax, edx; @@ -323,7 +325,8 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) return adjust_tjmax(c, id, dev); } -static int create_name_attr(struct platform_data *pdata, struct device *dev) +static int __devinit create_name_attr(struct platform_data *pdata, + struct device *dev) { sysfs_attr_init(&pdata->name_attr.attr); pdata->name_attr.attr.name = "name"; @@ -332,8 +335,8 @@ static int create_name_attr(struct platform_data *pdata, struct device *dev) return device_create_file(dev, &pdata->name_attr); } -static int create_core_attrs(struct temp_data *tdata, struct device *dev, - int attr_no) +static int __cpuinit create_core_attrs(struct temp_data *tdata, + struct device *dev, int attr_no) { int err, i; static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev, @@ -383,7 +386,7 @@ static int __cpuinit chk_ucode_version(unsigned int cpu) return 0; } -static struct platform_device *coretemp_get_pdev(unsigned int cpu) +static struct platform_device __cpuinit *coretemp_get_pdev(unsigned int cpu) { u16 phys_proc_id = TO_PHYS_ID(cpu); struct pdev_entry *p; @@ -400,7 +403,8 @@ static struct platform_device *coretemp_get_pdev(unsigned int cpu) return NULL; } -static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag) +static struct temp_data __cpuinit *init_temp_data(unsigned int cpu, + int pkg_flag) { struct temp_data *tdata; @@ -418,7 +422,7 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag) return tdata; } -static int create_core_data(struct platform_device *pdev, +static int __cpuinit create_core_data(struct platform_device *pdev, unsigned int cpu, int pkg_flag) { struct temp_data *tdata; @@ -489,7 +493,7 @@ exit_free: return err; } -static void coretemp_add_core(unsigned int cpu, int pkg_flag) +static void __cpuinit coretemp_add_core(unsigned int cpu, int pkg_flag) { struct platform_device *pdev = coretemp_get_pdev(cpu); int err; @@ -618,7 +622,7 @@ exit: return err; } -static void coretemp_device_remove(unsigned int cpu) +static void __cpuinit coretemp_device_remove(unsigned int cpu) { struct pdev_entry *p, *n; u16 phys_proc_id = TO_PHYS_ID(cpu); @@ -634,7 +638,7 @@ static void coretemp_device_remove(unsigned int cpu) mutex_unlock(&pdev_list_mutex); } -static bool is_any_core_online(struct platform_data *pdata) +static bool __cpuinit is_any_core_online(struct platform_data *pdata) { int i; diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c index 92f9497..6dbfd3e 100644 --- a/drivers/hwmon/f71805f.c +++ b/drivers/hwmon/f71805f.c @@ -283,11 +283,11 @@ static inline long temp_from_reg(u8 reg) static inline u8 temp_to_reg(long val) { - if (val < 0) - val = 0; - else if (val > 1000 * 0xff) - val = 0xff; - return ((val + 500) / 1000); + if (val <= 0) + return 0; + if (val >= 1000 * 0xff) + return 0xff; + return (val + 500) / 1000; } /* diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c index 603ef2a..0054d6f 100644 --- a/drivers/hwmon/it87.c +++ 
b/drivers/hwmon/it87.c @@ -17,6 +17,7 @@ * IT8720F Super I/O chip w/LPC interface * IT8721F Super I/O chip w/LPC interface * IT8726F Super I/O chip w/LPC interface + * IT8728F Super I/O chip w/LPC interface * IT8758E Super I/O chip w/LPC interface * Sis950 A clone of the IT8705F * @@ -58,7 +59,7 @@ #define DRVNAME "it87" -enum chips { it87, it8712, it8716, it8718, it8720, it8721 }; +enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728 }; static unsigned short force_id; module_param(force_id, ushort, 0); @@ -135,6 +136,7 @@ static inline void superio_exit(void) #define IT8720F_DEVID 0x8720 #define IT8721F_DEVID 0x8721 #define IT8726F_DEVID 0x8726 +#define IT8728F_DEVID 0x8728 #define IT87_ACT_REG 0x30 #define IT87_BASE_REG 0x60 @@ -274,11 +276,31 @@ struct it87_data { s8 auto_temp[3][5]; /* [nr][0] is point1_temp_hyst */ }; +static inline int has_12mv_adc(const struct it87_data *data) +{ + /* + * IT8721F and later have a 12 mV ADC, also with internal scaling + * on selected inputs. + */ + return data->type == it8721 + || data->type == it8728; +} + +static inline int has_newer_autopwm(const struct it87_data *data) +{ + /* + * IT8721F and later have separate registers for the temperature + * mapping and the manual duty cycle. + */ + return data->type == it8721 + || data->type == it8728; +} + static u8 in_to_reg(const struct it87_data *data, int nr, long val) { long lsb; - if (data->type == it8721) { + if (has_12mv_adc(data)) { if (data->in_scaled & (1 << nr)) lsb = 24; else @@ -292,7 +314,7 @@ static u8 in_to_reg(const struct it87_data *data, int nr, long val) static int in_from_reg(const struct it87_data *data, int nr, int val) { - if (data->type == it8721) { + if (has_12mv_adc(data)) { if (data->in_scaled & (1 << nr)) return val * 24; else @@ -329,7 +351,7 @@ static inline u16 FAN16_TO_REG(long rpm) static u8 pwm_to_reg(const struct it87_data *data, long val) { - if (data->type == it8721) + if (has_newer_autopwm(data)) return val; else return val >> 1; @@ -337,7 +359,7 @@ static u8 pwm_to_reg(const struct it87_data *data, long val) static int pwm_from_reg(const struct it87_data *data, u8 reg) { - if (data->type == it8721) + if (has_newer_autopwm(data)) return reg; else return (reg & 0x7f) << 1; @@ -374,7 +396,8 @@ static inline int has_16bit_fans(const struct it87_data *data) || data->type == it8716 || data->type == it8718 || data->type == it8720 - || data->type == it8721; + || data->type == it8721 + || data->type == it8728; } static inline int has_old_autopwm(const struct it87_data *data) @@ -842,7 +865,7 @@ static ssize_t set_pwm_enable(struct device *dev, data->fan_main_ctrl); } else { if (val == 1) /* Manual mode */ - data->pwm_ctrl[nr] = data->type == it8721 ? + data->pwm_ctrl[nr] = has_newer_autopwm(data) ? data->pwm_temp_map[nr] : data->pwm_duty[nr]; else /* Automatic mode */ @@ -870,7 +893,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr, return -EINVAL; mutex_lock(&data->update_lock); - if (data->type == it8721) { + if (has_newer_autopwm(data)) { /* If we are in automatic mode, the PWM duty cycle register * is read-only so we can't write the value */ if (data->pwm_ctrl[nr] & 0x80) { @@ -1311,8 +1334,8 @@ static ssize_t show_label(struct device *dev, struct device_attribute *attr, struct it87_data *data = dev_get_drvdata(dev); int nr = to_sensor_dev_attr(attr)->index; - return sprintf(buf, "%s\n", data->type == it8721 ? labels_it8721[nr] - : labels[nr]); + return sprintf(buf, "%s\n", has_12mv_adc(data) ? 
labels_it8721[nr] + : labels[nr]); } static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL, 0); static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_label, NULL, 1); @@ -1605,6 +1628,9 @@ static int __init it87_find(unsigned short *address, case IT8721F_DEVID: sio_data->type = it8721; break; + case IT8728F_DEVID: + sio_data->type = it8728; + break; case 0xffff: /* No device at all */ goto exit; default: @@ -1646,8 +1672,11 @@ static int __init it87_find(unsigned short *address, superio_select(GPIO); reg = superio_inb(IT87_SIO_GPIO3_REG); - if (sio_data->type == it8721) { - /* The IT8721F/IT8758E doesn't have VID pins at all */ + if (sio_data->type == it8721 || sio_data->type == it8728) { + /* + * The IT8721F/IT8758E doesn't have VID pins at all, + * not sure about the IT8728F. + */ sio_data->skip_vid = 1; } else { /* We need at least 4 VID pins */ @@ -1692,7 +1721,8 @@ static int __init it87_find(unsigned short *address, } if (reg & (1 << 0)) sio_data->internal |= (1 << 0); - if ((reg & (1 << 1)) || sio_data->type == it8721) + if ((reg & (1 << 1)) || sio_data->type == it8721 || + sio_data->type == it8728) sio_data->internal |= (1 << 1); sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f; @@ -1770,6 +1800,7 @@ static int __devinit it87_probe(struct platform_device *pdev) "it8718", "it8720", "it8721", + "it8728", }; res = platform_get_resource(pdev, IORESOURCE_IO, 0); @@ -1807,7 +1838,7 @@ static int __devinit it87_probe(struct platform_device *pdev) enable_pwm_interface = it87_check_pwm(dev); /* Starting with IT8721F, we handle scaling of internal voltages */ - if (data->type == it8721) { + if (has_12mv_adc(data)) { if (sio_data->internal & (1 << 0)) data->in_scaled |= (1 << 3); /* in3 is AVCC */ if (sio_data->internal & (1 << 1)) @@ -2093,7 +2124,7 @@ static void __devinit it87_init_device(struct platform_device *pdev) static void it87_update_pwm_ctrl(struct it87_data *data, int nr) { data->pwm_ctrl[nr] = it87_read_value(data, IT87_REG_PWM(nr)); - if (data->type == it8721) { + if (has_newer_autopwm(data)) { data->pwm_temp_map[nr] = data->pwm_ctrl[nr] & 0x03; data->pwm_duty[nr] = it87_read_value(data, IT87_REG_PWM_DUTY(nr)); diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c index 508cb29..5e6457a 100644 --- a/drivers/hwmon/lm63.c +++ b/drivers/hwmon/lm63.c @@ -47,10 +47,14 @@ #include <linux/err.h> #include <linux/mutex.h> #include <linux/sysfs.h> +#include <linux/types.h> /* * Addresses to scan - * Address is fully defined internally and cannot be changed. + * Address is fully defined internally and cannot be changed except for + * LM64 which has one pin dedicated to address selection. + * LM63 and LM96163 have address 0x4c. + * LM64 can have address 0x18 or 0x4e. 
*/ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END }; @@ -60,6 +64,7 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END }; */ #define LM63_REG_CONFIG1 0x03 +#define LM63_REG_CONVRATE 0x04 #define LM63_REG_CONFIG2 0xBF #define LM63_REG_CONFIG_FAN 0x4A @@ -70,6 +75,9 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END }; #define LM63_REG_PWM_VALUE 0x4C #define LM63_REG_PWM_FREQ 0x4D +#define LM63_REG_LUT_TEMP_HYST 0x4F +#define LM63_REG_LUT_TEMP(nr) (0x50 + 2 * (nr)) +#define LM63_REG_LUT_PWM(nr) (0x51 + 2 * (nr)) #define LM63_REG_LOCAL_TEMP 0x00 #define LM63_REG_LOCAL_HIGH 0x05 @@ -91,6 +99,16 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END }; #define LM63_REG_MAN_ID 0xFE #define LM63_REG_CHIP_ID 0xFF +#define LM96163_REG_TRUTHERM 0x30 +#define LM96163_REG_REMOTE_TEMP_U_MSB 0x31 +#define LM96163_REG_REMOTE_TEMP_U_LSB 0x32 +#define LM96163_REG_CONFIG_ENHANCED 0x45 + +#define LM63_MAX_CONVRATE 9 + +#define LM63_MAX_CONVRATE_HZ 32 +#define LM96163_MAX_CONVRATE_HZ 26 + /* * Conversions and various macros * For tachometer counts, the LM63 uses 16-bit values. @@ -112,15 +130,24 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END }; (val) >= 127000 ? 127 : \ (val) < 0 ? ((val) - 500) / 1000 : \ ((val) + 500) / 1000) +#define TEMP8U_TO_REG(val) ((val) <= 0 ? 0 : \ + (val) >= 255000 ? 255 : \ + ((val) + 500) / 1000) #define TEMP11_FROM_REG(reg) ((reg) / 32 * 125) #define TEMP11_TO_REG(val) ((val) <= -128000 ? 0x8000 : \ (val) >= 127875 ? 0x7FE0 : \ (val) < 0 ? ((val) - 62) / 125 * 32 : \ ((val) + 62) / 125 * 32) +#define TEMP11U_TO_REG(val) ((val) <= 0 ? 0 : \ + (val) >= 255875 ? 0xFFE0 : \ + ((val) + 62) / 125 * 32) #define HYST_TO_REG(val) ((val) <= 0 ? 0 : \ (val) >= 127000 ? 
127 : \ ((val) + 500) / 1000) +#define UPDATE_INTERVAL(max, rate) \ + ((1000 << (LM63_MAX_CONVRATE - (rate))) / (max)) + /* * Functions declaration */ @@ -134,7 +161,7 @@ static struct lm63_data *lm63_update_device(struct device *dev); static int lm63_detect(struct i2c_client *client, struct i2c_board_info *info); static void lm63_init_client(struct i2c_client *client); -enum chips { lm63, lm64 }; +enum chips { lm63, lm64, lm96163 }; /* * Driver data (common to all clients) @@ -143,6 +170,7 @@ enum chips { lm63, lm64 }; static const struct i2c_device_id lm63_id[] = { { "lm63", lm63 }, { "lm64", lm64 }, + { "lm96163", lm96163 }, { } }; MODULE_DEVICE_TABLE(i2c, lm63_id); @@ -167,26 +195,53 @@ struct lm63_data { struct device *hwmon_dev; struct mutex update_lock; char valid; /* zero until following fields are valid */ + char lut_valid; /* zero until lut fields are valid */ unsigned long last_updated; /* in jiffies */ - int kind; + unsigned long lut_last_updated; /* in jiffies */ + enum chips kind; int temp2_offset; + int update_interval; /* in milliseconds */ + int max_convrate_hz; + int lut_size; /* 8 or 12 */ + /* registers values */ u8 config, config_fan; u16 fan[2]; /* 0: input 1: low limit */ u8 pwm1_freq; - u8 pwm1_value; - s8 temp8[3]; /* 0: local input + u8 pwm1[13]; /* 0: current output + 1-12: lookup table */ + s8 temp8[15]; /* 0: local input 1: local high limit - 2: remote critical limit */ - s16 temp11[3]; /* 0: remote input + 2: remote critical limit + 3-14: lookup table */ + s16 temp11[4]; /* 0: remote input 1: remote low limit - 2: remote high limit */ + 2: remote high limit + 3: remote offset */ + u16 temp11u; /* remote input (unsigned) */ u8 temp2_crit_hyst; + u8 lut_temp_hyst; u8 alarms; + bool pwm_highres; + bool lut_temp_highres; + bool remote_unsigned; /* true if unsigned remote upper limits */ + bool trutherm; }; +static inline int temp8_from_reg(struct lm63_data *data, int nr) +{ + if (data->remote_unsigned) + return TEMP8_FROM_REG((u8)data->temp8[nr]); + return TEMP8_FROM_REG(data->temp8[nr]); +} + +static inline int lut_temp_from_reg(struct lm63_data *data, int nr) +{ + return data->temp8[nr] * (data->lut_temp_highres ? 500 : 1000); +} + /* * Sysfs callback functions and files */ @@ -204,7 +259,12 @@ static ssize_t set_fan(struct device *dev, struct device_attribute *dummy, { struct i2c_client *client = to_i2c_client(dev); struct lm63_data *data = i2c_get_clientdata(client); - unsigned long val = simple_strtoul(buf, NULL, 10); + unsigned long val; + int err; + + err = kstrtoul(buf, 10, &val); + if (err) + return err; mutex_lock(&data->update_lock); data->fan[1] = FAN_TO_REG(val); @@ -216,13 +276,22 @@ static ssize_t set_fan(struct device *dev, struct device_attribute *dummy, return count; } -static ssize_t show_pwm1(struct device *dev, struct device_attribute *dummy, +static ssize_t show_pwm1(struct device *dev, struct device_attribute *devattr, char *buf) { + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct lm63_data *data = lm63_update_device(dev); - return sprintf(buf, "%d\n", data->pwm1_value >= 2 * data->pwm1_freq ? - 255 : (data->pwm1_value * 255 + data->pwm1_freq) / - (2 * data->pwm1_freq)); + int nr = attr->index; + int pwm; + + if (data->pwm_highres) + pwm = data->pwm1[nr]; + else + pwm = data->pwm1[nr] >= 2 * data->pwm1_freq ? 
+ 255 : (data->pwm1[nr] * 255 + data->pwm1_freq) / + (2 * data->pwm1_freq); + + return sprintf(buf, "%d\n", pwm); } static ssize_t set_pwm1(struct device *dev, struct device_attribute *dummy, @@ -231,22 +300,26 @@ static ssize_t set_pwm1(struct device *dev, struct device_attribute *dummy, struct i2c_client *client = to_i2c_client(dev); struct lm63_data *data = i2c_get_clientdata(client); unsigned long val; - + int err; + if (!(data->config_fan & 0x20)) /* register is read-only */ return -EPERM; - val = simple_strtoul(buf, NULL, 10); + err = kstrtoul(buf, 10, &val); + if (err) + return err; + + val = SENSORS_LIMIT(val, 0, 255); mutex_lock(&data->update_lock); - data->pwm1_value = val <= 0 ? 0 : - val >= 255 ? 2 * data->pwm1_freq : - (val * data->pwm1_freq * 2 + 127) / 255; - i2c_smbus_write_byte_data(client, LM63_REG_PWM_VALUE, data->pwm1_value); + data->pwm1[0] = data->pwm_highres ? val : + (val * data->pwm1_freq * 2 + 127) / 255; + i2c_smbus_write_byte_data(client, LM63_REG_PWM_VALUE, data->pwm1[0]); mutex_unlock(&data->update_lock); return count; } -static ssize_t show_pwm1_enable(struct device *dev, struct device_attribute *dummy, - char *buf) +static ssize_t show_pwm1_enable(struct device *dev, + struct device_attribute *dummy, char *buf) { struct lm63_data *data = lm63_update_device(dev); return sprintf(buf, "%d\n", data->config_fan & 0x20 ? 1 : 2); @@ -273,21 +346,47 @@ static ssize_t show_remote_temp8(struct device *dev, { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct lm63_data *data = lm63_update_device(dev); - return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index]) + return sprintf(buf, "%d\n", temp8_from_reg(data, attr->index) + + data->temp2_offset); +} + +static ssize_t show_lut_temp(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + struct lm63_data *data = lm63_update_device(dev); + return sprintf(buf, "%d\n", lut_temp_from_reg(data, attr->index) + data->temp2_offset); } -static ssize_t set_local_temp8(struct device *dev, - struct device_attribute *dummy, - const char *buf, size_t count) +static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr, + const char *buf, size_t count) { + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct i2c_client *client = to_i2c_client(dev); struct lm63_data *data = i2c_get_clientdata(client); - long val = simple_strtol(buf, NULL, 10); + int nr = attr->index; + int reg = nr == 2 ? 
LM63_REG_REMOTE_TCRIT : LM63_REG_LOCAL_HIGH; + long val; + int err; + int temp; + + err = kstrtol(buf, 10, &val); + if (err) + return err; mutex_lock(&data->update_lock); - data->temp8[1] = TEMP8_TO_REG(val); - i2c_smbus_write_byte_data(client, LM63_REG_LOCAL_HIGH, data->temp8[1]); + if (nr == 2) { + if (data->remote_unsigned) + temp = TEMP8U_TO_REG(val - data->temp2_offset); + else + temp = TEMP8_TO_REG(val - data->temp2_offset); + } else { + temp = TEMP8_TO_REG(val); + } + data->temp8[nr] = temp; + i2c_smbus_write_byte_data(client, reg, temp); mutex_unlock(&data->update_lock); return count; } @@ -297,28 +396,56 @@ static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr, { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct lm63_data *data = lm63_update_device(dev); - return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index]) - + data->temp2_offset); + int nr = attr->index; + int temp; + + if (!nr) { + /* + * Use unsigned temperature unless its value is zero. + * If it is zero, use signed temperature. + */ + if (data->temp11u) + temp = TEMP11_FROM_REG(data->temp11u); + else + temp = TEMP11_FROM_REG(data->temp11[nr]); + } else { + if (data->remote_unsigned && nr == 2) + temp = TEMP11_FROM_REG((u16)data->temp11[nr]); + else + temp = TEMP11_FROM_REG(data->temp11[nr]); + } + return sprintf(buf, "%d\n", temp + data->temp2_offset); } static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { - static const u8 reg[4] = { + static const u8 reg[6] = { LM63_REG_REMOTE_LOW_MSB, LM63_REG_REMOTE_LOW_LSB, LM63_REG_REMOTE_HIGH_MSB, LM63_REG_REMOTE_HIGH_LSB, + LM63_REG_REMOTE_OFFSET_MSB, + LM63_REG_REMOTE_OFFSET_LSB, }; struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct i2c_client *client = to_i2c_client(dev); struct lm63_data *data = i2c_get_clientdata(client); - long val = simple_strtol(buf, NULL, 10); + long val; + int err; int nr = attr->index; + err = kstrtol(buf, 10, &val); + if (err) + return err; + mutex_lock(&data->update_lock); - data->temp11[nr] = TEMP11_TO_REG(val - data->temp2_offset); + if (data->remote_unsigned && nr == 2) + data->temp11[nr] = TEMP11U_TO_REG(val - data->temp2_offset); + else + data->temp11[nr] = TEMP11_TO_REG(val - data->temp2_offset); + i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2], data->temp11[nr] >> 8); i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2 + 1], @@ -327,35 +454,143 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr, return count; } -/* Hysteresis register holds a relative value, while we want to present - an absolute to user-space */ -static ssize_t show_temp2_crit_hyst(struct device *dev, struct device_attribute *dummy, - char *buf) +/* + * Hysteresis register holds a relative value, while we want to present + * an absolute to user-space + */ +static ssize_t show_temp2_crit_hyst(struct device *dev, + struct device_attribute *dummy, char *buf) { struct lm63_data *data = lm63_update_device(dev); - return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[2]) + return sprintf(buf, "%d\n", temp8_from_reg(data, 2) + data->temp2_offset - TEMP8_FROM_REG(data->temp2_crit_hyst)); } -/* And now the other way around, user-space provides an absolute - hysteresis value and we have to store a relative one */ -static ssize_t set_temp2_crit_hyst(struct device *dev, struct device_attribute *dummy, +static ssize_t show_lut_temp_hyst(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + 
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + struct lm63_data *data = lm63_update_device(dev); + + return sprintf(buf, "%d\n", lut_temp_from_reg(data, attr->index) + + data->temp2_offset + - TEMP8_FROM_REG(data->lut_temp_hyst)); +} + +/* + * And now the other way around, user-space provides an absolute + * hysteresis value and we have to store a relative one + */ +static ssize_t set_temp2_crit_hyst(struct device *dev, + struct device_attribute *dummy, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct lm63_data *data = i2c_get_clientdata(client); - long val = simple_strtol(buf, NULL, 10); + long val; + int err; long hyst; + err = kstrtol(buf, 10, &val); + if (err) + return err; + mutex_lock(&data->update_lock); - hyst = TEMP8_FROM_REG(data->temp8[2]) + data->temp2_offset - val; + hyst = temp8_from_reg(data, 2) + data->temp2_offset - val; i2c_smbus_write_byte_data(client, LM63_REG_REMOTE_TCRIT_HYST, HYST_TO_REG(hyst)); mutex_unlock(&data->update_lock); return count; } +/* + * Set conversion rate. + * client->update_lock must be held when calling this function. + */ +static void lm63_set_convrate(struct i2c_client *client, struct lm63_data *data, + unsigned int interval) +{ + int i; + unsigned int update_interval; + + /* Shift calculations to avoid rounding errors */ + interval <<= 6; + + /* find the nearest update rate */ + update_interval = (1 << (LM63_MAX_CONVRATE + 6)) * 1000 + / data->max_convrate_hz; + for (i = 0; i < LM63_MAX_CONVRATE; i++, update_interval >>= 1) + if (interval >= update_interval * 3 / 4) + break; + + i2c_smbus_write_byte_data(client, LM63_REG_CONVRATE, i); + data->update_interval = UPDATE_INTERVAL(data->max_convrate_hz, i); +} + +static ssize_t show_update_interval(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct lm63_data *data = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", data->update_interval); +} + +static ssize_t set_update_interval(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct lm63_data *data = i2c_get_clientdata(client); + unsigned long val; + int err; + + err = kstrtoul(buf, 10, &val); + if (err) + return err; + + mutex_lock(&data->update_lock); + lm63_set_convrate(client, data, SENSORS_LIMIT(val, 0, 100000)); + mutex_unlock(&data->update_lock); + + return count; +} + +static ssize_t show_type(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct lm63_data *data = i2c_get_clientdata(client); + + return sprintf(buf, data->trutherm ? "1\n" : "2\n"); +} + +static ssize_t set_type(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct lm63_data *data = i2c_get_clientdata(client); + unsigned long val; + int ret; + u8 reg; + + ret = kstrtoul(buf, 10, &val); + if (ret < 0) + return ret; + if (val != 1 && val != 2) + return -EINVAL; + + mutex_lock(&data->update_lock); + data->trutherm = val == 1; + reg = i2c_smbus_read_byte_data(client, LM96163_REG_TRUTHERM) & ~0x02; + i2c_smbus_write_byte_data(client, LM96163_REG_TRUTHERM, + reg | (data->trutherm ? 
0x02 : 0x00)); + data->valid = 0; + mutex_unlock(&data->update_lock); + + return count; +} + static ssize_t show_alarms(struct device *dev, struct device_attribute *dummy, char *buf) { @@ -377,27 +612,87 @@ static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0); static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan, set_fan, 1); -static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1); +static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1, 0); static DEVICE_ATTR(pwm1_enable, S_IRUGO, show_pwm1_enable, NULL); +static SENSOR_DEVICE_ATTR(pwm1_auto_point1_pwm, S_IRUGO, show_pwm1, NULL, 1); +static SENSOR_DEVICE_ATTR(pwm1_auto_point1_temp, S_IRUGO, + show_lut_temp, NULL, 3); +static SENSOR_DEVICE_ATTR(pwm1_auto_point1_temp_hyst, S_IRUGO, + show_lut_temp_hyst, NULL, 3); +static SENSOR_DEVICE_ATTR(pwm1_auto_point2_pwm, S_IRUGO, show_pwm1, NULL, 2); +static SENSOR_DEVICE_ATTR(pwm1_auto_point2_temp, S_IRUGO, + show_lut_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(pwm1_auto_point2_temp_hyst, S_IRUGO, + show_lut_temp_hyst, NULL, 4); +static SENSOR_DEVICE_ATTR(pwm1_auto_point3_pwm, S_IRUGO, show_pwm1, NULL, 3); +static SENSOR_DEVICE_ATTR(pwm1_auto_point3_temp, S_IRUGO, + show_lut_temp, NULL, 5); +static SENSOR_DEVICE_ATTR(pwm1_auto_point3_temp_hyst, S_IRUGO, + show_lut_temp_hyst, NULL, 5); +static SENSOR_DEVICE_ATTR(pwm1_auto_point4_pwm, S_IRUGO, show_pwm1, NULL, 4); +static SENSOR_DEVICE_ATTR(pwm1_auto_point4_temp, S_IRUGO, + show_lut_temp, NULL, 6); +static SENSOR_DEVICE_ATTR(pwm1_auto_point4_temp_hyst, S_IRUGO, + show_lut_temp_hyst, NULL, 6); +static SENSOR_DEVICE_ATTR(pwm1_auto_point5_pwm, S_IRUGO, show_pwm1, NULL, 5); +static SENSOR_DEVICE_ATTR(pwm1_auto_point5_temp, S_IRUGO, + show_lut_temp, NULL, 7); +static SENSOR_DEVICE_ATTR(pwm1_auto_point5_temp_hyst, S_IRUGO, + show_lut_temp_hyst, NULL, 7); +static SENSOR_DEVICE_ATTR(pwm1_auto_point6_pwm, S_IRUGO, show_pwm1, NULL, 6); +static SENSOR_DEVICE_ATTR(pwm1_auto_point6_temp, S_IRUGO, + show_lut_temp, NULL, 8); +static SENSOR_DEVICE_ATTR(pwm1_auto_point6_temp_hyst, S_IRUGO, + show_lut_temp_hyst, NULL, 8); +static SENSOR_DEVICE_ATTR(pwm1_auto_point7_pwm, S_IRUGO, show_pwm1, NULL, 7); +static SENSOR_DEVICE_ATTR(pwm1_auto_point7_temp, S_IRUGO, + show_lut_temp, NULL, 9); +static SENSOR_DEVICE_ATTR(pwm1_auto_point7_temp_hyst, S_IRUGO, + show_lut_temp_hyst, NULL, 9); +static SENSOR_DEVICE_ATTR(pwm1_auto_point8_pwm, S_IRUGO, show_pwm1, NULL, 8); +static SENSOR_DEVICE_ATTR(pwm1_auto_point8_temp, S_IRUGO, + show_lut_temp, NULL, 10); +static SENSOR_DEVICE_ATTR(pwm1_auto_point8_temp_hyst, S_IRUGO, + show_lut_temp_hyst, NULL, 10); +static SENSOR_DEVICE_ATTR(pwm1_auto_point9_pwm, S_IRUGO, show_pwm1, NULL, 9); +static SENSOR_DEVICE_ATTR(pwm1_auto_point9_temp, S_IRUGO, + show_lut_temp, NULL, 11); +static SENSOR_DEVICE_ATTR(pwm1_auto_point9_temp_hyst, S_IRUGO, + show_lut_temp_hyst, NULL, 11); +static SENSOR_DEVICE_ATTR(pwm1_auto_point10_pwm, S_IRUGO, show_pwm1, NULL, 10); +static SENSOR_DEVICE_ATTR(pwm1_auto_point10_temp, S_IRUGO, + show_lut_temp, NULL, 12); +static SENSOR_DEVICE_ATTR(pwm1_auto_point10_temp_hyst, S_IRUGO, + show_lut_temp_hyst, NULL, 12); +static SENSOR_DEVICE_ATTR(pwm1_auto_point11_pwm, S_IRUGO, show_pwm1, NULL, 11); +static SENSOR_DEVICE_ATTR(pwm1_auto_point11_temp, S_IRUGO, + show_lut_temp, NULL, 13); +static SENSOR_DEVICE_ATTR(pwm1_auto_point11_temp_hyst, S_IRUGO, + show_lut_temp_hyst, NULL, 13); +static SENSOR_DEVICE_ATTR(pwm1_auto_point12_pwm, S_IRUGO, show_pwm1, NULL, 12); +static 
SENSOR_DEVICE_ATTR(pwm1_auto_point12_temp, S_IRUGO, + show_lut_temp, NULL, 14); +static SENSOR_DEVICE_ATTR(pwm1_auto_point12_temp_hyst, S_IRUGO, + show_lut_temp_hyst, NULL, 14); static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_local_temp8, NULL, 0); static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_local_temp8, - set_local_temp8, 1); + set_temp8, 1); static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 0); static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp11, set_temp11, 1); static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp11, set_temp11, 2); -/* - * On LM63, temp2_crit can be set only once, which should be job - * of the bootloader. - */ +static SENSOR_DEVICE_ATTR(temp2_offset, S_IWUSR | S_IRUGO, show_temp11, + set_temp11, 3); static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_remote_temp8, - NULL, 2); + set_temp8, 2); static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp2_crit_hyst, set_temp2_crit_hyst); +static DEVICE_ATTR(temp2_type, S_IWUSR | S_IRUGO, show_type, set_type); + /* Individual alarm files */ static SENSOR_DEVICE_ATTR(fan1_min_alarm, S_IRUGO, show_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 1); @@ -408,14 +703,43 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6); /* Raw alarm file for compatibility */ static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); +static DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, show_update_interval, + set_update_interval); + static struct attribute *lm63_attributes[] = { - &dev_attr_pwm1.attr, + &sensor_dev_attr_pwm1.dev_attr.attr, &dev_attr_pwm1_enable.attr, + &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point1_temp.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point1_temp_hyst.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point2_temp.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point2_temp_hyst.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point3_temp.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point3_temp_hyst.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point4_pwm.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point4_temp.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point4_temp_hyst.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point5_pwm.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point5_temp.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point5_temp_hyst.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point6_pwm.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point6_temp.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point6_temp_hyst.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point7_pwm.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point7_temp.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point7_temp_hyst.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point8_pwm.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point8_temp.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point8_temp_hyst.dev_attr.attr, + &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp2_min.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, + &sensor_dev_attr_temp2_offset.dev_attr.attr, &sensor_dev_attr_temp2_crit.dev_attr.attr, &dev_attr_temp2_crit_hyst.attr, @@ -425,10 +749,54 @@ static struct attribute *lm63_attributes[] = { &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, 
&dev_attr_alarms.attr, + &dev_attr_update_interval.attr, NULL }; +static struct attribute *lm63_attributes_extra_lut[] = { + &sensor_dev_attr_pwm1_auto_point9_pwm.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point9_temp.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point9_temp_hyst.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point10_pwm.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point10_temp.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point10_temp_hyst.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point11_pwm.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point11_temp.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point11_temp_hyst.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point12_pwm.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point12_temp.dev_attr.attr, + &sensor_dev_attr_pwm1_auto_point12_temp_hyst.dev_attr.attr, + NULL +}; + +static const struct attribute_group lm63_group_extra_lut = { + .attrs = lm63_attributes_extra_lut, +}; + +/* + * On LM63, temp2_crit can be set only once, which should be job + * of the bootloader. + * On LM64, temp2_crit can always be set. + * On LM96163, temp2_crit can be set if bit 1 of the configuration + * register is true. + */ +static umode_t lm63_attribute_mode(struct kobject *kobj, + struct attribute *attr, int index) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct i2c_client *client = to_i2c_client(dev); + struct lm63_data *data = i2c_get_clientdata(client); + + if (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr + && (data->kind == lm64 || + (data->kind == lm96163 && (data->config & 0x02)))) + return attr->mode | S_IWUSR; + + return attr->mode; +} + static const struct attribute_group lm63_group = { + .is_visible = lm63_attribute_mode, .attrs = lm63_attributes, }; @@ -487,6 +855,8 @@ static int lm63_detect(struct i2c_client *new_client, strlcpy(info->type, "lm63", I2C_NAME_SIZE); else if (chip_id == 0x51 && (address == 0x18 || address == 0x4e)) strlcpy(info->type, "lm64", I2C_NAME_SIZE); + else if (chip_id == 0x49 && address == 0x4c) + strlcpy(info->type, "lm96163", I2C_NAME_SIZE); else return -ENODEV; @@ -518,12 +888,24 @@ static int lm63_probe(struct i2c_client *new_client, lm63_init_client(new_client); /* Register sysfs hooks */ - if ((err = sysfs_create_group(&new_client->dev.kobj, - &lm63_group))) + err = sysfs_create_group(&new_client->dev.kobj, &lm63_group); + if (err) goto exit_free; if (data->config & 0x04) { /* tachometer enabled */ - if ((err = sysfs_create_group(&new_client->dev.kobj, - &lm63_group_fan1))) + err = sysfs_create_group(&new_client->dev.kobj, + &lm63_group_fan1); + if (err) + goto exit_remove_files; + } + if (data->kind == lm96163) { + err = device_create_file(&new_client->dev, + &dev_attr_temp2_type); + if (err) + goto exit_remove_files; + + err = sysfs_create_group(&new_client->dev.kobj, + &lm63_group_extra_lut); + if (err) goto exit_remove_files; } @@ -538,17 +920,25 @@ static int lm63_probe(struct i2c_client *new_client, exit_remove_files: sysfs_remove_group(&new_client->dev.kobj, &lm63_group); sysfs_remove_group(&new_client->dev.kobj, &lm63_group_fan1); + if (data->kind == lm96163) { + device_remove_file(&new_client->dev, &dev_attr_temp2_type); + sysfs_remove_group(&new_client->dev.kobj, + &lm63_group_extra_lut); + } exit_free: kfree(data); exit: return err; } -/* Idealy we shouldn't have to initialize anything, since the BIOS - should have taken care of everything */ +/* + * Ideally we shouldn't have to initialize anything, since the BIOS + * should have taken care of everything + */ static void 
lm63_init_client(struct i2c_client *client) { struct lm63_data *data = i2c_get_clientdata(client); + u8 convrate; data->config = i2c_smbus_read_byte_data(client, LM63_REG_CONFIG1); data->config_fan = i2c_smbus_read_byte_data(client, @@ -561,16 +951,57 @@ static void lm63_init_client(struct i2c_client *client) i2c_smbus_write_byte_data(client, LM63_REG_CONFIG1, data->config); } + /* Tachometer is always enabled on LM64 */ + if (data->kind == lm64) + data->config |= 0x04; /* We may need pwm1_freq before ever updating the client data */ data->pwm1_freq = i2c_smbus_read_byte_data(client, LM63_REG_PWM_FREQ); if (data->pwm1_freq == 0) data->pwm1_freq = 1; + switch (data->kind) { + case lm63: + case lm64: + data->max_convrate_hz = LM63_MAX_CONVRATE_HZ; + data->lut_size = 8; + break; + case lm96163: + data->max_convrate_hz = LM96163_MAX_CONVRATE_HZ; + data->lut_size = 12; + data->trutherm + = i2c_smbus_read_byte_data(client, + LM96163_REG_TRUTHERM) & 0x02; + break; + } + convrate = i2c_smbus_read_byte_data(client, LM63_REG_CONVRATE); + if (unlikely(convrate > LM63_MAX_CONVRATE)) + convrate = LM63_MAX_CONVRATE; + data->update_interval = UPDATE_INTERVAL(data->max_convrate_hz, + convrate); + + /* + * For LM96163, check if high resolution PWM + * and unsigned temperature format is enabled. + */ + if (data->kind == lm96163) { + u8 config_enhanced + = i2c_smbus_read_byte_data(client, + LM96163_REG_CONFIG_ENHANCED); + if (config_enhanced & 0x20) + data->lut_temp_highres = true; + if ((config_enhanced & 0x10) + && !(data->config_fan & 0x08) && data->pwm1_freq == 8) + data->pwm_highres = true; + if (config_enhanced & 0x08) + data->remote_unsigned = true; + } + /* Show some debug info about the LM63 configuration */ - dev_dbg(&client->dev, "Alert/tach pin configured for %s\n", - (data->config & 0x04) ? "tachometer input" : - "alert output"); + if (data->kind == lm63) + dev_dbg(&client->dev, "Alert/tach pin configured for %s\n", + (data->config & 0x04) ? "tachometer input" : + "alert output"); dev_dbg(&client->dev, "PWM clock %s kHz, output frequency %u Hz\n", (data->config_fan & 0x08) ? "1.4" : "360", ((data->config_fan & 0x08) ? 
700 : 180000) / data->pwm1_freq); @@ -586,6 +1017,10 @@ static int lm63_remove(struct i2c_client *client) hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &lm63_group); sysfs_remove_group(&client->dev.kobj, &lm63_group_fan1); + if (data->kind == lm96163) { + device_remove_file(&client->dev, &dev_attr_temp2_type); + sysfs_remove_group(&client->dev.kobj, &lm63_group_extra_lut); + } kfree(data); return 0; @@ -595,10 +1030,15 @@ static struct lm63_data *lm63_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct lm63_data *data = i2c_get_clientdata(client); + unsigned long next_update; + int i; mutex_lock(&data->update_lock); - if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { + next_update = data->last_updated + + msecs_to_jiffies(data->update_interval) + 1; + + if (time_after(jiffies, next_update) || !data->valid) { if (data->config & 0x04) { /* tachometer enabled */ /* order matters for fan1_input */ data->fan[0] = i2c_smbus_read_byte_data(client, @@ -615,8 +1055,8 @@ static struct lm63_data *lm63_update_device(struct device *dev) LM63_REG_PWM_FREQ); if (data->pwm1_freq == 0) data->pwm1_freq = 1; - data->pwm1_value = i2c_smbus_read_byte_data(client, - LM63_REG_PWM_VALUE); + data->pwm1[0] = i2c_smbus_read_byte_data(client, + LM63_REG_PWM_VALUE); data->temp8[0] = i2c_smbus_read_byte_data(client, LM63_REG_LOCAL_TEMP); @@ -636,6 +1076,17 @@ static struct lm63_data *lm63_update_device(struct device *dev) LM63_REG_REMOTE_HIGH_MSB) << 8) | i2c_smbus_read_byte_data(client, LM63_REG_REMOTE_HIGH_LSB); + data->temp11[3] = (i2c_smbus_read_byte_data(client, + LM63_REG_REMOTE_OFFSET_MSB) << 8) + | i2c_smbus_read_byte_data(client, + LM63_REG_REMOTE_OFFSET_LSB); + + if (data->kind == lm96163) + data->temp11u = (i2c_smbus_read_byte_data(client, + LM96163_REG_REMOTE_TEMP_U_MSB) << 8) + | i2c_smbus_read_byte_data(client, + LM96163_REG_REMOTE_TEMP_U_LSB); + data->temp8[2] = i2c_smbus_read_byte_data(client, LM63_REG_REMOTE_TCRIT); data->temp2_crit_hyst = i2c_smbus_read_byte_data(client, @@ -648,6 +1099,21 @@ static struct lm63_data *lm63_update_device(struct device *dev) data->valid = 1; } + if (time_after(jiffies, data->lut_last_updated + 5 * HZ) || + !data->lut_valid) { + for (i = 0; i < data->lut_size; i++) { + data->pwm1[1 + i] = i2c_smbus_read_byte_data(client, + LM63_REG_LUT_PWM(i)); + data->temp8[3 + i] = i2c_smbus_read_byte_data(client, + LM63_REG_LUT_TEMP(i)); + } + data->lut_temp_hyst = i2c_smbus_read_byte_data(client, + LM63_REG_LUT_TEMP_HYST); + + data->lut_last_updated = jiffies; + data->lut_valid = 1; + } + mutex_unlock(&data->update_lock); return data; diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index bdfd675..d2dd5f9 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c @@ -917,7 +917,7 @@ static ssize_t set_update_interval(struct device *dev, return err; mutex_lock(&data->update_lock); - lm90_set_convrate(client, data, val); + lm90_set_convrate(client, data, SENSORS_LIMIT(val, 0, 100000)); mutex_unlock(&data->update_lock); return count; diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c index 84ef3a8..482ca90 100644 --- a/drivers/hwmon/max1111.c +++ b/drivers/hwmon/max1111.c @@ -106,11 +106,14 @@ static ssize_t show_adc(struct device *dev, if (ret < 0) return ret; - return sprintf(buf, "%d\n", ret); + /* assume the reference voltage to be 2.048V, with an 8-bit sample, + * the LSB weight is 8mV + */ + return sprintf(buf, "%d\n", ret * 8); } #define MAX1111_ADC_ATTR(_id) \ - 
SENSOR_DEVICE_ATTR(adc##_id##_in, S_IRUGO, show_adc, NULL, _id) + SENSOR_DEVICE_ATTR(in##_id##_input, S_IRUGO, show_adc, NULL, _id) static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); static MAX1111_ADC_ATTR(0); @@ -120,10 +123,10 @@ static MAX1111_ADC_ATTR(3); static struct attribute *max1111_attributes[] = { &dev_attr_name.attr, - &sensor_dev_attr_adc0_in.dev_attr.attr, - &sensor_dev_attr_adc1_in.dev_attr.attr, - &sensor_dev_attr_adc2_in.dev_attr.attr, - &sensor_dev_attr_adc3_in.dev_attr.attr, + &sensor_dev_attr_in0_input.dev_attr.attr, + &sensor_dev_attr_in1_input.dev_attr.attr, + &sensor_dev_attr_in2_input.dev_attr.attr, + &sensor_dev_attr_in3_input.dev_attr.attr, NULL, }; diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c index 6ddeae0..91fdd1f 100644 --- a/drivers/hwmon/sht15.c +++ b/drivers/hwmon/sht15.c @@ -883,7 +883,7 @@ static int sht15_invalidate_voltage(struct notifier_block *nb, static int __devinit sht15_probe(struct platform_device *pdev) { - int ret = 0; + int ret; struct sht15_data *data = kzalloc(sizeof(*data), GFP_KERNEL); u8 status = 0; @@ -901,6 +901,7 @@ static int __devinit sht15_probe(struct platform_device *pdev) init_waitqueue_head(&data->wait_queue); if (pdev->dev.platform_data == NULL) { + ret = -EINVAL; dev_err(&pdev->dev, "no platform data supplied\n"); goto err_free_data; } diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c index 0e0af04..4d383e7 100644 --- a/drivers/hwmon/w83627ehf.c +++ b/drivers/hwmon/w83627ehf.c @@ -1319,6 +1319,7 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr, { struct w83627ehf_data *data = dev_get_drvdata(dev); struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); + struct w83627ehf_sio_data *sio_data = dev->platform_data; int nr = sensor_attr->index; unsigned long val; int err; @@ -1330,6 +1331,11 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr, if (val > 1) return -EINVAL; + + /* On NCT6776F, DC mode is only supported for pwm1 */ + if (sio_data->kind == nct6776 && nr && val != 1) + return -EINVAL; + mutex_lock(&data->update_lock); reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]); data->pwm_mode[nr] = val; @@ -1914,9 +1920,26 @@ w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data, fan4min = 0; fan5pin = 0; } else if (sio_data->kind == nct6776) { - fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40); - fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01); - fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02); + bool gpok = superio_inb(sio_data->sioreg, 0x27) & 0x80; + + superio_select(sio_data->sioreg, W83627EHF_LD_HWM); + regval = superio_inb(sio_data->sioreg, SIO_REG_ENABLE); + + if (regval & 0x80) + fan3pin = gpok; + else + fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40); + + if (regval & 0x40) + fan4pin = gpok; + else + fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01); + + if (regval & 0x20) + fan5pin = gpok; + else + fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02); + fan4min = fan4pin; } else if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) { fan3pin = 1; diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index cbe7a2f..3101dd5 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -682,19 +682,19 @@ config I2C_XILINX will be called xilinx_i2c. 
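The w83627ehf hunk above reworks NCT6776F fan pin detection: each of fan3/fan4/fan5 is taken from the GPIO-OK strap (bit 7 of Super-I/O register 0x27) when the matching bit of SIO_REG_ENABLE is set, and falls back to the legacy pin-function bits in registers 0x24/0x1C otherwise. A minimal stand-alone sketch of that selection logic, with the register layout taken from the hunk and the struct/function names invented for the example:

/*
 * Stand-alone sketch (not kernel code) of the NCT6776F fan-pin detection
 * added to w83627ehf_check_fan_inputs() above.  Bit positions come from
 * the hunk; names are invented for this example.
 */
#include <stdbool.h>
#include <stdio.h>

struct sio_snapshot {
	unsigned char reg27;	/* bit 7: GPIO-OK strap ("gpok") */
	unsigned char enable;	/* SIO_REG_ENABLE of the HWM logical device */
	unsigned char reg24;	/* bit 6 clear -> fan3 pin available */
	unsigned char reg1c;	/* bit 0 -> fan4 pin, bit 1 -> fan5 pin */
};

static void nct6776_fan_pins(const struct sio_snapshot *s,
			     bool *fan3, bool *fan4, bool *fan5)
{
	bool gpok = s->reg27 & 0x80;

	*fan3 = (s->enable & 0x80) ? gpok : !(s->reg24 & 0x40);
	*fan4 = (s->enable & 0x40) ? gpok : !!(s->reg1c & 0x01);
	*fan5 = (s->enable & 0x20) ? gpok : !!(s->reg1c & 0x02);
}

int main(void)
{
	/* Hypothetical register snapshot: fan4 routed via GPIO, others legacy */
	struct sio_snapshot s = { .reg27 = 0x80, .enable = 0x40,
				  .reg24 = 0x40, .reg1c = 0x02 };
	bool fan3, fan4, fan5;

	nct6776_fan_pins(&s, &fan3, &fan4, &fan5);
	printf("fan3=%d fan4=%d fan5=%d\n", fan3, fan4, fan5);	/* 0 1 1 */
	return 0;
}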
config I2C_EG20T - tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223)" + tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) I2C" depends on PCI help This driver is for PCH(Platform controller Hub) I2C of EG20T which is an IOH(Input/Output Hub) for x86 embedded processor. This driver can access PCH I2C bus device. - This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ - Output Hub), ML7213 and ML7223. - ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is - for MP(Media Phone) use. - ML7213/ML7223 is companion chip for Intel Atom E6xx series. - ML7213/ML7223 is completely compatible for Intel EG20T PCH. + This driver also can be used for LAPIS Semiconductor IOH(Input/ + Output Hub), ML7213, ML7223 and ML7831. + ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is + for MP(Media Phone) use and ML7831 IOH is for general purpose use. + ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series. + ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH. comment "External I2C/SMBus adapter drivers" diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c index 3ef3557..ca88776 100644 --- a/drivers/i2c/busses/i2c-eg20t.c +++ b/drivers/i2c/busses/i2c-eg20t.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD. + * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -136,7 +136,8 @@ /* Set the number of I2C instance max Intel EG20T PCH : 1ch -OKI SEMICONDUCTOR ML7213 IOH : 2ch +LAPIS Semiconductor ML7213 IOH : 2ch +LAPIS Semiconductor ML7831 IOH : 1ch */ #define PCH_I2C_MAX_DEV 2 @@ -180,15 +181,17 @@ static int pch_clk = 50000; /* specifies I2C clock speed in KHz */ static wait_queue_head_t pch_event; static DEFINE_MUTEX(pch_mutex); -/* Definition for ML7213 by OKI SEMICONDUCTOR */ +/* Definition for ML7213 by LAPIS Semiconductor */ #define PCI_VENDOR_ID_ROHM 0x10DB #define PCI_DEVICE_ID_ML7213_I2C 0x802D #define PCI_DEVICE_ID_ML7223_I2C 0x8010 +#define PCI_DEVICE_ID_ML7831_I2C 0x8817 static DEFINE_PCI_DEVICE_TABLE(pch_pcidev_id) = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_I2C), 1, }, { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_I2C), 2, }, { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_I2C), 1, }, + { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_I2C), 1, }, {0,} }; @@ -243,7 +246,7 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap) if (pch_clk > PCH_MAX_CLK) pch_clk = 62500; - pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8; + pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / (pch_i2c_speed * 8); /* Set transfer speed in I2CBC */ iowrite32(pch_i2cbc, p + PCH_I2CBC); @@ -918,7 +921,9 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev, pch_adap->dev.parent = &pdev->dev; pch_i2c_init(&adap_info->pch_data[i]); - ret = i2c_add_adapter(pch_adap); + + pch_adap->nr = i; + ret = i2c_add_numbered_adapter(pch_adap); if (ret) { pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i); goto err_add_adapter; @@ -1058,8 +1063,8 @@ static void __exit pch_pci_exit(void) } module_exit(pch_pci_exit); -MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH I2C Driver"); +MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semico ML7213/ML7223/ML7831 IOH I2C"); MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.okisemi.com>"); +MODULE_AUTHOR("Tomoya MORINAGA. 
<tomoya-linux@dsn.lapis-semi.com>"); module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR)); module_param(pch_clk, int, (S_IRUSR | S_IWUSR)); diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index fa23faa..801df60 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -37,6 +37,9 @@ #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/io.h> +#include <linux/of.h> +#include <linux/of_i2c.h> +#include <linux/of_device.h> #include <linux/slab.h> #include <linux/i2c-omap.h> #include <linux/pm_runtime.h> @@ -182,7 +185,9 @@ struct omap_i2c_dev { u32 latency; /* maximum mpu wkup latency */ void (*set_mpu_wkup_lat)(struct device *dev, long latency); - u32 speed; /* Speed of bus in Khz */ + u32 speed; /* Speed of bus in kHz */ + u32 dtrev; /* extra revision from DT */ + u32 flags; u16 cmd_err; u8 *buf; u8 *regs; @@ -235,7 +240,7 @@ static const u8 reg_map_ip_v2[] = { [OMAP_I2C_BUF_REG] = 0x94, [OMAP_I2C_CNT_REG] = 0x98, [OMAP_I2C_DATA_REG] = 0x9c, - [OMAP_I2C_SYSC_REG] = 0x20, + [OMAP_I2C_SYSC_REG] = 0x10, [OMAP_I2C_CON_REG] = 0xa4, [OMAP_I2C_OA_REG] = 0xa8, [OMAP_I2C_SA_REG] = 0xac, @@ -266,11 +271,7 @@ static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg) static void omap_i2c_unidle(struct omap_i2c_dev *dev) { - struct omap_i2c_bus_platform_data *pdata; - - pdata = dev->dev->platform_data; - - if (pdata->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) { + if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) { omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0); omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate); omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, dev->scllstate); @@ -291,13 +292,10 @@ static void omap_i2c_unidle(struct omap_i2c_dev *dev) static void omap_i2c_idle(struct omap_i2c_dev *dev) { - struct omap_i2c_bus_platform_data *pdata; u16 iv; - pdata = dev->dev->platform_data; - dev->iestate = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG); - if (pdata->rev == OMAP_I2C_IP_VERSION_2) + if (dev->dtrev == OMAP_I2C_IP_VERSION_2) omap_i2c_write_reg(dev, OMAP_I2C_IP_V2_IRQENABLE_CLR, 1); else omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, 0); @@ -320,9 +318,6 @@ static int omap_i2c_init(struct omap_i2c_dev *dev) unsigned long timeout; unsigned long internal_clk = 0; struct clk *fclk; - struct omap_i2c_bus_platform_data *pdata; - - pdata = dev->dev->platform_data; if (dev->rev >= OMAP_I2C_OMAP1_REV_2) { /* Disable I2C controller before soft reset */ @@ -373,7 +368,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev) } omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0); - if (pdata->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) { + if (dev->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) { /* * The I2C functional clock is the armxor_ck, so there's * no need to get "armxor_ck" separately. Now, if OMAP2420 @@ -397,7 +392,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev) psc = fclk_rate / 12000000; } - if (!(pdata->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) { + if (!(dev->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) { /* * HSI2C controller internal clk rate should be 19.2 Mhz for @@ -406,7 +401,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev) * The filter is iclk (fclk for HS) period. 
*/ if (dev->speed > 400 || - pdata->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK) + dev->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK) internal_clk = 19200; else if (dev->speed > 100) internal_clk = 9600; @@ -475,7 +470,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev) dev->errata = 0; - if (pdata->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207) + if (dev->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207) dev->errata |= I2C_OMAP_ERRATA_I207; /* Enable interrupts */ @@ -484,7 +479,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev) OMAP_I2C_IE_AL) | ((dev->fifo_size) ? (OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0); omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate); - if (pdata->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) { + if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) { dev->pscstate = psc; dev->scllstate = scll; dev->sclhstate = sclh; @@ -804,9 +799,6 @@ omap_i2c_isr(int this_irq, void *dev_id) u16 bits; u16 stat, w; int err, count = 0; - struct omap_i2c_bus_platform_data *pdata; - - pdata = dev->dev->platform_data; if (pm_runtime_suspended(dev->dev)) return IRQ_NONE; @@ -830,11 +822,9 @@ complete: ~(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR | OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR)); - if (stat & OMAP_I2C_STAT_NACK) { + if (stat & OMAP_I2C_STAT_NACK) err |= OMAP_I2C_STAT_NACK; - omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, - OMAP_I2C_CON_STP); - } + if (stat & OMAP_I2C_STAT_AL) { dev_err(dev->dev, "Arbitration lost\n"); err |= OMAP_I2C_STAT_AL; @@ -875,7 +865,7 @@ complete: * Data reg in 2430, omap3 and * omap4 is 8 bit wide */ - if (pdata->flags & + if (dev->flags & OMAP_I2C_FLAG_16BIT_DATA_REG) { if (dev->buf_len) { *dev->buf++ = w >> 8; @@ -918,7 +908,7 @@ complete: * Data reg in 2430, omap3 and * omap4 is 8 bit wide */ - if (pdata->flags & + if (dev->flags & OMAP_I2C_FLAG_16BIT_DATA_REG) { if (dev->buf_len) { w |= *dev->buf++ << 8; @@ -965,6 +955,32 @@ static const struct i2c_algorithm omap_i2c_algo = { .functionality = omap_i2c_func, }; +#ifdef CONFIG_OF +static struct omap_i2c_bus_platform_data omap3_pdata = { + .rev = OMAP_I2C_IP_VERSION_1, + .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 | + OMAP_I2C_FLAG_RESET_REGS_POSTIDLE | + OMAP_I2C_FLAG_BUS_SHIFT_2, +}; + +static struct omap_i2c_bus_platform_data omap4_pdata = { + .rev = OMAP_I2C_IP_VERSION_2, +}; + +static const struct of_device_id omap_i2c_of_match[] = { + { + .compatible = "ti,omap4-i2c", + .data = &omap4_pdata, + }, + { + .compatible = "ti,omap3-i2c", + .data = &omap3_pdata, + }, + { }, +}; +MODULE_DEVICE_TABLE(of, omap_i2c_of_match); +#endif + static int __devinit omap_i2c_probe(struct platform_device *pdev) { @@ -972,9 +988,10 @@ omap_i2c_probe(struct platform_device *pdev) struct i2c_adapter *adap; struct resource *mem, *irq, *ioarea; struct omap_i2c_bus_platform_data *pdata = pdev->dev.platform_data; + struct device_node *node = pdev->dev.of_node; + const struct of_device_id *match; irq_handler_t isr; int r; - u32 speed = 0; /* NOTE: driver uses the static register mapping */ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1001,15 +1018,24 @@ omap_i2c_probe(struct platform_device *pdev) goto err_release_region; } - if (pdata != NULL) { - speed = pdata->clkrate; + match = of_match_device(of_match_ptr(omap_i2c_of_match), &pdev->dev); + if (match) { + u32 freq = 100000; /* default to 100000 Hz */ + + pdata = match->data; + dev->dtrev = pdata->rev; + dev->flags = pdata->flags; + + of_property_read_u32(node, "clock-frequency", &freq); + /* convert DT freq value in Hz into kHz for speed */ + dev->speed = freq / 1000; + } else if 
(pdata != NULL) { + dev->speed = pdata->clkrate; + dev->flags = pdata->flags; dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat; - } else { - speed = 100; /* Default speed */ - dev->set_mpu_wkup_lat = NULL; + dev->dtrev = pdata->rev; } - dev->speed = speed; dev->dev = &pdev->dev; dev->irq = irq->start; dev->base = ioremap(mem->start, resource_size(mem)); @@ -1020,9 +1046,9 @@ omap_i2c_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dev); - dev->reg_shift = (pdata->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3; + dev->reg_shift = (dev->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3; - if (pdata->rev == OMAP_I2C_IP_VERSION_2) + if (dev->dtrev == OMAP_I2C_IP_VERSION_2) dev->regs = (u8 *)reg_map_ip_v2; else dev->regs = (u8 *)reg_map_ip_v1; @@ -1035,7 +1061,7 @@ omap_i2c_probe(struct platform_device *pdev) if (dev->rev <= OMAP_I2C_REV_ON_3430) dev->errata |= I2C_OMAP3_1P153; - if (!(pdata->flags & OMAP_I2C_FLAG_NO_FIFO)) { + if (!(dev->flags & OMAP_I2C_FLAG_NO_FIFO)) { u16 s; /* Set up the fifo size - Get total size */ @@ -1058,7 +1084,7 @@ omap_i2c_probe(struct platform_device *pdev) /* calculate wakeup latency constraint for MPU */ if (dev->set_mpu_wkup_lat != NULL) dev->latency = (1000000 * dev->fifo_size) / - (1000 * speed / 8); + (1000 * dev->speed / 8); } /* reset ASAP, clearing any IRQs */ @@ -1074,7 +1100,7 @@ omap_i2c_probe(struct platform_device *pdev) } dev_info(dev->dev, "bus %d rev%d.%d.%d at %d kHz\n", pdev->id, - pdata->rev, dev->rev >> 4, dev->rev & 0xf, dev->speed); + dev->dtrev, dev->rev >> 4, dev->rev & 0xf, dev->speed); pm_runtime_put(dev->dev); @@ -1085,6 +1111,7 @@ omap_i2c_probe(struct platform_device *pdev) strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name)); adap->algo = &omap_i2c_algo; adap->dev.parent = &pdev->dev; + adap->dev.of_node = pdev->dev.of_node; /* i2c device drivers may be active on return from add_adapter() */ adap->nr = pdev->id; @@ -1094,6 +1121,8 @@ omap_i2c_probe(struct platform_device *pdev) goto err_free_irq; } + of_i2c_register_devices(adap); + return 0; err_free_irq: @@ -1166,6 +1195,7 @@ static struct platform_driver omap_i2c_driver = { .name = "omap_i2c", .owner = THIS_MODULE, .pm = OMAP_I2C_PM_OPS, + .of_match_table = of_match_ptr(omap_i2c_of_match), }, }; diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 5d2f8e1..54ab97b 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -197,7 +197,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { .enter = &intel_idle }, }; -static int get_driver_data(int cstate) +static long get_driver_data(int cstate) { int driver_data; switch (cstate) { @@ -232,6 +232,7 @@ static int get_driver_data(int cstate) * @drv: cpuidle driver * @index: index of cpuidle state * + * Must be called under local_irq_disable(). */ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) @@ -247,8 +248,6 @@ static int intel_idle(struct cpuidle_device *dev, cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; - local_irq_disable(); - /* * leave_mm() to avoid costly and often unnecessary wakeups * for flushing the user TLB's associated with the active mm. 
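The intel_idle() hunk above drops the driver's own local_irq_disable() call (the function is now documented as being entered with interrupts already off) and keeps deriving the target C-state from the MWAIT hint passed in eax. A rough stand-alone sketch of that decoding, assuming the conventional MWAIT hint layout (bits 7:4 select the C-state field, bits 3:0 the sub-state); the constants and hint values below are local to the example, not the kernel's:

/*
 * Stand-alone illustration (not kernel code) of decoding an MWAIT hint,
 * assuming the conventional layout: bits 7:4 = C-state field,
 * bits 3:0 = sub-state.  The +1 mirrors the hunk above.
 */
#include <stdio.h>

#define HINT_SUBSTATE_BITS	4	/* width of the sub-state field */
#define HINT_FIELD_MASK		0xf	/* one nibble */

static unsigned int hint_to_cstate(unsigned int hint)
{
	return ((hint >> HINT_SUBSTATE_BITS) & HINT_FIELD_MASK) + 1;
}

int main(void)
{
	/* Hypothetical hint values; real tables differ per CPU generation. */
	unsigned int hints[] = { 0x00, 0x10, 0x20, 0x30 };
	unsigned int i;

	for (i = 0; i < sizeof(hints) / sizeof(hints[0]); i++)
		printf("hint 0x%02x -> C-state %u\n",
		       hints[i], hint_to_cstate(hints[i]));
	return 0;	/* prints C-state 1, 2, 3, 4 */
}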
@@ -348,7 +347,8 @@ static int intel_idle_probe(void) cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates); if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || - !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) + !(ecx & CPUID5_ECX_INTERRUPT_BREAK) || + !mwait_substates) return -ENODEV; pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates); @@ -394,7 +394,7 @@ static int intel_idle_probe(void) if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */ lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE; else { - smp_call_function(__setup_broadcast_timer, (void *)true, 1); + on_each_cpu(__setup_broadcast_timer, (void *)true, 1); register_cpu_notifier(&setup_broadcast_notifier); } @@ -471,71 +471,67 @@ static int intel_idle_cpuidle_driver_init(void) } if (auto_demotion_disable_flags) - smp_call_function(auto_demotion_disable, NULL, 1); + on_each_cpu(auto_demotion_disable, NULL, 1); return 0; } /* - * intel_idle_cpuidle_devices_init() + * intel_idle_cpu_init() * allocate, initialize, register cpuidle_devices + * @cpu: cpu/core to initialize */ -static int intel_idle_cpuidle_devices_init(void) +int intel_idle_cpu_init(int cpu) { - int i, cstate; + int cstate; struct cpuidle_device *dev; - intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device); - if (intel_idle_cpuidle_devices == NULL) - return -ENOMEM; - - for_each_online_cpu(i) { - dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); + dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu); - dev->state_count = 1; + dev->state_count = 1; - for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) { - int num_substates; + for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) { + int num_substates; - if (cstate > max_cstate) { - printk(PREFIX "max_cstate %d reached\n", - max_cstate); - break; - } + if (cstate > max_cstate) { + printk(PREFIX "max_cstate %d reached\n", + max_cstate); + break; + } - /* does the state exist in CPUID.MWAIT? */ - num_substates = (mwait_substates >> ((cstate) * 4)) - & MWAIT_SUBSTATE_MASK; - if (num_substates == 0) - continue; - /* is the state not enabled? */ - if (cpuidle_state_table[cstate].enter == NULL) { - continue; - } + /* does the state exist in CPUID.MWAIT? */ + num_substates = (mwait_substates >> ((cstate) * 4)) + & MWAIT_SUBSTATE_MASK; + if (num_substates == 0) + continue; + /* is the state not enabled? 
*/ + if (cpuidle_state_table[cstate].enter == NULL) + continue; - dev->states_usage[dev->state_count].driver_data = - (void *)get_driver_data(cstate); + dev->states_usage[dev->state_count].driver_data = + (void *)get_driver_data(cstate); dev->state_count += 1; } + dev->cpu = cpu; - dev->cpu = i; - if (cpuidle_register_device(dev)) { - pr_debug(PREFIX "cpuidle_register_device %d failed!\n", - i); - intel_idle_cpuidle_devices_uninit(); - return -EIO; - } + if (cpuidle_register_device(dev)) { + pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu); + intel_idle_cpuidle_devices_uninit(); + return -EIO; } + if (auto_demotion_disable_flags) + smp_call_function_single(cpu, auto_demotion_disable, NULL, 1); + return 0; } - +EXPORT_SYMBOL_GPL(intel_idle_cpu_init); static int __init intel_idle_init(void) { - int retval; + int retval, i; /* Do not load intel_idle at all for now if idle= is passed */ if (boot_option_idle_override != IDLE_NO_OVERRIDE) @@ -553,10 +549,16 @@ static int __init intel_idle_init(void) return retval; } - retval = intel_idle_cpuidle_devices_init(); - if (retval) { - cpuidle_unregister_driver(&intel_idle_driver); - return retval; + intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device); + if (intel_idle_cpuidle_devices == NULL) + return -ENOMEM; + + for_each_online_cpu(i) { + retval = intel_idle_cpu_init(i); + if (retval) { + cpuidle_unregister_driver(&intel_idle_driver); + return retval; + } } return 0; @@ -568,7 +570,7 @@ static void __exit intel_idle_exit(void) cpuidle_unregister_driver(&intel_idle_driver); if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) { - smp_call_function(__setup_broadcast_timer, (void *)false, 1); + on_each_cpu(__setup_broadcast_timer, (void *)false, 1); unregister_cpu_notifier(&setup_broadcast_notifier); } diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index 0f9a84c..eb0add31 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -55,6 +55,7 @@ source "drivers/infiniband/hw/nes/Kconfig" source "drivers/infiniband/ulp/ipoib/Kconfig" source "drivers/infiniband/ulp/srp/Kconfig" +source "drivers/infiniband/ulp/srpt/Kconfig" source "drivers/infiniband/ulp/iser/Kconfig" diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile index 9cc7a47..a3b2d8e 100644 --- a/drivers/infiniband/Makefile +++ b/drivers/infiniband/Makefile @@ -10,4 +10,5 @@ obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/ obj-$(CONFIG_INFINIBAND_NES) += hw/nes/ obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/ obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/ +obj-$(CONFIG_INFINIBAND_SRPT) += ulp/srpt/ obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/ diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index b37b0c0..5034a87 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -808,9 +808,12 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf, return PTR_ERR(ctx); if (cmd.conn_param.valid) { - ctx->uid = cmd.uid; ucma_copy_conn_param(&conn_param, &cmd.conn_param); + mutex_lock(&file->mut); ret = rdma_accept(ctx->cm_id, &conn_param); + if (!ret) + ctx->uid = cmd.uid; + mutex_unlock(&file->mut); } else ret = rdma_accept(ctx->cm_id, NULL); diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index b930da4..4d27e4c3 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1485,6 +1485,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, qp->event_handler = 
attr.event_handler; qp->qp_context = attr.qp_context; qp->qp_type = attr.qp_type; + atomic_set(&qp->usecnt, 0); atomic_inc(&pd->usecnt); atomic_inc(&attr.send_cq->usecnt); if (attr.recv_cq) diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 602b1bd..575b780 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -421,6 +421,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd, qp->uobject = NULL; qp->qp_type = qp_init_attr->qp_type; + atomic_set(&qp->usecnt, 0); if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) { qp->event_handler = __ib_shared_qp_event_handler; qp->qp_context = qp; @@ -430,7 +431,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd, qp->xrcd = qp_init_attr->xrcd; atomic_inc(&qp_init_attr->xrcd->usecnt); INIT_LIST_HEAD(&qp->open_list); - atomic_set(&qp->usecnt, 0); real_qp = qp; qp = __ib_open_qp(real_qp, qp_init_attr->event_handler, diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c index b7d4216..a4de9d5 100644 --- a/drivers/infiniband/hw/ipath/ipath_fs.c +++ b/drivers/infiniband/hw/ipath/ipath_fs.c @@ -89,7 +89,7 @@ static int create_file(const char *name, umode_t mode, error = ipathfs_mknod(parent->d_inode, *dentry, mode, fops, data); else - error = PTR_ERR(dentry); + error = PTR_ERR(*dentry); mutex_unlock(&parent->d_inode->i_mutex); return error; diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 95c94d8..259b067 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -257,12 +257,9 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, return IB_MAD_RESULT_SUCCESS; /* - * Don't process SMInfo queries or vendor-specific - * MADs -- the SMA can't handle them. + * Don't process SMInfo queries -- the SMA can't handle them. */ - if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO || - ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) == - IB_SMP_ATTR_VENDOR_MASK)) + if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO) return IB_MAD_RESULT_SUCCESS; } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT || in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1 || diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 7013da5..7140199 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. * * This software is available to you under a choice of one of two diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h index 568b4f1..c438e46 100644 --- a/drivers/infiniband/hw/nes/nes.h +++ b/drivers/infiniband/hw/nes/nes.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. * * This software is available to you under a choice of one of two diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 425065b..a4972ab 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. 
* * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -233,6 +233,7 @@ static int send_mpa_reject(struct nes_cm_node *cm_node) u8 *start_ptr = &start_addr; u8 **start_buff = &start_ptr; u16 buff_len = 0; + struct ietf_mpa_v1 *mpa_frame; skb = dev_alloc_skb(MAX_CM_BUFFER); if (!skb) { @@ -242,6 +243,8 @@ static int send_mpa_reject(struct nes_cm_node *cm_node) /* send an MPA reject frame */ cm_build_mpa_frame(cm_node, start_buff, &buff_len, NULL, MPA_KEY_REPLY); + mpa_frame = (struct ietf_mpa_v1 *)*start_buff; + mpa_frame->flags |= IETF_MPA_FLAGS_REJECT; form_cm_frame(skb, cm_node, NULL, 0, *start_buff, buff_len, SET_ACK | SET_FIN); cm_node->state = NES_CM_STATE_FIN_WAIT1; @@ -1360,8 +1363,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi if (!memcmp(nesadapter->arp_table[arpindex].mac_addr, neigh->ha, ETH_ALEN)) { /* Mac address same as in nes_arp_table */ - ip_rt_put(rt); - return rc; + goto out; } nes_manage_arp_cache(nesvnic->netdev, @@ -1377,6 +1379,8 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi neigh_event_send(neigh, NULL); } } + +out: rcu_read_unlock(); ip_rt_put(rt); return rc; diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h index bdfa1fb..4646e66 100644 --- a/drivers/infiniband/hw/nes/nes_cm.h +++ b/drivers/infiniband/hw/nes/nes_cm.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/nes/nes_context.h b/drivers/infiniband/hw/nes/nes_context.h index b4393a1..a69eef1 100644 --- a/drivers/infiniband/hw/nes/nes_context.h +++ b/drivers/infiniband/hw/nes/nes_context.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 055f4b5..d42c9f4 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h index 0b590e1..d748e4b 100644 --- a/drivers/infiniband/hw/nes/nes_hw.h +++ b/drivers/infiniband/hw/nes/nes_hw.h @@ -1,5 +1,5 @@ /* -* Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. +* Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c index b3b2a24..3ba7be3 100644 --- a/drivers/infiniband/hw/nes/nes_mgt.c +++ b/drivers/infiniband/hw/nes/nes_mgt.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved. 
+ * Copyright (c) 2006 - 2011 Intel-NE, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/nes/nes_mgt.h b/drivers/infiniband/hw/nes/nes_mgt.h index 8c8af25..4f7f701 100644 --- a/drivers/infiniband/hw/nes/nes_mgt.h +++ b/drivers/infiniband/hw/nes/nes_mgt.h @@ -1,5 +1,5 @@ /* -* Copyright (c) 2010 Intel-NE, Inc. All rights reserved. +* Copyright (c) 2006 - 2011 Intel-NE, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 4b3fa71..f3a3ecf 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/nes/nes_user.h b/drivers/infiniband/hw/nes/nes_user.h index 71e133a..4926de7 100644 --- a/drivers/infiniband/hw/nes/nes_user.h +++ b/drivers/infiniband/hw/nes/nes_user.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. * Copyright (c) 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c index 8b4c2ff..e98f4fc 100644 --- a/drivers/infiniband/hw/nes/nes_utils.c +++ b/drivers/infiniband/hw/nes/nes_utils.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 5095bc4..0927b5c 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -3428,6 +3428,8 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX, ib_wr->wr.fast_reg.length); set_wqe_32bit_value(wqe->wqe_words, + NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0); + set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_FMR_WQE_MR_STAG_IDX, ib_wr->wr.fast_reg.rkey); /* Set page size: */ @@ -3724,7 +3726,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) entry->opcode = IB_WC_SEND; break; case NES_IWARP_SQ_OP_LOCINV: - entry->opcode = IB_WR_LOCAL_INV; + entry->opcode = IB_WC_LOCAL_INV; break; case NES_IWARP_SQ_OP_FAST_REG: entry->opcode = IB_WC_FAST_REG_MR; diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h index fe6b6e9..0eff7c4 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.h +++ b/drivers/infiniband/hw/nes/nes_verbs.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved. + * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. * * This software is available to you under a choice of one of two diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index 4f18e2d..d0c64d5 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -2105,7 +2105,7 @@ static void alloc_dummy_hdrq(struct qib_devdata *dd) dd->cspec->dummy_hdrq = dma_alloc_coherent(&dd->pcidev->dev, dd->rcd[0]->rcvhdrq_size, &dd->cspec->dummy_hdrq_phys, - GFP_KERNEL | __GFP_COMP); + GFP_ATOMIC | __GFP_COMP); if (!dd->cspec->dummy_hdrq) { qib_devinfo(dd->pcidev, "Couldn't allocate dummy hdrq\n"); /* fallback to just 0'ing */ diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c index f695061..0fde788 100644 --- a/drivers/infiniband/hw/qib/qib_pcie.c +++ b/drivers/infiniband/hw/qib/qib_pcie.c @@ -560,7 +560,7 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd) * BIOS may not set PCIe bus-utilization parameters for best performance. * Check and optionally adjust them to maximize our throughput. */ -static int qib_pcie_caps = 0x51; +static int qib_pcie_caps; module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO); MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)"); diff --git a/drivers/infiniband/ulp/srpt/Kconfig b/drivers/infiniband/ulp/srpt/Kconfig new file mode 100644 index 0000000..31ee83d --- /dev/null +++ b/drivers/infiniband/ulp/srpt/Kconfig @@ -0,0 +1,12 @@ +config INFINIBAND_SRPT + tristate "InfiniBand SCSI RDMA Protocol target support" + depends on INFINIBAND && TARGET_CORE + ---help--- + + Support for the SCSI RDMA Protocol (SRP) Target driver. The + SRP protocol is a protocol that allows an initiator to access + a block storage device on another host (target) over a network + that supports the RDMA protocol. Currently the RDMA protocol is + supported by InfiniBand and by iWarp network hardware. More + information about the SRP protocol can be found on the website + of the INCITS T10 technical committee (http://www.t10.org/). 
diff --git a/drivers/infiniband/ulp/srpt/Makefile b/drivers/infiniband/ulp/srpt/Makefile new file mode 100644 index 0000000..e3ee4bd --- /dev/null +++ b/drivers/infiniband/ulp/srpt/Makefile @@ -0,0 +1,2 @@ +ccflags-y := -Idrivers/target +obj-$(CONFIG_INFINIBAND_SRPT) += ib_srpt.o diff --git a/drivers/infiniband/ulp/srpt/ib_dm_mad.h b/drivers/infiniband/ulp/srpt/ib_dm_mad.h new file mode 100644 index 0000000..fb1de1f --- /dev/null +++ b/drivers/infiniband/ulp/srpt/ib_dm_mad.h @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef IB_DM_MAD_H +#define IB_DM_MAD_H + +#include <linux/types.h> + +#include <rdma/ib_mad.h> + +enum { + /* + * See also section 13.4.7 Status Field, table 115 MAD Common Status + * Field Bit Values and also section 16.3.1.1 Status Field in the + * InfiniBand Architecture Specification. + */ + DM_MAD_STATUS_UNSUP_METHOD = 0x0008, + DM_MAD_STATUS_UNSUP_METHOD_ATTR = 0x000c, + DM_MAD_STATUS_INVALID_FIELD = 0x001c, + DM_MAD_STATUS_NO_IOC = 0x0100, + + /* + * See also the Device Management chapter, section 16.3.3 Attributes, + * table 279 Device Management Attributes in the InfiniBand + * Architecture Specification. + */ + DM_ATTR_CLASS_PORT_INFO = 0x01, + DM_ATTR_IOU_INFO = 0x10, + DM_ATTR_IOC_PROFILE = 0x11, + DM_ATTR_SVC_ENTRIES = 0x12 +}; + +struct ib_dm_hdr { + u8 reserved[28]; +}; + +/* + * Structure of management datagram sent by the SRP target implementation. + * Contains a management datagram header, reliable multi-packet transaction + * protocol (RMPP) header and ib_dm_hdr. Notes: + * - The SRP target implementation does not use RMPP or ib_dm_hdr when sending + * management datagrams. + * - The header size must be exactly 64 bytes (IB_MGMT_DEVICE_HDR), since this + * is the header size that is passed to ib_create_send_mad() in ib_srpt.c. + * - The maximum supported size for a management datagram when not using RMPP + * is 256 bytes -- 64 bytes header and 192 (IB_MGMT_DEVICE_DATA) bytes data. 
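The 64 + 192 byte layout described in the comment above can be captured in a compile-time check. Below is a standalone userspace sketch, not part of the patch; the 24-, 12- and 28-byte figures are assumptions mirroring the usual ib_mad_hdr, ib_rmpp_hdr and ib_dm_hdr definitions, and the *_LEN macros are local stand-ins for IB_MGMT_DEVICE_HDR and IB_MGMT_DEVICE_DATA.

/* Userspace sketch only, not kernel code. */
#include <stdio.h>

#define MAD_HDR_LEN     24   /* stand-in for sizeof(struct ib_mad_hdr) */
#define RMPP_HDR_LEN    12   /* stand-in for sizeof(struct ib_rmpp_hdr) */
#define DM_HDR_LEN      28   /* stand-in for sizeof(struct ib_dm_hdr) */
#define DEVICE_HDR_LEN  64   /* stand-in for IB_MGMT_DEVICE_HDR */
#define DEVICE_DATA_LEN 192  /* stand-in for IB_MGMT_DEVICE_DATA */

_Static_assert(MAD_HDR_LEN + RMPP_HDR_LEN + DM_HDR_LEN == DEVICE_HDR_LEN,
               "DM MAD header must be exactly 64 bytes");

int main(void)
{
        printf("non-RMPP DM MAD size: %d bytes\n",
               DEVICE_HDR_LEN + DEVICE_DATA_LEN);
        return 0;
}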
+ */ +struct ib_dm_mad { + struct ib_mad_hdr mad_hdr; + struct ib_rmpp_hdr rmpp_hdr; + struct ib_dm_hdr dm_hdr; + u8 data[IB_MGMT_DEVICE_DATA]; +}; + +/* + * IOUnitInfo as defined in section 16.3.3.3 IOUnitInfo of the InfiniBand + * Architecture Specification. + */ +struct ib_dm_iou_info { + __be16 change_id; + u8 max_controllers; + u8 op_rom; + u8 controller_list[128]; +}; + +/* + * IOControllerprofile as defined in section 16.3.3.4 IOControllerProfile of + * the InfiniBand Architecture Specification. + */ +struct ib_dm_ioc_profile { + __be64 guid; + __be32 vendor_id; + __be32 device_id; + __be16 device_version; + __be16 reserved1; + __be32 subsys_vendor_id; + __be32 subsys_device_id; + __be16 io_class; + __be16 io_subclass; + __be16 protocol; + __be16 protocol_version; + __be16 service_conn; + __be16 initiators_supported; + __be16 send_queue_depth; + u8 reserved2; + u8 rdma_read_depth; + __be32 send_size; + __be32 rdma_size; + u8 op_cap_mask; + u8 svc_cap_mask; + u8 num_svc_entries; + u8 reserved3[9]; + u8 id_string[64]; +}; + +struct ib_dm_svc_entry { + u8 name[40]; + __be64 id; +}; + +/* + * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture + * Specification. See also section B.7, table B.8 in the T10 SRP r16a document. + */ +struct ib_dm_svc_entries { + struct ib_dm_svc_entry service_entries[4]; +}; + +#endif diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c new file mode 100644 index 0000000..2b73d43 --- /dev/null +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -0,0 +1,4070 @@ +/* + * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved. + * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/ctype.h> +#include <linux/kthread.h> +#include <linux/string.h> +#include <linux/delay.h> +#include <linux/atomic.h> +#include <scsi/scsi_tcq.h> +#include <target/configfs_macros.h> +#include <target/target_core_base.h> +#include <target/target_core_fabric_configfs.h> +#include <target/target_core_fabric.h> +#include <target/target_core_configfs.h> +#include "ib_srpt.h" + +/* Name of this kernel module. */ +#define DRV_NAME "ib_srpt" +#define DRV_VERSION "2.0.0" +#define DRV_RELDATE "2011-02-14" + +#define SRPT_ID_STRING "Linux SRP target" + +#undef pr_fmt +#define pr_fmt(fmt) DRV_NAME " " fmt + +MODULE_AUTHOR("Vu Pham and Bart Van Assche"); +MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target " + "v" DRV_VERSION " (" DRV_RELDATE ")"); +MODULE_LICENSE("Dual BSD/GPL"); + +/* + * Global Variables + */ + +static u64 srpt_service_guid; +static DEFINE_SPINLOCK(srpt_dev_lock); /* Protects srpt_dev_list. */ +static LIST_HEAD(srpt_dev_list); /* List of srpt_device structures. */ + +static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE; +module_param(srp_max_req_size, int, 0444); +MODULE_PARM_DESC(srp_max_req_size, + "Maximum size of SRP request messages in bytes."); + +static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE; +module_param(srpt_srq_size, int, 0444); +MODULE_PARM_DESC(srpt_srq_size, + "Shared receive queue (SRQ) size."); + +static int srpt_get_u64_x(char *buffer, struct kernel_param *kp) +{ + return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg); +} +module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid, + 0444); +MODULE_PARM_DESC(srpt_service_guid, + "Using this value for ioc_guid, id_ext, and cm_listen_id" + " instead of using the node_guid of the first HCA."); + +static struct ib_client srpt_client; +static struct target_fabric_configfs *srpt_target; +static void srpt_release_channel(struct srpt_rdma_ch *ch); +static int srpt_queue_status(struct se_cmd *cmd); + +/** + * opposite_dma_dir() - Swap DMA_TO_DEVICE and DMA_FROM_DEVICE. + */ +static inline +enum dma_data_direction opposite_dma_dir(enum dma_data_direction dir) +{ + switch (dir) { + case DMA_TO_DEVICE: return DMA_FROM_DEVICE; + case DMA_FROM_DEVICE: return DMA_TO_DEVICE; + default: return dir; + } +} + +/** + * srpt_sdev_name() - Return the name associated with the HCA. + * + * Examples are ib0, ib1, ... + */ +static inline const char *srpt_sdev_name(struct srpt_device *sdev) +{ + return sdev->device->name; +} + +static enum rdma_ch_state srpt_get_ch_state(struct srpt_rdma_ch *ch) +{ + unsigned long flags; + enum rdma_ch_state state; + + spin_lock_irqsave(&ch->spinlock, flags); + state = ch->state; + spin_unlock_irqrestore(&ch->spinlock, flags); + return state; +} + +static enum rdma_ch_state +srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new_state) +{ + unsigned long flags; + enum rdma_ch_state prev; + + spin_lock_irqsave(&ch->spinlock, flags); + prev = ch->state; + ch->state = new_state; + spin_unlock_irqrestore(&ch->spinlock, flags); + return prev; +} + +/** + * srpt_test_and_set_ch_state() - Test and set the channel state. + * + * Returns true if and only if the channel state has been set to the new state. 
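The channel state above is only read and written under ch->spinlock, and the test-and-set variant lets exactly one caller win a state transition; srpt_qp_event() below uses it so a draining channel is released only once. Here is a minimal userspace sketch of the same compare-and-set pattern, with a pthread mutex standing in for the spinlock; CH_DRAINING and CH_RELEASING come from the driver, the other state names are placeholders.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum ch_state { CH_CONNECTING, CH_LIVE, CH_DRAINING, CH_RELEASING };

struct channel {
        pthread_mutex_t lock;   /* stands in for ch->spinlock */
        enum ch_state state;
};

/* Change the state only if it currently equals 'old'; report who won. */
static bool ch_test_and_set_state(struct channel *ch,
                                  enum ch_state old, enum ch_state new)
{
        bool changed;

        pthread_mutex_lock(&ch->lock);
        changed = (ch->state == old);
        if (changed)
                ch->state = new;
        pthread_mutex_unlock(&ch->lock);
        return changed;
}

int main(void)
{
        struct channel ch = {
                .lock  = PTHREAD_MUTEX_INITIALIZER,
                .state = CH_DRAINING,
        };

        /* Mirrors the LAST_WQE handling: only the first caller releases. */
        printf("%d\n", ch_test_and_set_state(&ch, CH_DRAINING, CH_RELEASING));
        printf("%d\n", ch_test_and_set_state(&ch, CH_DRAINING, CH_RELEASING));
        return 0;
}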
+ */ +static bool +srpt_test_and_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state old, + enum rdma_ch_state new) +{ + unsigned long flags; + enum rdma_ch_state prev; + + spin_lock_irqsave(&ch->spinlock, flags); + prev = ch->state; + if (prev == old) + ch->state = new; + spin_unlock_irqrestore(&ch->spinlock, flags); + return prev == old; +} + +/** + * srpt_event_handler() - Asynchronous IB event callback function. + * + * Callback function called by the InfiniBand core when an asynchronous IB + * event occurs. This callback may occur in interrupt context. See also + * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand + * Architecture Specification. + */ +static void srpt_event_handler(struct ib_event_handler *handler, + struct ib_event *event) +{ + struct srpt_device *sdev; + struct srpt_port *sport; + + sdev = ib_get_client_data(event->device, &srpt_client); + if (!sdev || sdev->device != event->device) + return; + + pr_debug("ASYNC event= %d on device= %s\n", event->event, + srpt_sdev_name(sdev)); + + switch (event->event) { + case IB_EVENT_PORT_ERR: + if (event->element.port_num <= sdev->device->phys_port_cnt) { + sport = &sdev->port[event->element.port_num - 1]; + sport->lid = 0; + sport->sm_lid = 0; + } + break; + case IB_EVENT_PORT_ACTIVE: + case IB_EVENT_LID_CHANGE: + case IB_EVENT_PKEY_CHANGE: + case IB_EVENT_SM_CHANGE: + case IB_EVENT_CLIENT_REREGISTER: + /* Refresh port data asynchronously. */ + if (event->element.port_num <= sdev->device->phys_port_cnt) { + sport = &sdev->port[event->element.port_num - 1]; + if (!sport->lid && !sport->sm_lid) + schedule_work(&sport->work); + } + break; + default: + printk(KERN_ERR "received unrecognized IB event %d\n", + event->event); + break; + } +} + +/** + * srpt_srq_event() - SRQ event callback function. + */ +static void srpt_srq_event(struct ib_event *event, void *ctx) +{ + printk(KERN_INFO "SRQ event %d\n", event->event); +} + +/** + * srpt_qp_event() - QP event callback function. + */ +static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch) +{ + pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n", + event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch)); + + switch (event->event) { + case IB_EVENT_COMM_EST: + ib_cm_notify(ch->cm_id, event->event); + break; + case IB_EVENT_QP_LAST_WQE_REACHED: + if (srpt_test_and_set_ch_state(ch, CH_DRAINING, + CH_RELEASING)) + srpt_release_channel(ch); + else + pr_debug("%s: state %d - ignored LAST_WQE.\n", + ch->sess_name, srpt_get_ch_state(ch)); + break; + default: + printk(KERN_ERR "received unrecognized IB QP event %d\n", + event->event); + break; + } +} + +/** + * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure. + * + * @slot: one-based slot number. + * @value: four-bit value. + * + * Copies the lowest four bits of value in element slot of the array of four + * bit elements called c_list (controller list). The index slot is one-based. + */ +static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value) +{ + u16 id; + u8 tmp; + + id = (slot - 1) / 2; + if (slot & 0x1) { + tmp = c_list[id] & 0xf; + c_list[id] = (value << 4) | tmp; + } else { + tmp = c_list[id] & 0xf0; + c_list[id] = (value & 0xf) | tmp; + } +} + +/** + * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram. + * + * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture + * Specification. 
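srpt_set_ioc() above packs two one-based controller slots per byte of the IOUnitInfo controller list: odd slots land in the high nibble, even slots in the low nibble. A standalone userspace sketch of the same packing (set_ioc here is a local re-implementation for illustration, not the kernel symbol):

#include <stdint.h>
#include <stdio.h>

/* Same rule as srpt_set_ioc(): one-based slot, two slots per byte,
 * odd slots in the high nibble, even slots in the low nibble. */
static void set_ioc(uint8_t *c_list, unsigned int slot, uint8_t value)
{
        unsigned int id = (slot - 1) / 2;

        if (slot & 1)
                c_list[id] = (uint8_t)((value << 4) | (c_list[id] & 0x0f));
        else
                c_list[id] = (uint8_t)((value & 0x0f) | (c_list[id] & 0xf0));
}

int main(void)
{
        uint8_t list[128] = { 0 };

        set_ioc(list, 1, 1);    /* slot 1 present -> high nibble of byte 0 */
        set_ioc(list, 2, 0);    /* slot 2 empty   -> low nibble of byte 0 */
        printf("controller_list[0] = 0x%02x\n", list[0]);  /* prints 0x10 */
        return 0;
}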
+ */ +static void srpt_get_class_port_info(struct ib_dm_mad *mad) +{ + struct ib_class_port_info *cif; + + cif = (struct ib_class_port_info *)mad->data; + memset(cif, 0, sizeof *cif); + cif->base_version = 1; + cif->class_version = 1; + cif->resp_time_value = 20; + + mad->mad_hdr.status = 0; +} + +/** + * srpt_get_iou() - Write IOUnitInfo to a management datagram. + * + * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture + * Specification. See also section B.7, table B.6 in the SRP r16a document. + */ +static void srpt_get_iou(struct ib_dm_mad *mad) +{ + struct ib_dm_iou_info *ioui; + u8 slot; + int i; + + ioui = (struct ib_dm_iou_info *)mad->data; + ioui->change_id = __constant_cpu_to_be16(1); + ioui->max_controllers = 16; + + /* set present for slot 1 and empty for the rest */ + srpt_set_ioc(ioui->controller_list, 1, 1); + for (i = 1, slot = 2; i < 16; i++, slot++) + srpt_set_ioc(ioui->controller_list, slot, 0); + + mad->mad_hdr.status = 0; +} + +/** + * srpt_get_ioc() - Write IOControllerprofile to a management datagram. + * + * See also section 16.3.3.4 IOControllerProfile in the InfiniBand + * Architecture Specification. See also section B.7, table B.7 in the SRP + * r16a document. + */ +static void srpt_get_ioc(struct srpt_port *sport, u32 slot, + struct ib_dm_mad *mad) +{ + struct srpt_device *sdev = sport->sdev; + struct ib_dm_ioc_profile *iocp; + + iocp = (struct ib_dm_ioc_profile *)mad->data; + + if (!slot || slot > 16) { + mad->mad_hdr.status + = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD); + return; + } + + if (slot > 2) { + mad->mad_hdr.status + = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC); + return; + } + + memset(iocp, 0, sizeof *iocp); + strcpy(iocp->id_string, SRPT_ID_STRING); + iocp->guid = cpu_to_be64(srpt_service_guid); + iocp->vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id); + iocp->device_id = cpu_to_be32(sdev->dev_attr.vendor_part_id); + iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver); + iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id); + iocp->subsys_device_id = 0x0; + iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS); + iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS); + iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL); + iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION); + iocp->send_queue_depth = cpu_to_be16(sdev->srq_size); + iocp->rdma_read_depth = 4; + iocp->send_size = cpu_to_be32(srp_max_req_size); + iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size, + 1U << 24)); + iocp->num_svc_entries = 1; + iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC | + SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC; + + mad->mad_hdr.status = 0; +} + +/** + * srpt_get_svc_entries() - Write ServiceEntries to a management datagram. + * + * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture + * Specification. See also section B.7, table B.8 in the SRP r16a document. 
+ */ +static void srpt_get_svc_entries(u64 ioc_guid, + u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad) +{ + struct ib_dm_svc_entries *svc_entries; + + WARN_ON(!ioc_guid); + + if (!slot || slot > 16) { + mad->mad_hdr.status + = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD); + return; + } + + if (slot > 2 || lo > hi || hi > 1) { + mad->mad_hdr.status + = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC); + return; + } + + svc_entries = (struct ib_dm_svc_entries *)mad->data; + memset(svc_entries, 0, sizeof *svc_entries); + svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid); + snprintf(svc_entries->service_entries[0].name, + sizeof(svc_entries->service_entries[0].name), + "%s%016llx", + SRP_SERVICE_NAME_PREFIX, + ioc_guid); + + mad->mad_hdr.status = 0; +} + +/** + * srpt_mgmt_method_get() - Process a received management datagram. + * @sp: source port through which the MAD has been received. + * @rq_mad: received MAD. + * @rsp_mad: response MAD. + */ +static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad, + struct ib_dm_mad *rsp_mad) +{ + u16 attr_id; + u32 slot; + u8 hi, lo; + + attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id); + switch (attr_id) { + case DM_ATTR_CLASS_PORT_INFO: + srpt_get_class_port_info(rsp_mad); + break; + case DM_ATTR_IOU_INFO: + srpt_get_iou(rsp_mad); + break; + case DM_ATTR_IOC_PROFILE: + slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod); + srpt_get_ioc(sp, slot, rsp_mad); + break; + case DM_ATTR_SVC_ENTRIES: + slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod); + hi = (u8) ((slot >> 8) & 0xff); + lo = (u8) (slot & 0xff); + slot = (u16) ((slot >> 16) & 0xffff); + srpt_get_svc_entries(srpt_service_guid, + slot, hi, lo, rsp_mad); + break; + default: + rsp_mad->mad_hdr.status = + __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR); + break; + } +} + +/** + * srpt_mad_send_handler() - Post MAD-send callback function. + */ +static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent, + struct ib_mad_send_wc *mad_wc) +{ + ib_destroy_ah(mad_wc->send_buf->ah); + ib_free_send_mad(mad_wc->send_buf); +} + +/** + * srpt_mad_recv_handler() - MAD reception callback function. 
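For DM_ATTR_SVC_ENTRIES, the switch above splits the 32-bit attr_mod into a slot number (upper 16 bits) and the hi..lo range of requested service entries (bits 15..8 and 7..0). A small userspace illustration of that decoding, with an arbitrary example value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t attr_mod = 0x00010000;           /* slot 1, entries 0..0 */
        uint8_t  hi   = (attr_mod >> 8) & 0xff;
        uint8_t  lo   = attr_mod & 0xff;
        uint16_t slot = (attr_mod >> 16) & 0xffff;

        printf("slot=%u hi=%u lo=%u\n", slot, hi, lo);
        return 0;
}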
+ */ +static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent, + struct ib_mad_recv_wc *mad_wc) +{ + struct srpt_port *sport = (struct srpt_port *)mad_agent->context; + struct ib_ah *ah; + struct ib_mad_send_buf *rsp; + struct ib_dm_mad *dm_mad; + + if (!mad_wc || !mad_wc->recv_buf.mad) + return; + + ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc, + mad_wc->recv_buf.grh, mad_agent->port_num); + if (IS_ERR(ah)) + goto err; + + BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR); + + rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp, + mad_wc->wc->pkey_index, 0, + IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA, + GFP_KERNEL); + if (IS_ERR(rsp)) + goto err_rsp; + + rsp->ah = ah; + + dm_mad = rsp->mad; + memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad); + dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP; + dm_mad->mad_hdr.status = 0; + + switch (mad_wc->recv_buf.mad->mad_hdr.method) { + case IB_MGMT_METHOD_GET: + srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad); + break; + case IB_MGMT_METHOD_SET: + dm_mad->mad_hdr.status = + __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR); + break; + default: + dm_mad->mad_hdr.status = + __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD); + break; + } + + if (!ib_post_send_mad(rsp, NULL)) { + ib_free_recv_mad(mad_wc); + /* will destroy_ah & free_send_mad in send completion */ + return; + } + + ib_free_send_mad(rsp); + +err_rsp: + ib_destroy_ah(ah); +err: + ib_free_recv_mad(mad_wc); +} + +/** + * srpt_refresh_port() - Configure a HCA port. + * + * Enable InfiniBand management datagram processing, update the cached sm_lid, + * lid and gid values, and register a callback function for processing MADs + * on the specified port. + * + * Note: It is safe to call this function more than once for the same port. + */ +static int srpt_refresh_port(struct srpt_port *sport) +{ + struct ib_mad_reg_req reg_req; + struct ib_port_modify port_modify; + struct ib_port_attr port_attr; + int ret; + + memset(&port_modify, 0, sizeof port_modify); + port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP; + port_modify.clr_port_cap_mask = 0; + + ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify); + if (ret) + goto err_mod_port; + + ret = ib_query_port(sport->sdev->device, sport->port, &port_attr); + if (ret) + goto err_query_port; + + sport->sm_lid = port_attr.sm_lid; + sport->lid = port_attr.lid; + + ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid); + if (ret) + goto err_query_port; + + if (!sport->mad_agent) { + memset(®_req, 0, sizeof reg_req); + reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT; + reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION; + set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask); + set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask); + + sport->mad_agent = ib_register_mad_agent(sport->sdev->device, + sport->port, + IB_QPT_GSI, + ®_req, 0, + srpt_mad_send_handler, + srpt_mad_recv_handler, + sport); + if (IS_ERR(sport->mad_agent)) { + ret = PTR_ERR(sport->mad_agent); + sport->mad_agent = NULL; + goto err_query_port; + } + } + + return 0; + +err_query_port: + + port_modify.set_port_cap_mask = 0; + port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP; + ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify); + +err_mod_port: + + return ret; +} + +/** + * srpt_unregister_mad_agent() - Unregister MAD callback functions. + * + * Note: It is safe to call this function more than once for the same device. 
+ */ +static void srpt_unregister_mad_agent(struct srpt_device *sdev) +{ + struct ib_port_modify port_modify = { + .clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP, + }; + struct srpt_port *sport; + int i; + + for (i = 1; i <= sdev->device->phys_port_cnt; i++) { + sport = &sdev->port[i - 1]; + WARN_ON(sport->port != i); + if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0) + printk(KERN_ERR "disabling MAD processing failed.\n"); + if (sport->mad_agent) { + ib_unregister_mad_agent(sport->mad_agent); + sport->mad_agent = NULL; + } + } +} + +/** + * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure. + */ +static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev, + int ioctx_size, int dma_size, + enum dma_data_direction dir) +{ + struct srpt_ioctx *ioctx; + + ioctx = kmalloc(ioctx_size, GFP_KERNEL); + if (!ioctx) + goto err; + + ioctx->buf = kmalloc(dma_size, GFP_KERNEL); + if (!ioctx->buf) + goto err_free_ioctx; + + ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir); + if (ib_dma_mapping_error(sdev->device, ioctx->dma)) + goto err_free_buf; + + return ioctx; + +err_free_buf: + kfree(ioctx->buf); +err_free_ioctx: + kfree(ioctx); +err: + return NULL; +} + +/** + * srpt_free_ioctx() - Free an SRPT I/O context structure. + */ +static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx, + int dma_size, enum dma_data_direction dir) +{ + if (!ioctx) + return; + + ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir); + kfree(ioctx->buf); + kfree(ioctx); +} + +/** + * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures. + * @sdev: Device to allocate the I/O context ring for. + * @ring_size: Number of elements in the I/O context ring. + * @ioctx_size: I/O context size. + * @dma_size: DMA buffer size. + * @dir: DMA data direction. + */ +static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev, + int ring_size, int ioctx_size, + int dma_size, enum dma_data_direction dir) +{ + struct srpt_ioctx **ring; + int i; + + WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx) + && ioctx_size != sizeof(struct srpt_send_ioctx)); + + ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL); + if (!ring) + goto out; + for (i = 0; i < ring_size; ++i) { + ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir); + if (!ring[i]) + goto err; + ring[i]->index = i; + } + goto out; + +err: + while (--i >= 0) + srpt_free_ioctx(sdev, ring[i], dma_size, dir); + kfree(ring); + ring = NULL; +out: + return ring; +} + +/** + * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures. + */ +static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring, + struct srpt_device *sdev, int ring_size, + int dma_size, enum dma_data_direction dir) +{ + int i; + + for (i = 0; i < ring_size; ++i) + srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir); + kfree(ioctx_ring); +} + +/** + * srpt_get_cmd_state() - Get the state of a SCSI command. + */ +static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx) +{ + enum srpt_command_state state; + unsigned long flags; + + BUG_ON(!ioctx); + + spin_lock_irqsave(&ioctx->spinlock, flags); + state = ioctx->state; + spin_unlock_irqrestore(&ioctx->spinlock, flags); + return state; +} + +/** + * srpt_set_cmd_state() - Set the state of a SCSI command. + * + * Does not modify the state of aborted commands. Returns the previous command + * state. 
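srpt_alloc_ioctx_ring() above uses the classic partial-unwind pattern: if allocating element i fails, only elements 0..i-1 are freed before the ring itself is released. A standalone userspace sketch of the same pattern, with plain malloc/free in place of the ioctx allocator and DMA mapping:

#include <stdlib.h>

struct ioctx {
        void *buf;
        unsigned int index;
};

static struct ioctx **alloc_ring(int ring_size, size_t buf_size)
{
        struct ioctx **ring;
        int i;

        ring = malloc(ring_size * sizeof(ring[0]));
        if (!ring)
                return NULL;
        for (i = 0; i < ring_size; ++i) {
                ring[i] = malloc(sizeof(*ring[i]));
                if (!ring[i])
                        goto err;
                ring[i]->buf = malloc(buf_size);
                if (!ring[i]->buf) {
                        free(ring[i]);
                        goto err;
                }
                ring[i]->index = i;
        }
        return ring;

err:
        while (--i >= 0) {              /* free only what was allocated */
                free(ring[i]->buf);
                free(ring[i]);
        }
        free(ring);
        return NULL;
}

int main(void)
{
        struct ioctx **ring = alloc_ring(16, 4096);
        int i;

        if (!ring)
                return 1;
        for (i = 0; i < 16; ++i) {      /* normal teardown */
                free(ring[i]->buf);
                free(ring[i]);
        }
        free(ring);
        return 0;
}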
+ */ +static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx, + enum srpt_command_state new) +{ + enum srpt_command_state previous; + unsigned long flags; + + BUG_ON(!ioctx); + + spin_lock_irqsave(&ioctx->spinlock, flags); + previous = ioctx->state; + if (previous != SRPT_STATE_DONE) + ioctx->state = new; + spin_unlock_irqrestore(&ioctx->spinlock, flags); + + return previous; +} + +/** + * srpt_test_and_set_cmd_state() - Test and set the state of a command. + * + * Returns true if and only if the previous command state was equal to 'old'. + */ +static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx, + enum srpt_command_state old, + enum srpt_command_state new) +{ + enum srpt_command_state previous; + unsigned long flags; + + WARN_ON(!ioctx); + WARN_ON(old == SRPT_STATE_DONE); + WARN_ON(new == SRPT_STATE_NEW); + + spin_lock_irqsave(&ioctx->spinlock, flags); + previous = ioctx->state; + if (previous == old) + ioctx->state = new; + spin_unlock_irqrestore(&ioctx->spinlock, flags); + return previous == old; +} + +/** + * srpt_post_recv() - Post an IB receive request. + */ +static int srpt_post_recv(struct srpt_device *sdev, + struct srpt_recv_ioctx *ioctx) +{ + struct ib_sge list; + struct ib_recv_wr wr, *bad_wr; + + BUG_ON(!sdev); + wr.wr_id = encode_wr_id(SRPT_RECV, ioctx->ioctx.index); + + list.addr = ioctx->ioctx.dma; + list.length = srp_max_req_size; + list.lkey = sdev->mr->lkey; + + wr.next = NULL; + wr.sg_list = &list; + wr.num_sge = 1; + + return ib_post_srq_recv(sdev->srq, &wr, &bad_wr); +} + +/** + * srpt_post_send() - Post an IB send request. + * + * Returns zero upon success and a non-zero value upon failure. + */ +static int srpt_post_send(struct srpt_rdma_ch *ch, + struct srpt_send_ioctx *ioctx, int len) +{ + struct ib_sge list; + struct ib_send_wr wr, *bad_wr; + struct srpt_device *sdev = ch->sport->sdev; + int ret; + + atomic_inc(&ch->req_lim); + + ret = -ENOMEM; + if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) { + printk(KERN_WARNING "IB send queue full (needed 1)\n"); + goto out; + } + + ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, len, + DMA_TO_DEVICE); + + list.addr = ioctx->ioctx.dma; + list.length = len; + list.lkey = sdev->mr->lkey; + + wr.next = NULL; + wr.wr_id = encode_wr_id(SRPT_SEND, ioctx->ioctx.index); + wr.sg_list = &list; + wr.num_sge = 1; + wr.opcode = IB_WR_SEND; + wr.send_flags = IB_SEND_SIGNALED; + + ret = ib_post_send(ch->qp, &wr, &bad_wr); + +out: + if (ret < 0) { + atomic_inc(&ch->sq_wr_avail); + atomic_dec(&ch->req_lim); + } + return ret; +} + +/** + * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request. + * @ioctx: Pointer to the I/O context associated with the request. + * @srp_cmd: Pointer to the SRP_CMD request data. + * @dir: Pointer to the variable to which the transfer direction will be + * written. + * @data_len: Pointer to the variable to which the total data length of all + * descriptors in the SRP_CMD request will be written. + * + * This function initializes ioctx->nrbuf and ioctx->r_bufs. + * + * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors; + * -ENOMEM when memory allocation fails and zero upon success. 
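srpt_post_send() above reserves a send-queue slot by decrementing ch->sq_wr_avail before posting, and gives the slot back (and undoes the req_lim bump) whenever the post does not go through. A userspace sketch of just the slot accounting, using C11 atomics in place of the kernel atomic_t helpers; hw_post_ok is a stand-in for the ib_post_send() result:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int sq_wr_avail = 1;      /* pretend the send queue has 1 slot */

static int post_send(bool hw_post_ok)
{
        int ret = -1;

        /* atomic_dec_return() equivalent: old value minus one. */
        if (atomic_fetch_sub(&sq_wr_avail, 1) - 1 < 0) {
                fprintf(stderr, "send queue full\n");
                goto out;
        }
        ret = hw_post_ok ? 0 : -1;      /* stand-in for ib_post_send() */
out:
        if (ret < 0)
                atomic_fetch_add(&sq_wr_avail, 1);  /* give the slot back */
        return ret;
}

int main(void)
{
        printf("%d\n", post_send(false));  /* -1: post fails, slot returned */
        printf("%d\n", post_send(true));   /*  0: slot consumed */
        printf("%d\n", post_send(true));   /* -1: queue full */
        return 0;
}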
+ */ +static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx, + struct srp_cmd *srp_cmd, + enum dma_data_direction *dir, u64 *data_len) +{ + struct srp_indirect_buf *idb; + struct srp_direct_buf *db; + unsigned add_cdb_offset; + int ret; + + /* + * The pointer computations below will only be compiled correctly + * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check + * whether srp_cmd::add_data has been declared as a byte pointer. + */ + BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0) + && !__same_type(srp_cmd->add_data[0], (u8)0)); + + BUG_ON(!dir); + BUG_ON(!data_len); + + ret = 0; + *data_len = 0; + + /* + * The lower four bits of the buffer format field contain the DATA-IN + * buffer descriptor format, and the highest four bits contain the + * DATA-OUT buffer descriptor format. + */ + *dir = DMA_NONE; + if (srp_cmd->buf_fmt & 0xf) + /* DATA-IN: transfer data from target to initiator (read). */ + *dir = DMA_FROM_DEVICE; + else if (srp_cmd->buf_fmt >> 4) + /* DATA-OUT: transfer data from initiator to target (write). */ + *dir = DMA_TO_DEVICE; + + /* + * According to the SRP spec, the lower two bits of the 'ADDITIONAL + * CDB LENGTH' field are reserved and the size in bytes of this field + * is four times the value specified in bits 3..7. Hence the "& ~3". + */ + add_cdb_offset = srp_cmd->add_cdb_len & ~3; + if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) || + ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) { + ioctx->n_rbuf = 1; + ioctx->rbufs = &ioctx->single_rbuf; + + db = (struct srp_direct_buf *)(srp_cmd->add_data + + add_cdb_offset); + memcpy(ioctx->rbufs, db, sizeof *db); + *data_len = be32_to_cpu(db->len); + } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) || + ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) { + idb = (struct srp_indirect_buf *)(srp_cmd->add_data + + add_cdb_offset); + + ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db; + + if (ioctx->n_rbuf > + (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) { + printk(KERN_ERR "received unsupported SRP_CMD request" + " type (%u out + %u in != %u / %zu)\n", + srp_cmd->data_out_desc_cnt, + srp_cmd->data_in_desc_cnt, + be32_to_cpu(idb->table_desc.len), + sizeof(*db)); + ioctx->n_rbuf = 0; + ret = -EINVAL; + goto out; + } + + if (ioctx->n_rbuf == 1) + ioctx->rbufs = &ioctx->single_rbuf; + else { + ioctx->rbufs = + kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC); + if (!ioctx->rbufs) { + ioctx->n_rbuf = 0; + ret = -ENOMEM; + goto out; + } + } + + db = idb->desc_list; + memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db); + *data_len = be32_to_cpu(idb->len); + } +out: + return ret; +} + +/** + * srpt_init_ch_qp() - Initialize queue pair attributes. + * + * Initialized the attributes of queue pair 'qp' by allowing local write, + * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT. + */ +static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp) +{ + struct ib_qp_attr *attr; + int ret; + + attr = kzalloc(sizeof *attr, GFP_KERNEL); + if (!attr) + return -ENOMEM; + + attr->qp_state = IB_QPS_INIT; + attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ | + IB_ACCESS_REMOTE_WRITE; + attr->port_num = ch->sport->port; + attr->pkey_index = 0; + + ret = ib_modify_qp(qp, attr, + IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT | + IB_QP_PKEY_INDEX); + + kfree(attr); + return ret; +} + +/** + * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR). + * @ch: channel of the queue pair. 
+ * @qp: queue pair to change the state of. + * + * Returns zero upon success and a negative value upon failure. + * + * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system. + * If this structure ever becomes larger, it might be necessary to allocate + * it dynamically instead of on the stack. + */ +static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp) +{ + struct ib_qp_attr qp_attr; + int attr_mask; + int ret; + + qp_attr.qp_state = IB_QPS_RTR; + ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask); + if (ret) + goto out; + + qp_attr.max_dest_rd_atomic = 4; + + ret = ib_modify_qp(qp, &qp_attr, attr_mask); + +out: + return ret; +} + +/** + * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS). + * @ch: channel of the queue pair. + * @qp: queue pair to change the state of. + * + * Returns zero upon success and a negative value upon failure. + * + * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system. + * If this structure ever becomes larger, it might be necessary to allocate + * it dynamically instead of on the stack. + */ +static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp) +{ + struct ib_qp_attr qp_attr; + int attr_mask; + int ret; + + qp_attr.qp_state = IB_QPS_RTS; + ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask); + if (ret) + goto out; + + qp_attr.max_rd_atomic = 4; + + ret = ib_modify_qp(qp, &qp_attr, attr_mask); + +out: + return ret; +} + +/** + * srpt_ch_qp_err() - Set the channel queue pair state to 'error'. + */ +static int srpt_ch_qp_err(struct srpt_rdma_ch *ch) +{ + struct ib_qp_attr qp_attr; + + qp_attr.qp_state = IB_QPS_ERR; + return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE); +} + +/** + * srpt_unmap_sg_to_ib_sge() - Unmap an IB SGE list. + */ +static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch, + struct srpt_send_ioctx *ioctx) +{ + struct scatterlist *sg; + enum dma_data_direction dir; + + BUG_ON(!ch); + BUG_ON(!ioctx); + BUG_ON(ioctx->n_rdma && !ioctx->rdma_ius); + + while (ioctx->n_rdma) + kfree(ioctx->rdma_ius[--ioctx->n_rdma].sge); + + kfree(ioctx->rdma_ius); + ioctx->rdma_ius = NULL; + + if (ioctx->mapped_sg_count) { + sg = ioctx->sg; + WARN_ON(!sg); + dir = ioctx->cmd.data_direction; + BUG_ON(dir == DMA_NONE); + ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt, + opposite_dma_dir(dir)); + ioctx->mapped_sg_count = 0; + } +} + +/** + * srpt_map_sg_to_ib_sge() - Map an SG list to an IB SGE list. 
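A little earlier, srpt_get_desc_tbl() decides the transfer direction from the SRP_CMD buffer-format byte: the low nibble describes the DATA-IN descriptor and the high nibble the DATA-OUT descriptor. The userspace sketch below replays that decision; the DIRECT/INDIRECT values (1 and 2) are assumptions matching the usual SRP definitions.

#include <stdint.h>
#include <stdio.h>

#define DATA_DESC_DIRECT   1    /* assumed SRP_DATA_DESC_DIRECT */
#define DATA_DESC_INDIRECT 2    /* assumed SRP_DATA_DESC_INDIRECT */

static const char *transfer_direction(uint8_t buf_fmt)
{
        if (buf_fmt & 0xf)
                return "DATA-IN (target -> initiator, DMA_FROM_DEVICE)";
        else if (buf_fmt >> 4)
                return "DATA-OUT (initiator -> target, DMA_TO_DEVICE)";
        return "no data transfer (DMA_NONE)";
}

int main(void)
{
        printf("0x%02x: %s\n", DATA_DESC_DIRECT,
               transfer_direction(DATA_DESC_DIRECT));         /* low nibble */
        printf("0x%02x: %s\n", DATA_DESC_INDIRECT << 4,
               transfer_direction(DATA_DESC_INDIRECT << 4));  /* high nibble */
        printf("0x%02x: %s\n", 0, transfer_direction(0));
        return 0;
}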
+ */ +static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch, + struct srpt_send_ioctx *ioctx) +{ + struct se_cmd *cmd; + struct scatterlist *sg, *sg_orig; + int sg_cnt; + enum dma_data_direction dir; + struct rdma_iu *riu; + struct srp_direct_buf *db; + dma_addr_t dma_addr; + struct ib_sge *sge; + u64 raddr; + u32 rsize; + u32 tsize; + u32 dma_len; + int count, nrdma; + int i, j, k; + + BUG_ON(!ch); + BUG_ON(!ioctx); + cmd = &ioctx->cmd; + dir = cmd->data_direction; + BUG_ON(dir == DMA_NONE); + + transport_do_task_sg_chain(cmd); + ioctx->sg = sg = sg_orig = cmd->t_tasks_sg_chained; + ioctx->sg_cnt = sg_cnt = cmd->t_tasks_sg_chained_no; + + count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt, + opposite_dma_dir(dir)); + if (unlikely(!count)) + return -EAGAIN; + + ioctx->mapped_sg_count = count; + + if (ioctx->rdma_ius && ioctx->n_rdma_ius) + nrdma = ioctx->n_rdma_ius; + else { + nrdma = (count + SRPT_DEF_SG_PER_WQE - 1) / SRPT_DEF_SG_PER_WQE + + ioctx->n_rbuf; + + ioctx->rdma_ius = kzalloc(nrdma * sizeof *riu, GFP_KERNEL); + if (!ioctx->rdma_ius) + goto free_mem; + + ioctx->n_rdma_ius = nrdma; + } + + db = ioctx->rbufs; + tsize = cmd->data_length; + dma_len = sg_dma_len(&sg[0]); + riu = ioctx->rdma_ius; + + /* + * For each remote desc - calculate the #ib_sge. + * If #ib_sge < SRPT_DEF_SG_PER_WQE per rdma operation then + * each remote desc rdma_iu is required a rdma wr; + * else + * we need to allocate extra rdma_iu to carry extra #ib_sge in + * another rdma wr + */ + for (i = 0, j = 0; + j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) { + rsize = be32_to_cpu(db->len); + raddr = be64_to_cpu(db->va); + riu->raddr = raddr; + riu->rkey = be32_to_cpu(db->key); + riu->sge_cnt = 0; + + /* calculate how many sge required for this remote_buf */ + while (rsize > 0 && tsize > 0) { + + if (rsize >= dma_len) { + tsize -= dma_len; + rsize -= dma_len; + raddr += dma_len; + + if (tsize > 0) { + ++j; + if (j < count) { + sg = sg_next(sg); + dma_len = sg_dma_len(sg); + } + } + } else { + tsize -= rsize; + dma_len -= rsize; + rsize = 0; + } + + ++riu->sge_cnt; + + if (rsize > 0 && riu->sge_cnt == SRPT_DEF_SG_PER_WQE) { + ++ioctx->n_rdma; + riu->sge = + kmalloc(riu->sge_cnt * sizeof *riu->sge, + GFP_KERNEL); + if (!riu->sge) + goto free_mem; + + ++riu; + riu->sge_cnt = 0; + riu->raddr = raddr; + riu->rkey = be32_to_cpu(db->key); + } + } + + ++ioctx->n_rdma; + riu->sge = kmalloc(riu->sge_cnt * sizeof *riu->sge, + GFP_KERNEL); + if (!riu->sge) + goto free_mem; + } + + db = ioctx->rbufs; + tsize = cmd->data_length; + riu = ioctx->rdma_ius; + sg = sg_orig; + dma_len = sg_dma_len(&sg[0]); + dma_addr = sg_dma_address(&sg[0]); + + /* this second loop is really mapped sg_addres to rdma_iu->ib_sge */ + for (i = 0, j = 0; + j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) { + rsize = be32_to_cpu(db->len); + sge = riu->sge; + k = 0; + + while (rsize > 0 && tsize > 0) { + sge->addr = dma_addr; + sge->lkey = ch->sport->sdev->mr->lkey; + + if (rsize >= dma_len) { + sge->length = + (tsize < dma_len) ? tsize : dma_len; + tsize -= dma_len; + rsize -= dma_len; + + if (tsize > 0) { + ++j; + if (j < count) { + sg = sg_next(sg); + dma_len = sg_dma_len(sg); + dma_addr = sg_dma_address(sg); + } + } + } else { + sge->length = (tsize < rsize) ? 
tsize : rsize; + tsize -= rsize; + dma_len -= rsize; + dma_addr += rsize; + rsize = 0; + } + + ++k; + if (k == riu->sge_cnt && rsize > 0 && tsize > 0) { + ++riu; + sge = riu->sge; + k = 0; + } else if (rsize > 0 && tsize > 0) + ++sge; + } + } + + return 0; + +free_mem: + srpt_unmap_sg_to_ib_sge(ch, ioctx); + + return -ENOMEM; +} + +/** + * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator. + */ +static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch) +{ + struct srpt_send_ioctx *ioctx; + unsigned long flags; + + BUG_ON(!ch); + + ioctx = NULL; + spin_lock_irqsave(&ch->spinlock, flags); + if (!list_empty(&ch->free_list)) { + ioctx = list_first_entry(&ch->free_list, + struct srpt_send_ioctx, free_list); + list_del(&ioctx->free_list); + } + spin_unlock_irqrestore(&ch->spinlock, flags); + + if (!ioctx) + return ioctx; + + BUG_ON(ioctx->ch != ch); + kref_init(&ioctx->kref); + spin_lock_init(&ioctx->spinlock); + ioctx->state = SRPT_STATE_NEW; + ioctx->n_rbuf = 0; + ioctx->rbufs = NULL; + ioctx->n_rdma = 0; + ioctx->n_rdma_ius = 0; + ioctx->rdma_ius = NULL; + ioctx->mapped_sg_count = 0; + init_completion(&ioctx->tx_done); + ioctx->queue_status_only = false; + /* + * transport_init_se_cmd() does not initialize all fields, so do it + * here. + */ + memset(&ioctx->cmd, 0, sizeof(ioctx->cmd)); + memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data)); + + return ioctx; +} + +/** + * srpt_put_send_ioctx() - Free up resources. + */ +static void srpt_put_send_ioctx(struct srpt_send_ioctx *ioctx) +{ + struct srpt_rdma_ch *ch; + unsigned long flags; + + BUG_ON(!ioctx); + ch = ioctx->ch; + BUG_ON(!ch); + + WARN_ON(srpt_get_cmd_state(ioctx) != SRPT_STATE_DONE); + + srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx); + transport_generic_free_cmd(&ioctx->cmd, 0); + + if (ioctx->n_rbuf > 1) { + kfree(ioctx->rbufs); + ioctx->rbufs = NULL; + ioctx->n_rbuf = 0; + } + + spin_lock_irqsave(&ch->spinlock, flags); + list_add(&ioctx->free_list, &ch->free_list); + spin_unlock_irqrestore(&ch->spinlock, flags); +} + +static void srpt_put_send_ioctx_kref(struct kref *kref) +{ + srpt_put_send_ioctx(container_of(kref, struct srpt_send_ioctx, kref)); +} + +/** + * srpt_abort_cmd() - Abort a SCSI command. + * @ioctx: I/O context associated with the SCSI command. + * @context: Preferred execution context. + */ +static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx) +{ + enum srpt_command_state state; + unsigned long flags; + + BUG_ON(!ioctx); + + /* + * If the command is in a state where the target core is waiting for + * the ib_srpt driver, change the state to the next state. Changing + * the state of the command from SRPT_STATE_NEED_DATA to + * SRPT_STATE_DATA_IN ensures that srpt_xmit_response() will call this + * function a second time. + */ + + spin_lock_irqsave(&ioctx->spinlock, flags); + state = ioctx->state; + switch (state) { + case SRPT_STATE_NEED_DATA: + ioctx->state = SRPT_STATE_DATA_IN; + break; + case SRPT_STATE_DATA_IN: + case SRPT_STATE_CMD_RSP_SENT: + case SRPT_STATE_MGMT_RSP_SENT: + ioctx->state = SRPT_STATE_DONE; + break; + default: + break; + } + spin_unlock_irqrestore(&ioctx->spinlock, flags); + + if (state == SRPT_STATE_DONE) + goto out; + + pr_debug("Aborting cmd with state %d and tag %lld\n", state, + ioctx->tag); + + switch (state) { + case SRPT_STATE_NEW: + case SRPT_STATE_DATA_IN: + case SRPT_STATE_MGMT: + /* + * Do nothing - defer abort processing until + * srpt_queue_response() is invoked. 
+ */ + WARN_ON(!transport_check_aborted_status(&ioctx->cmd, false)); + break; + case SRPT_STATE_NEED_DATA: + /* DMA_TO_DEVICE (write) - RDMA read error. */ + atomic_set(&ioctx->cmd.transport_lun_stop, 1); + transport_generic_handle_data(&ioctx->cmd); + break; + case SRPT_STATE_CMD_RSP_SENT: + /* + * SRP_RSP sending failed or the SRP_RSP send completion has + * not been received in time. + */ + srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx); + atomic_set(&ioctx->cmd.transport_lun_stop, 1); + kref_put(&ioctx->kref, srpt_put_send_ioctx_kref); + break; + case SRPT_STATE_MGMT_RSP_SENT: + srpt_set_cmd_state(ioctx, SRPT_STATE_DONE); + kref_put(&ioctx->kref, srpt_put_send_ioctx_kref); + break; + default: + WARN_ON("ERROR: unexpected command state"); + break; + } + +out: + return state; +} + +/** + * srpt_handle_send_err_comp() - Process an IB_WC_SEND error completion. + */ +static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id) +{ + struct srpt_send_ioctx *ioctx; + enum srpt_command_state state; + struct se_cmd *cmd; + u32 index; + + atomic_inc(&ch->sq_wr_avail); + + index = idx_from_wr_id(wr_id); + ioctx = ch->ioctx_ring[index]; + state = srpt_get_cmd_state(ioctx); + cmd = &ioctx->cmd; + + WARN_ON(state != SRPT_STATE_CMD_RSP_SENT + && state != SRPT_STATE_MGMT_RSP_SENT + && state != SRPT_STATE_NEED_DATA + && state != SRPT_STATE_DONE); + + /* If SRP_RSP sending failed, undo the ch->req_lim change. */ + if (state == SRPT_STATE_CMD_RSP_SENT + || state == SRPT_STATE_MGMT_RSP_SENT) + atomic_dec(&ch->req_lim); + + srpt_abort_cmd(ioctx); +} + +/** + * srpt_handle_send_comp() - Process an IB send completion notification. + */ +static void srpt_handle_send_comp(struct srpt_rdma_ch *ch, + struct srpt_send_ioctx *ioctx) +{ + enum srpt_command_state state; + + atomic_inc(&ch->sq_wr_avail); + + state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE); + + if (WARN_ON(state != SRPT_STATE_CMD_RSP_SENT + && state != SRPT_STATE_MGMT_RSP_SENT + && state != SRPT_STATE_DONE)) + pr_debug("state = %d\n", state); + + if (state != SRPT_STATE_DONE) + kref_put(&ioctx->kref, srpt_put_send_ioctx_kref); + else + printk(KERN_ERR "IB completion has been received too late for" + " wr_id = %u.\n", ioctx->ioctx.index); +} + +/** + * srpt_handle_rdma_comp() - Process an IB RDMA completion notification. + * + * Note: transport_generic_handle_data() is asynchronous so unmapping the + * data that has been transferred via IB RDMA must be postponed until the + * check_stop_free() callback. + */ +static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch, + struct srpt_send_ioctx *ioctx, + enum srpt_opcode opcode) +{ + WARN_ON(ioctx->n_rdma <= 0); + atomic_add(ioctx->n_rdma, &ch->sq_wr_avail); + + if (opcode == SRPT_RDMA_READ_LAST) { + if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA, + SRPT_STATE_DATA_IN)) + transport_generic_handle_data(&ioctx->cmd); + else + printk(KERN_ERR "%s[%d]: wrong state = %d\n", __func__, + __LINE__, srpt_get_cmd_state(ioctx)); + } else if (opcode == SRPT_RDMA_ABORT) { + ioctx->rdma_aborted = true; + } else { + WARN(true, "unexpected opcode %d\n", opcode); + } +} + +/** + * srpt_handle_rdma_err_comp() - Process an IB RDMA error completion. 
+ */ +static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch, + struct srpt_send_ioctx *ioctx, + enum srpt_opcode opcode) +{ + struct se_cmd *cmd; + enum srpt_command_state state; + + cmd = &ioctx->cmd; + state = srpt_get_cmd_state(ioctx); + switch (opcode) { + case SRPT_RDMA_READ_LAST: + if (ioctx->n_rdma <= 0) { + printk(KERN_ERR "Received invalid RDMA read" + " error completion with idx %d\n", + ioctx->ioctx.index); + break; + } + atomic_add(ioctx->n_rdma, &ch->sq_wr_avail); + if (state == SRPT_STATE_NEED_DATA) + srpt_abort_cmd(ioctx); + else + printk(KERN_ERR "%s[%d]: wrong state = %d\n", + __func__, __LINE__, state); + break; + case SRPT_RDMA_WRITE_LAST: + atomic_set(&ioctx->cmd.transport_lun_stop, 1); + break; + default: + printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__, + __LINE__, opcode); + break; + } +} + +/** + * srpt_build_cmd_rsp() - Build an SRP_RSP response. + * @ch: RDMA channel through which the request has been received. + * @ioctx: I/O context associated with the SRP_CMD request. The response will + * be built in the buffer ioctx->buf points at and hence this function will + * overwrite the request data. + * @tag: tag of the request for which this response is being generated. + * @status: value for the STATUS field of the SRP_RSP information unit. + * + * Returns the size in bytes of the SRP_RSP response. + * + * An SRP_RSP response contains a SCSI status or service response. See also + * section 6.9 in the SRP r16a document for the format of an SRP_RSP + * response. See also SPC-2 for more information about sense data. + */ +static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch, + struct srpt_send_ioctx *ioctx, u64 tag, + int status) +{ + struct srp_rsp *srp_rsp; + const u8 *sense_data; + int sense_data_len, max_sense_len; + + /* + * The lowest bit of all SAM-3 status codes is zero (see also + * paragraph 5.3 in SAM-3). + */ + WARN_ON(status & 1); + + srp_rsp = ioctx->ioctx.buf; + BUG_ON(!srp_rsp); + + sense_data = ioctx->sense_data; + sense_data_len = ioctx->cmd.scsi_sense_length; + WARN_ON(sense_data_len > sizeof(ioctx->sense_data)); + + memset(srp_rsp, 0, sizeof *srp_rsp); + srp_rsp->opcode = SRP_RSP; + srp_rsp->req_lim_delta = + __constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0)); + srp_rsp->tag = tag; + srp_rsp->status = status; + + if (sense_data_len) { + BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp)); + max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp); + if (sense_data_len > max_sense_len) { + printk(KERN_WARNING "truncated sense data from %d to %d" + " bytes\n", sense_data_len, max_sense_len); + sense_data_len = max_sense_len; + } + + srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID; + srp_rsp->sense_data_len = cpu_to_be32(sense_data_len); + memcpy(srp_rsp + 1, sense_data, sense_data_len); + } + + return sizeof(*srp_rsp) + sense_data_len; +} + +/** + * srpt_build_tskmgmt_rsp() - Build a task management response. + * @ch: RDMA channel through which the request has been received. + * @ioctx: I/O context in which the SRP_RSP response will be built. + * @rsp_code: RSP_CODE that will be stored in the response. + * @tag: Tag of the request for which this response is being generated. + * + * Returns the size in bytes of the SRP_RSP response. + * + * An SRP_RSP response contains a SCSI status or service response. See also + * section 6.9 in the SRP r16a document for the format of an SRP_RSP + * response. 
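srpt_build_cmd_rsp() above appends any sense data directly after the fixed SRP_RSP header and truncates it so the whole response still fits in the negotiated maximum target-to-initiator IU. A userspace sketch of just that length computation; the 36-byte header size is a stand-in for sizeof(struct srp_rsp) and the IU sizes in main() are arbitrary example values.

#include <stdio.h>

#define SRP_RSP_HDR_LEN 36      /* stand-in for sizeof(struct srp_rsp) */

static int cmd_rsp_len(int max_ti_iu_len, int sense_data_len)
{
        int max_sense_len = max_ti_iu_len - SRP_RSP_HDR_LEN;

        if (sense_data_len > max_sense_len) {
                fprintf(stderr, "truncated sense data from %d to %d bytes\n",
                        sense_data_len, max_sense_len);
                sense_data_len = max_sense_len;
        }
        return SRP_RSP_HDR_LEN + sense_data_len;
}

int main(void)
{
        printf("%d\n", cmd_rsp_len(2116, 18));  /* plenty of room */
        printf("%d\n", cmd_rsp_len(64, 96));    /* sense data gets truncated */
        return 0;
}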
+ */ +static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch, + struct srpt_send_ioctx *ioctx, + u8 rsp_code, u64 tag) +{ + struct srp_rsp *srp_rsp; + int resp_data_len; + int resp_len; + + resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4; + resp_len = sizeof(*srp_rsp) + resp_data_len; + + srp_rsp = ioctx->ioctx.buf; + BUG_ON(!srp_rsp); + memset(srp_rsp, 0, sizeof *srp_rsp); + + srp_rsp->opcode = SRP_RSP; + srp_rsp->req_lim_delta = __constant_cpu_to_be32(1 + + atomic_xchg(&ch->req_lim_delta, 0)); + srp_rsp->tag = tag; + + if (rsp_code != SRP_TSK_MGMT_SUCCESS) { + srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID; + srp_rsp->resp_data_len = cpu_to_be32(resp_data_len); + srp_rsp->data[3] = rsp_code; + } + + return resp_len; +} + +#define NO_SUCH_LUN ((uint64_t)-1LL) + +/* + * SCSI LUN addressing method. See also SAM-2 and the section about + * eight byte LUNs. + */ +enum scsi_lun_addr_method { + SCSI_LUN_ADDR_METHOD_PERIPHERAL = 0, + SCSI_LUN_ADDR_METHOD_FLAT = 1, + SCSI_LUN_ADDR_METHOD_LUN = 2, + SCSI_LUN_ADDR_METHOD_EXTENDED_LUN = 3, +}; + +/* + * srpt_unpack_lun() - Convert from network LUN to linear LUN. + * + * Convert an 2-byte, 4-byte, 6-byte or 8-byte LUN structure in network byte + * order (big endian) to a linear LUN. Supports three LUN addressing methods: + * peripheral, flat and logical unit. See also SAM-2, section 4.9.4 (page 40). + */ +static uint64_t srpt_unpack_lun(const uint8_t *lun, int len) +{ + uint64_t res = NO_SUCH_LUN; + int addressing_method; + + if (unlikely(len < 2)) { + printk(KERN_ERR "Illegal LUN length %d, expected 2 bytes or " + "more", len); + goto out; + } + + switch (len) { + case 8: + if ((*((__be64 *)lun) & + __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0) + goto out_err; + break; + case 4: + if (*((__be16 *)&lun[2]) != 0) + goto out_err; + break; + case 6: + if (*((__be32 *)&lun[2]) != 0) + goto out_err; + break; + case 2: + break; + default: + goto out_err; + } + + addressing_method = (*lun) >> 6; /* highest two bits of byte 0 */ + switch (addressing_method) { + case SCSI_LUN_ADDR_METHOD_PERIPHERAL: + case SCSI_LUN_ADDR_METHOD_FLAT: + case SCSI_LUN_ADDR_METHOD_LUN: + res = *(lun + 1) | (((*lun) & 0x3f) << 8); + break; + + case SCSI_LUN_ADDR_METHOD_EXTENDED_LUN: + default: + printk(KERN_ERR "Unimplemented LUN addressing method %u", + addressing_method); + break; + } + +out: + return res; + +out_err: + printk(KERN_ERR "Support for multi-level LUNs has not yet been" + " implemented"); + goto out; +} + +static int srpt_check_stop_free(struct se_cmd *cmd) +{ + struct srpt_send_ioctx *ioctx; + + ioctx = container_of(cmd, struct srpt_send_ioctx, cmd); + return kref_put(&ioctx->kref, srpt_put_send_ioctx_kref); +} + +/** + * srpt_handle_cmd() - Process SRP_CMD. 
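+ *
+ * Parses the SRP data descriptor table, sets up the data direction and
+ * length, maps the SRP task attribute onto a TCM task attribute, looks up
+ * the LUN and hands the command over to the target core via
+ * transport_handle_cdb_direct(). If parsing or the LUN lookup fails, a
+ * CHECK CONDITION response with sense data is sent back instead.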
+ */
+static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
+                           struct srpt_recv_ioctx *recv_ioctx,
+                           struct srpt_send_ioctx *send_ioctx)
+{
+        struct se_cmd *cmd;
+        struct srp_cmd *srp_cmd;
+        uint64_t unpacked_lun;
+        u64 data_len;
+        enum dma_data_direction dir;
+        int ret;
+
+        BUG_ON(!send_ioctx);
+
+        srp_cmd = recv_ioctx->ioctx.buf;
+        kref_get(&send_ioctx->kref);
+        cmd = &send_ioctx->cmd;
+        send_ioctx->tag = srp_cmd->tag;
+
+        switch (srp_cmd->task_attr) {
+        case SRP_CMD_SIMPLE_Q:
+                cmd->sam_task_attr = MSG_SIMPLE_TAG;
+                break;
+        case SRP_CMD_ORDERED_Q:
+        default:
+                cmd->sam_task_attr = MSG_ORDERED_TAG;
+                break;
+        case SRP_CMD_HEAD_OF_Q:
+                cmd->sam_task_attr = MSG_HEAD_TAG;
+                break;
+        case SRP_CMD_ACA:
+                cmd->sam_task_attr = MSG_ACA_TAG;
+                break;
+        }
+
+        ret = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len);
+        if (ret) {
+                printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n",
+                       srp_cmd->tag);
+                cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+                cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+                goto send_sense;
+        }
+
+        cmd->data_length = data_len;
+        cmd->data_direction = dir;
+        unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
+                                       sizeof(srp_cmd->lun));
+        if (transport_lookup_cmd_lun(cmd, unpacked_lun) < 0)
+                goto send_sense;
+        ret = transport_generic_allocate_tasks(cmd, srp_cmd->cdb);
+        if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
+                srpt_queue_status(cmd);
+        else if (cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION)
+                goto send_sense;
+        else
+                WARN_ON_ONCE(ret);
+
+        transport_handle_cdb_direct(cmd);
+        return 0;
+
+send_sense:
+        transport_send_check_condition_and_sense(cmd, cmd->scsi_sense_reason,
+                                                 0);
+        return -1;
+}
+
+/**
+ * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
+ * @ioctx: Send I/O context of the task management request.
+ * @tag:   Tag of the SRP command that has to be aborted.
+ *
+ * Returns zero if a command with tag @tag is pending on the same LUN and
+ * has not yet finished, and -EINVAL otherwise.
+ *
+ * Note: It is assumed that the initiator serializes tag-based task management
+ * requests.
+ */
+static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
+{
+        struct srpt_device *sdev;
+        struct srpt_rdma_ch *ch;
+        struct srpt_send_ioctx *target;
+        int ret, i;
+
+        ret = -EINVAL;
+        ch = ioctx->ch;
+        BUG_ON(!ch);
+        BUG_ON(!ch->sport);
+        sdev = ch->sport->sdev;
+        BUG_ON(!sdev);
+        spin_lock_irq(&sdev->spinlock);
+        for (i = 0; i < ch->rq_size; ++i) {
+                target = ch->ioctx_ring[i];
+                if (target->cmd.se_lun == ioctx->cmd.se_lun &&
+                    target->tag == tag &&
+                    srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
+                        ret = 0;
+                        /* now let the target core abort &target->cmd; */
+                        break;
+                }
+        }
+        spin_unlock_irq(&sdev->spinlock);
+        return ret;
+}
+
+static int srp_tmr_to_tcm(int fn)
+{
+        switch (fn) {
+        case SRP_TSK_ABORT_TASK:
+                return TMR_ABORT_TASK;
+        case SRP_TSK_ABORT_TASK_SET:
+                return TMR_ABORT_TASK_SET;
+        case SRP_TSK_CLEAR_TASK_SET:
+                return TMR_CLEAR_TASK_SET;
+        case SRP_TSK_LUN_RESET:
+                return TMR_LUN_RESET;
+        case SRP_TSK_CLEAR_ACA:
+                return TMR_CLEAR_ACA;
+        default:
+                return -1;
+        }
+}
+
+/**
+ * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
+ *
+ * The request is translated into a TCM task management request and handed
+ * over to the target core; the SRP_RSP response is sent back asynchronously.
+ *
+ * For more information about SRP_TSK_MGMT information units, see also section
+ * 6.7 in the SRP r16a document.
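+ *
+ * Example mappings performed by srp_tmr_to_tcm(): SRP_TSK_ABORT_TASK becomes
+ * TMR_ABORT_TASK and SRP_TSK_LUN_RESET becomes TMR_LUN_RESET; unknown
+ * function codes are rejected with TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED.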
+ */ +static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch, + struct srpt_recv_ioctx *recv_ioctx, + struct srpt_send_ioctx *send_ioctx) +{ + struct srp_tsk_mgmt *srp_tsk; + struct se_cmd *cmd; + uint64_t unpacked_lun; + int tcm_tmr; + int res; + + BUG_ON(!send_ioctx); + + srp_tsk = recv_ioctx->ioctx.buf; + cmd = &send_ioctx->cmd; + + pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld" + " cm_id %p sess %p\n", srp_tsk->tsk_mgmt_func, + srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess); + + srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT); + send_ioctx->tag = srp_tsk->tag; + tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func); + if (tcm_tmr < 0) { + send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + send_ioctx->cmd.se_tmr_req->response = + TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; + goto process_tmr; + } + cmd->se_tmr_req = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL); + if (!cmd->se_tmr_req) { + send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED; + goto process_tmr; + } + + unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun, + sizeof(srp_tsk->lun)); + res = transport_lookup_tmr_lun(&send_ioctx->cmd, unpacked_lun); + if (res) { + pr_debug("rejecting TMR for LUN %lld\n", unpacked_lun); + send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + send_ioctx->cmd.se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; + goto process_tmr; + } + + if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) + srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag); + +process_tmr: + kref_get(&send_ioctx->kref); + if (!(send_ioctx->cmd.se_cmd_flags & SCF_SCSI_CDB_EXCEPTION)) + transport_generic_handle_tmr(&send_ioctx->cmd); + else + transport_send_check_condition_and_sense(cmd, + cmd->scsi_sense_reason, 0); + +} + +/** + * srpt_handle_new_iu() - Process a newly received information unit. + * @ch: RDMA channel through which the information unit has been received. + * @ioctx: SRPT I/O context associated with the information unit. 
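+ *
+ * SRP_CMD and SRP_TSK_MGMT information units need a send I/O context. If
+ * none is available, or if the channel is still in the CH_CONNECTING state,
+ * the received IU is queued on ch->cmd_wait_list and is processed later,
+ * either when the channel goes live or when a send completion frees up a
+ * send I/O context.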
+ */ +static void srpt_handle_new_iu(struct srpt_rdma_ch *ch, + struct srpt_recv_ioctx *recv_ioctx, + struct srpt_send_ioctx *send_ioctx) +{ + struct srp_cmd *srp_cmd; + enum rdma_ch_state ch_state; + + BUG_ON(!ch); + BUG_ON(!recv_ioctx); + + ib_dma_sync_single_for_cpu(ch->sport->sdev->device, + recv_ioctx->ioctx.dma, srp_max_req_size, + DMA_FROM_DEVICE); + + ch_state = srpt_get_ch_state(ch); + if (unlikely(ch_state == CH_CONNECTING)) { + list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list); + goto out; + } + + if (unlikely(ch_state != CH_LIVE)) + goto out; + + srp_cmd = recv_ioctx->ioctx.buf; + if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) { + if (!send_ioctx) + send_ioctx = srpt_get_send_ioctx(ch); + if (unlikely(!send_ioctx)) { + list_add_tail(&recv_ioctx->wait_list, + &ch->cmd_wait_list); + goto out; + } + } + + transport_init_se_cmd(&send_ioctx->cmd, &srpt_target->tf_ops, ch->sess, + 0, DMA_NONE, MSG_SIMPLE_TAG, + send_ioctx->sense_data); + + switch (srp_cmd->opcode) { + case SRP_CMD: + srpt_handle_cmd(ch, recv_ioctx, send_ioctx); + break; + case SRP_TSK_MGMT: + srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx); + break; + case SRP_I_LOGOUT: + printk(KERN_ERR "Not yet implemented: SRP_I_LOGOUT\n"); + break; + case SRP_CRED_RSP: + pr_debug("received SRP_CRED_RSP\n"); + break; + case SRP_AER_RSP: + pr_debug("received SRP_AER_RSP\n"); + break; + case SRP_RSP: + printk(KERN_ERR "Received SRP_RSP\n"); + break; + default: + printk(KERN_ERR "received IU with unknown opcode 0x%x\n", + srp_cmd->opcode); + break; + } + + srpt_post_recv(ch->sport->sdev, recv_ioctx); +out: + return; +} + +static void srpt_process_rcv_completion(struct ib_cq *cq, + struct srpt_rdma_ch *ch, + struct ib_wc *wc) +{ + struct srpt_device *sdev = ch->sport->sdev; + struct srpt_recv_ioctx *ioctx; + u32 index; + + index = idx_from_wr_id(wc->wr_id); + if (wc->status == IB_WC_SUCCESS) { + int req_lim; + + req_lim = atomic_dec_return(&ch->req_lim); + if (unlikely(req_lim < 0)) + printk(KERN_ERR "req_lim = %d < 0\n", req_lim); + ioctx = sdev->ioctx_ring[index]; + srpt_handle_new_iu(ch, ioctx, NULL); + } else { + printk(KERN_INFO "receiving failed for idx %u with status %d\n", + index, wc->status); + } +} + +/** + * srpt_process_send_completion() - Process an IB send completion. + * + * Note: Although this has not yet been observed during tests, at least in + * theory it is possible that the srpt_get_send_ioctx() call invoked by + * srpt_handle_new_iu() fails. This is possible because the req_lim_delta + * value in each response is set to one, and it is possible that this response + * makes the initiator send a new request before the send completion for that + * response has been processed. This could e.g. happen if the call to + * srpt_put_send_iotcx() is delayed because of a higher priority interrupt or + * if IB retransmission causes generation of the send completion to be + * delayed. Incoming information units for which srpt_get_send_ioctx() fails + * are queued on cmd_wait_list. The code below processes these delayed + * requests one at a time. 
+ */ +static void srpt_process_send_completion(struct ib_cq *cq, + struct srpt_rdma_ch *ch, + struct ib_wc *wc) +{ + struct srpt_send_ioctx *send_ioctx; + uint32_t index; + enum srpt_opcode opcode; + + index = idx_from_wr_id(wc->wr_id); + opcode = opcode_from_wr_id(wc->wr_id); + send_ioctx = ch->ioctx_ring[index]; + if (wc->status == IB_WC_SUCCESS) { + if (opcode == SRPT_SEND) + srpt_handle_send_comp(ch, send_ioctx); + else { + WARN_ON(opcode != SRPT_RDMA_ABORT && + wc->opcode != IB_WC_RDMA_READ); + srpt_handle_rdma_comp(ch, send_ioctx, opcode); + } + } else { + if (opcode == SRPT_SEND) { + printk(KERN_INFO "sending response for idx %u failed" + " with status %d\n", index, wc->status); + srpt_handle_send_err_comp(ch, wc->wr_id); + } else if (opcode != SRPT_RDMA_MID) { + printk(KERN_INFO "RDMA t %d for idx %u failed with" + " status %d", opcode, index, wc->status); + srpt_handle_rdma_err_comp(ch, send_ioctx, opcode); + } + } + + while (unlikely(opcode == SRPT_SEND + && !list_empty(&ch->cmd_wait_list) + && srpt_get_ch_state(ch) == CH_LIVE + && (send_ioctx = srpt_get_send_ioctx(ch)) != NULL)) { + struct srpt_recv_ioctx *recv_ioctx; + + recv_ioctx = list_first_entry(&ch->cmd_wait_list, + struct srpt_recv_ioctx, + wait_list); + list_del(&recv_ioctx->wait_list); + srpt_handle_new_iu(ch, recv_ioctx, send_ioctx); + } +} + +static void srpt_process_completion(struct ib_cq *cq, struct srpt_rdma_ch *ch) +{ + struct ib_wc *const wc = ch->wc; + int i, n; + + WARN_ON(cq != ch->cq); + + ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); + while ((n = ib_poll_cq(cq, ARRAY_SIZE(ch->wc), wc)) > 0) { + for (i = 0; i < n; i++) { + if (opcode_from_wr_id(wc[i].wr_id) == SRPT_RECV) + srpt_process_rcv_completion(cq, ch, &wc[i]); + else + srpt_process_send_completion(cq, ch, &wc[i]); + } + } +} + +/** + * srpt_completion() - IB completion queue callback function. + * + * Notes: + * - It is guaranteed that a completion handler will never be invoked + * concurrently on two different CPUs for the same completion queue. See also + * Documentation/infiniband/core_locking.txt and the implementation of + * handle_edge_irq() in kernel/irq/chip.c. + * - When threaded IRQs are enabled, completion handlers are invoked in thread + * context instead of interrupt context. + */ +static void srpt_completion(struct ib_cq *cq, void *ctx) +{ + struct srpt_rdma_ch *ch = ctx; + + wake_up_interruptible(&ch->wait_queue); +} + +static int srpt_compl_thread(void *arg) +{ + struct srpt_rdma_ch *ch; + + /* Hibernation / freezing of the SRPT kernel thread is not supported. */ + current->flags |= PF_NOFREEZE; + + ch = arg; + BUG_ON(!ch); + printk(KERN_INFO "Session %s: kernel thread %s (PID %d) started\n", + ch->sess_name, ch->thread->comm, current->pid); + while (!kthread_should_stop()) { + wait_event_interruptible(ch->wait_queue, + (srpt_process_completion(ch->cq, ch), + kthread_should_stop())); + } + printk(KERN_INFO "Session %s: kernel thread %s (PID %d) stopped\n", + ch->sess_name, ch->thread->comm, current->pid); + return 0; +} + +/** + * srpt_create_ch_ib() - Create receive and send completion queues. 
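+ *
+ * Allocates a completion queue with room for ch->rq_size + srp_sq_size
+ * entries, creates an RC queue pair that is attached to the device-wide
+ * SRQ, and starts the per-channel completion thread (srpt_compl_thread).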
+ */ +static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) +{ + struct ib_qp_init_attr *qp_init; + struct srpt_port *sport = ch->sport; + struct srpt_device *sdev = sport->sdev; + u32 srp_sq_size = sport->port_attrib.srp_sq_size; + int ret; + + WARN_ON(ch->rq_size < 1); + + ret = -ENOMEM; + qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL); + if (!qp_init) + goto out; + + ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch, + ch->rq_size + srp_sq_size, 0); + if (IS_ERR(ch->cq)) { + ret = PTR_ERR(ch->cq); + printk(KERN_ERR "failed to create CQ cqe= %d ret= %d\n", + ch->rq_size + srp_sq_size, ret); + goto out; + } + + qp_init->qp_context = (void *)ch; + qp_init->event_handler + = (void(*)(struct ib_event *, void*))srpt_qp_event; + qp_init->send_cq = ch->cq; + qp_init->recv_cq = ch->cq; + qp_init->srq = sdev->srq; + qp_init->sq_sig_type = IB_SIGNAL_REQ_WR; + qp_init->qp_type = IB_QPT_RC; + qp_init->cap.max_send_wr = srp_sq_size; + qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE; + + ch->qp = ib_create_qp(sdev->pd, qp_init); + if (IS_ERR(ch->qp)) { + ret = PTR_ERR(ch->qp); + printk(KERN_ERR "failed to create_qp ret= %d\n", ret); + goto err_destroy_cq; + } + + atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr); + + pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n", + __func__, ch->cq->cqe, qp_init->cap.max_send_sge, + qp_init->cap.max_send_wr, ch->cm_id); + + ret = srpt_init_ch_qp(ch, ch->qp); + if (ret) + goto err_destroy_qp; + + init_waitqueue_head(&ch->wait_queue); + + pr_debug("creating thread for session %s\n", ch->sess_name); + + ch->thread = kthread_run(srpt_compl_thread, ch, "ib_srpt_compl"); + if (IS_ERR(ch->thread)) { + printk(KERN_ERR "failed to create kernel thread %ld\n", + PTR_ERR(ch->thread)); + ch->thread = NULL; + goto err_destroy_qp; + } + +out: + kfree(qp_init); + return ret; + +err_destroy_qp: + ib_destroy_qp(ch->qp); +err_destroy_cq: + ib_destroy_cq(ch->cq); + goto out; +} + +static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch) +{ + if (ch->thread) + kthread_stop(ch->thread); + + ib_destroy_qp(ch->qp); + ib_destroy_cq(ch->cq); +} + +/** + * __srpt_close_ch() - Close an RDMA channel by setting the QP error state. + * + * Reset the QP and make sure all resources associated with the channel will + * be deallocated at an appropriate time. + * + * Note: The caller must hold ch->sport->sdev->spinlock. + */ +static void __srpt_close_ch(struct srpt_rdma_ch *ch) +{ + struct srpt_device *sdev; + enum rdma_ch_state prev_state; + unsigned long flags; + + sdev = ch->sport->sdev; + + spin_lock_irqsave(&ch->spinlock, flags); + prev_state = ch->state; + switch (prev_state) { + case CH_CONNECTING: + case CH_LIVE: + ch->state = CH_DISCONNECTING; + break; + default: + break; + } + spin_unlock_irqrestore(&ch->spinlock, flags); + + switch (prev_state) { + case CH_CONNECTING: + ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0, + NULL, 0); + /* fall through */ + case CH_LIVE: + if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0) + printk(KERN_ERR "sending CM DREQ failed.\n"); + break; + case CH_DISCONNECTING: + break; + case CH_DRAINING: + case CH_RELEASING: + break; + } +} + +/** + * srpt_close_ch() - Close an RDMA channel. + */ +static void srpt_close_ch(struct srpt_rdma_ch *ch) +{ + struct srpt_device *sdev; + + sdev = ch->sport->sdev; + spin_lock_irq(&sdev->spinlock); + __srpt_close_ch(ch); + spin_unlock_irq(&sdev->spinlock); +} + +/** + * srpt_drain_channel() - Drain a channel by resetting the IB queue pair. 
+ * @cm_id: Pointer to the CM ID of the channel to be drained. + * + * Note: Must be called from inside srpt_cm_handler to avoid a race between + * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one() + * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one() + * waits until all target sessions for the associated IB device have been + * unregistered and target session registration involves a call to + * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until + * this function has finished). + */ +static void srpt_drain_channel(struct ib_cm_id *cm_id) +{ + struct srpt_device *sdev; + struct srpt_rdma_ch *ch; + int ret; + bool do_reset = false; + + WARN_ON_ONCE(irqs_disabled()); + + sdev = cm_id->context; + BUG_ON(!sdev); + spin_lock_irq(&sdev->spinlock); + list_for_each_entry(ch, &sdev->rch_list, list) { + if (ch->cm_id == cm_id) { + do_reset = srpt_test_and_set_ch_state(ch, + CH_CONNECTING, CH_DRAINING) || + srpt_test_and_set_ch_state(ch, + CH_LIVE, CH_DRAINING) || + srpt_test_and_set_ch_state(ch, + CH_DISCONNECTING, CH_DRAINING); + break; + } + } + spin_unlock_irq(&sdev->spinlock); + + if (do_reset) { + ret = srpt_ch_qp_err(ch); + if (ret < 0) + printk(KERN_ERR "Setting queue pair in error state" + " failed: %d\n", ret); + } +} + +/** + * srpt_find_channel() - Look up an RDMA channel. + * @cm_id: Pointer to the CM ID of the channel to be looked up. + * + * Return NULL if no matching RDMA channel has been found. + */ +static struct srpt_rdma_ch *srpt_find_channel(struct srpt_device *sdev, + struct ib_cm_id *cm_id) +{ + struct srpt_rdma_ch *ch; + bool found; + + WARN_ON_ONCE(irqs_disabled()); + BUG_ON(!sdev); + + found = false; + spin_lock_irq(&sdev->spinlock); + list_for_each_entry(ch, &sdev->rch_list, list) { + if (ch->cm_id == cm_id) { + found = true; + break; + } + } + spin_unlock_irq(&sdev->spinlock); + + return found ? ch : NULL; +} + +/** + * srpt_release_channel() - Release channel resources. + * + * Schedules the actual release because: + * - Calling the ib_destroy_cm_id() call from inside an IB CM callback would + * trigger a deadlock. + * - It is not safe to call TCM transport_* functions from interrupt context. 
+ */ +static void srpt_release_channel(struct srpt_rdma_ch *ch) +{ + schedule_work(&ch->release_work); +} + +static void srpt_release_channel_work(struct work_struct *w) +{ + struct srpt_rdma_ch *ch; + struct srpt_device *sdev; + + ch = container_of(w, struct srpt_rdma_ch, release_work); + pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess, + ch->release_done); + + sdev = ch->sport->sdev; + BUG_ON(!sdev); + + transport_deregister_session_configfs(ch->sess); + transport_deregister_session(ch->sess); + ch->sess = NULL; + + srpt_destroy_ch_ib(ch); + + srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring, + ch->sport->sdev, ch->rq_size, + ch->rsp_size, DMA_TO_DEVICE); + + spin_lock_irq(&sdev->spinlock); + list_del(&ch->list); + spin_unlock_irq(&sdev->spinlock); + + ib_destroy_cm_id(ch->cm_id); + + if (ch->release_done) + complete(ch->release_done); + + wake_up(&sdev->ch_releaseQ); + + kfree(ch); +} + +static struct srpt_node_acl *__srpt_lookup_acl(struct srpt_port *sport, + u8 i_port_id[16]) +{ + struct srpt_node_acl *nacl; + + list_for_each_entry(nacl, &sport->port_acl_list, list) + if (memcmp(nacl->i_port_id, i_port_id, + sizeof(nacl->i_port_id)) == 0) + return nacl; + + return NULL; +} + +static struct srpt_node_acl *srpt_lookup_acl(struct srpt_port *sport, + u8 i_port_id[16]) +{ + struct srpt_node_acl *nacl; + + spin_lock_irq(&sport->port_acl_lock); + nacl = __srpt_lookup_acl(sport, i_port_id); + spin_unlock_irq(&sport->port_acl_lock); + + return nacl; +} + +/** + * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED. + * + * Ownership of the cm_id is transferred to the target session if this + * functions returns zero. Otherwise the caller remains the owner of cm_id. + */ +static int srpt_cm_req_recv(struct ib_cm_id *cm_id, + struct ib_cm_req_event_param *param, + void *private_data) +{ + struct srpt_device *sdev = cm_id->context; + struct srpt_port *sport = &sdev->port[param->port - 1]; + struct srp_login_req *req; + struct srp_login_rsp *rsp; + struct srp_login_rej *rej; + struct ib_cm_rep_param *rep_param; + struct srpt_rdma_ch *ch, *tmp_ch; + struct srpt_node_acl *nacl; + u32 it_iu_len; + int i; + int ret = 0; + + WARN_ON_ONCE(irqs_disabled()); + + if (WARN_ON(!sdev || !private_data)) + return -EINVAL; + + req = (struct srp_login_req *)private_data; + + it_iu_len = be32_to_cpu(req->req_it_iu_len); + + printk(KERN_INFO "Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx," + " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d" + " (guid=0x%llx:0x%llx)\n", + be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]), + be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]), + be64_to_cpu(*(__be64 *)&req->target_port_id[0]), + be64_to_cpu(*(__be64 *)&req->target_port_id[8]), + it_iu_len, + param->port, + be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]), + be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8])); + + rsp = kzalloc(sizeof *rsp, GFP_KERNEL); + rej = kzalloc(sizeof *rej, GFP_KERNEL); + rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL); + + if (!rsp || !rej || !rep_param) { + ret = -ENOMEM; + goto out; + } + + if (it_iu_len > srp_max_req_size || it_iu_len < 64) { + rej->reason = __constant_cpu_to_be32( + SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE); + ret = -EINVAL; + printk(KERN_ERR "rejected SRP_LOGIN_REQ because its" + " length (%d bytes) is out of range (%d .. 
%d)\n", + it_iu_len, 64, srp_max_req_size); + goto reject; + } + + if (!sport->enabled) { + rej->reason = __constant_cpu_to_be32( + SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + ret = -EINVAL; + printk(KERN_ERR "rejected SRP_LOGIN_REQ because the target port" + " has not yet been enabled\n"); + goto reject; + } + + if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) { + rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN; + + spin_lock_irq(&sdev->spinlock); + + list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) { + if (!memcmp(ch->i_port_id, req->initiator_port_id, 16) + && !memcmp(ch->t_port_id, req->target_port_id, 16) + && param->port == ch->sport->port + && param->listen_id == ch->sport->sdev->cm_id + && ch->cm_id) { + enum rdma_ch_state ch_state; + + ch_state = srpt_get_ch_state(ch); + if (ch_state != CH_CONNECTING + && ch_state != CH_LIVE) + continue; + + /* found an existing channel */ + pr_debug("Found existing channel %s" + " cm_id= %p state= %d\n", + ch->sess_name, ch->cm_id, ch_state); + + __srpt_close_ch(ch); + + rsp->rsp_flags = + SRP_LOGIN_RSP_MULTICHAN_TERMINATED; + } + } + + spin_unlock_irq(&sdev->spinlock); + + } else + rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED; + + if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid) + || *(__be64 *)(req->target_port_id + 8) != + cpu_to_be64(srpt_service_guid)) { + rej->reason = __constant_cpu_to_be32( + SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL); + ret = -ENOMEM; + printk(KERN_ERR "rejected SRP_LOGIN_REQ because it" + " has an invalid target port identifier.\n"); + goto reject; + } + + ch = kzalloc(sizeof *ch, GFP_KERNEL); + if (!ch) { + rej->reason = __constant_cpu_to_be32( + SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + printk(KERN_ERR "rejected SRP_LOGIN_REQ because no memory.\n"); + ret = -ENOMEM; + goto reject; + } + + INIT_WORK(&ch->release_work, srpt_release_channel_work); + memcpy(ch->i_port_id, req->initiator_port_id, 16); + memcpy(ch->t_port_id, req->target_port_id, 16); + ch->sport = &sdev->port[param->port - 1]; + ch->cm_id = cm_id; + /* + * Avoid QUEUE_FULL conditions by limiting the number of buffers used + * for the SRP protocol to the command queue size. + */ + ch->rq_size = SRPT_RQ_SIZE; + spin_lock_init(&ch->spinlock); + ch->state = CH_CONNECTING; + INIT_LIST_HEAD(&ch->cmd_wait_list); + ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size; + + ch->ioctx_ring = (struct srpt_send_ioctx **) + srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size, + sizeof(*ch->ioctx_ring[0]), + ch->rsp_size, DMA_TO_DEVICE); + if (!ch->ioctx_ring) + goto free_ch; + + INIT_LIST_HEAD(&ch->free_list); + for (i = 0; i < ch->rq_size; i++) { + ch->ioctx_ring[i]->ch = ch; + list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list); + } + + ret = srpt_create_ch_ib(ch); + if (ret) { + rej->reason = __constant_cpu_to_be32( + SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + printk(KERN_ERR "rejected SRP_LOGIN_REQ because creating" + " a new RDMA channel failed.\n"); + goto free_ring; + } + + ret = srpt_ch_qp_rtr(ch, ch->qp); + if (ret) { + rej->reason = __constant_cpu_to_be32( + SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + printk(KERN_ERR "rejected SRP_LOGIN_REQ because enabling" + " RTR failed (error code = %d)\n", ret); + goto destroy_ib; + } + /* + * Use the initator port identifier as the session name. 
+ */ + snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx", + be64_to_cpu(*(__be64 *)ch->i_port_id), + be64_to_cpu(*(__be64 *)(ch->i_port_id + 8))); + + pr_debug("registering session %s\n", ch->sess_name); + + nacl = srpt_lookup_acl(sport, ch->i_port_id); + if (!nacl) { + printk(KERN_INFO "Rejected login because no ACL has been" + " configured yet for initiator %s.\n", ch->sess_name); + rej->reason = __constant_cpu_to_be32( + SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED); + goto destroy_ib; + } + + ch->sess = transport_init_session(); + if (IS_ERR(ch->sess)) { + rej->reason = __constant_cpu_to_be32( + SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); + pr_debug("Failed to create session\n"); + goto deregister_session; + } + ch->sess->se_node_acl = &nacl->nacl; + transport_register_session(&sport->port_tpg_1, &nacl->nacl, ch->sess, ch); + + pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess, + ch->sess_name, ch->cm_id); + + /* create srp_login_response */ + rsp->opcode = SRP_LOGIN_RSP; + rsp->tag = req->tag; + rsp->max_it_iu_len = req->req_it_iu_len; + rsp->max_ti_iu_len = req->req_it_iu_len; + ch->max_ti_iu_len = it_iu_len; + rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT + | SRP_BUF_FORMAT_INDIRECT); + rsp->req_lim_delta = cpu_to_be32(ch->rq_size); + atomic_set(&ch->req_lim, ch->rq_size); + atomic_set(&ch->req_lim_delta, 0); + + /* create cm reply */ + rep_param->qp_num = ch->qp->qp_num; + rep_param->private_data = (void *)rsp; + rep_param->private_data_len = sizeof *rsp; + rep_param->rnr_retry_count = 7; + rep_param->flow_control = 1; + rep_param->failover_accepted = 0; + rep_param->srq = 1; + rep_param->responder_resources = 4; + rep_param->initiator_depth = 4; + + ret = ib_send_cm_rep(cm_id, rep_param); + if (ret) { + printk(KERN_ERR "sending SRP_LOGIN_REQ response failed" + " (error code = %d)\n", ret); + goto release_channel; + } + + spin_lock_irq(&sdev->spinlock); + list_add_tail(&ch->list, &sdev->rch_list); + spin_unlock_irq(&sdev->spinlock); + + goto out; + +release_channel: + srpt_set_ch_state(ch, CH_RELEASING); + transport_deregister_session_configfs(ch->sess); + +deregister_session: + transport_deregister_session(ch->sess); + ch->sess = NULL; + +destroy_ib: + srpt_destroy_ch_ib(ch); + +free_ring: + srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring, + ch->sport->sdev, ch->rq_size, + ch->rsp_size, DMA_TO_DEVICE); +free_ch: + kfree(ch); + +reject: + rej->opcode = SRP_LOGIN_REJ; + rej->tag = req->tag; + rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT + | SRP_BUF_FORMAT_INDIRECT); + + ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, + (void *)rej, sizeof *rej); + +out: + kfree(rep_param); + kfree(rsp); + kfree(rej); + + return ret; +} + +static void srpt_cm_rej_recv(struct ib_cm_id *cm_id) +{ + printk(KERN_INFO "Received IB REJ for cm_id %p.\n", cm_id); + srpt_drain_channel(cm_id); +} + +/** + * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event. + * + * An IB_CM_RTU_RECEIVED message indicates that the connection is established + * and that the recipient may begin transmitting (RTU = ready to use). 
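+ *
+ * Moves the channel from CH_CONNECTING to CH_LIVE, transitions the queue
+ * pair to the RTS state and processes any information units that were
+ * queued on ch->cmd_wait_list while the channel was still connecting.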
+ */ +static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id) +{ + struct srpt_rdma_ch *ch; + int ret; + + ch = srpt_find_channel(cm_id->context, cm_id); + BUG_ON(!ch); + + if (srpt_test_and_set_ch_state(ch, CH_CONNECTING, CH_LIVE)) { + struct srpt_recv_ioctx *ioctx, *ioctx_tmp; + + ret = srpt_ch_qp_rts(ch, ch->qp); + + list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list, + wait_list) { + list_del(&ioctx->wait_list); + srpt_handle_new_iu(ch, ioctx, NULL); + } + if (ret) + srpt_close_ch(ch); + } +} + +static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id) +{ + printk(KERN_INFO "Received IB TimeWait exit for cm_id %p.\n", cm_id); + srpt_drain_channel(cm_id); +} + +static void srpt_cm_rep_error(struct ib_cm_id *cm_id) +{ + printk(KERN_INFO "Received IB REP error for cm_id %p.\n", cm_id); + srpt_drain_channel(cm_id); +} + +/** + * srpt_cm_dreq_recv() - Process reception of a DREQ message. + */ +static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id) +{ + struct srpt_rdma_ch *ch; + unsigned long flags; + bool send_drep = false; + + ch = srpt_find_channel(cm_id->context, cm_id); + BUG_ON(!ch); + + pr_debug("cm_id= %p ch->state= %d\n", cm_id, srpt_get_ch_state(ch)); + + spin_lock_irqsave(&ch->spinlock, flags); + switch (ch->state) { + case CH_CONNECTING: + case CH_LIVE: + send_drep = true; + ch->state = CH_DISCONNECTING; + break; + case CH_DISCONNECTING: + case CH_DRAINING: + case CH_RELEASING: + WARN(true, "unexpected channel state %d\n", ch->state); + break; + } + spin_unlock_irqrestore(&ch->spinlock, flags); + + if (send_drep) { + if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0) + printk(KERN_ERR "Sending IB DREP failed.\n"); + printk(KERN_INFO "Received DREQ and sent DREP for session %s.\n", + ch->sess_name); + } +} + +/** + * srpt_cm_drep_recv() - Process reception of a DREP message. + */ +static void srpt_cm_drep_recv(struct ib_cm_id *cm_id) +{ + printk(KERN_INFO "Received InfiniBand DREP message for cm_id %p.\n", + cm_id); + srpt_drain_channel(cm_id); +} + +/** + * srpt_cm_handler() - IB connection manager callback function. + * + * A non-zero return value will cause the caller destroy the CM ID. + * + * Note: srpt_cm_handler() must only return a non-zero value when transferring + * ownership of the cm_id to a channel by srpt_cm_req_recv() failed. Returning + * a non-zero value in any other case will trigger a race with the + * ib_destroy_cm_id() call in srpt_release_channel(). + */ +static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) +{ + int ret; + + ret = 0; + switch (event->event) { + case IB_CM_REQ_RECEIVED: + ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd, + event->private_data); + break; + case IB_CM_REJ_RECEIVED: + srpt_cm_rej_recv(cm_id); + break; + case IB_CM_RTU_RECEIVED: + case IB_CM_USER_ESTABLISHED: + srpt_cm_rtu_recv(cm_id); + break; + case IB_CM_DREQ_RECEIVED: + srpt_cm_dreq_recv(cm_id); + break; + case IB_CM_DREP_RECEIVED: + srpt_cm_drep_recv(cm_id); + break; + case IB_CM_TIMEWAIT_EXIT: + srpt_cm_timewait_exit(cm_id); + break; + case IB_CM_REP_ERROR: + srpt_cm_rep_error(cm_id); + break; + case IB_CM_DREQ_ERROR: + printk(KERN_INFO "Received IB DREQ ERROR event.\n"); + break; + case IB_CM_MRA_RECEIVED: + printk(KERN_INFO "Received IB MRA event\n"); + break; + default: + printk(KERN_ERR "received unrecognized IB CM event %d\n", + event->event); + break; + } + + return ret; +} + +/** + * srpt_perform_rdmas() - Perform IB RDMA. + * + * Returns zero upon success or a negative number upon failure. 
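+ *
+ * For a DMA_TO_DEVICE transfer (RDMA read from the initiator) only the last
+ * work request is posted with IB_SEND_SIGNALED; RDMA write work requests
+ * are posted unsignaled. If posting fails partway through, a zero-length
+ * SRPT_RDMA_ABORT work request is posted and this function waits until the
+ * corresponding completion has been received.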
+ */ +static int srpt_perform_rdmas(struct srpt_rdma_ch *ch, + struct srpt_send_ioctx *ioctx) +{ + struct ib_send_wr wr; + struct ib_send_wr *bad_wr; + struct rdma_iu *riu; + int i; + int ret; + int sq_wr_avail; + enum dma_data_direction dir; + const int n_rdma = ioctx->n_rdma; + + dir = ioctx->cmd.data_direction; + if (dir == DMA_TO_DEVICE) { + /* write */ + ret = -ENOMEM; + sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail); + if (sq_wr_avail < 0) { + printk(KERN_WARNING "IB send queue full (needed %d)\n", + n_rdma); + goto out; + } + } + + ioctx->rdma_aborted = false; + ret = 0; + riu = ioctx->rdma_ius; + memset(&wr, 0, sizeof wr); + + for (i = 0; i < n_rdma; ++i, ++riu) { + if (dir == DMA_FROM_DEVICE) { + wr.opcode = IB_WR_RDMA_WRITE; + wr.wr_id = encode_wr_id(i == n_rdma - 1 ? + SRPT_RDMA_WRITE_LAST : + SRPT_RDMA_MID, + ioctx->ioctx.index); + } else { + wr.opcode = IB_WR_RDMA_READ; + wr.wr_id = encode_wr_id(i == n_rdma - 1 ? + SRPT_RDMA_READ_LAST : + SRPT_RDMA_MID, + ioctx->ioctx.index); + } + wr.next = NULL; + wr.wr.rdma.remote_addr = riu->raddr; + wr.wr.rdma.rkey = riu->rkey; + wr.num_sge = riu->sge_cnt; + wr.sg_list = riu->sge; + + /* only get completion event for the last rdma write */ + if (i == (n_rdma - 1) && dir == DMA_TO_DEVICE) + wr.send_flags = IB_SEND_SIGNALED; + + ret = ib_post_send(ch->qp, &wr, &bad_wr); + if (ret) + break; + } + + if (ret) + printk(KERN_ERR "%s[%d]: ib_post_send() returned %d for %d/%d", + __func__, __LINE__, ret, i, n_rdma); + if (ret && i > 0) { + wr.num_sge = 0; + wr.wr_id = encode_wr_id(SRPT_RDMA_ABORT, ioctx->ioctx.index); + wr.send_flags = IB_SEND_SIGNALED; + while (ch->state == CH_LIVE && + ib_post_send(ch->qp, &wr, &bad_wr) != 0) { + printk(KERN_INFO "Trying to abort failed RDMA transfer [%d]", + ioctx->ioctx.index); + msleep(1000); + } + while (ch->state != CH_RELEASING && !ioctx->rdma_aborted) { + printk(KERN_INFO "Waiting until RDMA abort finished [%d]", + ioctx->ioctx.index); + msleep(1000); + } + } +out: + if (unlikely(dir == DMA_TO_DEVICE && ret < 0)) + atomic_add(n_rdma, &ch->sq_wr_avail); + return ret; +} + +/** + * srpt_xfer_data() - Start data transfer from initiator to target. + */ +static int srpt_xfer_data(struct srpt_rdma_ch *ch, + struct srpt_send_ioctx *ioctx) +{ + int ret; + + ret = srpt_map_sg_to_ib_sge(ch, ioctx); + if (ret) { + printk(KERN_ERR "%s[%d] ret=%d\n", __func__, __LINE__, ret); + goto out; + } + + ret = srpt_perform_rdmas(ch, ioctx); + if (ret) { + if (ret == -EAGAIN || ret == -ENOMEM) + printk(KERN_INFO "%s[%d] queue full -- ret=%d\n", + __func__, __LINE__, ret); + else + printk(KERN_ERR "%s[%d] fatal error -- ret=%d\n", + __func__, __LINE__, ret); + goto out_unmap; + } + +out: + return ret; +out_unmap: + srpt_unmap_sg_to_ib_sge(ch, ioctx); + goto out; +} + +static int srpt_write_pending_status(struct se_cmd *se_cmd) +{ + struct srpt_send_ioctx *ioctx; + + ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd); + return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA; +} + +/* + * srpt_write_pending() - Start data transfer from initiator to target (write). 
+ */ +static int srpt_write_pending(struct se_cmd *se_cmd) +{ + struct srpt_rdma_ch *ch; + struct srpt_send_ioctx *ioctx; + enum srpt_command_state new_state; + enum rdma_ch_state ch_state; + int ret; + + ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd); + + new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA); + WARN_ON(new_state == SRPT_STATE_DONE); + + ch = ioctx->ch; + BUG_ON(!ch); + + ch_state = srpt_get_ch_state(ch); + switch (ch_state) { + case CH_CONNECTING: + WARN(true, "unexpected channel state %d\n", ch_state); + ret = -EINVAL; + goto out; + case CH_LIVE: + break; + case CH_DISCONNECTING: + case CH_DRAINING: + case CH_RELEASING: + pr_debug("cmd with tag %lld: channel disconnecting\n", + ioctx->tag); + srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN); + ret = -EINVAL; + goto out; + } + ret = srpt_xfer_data(ch, ioctx); + +out: + return ret; +} + +static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status) +{ + switch (tcm_mgmt_status) { + case TMR_FUNCTION_COMPLETE: + return SRP_TSK_MGMT_SUCCESS; + case TMR_FUNCTION_REJECTED: + return SRP_TSK_MGMT_FUNC_NOT_SUPP; + } + return SRP_TSK_MGMT_FAILED; +} + +/** + * srpt_queue_response() - Transmits the response to a SCSI command. + * + * Callback function called by the TCM core. Must not block since it can be + * invoked on the context of the IB completion handler. + */ +static int srpt_queue_response(struct se_cmd *cmd) +{ + struct srpt_rdma_ch *ch; + struct srpt_send_ioctx *ioctx; + enum srpt_command_state state; + unsigned long flags; + int ret; + enum dma_data_direction dir; + int resp_len; + u8 srp_tm_status; + + ret = 0; + + ioctx = container_of(cmd, struct srpt_send_ioctx, cmd); + ch = ioctx->ch; + BUG_ON(!ch); + + spin_lock_irqsave(&ioctx->spinlock, flags); + state = ioctx->state; + switch (state) { + case SRPT_STATE_NEW: + case SRPT_STATE_DATA_IN: + ioctx->state = SRPT_STATE_CMD_RSP_SENT; + break; + case SRPT_STATE_MGMT: + ioctx->state = SRPT_STATE_MGMT_RSP_SENT; + break; + default: + WARN(true, "ch %p; cmd %d: unexpected command state %d\n", + ch, ioctx->ioctx.index, ioctx->state); + break; + } + spin_unlock_irqrestore(&ioctx->spinlock, flags); + + if (unlikely(transport_check_aborted_status(&ioctx->cmd, false) + || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) { + atomic_inc(&ch->req_lim_delta); + srpt_abort_cmd(ioctx); + goto out; + } + + dir = ioctx->cmd.data_direction; + + /* For read commands, transfer the data to the initiator. 
*/ + if (dir == DMA_FROM_DEVICE && ioctx->cmd.data_length && + !ioctx->queue_status_only) { + ret = srpt_xfer_data(ch, ioctx); + if (ret) { + printk(KERN_ERR "xfer_data failed for tag %llu\n", + ioctx->tag); + goto out; + } + } + + if (state != SRPT_STATE_MGMT) + resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->tag, + cmd->scsi_status); + else { + srp_tm_status + = tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response); + resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status, + ioctx->tag); + } + ret = srpt_post_send(ch, ioctx, resp_len); + if (ret) { + printk(KERN_ERR "sending cmd response failed for tag %llu\n", + ioctx->tag); + srpt_unmap_sg_to_ib_sge(ch, ioctx); + srpt_set_cmd_state(ioctx, SRPT_STATE_DONE); + kref_put(&ioctx->kref, srpt_put_send_ioctx_kref); + } + +out: + return ret; +} + +static int srpt_queue_status(struct se_cmd *cmd) +{ + struct srpt_send_ioctx *ioctx; + + ioctx = container_of(cmd, struct srpt_send_ioctx, cmd); + BUG_ON(ioctx->sense_data != cmd->sense_buffer); + if (cmd->se_cmd_flags & + (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE)) + WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION); + ioctx->queue_status_only = true; + return srpt_queue_response(cmd); +} + +static void srpt_refresh_port_work(struct work_struct *work) +{ + struct srpt_port *sport = container_of(work, struct srpt_port, work); + + srpt_refresh_port(sport); +} + +static int srpt_ch_list_empty(struct srpt_device *sdev) +{ + int res; + + spin_lock_irq(&sdev->spinlock); + res = list_empty(&sdev->rch_list); + spin_unlock_irq(&sdev->spinlock); + + return res; +} + +/** + * srpt_release_sdev() - Free the channel resources associated with a target. + */ +static int srpt_release_sdev(struct srpt_device *sdev) +{ + struct srpt_rdma_ch *ch, *tmp_ch; + int res; + + WARN_ON_ONCE(irqs_disabled()); + + BUG_ON(!sdev); + + spin_lock_irq(&sdev->spinlock); + list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) + __srpt_close_ch(ch); + spin_unlock_irq(&sdev->spinlock); + + res = wait_event_interruptible(sdev->ch_releaseQ, + srpt_ch_list_empty(sdev)); + if (res) + printk(KERN_ERR "%s: interrupted.\n", __func__); + + return 0; +} + +static struct srpt_port *__srpt_lookup_port(const char *name) +{ + struct ib_device *dev; + struct srpt_device *sdev; + struct srpt_port *sport; + int i; + + list_for_each_entry(sdev, &srpt_dev_list, list) { + dev = sdev->device; + if (!dev) + continue; + + for (i = 0; i < dev->phys_port_cnt; i++) { + sport = &sdev->port[i]; + + if (!strcmp(sport->port_guid, name)) + return sport; + } + } + + return NULL; +} + +static struct srpt_port *srpt_lookup_port(const char *name) +{ + struct srpt_port *sport; + + spin_lock(&srpt_dev_lock); + sport = __srpt_lookup_port(name); + spin_unlock(&srpt_dev_lock); + + return sport; +} + +/** + * srpt_add_one() - Infiniband device addition callback function. 
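+ *
+ * Allocates a protection domain, a DMA memory region and a shared receive
+ * queue, creates an IB CM listener for the service GUID, sets up the
+ * receive I/O context ring and initializes every physical port of the HCA.
+ * On failure the already allocated resources are torn down in reverse
+ * order.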
+ */ +static void srpt_add_one(struct ib_device *device) +{ + struct srpt_device *sdev; + struct srpt_port *sport; + struct ib_srq_init_attr srq_attr; + int i; + + pr_debug("device = %p, device->dma_ops = %p\n", device, + device->dma_ops); + + sdev = kzalloc(sizeof *sdev, GFP_KERNEL); + if (!sdev) + goto err; + + sdev->device = device; + INIT_LIST_HEAD(&sdev->rch_list); + init_waitqueue_head(&sdev->ch_releaseQ); + spin_lock_init(&sdev->spinlock); + + if (ib_query_device(device, &sdev->dev_attr)) + goto free_dev; + + sdev->pd = ib_alloc_pd(device); + if (IS_ERR(sdev->pd)) + goto free_dev; + + sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE); + if (IS_ERR(sdev->mr)) + goto err_pd; + + sdev->srq_size = min(srpt_srq_size, sdev->dev_attr.max_srq_wr); + + srq_attr.event_handler = srpt_srq_event; + srq_attr.srq_context = (void *)sdev; + srq_attr.attr.max_wr = sdev->srq_size; + srq_attr.attr.max_sge = 1; + srq_attr.attr.srq_limit = 0; + + sdev->srq = ib_create_srq(sdev->pd, &srq_attr); + if (IS_ERR(sdev->srq)) + goto err_mr; + + pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n", + __func__, sdev->srq_size, sdev->dev_attr.max_srq_wr, + device->name); + + if (!srpt_service_guid) + srpt_service_guid = be64_to_cpu(device->node_guid); + + sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev); + if (IS_ERR(sdev->cm_id)) + goto err_srq; + + /* print out target login information */ + pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx," + "pkey=ffff,service_id=%016llx\n", srpt_service_guid, + srpt_service_guid, srpt_service_guid); + + /* + * We do not have a consistent service_id (ie. also id_ext of target_id) + * to identify this target. We currently use the guid of the first HCA + * in the system as service_id; therefore, the target_id will change + * if this HCA is gone bad and replaced by different HCA + */ + if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0, NULL)) + goto err_cm; + + INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device, + srpt_event_handler); + if (ib_register_event_handler(&sdev->event_handler)) + goto err_cm; + + sdev->ioctx_ring = (struct srpt_recv_ioctx **) + srpt_alloc_ioctx_ring(sdev, sdev->srq_size, + sizeof(*sdev->ioctx_ring[0]), + srp_max_req_size, DMA_FROM_DEVICE); + if (!sdev->ioctx_ring) + goto err_event; + + for (i = 0; i < sdev->srq_size; ++i) + srpt_post_recv(sdev, sdev->ioctx_ring[i]); + + WARN_ON(sdev->device->phys_port_cnt > ARRAY_SIZE(sdev->port)); + + for (i = 1; i <= sdev->device->phys_port_cnt; i++) { + sport = &sdev->port[i - 1]; + sport->sdev = sdev; + sport->port = i; + sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE; + sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE; + sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE; + INIT_WORK(&sport->work, srpt_refresh_port_work); + INIT_LIST_HEAD(&sport->port_acl_list); + spin_lock_init(&sport->port_acl_lock); + + if (srpt_refresh_port(sport)) { + printk(KERN_ERR "MAD registration failed for %s-%d.\n", + srpt_sdev_name(sdev), i); + goto err_ring; + } + snprintf(sport->port_guid, sizeof(sport->port_guid), + "0x%016llx%016llx", + be64_to_cpu(sport->gid.global.subnet_prefix), + be64_to_cpu(sport->gid.global.interface_id)); + } + + spin_lock(&srpt_dev_lock); + list_add_tail(&sdev->list, &srpt_dev_list); + spin_unlock(&srpt_dev_lock); + +out: + ib_set_client_data(device, &srpt_client, sdev); + pr_debug("added %s.\n", device->name); + return; + +err_ring: + srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev, + sdev->srq_size, 
srp_max_req_size, + DMA_FROM_DEVICE); +err_event: + ib_unregister_event_handler(&sdev->event_handler); +err_cm: + ib_destroy_cm_id(sdev->cm_id); +err_srq: + ib_destroy_srq(sdev->srq); +err_mr: + ib_dereg_mr(sdev->mr); +err_pd: + ib_dealloc_pd(sdev->pd); +free_dev: + kfree(sdev); +err: + sdev = NULL; + printk(KERN_INFO "%s(%s) failed.\n", __func__, device->name); + goto out; +} + +/** + * srpt_remove_one() - InfiniBand device removal callback function. + */ +static void srpt_remove_one(struct ib_device *device) +{ + struct srpt_device *sdev; + int i; + + sdev = ib_get_client_data(device, &srpt_client); + if (!sdev) { + printk(KERN_INFO "%s(%s): nothing to do.\n", __func__, + device->name); + return; + } + + srpt_unregister_mad_agent(sdev); + + ib_unregister_event_handler(&sdev->event_handler); + + /* Cancel any work queued by the just unregistered IB event handler. */ + for (i = 0; i < sdev->device->phys_port_cnt; i++) + cancel_work_sync(&sdev->port[i].work); + + ib_destroy_cm_id(sdev->cm_id); + + /* + * Unregistering a target must happen after destroying sdev->cm_id + * such that no new SRP_LOGIN_REQ information units can arrive while + * destroying the target. + */ + spin_lock(&srpt_dev_lock); + list_del(&sdev->list); + spin_unlock(&srpt_dev_lock); + srpt_release_sdev(sdev); + + ib_destroy_srq(sdev->srq); + ib_dereg_mr(sdev->mr); + ib_dealloc_pd(sdev->pd); + + srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev, + sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE); + sdev->ioctx_ring = NULL; + kfree(sdev); +} + +static struct ib_client srpt_client = { + .name = DRV_NAME, + .add = srpt_add_one, + .remove = srpt_remove_one +}; + +static int srpt_check_true(struct se_portal_group *se_tpg) +{ + return 1; +} + +static int srpt_check_false(struct se_portal_group *se_tpg) +{ + return 0; +} + +static char *srpt_get_fabric_name(void) +{ + return "srpt"; +} + +static u8 srpt_get_fabric_proto_ident(struct se_portal_group *se_tpg) +{ + return SCSI_TRANSPORTID_PROTOCOLID_SRP; +} + +static char *srpt_get_fabric_wwn(struct se_portal_group *tpg) +{ + struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1); + + return sport->port_guid; +} + +static u16 srpt_get_tag(struct se_portal_group *tpg) +{ + return 1; +} + +static u32 srpt_get_default_depth(struct se_portal_group *se_tpg) +{ + return 1; +} + +static u32 srpt_get_pr_transport_id(struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int *format_code, unsigned char *buf) +{ + struct srpt_node_acl *nacl; + struct spc_rdma_transport_id *tr_id; + + nacl = container_of(se_nacl, struct srpt_node_acl, nacl); + tr_id = (void *)buf; + tr_id->protocol_identifier = SCSI_TRANSPORTID_PROTOCOLID_SRP; + memcpy(tr_id->i_port_id, nacl->i_port_id, sizeof(tr_id->i_port_id)); + return sizeof(*tr_id); +} + +static u32 srpt_get_pr_transport_id_len(struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int *format_code) +{ + *format_code = 0; + return sizeof(struct spc_rdma_transport_id); +} + +static char *srpt_parse_pr_out_transport_id(struct se_portal_group *se_tpg, + const char *buf, u32 *out_tid_len, + char **port_nexus_ptr) +{ + struct spc_rdma_transport_id *tr_id; + + *port_nexus_ptr = NULL; + *out_tid_len = sizeof(struct spc_rdma_transport_id); + tr_id = (void *)buf; + return (char *)tr_id->i_port_id; +} + +static struct se_node_acl *srpt_alloc_fabric_acl(struct se_portal_group *se_tpg) +{ + struct srpt_node_acl *nacl; + + nacl = 
kzalloc(sizeof(struct srpt_node_acl), GFP_KERNEL); + if (!nacl) { + printk(KERN_ERR "Unable to alocate struct srpt_node_acl\n"); + return NULL; + } + + return &nacl->nacl; +} + +static void srpt_release_fabric_acl(struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl) +{ + struct srpt_node_acl *nacl; + + nacl = container_of(se_nacl, struct srpt_node_acl, nacl); + kfree(nacl); +} + +static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg) +{ + return 1; +} + +static void srpt_release_cmd(struct se_cmd *se_cmd) +{ +} + +/** + * srpt_shutdown_session() - Whether or not a session may be shut down. + */ +static int srpt_shutdown_session(struct se_session *se_sess) +{ + return true; +} + +/** + * srpt_close_session() - Forcibly close a session. + * + * Callback function invoked by the TCM core to clean up sessions associated + * with a node ACL when the user invokes + * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id + */ +static void srpt_close_session(struct se_session *se_sess) +{ + DECLARE_COMPLETION_ONSTACK(release_done); + struct srpt_rdma_ch *ch; + struct srpt_device *sdev; + int res; + + ch = se_sess->fabric_sess_ptr; + WARN_ON(ch->sess != se_sess); + + pr_debug("ch %p state %d\n", ch, srpt_get_ch_state(ch)); + + sdev = ch->sport->sdev; + spin_lock_irq(&sdev->spinlock); + BUG_ON(ch->release_done); + ch->release_done = &release_done; + __srpt_close_ch(ch); + spin_unlock_irq(&sdev->spinlock); + + res = wait_for_completion_timeout(&release_done, 60 * HZ); + WARN_ON(res <= 0); +} + +/** + * To do: Find out whether stop_session() has a meaning for transports + * other than iSCSI. + */ +static void srpt_stop_session(struct se_session *se_sess, int sess_sleep, + int conn_sleep) +{ +} + +static void srpt_reset_nexus(struct se_session *sess) +{ + printk(KERN_ERR "This is the SRP protocol, not iSCSI\n"); +} + +static int srpt_sess_logged_in(struct se_session *se_sess) +{ + return true; +} + +/** + * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB). + * + * A quote from RFC 4455 (SCSI-MIB) about this MIB object: + * This object represents an arbitrary integer used to uniquely identify a + * particular attached remote initiator port to a particular SCSI target port + * within a particular SCSI target device within a particular SCSI instance. + */ +static u32 srpt_sess_get_index(struct se_session *se_sess) +{ + return 0; +} + +static void srpt_set_default_node_attrs(struct se_node_acl *nacl) +{ +} + +static u32 srpt_get_task_tag(struct se_cmd *se_cmd) +{ + struct srpt_send_ioctx *ioctx; + + ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd); + return ioctx->tag; +} + +/* Note: only used from inside debug printk's by the TCM core. */ +static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd) +{ + struct srpt_send_ioctx *ioctx; + + ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd); + return srpt_get_cmd_state(ioctx); +} + +static u16 srpt_set_fabric_sense_len(struct se_cmd *cmd, u32 sense_length) +{ + return 0; +} + +static u16 srpt_get_fabric_sense_len(void) +{ + return 0; +} + +static int srpt_is_state_remove(struct se_cmd *se_cmd) +{ + return 0; +} + +/** + * srpt_parse_i_port_id() - Parse an initiator port ID. + * @name: ASCII representation of a 128-bit initiator port ID. + * @i_port_id: Binary 128-bit port ID. 
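+ *
+ * An optional "0x" prefix is accepted and input shorter than 32 hex digits
+ * is padded with leading zero bytes. As a hypothetical example, the ACL
+ * name "0x12345678" yields an i_port_id whose last four bytes are
+ * 0x12 0x34 0x56 0x78 and whose first twelve bytes are zero.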
+ */ +static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name) +{ + const char *p; + unsigned len, count, leading_zero_bytes; + int ret, rc; + + p = name; + if (strnicmp(p, "0x", 2) == 0) + p += 2; + ret = -EINVAL; + len = strlen(p); + if (len % 2) + goto out; + count = min(len / 2, 16U); + leading_zero_bytes = 16 - count; + memset(i_port_id, 0, leading_zero_bytes); + rc = hex2bin(i_port_id + leading_zero_bytes, p, count); + if (rc < 0) + pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc); + ret = 0; +out: + return ret; +} + +/* + * configfs callback function invoked for + * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id + */ +static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg, + struct config_group *group, + const char *name) +{ + struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1); + struct se_node_acl *se_nacl, *se_nacl_new; + struct srpt_node_acl *nacl; + int ret = 0; + u32 nexus_depth = 1; + u8 i_port_id[16]; + + if (srpt_parse_i_port_id(i_port_id, name) < 0) { + printk(KERN_ERR "invalid initiator port ID %s\n", name); + ret = -EINVAL; + goto err; + } + + se_nacl_new = srpt_alloc_fabric_acl(tpg); + if (!se_nacl_new) { + ret = -ENOMEM; + goto err; + } + /* + * nacl_new may be released by core_tpg_add_initiator_node_acl() + * when converting a node ACL from demo mode to explict + */ + se_nacl = core_tpg_add_initiator_node_acl(tpg, se_nacl_new, name, + nexus_depth); + if (IS_ERR(se_nacl)) { + ret = PTR_ERR(se_nacl); + goto err; + } + /* Locate our struct srpt_node_acl and set sdev and i_port_id. */ + nacl = container_of(se_nacl, struct srpt_node_acl, nacl); + memcpy(&nacl->i_port_id[0], &i_port_id[0], 16); + nacl->sport = sport; + + spin_lock_irq(&sport->port_acl_lock); + list_add_tail(&nacl->list, &sport->port_acl_list); + spin_unlock_irq(&sport->port_acl_lock); + + return se_nacl; +err: + return ERR_PTR(ret); +} + +/* + * configfs callback function invoked for + * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id + */ +static void srpt_drop_nodeacl(struct se_node_acl *se_nacl) +{ + struct srpt_node_acl *nacl; + struct srpt_device *sdev; + struct srpt_port *sport; + + nacl = container_of(se_nacl, struct srpt_node_acl, nacl); + sport = nacl->sport; + sdev = sport->sdev; + spin_lock_irq(&sport->port_acl_lock); + list_del(&nacl->list); + spin_unlock_irq(&sport->port_acl_lock); + core_tpg_del_initiator_node_acl(&sport->port_tpg_1, se_nacl, 1); + srpt_release_fabric_acl(NULL, se_nacl); +} + +static ssize_t srpt_tpg_attrib_show_srp_max_rdma_size( + struct se_portal_group *se_tpg, + char *page) +{ + struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); + + return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size); +} + +static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size( + struct se_portal_group *se_tpg, + const char *page, + size_t count) +{ + struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); + unsigned long val; + int ret; + + ret = strict_strtoul(page, 0, &val); + if (ret < 0) { + pr_err("strict_strtoul() failed with ret: %d\n", ret); + return -EINVAL; + } + if (val > MAX_SRPT_RDMA_SIZE) { + pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val, + MAX_SRPT_RDMA_SIZE); + return -EINVAL; + } + if (val < DEFAULT_MAX_RDMA_SIZE) { + pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n", + val, DEFAULT_MAX_RDMA_SIZE); + return -EINVAL; + } + sport->port_attrib.srp_max_rdma_size = val; + + return count; +} + 
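+/*
+ * TF_TPG_ATTRIB_ATTR() below exposes the show/store pair above as a configfs
+ * attribute. Following the path naming used elsewhere in this file, that
+ * attribute is expected to show up as
+ * /sys/kernel/config/target/$driver/$port/$tpg/attrib/srp_max_rdma_size,
+ * e.g. (illustrative only):
+ *   echo <max RDMA size in bytes> > $tpg/attrib/srp_max_rdma_size
+ */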
+TF_TPG_ATTRIB_ATTR(srpt, srp_max_rdma_size, S_IRUGO | S_IWUSR); + +static ssize_t srpt_tpg_attrib_show_srp_max_rsp_size( + struct se_portal_group *se_tpg, + char *page) +{ + struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); + + return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size); +} + +static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size( + struct se_portal_group *se_tpg, + const char *page, + size_t count) +{ + struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); + unsigned long val; + int ret; + + ret = strict_strtoul(page, 0, &val); + if (ret < 0) { + pr_err("strict_strtoul() failed with ret: %d\n", ret); + return -EINVAL; + } + if (val > MAX_SRPT_RSP_SIZE) { + pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val, + MAX_SRPT_RSP_SIZE); + return -EINVAL; + } + if (val < MIN_MAX_RSP_SIZE) { + pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val, + MIN_MAX_RSP_SIZE); + return -EINVAL; + } + sport->port_attrib.srp_max_rsp_size = val; + + return count; +} + +TF_TPG_ATTRIB_ATTR(srpt, srp_max_rsp_size, S_IRUGO | S_IWUSR); + +static ssize_t srpt_tpg_attrib_show_srp_sq_size( + struct se_portal_group *se_tpg, + char *page) +{ + struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); + + return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size); +} + +static ssize_t srpt_tpg_attrib_store_srp_sq_size( + struct se_portal_group *se_tpg, + const char *page, + size_t count) +{ + struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); + unsigned long val; + int ret; + + ret = strict_strtoul(page, 0, &val); + if (ret < 0) { + pr_err("strict_strtoul() failed with ret: %d\n", ret); + return -EINVAL; + } + if (val > MAX_SRPT_SRQ_SIZE) { + pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val, + MAX_SRPT_SRQ_SIZE); + return -EINVAL; + } + if (val < MIN_SRPT_SRQ_SIZE) { + pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val, + MIN_SRPT_SRQ_SIZE); + return -EINVAL; + } + sport->port_attrib.srp_sq_size = val; + + return count; +} + +TF_TPG_ATTRIB_ATTR(srpt, srp_sq_size, S_IRUGO | S_IWUSR); + +static struct configfs_attribute *srpt_tpg_attrib_attrs[] = { + &srpt_tpg_attrib_srp_max_rdma_size.attr, + &srpt_tpg_attrib_srp_max_rsp_size.attr, + &srpt_tpg_attrib_srp_sq_size.attr, + NULL, +}; + +static ssize_t srpt_tpg_show_enable( + struct se_portal_group *se_tpg, + char *page) +{ + struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); + + return snprintf(page, PAGE_SIZE, "%d\n", (sport->enabled) ? 
1: 0); +} + +static ssize_t srpt_tpg_store_enable( + struct se_portal_group *se_tpg, + const char *page, + size_t count) +{ + struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); + unsigned long tmp; + int ret; + + ret = strict_strtoul(page, 0, &tmp); + if (ret < 0) { + printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n"); + return -EINVAL; + } + + if ((tmp != 0) && (tmp != 1)) { + printk(KERN_ERR "Illegal value for srpt_tpg_store_enable: %lu\n", tmp); + return -EINVAL; + } + if (tmp == 1) + sport->enabled = true; + else + sport->enabled = false; + + return count; +} + +TF_TPG_BASE_ATTR(srpt, enable, S_IRUGO | S_IWUSR); + +static struct configfs_attribute *srpt_tpg_attrs[] = { + &srpt_tpg_enable.attr, + NULL, +}; + +/** + * configfs callback invoked for + * mkdir /sys/kernel/config/target/$driver/$port/$tpg + */ +static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn, + struct config_group *group, + const char *name) +{ + struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn); + int res; + + /* Initialize sport->port_wwn and sport->port_tpg_1 */ + res = core_tpg_register(&srpt_target->tf_ops, &sport->port_wwn, + &sport->port_tpg_1, sport, TRANSPORT_TPG_TYPE_NORMAL); + if (res) + return ERR_PTR(res); + + return &sport->port_tpg_1; +} + +/** + * configfs callback invoked for + * rmdir /sys/kernel/config/target/$driver/$port/$tpg + */ +static void srpt_drop_tpg(struct se_portal_group *tpg) +{ + struct srpt_port *sport = container_of(tpg, + struct srpt_port, port_tpg_1); + + sport->enabled = false; + core_tpg_deregister(&sport->port_tpg_1); +} + +/** + * configfs callback invoked for + * mkdir /sys/kernel/config/target/$driver/$port + */ +static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf, + struct config_group *group, + const char *name) +{ + struct srpt_port *sport; + int ret; + + sport = srpt_lookup_port(name); + pr_debug("make_tport(%s)\n", name); + ret = -EINVAL; + if (!sport) + goto err; + + return &sport->port_wwn; + +err: + return ERR_PTR(ret); +} + +/** + * configfs callback invoked for + * rmdir /sys/kernel/config/target/$driver/$port + */ +static void srpt_drop_tport(struct se_wwn *wwn) +{ + struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn); + + pr_debug("drop_tport(%s\n", config_item_name(&sport->port_wwn.wwn_group.cg_item)); +} + +static ssize_t srpt_wwn_show_attr_version(struct target_fabric_configfs *tf, + char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION); +} + +TF_WWN_ATTR_RO(srpt, version); + +static struct configfs_attribute *srpt_wwn_attrs[] = { + &srpt_wwn_version.attr, + NULL, +}; + +static struct target_core_fabric_ops srpt_template = { + .get_fabric_name = srpt_get_fabric_name, + .get_fabric_proto_ident = srpt_get_fabric_proto_ident, + .tpg_get_wwn = srpt_get_fabric_wwn, + .tpg_get_tag = srpt_get_tag, + .tpg_get_default_depth = srpt_get_default_depth, + .tpg_get_pr_transport_id = srpt_get_pr_transport_id, + .tpg_get_pr_transport_id_len = srpt_get_pr_transport_id_len, + .tpg_parse_pr_out_transport_id = srpt_parse_pr_out_transport_id, + .tpg_check_demo_mode = srpt_check_false, + .tpg_check_demo_mode_cache = srpt_check_true, + .tpg_check_demo_mode_write_protect = srpt_check_true, + .tpg_check_prod_mode_write_protect = srpt_check_false, + .tpg_alloc_fabric_acl = srpt_alloc_fabric_acl, + .tpg_release_fabric_acl = srpt_release_fabric_acl, + .tpg_get_inst_index = srpt_tpg_get_inst_index, + .release_cmd = srpt_release_cmd, + .check_stop_free = 
srpt_check_stop_free, + .shutdown_session = srpt_shutdown_session, + .close_session = srpt_close_session, + .stop_session = srpt_stop_session, + .fall_back_to_erl0 = srpt_reset_nexus, + .sess_logged_in = srpt_sess_logged_in, + .sess_get_index = srpt_sess_get_index, + .sess_get_initiator_sid = NULL, + .write_pending = srpt_write_pending, + .write_pending_status = srpt_write_pending_status, + .set_default_node_attributes = srpt_set_default_node_attrs, + .get_task_tag = srpt_get_task_tag, + .get_cmd_state = srpt_get_tcm_cmd_state, + .queue_data_in = srpt_queue_response, + .queue_status = srpt_queue_status, + .queue_tm_rsp = srpt_queue_response, + .get_fabric_sense_len = srpt_get_fabric_sense_len, + .set_fabric_sense_len = srpt_set_fabric_sense_len, + .is_state_remove = srpt_is_state_remove, + /* + * Setup function pointers for generic logic in + * target_core_fabric_configfs.c + */ + .fabric_make_wwn = srpt_make_tport, + .fabric_drop_wwn = srpt_drop_tport, + .fabric_make_tpg = srpt_make_tpg, + .fabric_drop_tpg = srpt_drop_tpg, + .fabric_post_link = NULL, + .fabric_pre_unlink = NULL, + .fabric_make_np = NULL, + .fabric_drop_np = NULL, + .fabric_make_nodeacl = srpt_make_nodeacl, + .fabric_drop_nodeacl = srpt_drop_nodeacl, +}; + +/** + * srpt_init_module() - Kernel module initialization. + * + * Note: Since ib_register_client() registers callback functions, and since at + * least one of these callback functions (srpt_add_one()) calls target core + * functions, this driver must be registered with the target core before + * ib_register_client() is called. + */ +static int __init srpt_init_module(void) +{ + int ret; + + ret = -EINVAL; + if (srp_max_req_size < MIN_MAX_REQ_SIZE) { + printk(KERN_ERR "invalid value %d for kernel module parameter" + " srp_max_req_size -- must be at least %d.\n", + srp_max_req_size, MIN_MAX_REQ_SIZE); + goto out; + } + + if (srpt_srq_size < MIN_SRPT_SRQ_SIZE + || srpt_srq_size > MAX_SRPT_SRQ_SIZE) { + printk(KERN_ERR "invalid value %d for kernel module parameter" + " srpt_srq_size -- must be in the range [%d..%d].\n", + srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE); + goto out; + } + + srpt_target = target_fabric_configfs_init(THIS_MODULE, "srpt"); + if (IS_ERR(srpt_target)) { + printk(KERN_ERR "couldn't register\n"); + ret = PTR_ERR(srpt_target); + goto out; + } + + srpt_target->tf_ops = srpt_template; + + /* Enable SG chaining */ + srpt_target->tf_ops.task_sg_chaining = true; + + /* + * Set up default attribute lists. 
+ */ + srpt_target->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = srpt_wwn_attrs; + srpt_target->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = srpt_tpg_attrs; + srpt_target->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = srpt_tpg_attrib_attrs; + srpt_target->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; + srpt_target->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; + srpt_target->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; + srpt_target->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + srpt_target->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + srpt_target->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; + + ret = target_fabric_configfs_register(srpt_target); + if (ret < 0) { + printk(KERN_ERR "couldn't register\n"); + goto out_free_target; + } + + ret = ib_register_client(&srpt_client); + if (ret) { + printk(KERN_ERR "couldn't register IB client\n"); + goto out_unregister_target; + } + + return 0; + +out_unregister_target: + target_fabric_configfs_deregister(srpt_target); + srpt_target = NULL; +out_free_target: + if (srpt_target) + target_fabric_configfs_free(srpt_target); +out: + return ret; +} + +static void __exit srpt_cleanup_module(void) +{ + ib_unregister_client(&srpt_client); + target_fabric_configfs_deregister(srpt_target); + srpt_target = NULL; +} + +module_init(srpt_init_module); +module_exit(srpt_cleanup_module); diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h new file mode 100644 index 0000000..61e52b8 --- /dev/null +++ b/drivers/infiniband/ulp/srpt/ib_srpt.h @@ -0,0 +1,443 @@ +/* + * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved. + * Copyright (C) 2009 - 2010 Bart Van Assche <bvanassche@acm.org>. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef IB_SRPT_H +#define IB_SRPT_H + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/wait.h> + +#include <rdma/ib_verbs.h> +#include <rdma/ib_sa.h> +#include <rdma/ib_cm.h> + +#include <scsi/srp.h> + +#include "ib_dm_mad.h" + +/* + * The prefix the ServiceName field must start with in the device management + * ServiceEntries attribute pair. See also the SRP specification. 
+ */ +#define SRP_SERVICE_NAME_PREFIX "SRP.T10:" + +enum { + /* + * SRP IOControllerProfile attributes for SRP target ports that have + * not been defined in <scsi/srp.h>. Source: section B.7, table B.7 + * in the SRP specification. + */ + SRP_PROTOCOL = 0x0108, + SRP_PROTOCOL_VERSION = 0x0001, + SRP_IO_SUBCLASS = 0x609e, + SRP_SEND_TO_IOC = 0x01, + SRP_SEND_FROM_IOC = 0x02, + SRP_RDMA_READ_FROM_IOC = 0x08, + SRP_RDMA_WRITE_FROM_IOC = 0x20, + + /* + * srp_login_cmd.req_flags bitmasks. See also table 9 in the SRP + * specification. + */ + SRP_MTCH_ACTION = 0x03, /* MULTI-CHANNEL ACTION */ + SRP_LOSOLNT = 0x10, /* logout solicited notification */ + SRP_CRSOLNT = 0x20, /* credit request solicited notification */ + SRP_AESOLNT = 0x40, /* asynchronous event solicited notification */ + + /* + * srp_cmd.sol_nt / srp_tsk_mgmt.sol_not bitmasks. See also tables + * 18 and 20 in the SRP specification. + */ + SRP_SCSOLNT = 0x02, /* SCSOLNT = successful solicited notification */ + SRP_UCSOLNT = 0x04, /* UCSOLNT = unsuccessful solicited notification */ + + /* + * srp_rsp.sol_not / srp_t_logout.sol_not bitmasks. See also tables + * 16 and 22 in the SRP specification. + */ + SRP_SOLNT = 0x01, /* SOLNT = solicited notification */ + + /* See also table 24 in the SRP specification. */ + SRP_TSK_MGMT_SUCCESS = 0x00, + SRP_TSK_MGMT_FUNC_NOT_SUPP = 0x04, + SRP_TSK_MGMT_FAILED = 0x05, + + /* See also table 21 in the SRP specification. */ + SRP_CMD_SIMPLE_Q = 0x0, + SRP_CMD_HEAD_OF_Q = 0x1, + SRP_CMD_ORDERED_Q = 0x2, + SRP_CMD_ACA = 0x4, + + SRP_LOGIN_RSP_MULTICHAN_NO_CHAN = 0x0, + SRP_LOGIN_RSP_MULTICHAN_TERMINATED = 0x1, + SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2, + + SRPT_DEF_SG_TABLESIZE = 128, + SRPT_DEF_SG_PER_WQE = 16, + + MIN_SRPT_SQ_SIZE = 16, + DEF_SRPT_SQ_SIZE = 4096, + SRPT_RQ_SIZE = 128, + MIN_SRPT_SRQ_SIZE = 4, + DEFAULT_SRPT_SRQ_SIZE = 4095, + MAX_SRPT_SRQ_SIZE = 65535, + MAX_SRPT_RDMA_SIZE = 1U << 24, + MAX_SRPT_RSP_SIZE = 1024, + + MIN_MAX_REQ_SIZE = 996, + DEFAULT_MAX_REQ_SIZE + = sizeof(struct srp_cmd)/*48*/ + + sizeof(struct srp_indirect_buf)/*20*/ + + 128 * sizeof(struct srp_direct_buf)/*16*/, + + MIN_MAX_RSP_SIZE = sizeof(struct srp_rsp)/*36*/ + 4, + DEFAULT_MAX_RSP_SIZE = 256, /* leaves 220 bytes for sense data */ + + DEFAULT_MAX_RDMA_SIZE = 65536, +}; + +enum srpt_opcode { + SRPT_RECV, + SRPT_SEND, + SRPT_RDMA_MID, + SRPT_RDMA_ABORT, + SRPT_RDMA_READ_LAST, + SRPT_RDMA_WRITE_LAST, +}; + +static inline u64 encode_wr_id(u8 opcode, u32 idx) +{ + return ((u64)opcode << 32) | idx; +} +static inline enum srpt_opcode opcode_from_wr_id(u64 wr_id) +{ + return wr_id >> 32; +} +static inline u32 idx_from_wr_id(u64 wr_id) +{ + return (u32)wr_id; +} + +struct rdma_iu { + u64 raddr; + u32 rkey; + struct ib_sge *sge; + u32 sge_cnt; + int mem_id; +}; + +/** + * enum srpt_command_state - SCSI command state managed by SRPT. + * @SRPT_STATE_NEW: New command arrived and is being processed. + * @SRPT_STATE_NEED_DATA: Processing a write or bidir command and waiting + * for data arrival. + * @SRPT_STATE_DATA_IN: Data for the write or bidir command arrived and is + * being processed. + * @SRPT_STATE_CMD_RSP_SENT: SRP_RSP for SRP_CMD has been sent. + * @SRPT_STATE_MGMT: Processing a SCSI task management command. + * @SRPT_STATE_MGMT_RSP_SENT: SRP_RSP for SRP_TSK_MGMT has been sent. + * @SRPT_STATE_DONE: Command processing finished successfully, command + * processing has been aborted or command processing + * failed. 
+ */ +enum srpt_command_state { + SRPT_STATE_NEW = 0, + SRPT_STATE_NEED_DATA = 1, + SRPT_STATE_DATA_IN = 2, + SRPT_STATE_CMD_RSP_SENT = 3, + SRPT_STATE_MGMT = 4, + SRPT_STATE_MGMT_RSP_SENT = 5, + SRPT_STATE_DONE = 6, +}; + +/** + * struct srpt_ioctx - Shared SRPT I/O context information. + * @buf: Pointer to the buffer. + * @dma: DMA address of the buffer. + * @index: Index of the I/O context in its ioctx_ring array. + */ +struct srpt_ioctx { + void *buf; + dma_addr_t dma; + uint32_t index; +}; + +/** + * struct srpt_recv_ioctx - SRPT receive I/O context. + * @ioctx: See above. + * @wait_list: Node for insertion in srpt_rdma_ch.cmd_wait_list. + */ +struct srpt_recv_ioctx { + struct srpt_ioctx ioctx; + struct list_head wait_list; +}; + +/** + * struct srpt_send_ioctx - SRPT send I/O context. + * @ioctx: See above. + * @ch: Channel pointer. + * @free_list: Node in srpt_rdma_ch.free_list. + * @n_rbuf: Number of data buffers in the received SRP command. + * @rbufs: Pointer to SRP data buffer array. + * @single_rbuf: SRP data buffer if the command has only a single buffer. + * @sg: Pointer to sg-list associated with this I/O context. + * @sg_cnt: SG-list size. + * @mapped_sg_count: ib_dma_map_sg() return value. + * @n_rdma_ius: Number of elements in the rdma_ius array. + * @rdma_ius: Array with information about the RDMA mapping. + * @tag: Tag of the received SRP information unit. + * @spinlock: Protects 'state'. + * @state: I/O context state. + * @rdma_aborted: If initiating a multipart RDMA transfer failed, whether + * the already initiated transfers have finished. + * @cmd: Target core command data structure. + * @sense_data: SCSI sense data. + */ +struct srpt_send_ioctx { + struct srpt_ioctx ioctx; + struct srpt_rdma_ch *ch; + struct kref kref; + struct rdma_iu *rdma_ius; + struct srp_direct_buf *rbufs; + struct srp_direct_buf single_rbuf; + struct scatterlist *sg; + struct list_head free_list; + spinlock_t spinlock; + enum srpt_command_state state; + bool rdma_aborted; + struct se_cmd cmd; + struct completion tx_done; + u64 tag; + int sg_cnt; + int mapped_sg_count; + u16 n_rdma_ius; + u8 n_rdma; + u8 n_rbuf; + bool queue_status_only; + u8 sense_data[SCSI_SENSE_BUFFERSIZE]; +}; + +/** + * enum rdma_ch_state - SRP channel state. + * @CH_CONNECTING: QP is in RTR state; waiting for RTU. + * @CH_LIVE: QP is in RTS state. + * @CH_DISCONNECTING: DREQ has been received; waiting for DREP + * or DREQ has been send and waiting for DREP + * or . + * @CH_DRAINING: QP is in ERR state; waiting for last WQE event. + * @CH_RELEASING: Last WQE event has been received; releasing resources. + */ +enum rdma_ch_state { + CH_CONNECTING, + CH_LIVE, + CH_DISCONNECTING, + CH_DRAINING, + CH_RELEASING +}; + +/** + * struct srpt_rdma_ch - RDMA channel. + * @wait_queue: Allows the kernel thread to wait for more work. + * @thread: Kernel thread that processes the IB queues associated with + * the channel. + * @cm_id: IB CM ID associated with the channel. + * @qp: IB queue pair used for communicating over this channel. + * @cq: IB completion queue for this channel. + * @rq_size: IB receive queue size. + * @rsp_size IB response message size in bytes. + * @sq_wr_avail: number of work requests available in the send queue. + * @sport: pointer to the information of the HCA port used by this + * channel. + * @i_port_id: 128-bit initiator port identifier copied from SRP_LOGIN_REQ. + * @t_port_id: 128-bit target port identifier copied from SRP_LOGIN_REQ. + * @max_ti_iu_len: maximum target-to-initiator information unit length. 
+ * @req_lim: request limit: maximum number of requests that may be sent + * by the initiator without having received a response. + * @req_lim_delta: Number of credits not yet sent back to the initiator. + * @spinlock: Protects free_list and state. + * @free_list: Head of list with free send I/O contexts. + * @state: channel state. See also enum rdma_ch_state. + * @ioctx_ring: Send ring. + * @wc: IB work completion array for srpt_process_completion(). + * @list: Node for insertion in the srpt_device.rch_list list. + * @cmd_wait_list: List of SCSI commands that arrived before the RTU event. This + * list contains struct srpt_ioctx elements and is protected + * against concurrent modification by the cm_id spinlock. + * @sess: Session information associated with this SRP channel. + * @sess_name: Session name. + * @release_work: Allows scheduling of srpt_release_channel(). + * @release_done: Enables waiting for srpt_release_channel() completion. + */ +struct srpt_rdma_ch { + wait_queue_head_t wait_queue; + struct task_struct *thread; + struct ib_cm_id *cm_id; + struct ib_qp *qp; + struct ib_cq *cq; + int rq_size; + u32 rsp_size; + atomic_t sq_wr_avail; + struct srpt_port *sport; + u8 i_port_id[16]; + u8 t_port_id[16]; + int max_ti_iu_len; + atomic_t req_lim; + atomic_t req_lim_delta; + spinlock_t spinlock; + struct list_head free_list; + enum rdma_ch_state state; + struct srpt_send_ioctx **ioctx_ring; + struct ib_wc wc[16]; + struct list_head list; + struct list_head cmd_wait_list; + struct se_session *sess; + u8 sess_name[36]; + struct work_struct release_work; + struct completion *release_done; +}; + +/** + * struct srpt_port_attib - Attributes for SRPT port + * @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections. + * @srp_max_rsp_size: Maximum size of SRP response messages in bytes. + * @srp_sq_size: Shared receive queue (SRQ) size. + */ +struct srpt_port_attrib { + u32 srp_max_rdma_size; + u32 srp_max_rsp_size; + u32 srp_sq_size; +}; + +/** + * struct srpt_port - Information associated by SRPT with a single IB port. + * @sdev: backpointer to the HCA information. + * @mad_agent: per-port management datagram processing information. + * @enabled: Whether or not this target port is enabled. + * @port_guid: ASCII representation of Port GUID + * @port: one-based port number. + * @sm_lid: cached value of the port's sm_lid. + * @lid: cached value of the port's lid. + * @gid: cached value of the port's gid. + * @port_acl_lock spinlock for port_acl_list: + * @work: work structure for refreshing the aforementioned cached values. + * @port_tpg_1 Target portal group = 1 data. + * @port_wwn: Target core WWN data. + * @port_acl_list: Head of the list with all node ACLs for this port. + */ +struct srpt_port { + struct srpt_device *sdev; + struct ib_mad_agent *mad_agent; + bool enabled; + u8 port_guid[64]; + u8 port; + u16 sm_lid; + u16 lid; + union ib_gid gid; + spinlock_t port_acl_lock; + struct work_struct work; + struct se_portal_group port_tpg_1; + struct se_wwn port_wwn; + struct list_head port_acl_list; + struct srpt_port_attrib port_attrib; +}; + +/** + * struct srpt_device - Information associated by SRPT with a single HCA. + * @device: Backpointer to the struct ib_device managed by the IB core. + * @pd: IB protection domain. + * @mr: L_Key (local key) with write access to all local memory. + * @srq: Per-HCA SRQ (shared receive queue). + * @cm_id: Connection identifier. + * @dev_attr: Attributes of the InfiniBand device as obtained during the + * ib_client.add() callback. 
+ * @srq_size: SRQ size. + * @ioctx_ring: Per-HCA SRQ. + * @rch_list: Per-device channel list -- see also srpt_rdma_ch.list. + * @ch_releaseQ: Enables waiting for removal from rch_list. + * @spinlock: Protects rch_list and tpg. + * @port: Information about the ports owned by this HCA. + * @event_handler: Per-HCA asynchronous IB event handler. + * @list: Node in srpt_dev_list. + */ +struct srpt_device { + struct ib_device *device; + struct ib_pd *pd; + struct ib_mr *mr; + struct ib_srq *srq; + struct ib_cm_id *cm_id; + struct ib_device_attr dev_attr; + int srq_size; + struct srpt_recv_ioctx **ioctx_ring; + struct list_head rch_list; + wait_queue_head_t ch_releaseQ; + spinlock_t spinlock; + struct srpt_port port[2]; + struct ib_event_handler event_handler; + struct list_head list; +}; + +/** + * struct srpt_node_acl - Per-initiator ACL data (managed via configfs). + * @i_port_id: 128-bit SRP initiator port ID. + * @sport: port information. + * @nacl: Target core node ACL information. + * @list: Element of the per-HCA ACL list. + */ +struct srpt_node_acl { + u8 i_port_id[16]; + struct srpt_port *sport; + struct se_node_acl nacl; + struct list_head list; +}; + +/* + * SRP-releated SCSI persistent reservation definitions. + * + * See also SPC4r28, section 7.6.1 (Protocol specific parameters introduction). + * See also SPC4r28, section 7.6.4.5 (TransportID for initiator ports using + * SCSI over an RDMA interface). + */ + +enum { + SCSI_TRANSPORTID_PROTOCOLID_SRP = 4, +}; + +struct spc_rdma_transport_id { + uint8_t protocol_identifier; + uint8_t reserved[7]; + uint8_t i_port_id[16]; +}; + +#endif /* IB_SRPT_H */ diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index 76457d5..afc166f 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -386,7 +386,7 @@ static ssize_t evdev_read(struct file *file, char __user *buffer, struct evdev_client *client = file->private_data; struct evdev *evdev = client->evdev; struct input_event event; - int retval; + int retval = 0; if (count < input_event_size()) return -EINVAL; diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c index a588578..67bec14 100644 --- a/drivers/input/keyboard/twl4030_keypad.c +++ b/drivers/input/keyboard/twl4030_keypad.c @@ -34,7 +34,6 @@ #include <linux/i2c/twl.h> #include <linux/slab.h> - /* * The TWL4030 family chips include a keypad controller that supports * up to an 8x8 switch matrix. 
The controller can issue system wakeup @@ -302,7 +301,7 @@ static int __devinit twl4030_kp_program(struct twl4030_keypad *kp) if (twl4030_kpwrite_u8(kp, i, KEYP_DEB) < 0) return -EIO; - /* Set timeout period to 100 ms */ + /* Set timeout period to 200 ms */ i = KEYP_PERIOD_US(200000, PTV_PRESCALER); if (twl4030_kpwrite_u8(kp, (i & 0xFF), KEYP_TIMEOUT_L) < 0) return -EIO; @@ -466,4 +465,3 @@ MODULE_AUTHOR("Texas Instruments"); MODULE_DESCRIPTION("TWL4030 Keypad Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:twl4030_keypad"); - diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index b4cfc6c..5ec774d 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -512,6 +512,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"), }, }, + { + /* Lenovo Ideapad U455 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20046"), + }, + }, { } }; diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c index 8250299..4494233 100644 --- a/drivers/input/serio/serio_raw.c +++ b/drivers/input/serio/serio_raw.c @@ -164,7 +164,8 @@ static ssize_t serio_raw_read(struct file *file, char __user *buffer, struct serio_raw_client *client = file->private_data; struct serio_raw *serio_raw = client->serio_raw; char uninitialized_var(c); - ssize_t retval = 0; + ssize_t read = 0; + int retval; if (serio_raw->dead) return -ENODEV; @@ -180,13 +181,15 @@ static ssize_t serio_raw_read(struct file *file, char __user *buffer, if (serio_raw->dead) return -ENODEV; - while (retval < count && serio_raw_fetch_byte(serio_raw, &c)) { - if (put_user(c, buffer++)) - return -EFAULT; - retval++; + while (read < count && serio_raw_fetch_byte(serio_raw, &c)) { + if (put_user(c, buffer++)) { + retval = -EFAULT; + break; + } + read++; } - return retval; + return read ?: retval; } static ssize_t serio_raw_write(struct file *file, const char __user *buffer, diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index cce1f03..f75e060 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -2863,6 +2863,9 @@ static unsigned device_dma_ops_init(void) for_each_pci_dev(pdev) { if (!check_device(&pdev->dev)) { + + iommu_ignore_device(&pdev->dev); + unhandled += 1; continue; } diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 08a90b8..cee307e 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -482,23 +482,19 @@ static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va, priv = domain->priv; - if (!priv) { - ret = -ENODEV; + if (!priv) goto fail; - } fl_table = priv->pgtable; if (len != SZ_16M && len != SZ_1M && len != SZ_64K && len != SZ_4K) { pr_debug("Bad length: %d\n", len); - ret = -EINVAL; goto fail; } if (!fl_table) { pr_debug("Null page table\n"); - ret = -EINVAL; goto fail; } @@ -507,7 +503,6 @@ static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va, if (*fl_pte == 0) { pr_debug("First level PTE is 0\n"); - ret = -ENODEV; goto fail; } diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index c957c34..9ca28fc 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -403,6 +403,13 @@ config LEDS_MAX8997 This option enables support for on-chip LED drivers on MAXIM MAX8997 PMIC. 
+config LEDS_OT200 + tristate "LED support for the Bachmann OT200" + depends on LEDS_CLASS && HAS_IOMEM + help + This option enables support for the LEDs on the Bachmann OT200. + Say Y to enable LEDs on the Bachmann OT200. + config LEDS_TRIGGERS bool "LED Trigger support" depends on LEDS_CLASS diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index b8a9723..1fc6875 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o +obj-$(CONFIG_LEDS_OT200) += leds-ot200.o obj-$(CONFIG_LEDS_FSG) += leds-fsg.o obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c index 45e6878..e59c166 100644 --- a/drivers/leds/leds-lm3530.c +++ b/drivers/leds/leds-lm3530.c @@ -164,8 +164,8 @@ static int lm3530_init_registers(struct lm3530_data *drvdata) if (drvdata->mode == LM3530_BL_MODE_ALS) { if (pltfm->als_vmax == 0) { - pltfm->als_vmin = als_vmin = 0; - pltfm->als_vmin = als_vmax = LM3530_ALS_WINDOW_mV; + pltfm->als_vmin = 0; + pltfm->als_vmax = LM3530_ALS_WINDOW_mV; } als_vmin = pltfm->als_vmin; diff --git a/drivers/leds/leds-ot200.c b/drivers/leds/leds-ot200.c new file mode 100644 index 0000000..c464682 --- /dev/null +++ b/drivers/leds/leds-ot200.c @@ -0,0 +1,171 @@ +/* + * Bachmann ot200 leds driver. + * + * Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de> + * Christian Gmeiner <christian.gmeiner@gmail.com> + * + * License: GPL as published by the FSF. + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/leds.h> +#include <linux/io.h> +#include <linux/module.h> + + +struct ot200_led { + struct led_classdev cdev; + const char *name; + unsigned long port; + u8 mask; +}; + +/* + * The device has three leds on the back panel (led_err, led_init and led_run) + * and can handle up to seven leds on the front panel. + */ + +static struct ot200_led leds[] = { + { + .name = "led_run", + .port = 0x5a, + .mask = BIT(0), + }, + { + .name = "led_init", + .port = 0x5a, + .mask = BIT(1), + }, + { + .name = "led_err", + .port = 0x5a, + .mask = BIT(2), + }, + { + .name = "led_1", + .port = 0x49, + .mask = BIT(7), + }, + { + .name = "led_2", + .port = 0x49, + .mask = BIT(6), + }, + { + .name = "led_3", + .port = 0x49, + .mask = BIT(5), + }, + { + .name = "led_4", + .port = 0x49, + .mask = BIT(4), + }, + { + .name = "led_5", + .port = 0x49, + .mask = BIT(3), + }, + { + .name = "led_6", + .port = 0x49, + .mask = BIT(2), + }, + { + .name = "led_7", + .port = 0x49, + .mask = BIT(1), + } +}; + +static DEFINE_SPINLOCK(value_lock); + +/* + * we need to store the current led states, as it is not + * possible to read the current led state via inb(). 
+ */ +static u8 leds_back; +static u8 leds_front; + +static void ot200_led_brightness_set(struct led_classdev *led_cdev, + enum led_brightness value) +{ + struct ot200_led *led = container_of(led_cdev, struct ot200_led, cdev); + u8 *val; + unsigned long flags; + + spin_lock_irqsave(&value_lock, flags); + + if (led->port == 0x49) + val = &leds_front; + else if (led->port == 0x5a) + val = &leds_back; + else + BUG(); + + if (value == LED_OFF) + *val &= ~led->mask; + else + *val |= led->mask; + + outb(*val, led->port); + spin_unlock_irqrestore(&value_lock, flags); +} + +static int __devinit ot200_led_probe(struct platform_device *pdev) +{ + int i; + int ret; + + for (i = 0; i < ARRAY_SIZE(leds); i++) { + + leds[i].cdev.name = leds[i].name; + leds[i].cdev.brightness_set = ot200_led_brightness_set; + + ret = led_classdev_register(&pdev->dev, &leds[i].cdev); + if (ret < 0) + goto err; + } + + leds_front = 0; /* turn off all front leds */ + leds_back = BIT(1); /* turn on init led */ + outb(leds_front, 0x49); + outb(leds_back, 0x5a); + + return 0; + +err: + for (i = i - 1; i >= 0; i--) + led_classdev_unregister(&leds[i].cdev); + + return ret; +} + +static int __devexit ot200_led_remove(struct platform_device *pdev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(leds); i++) + led_classdev_unregister(&leds[i].cdev); + + return 0; +} + +static struct platform_driver ot200_led_driver = { + .probe = ot200_led_probe, + .remove = __devexit_p(ot200_led_remove), + .driver = { + .name = "leds-ot200", + .owner = THIS_MODULE, + }, +}; + +module_platform_driver(ot200_led_driver); + +MODULE_AUTHOR("Sebastian A. Siewior <bigeasy@linutronix.de>"); +MODULE_DESCRIPTION("ot200 LED driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:leds-ot200"); diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index c2907d8..86cb7e5 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -56,7 +56,8 @@ struct raid_dev { struct raid_set { struct dm_target *ti; - uint64_t print_flags; + uint32_t bitmap_loaded; + uint32_t print_flags; struct mddev md; struct raid_type *raid_type; @@ -1085,7 +1086,7 @@ static int raid_status(struct dm_target *ti, status_type_t type, raid_param_cnt += 2; } - raid_param_cnt += (hweight64(rs->print_flags & ~DMPF_REBUILD) * 2); + raid_param_cnt += (hweight32(rs->print_flags & ~DMPF_REBUILD) * 2); if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC)) raid_param_cnt--; @@ -1197,7 +1198,12 @@ static void raid_resume(struct dm_target *ti) { struct raid_set *rs = ti->private; - bitmap_load(&rs->md); + if (!rs->bitmap_loaded) { + bitmap_load(&rs->md); + rs->bitmap_loaded = 1; + } else + md_wakeup_thread(rs->md.thread); + mddev_resume(&rs->md); } diff --git a/drivers/md/md.c b/drivers/md/md.c index 9417ae2..ce88755 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -7333,7 +7333,8 @@ void md_do_sync(struct mddev *mddev) printk(KERN_INFO "md: checkpointing %s of %s.\n", desc, mdname(mddev)); - mddev->recovery_cp = mddev->curr_resync; + mddev->recovery_cp = + mddev->curr_resync_completed; } } else mddev->recovery_cp = MaxSector; @@ -7351,9 +7352,9 @@ void md_do_sync(struct mddev *mddev) rcu_read_unlock(); } } + skip: set_bit(MD_CHANGE_DEVS, &mddev->flags); - skip: if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { /* We completed so min/max setting can be forgotten if used. 
*/ if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) diff --git a/drivers/media/common/tuners/tuner-xc2028.c b/drivers/media/common/tuners/tuner-xc2028.c index 2755599..b5ee3eb 100644 --- a/drivers/media/common/tuners/tuner-xc2028.c +++ b/drivers/media/common/tuners/tuner-xc2028.c @@ -24,6 +24,21 @@ #include <linux/dvb/frontend.h> #include "dvb_frontend.h" +/* Registers (Write-only) */ +#define XREG_INIT 0x00 +#define XREG_RF_FREQ 0x02 +#define XREG_POWER_DOWN 0x08 + +/* Registers (Read-only) */ +#define XREG_FREQ_ERROR 0x01 +#define XREG_LOCK 0x02 +#define XREG_VERSION 0x04 +#define XREG_PRODUCT_ID 0x08 +#define XREG_HSYNC_FREQ 0x10 +#define XREG_FRAME_LINES 0x20 +#define XREG_SNR 0x40 + +#define XREG_ADC_ENV 0x0100 static int debug; module_param(debug, int, 0644); @@ -885,7 +900,7 @@ static int xc2028_signal(struct dvb_frontend *fe, u16 *strength) mutex_lock(&priv->lock); /* Sync Lock Indicator */ - rc = xc2028_get_reg(priv, 0x0002, &frq_lock); + rc = xc2028_get_reg(priv, XREG_LOCK, &frq_lock); if (rc < 0) goto ret; @@ -894,7 +909,7 @@ static int xc2028_signal(struct dvb_frontend *fe, u16 *strength) signal = 1 << 11; /* Get SNR of the video signal */ - rc = xc2028_get_reg(priv, 0x0040, &signal); + rc = xc2028_get_reg(priv, XREG_SNR, &signal); if (rc < 0) goto ret; @@ -1019,9 +1034,9 @@ static int generic_set_freq(struct dvb_frontend *fe, u32 freq /* in HZ */, /* CMD= Set frequency */ if (priv->firm_version < 0x0202) - rc = send_seq(priv, {0x00, 0x02, 0x00, 0x00}); + rc = send_seq(priv, {0x00, XREG_RF_FREQ, 0x00, 0x00}); else - rc = send_seq(priv, {0x80, 0x02, 0x00, 0x00}); + rc = send_seq(priv, {0x80, XREG_RF_FREQ, 0x00, 0x00}); if (rc < 0) goto ret; @@ -1201,9 +1216,9 @@ static int xc2028_sleep(struct dvb_frontend *fe) mutex_lock(&priv->lock); if (priv->firm_version < 0x0202) - rc = send_seq(priv, {0x00, 0x08, 0x00, 0x00}); + rc = send_seq(priv, {0x00, XREG_POWER_DOWN, 0x00, 0x00}); else - rc = send_seq(priv, {0x80, 0x08, 0x00, 0x00}); + rc = send_seq(priv, {0x80, XREG_POWER_DOWN, 0x00, 0x00}); priv->cur_fw.type = 0; /* need firmware reload */ diff --git a/drivers/media/common/tuners/xc4000.c b/drivers/media/common/tuners/xc4000.c index d218c1d..6839711 100644 --- a/drivers/media/common/tuners/xc4000.c +++ b/drivers/media/common/tuners/xc4000.c @@ -154,6 +154,8 @@ struct xc4000_priv { #define XREG_SNR 0x06 #define XREG_VERSION 0x07 #define XREG_PRODUCT_ID 0x08 +#define XREG_SIGNAL_LEVEL 0x0A +#define XREG_NOISE_LEVEL 0x0B /* Basic firmware description. 
This will remain with @@ -486,6 +488,16 @@ static int xc_get_quality(struct xc4000_priv *priv, u16 *quality) return xc4000_readreg(priv, XREG_QUALITY, quality); } +static int xc_get_signal_level(struct xc4000_priv *priv, u16 *signal) +{ + return xc4000_readreg(priv, XREG_SIGNAL_LEVEL, signal); +} + +static int xc_get_noise_level(struct xc4000_priv *priv, u16 *noise) +{ + return xc4000_readreg(priv, XREG_NOISE_LEVEL, noise); +} + static u16 xc_wait_for_lock(struct xc4000_priv *priv) { u16 lock_state = 0; @@ -1089,6 +1101,8 @@ static void xc_debug_dump(struct xc4000_priv *priv) u32 hsync_freq_hz = 0; u16 frame_lines; u16 quality; + u16 signal = 0; + u16 noise = 0; u8 hw_majorversion = 0, hw_minorversion = 0; u8 fw_majorversion = 0, fw_minorversion = 0; @@ -1119,6 +1133,12 @@ static void xc_debug_dump(struct xc4000_priv *priv) xc_get_quality(priv, &quality); dprintk(1, "*** Quality (0:<8dB, 7:>56dB) = %d\n", quality); + + xc_get_signal_level(priv, &signal); + dprintk(1, "*** Signal level = -%ddB (%d)\n", signal >> 8, signal); + + xc_get_noise_level(priv, &noise); + dprintk(1, "*** Noise level = %ddB (%d)\n", noise >> 8, noise); } static int xc4000_set_params(struct dvb_frontend *fe) @@ -1432,6 +1452,71 @@ fail: return ret; } +static int xc4000_get_signal(struct dvb_frontend *fe, u16 *strength) +{ + struct xc4000_priv *priv = fe->tuner_priv; + u16 value = 0; + int rc; + + mutex_lock(&priv->lock); + rc = xc4000_readreg(priv, XREG_SIGNAL_LEVEL, &value); + mutex_unlock(&priv->lock); + + if (rc < 0) + goto ret; + + /* Informations from real testing of DVB-T and radio part, + coeficient for one dB is 0xff. + */ + tuner_dbg("Signal strength: -%ddB (%05d)\n", value >> 8, value); + + /* all known digital modes */ + if ((priv->video_standard == XC4000_DTV6) || + (priv->video_standard == XC4000_DTV7) || + (priv->video_standard == XC4000_DTV7_8) || + (priv->video_standard == XC4000_DTV8)) + goto digital; + + /* Analog mode has NOISE LEVEL important, signal + depends only on gain of antenna and amplifiers, + but it doesn't tell anything about real quality + of reception. + */ + mutex_lock(&priv->lock); + rc = xc4000_readreg(priv, XREG_NOISE_LEVEL, &value); + mutex_unlock(&priv->lock); + + tuner_dbg("Noise level: %ddB (%05d)\n", value >> 8, value); + + /* highest noise level: 32dB */ + if (value >= 0x2000) { + value = 0; + } else { + value = ~value << 3; + } + + goto ret; + + /* Digital mode has SIGNAL LEVEL important and real + noise level is stored in demodulator registers. 
+ */ +digital: + /* best signal: -50dB */ + if (value <= 0x3200) { + value = 0xffff; + /* minimum: -114dB - should be 0x7200 but real zero is 0x713A */ + } else if (value >= 0x713A) { + value = 0; + } else { + value = ~(value - 0x3200) << 2; + } + +ret: + *strength = value; + + return rc; +} + static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq) { struct xc4000_priv *priv = fe->tuner_priv; @@ -1559,6 +1644,7 @@ static const struct dvb_tuner_ops xc4000_tuner_ops = { .set_params = xc4000_set_params, .set_analog_params = xc4000_set_analog_params, .get_frequency = xc4000_get_frequency, + .get_rf_strength = xc4000_get_signal, .get_bandwidth = xc4000_get_bandwidth, .get_status = xc4000_get_status }; diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c index b15db4f..fbbe545 100644 --- a/drivers/media/dvb/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb/dvb-core/dvb_frontend.c @@ -904,8 +904,11 @@ static int dvb_frontend_clear_cache(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; int i; + u32 delsys; + delsys = c->delivery_system; memset(c, 0, sizeof(struct dtv_frontend_properties)); + c->delivery_system = delsys; c->state = DTV_CLEAR; @@ -1009,25 +1012,6 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = { _DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 1, 0), _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 1, 0), - _DTV_CMD(DTV_ISDBT_PARTIAL_RECEPTION, 0, 0), - _DTV_CMD(DTV_ISDBT_SOUND_BROADCASTING, 0, 0), - _DTV_CMD(DTV_ISDBT_SB_SUBCHANNEL_ID, 0, 0), - _DTV_CMD(DTV_ISDBT_SB_SEGMENT_IDX, 0, 0), - _DTV_CMD(DTV_ISDBT_SB_SEGMENT_COUNT, 0, 0), - _DTV_CMD(DTV_ISDBT_LAYER_ENABLED, 0, 0), - _DTV_CMD(DTV_ISDBT_LAYERA_FEC, 0, 0), - _DTV_CMD(DTV_ISDBT_LAYERA_MODULATION, 0, 0), - _DTV_CMD(DTV_ISDBT_LAYERA_SEGMENT_COUNT, 0, 0), - _DTV_CMD(DTV_ISDBT_LAYERA_TIME_INTERLEAVING, 0, 0), - _DTV_CMD(DTV_ISDBT_LAYERB_FEC, 0, 0), - _DTV_CMD(DTV_ISDBT_LAYERB_MODULATION, 0, 0), - _DTV_CMD(DTV_ISDBT_LAYERB_SEGMENT_COUNT, 0, 0), - _DTV_CMD(DTV_ISDBT_LAYERB_TIME_INTERLEAVING, 0, 0), - _DTV_CMD(DTV_ISDBT_LAYERC_FEC, 0, 0), - _DTV_CMD(DTV_ISDBT_LAYERC_MODULATION, 0, 0), - _DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 0, 0), - _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 0, 0), - _DTV_CMD(DTV_ISDBS_TS_ID, 1, 0), _DTV_CMD(DTV_DVBT2_PLP_ID, 1, 0), @@ -1413,6 +1397,15 @@ static int set_delivery_system(struct dvb_frontend *fe, u32 desired_system) struct dtv_frontend_properties *c = &fe->dtv_property_cache; enum dvbv3_emulation_type type; + /* + * It was reported that some old DVBv5 applications were + * filling delivery_system with SYS_UNDEFINED. If this happens, + * assume that the application wants to use the first supported + * delivery system. + */ + if (c->delivery_system == SYS_UNDEFINED) + c->delivery_system = fe->ops.delsys[0]; + if (desired_system == SYS_UNDEFINED) { /* * A DVBv3 call doesn't know what's the desired system. @@ -1732,6 +1725,7 @@ static int dvb_frontend_ioctl_properties(struct file *file, { struct dvb_device *dvbdev = file->private_data; struct dvb_frontend *fe = dvbdev->priv; + struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; int err = 0; @@ -1798,9 +1792,14 @@ static int dvb_frontend_ioctl_properties(struct file *file, /* * Fills the cache out struct with the cache contents, plus - * the data retrieved from get_frontend. + * the data retrieved from get_frontend, if the frontend + * is not idle. 
Otherwise, returns the cached content */ - dtv_get_frontend(fe, NULL); + if (fepriv->state != FESTATE_IDLE) { + err = dtv_get_frontend(fe, NULL); + if (err < 0) + goto out; + } for (i = 0; i < tvps->num; i++) { err = dtv_property_process_get(fe, c, tvp + i, file); if (err < 0) diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c index d661929..cf0c318 100644 --- a/drivers/media/dvb/dvb-usb/anysee.c +++ b/drivers/media/dvb/dvb-usb/anysee.c @@ -877,24 +877,17 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap) case ANYSEE_HW_508T2C: /* 20 */ /* E7 T2C */ + if (state->fe_id) + break; + /* enable DVB-T/T2/C demod on IOE[5] */ ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 5), 0x20); if (ret) goto error; - if (state->fe_id == 0) { - /* DVB-T/T2 */ - adap->fe_adap[state->fe_id].fe = - dvb_attach(cxd2820r_attach, - &anysee_cxd2820r_config, - &adap->dev->i2c_adap, NULL); - } else { - /* DVB-C */ - adap->fe_adap[state->fe_id].fe = - dvb_attach(cxd2820r_attach, - &anysee_cxd2820r_config, - &adap->dev->i2c_adap, adap->fe_adap[0].fe); - } + /* attach demod */ + adap->fe_adap[state->fe_id].fe = dvb_attach(cxd2820r_attach, + &anysee_cxd2820r_config, &adap->dev->i2c_adap); state->has_ci = true; @@ -1195,6 +1188,14 @@ static int anysee_ci_init(struct dvb_usb_device *d) if (ret) return ret; + ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 2)|(0 << 1)|(0 << 0), 0x07); + if (ret) + return ret; + + ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 2)|(1 << 1)|(1 << 0), 0x07); + if (ret) + return ret; + ret = dvb_ca_en50221_init(&d->adapter[0].dvb_adap, &state->ci, 0, 1); if (ret) return ret; diff --git a/drivers/media/dvb/dvb-usb/cinergyT2-fe.c b/drivers/media/dvb/dvb-usb/cinergyT2-fe.c index 8a57ed8..1efc028 100644 --- a/drivers/media/dvb/dvb-usb/cinergyT2-fe.c +++ b/drivers/media/dvb/dvb-usb/cinergyT2-fe.c @@ -276,14 +276,15 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe) param.flags = 0; switch (fep->bandwidth_hz) { + default: case 8000000: - param.bandwidth = 0; + param.bandwidth = 8; break; case 7000000: - param.bandwidth = 1; + param.bandwidth = 7; break; case 6000000: - param.bandwidth = 2; + param.bandwidth = 6; break; } diff --git a/drivers/media/dvb/dvb-usb/dib0700.h b/drivers/media/dvb/dvb-usb/dib0700.h index 9bd6d51..7de125c 100644 --- a/drivers/media/dvb/dvb-usb/dib0700.h +++ b/drivers/media/dvb/dvb-usb/dib0700.h @@ -48,6 +48,8 @@ struct dib0700_state { u8 disable_streaming_master_mode; u32 fw_version; u32 nb_packet_buffer_size; + int (*read_status)(struct dvb_frontend *, fe_status_t *); + int (*sleep)(struct dvb_frontend* fe); u8 buf[255]; }; diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c index 2069994..070e82aa 100644 --- a/drivers/media/dvb/dvb-usb/dib0700_core.c +++ b/drivers/media/dvb/dvb-usb/dib0700_core.c @@ -834,6 +834,7 @@ static struct usb_driver dib0700_driver = { module_usb_driver(dib0700_driver); +MODULE_FIRMWARE("dvb-usb-dib0700-1.20.fw"); MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>"); MODULE_DESCRIPTION("Driver for devices based on DiBcom DiB0700 - USB bridge"); MODULE_VERSION("1.0"); diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c index 81ef4b4..f9e966a 100644 --- a/drivers/media/dvb/dvb-usb/dib0700_devices.c +++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c @@ -3066,19 +3066,25 @@ static struct dib7000p_config stk7070pd_dib7000p_config[2] = { } }; -static int stk7070pd_frontend_attach0(struct dvb_usb_adapter 
*adap) +static void stk7070pd_init(struct dvb_usb_device *dev) { - dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); + dib0700_set_gpio(dev, GPIO6, GPIO_OUT, 1); msleep(10); - dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); - dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); - dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); - dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); + dib0700_set_gpio(dev, GPIO9, GPIO_OUT, 1); + dib0700_set_gpio(dev, GPIO4, GPIO_OUT, 1); + dib0700_set_gpio(dev, GPIO7, GPIO_OUT, 1); + dib0700_set_gpio(dev, GPIO10, GPIO_OUT, 0); - dib0700_ctrl_clock(adap->dev, 72, 1); + dib0700_ctrl_clock(dev, 72, 1); msleep(10); - dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); + dib0700_set_gpio(dev, GPIO10, GPIO_OUT, 1); +} + +static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap) +{ + stk7070pd_init(adap->dev); + msleep(10); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); @@ -3099,6 +3105,77 @@ static int stk7070pd_frontend_attach1(struct dvb_usb_adapter *adap) return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } +static int novatd_read_status_override(struct dvb_frontend *fe, + fe_status_t *stat) +{ + struct dvb_usb_adapter *adap = fe->dvb->priv; + struct dvb_usb_device *dev = adap->dev; + struct dib0700_state *state = dev->priv; + int ret; + + ret = state->read_status(fe, stat); + + if (!ret) + dib0700_set_gpio(dev, adap->id == 0 ? GPIO1 : GPIO0, GPIO_OUT, + !!(*stat & FE_HAS_LOCK)); + + return ret; +} + +static int novatd_sleep_override(struct dvb_frontend* fe) +{ + struct dvb_usb_adapter *adap = fe->dvb->priv; + struct dvb_usb_device *dev = adap->dev; + struct dib0700_state *state = dev->priv; + + /* turn off LED */ + dib0700_set_gpio(dev, adap->id == 0 ? GPIO1 : GPIO0, GPIO_OUT, 0); + + return state->sleep(fe); +} + +/** + * novatd_frontend_attach - Nova-TD specific attach + * + * Nova-TD has GPIO0, 1 and 2 for LEDs. So do not fiddle with them except for + * information purposes. + */ +static int novatd_frontend_attach(struct dvb_usb_adapter *adap) +{ + struct dvb_usb_device *dev = adap->dev; + struct dib0700_state *st = dev->priv; + + if (adap->id == 0) { + stk7070pd_init(dev); + + /* turn the power LED on, the other two off (just in case) */ + dib0700_set_gpio(dev, GPIO0, GPIO_OUT, 0); + dib0700_set_gpio(dev, GPIO1, GPIO_OUT, 0); + dib0700_set_gpio(dev, GPIO2, GPIO_OUT, 1); + + if (dib7000p_i2c_enumeration(&dev->i2c_adap, 2, 18, + stk7070pd_dib7000p_config) != 0) { + err("%s: dib7000p_i2c_enumeration failed. Cannot continue\n", + __func__); + return -ENODEV; + } + } + + adap->fe_adap[0].fe = dvb_attach(dib7000p_attach, &dev->i2c_adap, + adap->id == 0 ? 
0x80 : 0x82, + &stk7070pd_dib7000p_config[adap->id]); + + if (adap->fe_adap[0].fe == NULL) + return -ENODEV; + + st->read_status = adap->fe_adap[0].fe->ops.read_status; + adap->fe_adap[0].fe->ops.read_status = novatd_read_status_override; + st->sleep = adap->fe_adap[0].fe->ops.sleep; + adap->fe_adap[0].fe->ops.sleep = novatd_sleep_override; + + return 0; +} + /* S5H1411 */ static struct s5h1411_config pinnacle_801e_config = { .output_mode = S5H1411_PARALLEL_OUTPUT, @@ -3870,6 +3947,57 @@ struct dvb_usb_device_properties dib0700_devices[] = { .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, + .frontend_attach = novatd_frontend_attach, + .tuner_attach = dib7070p_tuner_attach, + + DIB0700_DEFAULT_STREAMING_CONFIG(0x02), + }}, + .size_of_priv = sizeof(struct dib0700_adapter_state), + }, { + .num_frontends = 1, + .fe = {{ + .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, + .pid_filter_count = 32, + .pid_filter = stk70x0p_pid_filter, + .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, + .frontend_attach = novatd_frontend_attach, + .tuner_attach = dib7070p_tuner_attach, + + DIB0700_DEFAULT_STREAMING_CONFIG(0x03), + }}, + .size_of_priv = sizeof(struct dib0700_adapter_state), + } + }, + + .num_device_descs = 1, + .devices = { + { "Hauppauge Nova-TD Stick (52009)", + { &dib0700_usb_id_table[35], NULL }, + { NULL }, + }, + }, + + .rc.core = { + .rc_interval = DEFAULT_RC_INTERVAL, + .rc_codes = RC_MAP_DIB0700_RC5_TABLE, + .module_name = "dib0700", + .rc_query = dib0700_rc_query_old_firmware, + .allowed_protos = RC_TYPE_RC5 | + RC_TYPE_RC6 | + RC_TYPE_NEC, + .change_protocol = dib0700_change_protocol, + }, + }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, + + .num_adapters = 2, + .adapter = { + { + .num_frontends = 1, + .fe = {{ + .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, + .pid_filter_count = 32, + .pid_filter = stk70x0p_pid_filter, + .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = stk7070pd_frontend_attach0, .tuner_attach = dib7070p_tuner_attach, @@ -3892,7 +4020,7 @@ struct dvb_usb_device_properties dib0700_devices[] = { } }, - .num_device_descs = 6, + .num_device_descs = 5, .devices = { { "DiBcom STK7070PD reference design", { &dib0700_usb_id_table[17], NULL }, @@ -3902,10 +4030,6 @@ struct dvb_usb_device_properties dib0700_devices[] = { { &dib0700_usb_id_table[18], NULL }, { NULL }, }, - { "Hauppauge Nova-TD Stick (52009)", - { &dib0700_usb_id_table[35], NULL }, - { NULL }, - }, { "Hauppauge Nova-TD-500 (84xxx)", { &dib0700_usb_id_table[36], NULL }, { NULL }, diff --git a/drivers/media/dvb/frontends/cxd2820r.h b/drivers/media/dvb/frontends/cxd2820r.h index cf0f546..5aa306e 100644 --- a/drivers/media/dvb/frontends/cxd2820r.h +++ b/drivers/media/dvb/frontends/cxd2820r.h @@ -77,14 +77,12 @@ struct cxd2820r_config { (defined(CONFIG_DVB_CXD2820R_MODULE) && defined(MODULE)) extern struct dvb_frontend *cxd2820r_attach( const struct cxd2820r_config *config, - struct i2c_adapter *i2c, - struct dvb_frontend *fe + struct i2c_adapter *i2c ); #else static inline struct dvb_frontend *cxd2820r_attach( const struct cxd2820r_config *config, - struct i2c_adapter *i2c, - struct dvb_frontend *fe + struct i2c_adapter *i2c ) { printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); diff --git a/drivers/media/dvb/frontends/cxd2820r_core.c b/drivers/media/dvb/frontends/cxd2820r_core.c index 93e1b12..5c7c2aa 100644 --- a/drivers/media/dvb/frontends/cxd2820r_core.c +++ 
b/drivers/media/dvb/frontends/cxd2820r_core.c @@ -309,9 +309,14 @@ static int cxd2820r_read_status(struct dvb_frontend *fe, fe_status_t *status) static int cxd2820r_get_frontend(struct dvb_frontend *fe) { + struct cxd2820r_priv *priv = fe->demodulator_priv; int ret; dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system); + + if (priv->delivery_system == SYS_UNDEFINED) + return 0; + switch (fe->dtv_property_cache.delivery_system) { case SYS_DVBT: ret = cxd2820r_get_frontend_t(fe); @@ -476,11 +481,20 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe) dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system); /* switch between DVB-T and DVB-T2 when tune fails */ - if (priv->last_tune_failed && (priv->delivery_system != SYS_DVBC_ANNEX_A)) { - if (priv->delivery_system == SYS_DVBT) + if (priv->last_tune_failed) { + if (priv->delivery_system == SYS_DVBT) { + ret = cxd2820r_sleep_t(fe); + if (ret) + goto error; + c->delivery_system = SYS_DVBT2; - else + } else if (priv->delivery_system == SYS_DVBT2) { + ret = cxd2820r_sleep_t2(fe); + if (ret) + goto error; + c->delivery_system = SYS_DVBT; + } } /* set frontend */ @@ -492,6 +506,7 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe) /* frontend lock wait loop count */ switch (priv->delivery_system) { case SYS_DVBT: + case SYS_DVBC_ANNEX_A: i = 20; break; case SYS_DVBT2: @@ -556,7 +571,7 @@ static const struct dvb_frontend_ops cxd2820r_ops = { .delsys = { SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A }, /* default: DVB-T/T2 */ .info = { - .name = "Sony CXD2820R (DVB-T/T2)", + .name = "Sony CXD2820R", .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | @@ -566,7 +581,9 @@ static const struct dvb_frontend_ops cxd2820r_ops = { FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | + FE_CAN_QAM_32 | FE_CAN_QAM_64 | + FE_CAN_QAM_128 | FE_CAN_QAM_256 | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | @@ -596,8 +613,7 @@ static const struct dvb_frontend_ops cxd2820r_ops = { }; struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *cfg, - struct i2c_adapter *i2c, - struct dvb_frontend *fe) + struct i2c_adapter *i2c) { struct cxd2820r_priv *priv = NULL; int ret; diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c index 9387770..af65d01 100644 --- a/drivers/media/dvb/frontends/ds3000.c +++ b/drivers/media/dvb/frontends/ds3000.c @@ -1195,7 +1195,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe) for (i = 0; i < 30 ; i++) { ds3000_read_status(fe, &status); - if (status && FE_HAS_LOCK) + if (status & FE_HAS_LOCK) break; msleep(10); diff --git a/drivers/media/dvb/frontends/mb86a20s.c b/drivers/media/dvb/frontends/mb86a20s.c index 7fa3e47..fade566 100644 --- a/drivers/media/dvb/frontends/mb86a20s.c +++ b/drivers/media/dvb/frontends/mb86a20s.c @@ -402,7 +402,7 @@ static int mb86a20s_get_modulation(struct mb86a20s_state *state, [2] = 0x8e, /* Layer C */ }; - if (layer > ARRAY_SIZE(reg)) + if (layer >= ARRAY_SIZE(reg)) return -EINVAL; rc = mb86a20s_writereg(state, 0x6d, reg[layer]); if (rc < 0) @@ -435,7 +435,7 @@ static int mb86a20s_get_fec(struct mb86a20s_state *state, [2] = 0x8f, /* Layer C */ }; - if (layer > ARRAY_SIZE(reg)) + if (layer >= ARRAY_SIZE(reg)) return -EINVAL; rc = mb86a20s_writereg(state, 0x6d, reg[layer]); if (rc < 0) @@ -470,7 +470,7 @@ static int mb86a20s_get_interleaving(struct mb86a20s_state *state, [2] = 0x90, /* Layer C */ }; - if (layer > ARRAY_SIZE(reg)) + if (layer >= ARRAY_SIZE(reg)) return -EINVAL; rc = mb86a20s_writereg(state, 0x6d, 
reg[layer]); if (rc < 0) @@ -494,7 +494,7 @@ static int mb86a20s_get_segment_count(struct mb86a20s_state *state, [2] = 0x91, /* Layer C */ }; - if (layer > ARRAY_SIZE(reg)) + if (layer >= ARRAY_SIZE(reg)) return -EINVAL; rc = mb86a20s_writereg(state, 0x6d, reg[layer]); if (rc < 0) diff --git a/drivers/media/dvb/frontends/tda18271c2dd.c b/drivers/media/dvb/frontends/tda18271c2dd.c index 86da3d8..ad7c72e 100644 --- a/drivers/media/dvb/frontends/tda18271c2dd.c +++ b/drivers/media/dvb/frontends/tda18271c2dd.c @@ -29,7 +29,6 @@ #include <linux/delay.h> #include <linux/firmware.h> #include <linux/i2c.h> -#include <linux/version.h> #include <asm/div64.h> #include "dvb_frontend.h" diff --git a/drivers/media/video/as3645a.c b/drivers/media/video/as3645a.c index ec859a5..f241702 100644 --- a/drivers/media/video/as3645a.c +++ b/drivers/media/video/as3645a.c @@ -29,6 +29,7 @@ #include <linux/i2c.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/slab.h> #include <media/as3645a.h> #include <media/v4l2-ctrls.h> diff --git a/drivers/media/video/atmel-isi.c b/drivers/media/video/atmel-isi.c index 9fe4519..ec3f6a0 100644 --- a/drivers/media/video/atmel-isi.c +++ b/drivers/media/video/atmel-isi.c @@ -922,7 +922,9 @@ static int __devexit atmel_isi_remove(struct platform_device *pdev) isi->fb_descriptors_phys); iounmap(isi->regs); + clk_unprepare(isi->mck); clk_put(isi->mck); + clk_unprepare(isi->pclk); clk_put(isi->pclk); kfree(isi); @@ -955,6 +957,10 @@ static int __devinit atmel_isi_probe(struct platform_device *pdev) if (IS_ERR(pclk)) return PTR_ERR(pclk); + ret = clk_prepare(pclk); + if (ret) + goto err_clk_prepare_pclk; + isi = kzalloc(sizeof(struct atmel_isi), GFP_KERNEL); if (!isi) { ret = -ENOMEM; @@ -978,6 +984,10 @@ static int __devinit atmel_isi_probe(struct platform_device *pdev) goto err_clk_get; } + ret = clk_prepare(isi->mck); + if (ret) + goto err_clk_prepare_mck; + /* Set ISI_MCK's frequency, it should be faster than pixel clock */ ret = clk_set_rate(isi->mck, pdata->mck_hz); if (ret < 0) @@ -1059,10 +1069,14 @@ err_alloc_ctx: isi->fb_descriptors_phys); err_alloc_descriptors: err_set_mck_rate: + clk_unprepare(isi->mck); +err_clk_prepare_mck: clk_put(isi->mck); err_clk_get: kfree(isi); err_alloc_isi: + clk_unprepare(pclk); +err_clk_prepare_pclk: clk_put(pclk); return ret; diff --git a/drivers/media/video/cx18/cx18-fileops.c b/drivers/media/video/cx18/cx18-fileops.c index 14cb961..4bfd865 100644 --- a/drivers/media/video/cx18/cx18-fileops.c +++ b/drivers/media/video/cx18/cx18-fileops.c @@ -751,20 +751,10 @@ int cx18_v4l2_close(struct file *filp) CX18_DEBUG_IOCTL("close() of %s\n", s->name); - v4l2_fh_del(fh); - v4l2_fh_exit(fh); - - /* Easy case first: this stream was never claimed by us */ - if (s->id != id->open_id) { - kfree(id); - return 0; - } - - /* 'Unclaim' this stream */ - - /* Stop radio */ mutex_lock(&cx->serialize_lock); - if (id->type == CX18_ENC_STREAM_TYPE_RAD) { + /* Stop radio */ + if (id->type == CX18_ENC_STREAM_TYPE_RAD && + v4l2_fh_is_singular_file(filp)) { /* Closing radio device, return to TV mode */ cx18_mute(cx); /* Mark that the radio is no longer in use */ @@ -781,10 +771,14 @@ int cx18_v4l2_close(struct file *filp) } /* Done! Unmute and continue. 
*/ cx18_unmute(cx); - cx18_release_stream(s); - } else { - cx18_stop_capture(id, 0); } + + v4l2_fh_del(fh); + v4l2_fh_exit(fh); + + /* 'Unclaim' this stream */ + if (s->id == id->open_id) + cx18_stop_capture(id, 0); kfree(id); mutex_unlock(&cx->serialize_lock); return 0; @@ -810,21 +804,15 @@ static int cx18_serialized_open(struct cx18_stream *s, struct file *filp) item->open_id = cx->open_id++; filp->private_data = &item->fh; + v4l2_fh_add(&item->fh); - if (item->type == CX18_ENC_STREAM_TYPE_RAD) { - /* Try to claim this stream */ - if (cx18_claim_stream(item, item->type)) { - /* No, it's already in use */ - v4l2_fh_exit(&item->fh); - kfree(item); - return -EBUSY; - } - + if (item->type == CX18_ENC_STREAM_TYPE_RAD && + v4l2_fh_is_singular_file(filp)) { if (!test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) { if (atomic_read(&cx->ana_capturing) > 0) { /* switching to radio while capture is in progress is not polite */ - cx18_release_stream(s); + v4l2_fh_del(&item->fh); v4l2_fh_exit(&item->fh); kfree(item); return -EBUSY; @@ -842,7 +830,6 @@ static int cx18_serialized_open(struct cx18_stream *s, struct file *filp) /* Done! Unmute and continue. */ cx18_unmute(cx); } - v4l2_fh_add(&item->fh); return 0; } diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c index 919ed77..875a7ce 100644 --- a/drivers/media/video/cx231xx/cx231xx-cards.c +++ b/drivers/media/video/cx231xx/cx231xx-cards.c @@ -1052,7 +1052,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface, dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev == NULL) { cx231xx_err(DRIVER_NAME ": out of memory!\n"); - clear_bit(dev->devno, &cx231xx_devused); + clear_bit(nr, &cx231xx_devused); return -ENOMEM; } diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c index 3c01be9..19b5499 100644 --- a/drivers/media/video/cx23885/cx23885-cards.c +++ b/drivers/media/video/cx23885/cx23885-cards.c @@ -213,8 +213,8 @@ struct cx23885_board cx23885_boards[] = { .portc = CX23885_MPEG_DVB, .tuner_type = TUNER_XC4000, .tuner_addr = 0x61, - .radio_type = TUNER_XC4000, - .radio_addr = 0x61, + .radio_type = UNSET, + .radio_addr = ADDR_UNSET, .input = {{ .type = CX23885_VMUX_TELEVISION, .vmux = CX25840_VIN2_CH1 | diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c index af8a225..6835eb1 100644 --- a/drivers/media/video/cx23885/cx23885-dvb.c +++ b/drivers/media/video/cx23885/cx23885-dvb.c @@ -943,6 +943,11 @@ static int dvb_register(struct cx23885_tsport *port) fe = dvb_attach(xc4000_attach, fe0->dvb.frontend, &dev->i2c_bus[1].i2c_adap, &cfg); + if (!fe) { + printk(KERN_ERR "%s/2: xc4000 attach failed\n", + dev->name); + goto frontend_detach; + } } break; case CX23885_BOARD_TBS_6920: diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c index 4bbf9bb..c654bdc 100644 --- a/drivers/media/video/cx23885/cx23885-video.c +++ b/drivers/media/video/cx23885/cx23885-video.c @@ -1550,7 +1550,6 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev, struct v4l2_control ctrl; struct videobuf_dvb_frontend *vfe; struct dvb_frontend *fe; - int err = 0; struct analog_parameters params = { .mode = V4L2_TUNER_ANALOG_TV, @@ -1572,8 +1571,10 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev, params.frequency, f->tuner, params.std); vfe = videobuf_dvb_get_frontend(&dev->ts2.frontends, 1); - if (!vfe) - err = -EINVAL; + if (!vfe) { + mutex_unlock(&dev->lock); + return 
-EINVAL; + } fe = vfe->dvb.frontend; diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c index 62c7ad0..cbd5d11 100644 --- a/drivers/media/video/cx88/cx88-cards.c +++ b/drivers/media/video/cx88/cx88-cards.c @@ -1573,8 +1573,8 @@ static const struct cx88_board cx88_boards[] = { .name = "Pinnacle Hybrid PCTV", .tuner_type = TUNER_XC2028, .tuner_addr = 0x61, - .radio_type = TUNER_XC2028, - .radio_addr = 0x61, + .radio_type = UNSET, + .radio_addr = ADDR_UNSET, .input = { { .type = CX88_VMUX_TELEVISION, .vmux = 0, @@ -1611,8 +1611,8 @@ static const struct cx88_board cx88_boards[] = { .name = "Leadtek TV2000 XP Global", .tuner_type = TUNER_XC2028, .tuner_addr = 0x61, - .radio_type = TUNER_XC2028, - .radio_addr = 0x61, + .radio_type = UNSET, + .radio_addr = ADDR_UNSET, .input = { { .type = CX88_VMUX_TELEVISION, .vmux = 0, @@ -2115,8 +2115,8 @@ static const struct cx88_board cx88_boards[] = { .name = "Terratec Cinergy HT PCI MKII", .tuner_type = TUNER_XC2028, .tuner_addr = 0x61, - .radio_type = TUNER_XC2028, - .radio_addr = 0x61, + .radio_type = UNSET, + .radio_addr = ADDR_UNSET, .input = { { .type = CX88_VMUX_TELEVISION, .vmux = 0, @@ -2154,9 +2154,9 @@ static const struct cx88_board cx88_boards[] = { [CX88_BOARD_WINFAST_DTV1800H] = { .name = "Leadtek WinFast DTV1800 Hybrid", .tuner_type = TUNER_XC2028, - .radio_type = TUNER_XC2028, + .radio_type = UNSET, .tuner_addr = 0x61, - .radio_addr = 0x61, + .radio_addr = ADDR_UNSET, /* * GPIO setting * @@ -2195,9 +2195,9 @@ static const struct cx88_board cx88_boards[] = { [CX88_BOARD_WINFAST_DTV1800H_XC4000] = { .name = "Leadtek WinFast DTV1800 H (XC4000)", .tuner_type = TUNER_XC4000, - .radio_type = TUNER_XC4000, + .radio_type = UNSET, .tuner_addr = 0x61, - .radio_addr = 0x61, + .radio_addr = ADDR_UNSET, /* * GPIO setting * @@ -2236,9 +2236,9 @@ static const struct cx88_board cx88_boards[] = { [CX88_BOARD_WINFAST_DTV2000H_PLUS] = { .name = "Leadtek WinFast DTV2000 H PLUS", .tuner_type = TUNER_XC4000, - .radio_type = TUNER_XC4000, + .radio_type = UNSET, .tuner_addr = 0x61, - .radio_addr = 0x61, + .radio_addr = ADDR_UNSET, /* * GPIO * 2: 1: mute audio diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c index 9449423..aabbf48 100644 --- a/drivers/media/video/em28xx/em28xx-dvb.c +++ b/drivers/media/video/em28xx/em28xx-dvb.c @@ -853,8 +853,7 @@ static int em28xx_dvb_init(struct em28xx *dev) case EM28174_BOARD_PCTV_290E: dvb->fe[0] = dvb_attach(cxd2820r_attach, &em28xx_cxd2820r_config, - &dev->i2c_adap, - NULL); + &dev->i2c_adap); if (dvb->fe[0]) { /* FE 0 attach tuner */ if (!dvb_attach(tda18271_attach, diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c index 544af91c..3949b7d 100644 --- a/drivers/media/video/ivtv/ivtv-driver.c +++ b/drivers/media/video/ivtv/ivtv-driver.c @@ -731,9 +731,6 @@ static int __devinit ivtv_init_struct1(struct ivtv *itv) init_kthread_work(&itv->irq_work, ivtv_irq_work_handler); - /* start counting open_id at 1 */ - itv->open_id = 1; - /* Initial settings */ itv->cxhdl.port = CX2341X_PORT_MEMORY; itv->cxhdl.capabilities = CX2341X_CAP_HAS_SLICED_VBI; diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h index 8f9cc17..06f3d78 100644 --- a/drivers/media/video/ivtv/ivtv-driver.h +++ b/drivers/media/video/ivtv/ivtv-driver.h @@ -332,7 +332,7 @@ struct ivtv_stream { const char *name; /* name of the stream */ int type; /* stream type */ - u32 id; + struct v4l2_fh *fh; /* pointer 
to the streaming filehandle */ spinlock_t qlock; /* locks access to the queues */ unsigned long s_flags; /* status flags, see above */ int dma; /* can be PCI_DMA_TODEVICE, PCI_DMA_FROMDEVICE or PCI_DMA_NONE */ @@ -379,7 +379,6 @@ struct ivtv_stream { struct ivtv_open_id { struct v4l2_fh fh; - u32 open_id; /* unique ID for this file descriptor */ int type; /* stream type */ int yuv_frames; /* 1: started OUT_UDMA_YUV output mode */ struct ivtv *itv; diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c index 38f0522..2cd6c89 100644 --- a/drivers/media/video/ivtv/ivtv-fileops.c +++ b/drivers/media/video/ivtv/ivtv-fileops.c @@ -50,16 +50,16 @@ static int ivtv_claim_stream(struct ivtv_open_id *id, int type) if (test_and_set_bit(IVTV_F_S_CLAIMED, &s->s_flags)) { /* someone already claimed this stream */ - if (s->id == id->open_id) { + if (s->fh == &id->fh) { /* yes, this file descriptor did. So that's OK. */ return 0; } - if (s->id == -1 && (type == IVTV_DEC_STREAM_TYPE_VBI || + if (s->fh == NULL && (type == IVTV_DEC_STREAM_TYPE_VBI || type == IVTV_ENC_STREAM_TYPE_VBI)) { /* VBI is handled already internally, now also assign the file descriptor to this stream for external reading of the stream. */ - s->id = id->open_id; + s->fh = &id->fh; IVTV_DEBUG_INFO("Start Read VBI\n"); return 0; } @@ -67,7 +67,7 @@ static int ivtv_claim_stream(struct ivtv_open_id *id, int type) IVTV_DEBUG_INFO("Stream %d is busy\n", type); return -EBUSY; } - s->id = id->open_id; + s->fh = &id->fh; if (type == IVTV_DEC_STREAM_TYPE_VBI) { /* Enable reinsertion interrupt */ ivtv_clear_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT); @@ -104,7 +104,7 @@ void ivtv_release_stream(struct ivtv_stream *s) struct ivtv *itv = s->itv; struct ivtv_stream *s_vbi; - s->id = -1; + s->fh = NULL; if ((s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type == IVTV_ENC_STREAM_TYPE_VBI) && test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) { /* this stream is still in use internally */ @@ -136,7 +136,7 @@ void ivtv_release_stream(struct ivtv_stream *s) /* was already cleared */ return; } - if (s_vbi->id != -1) { + if (s_vbi->fh) { /* VBI stream still claimed by a file descriptor */ return; } @@ -268,11 +268,13 @@ static struct ivtv_buffer *ivtv_get_buffer(struct ivtv_stream *s, int non_block, } /* wait for more data to arrive */ + mutex_unlock(&itv->serialize_lock); prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE); /* New buffers might have become available before we were added to the waitqueue */ if (!s->q_full.buffers) schedule(); finish_wait(&s->waitq, &wait); + mutex_lock(&itv->serialize_lock); if (signal_pending(current)) { /* return if a signal was received */ IVTV_DEBUG_INFO("User stopped %s\n", s->name); @@ -357,7 +359,7 @@ static ssize_t ivtv_read(struct ivtv_stream *s, char __user *ubuf, size_t tot_co size_t tot_written = 0; int single_frame = 0; - if (atomic_read(&itv->capturing) == 0 && s->id == -1) { + if (atomic_read(&itv->capturing) == 0 && s->fh == NULL) { /* shouldn't happen */ IVTV_DEBUG_WARN("Stream %s not initialized before read\n", s->name); return -EIO; @@ -507,9 +509,7 @@ ssize_t ivtv_v4l2_read(struct file * filp, char __user *buf, size_t count, loff_ IVTV_DEBUG_HI_FILE("read %zd bytes from %s\n", count, s->name); - mutex_lock(&itv->serialize_lock); rc = ivtv_start_capture(id); - mutex_unlock(&itv->serialize_lock); if (rc) return rc; return ivtv_read_pos(s, buf, count, pos, filp->f_flags & O_NONBLOCK); @@ -584,9 +584,7 @@ ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, 
size_t c set_bit(IVTV_F_S_APPL_IO, &s->s_flags); /* Start decoder (returns 0 if already started) */ - mutex_lock(&itv->serialize_lock); rc = ivtv_start_decoding(id, itv->speed); - mutex_unlock(&itv->serialize_lock); if (rc) { IVTV_DEBUG_WARN("Failed start decode stream %s\n", s->name); @@ -627,11 +625,13 @@ retry: break; if (filp->f_flags & O_NONBLOCK) return -EAGAIN; + mutex_unlock(&itv->serialize_lock); prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE); /* New buffers might have become free before we were added to the waitqueue */ if (!s->q_free.buffers) schedule(); finish_wait(&s->waitq, &wait); + mutex_lock(&itv->serialize_lock); if (signal_pending(current)) { IVTV_DEBUG_INFO("User stopped %s\n", s->name); return -EINTR; @@ -686,12 +686,14 @@ retry: if (mode == OUT_YUV) ivtv_yuv_setup_stream_frame(itv); + mutex_unlock(&itv->serialize_lock); prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE); while (!(got_sig = signal_pending(current)) && test_bit(IVTV_F_S_DMA_PENDING, &s->s_flags)) { schedule(); } finish_wait(&itv->dma_waitq, &wait); + mutex_lock(&itv->serialize_lock); if (got_sig) { IVTV_DEBUG_INFO("User interrupted %s\n", s->name); return -EINTR; @@ -756,9 +758,7 @@ unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait) if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) { int rc; - mutex_lock(&itv->serialize_lock); rc = ivtv_start_capture(id); - mutex_unlock(&itv->serialize_lock); if (rc) { IVTV_DEBUG_INFO("Could not start capture for %s (%d)\n", s->name, rc); @@ -808,7 +808,7 @@ void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end) id->type == IVTV_ENC_STREAM_TYPE_VBI) && test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) { /* Also used internally, don't stop capturing */ - s->id = -1; + s->fh = NULL; } else { ivtv_stop_v4l2_encode_stream(s, gop_end); @@ -861,20 +861,9 @@ int ivtv_v4l2_close(struct file *filp) IVTV_DEBUG_FILE("close %s\n", s->name); - v4l2_fh_del(fh); - v4l2_fh_exit(fh); - - /* Easy case first: this stream was never claimed by us */ - if (s->id != id->open_id) { - kfree(id); - return 0; - } - - /* 'Unclaim' this stream */ - /* Stop radio */ - mutex_lock(&itv->serialize_lock); - if (id->type == IVTV_ENC_STREAM_TYPE_RAD) { + if (id->type == IVTV_ENC_STREAM_TYPE_RAD && + v4l2_fh_is_singular_file(filp)) { /* Closing radio device, return to TV mode */ ivtv_mute(itv); /* Mark that the radio is no longer in use */ @@ -890,13 +879,25 @@ int ivtv_v4l2_close(struct file *filp) if (atomic_read(&itv->capturing) > 0) { /* Undo video mute */ ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1, - v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute) | - (v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute_yuv) << 8)); + v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute) | + (v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute_yuv) << 8)); } /* Done! Unmute and continue. 
*/ ivtv_unmute(itv); - ivtv_release_stream(s); - } else if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) { + } + + v4l2_fh_del(fh); + v4l2_fh_exit(fh); + + /* Easy case first: this stream was never claimed by us */ + if (s->fh != &id->fh) { + kfree(id); + return 0; + } + + /* 'Unclaim' this stream */ + + if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) { struct ivtv_stream *s_vout = &itv->streams[IVTV_DEC_STREAM_TYPE_VOUT]; ivtv_stop_decoding(id, VIDEO_CMD_STOP_TO_BLACK | VIDEO_CMD_STOP_IMMEDIATELY, 0); @@ -911,21 +912,25 @@ int ivtv_v4l2_close(struct file *filp) ivtv_stop_capture(id, 0); } kfree(id); - mutex_unlock(&itv->serialize_lock); return 0; } -static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp) +int ivtv_v4l2_open(struct file *filp) { -#ifdef CONFIG_VIDEO_ADV_DEBUG struct video_device *vdev = video_devdata(filp); -#endif + struct ivtv_stream *s = video_get_drvdata(vdev); struct ivtv *itv = s->itv; struct ivtv_open_id *item; int res = 0; IVTV_DEBUG_FILE("open %s\n", s->name); + if (ivtv_init_on_first_open(itv)) { + IVTV_ERR("Failed to initialize on device %s\n", + video_device_node_name(vdev)); + return -ENXIO; + } + #ifdef CONFIG_VIDEO_ADV_DEBUG /* Unless ivtv_fw_debug is set, error out if firmware dead. */ if (ivtv_fw_debug) { @@ -966,31 +971,19 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp) return -ENOMEM; } v4l2_fh_init(&item->fh, s->vdev); - if (res < 0) { - v4l2_fh_exit(&item->fh); - kfree(item); - return res; - } item->itv = itv; item->type = s->type; - item->open_id = itv->open_id++; filp->private_data = &item->fh; + v4l2_fh_add(&item->fh); - if (item->type == IVTV_ENC_STREAM_TYPE_RAD) { - /* Try to claim this stream */ - if (ivtv_claim_stream(item, item->type)) { - /* No, it's already in use */ - v4l2_fh_exit(&item->fh); - kfree(item); - return -EBUSY; - } - + if (item->type == IVTV_ENC_STREAM_TYPE_RAD && + v4l2_fh_is_singular_file(filp)) { if (!test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) { if (atomic_read(&itv->capturing) > 0) { /* switching to radio while capture is in progress is not polite */ - ivtv_release_stream(s); + v4l2_fh_del(&item->fh); v4l2_fh_exit(&item->fh); kfree(item); return -EBUSY; @@ -1022,32 +1015,9 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp) 1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31); itv->yuv_info.stream_size = 0; } - v4l2_fh_add(&item->fh); return 0; } -int ivtv_v4l2_open(struct file *filp) -{ - int res; - struct ivtv *itv = NULL; - struct ivtv_stream *s = NULL; - struct video_device *vdev = video_devdata(filp); - - s = video_get_drvdata(vdev); - itv = s->itv; - - mutex_lock(&itv->serialize_lock); - if (ivtv_init_on_first_open(itv)) { - IVTV_ERR("Failed to initialize on device %s\n", - video_device_node_name(vdev)); - mutex_unlock(&itv->serialize_lock); - return -ENXIO; - } - res = ivtv_serialized_open(s, filp); - mutex_unlock(&itv->serialize_lock); - return res; -} - void ivtv_mute(struct ivtv *itv) { if (atomic_read(&itv->capturing)) diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c index ecafa69..c4bc481 100644 --- a/drivers/media/video/ivtv/ivtv-ioctl.c +++ b/drivers/media/video/ivtv/ivtv-ioctl.c @@ -179,6 +179,7 @@ int ivtv_set_speed(struct ivtv *itv, int speed) ivtv_vapi(itv, CX2341X_DEC_PAUSE_PLAYBACK, 1, 0); /* Wait for any DMA to finish */ + mutex_unlock(&itv->serialize_lock); prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE); while (test_bit(IVTV_F_I_DMA, &itv->i_flags)) { got_sig = signal_pending(current); @@ -188,6 
+189,7 @@ int ivtv_set_speed(struct ivtv *itv, int speed) schedule(); } finish_wait(&itv->dma_waitq, &wait); + mutex_lock(&itv->serialize_lock); if (got_sig) return -EINTR; @@ -1107,6 +1109,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std) * happens within the first 100 lines of the top field. * Make 4 attempts to sync to the decoder before giving up. */ + mutex_unlock(&itv->serialize_lock); for (f = 0; f < 4; f++) { prepare_to_wait(&itv->vsync_waitq, &wait, TASK_UNINTERRUPTIBLE); @@ -1115,6 +1118,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std) schedule_timeout(msecs_to_jiffies(25)); } finish_wait(&itv->vsync_waitq, &wait); + mutex_lock(&itv->serialize_lock); if (f == 4) IVTV_WARN("Mode change failed to sync to decoder\n"); @@ -1842,8 +1846,7 @@ static long ivtv_default(struct file *file, void *fh, bool valid_prio, return 0; } -static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp, - unsigned int cmd, unsigned long arg) +long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct video_device *vfd = video_devdata(filp); long ret; @@ -1855,21 +1858,6 @@ static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp, return ret; } -long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) -{ - struct ivtv_open_id *id = fh2id(filp->private_data); - struct ivtv *itv = id->itv; - long res; - - /* DQEVENT can block, so this should not run with the serialize lock */ - if (cmd == VIDIOC_DQEVENT) - return ivtv_serialized_ioctl(itv, filp, cmd, arg); - mutex_lock(&itv->serialize_lock); - res = ivtv_serialized_ioctl(itv, filp, cmd, arg); - mutex_unlock(&itv->serialize_lock); - return res; -} - static const struct v4l2_ioctl_ops ivtv_ioctl_ops = { .vidioc_querycap = ivtv_querycap, .vidioc_s_audio = ivtv_s_audio, diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c index 9c29e96..1b3b957 100644 --- a/drivers/media/video/ivtv/ivtv-irq.c +++ b/drivers/media/video/ivtv/ivtv-irq.c @@ -288,13 +288,13 @@ static void dma_post(struct ivtv_stream *s) ivtv_process_vbi_data(itv, buf, 0, s->type); s->q_dma.bytesused += buf->bytesused; } - if (s->id == -1) { + if (s->fh == NULL) { ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0); return; } } ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused); - if (s->id != -1) + if (s->fh) wake_up(&s->waitq); } diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c index e7794dc..c6e28b4 100644 --- a/drivers/media/video/ivtv/ivtv-streams.c +++ b/drivers/media/video/ivtv/ivtv-streams.c @@ -159,7 +159,6 @@ static void ivtv_stream_init(struct ivtv *itv, int type) s->buffers = (itv->options.kilobytes[type] * 1024 + s->buf_size - 1) / s->buf_size; spin_lock_init(&s->qlock); init_waitqueue_head(&s->waitq); - s->id = -1; s->sg_handle = IVTV_DMA_UNMAPPED; ivtv_queue_init(&s->q_free); ivtv_queue_init(&s->q_full); @@ -214,6 +213,7 @@ static int ivtv_prep_dev(struct ivtv *itv, int type) s->vdev->fops = ivtv_stream_info[type].fops; s->vdev->release = video_device_release; s->vdev->tvnorms = V4L2_STD_ALL; + s->vdev->lock = &itv->serialize_lock; set_bit(V4L2_FL_USE_FH_PRIO, &s->vdev->flags); ivtv_set_funcs(s->vdev); return 0; diff --git a/drivers/media/video/ivtv/ivtv-yuv.c b/drivers/media/video/ivtv/ivtv-yuv.c index dcbab6a..2ad65eb 100644 --- a/drivers/media/video/ivtv/ivtv-yuv.c +++ b/drivers/media/video/ivtv/ivtv-yuv.c @@ -1149,23 +1149,37 @@ int ivtv_yuv_udma_stream_frame(struct ivtv *itv, void __user *src) { 
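
The ivtv hunks above (ivtv_get_buffer(), the write and poll paths, ivtv_set_speed(), ivtv_s_std_dec() and the YUV DMA helpers) all apply one pattern: now that s->vdev->lock points at itv->serialize_lock, the V4L2 core holds that mutex when it calls into the driver, so the driver must drop it around any blocking wait and re-take it afterwards. A minimal sketch of the pattern, with hypothetical structure and field names (my_dev, waitq, busy are not taken from ivtv):

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>

/* Illustrative device; assume waitq is woken by the interrupt handler. */
struct my_dev {
	struct mutex lock;		/* also installed as vdev->lock */
	wait_queue_head_t waitq;
	unsigned long busy;		/* bit 0: DMA still pending */
};

/*
 * Called with dev->lock held (the V4L2 core acquired it for us).
 * Drop it around the sleep so other file handles can still get in,
 * then re-acquire it before returning to the caller.
 */
static int my_dev_wait_idle(struct my_dev *dev)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	mutex_unlock(&dev->lock);
	for (;;) {
		prepare_to_wait(&dev->waitq, &wait, TASK_INTERRUPTIBLE);
		if (!test_bit(0, &dev->busy))
			break;
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		schedule();
	}
	finish_wait(&dev->waitq, &wait);
	mutex_lock(&dev->lock);
	return ret;
}

Since the mutex is released while sleeping, the caller should re-check any device state it cached before the wait, as it may have changed in the meantime.
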
struct yuv_playback_info *yi = &itv->yuv_info; struct ivtv_dma_frame dma_args; + int res; ivtv_yuv_setup_stream_frame(itv); /* We only need to supply source addresses for this */ dma_args.y_source = src; dma_args.uv_source = src + 720 * ((yi->v4l2_src_h + 31) & ~31); - return ivtv_yuv_udma_frame(itv, &dma_args); + /* Wait for frame DMA. Note that serialize_lock is locked, + so to allow other processes to access the driver while + we are waiting unlock first and later lock again. */ + mutex_unlock(&itv->serialize_lock); + res = ivtv_yuv_udma_frame(itv, &dma_args); + mutex_lock(&itv->serialize_lock); + return res; } /* IVTV_IOC_DMA_FRAME ioctl handler */ int ivtv_yuv_prep_frame(struct ivtv *itv, struct ivtv_dma_frame *args) { -/* IVTV_DEBUG_INFO("yuv_prep_frame\n"); */ + int res; +/* IVTV_DEBUG_INFO("yuv_prep_frame\n"); */ ivtv_yuv_next_free(itv); ivtv_yuv_setup_frame(itv, args); - return ivtv_yuv_udma_frame(itv, args); + /* Wait for frame DMA. Note that serialize_lock is locked, + so to allow other processes to access the driver while + we are waiting unlock first and later lock again. */ + mutex_unlock(&itv->serialize_lock); + res = ivtv_yuv_udma_frame(itv, args); + mutex_lock(&itv->serialize_lock); + return res; } void ivtv_yuv_close(struct ivtv *itv) @@ -1174,7 +1188,9 @@ void ivtv_yuv_close(struct ivtv *itv) int h_filter, v_filter_1, v_filter_2; IVTV_DEBUG_YUV("ivtv_yuv_close\n"); + mutex_unlock(&itv->serialize_lock); ivtv_waitq(&itv->vsync_waitq); + mutex_lock(&itv->serialize_lock); yi->running = 0; atomic_set(&yi->next_dma_frame, -1); diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c index 0cb461d..7452277 100644 --- a/drivers/media/video/mx3_camera.c +++ b/drivers/media/video/mx3_camera.c @@ -287,7 +287,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb) sg_dma_len(sg) = new_size; txd = ichan->dma_chan.device->device_prep_slave_sg( - &ichan->dma_chan, sg, 1, DMA_FROM_DEVICE, + &ichan->dma_chan, sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); if (!txd) goto error; diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c index a277f95..1fb7d5b 100644 --- a/drivers/media/video/omap/omap_vout.c +++ b/drivers/media/video/omap/omap_vout.c @@ -1042,7 +1042,8 @@ static int vidioc_querycap(struct file *file, void *fh, strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver)); strlcpy(cap->card, vout->vfd->name, sizeof(cap->card)); cap->bus_info[0] = '\0'; - cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT; + cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT | + V4L2_CAP_VIDEO_OUTPUT_OVERLAY; return 0; } @@ -1825,7 +1826,9 @@ static int vidioc_g_fbuf(struct file *file, void *fh, ovid = &vout->vid_info; ovl = ovid->overlays[0]; - a->flags = 0x0; + /* The video overlay must stay within the framebuffer and can't be + positioned independently. 
*/ + a->flags = V4L2_FBUF_FLAG_OVERLAY; a->capability = V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_CHROMAKEY | V4L2_FBUF_CAP_SRC_CHROMAKEY; diff --git a/drivers/media/video/pwc/pwc-ctrl.c b/drivers/media/video/pwc/pwc-ctrl.c index 905d41d..1f506fd 100644 --- a/drivers/media/video/pwc/pwc-ctrl.c +++ b/drivers/media/video/pwc/pwc-ctrl.c @@ -104,47 +104,16 @@ static struct Nala_table_entry Nala_table[PSZ_MAX][PWC_FPS_MAX_NALA] = /****************************************************************************/ -static int _send_control_msg(struct pwc_device *pdev, - u8 request, u16 value, int index, void *buf, int buflen) -{ - int rc; - void *kbuf = NULL; - - if (buflen) { - kbuf = kmemdup(buf, buflen, GFP_KERNEL); /* not allowed on stack */ - if (kbuf == NULL) - return -ENOMEM; - } - - rc = usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0), - request, - USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - value, - index, - kbuf, buflen, USB_CTRL_SET_TIMEOUT); - - kfree(kbuf); - return rc; -} - static int recv_control_msg(struct pwc_device *pdev, - u8 request, u16 value, void *buf, int buflen) + u8 request, u16 value, int recv_count) { int rc; - void *kbuf = kmalloc(buflen, GFP_KERNEL); /* not allowed on stack */ - - if (kbuf == NULL) - return -ENOMEM; rc = usb_control_msg(pdev->udev, usb_rcvctrlpipe(pdev->udev, 0), request, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - value, - pdev->vcinterface, - kbuf, buflen, USB_CTRL_GET_TIMEOUT); - memcpy(buf, kbuf, buflen); - kfree(kbuf); - + value, pdev->vcinterface, + pdev->ctrl_buf, recv_count, USB_CTRL_GET_TIMEOUT); if (rc < 0) PWC_ERROR("recv_control_msg error %d req %02x val %04x\n", rc, request, value); @@ -152,27 +121,39 @@ static int recv_control_msg(struct pwc_device *pdev, } static inline int send_video_command(struct pwc_device *pdev, - int index, void *buf, int buflen) + int index, const unsigned char *buf, int buflen) { - return _send_control_msg(pdev, - SET_EP_STREAM_CTL, - VIDEO_OUTPUT_CONTROL_FORMATTER, - index, - buf, buflen); + int rc; + + memcpy(pdev->ctrl_buf, buf, buflen); + + rc = usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0), + SET_EP_STREAM_CTL, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + VIDEO_OUTPUT_CONTROL_FORMATTER, index, + pdev->ctrl_buf, buflen, USB_CTRL_SET_TIMEOUT); + if (rc >= 0) + memcpy(pdev->cmd_buf, buf, buflen); + else + PWC_ERROR("send_video_command error %d\n", rc); + + return rc; } int send_control_msg(struct pwc_device *pdev, u8 request, u16 value, void *buf, int buflen) { - return _send_control_msg(pdev, - request, value, pdev->vcinterface, buf, buflen); + return usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0), + request, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + value, pdev->vcinterface, + buf, buflen, USB_CTRL_SET_TIMEOUT); } -static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames, - int *compression) +static int set_video_mode_Nala(struct pwc_device *pdev, int size, int pixfmt, + int frames, int *compression, int send_to_cam) { - unsigned char buf[3]; - int ret, fps; + int fps, ret = 0; struct Nala_table_entry *pEntry; int frames2frames[31] = { /* closest match of framerate */ @@ -194,30 +175,29 @@ static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames, 7 /* 30 */ }; - if (size < 0 || size > PSZ_CIF || frames < 4 || frames > 25) + if (size < 0 || size > PSZ_CIF) return -EINVAL; + if (frames < 4) + frames = 4; + else if (frames > 25) + frames = 25; frames = frames2frames[frames]; fps = frames2table[frames]; 
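
Note how set_video_mode_Nala() (and, further down, the Timon and Kiara variants) now clamps an out-of-range frame rate into the supported window instead of failing with -EINVAL. The same clamping expressed with the kernel's clamp() helper; the 4-25 fps limits match the hunk above, while the helper name is illustrative:

#include <linux/kernel.h>	/* clamp() */

#define NALA_FPS_MIN	4
#define NALA_FPS_MAX	25

/* Pull a requested frame rate into the range the camera supports. */
static int nala_clamp_fps(int frames)
{
	return clamp(frames, NALA_FPS_MIN, NALA_FPS_MAX);
}

Adjusting the request rather than rejecting it is what lets the new pwc_s_parm() handler later in this patch pass a user-supplied rate straight through to pwc_set_video_mode().
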
pEntry = &Nala_table[size][fps]; if (pEntry->alternate == 0) return -EINVAL; - memcpy(buf, pEntry->mode, 3); - ret = send_video_command(pdev, pdev->vendpoint, buf, 3); - if (ret < 0) { - PWC_DEBUG_MODULE("Failed to send video command... %d\n", ret); + if (send_to_cam) + ret = send_video_command(pdev, pdev->vendpoint, + pEntry->mode, 3); + if (ret < 0) return ret; - } - if (pEntry->compressed && pdev->pixfmt == V4L2_PIX_FMT_YUV420) { - ret = pwc_dec1_init(pdev, pdev->type, pdev->release, buf); - if (ret < 0) - return ret; - } - pdev->cmd_len = 3; - memcpy(pdev->cmd_buf, buf, 3); + if (pEntry->compressed && pixfmt == V4L2_PIX_FMT_YUV420) + pwc_dec1_init(pdev, pEntry->mode); /* Set various parameters */ + pdev->pixfmt = pixfmt; pdev->vframes = frames; pdev->valternate = pEntry->alternate; pdev->width = pwc_image_sizes[size][0]; @@ -243,18 +223,20 @@ static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames, } -static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames, - int *compression) +static int set_video_mode_Timon(struct pwc_device *pdev, int size, int pixfmt, + int frames, int *compression, int send_to_cam) { - unsigned char buf[13]; const struct Timon_table_entry *pChoose; - int ret, fps; + int fps, ret = 0; - if (size >= PSZ_MAX || frames < 5 || frames > 30 || - *compression < 0 || *compression > 3) - return -EINVAL; - if (size == PSZ_VGA && frames > 15) + if (size >= PSZ_MAX || *compression < 0 || *compression > 3) return -EINVAL; + if (frames < 5) + frames = 5; + else if (size == PSZ_VGA && frames > 15) + frames = 15; + else if (frames > 30) + frames = 30; fps = (frames / 5) - 1; /* Find a supported framerate with progressively higher compression */ @@ -268,22 +250,18 @@ static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames, if (pChoose == NULL || pChoose->alternate == 0) return -ENOENT; /* Not supported. 
*/ - memcpy(buf, pChoose->mode, 13); - ret = send_video_command(pdev, pdev->vendpoint, buf, 13); + if (send_to_cam) + ret = send_video_command(pdev, pdev->vendpoint, + pChoose->mode, 13); if (ret < 0) return ret; - if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) { - ret = pwc_dec23_init(pdev, pdev->type, buf); - if (ret < 0) - return ret; - } - - pdev->cmd_len = 13; - memcpy(pdev->cmd_buf, buf, 13); + if (pChoose->bandlength > 0 && pixfmt == V4L2_PIX_FMT_YUV420) + pwc_dec23_init(pdev, pChoose->mode); /* Set various parameters */ - pdev->vframes = frames; + pdev->pixfmt = pixfmt; + pdev->vframes = (fps + 1) * 5; pdev->valternate = pChoose->alternate; pdev->width = pwc_image_sizes[size][0]; pdev->height = pwc_image_sizes[size][1]; @@ -296,18 +274,20 @@ static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames, } -static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames, - int *compression) +static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int pixfmt, + int frames, int *compression, int send_to_cam) { const struct Kiara_table_entry *pChoose = NULL; - int fps, ret; - unsigned char buf[12]; + int fps, ret = 0; - if (size >= PSZ_MAX || frames < 5 || frames > 30 || - *compression < 0 || *compression > 3) - return -EINVAL; - if (size == PSZ_VGA && frames > 15) + if (size >= PSZ_MAX || *compression < 0 || *compression > 3) return -EINVAL; + if (frames < 5) + frames = 5; + else if (size == PSZ_VGA && frames > 15) + frames = 15; + else if (frames > 30) + frames = 30; fps = (frames / 5) - 1; /* Find a supported framerate with progressively higher compression */ @@ -320,26 +300,18 @@ static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames, if (pChoose == NULL || pChoose->alternate == 0) return -ENOENT; /* Not supported. */ - PWC_TRACE("Using alternate setting %d.\n", pChoose->alternate); - - /* usb_control_msg won't take staticly allocated arrays as argument?? 
*/ - memcpy(buf, pChoose->mode, 12); - /* Firmware bug: video endpoint is 5, but commands are sent to endpoint 4 */ - ret = send_video_command(pdev, 4 /* pdev->vendpoint */, buf, 12); + if (send_to_cam) + ret = send_video_command(pdev, 4, pChoose->mode, 12); if (ret < 0) return ret; - if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) { - ret = pwc_dec23_init(pdev, pdev->type, buf); - if (ret < 0) - return ret; - } + if (pChoose->bandlength > 0 && pixfmt == V4L2_PIX_FMT_YUV420) + pwc_dec23_init(pdev, pChoose->mode); - pdev->cmd_len = 12; - memcpy(pdev->cmd_buf, buf, 12); /* All set and go */ - pdev->vframes = frames; + pdev->pixfmt = pixfmt; + pdev->vframes = (fps + 1) * 5; pdev->valternate = pChoose->alternate; pdev->width = pwc_image_sizes[size][0]; pdev->height = pwc_image_sizes[size][1]; @@ -354,22 +326,24 @@ static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames, } int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, - int frames, int *compression) + int pixfmt, int frames, int *compression, int send_to_cam) { int ret, size; - PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, pixfmt %08x).\n", width, height, frames, pdev->pixfmt); + PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, pixfmt %08x).\n", + width, height, frames, pixfmt); size = pwc_get_size(pdev, width, height); PWC_TRACE("decode_size = %d.\n", size); if (DEVICE_USE_CODEC1(pdev->type)) { - ret = set_video_mode_Nala(pdev, size, frames, compression); - + ret = set_video_mode_Nala(pdev, size, pixfmt, frames, + compression, send_to_cam); } else if (DEVICE_USE_CODEC3(pdev->type)) { - ret = set_video_mode_Kiara(pdev, size, frames, compression); - + ret = set_video_mode_Kiara(pdev, size, pixfmt, frames, + compression, send_to_cam); } else { - ret = set_video_mode_Timon(pdev, size, frames, compression); + ret = set_video_mode_Timon(pdev, size, pixfmt, frames, + compression, send_to_cam); } if (ret < 0) { PWC_ERROR("Failed to set video mode %s@%d fps; return code = %d\n", size2name[size], frames, ret); @@ -436,13 +410,12 @@ unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned i int pwc_get_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data) { int ret; - u8 buf; - ret = recv_control_msg(pdev, request, value, &buf, sizeof(buf)); + ret = recv_control_msg(pdev, request, value, 1); if (ret < 0) return ret; - *data = buf; + *data = pdev->ctrl_buf[0]; return 0; } @@ -450,7 +423,8 @@ int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data) { int ret; - ret = send_control_msg(pdev, request, value, &data, sizeof(data)); + pdev->ctrl_buf[0] = data; + ret = send_control_msg(pdev, request, value, pdev->ctrl_buf, 1); if (ret < 0) return ret; @@ -460,37 +434,34 @@ int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data) int pwc_get_s8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data) { int ret; - s8 buf; - ret = recv_control_msg(pdev, request, value, &buf, sizeof(buf)); + ret = recv_control_msg(pdev, request, value, 1); if (ret < 0) return ret; - *data = buf; + *data = ((s8 *)pdev->ctrl_buf)[0]; return 0; } int pwc_get_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data) { int ret; - u8 buf[2]; - ret = recv_control_msg(pdev, request, value, buf, sizeof(buf)); + ret = recv_control_msg(pdev, request, value, 2); if (ret < 0) return ret; - *data = (buf[1] << 8) | buf[0]; + *data = (pdev->ctrl_buf[1] << 8) | pdev->ctrl_buf[0]; return 0; } int pwc_set_u16_ctrl(struct pwc_device *pdev, u8 request, u16 
value, u16 data) { int ret; - u8 buf[2]; - buf[0] = data & 0xff; - buf[1] = data >> 8; - ret = send_control_msg(pdev, request, value, buf, sizeof(buf)); + pdev->ctrl_buf[0] = data & 0xff; + pdev->ctrl_buf[1] = data >> 8; + ret = send_control_msg(pdev, request, value, pdev->ctrl_buf, 2); if (ret < 0) return ret; @@ -511,7 +482,6 @@ int pwc_button_ctrl(struct pwc_device *pdev, u16 value) /* POWER */ void pwc_camera_power(struct pwc_device *pdev, int power) { - char buf; int r; if (!pdev->power_save) @@ -521,13 +491,11 @@ void pwc_camera_power(struct pwc_device *pdev, int power) return; /* Not supported by Nala or Timon < release 6 */ if (power) - buf = 0x00; /* active */ + pdev->ctrl_buf[0] = 0x00; /* active */ else - buf = 0xFF; /* power save */ - r = send_control_msg(pdev, - SET_STATUS_CTL, SET_POWER_SAVE_MODE_FORMATTER, - &buf, sizeof(buf)); - + pdev->ctrl_buf[0] = 0xFF; /* power save */ + r = send_control_msg(pdev, SET_STATUS_CTL, + SET_POWER_SAVE_MODE_FORMATTER, pdev->ctrl_buf, 1); if (r < 0) PWC_ERROR("Failed to power %s camera (%d)\n", power ? "on" : "off", r); @@ -535,7 +503,6 @@ void pwc_camera_power(struct pwc_device *pdev, int power) int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value) { - unsigned char buf[2]; int r; if (pdev->type < 730) @@ -551,11 +518,11 @@ int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value) if (off_value > 0xff) off_value = 0xff; - buf[0] = on_value; - buf[1] = off_value; + pdev->ctrl_buf[0] = on_value; + pdev->ctrl_buf[1] = off_value; r = send_control_msg(pdev, - SET_STATUS_CTL, LED_FORMATTER, &buf, sizeof(buf)); + SET_STATUS_CTL, LED_FORMATTER, pdev->ctrl_buf, 2); if (r < 0) PWC_ERROR("Failed to set LED on/off time (%d)\n", r); @@ -565,7 +532,6 @@ int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value) #ifdef CONFIG_USB_PWC_DEBUG int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor) { - unsigned char buf; int ret = -1, request; if (pdev->type < 675) @@ -575,14 +541,13 @@ int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor) else request = SENSOR_TYPE_FORMATTER2; - ret = recv_control_msg(pdev, - GET_STATUS_CTL, request, &buf, sizeof(buf)); + ret = recv_control_msg(pdev, GET_STATUS_CTL, request, 1); if (ret < 0) return ret; if (pdev->type < 675) - *sensor = buf | 0x100; + *sensor = pdev->ctrl_buf[0] | 0x100; else - *sensor = buf; + *sensor = pdev->ctrl_buf[0]; return 0; } #endif diff --git a/drivers/media/video/pwc/pwc-dec1.c b/drivers/media/video/pwc/pwc-dec1.c index be0e02c..e899036 100644 --- a/drivers/media/video/pwc/pwc-dec1.c +++ b/drivers/media/video/pwc/pwc-dec1.c @@ -22,19 +22,11 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#include "pwc-dec1.h" +#include "pwc.h" -int pwc_dec1_init(struct pwc_device *pwc, int type, int release, void *buffer) +void pwc_dec1_init(struct pwc_device *pdev, const unsigned char *cmd) { - struct pwc_dec1_private *pdec; + struct pwc_dec1_private *pdec = &pdev->dec1; - if (pwc->decompress_data == NULL) { - pdec = kmalloc(sizeof(struct pwc_dec1_private), GFP_KERNEL); - if (pdec == NULL) - return -ENOMEM; - pwc->decompress_data = pdec; - } - pdec = pwc->decompress_data; - - return 0; + pdec->version = pdev->release; } diff --git a/drivers/media/video/pwc/pwc-dec1.h b/drivers/media/video/pwc/pwc-dec1.h index a57d860..c565ef8 100644 --- a/drivers/media/video/pwc/pwc-dec1.h +++ b/drivers/media/video/pwc/pwc-dec1.h @@ -25,13 +25,15 @@ #ifndef PWC_DEC1_H #define PWC_DEC1_H 
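
The pwc-ctrl.c rework above exists because usb_control_msg() needs a kmalloc()'d, DMA-able buffer ("not allowed on stack", as the removed comment put it); rather than bouncing every transfer through kmemdup()/kmalloc(), the driver now keeps one preallocated pdev->ctrl_buf. A minimal sketch of that idiom for a single-byte vendor read, assuming hypothetical names for the structure, helper and size constant:

#include <linux/slab.h>		/* kmalloc() for the probe-time allocation */
#include <linux/usb.h>

#define CTRL_BUF_SIZE	13	/* largest command this sketch would send */

/* One small DMA-able scratch buffer, allocated once at probe time:
 * cam->ctrl_buf = kmalloc(CTRL_BUF_SIZE, GFP_KERNEL); */
struct my_cam {
	struct usb_device *udev;
	u8 *ctrl_buf;
};

/* Read one byte via a vendor control request.  The transfer lands in
 * ctrl_buf because the USB core may DMA into it; stack buffers must
 * never be handed to usb_control_msg(). */
static int my_cam_get_u8(struct my_cam *cam, u8 request, u16 value,
			 u16 index, u8 *data)
{
	int rc;

	rc = usb_control_msg(cam->udev, usb_rcvctrlpipe(cam->udev, 0),
			     request,
			     USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			     value, index, cam->ctrl_buf, 1,
			     USB_CTRL_GET_TIMEOUT);
	if (rc < 0)
		return rc;
	*data = cam->ctrl_buf[0];
	return 0;
}

The buffer is freed in the video_device release callback rather than in disconnect, mirroring the kfree(pdev->ctrl_buf) added to pwc_video_release() above.
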
-#include "pwc.h" +#include <linux/mutex.h> + +struct pwc_device; struct pwc_dec1_private { int version; }; -int pwc_dec1_init(struct pwc_device *pwc, int type, int release, void *buffer); +void pwc_dec1_init(struct pwc_device *pdev, const unsigned char *cmd); #endif diff --git a/drivers/media/video/pwc/pwc-dec23.c b/drivers/media/video/pwc/pwc-dec23.c index 2c67091..3792fed 100644 --- a/drivers/media/video/pwc/pwc-dec23.c +++ b/drivers/media/video/pwc/pwc-dec23.c @@ -294,22 +294,17 @@ static unsigned char pwc_crop_table[256 + 2*MAX_OUTER_CROP_VALUE]; /* If the type or the command change, we rebuild the lookup table */ -int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd) +void pwc_dec23_init(struct pwc_device *pdev, const unsigned char *cmd) { int flags, version, shift, i; - struct pwc_dec23_private *pdec; - - if (pwc->decompress_data == NULL) { - pdec = kmalloc(sizeof(struct pwc_dec23_private), GFP_KERNEL); - if (pdec == NULL) - return -ENOMEM; - pwc->decompress_data = pdec; - } - pdec = pwc->decompress_data; + struct pwc_dec23_private *pdec = &pdev->dec23; mutex_init(&pdec->lock); - if (DEVICE_USE_CODEC3(type)) { + if (pdec->last_cmd_valid && pdec->last_cmd == cmd[2]) + return; + + if (DEVICE_USE_CODEC3(pdev->type)) { flags = cmd[2] & 0x18; if (flags == 8) pdec->nbits = 7; /* More bits, mean more bits to encode the stream, but better quality */ @@ -356,7 +351,8 @@ int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd) pwc_crop_table[MAX_OUTER_CROP_VALUE+256+i] = 255; #endif - return 0; + pdec->last_cmd = cmd[2]; + pdec->last_cmd_valid = 1; } /* @@ -659,12 +655,12 @@ static void DecompressBand23(struct pwc_dec23_private *pdec, * src: raw data * dst: image output */ -void pwc_dec23_decompress(const struct pwc_device *pwc, +void pwc_dec23_decompress(struct pwc_device *pdev, const void *src, void *dst) { int bandlines_left, bytes_per_block; - struct pwc_dec23_private *pdec = pwc->decompress_data; + struct pwc_dec23_private *pdec = &pdev->dec23; /* YUV420P image format */ unsigned char *pout_planar_y; @@ -674,23 +670,22 @@ void pwc_dec23_decompress(const struct pwc_device *pwc, mutex_lock(&pdec->lock); - bandlines_left = pwc->height / 4; - bytes_per_block = pwc->width * 4; - plane_size = pwc->height * pwc->width; + bandlines_left = pdev->height / 4; + bytes_per_block = pdev->width * 4; + plane_size = pdev->height * pdev->width; pout_planar_y = dst; pout_planar_u = dst + plane_size; pout_planar_v = dst + plane_size + plane_size / 4; while (bandlines_left--) { - DecompressBand23(pwc->decompress_data, - src, + DecompressBand23(pdec, src, pout_planar_y, pout_planar_u, pout_planar_v, - pwc->width, pwc->width); - src += pwc->vbandlength; + pdev->width, pdev->width); + src += pdev->vbandlength; pout_planar_y += bytes_per_block; - pout_planar_u += pwc->width; - pout_planar_v += pwc->width; + pout_planar_u += pdev->width; + pout_planar_v += pdev->width; } mutex_unlock(&pdec->lock); } diff --git a/drivers/media/video/pwc/pwc-dec23.h b/drivers/media/video/pwc/pwc-dec23.h index d64a3c2..c655b1c 100644 --- a/drivers/media/video/pwc/pwc-dec23.h +++ b/drivers/media/video/pwc/pwc-dec23.h @@ -25,17 +25,20 @@ #ifndef PWC_DEC23_H #define PWC_DEC23_H -#include "pwc.h" +struct pwc_device; struct pwc_dec23_private { struct mutex lock; + unsigned char last_cmd, last_cmd_valid; + unsigned int scalebits; unsigned int nbitsmask, nbits; /* Number of bits of a color in the compressed stream */ unsigned int reservoir; unsigned int nbits_in_reservoir; + const unsigned char *stream; 
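
pwc_dec23_init() above now records the command byte it last initialized for (last_cmd/last_cmd_valid) and returns early when asked to set up the same mode again, so the lookup tables are only rebuilt on a real mode change. The same memoization pattern in isolation, with hypothetical names:

#include <linux/types.h>

struct table_cache {
	bool valid;
	u8 last_key;
	/* derived lookup tables would live here */
};

/* Rebuild the tables only when the key actually changed. */
static void table_cache_init(struct table_cache *c, u8 key)
{
	if (c->valid && c->last_key == key)
		return;			/* same mode as last time */

	/* ... recompute the tables for 'key' here ... */

	c->last_key = key;
	c->valid = true;
}
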
int temp_colors[16]; @@ -51,8 +54,8 @@ struct pwc_dec23_private }; -int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd); -void pwc_dec23_decompress(const struct pwc_device *pwc, +void pwc_dec23_init(struct pwc_device *pdev, const unsigned char *cmd); +void pwc_dec23_decompress(struct pwc_device *pdev, const void *src, void *dst); #endif diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c index 943d37a..122fbd0 100644 --- a/drivers/media/video/pwc/pwc-if.c +++ b/drivers/media/video/pwc/pwc-if.c @@ -128,18 +128,11 @@ static struct usb_driver pwc_driver = { #define MAX_DEV_HINTS 20 #define MAX_ISOC_ERRORS 20 -static int default_fps = 10; #ifdef CONFIG_USB_PWC_DEBUG int pwc_trace = PWC_DEBUG_LEVEL; #endif static int power_save = -1; -static int led_on = 100, led_off; /* defaults to LED that is on while in use */ -static struct { - int type; - char serial_number[30]; - int device_node; - struct pwc_device *pdev; -} device_hint[MAX_DEV_HINTS]; +static int leds[2] = { 100, 0 }; /***/ @@ -386,8 +379,8 @@ static int pwc_isoc_init(struct pwc_device *pdev) retry: /* We first try with low compression and then retry with a higher compression setting if there is not enough bandwidth. */ - ret = pwc_set_video_mode(pdev, pdev->width, pdev->height, - pdev->vframes, &compression); + ret = pwc_set_video_mode(pdev, pdev->width, pdev->height, pdev->pixfmt, + pdev->vframes, &compression, 1); /* Get the current alternate interface, adjust packet size */ intf = usb_ifnum_to_if(udev, 0); @@ -597,23 +590,9 @@ leave: static void pwc_video_release(struct v4l2_device *v) { struct pwc_device *pdev = container_of(v, struct pwc_device, v4l2_dev); - int hint; - - /* search device_hint[] table if we occupy a slot, by any chance */ - for (hint = 0; hint < MAX_DEV_HINTS; hint++) - if (device_hint[hint].pdev == pdev) - device_hint[hint].pdev = NULL; - - /* Free intermediate decompression buffer & tables */ - if (pdev->decompress_data != NULL) { - PWC_DEBUG_MEMORY("Freeing decompression buffer at %p.\n", - pdev->decompress_data); - kfree(pdev->decompress_data); - pdev->decompress_data = NULL; - } v4l2_ctrl_handler_free(&pdev->ctrl_handler); - + kfree(pdev->ctrl_buf); kfree(pdev); } @@ -758,7 +737,7 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count) /* Turn on camera and set LEDS on */ pwc_camera_power(pdev, 1); - pwc_set_leds(pdev, led_on, led_off); + pwc_set_leds(pdev, leds[0], leds[1]); r = pwc_isoc_init(pdev); if (r) { @@ -813,10 +792,9 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id struct usb_device *udev = interface_to_usbdev(intf); struct pwc_device *pdev = NULL; int vendor_id, product_id, type_id; - int hint, rc; + int rc; int features = 0; int compression = 0; - int video_nr = -1; /* default: use next available device */ int my_power_save = power_save; char serial_number[30], *name; @@ -1076,7 +1054,6 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id return -ENOMEM; } pdev->type = type_id; - pdev->vframes = default_fps; pdev->features = features; pwc_construct(pdev); /* set min/max sizes correct */ @@ -1107,24 +1084,14 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id pdev->release = le16_to_cpu(udev->descriptor.bcdDevice); PWC_DEBUG_PROBE("Release: %04x\n", pdev->release); - /* Now search device_hint[] table for a match, so we can hint a node number. 
*/ - for (hint = 0; hint < MAX_DEV_HINTS; hint++) { - if (((device_hint[hint].type == -1) || (device_hint[hint].type == pdev->type)) && - (device_hint[hint].pdev == NULL)) { - /* so far, so good... try serial number */ - if ((device_hint[hint].serial_number[0] == '*') || !strcmp(device_hint[hint].serial_number, serial_number)) { - /* match! */ - video_nr = device_hint[hint].device_node; - PWC_DEBUG_PROBE("Found hint, will try to register as /dev/video%d\n", video_nr); - break; - } - } + /* Allocate USB command buffers */ + pdev->ctrl_buf = kmalloc(sizeof(pdev->cmd_buf), GFP_KERNEL); + if (!pdev->ctrl_buf) { + PWC_ERROR("Oops, could not allocate memory for pwc_device.\n"); + rc = -ENOMEM; + goto err_free_mem; } - /* occupy slot */ - if (hint < MAX_DEV_HINTS) - device_hint[hint].pdev = pdev; - #ifdef CONFIG_USB_PWC_DEBUG /* Query sensor type */ if (pwc_get_cmos_sensor(pdev, &rc) >= 0) { @@ -1138,8 +1105,8 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id pwc_set_leds(pdev, 0, 0); /* Setup intial videomode */ - rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT, pdev->vframes, - &compression); + rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT, + V4L2_PIX_FMT_YUV420, 30, &compression, 1); if (rc) goto err_free_mem; @@ -1164,7 +1131,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id pdev->v4l2_dev.ctrl_handler = &pdev->ctrl_handler; pdev->vdev.v4l2_dev = &pdev->v4l2_dev; - rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, video_nr); + rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, -1); if (rc < 0) { PWC_ERROR("Failed to register as video device (%d).\n", rc); goto err_unregister_v4l2_dev; @@ -1207,8 +1174,7 @@ err_unregister_v4l2_dev: err_free_controls: v4l2_ctrl_handler_free(&pdev->ctrl_handler); err_free_mem: - if (hint < MAX_DEV_HINTS) - device_hint[hint].pdev = NULL; + kfree(pdev->ctrl_buf); kfree(pdev); return rc; } @@ -1243,27 +1209,19 @@ static void usb_pwc_disconnect(struct usb_interface *intf) * Initialization code & module stuff */ -static int fps; -static int leds[2] = { -1, -1 }; static unsigned int leds_nargs; -static char *dev_hint[MAX_DEV_HINTS]; -static unsigned int dev_hint_nargs; -module_param(fps, int, 0444); #ifdef CONFIG_USB_PWC_DEBUG module_param_named(trace, pwc_trace, int, 0644); #endif module_param(power_save, int, 0644); module_param_array(leds, int, &leds_nargs, 0444); -module_param_array(dev_hint, charp, &dev_hint_nargs, 0444); -MODULE_PARM_DESC(fps, "Initial frames per second. 
Varies with model, useful range 5-30"); #ifdef CONFIG_USB_PWC_DEBUG MODULE_PARM_DESC(trace, "For debugging purposes"); #endif MODULE_PARM_DESC(power_save, "Turn power saving for new cameras on or off"); MODULE_PARM_DESC(leds, "LED on,off time in milliseconds"); -MODULE_PARM_DESC(dev_hint, "Device node hints"); MODULE_DESCRIPTION("Philips & OEM USB webcam driver"); MODULE_AUTHOR("Luc Saillard <luc@saillard.org>"); @@ -1273,114 +1231,13 @@ MODULE_VERSION( PWC_VERSION ); static int __init usb_pwc_init(void) { - int i; - -#ifdef CONFIG_USB_PWC_DEBUG - PWC_INFO("Philips webcam module version " PWC_VERSION " loaded.\n"); - PWC_INFO("Supports Philips PCA645/646, PCVC675/680/690, PCVC720[40]/730/740/750 & PCVC830/840.\n"); - PWC_INFO("Also supports the Askey VC010, various Logitech Quickcams, Samsung MPC-C10 and MPC-C30,\n"); - PWC_INFO("the Creative WebCam 5 & Pro Ex, SOTEC Afina Eye and Visionite VCS-UC300 and VCS-UM100.\n"); - - if (pwc_trace >= 0) { - PWC_DEBUG_MODULE("Trace options: 0x%04x\n", pwc_trace); - } -#endif - - if (fps) { - if (fps < 4 || fps > 30) { - PWC_ERROR("Framerate out of bounds (4-30).\n"); - return -EINVAL; - } - default_fps = fps; - PWC_DEBUG_MODULE("Default framerate set to %d.\n", default_fps); - } - - if (leds[0] >= 0) - led_on = leds[0]; - if (leds[1] >= 0) - led_off = leds[1]; - - /* Big device node whoopla. Basically, it allows you to assign a - device node (/dev/videoX) to a camera, based on its type - & serial number. The format is [type[.serialnumber]:]node. - - Any camera that isn't matched by these rules gets the next - available free device node. - */ - for (i = 0; i < MAX_DEV_HINTS; i++) { - char *s, *colon, *dot; - - /* This loop also initializes the array */ - device_hint[i].pdev = NULL; - s = dev_hint[i]; - if (s != NULL && *s != '\0') { - device_hint[i].type = -1; /* wildcard */ - strcpy(device_hint[i].serial_number, "*"); - - /* parse string: chop at ':' & '/' */ - colon = dot = s; - while (*colon != '\0' && *colon != ':') - colon++; - while (*dot != '\0' && *dot != '.') - dot++; - /* Few sanity checks */ - if (*dot != '\0' && dot > colon) { - PWC_ERROR("Malformed camera hint: the colon must be after the dot.\n"); - return -EINVAL; - } - - if (*colon == '\0') { - /* No colon */ - if (*dot != '\0') { - PWC_ERROR("Malformed camera hint: no colon + device node given.\n"); - return -EINVAL; - } - else { - /* No type or serial number specified, just a number. 
*/ - device_hint[i].device_node = - simple_strtol(s, NULL, 10); - } - } - else { - /* There's a colon, so we have at least a type and a device node */ - device_hint[i].type = - simple_strtol(s, NULL, 10); - device_hint[i].device_node = - simple_strtol(colon + 1, NULL, 10); - if (*dot != '\0') { - /* There's a serial number as well */ - int k; - - dot++; - k = 0; - while (*dot != ':' && k < 29) { - device_hint[i].serial_number[k++] = *dot; - dot++; - } - device_hint[i].serial_number[k] = '\0'; - } - } - PWC_TRACE("device_hint[%d]:\n", i); - PWC_TRACE(" type : %d\n", device_hint[i].type); - PWC_TRACE(" serial# : %s\n", device_hint[i].serial_number); - PWC_TRACE(" node : %d\n", device_hint[i].device_node); - } - else - device_hint[i].type = 0; /* not filled */ - } /* ..for MAX_DEV_HINTS */ - - PWC_DEBUG_PROBE("Registering driver at address 0x%p.\n", &pwc_driver); return usb_register(&pwc_driver); } static void __exit usb_pwc_exit(void) { - PWC_DEBUG_MODULE("Deregistering driver.\n"); usb_deregister(&pwc_driver); - PWC_INFO("Philips webcam module removed.\n"); } module_init(usb_pwc_init); module_exit(usb_pwc_exit); - -/* vim: set cino= formatoptions=croql cindent shiftwidth=8 tabstop=8: */ diff --git a/drivers/media/video/pwc/pwc-misc.c b/drivers/media/video/pwc/pwc-misc.c index 23a55b5..9be5adf 100644 --- a/drivers/media/video/pwc/pwc-misc.c +++ b/drivers/media/video/pwc/pwc-misc.c @@ -90,5 +90,4 @@ void pwc_construct(struct pwc_device *pdev) pdev->frame_header_size = 0; pdev->frame_trailer_size = 0; } - pdev->pixfmt = V4L2_PIX_FMT_YUV420; /* default */ } diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c index 80e2584..f495eeb 100644 --- a/drivers/media/video/pwc/pwc-v4l.c +++ b/drivers/media/video/pwc/pwc-v4l.c @@ -493,16 +493,11 @@ static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f) (pixelformat>>24)&255); ret = pwc_set_video_mode(pdev, f->fmt.pix.width, f->fmt.pix.height, - pdev->vframes, &compression); + pixelformat, 30, &compression, 0); PWC_DEBUG_IOCTL("pwc_set_video_mode(), return=%d\n", ret); - if (ret == 0) { - pdev->pixfmt = pixelformat; - pwc_vidioc_fill_fmt(f, pdev->width, pdev->height, - pdev->pixfmt); - } - + pwc_vidioc_fill_fmt(f, pdev->width, pdev->height, pdev->pixfmt); leave: mutex_unlock(&pdev->udevlock); return ret; @@ -777,33 +772,33 @@ static int pwc_set_autogain_expo(struct pwc_device *pdev) static int pwc_set_motor(struct pwc_device *pdev) { int ret; - u8 buf[4]; - buf[0] = 0; + pdev->ctrl_buf[0] = 0; if (pdev->motor_pan_reset->is_new) - buf[0] |= 0x01; + pdev->ctrl_buf[0] |= 0x01; if (pdev->motor_tilt_reset->is_new) - buf[0] |= 0x02; + pdev->ctrl_buf[0] |= 0x02; if (pdev->motor_pan_reset->is_new || pdev->motor_tilt_reset->is_new) { ret = send_control_msg(pdev, SET_MPT_CTL, - PT_RESET_CONTROL_FORMATTER, buf, 1); + PT_RESET_CONTROL_FORMATTER, + pdev->ctrl_buf, 1); if (ret < 0) return ret; } - memset(buf, 0, sizeof(buf)); + memset(pdev->ctrl_buf, 0, 4); if (pdev->motor_pan->is_new) { - buf[0] = pdev->motor_pan->val & 0xFF; - buf[1] = (pdev->motor_pan->val >> 8); + pdev->ctrl_buf[0] = pdev->motor_pan->val & 0xFF; + pdev->ctrl_buf[1] = (pdev->motor_pan->val >> 8); } if (pdev->motor_tilt->is_new) { - buf[2] = pdev->motor_tilt->val & 0xFF; - buf[3] = (pdev->motor_tilt->val >> 8); + pdev->ctrl_buf[2] = pdev->motor_tilt->val & 0xFF; + pdev->ctrl_buf[3] = (pdev->motor_tilt->val >> 8); } if (pdev->motor_pan->is_new || pdev->motor_tilt->is_new) { ret = send_control_msg(pdev, SET_MPT_CTL, PT_RELATIVE_CONTROL_FORMATTER, 
- buf, sizeof(buf)); + pdev->ctrl_buf, 4); if (ret < 0) return ret; } @@ -1094,6 +1089,63 @@ static int pwc_enum_frameintervals(struct file *file, void *fh, return 0; } +static int pwc_g_parm(struct file *file, void *fh, + struct v4l2_streamparm *parm) +{ + struct pwc_device *pdev = video_drvdata(file); + + if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + + memset(parm, 0, sizeof(*parm)); + + parm->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + parm->parm.capture.readbuffers = MIN_FRAMES; + parm->parm.capture.capability |= V4L2_CAP_TIMEPERFRAME; + parm->parm.capture.timeperframe.denominator = pdev->vframes; + parm->parm.capture.timeperframe.numerator = 1; + + return 0; +} + +static int pwc_s_parm(struct file *file, void *fh, + struct v4l2_streamparm *parm) +{ + struct pwc_device *pdev = video_drvdata(file); + int compression = 0; + int ret, fps; + + if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || + parm->parm.capture.timeperframe.numerator == 0) + return -EINVAL; + + if (pwc_test_n_set_capt_file(pdev, file)) + return -EBUSY; + + fps = parm->parm.capture.timeperframe.denominator / + parm->parm.capture.timeperframe.numerator; + + mutex_lock(&pdev->udevlock); + if (!pdev->udev) { + ret = -ENODEV; + goto leave; + } + + if (pdev->iso_init) { + ret = -EBUSY; + goto leave; + } + + ret = pwc_set_video_mode(pdev, pdev->width, pdev->height, pdev->pixfmt, + fps, &compression, 0); + + pwc_g_parm(file, fh, parm); + +leave: + mutex_unlock(&pdev->udevlock); + return ret; +} + static int pwc_log_status(struct file *file, void *priv) { struct pwc_device *pdev = video_drvdata(file); @@ -1120,4 +1172,6 @@ const struct v4l2_ioctl_ops pwc_ioctl_ops = { .vidioc_log_status = pwc_log_status, .vidioc_enum_framesizes = pwc_enum_framesizes, .vidioc_enum_frameintervals = pwc_enum_frameintervals, + .vidioc_g_parm = pwc_g_parm, + .vidioc_s_parm = pwc_s_parm, }; diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h index 47c518f..e4d4d71 100644 --- a/drivers/media/video/pwc/pwc.h +++ b/drivers/media/video/pwc/pwc.h @@ -44,6 +44,8 @@ #ifdef CONFIG_USB_PWC_INPUT_EVDEV #include <linux/input.h> #endif +#include "pwc-dec1.h" +#include "pwc-dec23.h" /* Version block */ #define PWC_VERSION "10.0.15" @@ -132,9 +134,6 @@ #define DEVICE_USE_CODEC3(x) ((x)>=700) #define DEVICE_USE_CODEC23(x) ((x)>=675) -/* from pwc-dec.h */ -#define PWCX_FLAG_PLANAR 0x0001 - /* Request types: video */ #define SET_LUM_CTL 0x01 #define GET_LUM_CTL 0x02 @@ -248,8 +247,8 @@ struct pwc_device char vmirror; /* for ToUCaM series */ char power_save; /* Do powersaving for this cam */ - int cmd_len; unsigned char cmd_buf[13]; + unsigned char *ctrl_buf; struct urb *urbs[MAX_ISO_BUFS]; char iso_init; @@ -272,7 +271,10 @@ struct pwc_device int frame_total_size; /* including header & trailer */ int drop_frames; - void *decompress_data; /* private data for decompression engine */ + union { /* private data for decompression engine */ + struct pwc_dec1_private dec1; + struct pwc_dec23_private dec23; + }; /* * We have an 'image' and a 'view', where 'image' is the fixed-size img @@ -364,7 +366,7 @@ void pwc_construct(struct pwc_device *pdev); /** Functions in pwc-ctrl.c */ /* Request a certain video mode. 
Returns < 0 if not possible */ extern int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, - int frames, int *compression); + int pixfmt, int frames, int *compression, int send_to_cam); extern unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned int size); extern int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value); extern int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor); diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c index 510cfab..a9e9653 100644 --- a/drivers/media/video/s5p-fimc/fimc-capture.c +++ b/drivers/media/video/s5p-fimc/fimc-capture.c @@ -693,7 +693,7 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx, mf->code = 0; continue; } - if (mf->width != tfmt->width || mf->width != tfmt->width) { + if (mf->width != tfmt->width || mf->height != tfmt->height) { u32 fcc = ffmt->fourcc; tfmt->width = mf->width; tfmt->height = mf->height; @@ -702,7 +702,8 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx, NULL, &fcc, FIMC_SD_PAD_SOURCE); if (ffmt && ffmt->mbus_code) mf->code = ffmt->mbus_code; - if (mf->width != tfmt->width || mf->width != tfmt->width) + if (mf->width != tfmt->width || + mf->height != tfmt->height) continue; tfmt->code = mf->code; } @@ -710,7 +711,7 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx, ret = v4l2_subdev_call(csis, pad, set_fmt, NULL, &sfmt); if (mf->code == tfmt->code && - mf->width == tfmt->width && mf->width == tfmt->width) + mf->width == tfmt->width && mf->height == tfmt->height) break; } diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c index f5cbb8a..81bcbb9 100644 --- a/drivers/media/video/s5p-fimc/fimc-core.c +++ b/drivers/media/video/s5p-fimc/fimc-core.c @@ -848,11 +848,11 @@ int fimc_ctrls_create(struct fimc_ctx *ctx) v4l2_ctrl_handler_init(&ctx->ctrl_handler, 4); ctx->ctrl_rotate = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops, - V4L2_CID_HFLIP, 0, 1, 1, 0); + V4L2_CID_ROTATE, 0, 270, 90, 0); ctx->ctrl_hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops, - V4L2_CID_VFLIP, 0, 1, 1, 0); + V4L2_CID_HFLIP, 0, 1, 1, 0); ctx->ctrl_vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops, - V4L2_CID_ROTATE, 0, 270, 90, 0); + V4L2_CID_VFLIP, 0, 1, 1, 0); if (variant->has_alpha) ctx->ctrl_alpha = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops, V4L2_CID_ALPHA_COMPONENT, diff --git a/drivers/media/video/s5p-fimc/fimc-mdevice.c b/drivers/media/video/s5p-fimc/fimc-mdevice.c index 615c862..8ea4ee1 100644 --- a/drivers/media/video/s5p-fimc/fimc-mdevice.c +++ b/drivers/media/video/s5p-fimc/fimc-mdevice.c @@ -21,7 +21,6 @@ #include <linux/pm_runtime.h> #include <linux/types.h> #include <linux/slab.h> -#include <linux/version.h> #include <media/v4l2-ctrls.h> #include <media/media-device.h> diff --git a/drivers/media/video/s5p-g2d/g2d.c b/drivers/media/video/s5p-g2d/g2d.c index c40b0dd..febaa67 100644 --- a/drivers/media/video/s5p-g2d/g2d.c +++ b/drivers/media/video/s5p-g2d/g2d.c @@ -184,6 +184,7 @@ static int g2d_s_ctrl(struct v4l2_ctrl *ctrl) ctx->rop = ROP4_INVERT; else ctx->rop = ROP4_COPY; + break; default: v4l2_err(&ctx->dev->v4l2_dev, "unknown control\n"); return -EINVAL; diff --git a/drivers/media/video/s5p-jpeg/jpeg-core.c b/drivers/media/video/s5p-jpeg/jpeg-core.c index f841a3e..1105a87 100644 --- a/drivers/media/video/s5p-jpeg/jpeg-core.c +++ b/drivers/media/video/s5p-jpeg/jpeg-core.c @@ -989,9 +989,10 @@ static struct 
v4l2_m2m_ops s5p_jpeg_m2m_ops = { * ============================================================================ */ -static int s5p_jpeg_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, - unsigned int *nplanes, unsigned int sizes[], - void *alloc_ctxs[]) +static int s5p_jpeg_queue_setup(struct vb2_queue *vq, + const struct v4l2_format *fmt, + unsigned int *nbuffers, unsigned int *nplanes, + unsigned int sizes[], void *alloc_ctxs[]) { struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq); struct s5p_jpeg_q_data *q_data = NULL; diff --git a/drivers/media/video/s5p-mfc/s5p_mfc.c b/drivers/media/video/s5p-mfc/s5p_mfc.c index e43e128..83fe461 100644 --- a/drivers/media/video/s5p-mfc/s5p_mfc.c +++ b/drivers/media/video/s5p-mfc/s5p_mfc.c @@ -18,7 +18,6 @@ #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/slab.h> -#include <linux/version.h> #include <linux/videodev2.h> #include <linux/workqueue.h> #include <media/videobuf2-core.h> @@ -475,7 +474,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx, ctx->mv_size = 0; } ctx->dpb_count = s5p_mfc_get_dpb_count(); - if (ctx->img_width == 0 || ctx->img_width == 0) + if (ctx->img_width == 0 || ctx->img_height == 0) ctx->state = MFCINST_ERROR; else ctx->state = MFCINST_HEAD_PARSED; diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c index 844a4d7..c25ec02 100644 --- a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c +++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c @@ -165,7 +165,7 @@ static struct mfc_control controls[] = { .maximum = 32, .step = 1, .default_value = 1, - .flags = V4L2_CTRL_FLAG_VOLATILE, + .is_volatile = 1, }, }; diff --git a/drivers/media/video/saa7164/saa7164-cards.c b/drivers/media/video/saa7164/saa7164-cards.c index 971591d..5b72da5 100644 --- a/drivers/media/video/saa7164/saa7164-cards.c +++ b/drivers/media/video/saa7164/saa7164-cards.c @@ -269,8 +269,6 @@ struct saa7164_board saa7164_boards[] = { .portb = SAA7164_MPEG_DVB, .portc = SAA7164_MPEG_ENCODER, .portd = SAA7164_MPEG_ENCODER, - .portc = SAA7164_MPEG_ENCODER, - .portd = SAA7164_MPEG_ENCODER, .porte = SAA7164_MPEG_VBI, .portf = SAA7164_MPEG_VBI, .chiprev = SAA7164_CHIP_REV3, @@ -333,8 +331,6 @@ struct saa7164_board saa7164_boards[] = { .portd = SAA7164_MPEG_ENCODER, .porte = SAA7164_MPEG_VBI, .portf = SAA7164_MPEG_VBI, - .porte = SAA7164_MPEG_VBI, - .portf = SAA7164_MPEG_VBI, .chiprev = SAA7164_CHIP_REV3, .unit = {{ .id = 0x28, diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c index 0a2d75f..4ed1c7c2 100644 --- a/drivers/media/video/timblogiw.c +++ b/drivers/media/video/timblogiw.c @@ -565,7 +565,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) spin_unlock_irq(&fh->queue_lock); desc = fh->chan->device->device_prep_slave_sg(fh->chan, - buf->sg, sg_elems, DMA_FROM_DEVICE, + buf->sg, sg_elems, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); if (!desc) { spin_lock_irq(&fh->queue_lock); diff --git a/drivers/media/video/tlg2300/pd-main.c b/drivers/media/video/tlg2300/pd-main.c index 129f135..c096b3f 100644 --- a/drivers/media/video/tlg2300/pd-main.c +++ b/drivers/media/video/tlg2300/pd-main.c @@ -374,7 +374,7 @@ static inline void set_map_flags(struct poseidon *pd, struct usb_device *udev) } #endif -static bool check_firmware(struct usb_device *udev, int *down_firmware) +static int check_firmware(struct usb_device *udev, int *down_firmware) { void *buf; int ret; @@ -398,7 +398,7 @@ static bool check_firmware(struct 
usb_device *udev, int *down_firmware) *down_firmware = 1; return firmware_download(udev); } - return ret; + return 0; } static int poseidon_probe(struct usb_interface *interface, diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c index da1f4c2..cccd42b 100644 --- a/drivers/media/video/v4l2-ctrls.c +++ b/drivers/media/video/v4l2-ctrls.c @@ -465,8 +465,8 @@ const char *v4l2_ctrl_get_name(u32 id) case V4L2_CID_CHROMA_GAIN: return "Chroma Gain"; case V4L2_CID_ILLUMINATORS_1: return "Illuminator 1"; case V4L2_CID_ILLUMINATORS_2: return "Illuminator 2"; - case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Minimum Number of Capture Buffers"; - case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Minimum Number of Output Buffers"; + case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Min Number of Capture Buffers"; + case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Min Number of Output Buffers"; case V4L2_CID_ALPHA_COMPONENT: return "Alpha Component"; /* MPEG controls */ @@ -506,25 +506,25 @@ const char *v4l2_ctrl_get_name(u32 id) case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV"; case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: return "Decoder Slice Interface"; case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: return "MPEG4 Loop Filter Enable"; - case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "The Number of Intra Refresh MBs"; + case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "Number of Intra Refresh MBs"; case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: return "Frame Level Rate Control Enable"; case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: return "H264 MB Level Rate Control"; case V4L2_CID_MPEG_VIDEO_HEADER_MODE: return "Sequence Header Mode"; - case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "The Max Number of Reference Picture"; + case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "Max Number of Reference Pics"; case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP: return "H263 I-Frame QP Value"; - case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P frame QP Value"; - case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B frame QP Value"; + case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_H263_MIN_QP: return "H263 Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_H263_MAX_QP: return "H263 Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: return "H264 I-Frame QP Value"; - case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P frame QP Value"; - case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B frame QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: return "H264 Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: return "H264 Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: return "H264 8x8 Transform Enable"; case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE: return "H264 CPB Buffer Size"; - case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entorpy Mode"; - case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I Period"; + case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entropy Mode"; + case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I-Frame Period"; case V4L2_CID_MPEG_VIDEO_H264_LEVEL: return "H264 Level"; case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: return "H264 Loop Filter Alpha Offset"; case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: return "H264 Loop 
Filter Beta Offset"; @@ -535,16 +535,16 @@ const char *v4l2_ctrl_get_name(u32 id) case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: return "Aspect Ratio VUI Enable"; case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return "VUI Aspect Ratio IDC"; case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value"; - case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P frame QP Value"; - case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B frame QP Value"; + case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P-Frame QP Value"; + case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP: return "MPEG4 Minimum QP Value"; case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP: return "MPEG4 Maximum QP Value"; case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: return "MPEG4 Level"; case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: return "MPEG4 Profile"; case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: return "Quarter Pixel Search Enable"; - case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "The Maximum Bytes Per Slice"; - case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "The Number of MB in a Slice"; - case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "The Slice Partitioning Method"; + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "Maximum Bytes in a Slice"; + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "Number of MBs in a Slice"; + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "Slice Partitioning Method"; case V4L2_CID_MPEG_VIDEO_VBV_SIZE: return "VBV Buffer Size"; /* CAMERA controls */ @@ -580,7 +580,7 @@ const char *v4l2_ctrl_get_name(u32 id) case V4L2_CID_AUDIO_LIMITER_ENABLED: return "Audio Limiter Feature Enabled"; case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time"; case V4L2_CID_AUDIO_LIMITER_DEVIATION: return "Audio Limiter Deviation"; - case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Feature Enabled"; + case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Enabled"; case V4L2_CID_AUDIO_COMPRESSION_GAIN: return "Audio Compression Gain"; case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold"; case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time"; @@ -588,24 +588,24 @@ const char *v4l2_ctrl_get_name(u32 id) case V4L2_CID_PILOT_TONE_ENABLED: return "Pilot Tone Feature Enabled"; case V4L2_CID_PILOT_TONE_DEVIATION: return "Pilot Tone Deviation"; case V4L2_CID_PILOT_TONE_FREQUENCY: return "Pilot Tone Frequency"; - case V4L2_CID_TUNE_PREEMPHASIS: return "Pre-emphasis settings"; + case V4L2_CID_TUNE_PREEMPHASIS: return "Pre-Emphasis"; case V4L2_CID_TUNE_POWER_LEVEL: return "Tune Power Level"; case V4L2_CID_TUNE_ANTENNA_CAPACITOR: return "Tune Antenna Capacitor"; /* Flash controls */ - case V4L2_CID_FLASH_CLASS: return "Flash controls"; - case V4L2_CID_FLASH_LED_MODE: return "LED mode"; - case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe source"; + case V4L2_CID_FLASH_CLASS: return "Flash Controls"; + case V4L2_CID_FLASH_LED_MODE: return "LED Mode"; + case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe Source"; case V4L2_CID_FLASH_STROBE: return "Strobe"; - case V4L2_CID_FLASH_STROBE_STOP: return "Stop strobe"; - case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe status"; - case V4L2_CID_FLASH_TIMEOUT: return "Strobe timeout"; - case V4L2_CID_FLASH_INTENSITY: return "Intensity, flash mode"; - case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, torch mode"; - case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, indicator"; + case 
V4L2_CID_FLASH_STROBE_STOP: return "Stop Strobe"; + case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe Status"; + case V4L2_CID_FLASH_TIMEOUT: return "Strobe Timeout"; + case V4L2_CID_FLASH_INTENSITY: return "Intensity, Flash Mode"; + case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, Torch Mode"; + case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, Indicator"; case V4L2_CID_FLASH_FAULT: return "Faults"; case V4L2_CID_FLASH_CHARGE: return "Charge"; - case V4L2_CID_FLASH_READY: return "Ready to strobe"; + case V4L2_CID_FLASH_READY: return "Ready to Strobe"; default: return NULL; diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c index 77feeb6..3f62385 100644 --- a/drivers/media/video/v4l2-ioctl.c +++ b/drivers/media/video/v4l2-ioctl.c @@ -1871,6 +1871,7 @@ static long __video_do_ioctl(struct file *file, case VIDIOC_S_FREQUENCY: { struct v4l2_frequency *p = arg; + enum v4l2_tuner_type type; if (!ops->vidioc_s_frequency) break; @@ -1878,9 +1879,14 @@ static long __video_do_ioctl(struct file *file, ret = ret_prio; break; } + type = (vfd->vfl_type == VFL_TYPE_RADIO) ? + V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; dbgarg(cmd, "tuner=%d, type=%d, frequency=%d\n", p->tuner, p->type, p->frequency); - ret = ops->vidioc_s_frequency(file, fh, p); + if (p->type != type) + ret = -EINVAL; + else + ret = ops->vidioc_s_frequency(file, fh, p); break; } case VIDIOC_G_SLICED_VBI_CAP: diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c index f6d2641..4c09ab7 100644 --- a/drivers/media/video/zoran/zoran_driver.c +++ b/drivers/media/video/zoran/zoran_driver.c @@ -1958,7 +1958,6 @@ static int zoran_g_fbuf(struct file *file, void *__fh, mutex_unlock(&zr->resource_lock); fb->fmt.colorspace = V4L2_COLORSPACE_SRGB; fb->fmt.field = V4L2_FIELD_INTERLACED; - fb->flags = V4L2_FBUF_FLAG_OVERLAY; fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING; return 0; diff --git a/drivers/mfd/mcp-core.c b/drivers/mfd/mcp-core.c index 63be60b..86cc3f7 100644 --- a/drivers/mfd/mcp-core.c +++ b/drivers/mfd/mcp-core.c @@ -26,35 +26,9 @@ #define to_mcp(d) container_of(d, struct mcp, attached_device) #define to_mcp_driver(d) container_of(d, struct mcp_driver, drv) -static const struct mcp_device_id *mcp_match_id(const struct mcp_device_id *id, - const char *codec) -{ - while (id->name[0]) { - if (strcmp(codec, id->name) == 0) - return id; - id++; - } - return NULL; -} - -const struct mcp_device_id *mcp_get_device_id(const struct mcp *mcp) -{ - const struct mcp_driver *driver = - to_mcp_driver(mcp->attached_device.driver); - - return mcp_match_id(driver->id_table, mcp->codec); -} -EXPORT_SYMBOL(mcp_get_device_id); - static int mcp_bus_match(struct device *dev, struct device_driver *drv) { - const struct mcp *mcp = to_mcp(dev); - const struct mcp_driver *driver = to_mcp_driver(drv); - - if (driver->id_table) - return !!mcp_match_id(driver->id_table, mcp->codec); - - return 0; + return 1; } static int mcp_bus_probe(struct device *dev) @@ -100,18 +74,9 @@ static int mcp_bus_resume(struct device *dev) return ret; } -static int mcp_bus_uevent(struct device *dev, struct kobj_uevent_env *env) -{ - struct mcp *mcp = to_mcp(dev); - - add_uevent_var(env, "MODALIAS=%s%s", MCP_MODULE_PREFIX, mcp->codec); - return 0; -} - static struct bus_type mcp_bus_type = { .name = "mcp", .match = mcp_bus_match, - .uevent = mcp_bus_uevent, .probe = mcp_bus_probe, .remove = mcp_bus_remove, .suspend = mcp_bus_suspend, @@ -128,9 +93,11 @@ static struct bus_type mcp_bus_type = { */ void 
mcp_set_telecom_divisor(struct mcp *mcp, unsigned int div) { - spin_lock_irq(&mcp->lock); + unsigned long flags; + + spin_lock_irqsave(&mcp->lock, flags); mcp->ops->set_telecom_divisor(mcp, div); - spin_unlock_irq(&mcp->lock); + spin_unlock_irqrestore(&mcp->lock, flags); } EXPORT_SYMBOL(mcp_set_telecom_divisor); @@ -143,9 +110,11 @@ EXPORT_SYMBOL(mcp_set_telecom_divisor); */ void mcp_set_audio_divisor(struct mcp *mcp, unsigned int div) { - spin_lock_irq(&mcp->lock); + unsigned long flags; + + spin_lock_irqsave(&mcp->lock, flags); mcp->ops->set_audio_divisor(mcp, div); - spin_unlock_irq(&mcp->lock); + spin_unlock_irqrestore(&mcp->lock, flags); } EXPORT_SYMBOL(mcp_set_audio_divisor); @@ -198,10 +167,11 @@ EXPORT_SYMBOL(mcp_reg_read); */ void mcp_enable(struct mcp *mcp) { - spin_lock_irq(&mcp->lock); + unsigned long flags; + spin_lock_irqsave(&mcp->lock, flags); if (mcp->use_count++ == 0) mcp->ops->enable(mcp); - spin_unlock_irq(&mcp->lock); + spin_unlock_irqrestore(&mcp->lock, flags); } EXPORT_SYMBOL(mcp_enable); @@ -247,14 +217,9 @@ struct mcp *mcp_host_alloc(struct device *parent, size_t size) } EXPORT_SYMBOL(mcp_host_alloc); -int mcp_host_register(struct mcp *mcp, void *pdata) +int mcp_host_register(struct mcp *mcp) { - if (!mcp->codec) - return -EINVAL; - - mcp->attached_device.platform_data = pdata; dev_set_name(&mcp->attached_device, "mcp0"); - request_module("%s%s", MCP_MODULE_PREFIX, mcp->codec); return device_register(&mcp->attached_device); } EXPORT_SYMBOL(mcp_host_register); diff --git a/drivers/mfd/mcp-sa11x0.c b/drivers/mfd/mcp-sa11x0.c index 9adc2eb..02c53a0 100644 --- a/drivers/mfd/mcp-sa11x0.c +++ b/drivers/mfd/mcp-sa11x0.c @@ -19,7 +19,6 @@ #include <linux/spinlock.h> #include <linux/platform_device.h> #include <linux/mfd/mcp.h> -#include <linux/io.h> #include <mach/dma.h> #include <mach/hardware.h> @@ -27,19 +26,12 @@ #include <asm/system.h> #include <mach/mcp.h> -/* Register offsets */ -#define MCCR0 0x00 -#define MCDR0 0x08 -#define MCDR1 0x0C -#define MCDR2 0x10 -#define MCSR 0x18 -#define MCCR1 0x00 +#include <mach/assabet.h> + struct mcp_sa11x0 { - u32 mccr0; - u32 mccr1; - unsigned char *mccr0_base; - unsigned char *mccr1_base; + u32 mccr0; + u32 mccr1; }; #define priv(mcp) ((struct mcp_sa11x0 *)mcp_priv(mcp)) @@ -47,25 +39,25 @@ struct mcp_sa11x0 { static void mcp_sa11x0_set_telecom_divisor(struct mcp *mcp, unsigned int divisor) { - struct mcp_sa11x0 *priv = priv(mcp); + unsigned int mccr0; divisor /= 32; - priv->mccr0 &= ~0x00007f00; - priv->mccr0 |= divisor << 8; - __raw_writel(priv->mccr0, priv->mccr0_base + MCCR0); + mccr0 = Ser4MCCR0 & ~0x00007f00; + mccr0 |= divisor << 8; + Ser4MCCR0 = mccr0; } static void mcp_sa11x0_set_audio_divisor(struct mcp *mcp, unsigned int divisor) { - struct mcp_sa11x0 *priv = priv(mcp); + unsigned int mccr0; divisor /= 32; - priv->mccr0 &= ~0x0000007f; - priv->mccr0 |= divisor; - __raw_writel(priv->mccr0, priv->mccr0_base + MCCR0); + mccr0 = Ser4MCCR0 & ~0x0000007f; + mccr0 |= divisor; + Ser4MCCR0 = mccr0; } /* @@ -79,16 +71,12 @@ mcp_sa11x0_write(struct mcp *mcp, unsigned int reg, unsigned int val) { int ret = -ETIME; int i; - u32 mcpreg; - struct mcp_sa11x0 *priv = priv(mcp); - mcpreg = reg << 17 | MCDR2_Wr | (val & 0xffff); - __raw_writel(mcpreg, priv->mccr0_base + MCDR2); + Ser4MCDR2 = reg << 17 | MCDR2_Wr | (val & 0xffff); for (i = 0; i < 2; i++) { udelay(mcp->rw_timeout); - mcpreg = __raw_readl(priv->mccr0_base + MCSR); - if (mcpreg & MCSR_CWC) { + if (Ser4MCSR & MCSR_CWC) { ret = 0; break; } @@ -109,18 +97,13 @@ 
mcp_sa11x0_read(struct mcp *mcp, unsigned int reg) { int ret = -ETIME; int i; - u32 mcpreg; - struct mcp_sa11x0 *priv = priv(mcp); - mcpreg = reg << 17 | MCDR2_Rd; - __raw_writel(mcpreg, priv->mccr0_base + MCDR2); + Ser4MCDR2 = reg << 17 | MCDR2_Rd; for (i = 0; i < 2; i++) { udelay(mcp->rw_timeout); - mcpreg = __raw_readl(priv->mccr0_base + MCSR); - if (mcpreg & MCSR_CRC) { - ret = __raw_readl(priv->mccr0_base + MCDR2) - & 0xffff; + if (Ser4MCSR & MCSR_CRC) { + ret = Ser4MCDR2 & 0xffff; break; } } @@ -133,19 +116,13 @@ mcp_sa11x0_read(struct mcp *mcp, unsigned int reg) static void mcp_sa11x0_enable(struct mcp *mcp) { - struct mcp_sa11x0 *priv = priv(mcp); - - __raw_writel(-1, priv->mccr0_base + MCSR); - priv->mccr0 |= MCCR0_MCE; - __raw_writel(priv->mccr0, priv->mccr0_base + MCCR0); + Ser4MCSR = -1; + Ser4MCCR0 |= MCCR0_MCE; } static void mcp_sa11x0_disable(struct mcp *mcp) { - struct mcp_sa11x0 *priv = priv(mcp); - - priv->mccr0 &= ~MCCR0_MCE; - __raw_writel(priv->mccr0, priv->mccr0_base + MCCR0); + Ser4MCCR0 &= ~MCCR0_MCE; } /* @@ -165,69 +142,50 @@ static int mcp_sa11x0_probe(struct platform_device *pdev) struct mcp_plat_data *data = pdev->dev.platform_data; struct mcp *mcp; int ret; - struct mcp_sa11x0 *priv; - struct resource *res_mem0, *res_mem1; - u32 size0, size1; if (!data) return -ENODEV; - if (!data->codec) - return -ENODEV; - - res_mem0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res_mem0) - return -ENODEV; - size0 = res_mem0->end - res_mem0->start + 1; - - res_mem1 = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!res_mem1) - return -ENODEV; - size1 = res_mem1->end - res_mem1->start + 1; - - if (!request_mem_region(res_mem0->start, size0, "sa11x0-mcp")) + if (!request_mem_region(0x80060000, 0x60, "sa11x0-mcp")) return -EBUSY; - if (!request_mem_region(res_mem1->start, size1, "sa11x0-mcp")) { - ret = -EBUSY; - goto release; - } - mcp = mcp_host_alloc(&pdev->dev, sizeof(struct mcp_sa11x0)); if (!mcp) { ret = -ENOMEM; - goto release2; + goto release; } - priv = priv(mcp); - mcp->owner = THIS_MODULE; mcp->ops = &mcp_sa11x0; mcp->sclk_rate = data->sclk_rate; - mcp->dma_audio_rd = DDAR_DevAdd(res_mem0->start + MCDR0) - + DDAR_DevRd + DDAR_Brst4 + DDAR_8BitDev; - mcp->dma_audio_wr = DDAR_DevAdd(res_mem0->start + MCDR0) - + DDAR_DevWr + DDAR_Brst4 + DDAR_8BitDev; - mcp->dma_telco_rd = DDAR_DevAdd(res_mem0->start + MCDR1) - + DDAR_DevRd + DDAR_Brst4 + DDAR_8BitDev; - mcp->dma_telco_wr = DDAR_DevAdd(res_mem0->start + MCDR1) - + DDAR_DevWr + DDAR_Brst4 + DDAR_8BitDev; - mcp->codec = data->codec; + mcp->dma_audio_rd = DMA_Ser4MCP0Rd; + mcp->dma_audio_wr = DMA_Ser4MCP0Wr; + mcp->dma_telco_rd = DMA_Ser4MCP1Rd; + mcp->dma_telco_wr = DMA_Ser4MCP1Wr; + mcp->gpio_base = data->gpio_base; platform_set_drvdata(pdev, mcp); + if (machine_is_assabet()) { + ASSABET_BCR_set(ASSABET_BCR_CODEC_RST); + } + + /* + * Setup the PPC unit correctly. + */ + PPDR &= ~PPC_RXD4; + PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM; + PSDR |= PPC_RXD4; + PSDR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM); + PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM); + /* * Initialise device. Note that we initially * set the sampling rate to minimum. 
*/ - priv->mccr0_base = ioremap(res_mem0->start, size0); - priv->mccr1_base = ioremap(res_mem1->start, size1); - - __raw_writel(-1, priv->mccr0_base + MCSR); - priv->mccr1 = data->mccr1; - priv->mccr0 = data->mccr0 | 0x7f7f; - __raw_writel(priv->mccr0, priv->mccr0_base + MCCR0); - __raw_writel(priv->mccr1, priv->mccr1_base + MCCR1); + Ser4MCSR = -1; + Ser4MCCR1 = data->mccr1; + Ser4MCCR0 = data->mccr0 | 0x7f7f; /* * Calculate the read/write timeout (us) from the bit clock @@ -237,53 +195,36 @@ static int mcp_sa11x0_probe(struct platform_device *pdev) mcp->rw_timeout = (64 * 3 * 1000000 + mcp->sclk_rate - 1) / mcp->sclk_rate; - ret = mcp_host_register(mcp, data->codec_pdata); + ret = mcp_host_register(mcp); if (ret == 0) goto out; - release2: - release_mem_region(res_mem1->start, size1); release: - release_mem_region(res_mem0->start, size0); + release_mem_region(0x80060000, 0x60); platform_set_drvdata(pdev, NULL); out: return ret; } -static int mcp_sa11x0_remove(struct platform_device *pdev) +static int mcp_sa11x0_remove(struct platform_device *dev) { - struct mcp *mcp = platform_get_drvdata(pdev); - struct mcp_sa11x0 *priv = priv(mcp); - struct resource *res_mem; - u32 size; + struct mcp *mcp = platform_get_drvdata(dev); - platform_set_drvdata(pdev, NULL); + platform_set_drvdata(dev, NULL); mcp_host_unregister(mcp); + release_mem_region(0x80060000, 0x60); - res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res_mem) { - size = res_mem->end - res_mem->start + 1; - release_mem_region(res_mem->start, size); - } - res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (res_mem) { - size = res_mem->end - res_mem->start + 1; - release_mem_region(res_mem->start, size); - } - iounmap(priv->mccr0_base); - iounmap(priv->mccr1_base); return 0; } static int mcp_sa11x0_suspend(struct platform_device *dev, pm_message_t state) { struct mcp *mcp = platform_get_drvdata(dev); - struct mcp_sa11x0 *priv = priv(mcp); - u32 mccr0; - mccr0 = priv->mccr0 & ~MCCR0_MCE; - __raw_writel(mccr0, priv->mccr0_base + MCCR0); + priv(mcp)->mccr0 = Ser4MCCR0; + priv(mcp)->mccr1 = Ser4MCCR1; + Ser4MCCR0 &= ~MCCR0_MCE; return 0; } @@ -291,10 +232,9 @@ static int mcp_sa11x0_suspend(struct platform_device *dev, pm_message_t state) static int mcp_sa11x0_resume(struct platform_device *dev) { struct mcp *mcp = platform_get_drvdata(dev); - struct mcp_sa11x0 *priv = priv(mcp); - __raw_writel(priv->mccr0, priv->mccr0_base + MCCR0); - __raw_writel(priv->mccr1, priv->mccr1_base + MCCR1); + Ser4MCCR1 = priv(mcp)->mccr1; + Ser4MCCR0 = priv(mcp)->mccr0; return 0; } @@ -311,7 +251,6 @@ static struct platform_driver mcp_sa11x0_driver = { .resume = mcp_sa11x0_resume, .driver = { .name = "sa11x0-mcp", - .owner = THIS_MODULE, }, }; diff --git a/drivers/mfd/twl6040-core.c b/drivers/mfd/twl6040-core.c index dda8629..b2d8e51 100644 --- a/drivers/mfd/twl6040-core.c +++ b/drivers/mfd/twl6040-core.c @@ -282,6 +282,7 @@ int twl6040_power(struct twl6040 *twl6040, int on) /* Default PLL configuration after power up */ twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL; twl6040->sysclk = 19200000; + twl6040->mclk = 32768; } else { /* already powered-down */ if (!twl6040->power_count) { @@ -305,6 +306,7 @@ int twl6040_power(struct twl6040 *twl6040, int on) twl6040_power_down(twl6040); } twl6040->sysclk = 0; + twl6040->mclk = 0; } out: @@ -324,23 +326,38 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id, hppllctl = twl6040_reg_read(twl6040, TWL6040_REG_HPPLLCTL); lppllctl = twl6040_reg_read(twl6040, TWL6040_REG_LPPLLCTL); + /* Force full 
reconfiguration when switching between PLLs */ + if (pll_id != twl6040->pll) { + twl6040->sysclk = 0; + twl6040->mclk = 0; + } + switch (pll_id) { case TWL6040_SYSCLK_SEL_LPPLL: /* low-power PLL divider */ - switch (freq_out) { - case 17640000: - lppllctl |= TWL6040_LPLLFIN; - break; - case 19200000: - lppllctl &= ~TWL6040_LPLLFIN; - break; - default: - dev_err(twl6040->dev, - "freq_out %d not supported\n", freq_out); - ret = -EINVAL; - goto pll_out; + /* Change the sysclk configuration only if it has been changed */ + if (twl6040->sysclk != freq_out) { + switch (freq_out) { + case 17640000: + lppllctl |= TWL6040_LPLLFIN; + break; + case 19200000: + lppllctl &= ~TWL6040_LPLLFIN; + break; + default: + dev_err(twl6040->dev, + "freq_out %d not supported\n", + freq_out); + ret = -EINVAL; + goto pll_out; + } + twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, + lppllctl); } - twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl); + + /* The PLL in use has not been changed, we can exit */ + if (twl6040->pll == pll_id) + break; switch (freq_in) { case 32768: @@ -371,48 +388,56 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id, goto pll_out; } - hppllctl &= ~TWL6040_MCLK_MSK; + if (twl6040->mclk != freq_in) { + hppllctl &= ~TWL6040_MCLK_MSK; + + switch (freq_in) { + case 12000000: + /* PLL enabled, active mode */ + hppllctl |= TWL6040_MCLK_12000KHZ | + TWL6040_HPLLENA; + break; + case 19200000: + /* + * PLL disabled + * (enable PLL if MCLK jitter quality + * doesn't meet specification) + */ + hppllctl |= TWL6040_MCLK_19200KHZ; + break; + case 26000000: + /* PLL enabled, active mode */ + hppllctl |= TWL6040_MCLK_26000KHZ | + TWL6040_HPLLENA; + break; + case 38400000: + /* PLL enabled, active mode */ + hppllctl |= TWL6040_MCLK_38400KHZ | + TWL6040_HPLLENA; + break; + default: + dev_err(twl6040->dev, + "freq_in %d not supported\n", freq_in); + ret = -EINVAL; + goto pll_out; + } - switch (freq_in) { - case 12000000: - /* PLL enabled, active mode */ - hppllctl |= TWL6040_MCLK_12000KHZ | - TWL6040_HPLLENA; - break; - case 19200000: /* - * PLL disabled - * (enable PLL if MCLK jitter quality - * doesn't meet specification) + * enable clock slicer to ensure input waveform is + * square */ - hppllctl |= TWL6040_MCLK_19200KHZ; - break; - case 26000000: - /* PLL enabled, active mode */ - hppllctl |= TWL6040_MCLK_26000KHZ | - TWL6040_HPLLENA; - break; - case 38400000: - /* PLL enabled, active mode */ - hppllctl |= TWL6040_MCLK_38400KHZ | - TWL6040_HPLLENA; - break; - default: - dev_err(twl6040->dev, - "freq_in %d not supported\n", freq_in); - ret = -EINVAL; - goto pll_out; - } + hppllctl |= TWL6040_HPLLSQRENA; - /* enable clock slicer to ensure input waveform is square */ - hppllctl |= TWL6040_HPLLSQRENA; - - twl6040_reg_write(twl6040, TWL6040_REG_HPPLLCTL, hppllctl); - usleep_range(500, 700); - lppllctl |= TWL6040_HPLLSEL; - twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl); - lppllctl &= ~TWL6040_LPLLENA; - twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl); + twl6040_reg_write(twl6040, TWL6040_REG_HPPLLCTL, + hppllctl); + usleep_range(500, 700); + lppllctl |= TWL6040_HPLLSEL; + twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, + lppllctl); + lppllctl &= ~TWL6040_LPLLENA; + twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, + lppllctl); + } break; default: dev_err(twl6040->dev, "unknown pll id %d\n", pll_id); @@ -421,6 +446,7 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id, } twl6040->sysclk = freq_out; + twl6040->mclk = freq_in; twl6040->pll = pll_id; pll_out: diff --git
a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c index 91c4f25..febc90c 100644 --- a/drivers/mfd/ucb1x00-core.c +++ b/drivers/mfd/ucb1x00-core.c @@ -36,15 +36,6 @@ static DEFINE_MUTEX(ucb1x00_mutex); static LIST_HEAD(ucb1x00_drivers); static LIST_HEAD(ucb1x00_devices); -static struct mcp_device_id ucb1x00_id[] = { - { "ucb1x00", 0 }, /* auto-detection */ - { "ucb1200", UCB_ID_1200 }, - { "ucb1300", UCB_ID_1300 }, - { "tc35143", UCB_ID_TC35143 }, - { } -}; -MODULE_DEVICE_TABLE(mcp, ucb1x00_id); - /** * ucb1x00_io_set_dir - set IO direction * @ucb: UCB1x00 structure describing chip @@ -157,16 +148,22 @@ static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset { struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio); unsigned long flags; + unsigned old, mask = 1 << offset; spin_lock_irqsave(&ucb->io_lock, flags); - ucb->io_dir |= (1 << offset); - ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); - + old = ucb->io_out; if (value) - ucb->io_out |= 1 << offset; + ucb->io_out |= mask; else - ucb->io_out &= ~(1 << offset); - ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out); + ucb->io_out &= ~mask; + + if (old != ucb->io_out) + ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out); + + if (!(ucb->io_dir & mask)) { + ucb->io_dir |= mask; + ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); + } spin_unlock_irqrestore(&ucb->io_lock, flags); return 0; @@ -536,33 +533,17 @@ static struct class ucb1x00_class = { static int ucb1x00_probe(struct mcp *mcp) { - const struct mcp_device_id *mid; struct ucb1x00 *ucb; struct ucb1x00_driver *drv; - struct ucb1x00_plat_data *pdata; unsigned int id; int ret = -ENODEV; int temp; mcp_enable(mcp); id = mcp_reg_read(mcp, UCB_ID); - mid = mcp_get_device_id(mcp); - if (mid && mid->driver_data) { - if (id != mid->driver_data) { - printk(KERN_WARNING "%s wrong ID %04x found: %04x\n", - mid->name, (unsigned int) mid->driver_data, id); - goto err_disable; - } - } else { - mid = &ucb1x00_id[1]; - while (mid->driver_data) { - if (id == mid->driver_data) - break; - mid++; - } - printk(KERN_WARNING "%s ID not found: %04x\n", - ucb1x00_id[0].name, id); + if (id != UCB_ID_1200 && id != UCB_ID_1300 && id != UCB_ID_TC35143) { + printk(KERN_WARNING "UCB1x00 ID not found: %04x\n", id); goto err_disable; } @@ -571,28 +552,28 @@ static int ucb1x00_probe(struct mcp *mcp) if (!ucb) goto err_disable; - pdata = mcp->attached_device.platform_data; + ucb->dev.class = &ucb1x00_class; ucb->dev.parent = &mcp->attached_device; - dev_set_name(&ucb->dev, mid->name); + dev_set_name(&ucb->dev, "ucb1x00"); spin_lock_init(&ucb->lock); spin_lock_init(&ucb->io_lock); sema_init(&ucb->adc_sem, 1); - ucb->id = mid; + ucb->id = id; ucb->mcp = mcp; ucb->irq = ucb1x00_detect_irq(ucb); if (ucb->irq == NO_IRQ) { - printk(KERN_ERR "%s: IRQ probe failed\n", mid->name); + printk(KERN_ERR "UCB1x00: IRQ probe failed\n"); ret = -ENODEV; goto err_free; } ucb->gpio.base = -1; - if (pdata && (pdata->gpio_base >= 0)) { + if (mcp->gpio_base != 0) { ucb->gpio.label = dev_name(&ucb->dev); - ucb->gpio.base = pdata->gpio_base; + ucb->gpio.base = mcp->gpio_base; ucb->gpio.ngpio = 10; ucb->gpio.set = ucb1x00_gpio_set; ucb->gpio.get = ucb1x00_gpio_get; @@ -605,10 +586,10 @@ static int ucb1x00_probe(struct mcp *mcp) dev_info(&ucb->dev, "gpio_base not set so no gpiolib support"); ret = request_irq(ucb->irq, ucb1x00_irq, IRQF_TRIGGER_RISING, - mid->name, ucb); + "UCB1x00", ucb); if (ret) { - printk(KERN_ERR "%s: unable to grab irq%d: %d\n", - mid->name, ucb->irq, ret); + printk(KERN_ERR "ucb1x00: 
unable to grab irq%d: %d\n", + ucb->irq, ret); goto err_gpio; } @@ -712,6 +693,7 @@ static int ucb1x00_resume(struct mcp *mcp) struct ucb1x00 *ucb = mcp_get_drvdata(mcp); struct ucb1x00_dev *dev; + ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out); ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); mutex_lock(&ucb1x00_mutex); list_for_each_entry(dev, &ucb->devs, dev_node) { @@ -730,7 +712,6 @@ static struct mcp_driver ucb1x00_driver = { .remove = ucb1x00_remove, .suspend = ucb1x00_suspend, .resume = ucb1x00_resume, - .id_table = ucb1x00_id, }; static int __init ucb1x00_init(void) diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c index 40ec3c1..63a3cbd 100644 --- a/drivers/mfd/ucb1x00-ts.c +++ b/drivers/mfd/ucb1x00-ts.c @@ -47,7 +47,6 @@ struct ucb1x00_ts { u16 x_res; u16 y_res; - unsigned int restart:1; unsigned int adcsync:1; }; @@ -207,15 +206,17 @@ static int ucb1x00_thread(void *_ts) { struct ucb1x00_ts *ts = _ts; DECLARE_WAITQUEUE(wait, current); + bool frozen, ignore = false; int valid = 0; set_freezable(); add_wait_queue(&ts->irq_wait, &wait); - while (!kthread_should_stop()) { + while (!kthread_freezable_should_stop(&frozen)) { unsigned int x, y, p; signed long timeout; - ts->restart = 0; + if (frozen) + ignore = true; ucb1x00_adc_enable(ts->ucb); @@ -258,7 +259,7 @@ static int ucb1x00_thread(void *_ts) * space. We therefore leave it to user space * to do any filtering they please. */ - if (!ts->restart) { + if (!ignore) { ucb1x00_ts_evt_add(ts, p, x, y); valid = 1; } @@ -267,8 +268,6 @@ static int ucb1x00_thread(void *_ts) timeout = HZ / 100; } - try_to_freeze(); - schedule_timeout(timeout); } @@ -340,26 +339,6 @@ static void ucb1x00_ts_close(struct input_dev *idev) ucb1x00_disable(ts->ucb); } -#ifdef CONFIG_PM -static int ucb1x00_ts_resume(struct ucb1x00_dev *dev) -{ - struct ucb1x00_ts *ts = dev->priv; - - if (ts->rtask != NULL) { - /* - * Restart the TS thread to ensure the - * TS interrupt mode is set up again - * after sleep. - */ - ts->restart = 1; - wake_up(&ts->irq_wait); - } - return 0; -} -#else -#define ucb1x00_ts_resume NULL -#endif - /* * Initialisation. @@ -382,7 +361,7 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev) ts->adcsync = adcsync ? 
UCB_SYNC : UCB_NOSYNC; idev->name = "Touchscreen panel"; - idev->id.product = ts->ucb->id->driver_data; + idev->id.product = ts->ucb->id; idev->open = ucb1x00_ts_open; idev->close = ucb1x00_ts_close; @@ -425,7 +404,6 @@ static void ucb1x00_ts_remove(struct ucb1x00_dev *dev) static struct ucb1x00_driver ucb1x00_ts_driver = { .add = ucb1x00_ts_add, .remove = ucb1x00_ts_remove, - .resume = ucb1x00_ts_resume, }; static int __init ucb1x00_ts_init(void) diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c index eb5cd28..a2d25e4 100644 --- a/drivers/misc/carma/carma-fpga-program.c +++ b/drivers/misc/carma/carma-fpga-program.c @@ -513,7 +513,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv) * transaction, and then put it under external control */ memset(&config, 0, sizeof(config)); - config.direction = DMA_TO_DEVICE; + config.direction = DMA_MEM_TO_DEV; config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; config.dst_maxburst = fpga_fifo_size(priv->regs) / 2 / 4; ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG, diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c index 150cd70..28adefe 100644 --- a/drivers/misc/lkdtm.c +++ b/drivers/misc/lkdtm.c @@ -354,6 +354,7 @@ static void lkdtm_do_action(enum ctype which) static void lkdtm_handler(void) { unsigned long flags; + bool do_it = false; spin_lock_irqsave(&count_lock, flags); count--; @@ -361,10 +362,13 @@ static void lkdtm_handler(void) cp_name_to_str(cpoint), cp_type_to_str(cptype), count); if (count == 0) { - lkdtm_do_action(cptype); + do_it = true; count = cpoint_count; } spin_unlock_irqrestore(&count_lock, flags); + + if (do_it) + lkdtm_do_action(cptype); } static int lkdtm_register_cpoint(enum cname which) diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index a7ee502..fcfe1eb 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -823,6 +823,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) struct scatterlist *sg; unsigned int i; enum dma_data_direction direction; + enum dma_transfer_direction slave_dirn; unsigned int sglen; u32 iflags; @@ -860,16 +861,19 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) if (host->caps.has_dma) atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN); - if (data->flags & MMC_DATA_READ) + if (data->flags & MMC_DATA_READ) { direction = DMA_FROM_DEVICE; - else + slave_dirn = DMA_DEV_TO_MEM; + } else { direction = DMA_TO_DEVICE; + slave_dirn = DMA_MEM_TO_DEV; + } sglen = dma_map_sg(chan->device->dev, data->sg, data->sg_len, direction); desc = chan->device->device_prep_slave_sg(chan, - data->sg, sglen, direction, + data->sg, sglen, slave_dirn, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) goto unmap_exit; diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index ece03b4..0d955ff 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -374,6 +374,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, struct dma_chan *chan; struct dma_device *device; struct dma_async_tx_descriptor *desc; + enum dma_data_direction buffer_dirn; int nr_sg; /* Check if next job is already prepared */ @@ -387,10 +388,12 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, } if (data->flags & MMC_DATA_READ) { - conf.direction = DMA_FROM_DEVICE; + conf.direction = DMA_DEV_TO_MEM; + buffer_dirn = DMA_FROM_DEVICE; chan = host->dma_rx_channel; } else { - conf.direction = DMA_TO_DEVICE; + 
conf.direction = DMA_MEM_TO_DEV; + buffer_dirn = DMA_TO_DEVICE; chan = host->dma_tx_channel; } @@ -403,7 +406,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, return -EINVAL; device = chan->device; - nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction); + nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn); if (nr_sg == 0) return -EINVAL; @@ -426,7 +429,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, unmap_exit: if (!next) dmaengine_terminate_all(chan); - dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction); + dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn); return -ENOMEM; } diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index 7088b40..4184b79 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -218,6 +218,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) unsigned int blksz = data->blksz; unsigned int datasize = nob * blksz; struct scatterlist *sg; + enum dma_transfer_direction slave_dirn; int i, nents; if (data->flags & MMC_DATA_STREAM) @@ -240,10 +241,13 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) } } - if (data->flags & MMC_DATA_READ) + if (data->flags & MMC_DATA_READ) { host->dma_dir = DMA_FROM_DEVICE; - else + slave_dirn = DMA_DEV_TO_MEM; + } else { host->dma_dir = DMA_TO_DEVICE; + slave_dirn = DMA_MEM_TO_DEV; + } nents = dma_map_sg(host->dma->device->dev, data->sg, data->sg_len, host->dma_dir); @@ -251,7 +255,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) return -EINVAL; host->desc = host->dma->device->device_prep_slave_sg(host->dma, - data->sg, data->sg_len, host->dma_dir, + data->sg, data->sg_len, slave_dirn, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!host->desc) { diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index 4e2e019..382c835 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -154,6 +154,7 @@ struct mxs_mmc_host { struct dma_chan *dmach; struct mxs_dma_data dma_data; unsigned int dma_dir; + enum dma_transfer_direction slave_dirn; u32 ssp_pio_words[SSP_PIO_NUM]; unsigned int version; @@ -324,7 +325,7 @@ static struct dma_async_tx_descriptor *mxs_mmc_prep_dma( } desc = host->dmach->device->device_prep_slave_sg(host->dmach, - sgl, sg_len, host->dma_dir, append); + sgl, sg_len, host->slave_dirn, append); if (desc) { desc->callback = mxs_mmc_dma_irq_callback; desc->callback_param = host; @@ -356,6 +357,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host) host->ssp_pio_words[1] = cmd0; host->ssp_pio_words[2] = cmd1; host->dma_dir = DMA_NONE; + host->slave_dirn = DMA_TRANS_NONE; desc = mxs_mmc_prep_dma(host, 0); if (!desc) goto out; @@ -395,6 +397,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host) host->ssp_pio_words[1] = cmd0; host->ssp_pio_words[2] = cmd1; host->dma_dir = DMA_NONE; + host->slave_dirn = DMA_TRANS_NONE; desc = mxs_mmc_prep_dma(host, 0); if (!desc) goto out; @@ -433,6 +436,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) int i; unsigned short dma_data_dir, timeout; + enum dma_transfer_direction slave_dirn; unsigned int data_size = 0, log2_blksz; unsigned int blocks = data->blocks; @@ -448,9 +452,11 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) if (data->flags & MMC_DATA_WRITE) { dma_data_dir = DMA_TO_DEVICE; + slave_dirn = DMA_MEM_TO_DEV; read = 0; } else { dma_data_dir = DMA_FROM_DEVICE; + slave_dirn = DMA_DEV_TO_MEM; read 
= BM_SSP_CTRL0_READ; } @@ -510,6 +516,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) host->ssp_pio_words[1] = cmd0; host->ssp_pio_words[2] = cmd1; host->dma_dir = DMA_NONE; + host->slave_dirn = DMA_TRANS_NONE; desc = mxs_mmc_prep_dma(host, 0); if (!desc) goto out; @@ -518,6 +525,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) WARN_ON(host->data != NULL); host->data = data; host->dma_dir = dma_data_dir; + host->slave_dirn = slave_dirn; desc = mxs_mmc_prep_dma(host, 1); if (!desc) goto out; diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 4a2c5b2..f5d8b53 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -286,7 +286,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host) if (ret > 0) { host->dma_active = true; desc = chan->device->device_prep_slave_sg(chan, sg, ret, - DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); } if (desc) { @@ -335,7 +335,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host) if (ret > 0) { host->dma_active = true; desc = chan->device->device_prep_slave_sg(chan, sg, ret, - DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); } if (desc) { diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c index 86f259c..7a6e6cc 100644 --- a/drivers/mmc/host/tmio_mmc_dma.c +++ b/drivers/mmc/host/tmio_mmc_dma.c @@ -77,7 +77,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); if (ret > 0) desc = chan->device->device_prep_slave_sg(chan, sg, ret, - DMA_FROM_DEVICE, DMA_CTRL_ACK); + DMA_DEV_TO_MEM, DMA_CTRL_ACK); if (desc) { cookie = dmaengine_submit(desc); @@ -158,7 +158,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); if (ret > 0) desc = chan->device->device_prep_slave_sg(chan, sg, ret, - DMA_TO_DEVICE, DMA_CTRL_ACK); + DMA_MEM_TO_DEV, DMA_CTRL_ACK); if (desc) { cookie = dmaengine_submit(desc); diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 6ae9ca0..9a9ce71 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -119,7 +119,7 @@ static int mtd_cls_suspend(struct device *dev, pm_message_t state) { struct mtd_info *mtd = dev_get_drvdata(dev); - return mtd_suspend(mtd); + return mtd ? mtd_suspend(mtd) : 0; } static int mtd_cls_resume(struct device *dev) diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index 4dd056e..35b4fb5 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c @@ -161,6 +161,37 @@ static int atmel_nand_device_ready(struct mtd_info *mtd) !!host->board->rdy_pin_active_low; } +/* + * Minimal-overhead PIO for data access. 
+ */ +static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len) +{ + struct nand_chip *nand_chip = mtd->priv; + + __raw_readsb(nand_chip->IO_ADDR_R, buf, len); +} + +static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len) +{ + struct nand_chip *nand_chip = mtd->priv; + + __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2); +} + +static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len) +{ + struct nand_chip *nand_chip = mtd->priv; + + __raw_writesb(nand_chip->IO_ADDR_W, buf, len); +} + +static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len) +{ + struct nand_chip *nand_chip = mtd->priv; + + __raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2); +} + static void dma_complete_func(void *completion) { complete(completion); @@ -235,27 +266,33 @@ err_buf: static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len) { struct nand_chip *chip = mtd->priv; + struct atmel_nand_host *host = chip->priv; if (use_dma && len > mtd->oobsize) /* only use DMA for bigger than oob size: better performances */ if (atmel_nand_dma_op(mtd, buf, len, 1) == 0) return; - /* if no DMA operation possible, use PIO */ - memcpy_fromio(buf, chip->IO_ADDR_R, len); + if (host->board->bus_width_16) + atmel_read_buf16(mtd, buf, len); + else + atmel_read_buf8(mtd, buf, len); } static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len) { struct nand_chip *chip = mtd->priv; + struct atmel_nand_host *host = chip->priv; if (use_dma && len > mtd->oobsize) /* only use DMA for bigger than oob size: better performances */ if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0) return; - /* if no DMA operation possible, use PIO */ - memcpy_toio(chip->IO_ADDR_W, buf, len); + if (host->board->bus_width_16) + atmel_write_buf16(mtd, buf, len); + else + atmel_write_buf8(mtd, buf, len); } /* diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c index 2a56fc6..7db6555 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c @@ -69,17 +69,19 @@ static int clear_poll_bit(void __iomem *addr, u32 mask) * [1] enable the module. * [2] reset the module. * - * In most of the cases, it's ok. But there is a hardware bug in the BCH block. + * In most of the cases, it's ok. + * But in MX23, there is a hardware bug in the BCH block (see erratum #2847). * If you try to soft reset the BCH block, it becomes unusable until * the next hard reset. This case occurs in the NAND boot mode. When the board * boots by NAND, the ROM of the chip will initialize the BCH blocks itself. * So If the driver tries to reset the BCH again, the BCH will not work anymore. - * You will see a DMA timeout in this case. + * You will see a DMA timeout in this case. The bug has been fixed + * in the following chips, such as MX28. * * To avoid this bug, just add a new parameter `just_enable` for * the mxs_reset_block(), and rewrite it here. */ -int gpmi_reset_block(void __iomem *reset_addr, bool just_enable) +static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable) { int ret; int timeout = 0x400; @@ -206,7 +208,15 @@ int bch_set_geometry(struct gpmi_nand_data *this) if (ret) goto err_out; - ret = gpmi_reset_block(r->bch_regs, true); + /* + * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this + * chip, otherwise it will lock up. So we skip resetting BCH on the MX23. 
+ * On the other hand, the MX28 needs the reset, because one case has been + * seen where the BCH produced ECC errors constantly after 10000 + * consecutive reboots. The latter case has not been seen on the MX23 yet, + * still we don't know if it could happen there as well. + */ + ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this)); if (ret) goto err_out; @@ -827,7 +837,7 @@ int gpmi_send_command(struct gpmi_nand_data *this) pio[1] = pio[2] = 0; desc = channel->device->device_prep_slave_sg(channel, (struct scatterlist *)pio, - ARRAY_SIZE(pio), DMA_NONE, 0); + ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); if (!desc) { pr_err("step 1 error\n"); return -1; @@ -839,7 +849,7 @@ int gpmi_send_command(struct gpmi_nand_data *this) sg_init_one(sgl, this->cmd_buffer, this->command_length); dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE); desc = channel->device->device_prep_slave_sg(channel, - sgl, 1, DMA_TO_DEVICE, 1); + sgl, 1, DMA_MEM_TO_DEV, 1); if (!desc) { pr_err("step 2 error\n"); return -1; @@ -872,7 +882,7 @@ int gpmi_send_data(struct gpmi_nand_data *this) pio[1] = 0; desc = channel->device->device_prep_slave_sg(channel, (struct scatterlist *)pio, - ARRAY_SIZE(pio), DMA_NONE, 0); + ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); if (!desc) { pr_err("step 1 error\n"); return -1; @@ -881,7 +891,7 @@ int gpmi_send_data(struct gpmi_nand_data *this) /* [2] send DMA request */ prepare_data_dma(this, DMA_TO_DEVICE); desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl, - 1, DMA_TO_DEVICE, 1); + 1, DMA_MEM_TO_DEV, 1); if (!desc) { pr_err("step 2 error\n"); return -1; @@ -908,7 +918,7 @@ int gpmi_read_data(struct gpmi_nand_data *this) pio[1] = 0; desc = channel->device->device_prep_slave_sg(channel, (struct scatterlist *)pio, - ARRAY_SIZE(pio), DMA_NONE, 0); + ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); if (!desc) { pr_err("step 1 error\n"); return -1; @@ -917,7 +927,7 @@ int gpmi_read_data(struct gpmi_nand_data *this) /* [2] : send DMA request */ prepare_data_dma(this, DMA_FROM_DEVICE); desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl, - 1, DMA_FROM_DEVICE, 1); + 1, DMA_DEV_TO_MEM, 1); if (!desc) { pr_err("step 2 error\n"); return -1; @@ -964,7 +974,7 @@ int gpmi_send_page(struct gpmi_nand_data *this, desc = channel->device->device_prep_slave_sg(channel, (struct scatterlist *)pio, - ARRAY_SIZE(pio), DMA_NONE, 0); + ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); if (!desc) { pr_err("step 2 error\n"); return -1; @@ -998,7 +1008,8 @@ int gpmi_read_page(struct gpmi_nand_data *this, | BF_GPMI_CTRL0_XFER_COUNT(0); pio[1] = 0; desc = channel->device->device_prep_slave_sg(channel, - (struct scatterlist *)pio, 2, DMA_NONE, 0); + (struct scatterlist *)pio, 2, + DMA_TRANS_NONE, 0); if (!desc) { pr_err("step 1 error\n"); return -1; @@ -1027,7 +1038,7 @@ int gpmi_read_page(struct gpmi_nand_data *this, pio[5] = auxiliary; desc = channel->device->device_prep_slave_sg(channel, (struct scatterlist *)pio, - ARRAY_SIZE(pio), DMA_NONE, 1); + ARRAY_SIZE(pio), DMA_TRANS_NONE, 1); if (!desc) { pr_err("step 2 error\n"); return -1; @@ -1045,7 +1056,8 @@ int gpmi_read_page(struct gpmi_nand_data *this, | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size); pio[1] = 0; desc = channel->device->device_prep_slave_sg(channel, - (struct scatterlist *)pio, 2, DMA_NONE, 1); + (struct scatterlist *)pio, 2, + DMA_TRANS_NONE, 1); if (!desc) { pr_err("step 3 error\n"); return -1; diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 35b4565..8a393f9 100644 --- a/drivers/mtd/nand/nand_base.c +++ 
b/drivers/mtd/nand/nand_base.c @@ -2588,7 +2588,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, instr->state = MTD_ERASING; while (len) { - /* Heck if we have a bad block, we do not erase bad blocks! */ + /* Check if we have a bad block, we do not erase bad blocks! */ if (nand_block_checkbad(mtd, ((loff_t) page) << chip->page_shift, 0, allowbbt)) { pr_warn("%s: attempt to erase a bad block at page 0x%08x\n", diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 342626f..f820b26 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -909,16 +909,12 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]) } } -/* hw is a boolean parameter that determines whether we should try and - * set the hw address of the device as well as the hw address of the - * net_device - */ -static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], int hw) +static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[]) { struct net_device *dev = slave->dev; struct sockaddr s_addr; - if (!hw) { + if (slave->bond->params.mode == BOND_MODE_TLB) { memcpy(dev->dev_addr, addr, dev->addr_len); return 0; } @@ -948,8 +944,8 @@ static void alb_swap_mac_addr(struct bonding *bond, struct slave *slave1, struct u8 tmp_mac_addr[ETH_ALEN]; memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN); - alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr, bond->alb_info.rlb_enabled); - alb_set_slave_mac_addr(slave2, tmp_mac_addr, bond->alb_info.rlb_enabled); + alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr); + alb_set_slave_mac_addr(slave2, tmp_mac_addr); } @@ -1096,8 +1092,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav /* Try setting slave mac to bond address and fall-through to code handling that situation below... */ - alb_set_slave_mac_addr(slave, bond->dev->dev_addr, - bond->alb_info.rlb_enabled); + alb_set_slave_mac_addr(slave, bond->dev->dev_addr); } /* The slave's address is equal to the address of the bond. 
@@ -1133,8 +1128,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav } if (free_mac_slave) { - alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr, - bond->alb_info.rlb_enabled); + alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr); pr_warning("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n", bond->dev->name, slave->dev->name, @@ -1491,8 +1485,7 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave) { int res; - res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr, - bond->alb_info.rlb_enabled); + res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr); if (res) { return res; } @@ -1643,8 +1636,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave alb_swap_mac_addr(bond, swap_slave, new_slave); } else { /* set the new_slave to the bond mac address */ - alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr, - bond->alb_info.rlb_enabled); + alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr); } if (swap_slave) { @@ -1704,8 +1696,7 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave); alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave); } else { - alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr, - bond->alb_info.rlb_enabled); + alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr); read_lock(&bond->lock); alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr); diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 7fc4e81..325391d 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -9,6 +9,7 @@ */ #include <linux/list.h> +#include <linux/module.h> #include <linux/netdevice.h> #include <linux/phy.h> #include <net/dsa.h> diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c index c0a458f..c17c75b 100644 --- a/drivers/net/dsa/mv88e6123_61_65.c +++ b/drivers/net/dsa/mv88e6123_61_65.c @@ -9,6 +9,7 @@ */ #include <linux/list.h> +#include <linux/module.h> #include <linux/netdevice.h> #include <linux/phy.h> #include <net/dsa.h> @@ -20,12 +21,25 @@ static char *mv88e6123_61_65_probe(struct mii_bus *bus, int sw_addr) ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); if (ret >= 0) { - ret &= 0xfff0; - if (ret == 0x1210) + if (ret == 0x1212) + return "Marvell 88E6123 (A1)"; + if (ret == 0x1213) + return "Marvell 88E6123 (A2)"; + if ((ret & 0xfff0) == 0x1210) return "Marvell 88E6123"; - if (ret == 0x1610) + + if (ret == 0x1612) + return "Marvell 88E6161 (A1)"; + if (ret == 0x1613) + return "Marvell 88E6161 (A2)"; + if ((ret & 0xfff0) == 0x1610) return "Marvell 88E6161"; - if (ret == 0x1650) + + if (ret == 0x1652) + return "Marvell 88E6165 (A1)"; + if (ret == 0x1653) + return "Marvell 88e6165 (A2)"; + if ((ret & 0xfff0) == 0x1650) return "Marvell 88E6165"; } diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index e0eb6824..55888b0 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -9,6 +9,7 @@ */ #include <linux/list.h> +#include <linux/module.h> #include <linux/netdevice.h> #include <linux/phy.h> #include <net/dsa.h> diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 5467c04..a2c62c2 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -9,6 +9,7 @@ */ #include <linux/list.h> +#include <linux/module.h> #include <linux/netdevice.h> 
#include <linux/phy.h> #include <net/dsa.h> diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 2b731b2..03f3935 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -3117,7 +3117,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) int rx_ring_size = 0; #ifdef BCM_CNIC - if (IS_MF_ISCSI_SD(bp)) { + if (!bp->rx_ring_size && IS_MF_ISCSI_SD(bp)) { rx_ring_size = MIN_RX_SIZE_NONTPA; bp->rx_ring_size = rx_ring_size; } else diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index a688b9d97..31a8b38 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -365,13 +365,18 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) DP(NETIF_MSG_LINK, "cfg_idx = %x\n", cfg_idx); if (cmd->autoneg == AUTONEG_ENABLE) { + u32 an_supported_speed = bp->port.supported[cfg_idx]; + if (bp->link_params.phy[EXT_PHY1].type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) + an_supported_speed |= (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full); if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) { DP(NETIF_MSG_LINK, "Autoneg not supported\n"); return -EINVAL; } /* advertise the requested speed and duplex if supported */ - if (cmd->advertising & ~(bp->port.supported[cfg_idx])) { + if (cmd->advertising & ~an_supported_speed) { DP(NETIF_MSG_LINK, "Advertisement parameters " "are not supported\n"); return -EINVAL; @@ -1733,7 +1738,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0]; u16 tx_start_idx, tx_idx; u16 rx_start_idx, rx_idx; - u16 pkt_prod, bd_prod, rx_comp_cons; + u16 pkt_prod, bd_prod; struct sw_tx_bd *tx_buf; struct eth_tx_start_bd *tx_start_bd; struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; @@ -1868,8 +1873,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) if (rx_idx != rx_start_idx + num_pkts) goto test_loopback_exit; - rx_comp_cons = le16_to_cpu(fp_rx->rx_comp_cons); - cqe = &fp_rx->rx_comp_ring[RCQ_BD(rx_comp_cons)]; + cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)]; cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS)) @@ -2116,18 +2120,16 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset) case ETH_SS_STATS: if (is_multi(bp)) { num_stats = bnx2x_num_stat_queues(bp) * - BNX2X_NUM_Q_STATS; - if (!IS_MF_MODE_STAT(bp)) - num_stats += BNX2X_NUM_STATS; - } else { - if (IS_MF_MODE_STAT(bp)) { - num_stats = 0; - for (i = 0; i < BNX2X_NUM_STATS; i++) - if (IS_FUNC_STAT(i)) - num_stats++; - } else - num_stats = BNX2X_NUM_STATS; - } + BNX2X_NUM_Q_STATS; + } else + num_stats = 0; + if (IS_MF_MODE_STAT(bp)) { + for (i = 0; i < BNX2X_NUM_STATS; i++) + if (IS_FUNC_STAT(i)) + num_stats++; + } else + num_stats += BNX2X_NUM_STATS; + return num_stats; case ETH_SS_TEST: @@ -2146,8 +2148,8 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) switch (stringset) { case ETH_SS_STATS: + k = 0; if (is_multi(bp)) { - k = 0; for_each_eth_queue(bp, i) { memset(queue_name, 0, sizeof(queue_name)); sprintf(queue_name, "%d", i); @@ -2158,20 +2160,17 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) queue_name); k += 
BNX2X_NUM_Q_STATS; } - if (IS_MF_MODE_STAT(bp)) - break; - for (j = 0; j < BNX2X_NUM_STATS; j++) - strcpy(buf + (k + j)*ETH_GSTRING_LEN, - bnx2x_stats_arr[j].string); - } else { - for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { - if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) - continue; - strcpy(buf + j*ETH_GSTRING_LEN, - bnx2x_stats_arr[i].string); - j++; - } } + + + for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { + if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) + continue; + strcpy(buf + (k + j)*ETH_GSTRING_LEN, + bnx2x_stats_arr[i].string); + j++; + } + break; case ETH_SS_TEST: @@ -2185,10 +2184,9 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev, { struct bnx2x *bp = netdev_priv(dev); u32 *hw_stats, *offset; - int i, j, k; + int i, j, k = 0; if (is_multi(bp)) { - k = 0; for_each_eth_queue(bp, i) { hw_stats = (u32 *)&bp->fp[i].eth_q_stats; for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { @@ -2209,46 +2207,28 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev, } k += BNX2X_NUM_Q_STATS; } - if (IS_MF_MODE_STAT(bp)) - return; - hw_stats = (u32 *)&bp->eth_stats; - for (j = 0; j < BNX2X_NUM_STATS; j++) { - if (bnx2x_stats_arr[j].size == 0) { - /* skip this counter */ - buf[k + j] = 0; - continue; - } - offset = (hw_stats + bnx2x_stats_arr[j].offset); - if (bnx2x_stats_arr[j].size == 4) { - /* 4-byte counter */ - buf[k + j] = (u64) *offset; - continue; - } - /* 8-byte counter */ - buf[k + j] = HILO_U64(*offset, *(offset + 1)); + } + + hw_stats = (u32 *)&bp->eth_stats; + for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { + if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) + continue; + if (bnx2x_stats_arr[i].size == 0) { + /* skip this counter */ + buf[k + j] = 0; + j++; + continue; } - } else { - hw_stats = (u32 *)&bp->eth_stats; - for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { - if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) - continue; - if (bnx2x_stats_arr[i].size == 0) { - /* skip this counter */ - buf[j] = 0; - j++; - continue; - } - offset = (hw_stats + bnx2x_stats_arr[i].offset); - if (bnx2x_stats_arr[i].size == 4) { - /* 4-byte counter */ - buf[j] = (u64) *offset; - j++; - continue; - } - /* 8-byte counter */ - buf[j] = HILO_U64(*offset, *(offset + 1)); + offset = (hw_stats + bnx2x_stats_arr[i].offset); + if (bnx2x_stats_arr[i].size == 4) { + /* 4-byte counter */ + buf[k + j] = (u64) *offset; j++; + continue; } + /* 8-byte counter */ + buf[k + j] = HILO_U64(*offset, *(offset + 1)); + j++; } } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 4df9505..2091e5d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -2502,7 +2502,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params, struct bnx2x_nig_brb_pfc_port_params *nig_params) { u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0; - u32 llfc_enable = 0, xcm0_out_en = 0, p0_hwpfc_enable = 0; + u32 llfc_enable = 0, xcm_out_en = 0, hwpfc_enable = 0; u32 pkt_priority_to_cos = 0; struct bnx2x *bp = params->bp; u8 port = params->port; @@ -2516,9 +2516,8 @@ static void bnx2x_update_pfc_nig(struct link_params *params, * MAC control frames (that are not pause packets) * will be forwarded to the XCM. */ - xcm_mask = REG_RD(bp, - port ? NIG_REG_LLH1_XCM_MASK : - NIG_REG_LLH0_XCM_MASK); + xcm_mask = REG_RD(bp, port ? 
NIG_REG_LLH1_XCM_MASK : + NIG_REG_LLH0_XCM_MASK); /* * nig params will override non PFC params, since it's possible to * do transition from PFC to SAFC @@ -2533,8 +2532,8 @@ static void bnx2x_update_pfc_nig(struct link_params *params, ppp_enable = 1; xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN : NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN); - xcm0_out_en = 0; - p0_hwpfc_enable = 1; + xcm_out_en = 0; + hwpfc_enable = 1; } else { if (nig_params) { llfc_out_en = nig_params->llfc_out_en; @@ -2545,7 +2544,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params, xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN : NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN); - xcm0_out_en = 1; + xcm_out_en = 1; } if (CHIP_IS_E3(bp)) @@ -2564,13 +2563,16 @@ static void bnx2x_update_pfc_nig(struct link_params *params, REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK : NIG_REG_LLH0_XCM_MASK, xcm_mask); - REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7); + REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 : + NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7); /* output enable for RX_XCM # IF */ - REG_WR(bp, NIG_REG_XCM0_OUT_EN, xcm0_out_en); + REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN : + NIG_REG_XCM0_OUT_EN, xcm_out_en); /* HW PFC TX enable */ - REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, p0_hwpfc_enable); + REG_WR(bp, port ? NIG_REG_P1_HWPFC_ENABLE : + NIG_REG_P0_HWPFC_ENABLE, hwpfc_enable); if (nig_params) { u8 i = 0; @@ -3761,7 +3763,15 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, /* Advertise pause */ bnx2x_ext_phy_set_pause(params, phy, vars); - vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; + /* + * Set KR Autoneg Work-Around flag for Warpcore version older than D108 + */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_UC_INFO_B1_VERSION, &val16); + if (val16 < 0xd108) { + DP(NETIF_MSG_LINK, "Enable AN KR work-around\n"); + vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; + } bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, &val16); @@ -9266,62 +9276,68 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy, /* BCM8481/BCM84823/BCM84833 PHY SECTION */ /******************************************************************/ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, - struct link_params *params) + struct bnx2x *bp, + u8 port) { u16 val, fw_ver1, fw_ver2, cnt; - u8 port; - struct bnx2x *bp = params->bp; - port = params->port; + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1); + bnx2x_save_spirom_version(bp, port, + ((fw_ver1 & 0xf000)>>5) | (fw_ver1 & 0x7f), + phy->ver_addr); + } else { + /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. 
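In the bnx2x_update_pfc_nig hunk above, the LLFC egress source enable, XCM output enable and HWPFC enable writes stop being hard-coded to the port-0 registers and instead select the per-port instance with a "port ? REG1 : REG0" expression (the new NIG_REG_P1_HWPFC_ENABLE offset is added to bnx2x_reg.h further down). A toy model of that pattern, with made-up offsets and a printf standing in for REG_WR:

    #include <stdio.h>
    #include <stdint.h>

    /* Placeholder offsets; the real NIG addresses live in bnx2x_reg.h. */
    #define REG_P0_LLFC_EGRESS_SRC_ENABLE 0x100
    #define REG_P1_LLFC_EGRESS_SRC_ENABLE 0x110
    #define REG_P0_XCM_OUT_EN             0x120
    #define REG_P1_XCM_OUT_EN             0x130
    #define REG_P0_HWPFC_ENABLE           0x140
    #define REG_P1_HWPFC_ENABLE           0x150

    static void reg_wr(uint32_t addr, uint32_t val)
    {
        /* stands in for the driver's MMIO write */
        printf("REG_WR 0x%03x <- 0x%x\n", (unsigned)addr, (unsigned)val);
    }

    static void update_pfc(int port, uint32_t xcm_out_en, uint32_t hwpfc_enable)
    {
        /* Each register exists once per port; pick the instance by port. */
        reg_wr(port ? REG_P1_LLFC_EGRESS_SRC_ENABLE :
                      REG_P0_LLFC_EGRESS_SRC_ENABLE, 0x7);
        reg_wr(port ? REG_P1_XCM_OUT_EN : REG_P0_XCM_OUT_EN, xcm_out_en);
        reg_wr(port ? REG_P1_HWPFC_ENABLE : REG_P0_HWPFC_ENABLE, hwpfc_enable);
    }

    int main(void)
    {
        update_pfc(0, 1, 1);
        update_pfc(1, 0, 1);
        return 0;
    }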
*/ + /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009); + + for (cnt = 0; cnt < 100; cnt++) { + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); + if (val & 1) + break; + udelay(5); + } + if (cnt == 100) { + DP(NETIF_MSG_LINK, "Unable to read 848xx " + "phy fw version(1)\n"); + bnx2x_save_spirom_version(bp, port, 0, + phy->ver_addr); + return; + } - /* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/ - /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009); - for (cnt = 0; cnt < 100; cnt++) { - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); - if (val & 1) - break; - udelay(5); - } - if (cnt == 100) { - DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(1)\n"); - bnx2x_save_spirom_version(bp, port, 0, - phy->ver_addr); - return; - } + /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */ + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A); + for (cnt = 0; cnt < 100; cnt++) { + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); + if (val & 1) + break; + udelay(5); + } + if (cnt == 100) { + DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw " + "version(2)\n"); + bnx2x_save_spirom_version(bp, port, 0, + phy->ver_addr); + return; + } + /* lower 16 bits of the register SPI_FW_STATUS */ + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1); + /* upper 16 bits of register SPI_FW_STATUS */ + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2); - /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */ - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A); - for (cnt = 0; cnt < 100; cnt++) { - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); - if (val & 1) - break; - udelay(5); - } - if (cnt == 100) { - DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(2)\n"); - bnx2x_save_spirom_version(bp, port, 0, + bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1, phy->ver_addr); - return; } - /* lower 16 bits of the register SPI_FW_STATUS */ - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1); - /* upper 16 bits of register SPI_FW_STATUS */ - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2); - - bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1, - phy->ver_addr); } - static void bnx2x_848xx_set_led(struct bnx2x *bp, struct bnx2x_phy *phy) { @@ -9392,10 +9408,13 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, u16 tmp_req_line_speed; tmp_req_line_speed = phy->req_line_speed; - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { if (phy->req_line_speed == SPEED_10000) phy->req_line_speed = SPEED_AUTO_NEG; - + } else { + /* Save spirom version */ + 
bnx2x_save_848xx_spirom_version(phy, bp, params->port); + } /* * This phy uses the NIG latch mechanism since link indication * arrives through its LED4 and not via its LASI signal, so we @@ -9443,13 +9462,10 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, an_1000_val); /* set 100 speed advertisement */ - if (((phy->req_line_speed == SPEED_AUTO_NEG) && + if ((phy->req_line_speed == SPEED_AUTO_NEG) && (phy->speed_cap_mask & (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | - PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) && - (phy->supported & - (SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full)))) { + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))) { an_10_100_val |= (1<<7); /* Enable autoneg and restart autoneg for legacy speeds */ autoneg_val |= (1<<9 | 1<<12); @@ -9539,9 +9555,6 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, 1); - /* Save spirom version */ - bnx2x_save_848xx_spirom_version(phy, params); - phy->req_line_speed = tmp_req_line_speed; return 0; @@ -9749,17 +9762,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, /* Wait for GPHY to come out of reset */ msleep(50); - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { - /* Bring PHY out of super isolate mode */ - bnx2x_cl45_read(bp, phy, - MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val); - val &= ~MDIO_84833_SUPER_ISOLATE; - bnx2x_cl45_write(bp, phy, - MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_XGPHY_STRAP1, val); - bnx2x_84833_pair_swap_cfg(phy, params, vars); - } else { + if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { /* * BCM84823 requires that XGXS links up first @ 10G for normal * behavior. @@ -9816,24 +9819,23 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n", params->multi_phy_config, val); - /* AutogrEEEn */ - if (params->feature_config_flags & - FEATURE_CONFIG_AUTOGREEEN_ENABLED) - cmd_args[0] = 0x2; - else - cmd_args[0] = 0x0; + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { + bnx2x_84833_pair_swap_cfg(phy, params, vars); - cmd_args[1] = 0x0; - cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1; - cmd_args[3] = PHY84833_CONSTANT_LATENCY; - rc = bnx2x_84833_cmd_hdlr(phy, params, - PHY84833_CMD_SET_EEE_MODE, cmd_args); - if (rc != 0) - DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n"); + /* Keep AutogrEEEn disabled. */ + cmd_args[0] = 0x0; + cmd_args[1] = 0x0; + cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1; + cmd_args[3] = PHY84833_CONSTANT_LATENCY; + rc = bnx2x_84833_cmd_hdlr(phy, params, + PHY84833_CMD_SET_EEE_MODE, cmd_args); + if (rc != 0) + DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n"); + } if (initialize) rc = bnx2x_848xx_cmn_config_init(phy, params, vars); else - bnx2x_save_848xx_spirom_version(phy, params); + bnx2x_save_848xx_spirom_version(phy, bp, params->port); /* 84833 PHY has a better feature and doesn't need to support this. */ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) { cms_enable = REG_RD(bp, params->shmem_base + @@ -9851,6 +9853,16 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, MDIO_CTL_REG_84823_USER_CTRL_REG, val); } + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { + /* Bring PHY out of super isolate mode as the final step. 
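The 84833 bring-up around this point is re-ordered: pair-swap configuration and the (now deliberately disabled) AutogrEEEn command run while the PHY is still in super isolate, and the isolate bit is cleared only as the final step of config init (the code doing that follows just below). A rough sketch of the "configure first, un-isolate last" ordering; the step names are stand-ins, not driver functions:

    #include <stdio.h>

    static void pair_swap_cfg(void)       { puts("pair swap cfg"); }
    static void keep_autogreeen_off(void) { puts("AutogrEEEn kept disabled"); }
    static void common_config(void)       { puts("autoneg/speed config"); }
    static void clear_super_isolate(void) { puts("clear SUPER_ISOLATE"); }

    int main(void)
    {
        /* The PHY stays isolated until every other step has run. */
        void (*steps[])(void) = {
            pair_swap_cfg,
            keep_autogreeen_off,
            common_config,
            clear_super_isolate,    /* deliberately the final step */
        };
        unsigned i;

        for (i = 0; i < sizeof(steps) / sizeof(steps[0]); i++)
            steps[i]();
        return 0;
    }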
*/ + bnx2x_cl45_read(bp, phy, + MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val); + val &= ~MDIO_84833_SUPER_ISOLATE; + bnx2x_cl45_write(bp, phy, + MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_XGPHY_STRAP1, val); + } return rc; } @@ -9988,10 +10000,11 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy, } else { bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - 0x400f, &val16); + MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val16); + val16 |= MDIO_84833_SUPER_ISOLATE; bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_CTRL, 0x800); + MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_XGPHY_STRAP1, val16); } } @@ -11516,6 +11529,19 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp, } phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port); + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && + (phy->ver_addr)) { + /* + * Remove 100Mb link supported for BCM84833 when phy fw + * version lower than or equal to 1.39 + */ + u32 raw_ver = REG_RD(bp, phy->ver_addr); + if (((raw_ver & 0x7F) <= 39) && + (((raw_ver & 0xF80) >> 7) <= 1)) + phy->supported &= ~(SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full); + } + /* * In case mdc/mdio_access of the external phy is different than the * mdc/mdio access of the XGXS, a HW lock must be taken in each access @@ -12333,55 +12359,69 @@ static int bnx2x_84833_common_init_phy(struct bnx2x *bp, u32 chip_id) { u8 reset_gpios; - struct bnx2x_phy phy; - u32 shmem_base, shmem2_base, cnt; - s8 port = 0; - u16 val; - reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id); bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW); udelay(10); bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH); DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n", reset_gpios); - for (port = PORT_MAX - 1; port >= PORT_0; port--) { - /* This PHY is for E2 and E3. */ - shmem_base = shmem_base_path[port]; - shmem2_base = shmem2_base_path[port]; - /* Extract the ext phy address for the port */ - if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, - 0, &phy) != - 0) { - DP(NETIF_MSG_LINK, "populate_phy failed\n"); - return -EINVAL; - } + return 0; +} - /* Wait for FW completing its initialization. */ - for (cnt = 0; cnt < 1000; cnt++) { - bnx2x_cl45_read(bp, &phy, +static int bnx2x_84833_pre_init_phy(struct bnx2x *bp, + struct bnx2x_phy *phy) +{ + u16 val, cnt; + /* Wait for FW completing its initialization. */ + for (cnt = 0; cnt < 1500; cnt++) { + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, &val); - if (!(val & (1<<15))) - break; - msleep(1); - } - if (cnt >= 1000) - DP(NETIF_MSG_LINK, - "84833 Cmn reset timeout (%d)\n", port); - - /* Put the port in super isolate mode. */ - bnx2x_cl45_read(bp, &phy, - MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val); - val |= MDIO_84833_SUPER_ISOLATE; - bnx2x_cl45_write(bp, &phy, - MDIO_CTL_DEVAD, - MDIO_84833_TOP_CFG_XGPHY_STRAP1, val); + if (!(val & (1<<15))) + break; + msleep(1); + } + if (cnt >= 1500) { + DP(NETIF_MSG_LINK, "84833 reset timeout\n"); + return -EINVAL; } + /* Put the port in super isolate mode. 
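The BCM84833 spirom version saved further up packs the firmware minor number into bits 0-6 and the major number starting at bit 7 of the stored word (((fw_ver1 & 0xf000) >> 5) | (fw_ver1 & 0x7f)), and the bnx2x_populate_ext_phy hunk above reads it back with the matching masks to drop 100M support for firmware <= 1.39. A small sketch of that pack/unpack, assuming the register really carries major in bits 12-15 and minor in bits 0-6:

    #include <stdio.h>
    #include <stdint.h>

    /* Layout inferred from the two hunks above; treat it as an assumption. */
    static uint32_t pack_ver(uint16_t fw_reg)
    {
        return ((fw_reg & 0xf000) >> 5) | (fw_reg & 0x7f);
    }

    static void unpack_ver(uint32_t raw, unsigned *major, unsigned *minor)
    {
        *minor = raw & 0x7F;
        *major = (raw & 0xF80) >> 7;
    }

    static int is_le_1_39(uint32_t raw)
    {
        unsigned major, minor;

        unpack_ver(raw, &major, &minor);
        return major <= 1 && minor <= 39;    /* same test as the hunk */
    }

    int main(void)
    {
        /* e.g. register value 0x1027 -> version 1.39 */
        uint16_t samples[] = { 0x1027, 0x1028, 0x2005 };
        int i;

        for (i = 0; i < 3; i++) {
            uint32_t raw = pack_ver(samples[i]);
            unsigned major, minor;

            unpack_ver(raw, &major, &minor);
            printf("reg 0x%04x -> %u.%u, old fw: %s\n", (unsigned)samples[i],
                   major, minor, is_le_1_39(raw) ? "yes" : "no");
        }
        return 0;
    }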
*/ + bnx2x_cl45_read(bp, phy, + MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val); + val |= MDIO_84833_SUPER_ISOLATE; + bnx2x_cl45_write(bp, phy, + MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_XGPHY_STRAP1, val); + + /* Save spirom version */ + bnx2x_save_848xx_spirom_version(phy, bp, PORT_0); return 0; } +int bnx2x_pre_init_phy(struct bnx2x *bp, + u32 shmem_base, + u32 shmem2_base, + u32 chip_id) +{ + int rc = 0; + struct bnx2x_phy phy; + bnx2x_set_mdio_clk(bp, chip_id, PORT_0); + if (bnx2x_populate_phy(bp, EXT_PHY1, shmem_base, shmem2_base, + PORT_0, &phy)) { + DP(NETIF_MSG_LINK, "populate_phy failed\n"); + return -EINVAL; + } + switch (phy.type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: + rc = bnx2x_84833_pre_init_phy(bp, &phy); + break; + default: + break; + } + return rc; +} static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], u32 shmem2_base_path[], u8 phy_index, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index ffeaaa9..1e3f978 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -941,7 +941,7 @@ void bnx2x_panic_dump(struct bnx2x *bp) struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j]; BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n", - i, j, rx_bd[1], rx_bd[0], sw_bd->skb); + i, j, rx_bd[1], rx_bd[0], sw_bd->data); } start = RX_SGE(fp->rx_sge_prod); @@ -10536,6 +10536,9 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, { struct bnx2x *bp; int rc; + bool chip_is_e1x = (board_type == BCM57710 || + board_type == BCM57711 || + board_type == BCM57711E); SET_NETDEV_DEV(dev, &pdev->dev); bp = netdev_priv(dev); @@ -10624,7 +10627,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); - if (CHIP_IS_E1x(bp)) { + if (chip_is_e1x) { REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); @@ -10635,9 +10638,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, * Enable internal target-read (in case we are probed after PF FLR). * Must be done prior to any BAR read access. Only for 57712 and up */ - if (board_type != BCM57710 && - board_type != BCM57711 && - board_type != BCM57711E) + if (!chip_is_e1x) REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); /* Reset the load counter */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 44609de..dddbcf6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -2176,6 +2176,7 @@ * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to * accommodate the 9 input clients to ETS arbiter. 
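The new bnx2x_84833_pre_init_phy above waits for firmware by polling the control register up to 1500 times with a 1 ms sleep between reads, and bails out with -EINVAL on timeout before forcing super isolate. A user-space sketch of that poll-until-clear-or-timeout shape, with a fake register read standing in for the CL45 access:

    #include <stdio.h>
    #include <errno.h>
    #include <stdint.h>

    /* Fake "hardware": pretend the reset bit (bit 15) clears after a few reads. */
    static int reads;

    static uint16_t read_ctrl(void)
    {
        return ++reads < 5 ? 0x8000 : 0x0000;
    }

    static int wait_fw_ready(void)
    {
        uint16_t val;
        int cnt;

        for (cnt = 0; cnt < 1500; cnt++) {
            val = read_ctrl();
            if (!(val & (1 << 15)))
                break;
            /* the driver sleeps 1 ms here (msleep(1)) */
        }
        if (cnt >= 1500) {
            fprintf(stderr, "reset timeout\n");
            return -EINVAL;
        }
        return 0;
    }

    int main(void)
    {
        printf("wait_fw_ready() = %d after %d reads\n", wait_fw_ready(), reads);
        return 0;
    }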
*/ #define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684 +#define NIG_REG_P1_HWPFC_ENABLE 0x181d0 #define NIG_REG_P1_MAC_IN_EN 0x185c0 /* [RW 1] Output enable for TX MAC interface */ #define NIG_REG_P1_MAC_OUT_EN 0x185c4 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 5ac6160..cb6339c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -50,6 +50,7 @@ static inline void bnx2x_exe_queue_init(struct bnx2x *bp, int exe_len, union bnx2x_qable_obj *owner, exe_q_validate validate, + exe_q_remove remove, exe_q_optimize optimize, exe_q_execute exec, exe_q_get get) @@ -66,6 +67,7 @@ static inline void bnx2x_exe_queue_init(struct bnx2x *bp, /* Owner specific callbacks */ o->validate = validate; + o->remove = remove; o->optimize = optimize; o->execute = exec; o->get = get; @@ -1340,6 +1342,35 @@ static int bnx2x_validate_vlan_mac(struct bnx2x *bp, } } +static int bnx2x_remove_vlan_mac(struct bnx2x *bp, + union bnx2x_qable_obj *qo, + struct bnx2x_exeq_elem *elem) +{ + int rc = 0; + + /* If consumption wasn't required, nothing to do */ + if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &elem->cmd_data.vlan_mac.vlan_mac_flags)) + return 0; + + switch (elem->cmd_data.vlan_mac.cmd) { + case BNX2X_VLAN_MAC_ADD: + case BNX2X_VLAN_MAC_MOVE: + rc = qo->vlan_mac.put_credit(&qo->vlan_mac); + break; + case BNX2X_VLAN_MAC_DEL: + rc = qo->vlan_mac.get_credit(&qo->vlan_mac); + break; + default: + return -EINVAL; + } + + if (rc != true) + return -EINVAL; + + return 0; +} + /** * bnx2x_wait_vlan_mac - passivly wait for 5 seconds until all work completes. * @@ -1801,8 +1832,14 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) { if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags == - *vlan_mac_flags) + *vlan_mac_flags) { + rc = exeq->remove(bp, exeq->owner, exeq_pos); + if (rc) { + BNX2X_ERR("Failed to remove command\n"); + return rc; + } list_del(&exeq_pos->link); + } } spin_unlock_bh(&exeq->lock); @@ -1908,6 +1945,7 @@ void bnx2x_init_mac_obj(struct bnx2x *bp, bnx2x_exe_queue_init(bp, &mac_obj->exe_queue, 1, qable_obj, bnx2x_validate_vlan_mac, + bnx2x_remove_vlan_mac, bnx2x_optimize_vlan_mac, bnx2x_execute_vlan_mac, bnx2x_exeq_get_mac); @@ -1924,6 +1962,7 @@ void bnx2x_init_mac_obj(struct bnx2x *bp, bnx2x_exe_queue_init(bp, &mac_obj->exe_queue, CLASSIFY_RULES_COUNT, qable_obj, bnx2x_validate_vlan_mac, + bnx2x_remove_vlan_mac, bnx2x_optimize_vlan_mac, bnx2x_execute_vlan_mac, bnx2x_exeq_get_mac); @@ -1963,6 +2002,7 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp, bnx2x_exe_queue_init(bp, &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT, qable_obj, bnx2x_validate_vlan_mac, + bnx2x_remove_vlan_mac, bnx2x_optimize_vlan_mac, bnx2x_execute_vlan_mac, bnx2x_exeq_get_vlan); @@ -2009,6 +2049,7 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, bnx2x_exe_queue_init(bp, &vlan_mac_obj->exe_queue, 1, qable_obj, bnx2x_validate_vlan_mac, + bnx2x_remove_vlan_mac, bnx2x_optimize_vlan_mac, bnx2x_execute_vlan_mac, bnx2x_exeq_get_vlan_mac); @@ -2025,6 +2066,7 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, &vlan_mac_obj->exe_queue, CLASSIFY_RULES_COUNT, qable_obj, bnx2x_validate_vlan_mac, + bnx2x_remove_vlan_mac, bnx2x_optimize_vlan_mac, bnx2x_execute_vlan_mac, bnx2x_exeq_get_vlan_mac); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 992308f..66da39f 100644 --- 
a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -161,6 +161,10 @@ typedef int (*exe_q_validate)(struct bnx2x *bp, union bnx2x_qable_obj *o, struct bnx2x_exeq_elem *elem); +typedef int (*exe_q_remove)(struct bnx2x *bp, + union bnx2x_qable_obj *o, + struct bnx2x_exeq_elem *elem); + /** * @return positive is entry was optimized, 0 - if not, negative * in case of an error. @@ -203,11 +207,18 @@ struct bnx2x_exe_queue_obj { */ exe_q_validate validate; + /** + * Called before removing pending commands, cleaning allocated + * resources (e.g., credits from validate) + */ + exe_q_remove remove; /** * This will try to cancel the current pending commands list * considering the new command. * + * Returns the number of optimized commands or a negative error code + * * Must run under exe_queue->lock */ exe_q_optimize optimize; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 076e02a..a1f2e0f 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -6667,14 +6667,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) iph = ip_hdr(skb); tcp_opt_len = tcp_optlen(skb); - if (skb_is_gso_v6(skb)) { - hdr_len = skb_headlen(skb) - ETH_HLEN; - } else { - u32 ip_tcp_len; - - ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); - hdr_len = ip_tcp_len + tcp_opt_len; + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; + if (!skb_is_gso_v6(skb)) { iph->check = 0; iph->tot_len = htons(mss + hdr_len); } @@ -8846,9 +8841,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); udelay(100); - if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) { + if (tg3_flag(tp, USING_MSIX)) { val = tr32(MSGINT_MODE); - val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE; + val |= MSGINT_MODE_ENABLE; + if (tp->irq_cnt > 1) + val |= MSGINT_MODE_MULTIVEC_EN; if (!tg3_flag(tp, 1SHOT_MSI)) val |= MSGINT_MODE_ONE_SHOT_DISABLE; tw32(MSGINT_MODE, val); @@ -9548,19 +9545,18 @@ static int tg3_request_firmware(struct tg3 *tp) static bool tg3_enable_msix(struct tg3 *tp) { - int i, rc, cpus = num_online_cpus(); + int i, rc; struct msix_entry msix_ent[tp->irq_max]; - if (cpus == 1) - /* Just fallback to the simpler MSI mode. */ - return false; - - /* - * We want as many rx rings enabled as there are cpus. - * The first MSIX vector only deals with link interrupts, etc, - * so we add one to the number of vectors we are requesting. - */ - tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max); + tp->irq_cnt = num_online_cpus(); + if (tp->irq_cnt > 1) { + /* We want as many rx rings enabled as there are cpus. + * In multiqueue MSI-X mode, the first MSI-X vector + * only deals with link interrupts, etc, so we add + * one to the number of vectors we are requesting. 
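A standalone sketch of the vector-sizing rule described in the tg3_enable_msix comment above: start from the online CPU count, and only when there is more than one CPU add one extra vector for the link interrupt, capped at irq_max (the single-CPU case no longer falls back to MSI from here). The helper names are made up:

    #include <stdio.h>

    static unsigned min_u(unsigned a, unsigned b)
    {
        return a < b ? a : b;
    }

    /* One rx ring per CPU, plus one vector for link interrupts when multiqueue. */
    static unsigned msix_vectors(unsigned online_cpus, unsigned irq_max)
    {
        unsigned irq_cnt = online_cpus;

        if (irq_cnt > 1)
            irq_cnt = min_u(irq_cnt + 1, irq_max);
        return irq_cnt;
    }

    int main(void)
    {
        printf("%u\n", msix_vectors(1, 5));  /* 1: single vector      */
        printf("%u\n", msix_vectors(4, 5));  /* 5: 4 rings + link     */
        printf("%u\n", msix_vectors(16, 5)); /* 5: capped at irq_max  */
        return 0;
    }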
+ */ + tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max); + } for (i = 0; i < tp->irq_max; i++) { msix_ent[i].entry = i; diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index fe0c29a..ee93a20 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -32,7 +32,7 @@ #define DRV_NAME "enic" #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" -#define DRV_VERSION "2.1.1.28" +#define DRV_VERSION "2.1.1.31" #define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc" #define ENIC_BARS_MAX 6 diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 2fd9db4..ab3f67f 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -57,11 +57,13 @@ #define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */ #define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */ +#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */ /* Supported devices */ static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = { { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) }, { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) }, + { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) }, { 0, } /* end of table */ }; @@ -132,6 +134,11 @@ int enic_sriov_enabled(struct enic *enic) return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0; } +static int enic_is_sriov_vf(struct enic *enic) +{ + return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF; +} + int enic_is_valid_vf(struct enic *enic, int vf) { #ifdef CONFIG_PCI_IOV @@ -437,7 +444,7 @@ static void enic_mtu_check(struct enic *enic) if (mtu && mtu != enic->port_mtu) { enic->port_mtu = mtu; - if (enic_is_dynamic(enic)) { + if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) { mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, mtu)); if (mtu != netdev->mtu) @@ -849,7 +856,7 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr) { struct enic *enic = netdev_priv(netdev); - if (enic_is_dynamic(enic)) { + if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) { if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr)) return -EADDRNOTAVAIL; } else { @@ -1608,7 +1615,7 @@ static int enic_open(struct net_device *netdev) for (i = 0; i < enic->rq_count; i++) vnic_rq_enable(&enic->rq[i]); - if (!enic_is_dynamic(enic)) + if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) enic_dev_add_station_addr(enic); enic_set_rx_mode(netdev); @@ -1659,7 +1666,7 @@ static int enic_stop(struct net_device *netdev) netif_carrier_off(netdev); netif_tx_disable(netdev); - if (!enic_is_dynamic(enic)) + if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) enic_dev_del_station_addr(enic); for (i = 0; i < enic->wq_count; i++) { @@ -1696,7 +1703,7 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu) if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU) return -EINVAL; - if (enic_is_dynamic(enic)) + if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) return -EOPNOTSUPP; if (running) @@ -2263,10 +2270,10 @@ static int __devinit enic_probe(struct pci_dev *pdev, int using_dac = 0; unsigned int i; int err; - int num_pps = 1; #ifdef CONFIG_PCI_IOV int pos = 0; #endif + int num_pps = 1; /* Allocate net device structure and initialize. Private * instance data is initialized to zero. 
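The enic changes above add the 0x0071 SR-IOV VF device ID plus a small enic_is_sriov_vf() predicate, and then widen the existing enic_is_dynamic() checks to "dynamic or VF" in the MTU, MAC address, open/stop and probe paths. A compact model of that "treat a VF like a dynamic vNIC" test, with the struct reduced to what the sketch needs and the IDs taken from the hunk:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define ID_ENET      0x0043
    #define ID_ENET_DYN  0x0044
    #define ID_ENET_VF   0x0071

    struct enic {
        uint16_t pci_device;    /* stand-in for enic->pdev->device */
    };

    static bool enic_is_dynamic(const struct enic *e)
    {
        return e->pci_device == ID_ENET_DYN;
    }

    static bool enic_is_sriov_vf(const struct enic *e)
    {
        return e->pci_device == ID_ENET_VF;
    }

    /* Callers that used to special-case only dynamic vNICs now also cover VFs. */
    static bool uses_port_profile_path(const struct enic *e)
    {
        return enic_is_dynamic(e) || enic_is_sriov_vf(e);
    }

    int main(void)
    {
        struct enic pf = { ID_ENET }, dyn = { ID_ENET_DYN }, vf = { ID_ENET_VF };

        printf("pf=%d dyn=%d vf=%d\n", uses_port_profile_path(&pf),
               uses_port_profile_path(&dyn), uses_port_profile_path(&vf));
        return 0;
    }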
@@ -2376,14 +2383,14 @@ static int __devinit enic_probe(struct pci_dev *pdev, num_pps = enic->num_vfs; } } - #endif + /* Allocate structure for port profiles */ enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL); if (!enic->pp) { pr_err("port profile alloc failed, aborting\n"); err = -ENOMEM; - goto err_out_disable_sriov; + goto err_out_disable_sriov_pp; } /* Issue device open to get device in known state @@ -2392,7 +2399,7 @@ static int __devinit enic_probe(struct pci_dev *pdev, err = enic_dev_open(enic); if (err) { dev_err(dev, "vNIC dev open failed, aborting\n"); - goto err_out_free_pp; + goto err_out_disable_sriov; } /* Setup devcmd lock @@ -2426,7 +2433,7 @@ static int __devinit enic_probe(struct pci_dev *pdev, * called later by an upper layer. */ - if (!enic_is_dynamic(enic)) { + if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) { err = vnic_dev_init(enic->vdev, 0); if (err) { dev_err(dev, "vNIC dev init failed, aborting\n"); @@ -2460,8 +2467,7 @@ static int __devinit enic_probe(struct pci_dev *pdev, (void)enic_change_mtu(netdev, enic->port_mtu); #ifdef CONFIG_PCI_IOV - if (enic_is_dynamic(enic) && pdev->is_virtfn && - is_zero_ether_addr(enic->mac_addr)) + if (enic_is_sriov_vf(enic) && is_zero_ether_addr(enic->mac_addr)) random_ether_addr(enic->mac_addr); #endif @@ -2474,7 +2480,7 @@ static int __devinit enic_probe(struct pci_dev *pdev, enic->tx_coalesce_usecs = enic->config.intr_timer_usec; enic->rx_coalesce_usecs = enic->tx_coalesce_usecs; - if (enic_is_dynamic(enic)) + if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) netdev->netdev_ops = &enic_netdev_dynamic_ops; else netdev->netdev_ops = &enic_netdev_ops; @@ -2516,17 +2522,17 @@ err_out_dev_deinit: enic_dev_deinit(enic); err_out_dev_close: vnic_dev_close(enic->vdev); -err_out_free_pp: - kfree(enic->pp); err_out_disable_sriov: + kfree(enic->pp); +err_out_disable_sriov_pp: #ifdef CONFIG_PCI_IOV if (enic_sriov_enabled(enic)) { pci_disable_sriov(pdev); enic->priv_flags &= ~ENIC_SRIOV_ENABLED; } err_out_vnic_unregister: - vnic_dev_unregister(enic->vdev); #endif + vnic_dev_unregister(enic->vdev); err_out_iounmap: enic_iounmap(enic); err_out_release_regions: diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index a6bcdb5..e703d64 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1786,8 +1786,7 @@ static void be_rx_queues_destroy(struct be_adapter *adapter) static u32 be_num_rxqs_want(struct be_adapter *adapter) { if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) && - !sriov_enabled(adapter) && be_physfn(adapter) && - !be_is_mc(adapter)) { + !sriov_enabled(adapter) && be_physfn(adapter)) { return 1 + MAX_RSS_QS; /* one default non-RSS queue */ } else { dev_warn(&adapter->pdev->dev, diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index fb5579a..47f85c3 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -25,6 +25,7 @@ #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/init.h> +#include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/netdevice.h> diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index a127cb2..bb336a0 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -25,6 +25,7 @@ #include <linux/etherdevice.h> #include <linux/ethtool.h> #include 
<linux/init.h> +#include <linux/interrupt.h> #include <linux/io.h> #include <linux/mii.h> #include <linux/module.h> diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c index 05b7359..6bdd8e3 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c @@ -263,7 +263,7 @@ static void ehea_get_ethtool_stats(struct net_device *dev, data[i++] = atomic_read(&port->port_res[k].swqe_avail); } -const struct ethtool_ops ehea_ethtool_ops = { +static const struct ethtool_ops ehea_ethtool_ops = { .get_settings = ehea_get_settings, .get_drvinfo = ehea_get_drvinfo, .get_msglevel = ehea_get_msglevel, diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 3554414..5d5fb26 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -94,8 +94,8 @@ static int port_name_cnt; static LIST_HEAD(adapter_list); static unsigned long ehea_driver_flags; static DEFINE_MUTEX(dlpar_mem_lock); -struct ehea_fw_handle_array ehea_fw_handles; -struct ehea_bcmc_reg_array ehea_bcmc_regs; +static struct ehea_fw_handle_array ehea_fw_handles; +static struct ehea_bcmc_reg_array ehea_bcmc_regs; static int __devinit ehea_probe_adapter(struct platform_device *dev, @@ -133,7 +133,7 @@ void ehea_dump(void *adr, int len, char *msg) } } -void ehea_schedule_port_reset(struct ehea_port *port) +static void ehea_schedule_port_reset(struct ehea_port *port) { if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags)) schedule_work(&port->reset_task); @@ -1404,7 +1404,7 @@ out: return ret; } -int ehea_gen_smrs(struct ehea_port_res *pr) +static int ehea_gen_smrs(struct ehea_port_res *pr) { int ret; struct ehea_adapter *adapter = pr->port->adapter; @@ -1426,7 +1426,7 @@ out: return -EIO; } -int ehea_rem_smrs(struct ehea_port_res *pr) +static int ehea_rem_smrs(struct ehea_port_res *pr) { if ((ehea_rem_mr(&pr->send_mr)) || (ehea_rem_mr(&pr->recv_mr))) @@ -2190,7 +2190,7 @@ out: return err; } -int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp) +static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp) { int ret = -EIO; u64 hret; @@ -2531,7 +2531,7 @@ static void ehea_flush_sq(struct ehea_port *port) } } -int ehea_stop_qps(struct net_device *dev) +static int ehea_stop_qps(struct net_device *dev) { struct ehea_port *port = netdev_priv(dev); struct ehea_adapter *adapter = port->adapter; @@ -2600,7 +2600,7 @@ out: return ret; } -void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr) +static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr) { struct ehea_qp qp = *orig_qp; struct ehea_qp_init_attr *init_attr = &qp.init_attr; @@ -2633,7 +2633,7 @@ void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr) } } -int ehea_restart_qps(struct net_device *dev) +static int ehea_restart_qps(struct net_device *dev) { struct ehea_port *port = netdev_priv(dev); struct ehea_adapter *adapter = port->adapter; @@ -2824,7 +2824,7 @@ static void ehea_tx_watchdog(struct net_device *dev) ehea_schedule_port_reset(port); } -int ehea_sense_adapter_attr(struct ehea_adapter *adapter) +static int ehea_sense_adapter_attr(struct ehea_adapter *adapter) { struct hcp_query_ehea *cb; u64 hret; @@ -2852,7 +2852,7 @@ out: return ret; } -int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo) +static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo) { struct hcp_ehea_port_cb4 *cb4; 
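The ehea hunks above (and the ehea_ethtool_ops change just before them) mark file-local tables and helpers static so they stop leaking into the global symbol namespace; this is the usual fix for sparse "should it be static?" warnings. A trivial illustration of the pattern outside the kernel, with invented names:

    #include <stdio.h>

    struct ops {
        void (*show)(const char *msg);
    };

    /* static (and const where possible) keeps file-scope helpers and tables
     * out of the global symbol table, so nothing outside this translation
     * unit can come to depend on them. */
    static void show_msg(const char *msg)
    {
        printf("%s\n", msg);
    }

    static const struct ops local_ops = {
        .show = show_msg,
    };

    int main(void)
    {
        local_ops.show("only this file can name local_ops");
        return 0;
    }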
u64 hret; @@ -2966,7 +2966,7 @@ static const struct net_device_ops ehea_netdev_ops = { .ndo_tx_timeout = ehea_tx_watchdog, }; -struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, +static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, u32 logical_port_id, struct device_node *dn) { @@ -3237,7 +3237,7 @@ static ssize_t ehea_remove_port(struct device *dev, static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port); static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port); -int ehea_create_device_sysfs(struct platform_device *dev) +static int ehea_create_device_sysfs(struct platform_device *dev) { int ret = device_create_file(&dev->dev, &dev_attr_probe_port); if (ret) @@ -3248,7 +3248,7 @@ out: return ret; } -void ehea_remove_device_sysfs(struct platform_device *dev) +static void ehea_remove_device_sysfs(struct platform_device *dev) { device_remove_file(&dev->dev, &dev_attr_probe_port); device_remove_file(&dev->dev, &dev_attr_remove_port); @@ -3379,7 +3379,7 @@ static int __devexit ehea_remove(struct platform_device *dev) return 0; } -void ehea_crash_handler(void) +static void ehea_crash_handler(void) { int i; @@ -3491,7 +3491,7 @@ static ssize_t ehea_show_capabilities(struct device_driver *drv, static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_capabilities, NULL); -int __init ehea_module_init(void) +static int __init ehea_module_init(void) { int ret; diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c index 95b9f4f..c25b05b 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c @@ -34,9 +34,7 @@ #include "ehea_phyp.h" #include "ehea_qmr.h" -struct ehea_bmap *ehea_bmap = NULL; - - +static struct ehea_bmap *ehea_bmap; static void *hw_qpageit_get_inc(struct hw_queue *queue) { @@ -212,7 +210,7 @@ out_nomem: return NULL; } -u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force) +static u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force) { u64 hret; u64 adapter_handle = cq->adapter->handle; @@ -337,7 +335,7 @@ struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq) return eqe; } -u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force) +static u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force) { u64 hret; unsigned long flags; @@ -381,7 +379,7 @@ int ehea_destroy_eq(struct ehea_eq *eq) /** * allocates memory for a queue and registers pages in phyp */ -int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue, +static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue, int nr_pages, int wqe_size, int act_nr_sges, struct ehea_adapter *adapter, int h_call_q_selector) { @@ -516,7 +514,7 @@ out_freemem: return NULL; } -u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force) +static u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force) { u64 hret; struct ehea_qp_init_attr *qp_attr = &qp->init_attr; @@ -976,7 +974,7 @@ int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr, return 0; } -void print_error_data(u64 *data) +static void print_error_data(u64 *data) { int length; u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]); diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile index c6e4621..6565c46 100644 --- a/drivers/net/ethernet/intel/igb/Makefile +++ b/drivers/net/ethernet/intel/igb/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel 82575 PCI-Express Ethernet Linux driver -# 
Copyright(c) 1999 - 2011 Intel Corporation. +# Copyright(c) 1999 - 2012 Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index b8e20f0..08bdc33 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h index 08a757e..b927d79 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.h +++ b/drivers/net/ethernet/intel/igb/e1000_82575.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index f5fc572..aed2174 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 4519a13..f67cbd3 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index 73aac08..f57338a 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -151,7 +151,7 @@ void igb_clear_vfta_i350(struct e1000_hw *hw) * Writes value at the given offset in the register array which stores * the VLAN filter table. 
**/ -void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) +static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) { int i; diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h index e45996b..cbddc4e 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.h +++ b/drivers/net/ethernet/intel/igb/e1000_mac.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c index 469d95e..5988b89 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.c +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h index eddb0f8..dbcfa3d 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.h +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c index 4040712..fa2c6ba 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.c +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h index a2a7ca9..825b022 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.h +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2011 Intel Corporation. + Copyright(c) 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index b17d7c2..789de5b 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. 
+ Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h index 8510797..4c32ac6 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.h +++ b/drivers/net/ethernet/intel/igb/e1000_phy.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 0a860bc..ccdf36d 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 3d12e67..8e33bdd 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 7998bf4..aa399a8 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 01e5e89..e91d73c 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -68,7 +68,7 @@ char igb_driver_name[] = "igb"; char igb_driver_version[] = DRV_VERSION; static const char igb_driver_string[] = "Intel(R) Gigabit Ethernet Network Driver"; -static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation."; +static const char igb_copyright[] = "Copyright (c) 2007-2012 Intel Corporation."; static const struct e1000_info *igb_info_tbl[] = { [board_82575] = &e1000_82575_info, @@ -4003,8 +4003,8 @@ set_itr_now: } } -void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, - u32 type_tucmd, u32 mss_l4len_idx) +static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, + u32 type_tucmd, u32 mss_l4len_idx) { struct e1000_adv_tx_context_desc *context_desc; u16 i = tx_ring->next_to_use; @@ -5623,7 +5623,7 @@ static irqreturn_t igb_intr(int irq, void *data) return IRQ_HANDLED; } -void igb_ring_irq_enable(struct igb_q_vector *q_vector) +static void igb_ring_irq_enable(struct igb_q_vector *q_vector) { struct igb_adapter *adapter = q_vector->adapter; struct e1000_hw *hw = &adapter->hw; diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index 7b600a1..2dba534 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -468,6 +468,5 @@ static const struct ethtool_ops igbvf_ethtool_ops = { void igbvf_set_ethtool_ops(struct net_device *netdev) { - /* have to "undeclare" const on this struct to remove warnings */ - SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igbvf_ethtool_ops); + SET_ETHTOOL_OPS(netdev, &igbvf_ethtool_ops); } diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index fd3da30..a4b20c8 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1194,11 +1194,6 @@ static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - igbvf_irq_disable(adapter); - - if (!test_bit(__IGBVF_DOWN, &adapter->state)) - igbvf_irq_enable(adapter); - if (hw->mac.ops.set_vfta(hw, vid, false)) { dev_err(&adapter->pdev->dev, "Failed to remove vlan id %d\n", vid); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 802bfa0..775602e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -161,19 +161,19 @@ /* Receive DMA Registers */ #define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \ - (0x0D000 + ((_i - 64) * 0x40))) + (0x0D000 + (((_i) - 64) * 0x40))) #define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \ - (0x0D004 + ((_i - 64) * 0x40))) + (0x0D004 + (((_i) - 64) * 0x40))) #define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \ - (0x0D008 + ((_i - 64) * 0x40))) + (0x0D008 + (((_i) - 64) * 0x40))) #define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \ - (0x0D010 + ((_i - 64) * 0x40))) + (0x0D010 + (((_i) - 64) * 0x40))) #define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \ - (0x0D018 + ((_i - 64) * 0x40))) + (0x0D018 + (((_i) - 64) * 0x40))) #define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \ - (0x0D028 + ((_i - 64) * 0x40))) + (0x0D028 + (((_i) - 64) * 0x40))) #define IXGBE_RSCCTL(_i) (((_i) < 64) ? 
(0x0102C + ((_i) * 0x40)) : \ - (0x0D02C + ((_i - 64) * 0x40))) + (0x0D02C + (((_i) - 64) * 0x40))) #define IXGBE_RSCDBU 0x03028 #define IXGBE_RDDCC 0x02F20 #define IXGBE_RXMEMWRAP 0x03190 @@ -186,7 +186,7 @@ */ #define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ - (0x0D014 + ((_i - 64) * 0x40)))) + (0x0D014 + (((_i) - 64) * 0x40)))) /* * Rx DCA Control Register: * 00-15 : 0x02200 + n*4 @@ -195,7 +195,7 @@ */ #define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \ - (0x0D00C + ((_i - 64) * 0x40)))) + (0x0D00C + (((_i) - 64) * 0x40)))) #define IXGBE_RDRXCTL 0x02F00 #define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) /* 8 of these 0x03C00 - 0x03C1C */ @@ -344,9 +344,9 @@ #define IXGBE_WUPL 0x05900 #define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ -#define IXGBE_FHFT(_n) (0x09000 + (_n * 0x100)) /* Flex host filter table */ -#define IXGBE_FHFT_EXT(_n) (0x09800 + (_n * 0x100)) /* Ext Flexible Host - * Filter Table */ +#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */ +#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host + * Filter Table */ #define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4 #define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2 @@ -1485,7 +1485,7 @@ enum { #define IXGBE_LED_BLINK_BASE 0x00000080 #define IXGBE_LED_MODE_MASK_BASE 0x0000000F #define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i))) -#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i)) +#define IXGBE_LED_MODE_SHIFT(_i) (8 * (_i)) #define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) #define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) #define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) @@ -2068,9 +2068,9 @@ enum { /* SR-IOV specific macros */ #define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4) -#define IXGBE_MBVFICR(_i) (0x00710 + (_i * 4)) -#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600)) -#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4)) +#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4)) +#define IXGBE_VFLRE(_i) ((((_i) & 1) ? 
0x001C0 : 0x00600)) +#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4)) enum ixgbe_fdir_pballoc_type { IXGBE_FDIR_PBALLOC_NONE = 0, diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index dc8e651..c857003 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -56,7 +56,8 @@ struct ixgbe_stats { offsetof(struct ixgbevf_adapter, m), \ offsetof(struct ixgbevf_adapter, b), \ offsetof(struct ixgbevf_adapter, r) -static struct ixgbe_stats ixgbe_gstrings_stats[] = { + +static const struct ixgbe_stats ixgbe_gstrings_stats[] = { {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc, stats.saved_reset_vfgprc)}, {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc, @@ -671,7 +672,7 @@ static int ixgbevf_nway_reset(struct net_device *netdev) return 0; } -static struct ethtool_ops ixgbevf_ethtool_ops = { +static const struct ethtool_ops ixgbevf_ethtool_ops = { .get_settings = ixgbevf_get_settings, .get_drvinfo = ixgbevf_get_drvinfo, .get_regs_len = ixgbevf_get_regs_len, diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index e6c9d1a..9075c1d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -279,12 +279,12 @@ enum ixgbevf_boards { board_X540_vf, }; -extern struct ixgbevf_info ixgbevf_82599_vf_info; -extern struct ixgbevf_info ixgbevf_X540_vf_info; -extern struct ixgbe_mbx_operations ixgbevf_mbx_ops; +extern const struct ixgbevf_info ixgbevf_82599_vf_info; +extern const struct ixgbevf_info ixgbevf_X540_vf_info; +extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops; /* needed by ethtool.c */ -extern char ixgbevf_driver_name[]; +extern const char ixgbevf_driver_name[]; extern const char ixgbevf_driver_version[]; extern int ixgbevf_up(struct ixgbevf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 891162d..bed411b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -53,7 +53,7 @@ #include "ixgbevf.h" -char ixgbevf_driver_name[] = "ixgbevf"; +const char ixgbevf_driver_name[] = "ixgbevf"; static const char ixgbevf_driver_string[] = "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; @@ -917,31 +917,34 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data) struct ixgbe_hw *hw = &adapter->hw; u32 eicr; u32 msg; + bool got_ack = false; eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS); IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr); - if (!hw->mbx.ops.check_for_ack(hw)) { - /* - * checking for the ack clears the PFACK bit. Place - * it back in the v2p_mailbox cache so that anyone - * polling for an ack will not miss it. Also - * avoid the read below because the code to read - * the mailbox will also clear the ack bit. This was - * causing lost acks. Just cache the bit and exit - * the IRQ handler. 
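The ixgbe_type.h edits above wrap every macro parameter in parentheses, e.g. (((_i) - 64) * 0x40) instead of ((_i - 64) * 0x40), so the register macros stay correct when the argument is itself an expression. A small demonstration of the failure mode being fixed, using illustrative BAD/GOOD macros rather than the real register map:

    #include <stdio.h>

    /* Same shape as the RDBAL/RDT family above; BAD omits the parentheses
     * around the parameter, GOOD adds them. */
    #define RD_BAD(_i)   (0x0D000 + (_i - 64) * 0x40)
    #define RD_GOOD(_i)  (0x0D000 + (((_i) - 64) * 0x40))

    int main(void)
    {
        /* With a plain variable both agree ... */
        unsigned q = 70;

        printf("plain: 0x%x 0x%x\n", RD_BAD(q), RD_GOOD(q));

        /* ... but pass an expression and '-' binds before '&' inside BAD:
         * (160 & 0xff - 64) is 160 & 0xbf = 160, not (160 & 0xff) - 64 = 96. */
        printf("expr : 0x%x 0x%x\n", RD_BAD(160 & 0xff), RD_GOOD(160 & 0xff));
        return 0;
    }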
- */ - hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK; - goto out; - } + if (!hw->mbx.ops.check_for_ack(hw)) + got_ack = true; - /* Not an ack interrupt, go ahead and read the message */ - hw->mbx.ops.read(hw, &msg, 1); + if (!hw->mbx.ops.check_for_msg(hw)) { + hw->mbx.ops.read(hw, &msg, 1); - if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) - mod_timer(&adapter->watchdog_timer, - round_jiffies(jiffies + 1)); + if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 1)); + if (msg & IXGBE_VT_MSGTYPE_NACK) + pr_warn("Last Request of type %2.2x to PF Nacked\n", + msg & 0xFF); + goto out; + } + + /* + * checking for the ack clears the PFACK bit. Place + * it back in the v2p_mailbox cache so that anyone + * polling for an ack will not miss it + */ + if (got_ack) + hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK; out: return IRQ_HANDLED; } diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c index 930fa83..13532d9 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.c +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c @@ -26,6 +26,7 @@ *******************************************************************************/ #include "mbx.h" +#include "ixgbevf.h" /** * ixgbevf_poll_for_msg - Wait for message notification @@ -328,7 +329,7 @@ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw) return 0; } -struct ixgbe_mbx_operations ixgbevf_mbx_ops = { +const struct ixgbe_mbx_operations ixgbevf_mbx_ops = { .init_params = ixgbevf_init_mbx_params_vf, .read = ixgbevf_read_mbx_vf, .write = ixgbevf_write_mbx_vf, diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 21533e3..d0138d7 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -26,6 +26,7 @@ *******************************************************************************/ #include "vf.h" +#include "ixgbevf.h" /** * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx @@ -401,7 +402,7 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, return 0; } -static struct ixgbe_mac_operations ixgbevf_mac_ops = { +static const struct ixgbe_mac_operations ixgbevf_mac_ops = { .init_hw = ixgbevf_init_hw_vf, .reset_hw = ixgbevf_reset_hw_vf, .start_hw = ixgbevf_start_hw_vf, @@ -415,12 +416,12 @@ static struct ixgbe_mac_operations ixgbevf_mac_ops = { .set_vfta = ixgbevf_set_vfta_vf, }; -struct ixgbevf_info ixgbevf_82599_vf_info = { +const struct ixgbevf_info ixgbevf_82599_vf_info = { .mac = ixgbe_mac_82599_vf, .mac_ops = &ixgbevf_mac_ops, }; -struct ixgbevf_info ixgbevf_X540_vf_info = { +const struct ixgbevf_info ixgbevf_X540_vf_info = { .mac = ixgbe_mac_X540_vf, .mac_ops = &ixgbevf_mac_ops, }; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index 10306b4..d556619 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -167,7 +167,7 @@ struct ixgbevf_hw_stats { struct ixgbevf_info { enum ixgbe_mac_type mac; - struct ixgbe_mac_operations *mac_ops; + const struct ixgbe_mac_operations *mac_ops; }; #endif /* __IXGBE_VF_H__ */ diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 9c049d2..9edecfa 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -136,6 +136,8 @@ static char mv643xx_eth_driver_version[] = "1.4"; #define INT_MASK 0x0068 #define INT_MASK_EXT 
0x006c #define TX_FIFO_URGENT_THRESHOLD 0x0074 +#define RX_DISCARD_FRAME_CNT 0x0084 +#define RX_OVERRUN_FRAME_CNT 0x0088 #define TXQ_FIX_PRIO_CONF_MOVED 0x00dc #define TX_BW_RATE_MOVED 0x00e0 #define TX_BW_MTU_MOVED 0x00e8 @@ -334,6 +336,9 @@ struct mib_counters { u32 bad_crc_event; u32 collision; u32 late_collision; + /* Non MIB hardware counters */ + u32 rx_discard; + u32 rx_overrun; }; struct lro_counters { @@ -1225,6 +1230,10 @@ static void mib_counters_clear(struct mv643xx_eth_private *mp) for (i = 0; i < 0x80; i += 4) mib_read(mp, i); + + /* Clear non MIB hw counters also */ + rdlp(mp, RX_DISCARD_FRAME_CNT); + rdlp(mp, RX_OVERRUN_FRAME_CNT); } static void mib_counters_update(struct mv643xx_eth_private *mp) @@ -1262,6 +1271,9 @@ static void mib_counters_update(struct mv643xx_eth_private *mp) p->bad_crc_event += mib_read(mp, 0x74); p->collision += mib_read(mp, 0x78); p->late_collision += mib_read(mp, 0x7c); + /* Non MIB hardware counters */ + p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT); + p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT); spin_unlock_bh(&mp->mib_counters_lock); mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); @@ -1413,6 +1425,8 @@ static const struct mv643xx_eth_stats mv643xx_eth_stats[] = { MIBSTAT(bad_crc_event), MIBSTAT(collision), MIBSTAT(late_collision), + MIBSTAT(rx_discard), + MIBSTAT(rx_overrun), LROSTAT(lro_aggregated), LROSTAT(lro_flushed), LROSTAT(lro_no_desc), diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 18a87a5..edb9bda 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -931,17 +931,20 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base) } /* Allocate and setup a new buffer for receiving */ -static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, - struct sk_buff *skb, unsigned int bufsize) +static int skge_rx_setup(struct pci_dev *pdev, + struct skge_element *e, + struct sk_buff *skb, unsigned int bufsize) { struct skge_rx_desc *rd = e->desc; - u64 map; + dma_addr_t map; - map = pci_map_single(skge->hw->pdev, skb->data, bufsize, + map = pci_map_single(pdev, skb->data, bufsize, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(pdev, map)) + goto mapping_error; - rd->dma_lo = map; - rd->dma_hi = map >> 32; + rd->dma_lo = lower_32_bits(map); + rd->dma_hi = upper_32_bits(map); e->skb = skb; rd->csum1_start = ETH_HLEN; rd->csum2_start = ETH_HLEN; @@ -953,6 +956,13 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; dma_unmap_addr_set(e, mapaddr, map); dma_unmap_len_set(e, maplen, bufsize); + return 0; + +mapping_error: + if (net_ratelimit()) + dev_warn(&pdev->dev, "%s: rx mapping error\n", + skb->dev->name); + return -EIO; } /* Resume receiving using existing skb, @@ -1014,7 +1024,11 @@ static int skge_rx_fill(struct net_device *dev) return -ENOMEM; skb_reserve(skb, NET_IP_ALIGN); - skge_rx_setup(skge, e, skb, skge->rx_buf_size); + if (skge_rx_setup(skge->hw->pdev, e, skb, skge->rx_buf_size)) { + kfree_skb(skb); + return -ENOMEM; + } + } while ((e = e->next) != ring->start); ring->to_clean = ring->start; @@ -2576,6 +2590,7 @@ static int skge_up(struct net_device *dev) } /* Initialize MAC */ + netif_carrier_off(dev); spin_lock_bh(&hw->phy_lock); if (is_genesis(hw)) genesis_mac_init(hw, port); @@ -2728,7 +2743,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, struct skge_tx_desc *td; int i; u32 control, len; - u64 
map; + dma_addr_t map; if (skb_padto(skb, ETH_ZLEN)) return NETDEV_TX_OK; @@ -2742,11 +2757,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, e->skb = skb; len = skb_headlen(skb); map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(hw->pdev, map)) + goto mapping_error; + dma_unmap_addr_set(e, mapaddr, map); dma_unmap_len_set(e, maplen, len); - td->dma_lo = map; - td->dma_hi = map >> 32; + td->dma_lo = lower_32_bits(map); + td->dma_hi = upper_32_bits(map); if (skb->ip_summed == CHECKSUM_PARTIAL) { const int offset = skb_checksum_start_offset(skb); @@ -2777,14 +2795,16 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); + if (dma_mapping_error(&hw->pdev->dev, map)) + goto mapping_unwind; e = e->next; e->skb = skb; tf = e->desc; BUG_ON(tf->control & BMU_OWN); - tf->dma_lo = map; - tf->dma_hi = (u64) map >> 32; + tf->dma_lo = lower_32_bits(map); + tf->dma_hi = upper_32_bits(map); dma_unmap_addr_set(e, mapaddr, map); dma_unmap_len_set(e, maplen, skb_frag_size(frag)); @@ -2797,6 +2817,8 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, td->control = BMU_OWN | BMU_SW | BMU_STF | control | len; wmb(); + netdev_sent_queue(dev, skb->len); + skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START); netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev, @@ -2812,15 +2834,35 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, } return NETDEV_TX_OK; + +mapping_unwind: + /* unroll any pages that were already mapped. */ + if (e != skge->tx_ring.to_use) { + struct skge_element *u; + + for (u = skge->tx_ring.to_use->next; u != e; u = u->next) + pci_unmap_page(hw->pdev, dma_unmap_addr(u, mapaddr), + dma_unmap_len(u, maplen), + PCI_DMA_TODEVICE); + e = skge->tx_ring.to_use; + } + /* undo the mapping for the skb header */ + pci_unmap_single(hw->pdev, dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_TODEVICE); +mapping_error: + /* mapping error causes error message and packet to be discarded. */ + if (net_ratelimit()) + dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); + dev_kfree_skb(skb); + return NETDEV_TX_OK; } /* Free resources associated with this reing element */ -static void skge_tx_free(struct skge_port *skge, struct skge_element *e, - u32 control) +static inline void skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e, + u32 control) { - struct pci_dev *pdev = skge->hw->pdev; - /* skb header vs. 
fragment */ if (control & BMU_STF) pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr), @@ -2830,13 +2872,6 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e, pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr), dma_unmap_len(e, maplen), PCI_DMA_TODEVICE); - - if (control & BMU_EOF) { - netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev, - "tx done slot %td\n", e - skge->tx_ring.start); - - dev_kfree_skb(e->skb); - } } /* Free all buffers in transmit ring */ @@ -2847,10 +2882,15 @@ static void skge_tx_clean(struct net_device *dev) for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { struct skge_tx_desc *td = e->desc; - skge_tx_free(skge, e, td->control); + + skge_tx_unmap(skge->hw->pdev, e, td->control); + + if (td->control & BMU_EOF) + dev_kfree_skb(e->skb); td->control = 0; } + netdev_reset_queue(dev); skge->tx_ring.to_clean = e; } @@ -3059,13 +3099,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, if (!nskb) goto resubmit; + if (unlikely(skge_rx_setup(skge->hw->pdev, e, nskb, skge->rx_buf_size))) { + dev_kfree_skb(nskb); + goto resubmit; + } + pci_unmap_single(skge->hw->pdev, dma_unmap_addr(e, mapaddr), dma_unmap_len(e, maplen), PCI_DMA_FROMDEVICE); skb = e->skb; prefetch(skb->data); - skge_rx_setup(skge, e, nskb, skge->rx_buf_size); } skb_put(skb, len); @@ -3111,6 +3155,7 @@ static void skge_tx_done(struct net_device *dev) struct skge_port *skge = netdev_priv(dev); struct skge_ring *ring = &skge->tx_ring; struct skge_element *e; + unsigned int bytes_compl = 0, pkts_compl = 0; skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); @@ -3120,8 +3165,20 @@ static void skge_tx_done(struct net_device *dev) if (control & BMU_OWN) break; - skge_tx_free(skge, e, control); + skge_tx_unmap(skge->hw->pdev, e, control); + + if (control & BMU_EOF) { + netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev, + "tx done slot %td\n", + e - skge->tx_ring.start); + + pkts_compl++; + bytes_compl += e->skb->len; + + dev_kfree_skb(e->skb); + } } + netdev_completed_queue(dev, pkts_compl, bytes_compl); skge->tx_ring.to_clean = e; /* Can run lockless until we need to synchronize to restart queue. 
*/ diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 978f593..405e6ac 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -1247,6 +1247,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, u32 reply; u32 slave_status = 0; u8 is_going_down = 0; + int i; slave_state[slave].comm_toggle ^= 1; reply = (u32) slave_state[slave].comm_toggle << 31; @@ -1258,6 +1259,10 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, if (cmd == MLX4_COMM_CMD_RESET) { mlx4_warn(dev, "Received reset from slave:%d\n", slave); slave_state[slave].active = false; + for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) { + slave_state[slave].event_eq[i].eqn = -1; + slave_state[slave].event_eq[i].token = 0; + } /*check if we are in the middle of FLR process, if so return "retry" status to the slave*/ if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) { @@ -1452,7 +1457,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_slave_state *s_state; - int i, err, port; + int i, j, err, port; priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE, &priv->mfunc.vhcr_dma, @@ -1485,6 +1490,8 @@ int mlx4_multi_func_init(struct mlx4_dev *dev) for (i = 0; i < dev->num_slaves; ++i) { s_state = &priv->mfunc.master.slave_state[i]; s_state->last_cmd = MLX4_COMM_CMD_RESET; + for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j) + s_state->event_eq[j].eqn = -1; __raw_writel((__force u32) 0, &priv->mfunc.comm[i].slave_write); __raw_writel((__force u32) 0, diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index 475f9d6..7e64033 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -96,7 +96,7 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type) static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int cq_num) { - return mlx4_cmd(dev, mailbox->dma | dev->caps.function, cq_num, 0, + return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } @@ -111,7 +111,7 @@ static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int cq_num) { - return mlx4_cmd_box(dev, dev->caps.function, mailbox ? mailbox->dma : 0, + return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 7dbc6a2..70346fd 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -183,10 +183,11 @@ static int mlx4_en_set_wol(struct net_device *netdev, static int mlx4_en_get_sset_count(struct net_device *dev, int sset) { struct mlx4_en_priv *priv = netdev_priv(dev); + int bit_count = hweight64(priv->stats_bitmap); switch (sset) { case ETH_SS_STATS: - return NUM_ALL_STATS + + return (priv->stats_bitmap ? 
bit_count : NUM_ALL_STATS) + (priv->tx_ring_num + priv->rx_ring_num) * 2; case ETH_SS_TEST: return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags @@ -201,14 +202,34 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev, { struct mlx4_en_priv *priv = netdev_priv(dev); int index = 0; - int i; + int i, j = 0; spin_lock_bh(&priv->stats_lock); - for (i = 0; i < NUM_MAIN_STATS; i++) - data[index++] = ((unsigned long *) &priv->stats)[i]; - for (i = 0; i < NUM_PORT_STATS; i++) - data[index++] = ((unsigned long *) &priv->port_stats)[i]; + if (!(priv->stats_bitmap)) { + for (i = 0; i < NUM_MAIN_STATS; i++) + data[index++] = + ((unsigned long *) &priv->stats)[i]; + for (i = 0; i < NUM_PORT_STATS; i++) + data[index++] = + ((unsigned long *) &priv->port_stats)[i]; + for (i = 0; i < NUM_PKT_STATS; i++) + data[index++] = + ((unsigned long *) &priv->pkstats)[i]; + } else { + for (i = 0; i < NUM_MAIN_STATS; i++) { + if ((priv->stats_bitmap >> j) & 1) + data[index++] = + ((unsigned long *) &priv->stats)[i]; + j++; + } + for (i = 0; i < NUM_PORT_STATS; i++) { + if ((priv->stats_bitmap >> j) & 1) + data[index++] = + ((unsigned long *) &priv->port_stats)[i]; + j++; + } + } for (i = 0; i < priv->tx_ring_num; i++) { data[index++] = priv->tx_ring[i].packets; data[index++] = priv->tx_ring[i].bytes; @@ -217,8 +238,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev, data[index++] = priv->rx_ring[i].packets; data[index++] = priv->rx_ring[i].bytes; } - for (i = 0; i < NUM_PKT_STATS; i++) - data[index++] = ((unsigned long *) &priv->pkstats)[i]; spin_unlock_bh(&priv->stats_lock); } @@ -247,11 +266,29 @@ static void mlx4_en_get_strings(struct net_device *dev, case ETH_SS_STATS: /* Add main counters */ - for (i = 0; i < NUM_MAIN_STATS; i++) - strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]); - for (i = 0; i< NUM_PORT_STATS; i++) - strcpy(data + (index++) * ETH_GSTRING_LEN, - main_strings[i + NUM_MAIN_STATS]); + if (!priv->stats_bitmap) { + for (i = 0; i < NUM_MAIN_STATS; i++) + strcpy(data + (index++) * ETH_GSTRING_LEN, + main_strings[i]); + for (i = 0; i < NUM_PORT_STATS; i++) + strcpy(data + (index++) * ETH_GSTRING_LEN, + main_strings[i + + NUM_MAIN_STATS]); + for (i = 0; i < NUM_PKT_STATS; i++) + strcpy(data + (index++) * ETH_GSTRING_LEN, + main_strings[i + + NUM_MAIN_STATS + + NUM_PORT_STATS]); + } else + for (i = 0; i < NUM_MAIN_STATS + NUM_PORT_STATS; i++) { + if ((priv->stats_bitmap >> i) & 1) { + strcpy(data + + (index++) * ETH_GSTRING_LEN, + main_strings[i]); + } + if (!(priv->stats_bitmap >> i)) + break; + } for (i = 0; i < priv->tx_ring_num; i++) { sprintf(data + (index++) * ETH_GSTRING_LEN, "tx%d_packets", i); @@ -264,9 +301,6 @@ static void mlx4_en_get_strings(struct net_device *dev, sprintf(data + (index++) * ETH_GSTRING_LEN, "rx%d_bytes", i); } - for (i = 0; i< NUM_PKT_STATS; i++) - strcpy(data + (index++) * ETH_GSTRING_LEN, - main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]); break; } } @@ -479,6 +513,95 @@ static void mlx4_en_get_ringparam(struct net_device *dev, param->tx_pending = priv->tx_ring[0].size; } +static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + return priv->rx_ring_num; +} + +static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_rss_map *rss_map = &priv->rss_map; + int rss_rings; + size_t n = priv->rx_ring_num; + int err = 0; + + rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num; + + while 
(n--) { + ring_index[n] = rss_map->qps[n % rss_rings].qpn - + rss_map->base_qpn; + } + + return err; +} + +static int mlx4_en_set_rxfh_indir(struct net_device *dev, + const u32 *ring_index) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int port_up = 0; + int err = 0; + int i; + int rss_rings = 0; + + /* Calculate RSS table size and make sure flows are spread evenly + * between rings + */ + for (i = 0; i < priv->rx_ring_num; i++) { + if (i > 0 && !ring_index[i] && !rss_rings) + rss_rings = i; + + if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num))) + return -EINVAL; + } + + if (!rss_rings) + rss_rings = priv->rx_ring_num; + + /* RSS table size must be an order of 2 */ + if (!is_power_of_2(rss_rings)) + return -EINVAL; + + mutex_lock(&mdev->state_lock); + if (priv->port_up) { + port_up = 1; + mlx4_en_stop_port(dev); + } + + priv->prof->rss_rings = rss_rings; + + if (port_up) { + err = mlx4_en_start_port(dev); + if (err) + en_err(priv, "Failed starting port\n"); + } + + mutex_unlock(&mdev->state_lock); + return err; +} + +static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int err = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = priv->rx_ring_num; + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + const struct ethtool_ops mlx4_en_ethtool_ops = { .get_drvinfo = mlx4_en_get_drvinfo, .get_settings = mlx4_en_get_settings, @@ -498,6 +621,10 @@ const struct ethtool_ops mlx4_en_ethtool_ops = { .set_pauseparam = mlx4_en_set_pauseparam, .get_ringparam = mlx4_en_get_ringparam, .set_ringparam = mlx4_en_set_ringparam, + .get_rxnfc = mlx4_en_get_rxnfc, + .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size, + .get_rxfh_indir = mlx4_en_get_rxfh_indir, + .set_rxfh_indir = mlx4_en_set_rxfh_indir, }; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index a06096f..2097a7d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -62,10 +62,6 @@ static const char mlx4_en_version[] = * Device scope module parameters */ - -/* Enable RSS TCP traffic */ -MLX4_EN_PARM_INT(tcp_rss, 1, - "Enable RSS for incomming TCP traffic or disabled (0)"); /* Enable RSS UDP traffic */ MLX4_EN_PARM_INT(udp_rss, 1, "Enable RSS for incomming UDP traffic or disabled (0)"); @@ -104,7 +100,6 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev) struct mlx4_en_profile *params = &mdev->profile; int i; - params->tcp_rss = tcp_rss; params->udp_rss = udp_rss; if (params->udp_rss && !(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UDP_RSS)) { @@ -120,6 +115,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev) params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS + (!!pfcrx) * MLX4_EN_NUM_PPP_RINGS; + params->prof[i].rss_rings = 0; } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 72fa807..467ae58 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -702,6 +702,8 @@ int mlx4_en_start_port(struct net_device *dev) /* Schedule multicast task to populate multicast list */ queue_work(mdev->workqueue, &priv->mcast_task); + mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); + priv->port_up = true; netif_tx_start_all_queues(dev); return 0; @@ 
-807,38 +809,50 @@ static void mlx4_en_restart(struct work_struct *work) mutex_unlock(&mdev->state_lock); } - -static int mlx4_en_open(struct net_device *dev) +static void mlx4_en_clear_stats(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; int i; - int err = 0; - - mutex_lock(&mdev->state_lock); - - if (!mdev->device_up) { - en_err(priv, "Cannot open - device down/disabled\n"); - err = -EBUSY; - goto out; - } - /* Reset HW statistics and performance counters */ if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) en_dbg(HW, priv, "Failed dumping statistics\n"); memset(&priv->stats, 0, sizeof(priv->stats)); memset(&priv->pstats, 0, sizeof(priv->pstats)); + memset(&priv->pkstats, 0, sizeof(priv->pkstats)); + memset(&priv->port_stats, 0, sizeof(priv->port_stats)); for (i = 0; i < priv->tx_ring_num; i++) { priv->tx_ring[i].bytes = 0; priv->tx_ring[i].packets = 0; + priv->tx_ring[i].tx_csum = 0; } for (i = 0; i < priv->rx_ring_num; i++) { priv->rx_ring[i].bytes = 0; priv->rx_ring[i].packets = 0; + priv->rx_ring[i].csum_ok = 0; + priv->rx_ring[i].csum_none = 0; + } +} + +static int mlx4_en_open(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int err = 0; + + mutex_lock(&mdev->state_lock); + + if (!mdev->device_up) { + en_err(priv, "Cannot open - device down/disabled\n"); + err = -EBUSY; + goto out; } + /* Reset HW statistics and SW counters */ + mlx4_en_clear_stats(dev); + err = mlx4_en_start_port(dev); if (err) en_err(priv, "Failed starting port:%d\n", priv->port); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index e8d6ad2..971d4b6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -853,6 +853,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) struct mlx4_en_rss_map *rss_map = &priv->rss_map; struct mlx4_qp_context context; struct mlx4_rss_context *rss_context; + int rss_rings; void *ptr; u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 | MLX4_RSS_TCP_IPV6); @@ -893,10 +894,15 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, priv->rx_ring[0].cqn, &context); + if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num) + rss_rings = priv->rx_ring_num; + else + rss_rings = priv->prof->rss_rings; + ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path) + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH; rss_context = ptr; - rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 | + rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 | (rss_map->base_qpn)); rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn); if (priv->mdev->profile.udp_rss) { diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 1e9b55e..55d7bd4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -513,25 +513,22 @@ int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave, { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_slave_event_eq_info *event_eq = - &priv->mfunc.master.slave_state[slave].event_eq; + priv->mfunc.master.slave_state[slave].event_eq; u32 in_modifier = vhcr->in_modifier; u32 eqn = in_modifier & 0x1FF; u64 in_param = vhcr->in_param; int err = 0; + int i; if (slave == dev->caps.function) err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn, 
0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); - if (!err) { - if (in_modifier >> 31) { - /* unmap */ - event_eq->event_type &= ~in_param; - } else { - event_eq->eqn = eqn; - event_eq->event_type = in_param; - } - } + if (!err) + for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) + if (in_param & (1LL << i)) + event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn; + return err; } @@ -546,7 +543,7 @@ static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap, static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int eq_num) { - return mlx4_cmd(dev, mailbox->dma | dev->caps.function, eq_num, 0, + return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } @@ -554,7 +551,7 @@ static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int eq_num) { - return mlx4_cmd_box(dev, dev->caps.function, mailbox->dma, eq_num, + return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index a424a19..8a21e10 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -158,7 +158,6 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, #define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0 #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1 -#define QUERY_FUNC_CAP_FUNCTION_OFFSET 0x3 #define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14 @@ -182,9 +181,6 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, field = 1 << 7; /* enable only ethernet interface */ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); - field = slave; - MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FUNCTION_OFFSET); - field = dev->caps.num_ports; MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); @@ -249,9 +245,6 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap) goto out; } - MLX4_GET(field, outbox, QUERY_FUNC_CAP_FUNCTION_OFFSET); - func_cap->function = field; - MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); func_cap->num_ports = field; diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index 119e0cc..e1a5fa5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -119,7 +119,6 @@ struct mlx4_dev_cap { }; struct mlx4_func_cap { - u8 function; u8 num_ports; u8 flags; u32 pf_context_behaviour; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 6bb62c5..678558b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -108,7 +108,7 @@ static struct mlx4_profile default_profile = { .num_cq = 1 << 16, .num_mcg = 1 << 13, .num_mpt = 1 << 19, - .num_mtt = 1 << 20, + .num_mtt = 1 << 20, /* It is really num mtt segements */ }; static int log_num_mac = 7; @@ -471,7 +471,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) return -ENOSYS; } - dev->caps.function = func_cap.function; dev->caps.num_ports = func_cap.num_ports; dev->caps.num_qps = func_cap.qp_quota; dev->caps.num_srqs = func_cap.srq_quota; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index a80121a..c92269f 100644 --- 
a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -388,9 +388,8 @@ struct mlx4_slave_eqe { }; struct mlx4_slave_event_eq_info { - u32 eqn; + int eqn; u16 token; - u64 event_type; }; struct mlx4_profile { @@ -449,6 +448,8 @@ struct mlx4_steer_index { struct list_head duplicates; }; +#define MLX4_EVENT_TYPES_NUM 64 + struct mlx4_slave_state { u8 comm_toggle; u8 last_cmd; @@ -461,7 +462,8 @@ struct mlx4_slave_state { struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES]; struct list_head mcast_filters[MLX4_MAX_PORTS + 1]; struct mlx4_vlan_fltr *vlan_filter[MLX4_MAX_PORTS + 1]; - struct mlx4_slave_event_eq_info event_eq; + /* event type to eq number lookup */ + struct mlx4_slave_event_eq_info event_eq[MLX4_EVENT_TYPES_NUM]; u16 eq_pi; u16 eq_ci; spinlock_t lock; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index f2a8e65..35f0884 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -325,11 +325,11 @@ struct mlx4_en_port_profile { u8 rx_ppp; u8 tx_pause; u8 tx_ppp; + int rss_rings; }; struct mlx4_en_profile { int rss_xor; - int tcp_rss; int udp_rss; u8 rss_mask; u32 active_ports; @@ -476,6 +476,7 @@ struct mlx4_en_priv { struct mlx4_en_perf_stats pstats; struct mlx4_en_pkt_stats pkstats; struct mlx4_en_port_stats port_stats; + u64 stats_bitmap; char *mc_addrs; int mc_addrs_cnt; struct mlx4_en_stat_out_mbox hw_stats; diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 01df556..8deeef9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -291,7 +291,7 @@ static u32 key_to_hw_index(u32 key) static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int mpt_index) { - return mlx4_cmd(dev, mailbox->dma | dev->caps.function , mpt_index, + return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); } diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c index 5c9a54d..db4746d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/pd.c +++ b/drivers/net/ethernet/mellanox/mlx4/pd.c @@ -52,8 +52,7 @@ int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn) *pdn = mlx4_bitmap_alloc(&priv->pd_bitmap); if (*pdn == -1) return -ENOMEM; - if (mlx4_is_mfunc(dev)) - *pdn |= (dev->caps.function + 1) << NOT_MASKED_PD_BITS; + return 0; } EXPORT_SYMBOL_GPL(mlx4_pd_alloc); diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 88b52e5..f44ae55 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -44,6 +44,11 @@ #define MLX4_VLAN_VALID (1u << 31) #define MLX4_VLAN_MASK 0xfff +#define MLX4_STATS_TRAFFIC_COUNTERS_MASK 0xfULL +#define MLX4_STATS_TRAFFIC_DROPS_MASK 0xc0ULL +#define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL +#define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL + void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table) { int i; @@ -898,6 +903,24 @@ int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { + if (slave != dev->caps.function) + return 0; return mlx4_common_dump_eth_stats(dev, slave, vhcr->in_modifier, outbox); } + +void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap) +{ + if (!mlx4_is_mfunc(dev)) { + *stats_bitmap = 0; + return; + } + + 
*stats_bitmap = (MLX4_STATS_TRAFFIC_COUNTERS_MASK | + MLX4_STATS_TRAFFIC_DROPS_MASK | + MLX4_STATS_PORT_COUNTERS_MASK); + + if (mlx4_is_master(dev)) + *stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK; +} +EXPORT_SYMBOL(mlx4_set_stats_bitmap); diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c index 66f91ca..1129677d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/profile.c +++ b/drivers/net/ethernet/mellanox/mlx4/profile.c @@ -110,7 +110,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX); profile[MLX4_RES_DMPT].num = request->num_mpt; profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; - profile[MLX4_RES_MTT].num = request->num_mtt; + profile[MLX4_RES_MTT].num = request->num_mtt * (1 << log_mtts_per_seg); profile[MLX4_RES_MCG].num = request->num_mcg; for (i = 0; i < MLX4_RES_NUM; ++i) { diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 6b03ac8..738f950 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -162,7 +162,7 @@ static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn = cpu_to_be32(qp->qpn); - ret = mlx4_cmd(dev, mailbox->dma | dev->caps.function, + ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31), new_state == MLX4_QP_STATE_RST ? 2 : 0, op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native); diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index ed20751..dcd819b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -1561,11 +1561,6 @@ static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt) return be32_to_cpu(mpt->mtt_sz); } -static int mr_get_pdn(struct mlx4_mpt_entry *mpt) -{ - return be32_to_cpu(mpt->pd_flags) & 0xffffff; -} - static int qp_get_mtt_addr(struct mlx4_qp_context *qpc) { return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8; @@ -1602,16 +1597,6 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc) return total_pages; } -static int qp_get_pdn(struct mlx4_qp_context *qpc) -{ - return be32_to_cpu(qpc->pd) & 0xffffff; -} - -static int pdn2slave(int pdn) -{ - return (pdn >> NOT_MASKED_PD_BITS) - 1; -} - static int check_mtt_range(struct mlx4_dev *dev, int slave, int start, int size, struct res_mtt *mtt) { @@ -1656,11 +1641,6 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave, mpt->mtt = mtt; } - if (pdn2slave(mr_get_pdn(inbox->buf)) != slave) { - err = -EPERM; - goto ex_put; - } - err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); if (err) goto ex_put; @@ -1792,11 +1772,6 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, if (err) goto ex_put_mtt; - if (pdn2slave(qp_get_pdn(qpc)) != slave) { - err = -EPERM; - goto ex_put_mtt; - } - err = get_res(dev, slave, rcqn, RES_CQ, &rcq); if (err) goto ex_put_mtt; @@ -2048,10 +2023,10 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe) if (!priv->mfunc.master.slave_state) return -EINVAL; - event_eq = &priv->mfunc.master.slave_state[slave].event_eq; + event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type]; /* Create the event only if the slave is registered */ - if ((event_eq->event_type & (1 << eqe->type)) == 0) + if (event_eq->eqn < 0) return 0; mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]); @@ 
-2289,11 +2264,6 @@ ex_put: return err; } -static int srq_get_pdn(struct mlx4_srq_context *srqc) -{ - return be32_to_cpu(srqc->pd) & 0xffffff; -} - static int srq_get_mtt_size(struct mlx4_srq_context *srqc) { int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf; @@ -2333,11 +2303,6 @@ int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave, if (err) goto ex_put_mtt; - if (pdn2slave(srq_get_pdn(srqc)) != slave) { - err = -EPERM; - goto ex_put_mtt; - } - err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); if (err) goto ex_put_mtt; diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c index 2823fff..feda6c0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/srq.c +++ b/drivers/net/ethernet/mellanox/mlx4/srq.c @@ -67,7 +67,7 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type) static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, int srq_num) { - return mlx4_cmd(dev, mailbox->dma | dev->caps.function, srq_num, 0, + return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index 75ec87a..0a85690 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c @@ -459,7 +459,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev) sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan, - &ctl->sg, 1, DMA_TO_DEVICE, + &ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); if (!ctl->adesc) return NETDEV_TX_BUSY; @@ -571,7 +571,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev) sg_dma_len(sg) = DMA_BUFFER_SIZE; ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan, - sg, 1, DMA_FROM_DEVICE, + sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); if (!ctl->adesc) diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 964e9c0..3ead111 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -1745,6 +1745,12 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter) struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring; int err; + /* Ensure we have a valid MAC */ + if (!is_valid_ether_addr(adapter->hw.mac.addr)) { + pr_err("Error: Invalid MAC address\n"); + return -EINVAL; + } + /* hardware has been reset, we need to reload some things */ pch_gbe_set_multi(netdev); @@ -2468,9 +2474,14 @@ static int pch_gbe_probe(struct pci_dev *pdev, memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); if (!is_valid_ether_addr(netdev->dev_addr)) { - dev_err(&pdev->dev, "Invalid MAC Address\n"); - ret = -EIO; - goto err_free_adapter; + /* + * If the MAC is invalid (or just missing), display a warning + * but do not abort setting up the device. pch_gbe_up will + * prevent the interface from being brought up until a valid MAC + * is set. 
+ */ + dev_err(&pdev->dev, "Invalid MAC address, " + "interface disabled.\n"); } setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog, (unsigned long)adapter); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 6ece429..813d41c 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1703,7 +1703,7 @@ static int sh_mdio_init(struct net_device *ndev, int id, mdp->mii_bus->name = "sh_mii"; mdp->mii_bus->parent = &ndev->dev; snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", - mdp->pdev->name, pdid); + mdp->pdev->name, id); /* PHY IRQ */ mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index da4a104..7319532 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -154,7 +154,7 @@ int stmmac_mdio_register(struct net_device *ndev) else irqlist = priv->mii_irq; - new_bus->name = "STMMAC MII Bus"; + new_bus->name = "stmmac"; new_bus->read = &stmmac_mdio_read; new_bus->write = &stmmac_mdio_write; new_bus->reset = &stmmac_mdio_reset; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 54a819a..c796de9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -170,9 +170,9 @@ static int stmmac_pci_resume(struct pci_dev *pdev) #define STMMAC_DEVICE_ID 0x1108 static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = { - { - PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)}, { - } + {PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)}, + {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_MAC)}, + {} }; MODULE_DEVICE_TABLE(pci, stmmac_id_table); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 462d05f..1a1ca6c 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -68,11 +68,11 @@ static void do_set_multicast(struct work_struct *w) nvdev = hv_get_drvdata(ndevctx->device_ctx); if (nvdev == NULL) - return; + goto out; rdev = nvdev->extension; if (rdev == NULL) - return; + goto out; if (net->flags & IFF_PROMISC) rndis_filter_set_packet_filter(rdev, @@ -83,6 +83,7 @@ static void do_set_multicast(struct work_struct *w) NDIS_PACKET_TYPE_ALL_MULTICAST | NDIS_PACKET_TYPE_DIRECTED); +out: kfree(w); } diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index f2f820c..9ea9921 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -173,6 +173,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN); if (!skb) return RX_HANDLER_CONSUMED; + eth = eth_hdr(skb); src = macvlan_hash_lookup(port, eth->h_source); if (!src) /* frame comes from an external address */ diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 88cc5db..8985cc6 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -38,12 +38,11 @@ /** * mdiobus_alloc_size - allocate a mii_bus structure + * @size: extra amount of memory to allocate for private storage. + * If non-zero, then bus->priv is points to that memory. * * Description: called by a bus driver to allocate an mii_bus * structure to fill in. - * - * 'size' is an an extra amount of memory to allocate for private storage. - * If non-zero, then bus->priv is points to that memory. 
*/ struct mii_bus *mdiobus_alloc_size(size_t size) { diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index ed2a862..6b678f3 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -92,9 +92,9 @@ struct team_option *__team_find_option(struct team *team, const char *opt_name) return NULL; } -int team_options_register(struct team *team, - const struct team_option *option, - size_t option_count) +int __team_options_register(struct team *team, + const struct team_option *option, + size_t option_count) { int i; struct team_option **dst_opts; @@ -116,8 +116,11 @@ int team_options_register(struct team *team, } } - for (i = 0; i < option_count; i++) + for (i = 0; i < option_count; i++) { + dst_opts[i]->changed = true; + dst_opts[i]->removed = false; list_add_tail(&dst_opts[i]->list, &team->option_list); + } kfree(dst_opts); return 0; @@ -130,10 +133,22 @@ rollback: return err; } -EXPORT_SYMBOL(team_options_register); +static void __team_options_mark_removed(struct team *team, + const struct team_option *option, + size_t option_count) +{ + int i; + + for (i = 0; i < option_count; i++, option++) { + struct team_option *del_opt; -static void __team_options_change_check(struct team *team, - struct team_option *changed_option); + del_opt = __team_find_option(team, option->name); + if (del_opt) { + del_opt->changed = true; + del_opt->removed = true; + } + } +} static void __team_options_unregister(struct team *team, const struct team_option *option, @@ -152,12 +167,29 @@ static void __team_options_unregister(struct team *team, } } +static void __team_options_change_check(struct team *team); + +int team_options_register(struct team *team, + const struct team_option *option, + size_t option_count) +{ + int err; + + err = __team_options_register(team, option, option_count); + if (err) + return err; + __team_options_change_check(team); + return 0; +} +EXPORT_SYMBOL(team_options_register); + void team_options_unregister(struct team *team, const struct team_option *option, size_t option_count) { + __team_options_mark_removed(team, option, option_count); + __team_options_change_check(team); __team_options_unregister(team, option, option_count); - __team_options_change_check(team, NULL); } EXPORT_SYMBOL(team_options_unregister); @@ -176,7 +208,8 @@ static int team_option_set(struct team *team, struct team_option *option, if (err) return err; - __team_options_change_check(team, option); + option->changed = true; + __team_options_change_check(team); return err; } @@ -653,6 +686,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev) return -ENOENT; } + port->removed = true; __team_port_change_check(port, false); team_port_list_del_port(team, port); team_adjust_ops(team); @@ -1200,10 +1234,9 @@ err_fill: return err; } -static int team_nl_fill_options_get_changed(struct sk_buff *skb, - u32 pid, u32 seq, int flags, - struct team *team, - struct team_option *changed_option) +static int team_nl_fill_options_get(struct sk_buff *skb, + u32 pid, u32 seq, int flags, + struct team *team, bool fillall) { struct nlattr *option_list; void *hdr; @@ -1223,12 +1256,19 @@ static int team_nl_fill_options_get_changed(struct sk_buff *skb, struct nlattr *option_item; long arg; + /* Include only changed options if fill all mode is not on */ + if (!fillall && !option->changed) + continue; option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION); if (!option_item) goto nla_put_failure; NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name); - if (option == changed_option) + if 
(option->changed) { NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED); + option->changed = false; + } + if (option->removed) + NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_REMOVED); switch (option->type) { case TEAM_OPTION_TYPE_U32: NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32); @@ -1255,13 +1295,13 @@ nla_put_failure: return -EMSGSIZE; } -static int team_nl_fill_options_get(struct sk_buff *skb, - struct genl_info *info, int flags, - struct team *team) +static int team_nl_fill_options_get_all(struct sk_buff *skb, + struct genl_info *info, int flags, + struct team *team) { - return team_nl_fill_options_get_changed(skb, info->snd_pid, - info->snd_seq, NLM_F_ACK, - team, NULL); + return team_nl_fill_options_get(skb, info->snd_pid, + info->snd_seq, NLM_F_ACK, + team, true); } static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info) @@ -1273,7 +1313,7 @@ static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info) if (!team) return -EINVAL; - err = team_nl_send_generic(info, team, team_nl_fill_options_get); + err = team_nl_send_generic(info, team, team_nl_fill_options_get_all); team_nl_team_put(team); @@ -1365,10 +1405,10 @@ team_put: return err; } -static int team_nl_fill_port_list_get_changed(struct sk_buff *skb, - u32 pid, u32 seq, int flags, - struct team *team, - struct team_port *changed_port) +static int team_nl_fill_port_list_get(struct sk_buff *skb, + u32 pid, u32 seq, int flags, + struct team *team, + bool fillall) { struct nlattr *port_list; void *hdr; @@ -1387,12 +1427,19 @@ static int team_nl_fill_port_list_get_changed(struct sk_buff *skb, list_for_each_entry(port, &team->port_list, list) { struct nlattr *port_item; + /* Include only changed ports if fill all mode is not on */ + if (!fillall && !port->changed) + continue; port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT); if (!port_item) goto nla_put_failure; NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex); - if (port == changed_port) + if (port->changed) { NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED); + port->changed = false; + } + if (port->removed) + NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_REMOVED); if (port->linkup) NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP); NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed); @@ -1408,13 +1455,13 @@ nla_put_failure: return -EMSGSIZE; } -static int team_nl_fill_port_list_get(struct sk_buff *skb, - struct genl_info *info, int flags, - struct team *team) +static int team_nl_fill_port_list_get_all(struct sk_buff *skb, + struct genl_info *info, int flags, + struct team *team) { - return team_nl_fill_port_list_get_changed(skb, info->snd_pid, - info->snd_seq, NLM_F_ACK, - team, NULL); + return team_nl_fill_port_list_get(skb, info->snd_pid, + info->snd_seq, NLM_F_ACK, + team, true); } static int team_nl_cmd_port_list_get(struct sk_buff *skb, @@ -1427,7 +1474,7 @@ static int team_nl_cmd_port_list_get(struct sk_buff *skb, if (!team) return -EINVAL; - err = team_nl_send_generic(info, team, team_nl_fill_port_list_get); + err = team_nl_send_generic(info, team, team_nl_fill_port_list_get_all); team_nl_team_put(team); @@ -1464,8 +1511,7 @@ static struct genl_multicast_group team_change_event_mcgrp = { .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, }; -static int team_nl_send_event_options_get(struct team *team, - struct team_option *changed_option) +static int team_nl_send_event_options_get(struct team *team) { struct sk_buff *skb; int err; @@ -1475,8 +1521,7 @@ static int team_nl_send_event_options_get(struct team *team, if (!skb) return -ENOMEM; - err = 
team_nl_fill_options_get_changed(skb, 0, 0, 0, team, - changed_option); + err = team_nl_fill_options_get(skb, 0, 0, 0, team, false); if (err < 0) goto err_fill; @@ -1489,18 +1534,17 @@ err_fill: return err; } -static int team_nl_send_event_port_list_get(struct team_port *port) +static int team_nl_send_event_port_list_get(struct team *team) { struct sk_buff *skb; int err; - struct net *net = dev_net(port->team->dev); + struct net *net = dev_net(team->dev); skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) return -ENOMEM; - err = team_nl_fill_port_list_get_changed(skb, 0, 0, 0, - port->team, port); + err = team_nl_fill_port_list_get(skb, 0, 0, 0, team, false); if (err < 0) goto err_fill; @@ -1544,12 +1588,11 @@ static void team_nl_fini(void) * Change checkers ******************/ -static void __team_options_change_check(struct team *team, - struct team_option *changed_option) +static void __team_options_change_check(struct team *team) { int err; - err = team_nl_send_event_options_get(team, changed_option); + err = team_nl_send_event_options_get(team); if (err) netdev_warn(team->dev, "Failed to send options change via netlink\n"); } @@ -1559,9 +1602,10 @@ static void __team_port_change_check(struct team_port *port, bool linkup) { int err; - if (port->linkup == linkup) + if (!port->removed && port->linkup == linkup) return; + port->changed = true; port->linkup = linkup; if (linkup) { struct ethtool_cmd ecmd; @@ -1577,7 +1621,7 @@ static void __team_port_change_check(struct team_port *port, bool linkup) port->duplex = 0; send_event: - err = team_nl_send_event_port_list_get(port); + err = team_nl_send_event_port_list_get(port->team); if (err) netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n", port->dev->name); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c index 88c81c5..09b8c9d 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c @@ -557,10 +557,11 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs, rxs->rs_status |= ATH9K_RXERR_DECRYPT; else if (rxsp->status11 & AR_MichaelErr) rxs->rs_status |= ATH9K_RXERR_MIC; - if (rxsp->status11 & AR_KeyMiss) - rxs->rs_status |= ATH9K_RXERR_KEYMISS; } + if (rxsp->status11 & AR_KeyMiss) + rxs->rs_status |= ATH9K_RXERR_KEYMISS; + return 0; } EXPORT_SYMBOL(ath9k_hw_process_rxdesc_edma); diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index fd3f19c..e196aba 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -618,10 +618,11 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, rs->rs_status |= ATH9K_RXERR_DECRYPT; else if (ads.ds_rxstatus8 & AR_MichaelErr) rs->rs_status |= ATH9K_RXERR_MIC; - if (ads.ds_rxstatus8 & AR_KeyMiss) - rs->rs_status |= ATH9K_RXERR_KEYMISS; } + if (ads.ds_rxstatus8 & AR_KeyMiss) + rs->rs_status |= ATH9K_RXERR_KEYMISS; + return 0; } EXPORT_SYMBOL(ath9k_hw_rxprocdesc); diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig index b97a40e..3876c7e 100644 --- a/drivers/net/wireless/b43/Kconfig +++ b/drivers/net/wireless/b43/Kconfig @@ -31,6 +31,12 @@ config B43_BCMA depends on B43 && BCMA default y +config B43_BCMA_EXTRA + bool "Hardware support that overlaps with the brcmsmac driver" + depends on B43_BCMA + default n if BRCMSMAC || BRCMSMAC_MODULE + default y + config B43_SSB bool depends on B43 && SSB diff --git 
a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 1c6f193..23ffb1b 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -116,8 +116,10 @@ MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO"); #ifdef CONFIG_B43_BCMA static const struct bcma_device_id b43_bcma_tbl[] = { BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x11, BCMA_ANY_CLASS), +#ifdef CONFIG_B43_BCMA_EXTRA BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x17, BCMA_ANY_CLASS), BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x18, BCMA_ANY_CLASS), +#endif BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1D, BCMA_ANY_CLASS), BCMA_CORETABLE_END }; @@ -4852,6 +4854,9 @@ static void b43_op_stop(struct ieee80211_hw *hw) cancel_work_sync(&(wl->beacon_update_trigger)); + if (!dev) + goto out; + mutex_lock(&wl->mutex); if (b43_status(dev) >= B43_STAT_STARTED) { dev = b43_wireless_core_stop(dev); @@ -4863,7 +4868,7 @@ static void b43_op_stop(struct ieee80211_hw *hw) out_unlock: mutex_unlock(&wl->mutex); - +out: cancel_work_sync(&(wl->txpower_adjust_work)); } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index f23b0c3..bf11850a 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -2475,7 +2475,7 @@ static s32 brcmf_init_iscan(struct brcmf_cfg80211_priv *cfg_priv) return err; } -static void brcmf_delay(u32 ms) +static __always_inline void brcmf_delay(u32 ms) { if (ms < 1000 / HZ) { cond_resched(); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index d106576..448ab9c 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -1128,14 +1128,7 @@ static int __devinit brcms_bcma_probe(struct bcma_device *pdev) return 0; } -static int brcms_pci_suspend(struct pci_dev *pdev) -{ - pci_save_state(pdev); - pci_disable_device(pdev); - return pci_set_power_state(pdev, PCI_D3hot); -} - -static int brcms_suspend(struct bcma_device *pdev, pm_message_t state) +static int brcms_suspend(struct bcma_device *pdev) { struct brcms_info *wl; struct ieee80211_hw *hw; @@ -1153,40 +1146,15 @@ static int brcms_suspend(struct bcma_device *pdev, pm_message_t state) wl->pub->hw_up = false; spin_unlock_bh(&wl->lock); - /* temporarily do suspend ourselves */ - return brcms_pci_suspend(pdev->bus->host_pci); -} - -static int brcms_pci_resume(struct pci_dev *pdev) -{ - int err = 0; - uint val; - - err = pci_set_power_state(pdev, PCI_D0); - if (err) - return err; - - pci_restore_state(pdev); - - err = pci_enable_device(pdev); - if (err) - return err; - - pci_set_master(pdev); - - pci_read_config_dword(pdev, 0x40, &val); - if ((val & 0x0000ff00) != 0) - pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); + pr_debug("brcms_suspend ok\n"); return 0; } static int brcms_resume(struct bcma_device *pdev) { - /* - * just do pci resume for now until bcma supports it. 
- */ - return brcms_pci_resume(pdev->bus->host_pci); + pr_debug("brcms_resume ok\n"); + return 0; } static struct bcma_driver brcms_bcma_driver = { diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c index f7ed340..f6affc6 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c @@ -7981,13 +7981,21 @@ int brcms_c_get_curband(struct brcms_c_info *wlc) void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop) { + int timeout = 20; + /* flush packet queue when requested */ if (drop) brcmu_pktq_flush(&wlc->pkt_queue->q, false, NULL, NULL); /* wait for queue and DMA fifos to run dry */ - while (!pktq_empty(&wlc->pkt_queue->q) || brcms_txpktpendtot(wlc) > 0) + while (!pktq_empty(&wlc->pkt_queue->q) || brcms_txpktpendtot(wlc) > 0) { brcms_msleep(wlc->wl, 1); + + if (--timeout == 0) + break; + } + + WARN_ON_ONCE(timeout == 0); } void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval) diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index 018a8de..4fcdac6 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c @@ -7848,7 +7848,7 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv, * more efficiently than we can parse it. ORDER MATTERS HERE */ struct ipw_rt_hdr *ipw_rt; - short len = le16_to_cpu(pkt->u.frame.length); + unsigned short len = le16_to_cpu(pkt->u.frame.length); /* We received data from the HW, so stop the watchdog */ dev->trans_start = jiffies; @@ -8023,7 +8023,7 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv, s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM; s8 noise = (s8) le16_to_cpu(frame->noise); u8 rate = frame->rate; - short len = le16_to_cpu(pkt->u.frame.length); + unsigned short len = le16_to_cpu(pkt->u.frame.length); struct sk_buff *skb; int hdr_only = 0; u16 filter = priv->prom_priv->filter; diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c index 084aa2c..a645472 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c @@ -569,7 +569,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) struct iwl_scan_cmd *scan; struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; u32 rate_flags = 0; - u16 cmd_len; + u16 cmd_len = 0; u16 rx_chain = 0; enum ieee80211_band band; u8 n_probes = 0; diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c index 752493f..65d1f05 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c +++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c @@ -972,11 +972,11 @@ void iwl_irq_tasklet(struct iwl_trans *trans) } #endif - spin_unlock_irqrestore(&trans->shrd->lock, flags); - /* saved interrupt in inta variable now we can reset trans_pcie->inta */ trans_pcie->inta = 0; + spin_unlock_irqrestore(&trans->shrd->lock, flags); + /* Now service all interrupt bits discovered above. */ if (inta & CSR_INT_BIT_HW_ERR) { IWL_ERR(trans, "Hardware error detected. 
Restarting.\n"); diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index 7becea3..dd5aeaf 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c @@ -2777,7 +2777,7 @@ static int mwl8k_cmd_tx_power(struct ieee80211_hw *hw, else if (channel->band == IEEE80211_BAND_5GHZ) cmd->band = cpu_to_le16(0x4); - cmd->channel = channel->hw_value; + cmd->channel = cpu_to_le16(channel->hw_value); if (conf->channel_type == NL80211_CHAN_NO_HT || conf->channel_type == NL80211_CHAN_HT20) { @@ -4066,7 +4066,7 @@ static int mwl8k_cmd_encryption_remove_key(struct ieee80211_hw *hw, goto done; if (key->cipher == WLAN_CIPHER_SUITE_WEP40 || - WLAN_CIPHER_SUITE_WEP104) + key->cipher == WLAN_CIPHER_SUITE_WEP104) mwl8k_vif->wep_key_conf[key->keyidx].enabled = 0; cmd->action = cpu_to_le32(MWL8K_ENCR_REMOVE_KEY); diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index 4941a1a..dc88bae 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c @@ -422,7 +422,6 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev) static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev, enum dev_state state) { - int mask = (state == STATE_RADIO_IRQ_ON); u32 reg; unsigned long flags; @@ -436,25 +435,14 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev, } spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); - rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, ®); - rt2x00_set_field32(®, INT_MASK_CSR_RXDELAYINT, 0); - rt2x00_set_field32(®, INT_MASK_CSR_TXDELAYINT, 0); - rt2x00_set_field32(®, INT_MASK_CSR_RX_DONE, mask); - rt2x00_set_field32(®, INT_MASK_CSR_AC0_DMA_DONE, 0); - rt2x00_set_field32(®, INT_MASK_CSR_AC1_DMA_DONE, 0); - rt2x00_set_field32(®, INT_MASK_CSR_AC2_DMA_DONE, 0); - rt2x00_set_field32(®, INT_MASK_CSR_AC3_DMA_DONE, 0); - rt2x00_set_field32(®, INT_MASK_CSR_HCCA_DMA_DONE, 0); - rt2x00_set_field32(®, INT_MASK_CSR_MGMT_DMA_DONE, 0); - rt2x00_set_field32(®, INT_MASK_CSR_MCU_COMMAND, 0); - rt2x00_set_field32(®, INT_MASK_CSR_RXTX_COHERENT, 0); - rt2x00_set_field32(®, INT_MASK_CSR_TBTT, mask); - rt2x00_set_field32(®, INT_MASK_CSR_PRE_TBTT, mask); - rt2x00_set_field32(®, INT_MASK_CSR_TX_FIFO_STATUS, mask); - rt2x00_set_field32(®, INT_MASK_CSR_AUTO_WAKEUP, mask); - rt2x00_set_field32(®, INT_MASK_CSR_GPTIMER, 0); - rt2x00_set_field32(®, INT_MASK_CSR_RX_COHERENT, 0); - rt2x00_set_field32(®, INT_MASK_CSR_TX_COHERENT, 0); + reg = 0; + if (state == STATE_RADIO_IRQ_ON) { + rt2x00_set_field32(®, INT_MASK_CSR_RX_DONE, 1); + rt2x00_set_field32(®, INT_MASK_CSR_TBTT, 1); + rt2x00_set_field32(®, INT_MASK_CSR_PRE_TBTT, 1); + rt2x00_set_field32(®, INT_MASK_CSR_TX_FIFO_STATUS, 1); + rt2x00_set_field32(®, INT_MASK_CSR_AUTO_WAKEUP, 1); + } rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg); spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index fa67905..698b905 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -68,7 +68,7 @@ struct netfront_cb { #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE) #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE) -#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) +#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256) struct netfront_stats { u64 rx_packets; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 97fff78..af295bb 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -2802,7 +2802,7 @@ 
pci_intx(struct pci_dev *pdev, int enable) /** * pci_intx_mask_supported - probe for INTx masking support - * @pdev: the PCI device to operate on + * @dev: the PCI device to operate on * * Check if the device dev support INTx masking via the config space * command word. @@ -2884,7 +2884,7 @@ done: /** * pci_check_and_mask_intx - mask INTx on pending interrupt - * @pdev: the PCI device to operate on + * @dev: the PCI device to operate on * * Check if the device dev has its INTx line asserted, mask it and * return true in that case. False is returned if not interrupt was @@ -2898,7 +2898,7 @@ EXPORT_SYMBOL_GPL(pci_check_and_mask_intx); /** * pci_check_and_mask_intx - unmask INTx of no interrupt is pending - * @pdev: the PCI device to operate on + * @dev: the PCI device to operate on * * Check if the device dev has its INTx line asserted, unmask it if not * and return true. False is returned and the mask remains active if diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c index 749c2a1..1932029 100644 --- a/drivers/pcmcia/ds.c +++ b/drivers/pcmcia/ds.c @@ -1269,10 +1269,8 @@ static int pcmcia_bus_add(struct pcmcia_socket *skt) static int pcmcia_bus_early_resume(struct pcmcia_socket *skt) { - if (!verify_cis_cache(skt)) { - pcmcia_put_socket(skt); + if (!verify_cis_cache(skt)) return 0; - } dev_dbg(&skt->dev, "cis mismatch - different card\n"); diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c index 5986690..27f2fe3 100644 --- a/drivers/pcmcia/sa1111_generic.c +++ b/drivers/pcmcia/sa1111_generic.c @@ -205,7 +205,8 @@ static int __devexit pcmcia_remove(struct sa1111_dev *dev) dev_set_drvdata(&dev->dev, NULL); - for (; next = s->next, s; s = next) { + for (; s; s = next) { + next = s->next; soc_pcmcia_remove_one(&s->soc); kfree(s); } diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 569bdb3..8fe15cf 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -510,10 +510,12 @@ static struct dentry *debugfs_root; static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev) { - static struct dentry *device_root; + struct dentry *device_root; device_root = debugfs_create_dir(dev_name(pctldev->dev), debugfs_root); + pctldev->device_root = device_root; + if (IS_ERR(device_root) || !device_root) { pr_warn("failed to create debugfs directory for %s\n", dev_name(pctldev->dev)); @@ -529,6 +531,11 @@ static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev) pinconf_init_device_debugfs(device_root, pctldev); } +static void pinctrl_remove_device_debugfs(struct pinctrl_dev *pctldev) +{ + debugfs_remove_recursive(pctldev->device_root); +} + static void pinctrl_init_debugfs(void) { debugfs_root = debugfs_create_dir("pinctrl", NULL); @@ -553,6 +560,10 @@ static void pinctrl_init_debugfs(void) { } +static void pinctrl_remove_device_debugfs(struct pinctrl_dev *pctldev) +{ +} + #endif /** @@ -572,40 +583,40 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc, if (pctldesc->name == NULL) return NULL; + pctldev = kzalloc(sizeof(struct pinctrl_dev), GFP_KERNEL); + if (pctldev == NULL) + return NULL; + + /* Initialize pin control device struct */ + pctldev->owner = pctldesc->owner; + pctldev->desc = pctldesc; + pctldev->driver_data = driver_data; + INIT_RADIX_TREE(&pctldev->pin_desc_tree, GFP_KERNEL); + spin_lock_init(&pctldev->pin_desc_tree_lock); + INIT_LIST_HEAD(&pctldev->gpio_ranges); + mutex_init(&pctldev->gpio_ranges_lock); + pctldev->dev = dev; + /* If we're implementing pinmuxing, check the ops for sanity */ if 
(pctldesc->pmxops) { - ret = pinmux_check_ops(pctldesc->pmxops); + ret = pinmux_check_ops(pctldev); if (ret) { pr_err("%s pinmux ops lacks necessary functions\n", pctldesc->name); - return NULL; + goto out_err; } } /* If we're implementing pinconfig, check the ops for sanity */ if (pctldesc->confops) { - ret = pinconf_check_ops(pctldesc->confops); + ret = pinconf_check_ops(pctldev); if (ret) { pr_err("%s pin config ops lacks necessary functions\n", pctldesc->name); - return NULL; + goto out_err; } } - pctldev = kzalloc(sizeof(struct pinctrl_dev), GFP_KERNEL); - if (pctldev == NULL) - return NULL; - - /* Initialize pin control device struct */ - pctldev->owner = pctldesc->owner; - pctldev->desc = pctldesc; - pctldev->driver_data = driver_data; - INIT_RADIX_TREE(&pctldev->pin_desc_tree, GFP_KERNEL); - spin_lock_init(&pctldev->pin_desc_tree_lock); - INIT_LIST_HEAD(&pctldev->gpio_ranges); - mutex_init(&pctldev->gpio_ranges_lock); - pctldev->dev = dev; - /* Register all the pins */ pr_debug("try to register %d pins on %s...\n", pctldesc->npins, pctldesc->name); @@ -641,6 +652,7 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev) if (pctldev == NULL) return; + pinctrl_remove_device_debugfs(pctldev); pinmux_unhog_maps(pctldev); /* TODO: check that no pinmuxes are still active? */ mutex_lock(&pinctrldev_list_mutex); diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h index 177a331..cfa86da 100644 --- a/drivers/pinctrl/core.h +++ b/drivers/pinctrl/core.h @@ -41,6 +41,9 @@ struct pinctrl_dev { struct device *dev; struct module *owner; void *driver_data; +#ifdef CONFIG_DEBUG_FS + struct dentry *device_root; +#endif #ifdef CONFIG_PINMUX struct mutex pinmux_hogs_lock; struct list_head pinmux_hogs; diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c index 1259872..9fb7545 100644 --- a/drivers/pinctrl/pinconf.c +++ b/drivers/pinctrl/pinconf.c @@ -205,8 +205,10 @@ int pin_config_group_set(const char *dev_name, const char *pin_group, } EXPORT_SYMBOL(pin_config_group_set); -int pinconf_check_ops(const struct pinconf_ops *ops) +int pinconf_check_ops(struct pinctrl_dev *pctldev) { + const struct pinconf_ops *ops = pctldev->desc->confops; + /* We must be able to read out pin status */ if (!ops->pin_config_get && !ops->pin_config_group_get) return -EINVAL; @@ -236,7 +238,7 @@ static int pinconf_pins_show(struct seq_file *s, void *what) seq_puts(s, "Format: pin (name): pinmux setting array\n"); /* The pin number can be retrived from the pin controller descriptor */ - for (i = 0; pin < pctldev->desc->npins; i++) { + for (i = 0; i < pctldev->desc->npins; i++) { struct pin_desc *desc; pin = pctldev->desc->pins[i].number; diff --git a/drivers/pinctrl/pinconf.h b/drivers/pinctrl/pinconf.h index e7dc616..006b77f 100644 --- a/drivers/pinctrl/pinconf.h +++ b/drivers/pinctrl/pinconf.h @@ -13,7 +13,7 @@ #ifdef CONFIG_PINCONF -int pinconf_check_ops(const struct pinconf_ops *ops); +int pinconf_check_ops(struct pinctrl_dev *pctldev); void pinconf_init_device_debugfs(struct dentry *devroot, struct pinctrl_dev *pctldev); int pin_config_get_for_pin(struct pinctrl_dev *pctldev, unsigned pin, @@ -23,7 +23,7 @@ int pin_config_set_for_pin(struct pinctrl_dev *pctldev, unsigned pin, #else -static inline int pinconf_check_ops(const struct pinconf_ops *ops) +static inline int pinconf_check_ops(struct pinctrl_dev *pctldev) { return 0; } diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c index a76a348..7c3193f 100644 --- a/drivers/pinctrl/pinmux.c +++ b/drivers/pinctrl/pinmux.c @@ -53,11 +53,6 @@ 
struct pinmux_group { * @dev: the device using this pinmux * @usecount: the number of active users of this mux setting, used to keep * track of nested use cases - * @pins: an array of discrete physical pins used in this mapping, taken - * from the global pin enumeration space (copied from pinmux map) - * @num_pins: the number of pins in this mapping array, i.e. the number of - * elements in .pins so we can iterate over that array (copied from - * pinmux map) * @pctldev: pin control device handling this pinmux * @func_selector: the function selector for the pinmux device handling * this pinmux @@ -152,8 +147,7 @@ static int pin_request(struct pinctrl_dev *pctldev, status = 0; if (status) - dev_err(pctldev->dev, "->request on device %s failed " - "for pin %d\n", + dev_err(pctldev->dev, "->request on device %s failed for pin %d\n", pctldev->desc->name, pin); out_free_pin: if (status) { @@ -355,21 +349,20 @@ int __init pinmux_register_mappings(struct pinmux_map const *maps, /* First sanity check the new mapping */ for (i = 0; i < num_maps; i++) { if (!maps[i].name) { - pr_err("failed to register map %d: " - "no map name given\n", i); + pr_err("failed to register map %d: no map name given\n", + i); return -EINVAL; } if (!maps[i].ctrl_dev && !maps[i].ctrl_dev_name) { - pr_err("failed to register map %s (%d): " - "no pin control device given\n", + pr_err("failed to register map %s (%d): no pin control device given\n", maps[i].name, i); return -EINVAL; } if (!maps[i].function) { - pr_err("failed to register map %s (%d): " - "no function ID given\n", maps[i].name, i); + pr_err("failed to register map %s (%d): no function ID given\n", + maps[i].name, i); return -EINVAL; } @@ -411,7 +404,7 @@ int __init pinmux_register_mappings(struct pinmux_map const *maps, } /** - * acquire_pins() - acquire all the pins for a certain funcion on a pinmux + * acquire_pins() - acquire all the pins for a certain function on a pinmux * @pctldev: the device to take the pins on * @func_selector: the function selector to acquire the pins for * @group_selector: the group selector containing the pins to acquire @@ -442,8 +435,7 @@ static int acquire_pins(struct pinctrl_dev *pctldev, ret = pin_request(pctldev, pins[i], func, NULL); if (ret) { dev_err(pctldev->dev, - "could not get pin %d for function %s " - "on device %s - conflicting mux mappings?\n", + "could not get pin %d for function %s on device %s - conflicting mux mappings?\n", pins[i], func ? 
: "(undefined)", pinctrl_dev_get_name(pctldev)); /* On error release all taken pins */ @@ -458,7 +450,7 @@ static int acquire_pins(struct pinctrl_dev *pctldev, /** * release_pins() - release pins taken by earlier acquirement - * @pctldev: the device to free the pinx on + * @pctldev: the device to free the pins on * @group_selector: the group selector containing the pins to free */ static void release_pins(struct pinctrl_dev *pctldev, @@ -473,8 +465,7 @@ static void release_pins(struct pinctrl_dev *pctldev, ret = pctlops->get_group_pins(pctldev, group_selector, &pins, &num_pins); if (ret) { - dev_err(pctldev->dev, "could not get pins to release for " - "group selector %d\n", + dev_err(pctldev->dev, "could not get pins to release for group selector %d\n", group_selector); return; } @@ -526,8 +517,7 @@ static int pinmux_check_pin_group(struct pinctrl_dev *pctldev, ret = pinctrl_get_group_selector(pctldev, groups[0]); if (ret < 0) { dev_err(pctldev->dev, - "function %s wants group %s but the pin " - "controller does not seem to have that group\n", + "function %s wants group %s but the pin controller does not seem to have that group\n", pmxops->get_function_name(pctldev, func_selector), groups[0]); return ret; @@ -535,8 +525,7 @@ static int pinmux_check_pin_group(struct pinctrl_dev *pctldev, if (num_groups > 1) dev_dbg(pctldev->dev, - "function %s support more than one group, " - "default-selecting first group %s (%d)\n", + "function %s support more than one group, default-selecting first group %s (%d)\n", pmxops->get_function_name(pctldev, func_selector), groups[0], ret); @@ -628,10 +617,8 @@ static int pinmux_enable_muxmap(struct pinctrl_dev *pctldev, if (pmx->pctldev && pmx->pctldev != pctldev) { dev_err(pctldev->dev, - "different pin control devices given for device %s, " - "function %s\n", - devname, - map->function); + "different pin control devices given for device %s, function %s\n", + devname, map->function); return -EINVAL; } pmx->dev = dev; @@ -695,7 +682,6 @@ static void pinmux_free_groups(struct pinmux *pmx) */ struct pinmux *pinmux_get(struct device *dev, const char *name) { - struct pinmux_map const *map = NULL; struct pinctrl_dev *pctldev = NULL; const char *devname = NULL; @@ -745,8 +731,7 @@ struct pinmux *pinmux_get(struct device *dev, const char *name) else if (map->ctrl_dev_name) devname = map->ctrl_dev_name; - pr_warning("could not find a pinctrl device for pinmux " - "function %s, fishy, they shall all have one\n", + pr_warning("could not find a pinctrl device for pinmux function %s, fishy, they shall all have one\n", map->function); pr_warning("given pinctrl device name: %s", devname ? 
devname : "UNDEFINED"); @@ -904,8 +889,11 @@ void pinmux_disable(struct pinmux *pmx) } EXPORT_SYMBOL_GPL(pinmux_disable); -int pinmux_check_ops(const struct pinmux_ops *ops) +int pinmux_check_ops(struct pinctrl_dev *pctldev) { + const struct pinmux_ops *ops = pctldev->desc->pmxops; + unsigned selector = 0; + /* Check that we implement required operations */ if (!ops->list_functions || !ops->get_function_name || @@ -914,6 +902,18 @@ int pinmux_check_ops(const struct pinmux_ops *ops) !ops->disable) return -EINVAL; + /* Check that all functions registered have names */ + while (ops->list_functions(pctldev, selector) >= 0) { + const char *fname = ops->get_function_name(pctldev, + selector); + if (!fname) { + pr_err("pinmux ops has no name for function%u\n", + selector); + return -EINVAL; + } + selector++; + } + return 0; } @@ -932,8 +932,8 @@ static int pinmux_hog_map(struct pinctrl_dev *pctldev, * without any problems, so then we can hog pinmuxes for * all devices that just want a static pin mux at this point. */ - dev_err(pctldev->dev, "map %s wants to hog a non-system " - "pinmux, this is not going to work\n", map->name); + dev_err(pctldev->dev, "map %s wants to hog a non-system pinmux, this is not going to work\n", + map->name); return -EINVAL; } @@ -993,9 +993,12 @@ int pinmux_hog_maps(struct pinctrl_dev *pctldev) for (i = 0; i < pinmux_maps_num; i++) { struct pinmux_map const *map = &pinmux_maps[i]; - if (((map->ctrl_dev == dev) || - !strcmp(map->ctrl_dev_name, devname)) && - map->hog_on_boot) { + if (!map->hog_on_boot) + continue; + + if ((map->ctrl_dev == dev) || + (map->ctrl_dev_name && + !strcmp(map->ctrl_dev_name, devname))) { /* OK time to hog! */ ret = pinmux_hog_map(pctldev, map); if (ret) @@ -1122,13 +1125,15 @@ static int pinmux_show(struct seq_file *s, void *what) seq_printf(s, "device: %s function: %s (%u),", pinctrl_dev_get_name(pmx->pctldev), - pmxops->get_function_name(pctldev, pmx->func_selector), + pmxops->get_function_name(pctldev, + pmx->func_selector), pmx->func_selector); seq_printf(s, " groups: ["); list_for_each_entry(grp, &pmx->groups, node) { seq_printf(s, " %s (%u)", - pctlops->get_group_name(pctldev, grp->group_selector), + pctlops->get_group_name(pctldev, + grp->group_selector), grp->group_selector); } seq_printf(s, " ]"); diff --git a/drivers/pinctrl/pinmux.h b/drivers/pinctrl/pinmux.h index 844500b..97f5222 100644 --- a/drivers/pinctrl/pinmux.h +++ b/drivers/pinctrl/pinmux.h @@ -12,7 +12,7 @@ */ #ifdef CONFIG_PINMUX -int pinmux_check_ops(const struct pinmux_ops *ops); +int pinmux_check_ops(struct pinctrl_dev *pctldev); void pinmux_init_device_debugfs(struct dentry *devroot, struct pinctrl_dev *pctldev); void pinmux_init_debugfs(struct dentry *subsys_root); @@ -21,7 +21,7 @@ void pinmux_unhog_maps(struct pinctrl_dev *pctldev); #else -static inline int pinmux_check_ops(const struct pinmux_ops *ops) +static inline int pinmux_check_ops(struct pinctrl_dev *pctldev) { return 0; } diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index ca86f39..e9a83f8 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -2731,6 +2731,8 @@ static void rdev_init_debugfs(struct regulator_dev *rdev) * @dev: struct device for the regulator * @init_data: platform provided init data, passed through by driver * @driver_data: private regulator data + * @of_node: OpenFirmware node to parse for device tree bindings (may be + * NULL). * * Called by regulator drivers to register a regulator. * Returns 0 on success. 
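The pinmux.c and pinconf.c hunks above change pinmux_check_ops() and pinconf_check_ops() to take the struct pinctrl_dev rather than a bare ops table, and the pinmux checker now walks every function selector and refuses registration when a function has no name. The device pointer is what makes that walk possible, since both callbacks, list_functions() and get_function_name(), take a struct pinctrl_dev * argument. Below is a minimal, self-contained sketch of that registration-time walk; fake_pctldev, fake_pinmux_ops and check_pinmux_ops() are pared-down stand-ins for illustration only, not the real pinctrl definitions.

#include <stdio.h>

/* Pared-down stand-ins for the real pinctrl structures (illustration only). */
struct fake_pctldev;

struct fake_pinmux_ops {
	/* Returns >= 0 while @selector is valid, negative once past the end. */
	int (*list_functions)(struct fake_pctldev *pctldev, unsigned selector);
	const char *(*get_function_name)(struct fake_pctldev *pctldev,
					 unsigned selector);
};

struct fake_pctldev {
	const struct fake_pinmux_ops *pmxops;
};

/* Walk every function the driver claims to expose and insist on a name,
 * mirroring the check added to pinmux_check_ops() above. */
static int check_pinmux_ops(struct fake_pctldev *pctldev)
{
	const struct fake_pinmux_ops *ops = pctldev->pmxops;
	unsigned selector = 0;

	if (!ops->list_functions || !ops->get_function_name)
		return -1;

	while (ops->list_functions(pctldev, selector) >= 0) {
		const char *fname = ops->get_function_name(pctldev, selector);

		if (!fname) {
			fprintf(stderr, "no name for function %u\n", selector);
			return -1;
		}
		selector++;
	}
	return 0;
}

Because the checker receives the device rather than just the ops table, the same shape carries over to the pin config side, which is why pinconf_check_ops() picks up the identical signature change.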
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index f1651eb..679734d 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c @@ -35,7 +35,7 @@ static void of_get_regulation_constraints(struct device_node *np, if (constraints->min_uV != constraints->max_uV) constraints->valid_ops_mask |= REGULATOR_CHANGE_VOLTAGE; /* Only one voltage? Then make sure it's set. */ - if (constraints->min_uV == constraints->max_uV) + if (min_uV && max_uV && constraints->min_uV == constraints->max_uV) constraints->apply_uV = true; uV_offset = of_get_property(np, "regulator-microvolt-offset", NULL); diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index e19a403..3a125b8 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -774,7 +774,7 @@ config RTC_DRV_EP93XX config RTC_DRV_SA1100 tristate "SA11x0/PXA2xx" - depends on ARCH_SA1100 || ARCH_PXA || ARCH_MMP + depends on ARCH_SA1100 || ARCH_PXA help If you say Y here you will get access to the real time clock built into your SA11x0 or PXA2xx CPU. diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c index 4595d3e..cb9a585 100644 --- a/drivers/rtc/rtc-sa1100.c +++ b/drivers/rtc/rtc-sa1100.c @@ -27,42 +27,34 @@ #include <linux/init.h> #include <linux/fs.h> #include <linux/interrupt.h> +#include <linux/string.h> #include <linux/pm.h> -#include <linux/slab.h> -#include <linux/clk.h> -#include <linux/io.h> +#include <linux/bitops.h> #include <mach/hardware.h> #include <asm/irq.h> +#ifdef CONFIG_ARCH_PXA +#include <mach/regs-rtc.h> +#endif + #define RTC_DEF_DIVIDER (32768 - 1) #define RTC_DEF_TRIM 0 -#define RTC_FREQ 1024 - -#define RCNR 0x00 /* RTC Count Register */ -#define RTAR 0x04 /* RTC Alarm Register */ -#define RTSR 0x08 /* RTC Status Register */ -#define RTTR 0x0c /* RTC Timer Trim Register */ - -#define RTSR_HZE (1 << 3) /* HZ interrupt enable */ -#define RTSR_ALE (1 << 2) /* RTC alarm interrupt enable */ -#define RTSR_HZ (1 << 1) /* HZ rising-edge detected */ -#define RTSR_AL (1 << 0) /* RTC alarm detected */ - -#define rtc_readl(sa1100_rtc, reg) \ - readl_relaxed((sa1100_rtc)->base + (reg)) -#define rtc_writel(sa1100_rtc, reg, value) \ - writel_relaxed((value), (sa1100_rtc)->base + (reg)) - -struct sa1100_rtc { - struct resource *ress; - void __iomem *base; - struct clk *clk; - int irq_1Hz; - int irq_Alrm; - struct rtc_device *rtc; - spinlock_t lock; /* Protects this structure */ -}; + +static const unsigned long RTC_FREQ = 1024; +static struct rtc_time rtc_alarm; +static DEFINE_SPINLOCK(sa1100_rtc_lock); + +static inline int rtc_periodic_alarm(struct rtc_time *tm) +{ + return (tm->tm_year == -1) || + ((unsigned)tm->tm_mon >= 12) || + ((unsigned)(tm->tm_mday - 1) >= 31) || + ((unsigned)tm->tm_hour > 23) || + ((unsigned)tm->tm_min > 59) || + ((unsigned)tm->tm_sec > 59); +} + /* * Calculate the next alarm time given the requested alarm time mask * and the current time. 
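The rtc_update_alarm() helper added in the hunk that follows programs the alarm against a snapshot of the second counter and retries whenever the counter ticks before the alarm register has been written, so the alarm is never computed from a stale idea of "now". A compact sketch of that snapshot-and-retry idiom is below; read_counter(), write_alarm() and the relative-offset interface are hypothetical stand-ins for the RCNR/RTAR register accesses and the rtc_time conversion done by the real code.

#include <stdint.h>

/* Hypothetical register accessors standing in for RCNR and RTAR. */
extern uint32_t read_counter(void);	/* free-running seconds counter */
extern void write_alarm(uint32_t when);	/* absolute alarm time, in seconds */

/*
 * Program an alarm a given number of seconds from now.  If the counter
 * ticks while we are computing and writing, redo the whole thing so the
 * programmed value is always consistent with the counter.
 */
static void program_alarm(uint32_t seconds_from_now)
{
	uint32_t now;

	do {
		now = read_counter();		/* snapshot "now" */
		write_alarm(now + seconds_from_now);
	} while (now != read_counter());	/* counter moved? try again */
}

The check at the bottom of the loop is what bounds the race: in practice the body runs at most twice, since one iteration takes far less than one counter tick.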
@@ -90,26 +82,46 @@ static void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now, } } +static int rtc_update_alarm(struct rtc_time *alrm) +{ + struct rtc_time alarm_tm, now_tm; + unsigned long now, time; + int ret; + + do { + now = RCNR; + rtc_time_to_tm(now, &now_tm); + rtc_next_alarm_time(&alarm_tm, &now_tm, alrm); + ret = rtc_tm_to_time(&alarm_tm, &time); + if (ret != 0) + break; + + RTSR = RTSR & (RTSR_HZE|RTSR_ALE|RTSR_AL); + RTAR = time; + } while (now != RCNR); + + return ret; +} + static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id) { struct platform_device *pdev = to_platform_device(dev_id); - struct sa1100_rtc *sa1100_rtc = platform_get_drvdata(pdev); + struct rtc_device *rtc = platform_get_drvdata(pdev); unsigned int rtsr; unsigned long events = 0; - spin_lock(&sa1100_rtc->lock); + spin_lock(&sa1100_rtc_lock); + rtsr = RTSR; /* clear interrupt sources */ - rtsr = rtc_readl(sa1100_rtc, RTSR); - rtc_writel(sa1100_rtc, RTSR, 0); - + RTSR = 0; /* Fix for a nasty initialization problem the in SA11xx RTSR register. * See also the comments in sa1100_rtc_probe(). */ if (rtsr & (RTSR_ALE | RTSR_HZE)) { /* This is the original code, before there was the if test * above. This code does not clear interrupts that were not * enabled. */ - rtc_writel(sa1100_rtc, RTSR, (RTSR_AL | RTSR_HZ) & (rtsr >> 2)); + RTSR = (RTSR_AL | RTSR_HZ) & (rtsr >> 2); } else { /* For some reason, it is possible to enter this routine * without interruptions enabled, it has been tested with @@ -118,13 +130,13 @@ static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id) * This situation leads to an infinite "loop" of interrupt * routine calling and as a result the processor seems to * lock on its first call to open(). */ - rtc_writel(sa1100_rtc, RTSR, (RTSR_AL | RTSR_HZ)); + RTSR = RTSR_AL | RTSR_HZ; } /* clear alarm interrupt if it has occurred */ if (rtsr & RTSR_AL) rtsr &= ~RTSR_ALE; - rtc_writel(sa1100_rtc, RTSR, rtsr & (RTSR_ALE | RTSR_HZE)); + RTSR = rtsr & (RTSR_ALE | RTSR_HZE); /* update irq data & counter */ if (rtsr & RTSR_AL) @@ -132,100 +144,89 @@ static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id) if (rtsr & RTSR_HZ) events |= RTC_UF | RTC_IRQF; - rtc_update_irq(sa1100_rtc->rtc, 1, events); + rtc_update_irq(rtc, 1, events); - spin_unlock(&sa1100_rtc->lock); + if (rtsr & RTSR_AL && rtc_periodic_alarm(&rtc_alarm)) + rtc_update_alarm(&rtc_alarm); + + spin_unlock(&sa1100_rtc_lock); return IRQ_HANDLED; } static int sa1100_rtc_open(struct device *dev) { - struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev); int ret; + struct platform_device *plat_dev = to_platform_device(dev); + struct rtc_device *rtc = platform_get_drvdata(plat_dev); - ret = request_irq(sa1100_rtc->irq_1Hz, sa1100_rtc_interrupt, - IRQF_DISABLED, "rtc 1Hz", dev); + ret = request_irq(IRQ_RTC1Hz, sa1100_rtc_interrupt, IRQF_DISABLED, + "rtc 1Hz", dev); if (ret) { - dev_err(dev, "IRQ %d already in use.\n", sa1100_rtc->irq_1Hz); + dev_err(dev, "IRQ %d already in use.\n", IRQ_RTC1Hz); goto fail_ui; } - ret = request_irq(sa1100_rtc->irq_Alrm, sa1100_rtc_interrupt, - IRQF_DISABLED, "rtc Alrm", dev); + ret = request_irq(IRQ_RTCAlrm, sa1100_rtc_interrupt, IRQF_DISABLED, + "rtc Alrm", dev); if (ret) { - dev_err(dev, "IRQ %d already in use.\n", sa1100_rtc->irq_Alrm); + dev_err(dev, "IRQ %d already in use.\n", IRQ_RTCAlrm); goto fail_ai; } - sa1100_rtc->rtc->max_user_freq = RTC_FREQ; - rtc_irq_set_freq(sa1100_rtc->rtc, NULL, RTC_FREQ); + rtc->max_user_freq = RTC_FREQ; + rtc_irq_set_freq(rtc, NULL, RTC_FREQ); return 
0; fail_ai: - free_irq(sa1100_rtc->irq_1Hz, dev); + free_irq(IRQ_RTC1Hz, dev); fail_ui: return ret; } static void sa1100_rtc_release(struct device *dev) { - struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev); - - spin_lock_irq(&sa1100_rtc->lock); - rtc_writel(sa1100_rtc, RTSR, 0); - spin_unlock_irq(&sa1100_rtc->lock); + spin_lock_irq(&sa1100_rtc_lock); + RTSR = 0; + spin_unlock_irq(&sa1100_rtc_lock); - free_irq(sa1100_rtc->irq_Alrm, dev); - free_irq(sa1100_rtc->irq_1Hz, dev); + free_irq(IRQ_RTCAlrm, dev); + free_irq(IRQ_RTC1Hz, dev); } static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { - struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev); - unsigned int rtsr; - - spin_lock_irq(&sa1100_rtc->lock); - - rtsr = rtc_readl(sa1100_rtc, RTSR); + spin_lock_irq(&sa1100_rtc_lock); if (enabled) - rtsr |= RTSR_ALE; + RTSR |= RTSR_ALE; else - rtsr &= ~RTSR_ALE; - rtc_writel(sa1100_rtc, RTSR, rtsr); - - spin_unlock_irq(&sa1100_rtc->lock); + RTSR &= ~RTSR_ALE; + spin_unlock_irq(&sa1100_rtc_lock); return 0; } static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm) { - struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev); - - rtc_time_to_tm(rtc_readl(sa1100_rtc, RCNR), tm); + rtc_time_to_tm(RCNR, tm); return 0; } static int sa1100_rtc_set_time(struct device *dev, struct rtc_time *tm) { - struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev); unsigned long time; int ret; ret = rtc_tm_to_time(tm, &time); if (ret == 0) - rtc_writel(sa1100_rtc, RCNR, time); + RCNR = time; return ret; } static int sa1100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) { - struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev); - unsigned long time; - unsigned int rtsr; + u32 rtsr; - time = rtc_readl(sa1100_rtc, RCNR); - rtc_time_to_tm(time, &alrm->time); - rtsr = rtc_readl(sa1100_rtc, RTSR); + memcpy(&alrm->time, &rtc_alarm, sizeof(struct rtc_time)); + rtsr = RTSR; alrm->enabled = (rtsr & RTSR_ALE) ? 1 : 0; alrm->pending = (rtsr & RTSR_AL) ? 
1 : 0; return 0; @@ -233,39 +234,26 @@ static int sa1100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) static int sa1100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) { - struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev); - struct rtc_time now_tm, alarm_tm; - unsigned long time, alarm; - unsigned int rtsr; - - spin_lock_irq(&sa1100_rtc->lock); - - time = rtc_readl(sa1100_rtc, RCNR); - rtc_time_to_tm(time, &now_tm); - rtc_next_alarm_time(&alarm_tm, &now_tm, &alrm->time); - rtc_tm_to_time(&alarm_tm, &alarm); - rtc_writel(sa1100_rtc, RTAR, alarm); - - rtsr = rtc_readl(sa1100_rtc, RTSR); - if (alrm->enabled) - rtsr |= RTSR_ALE; - else - rtsr &= ~RTSR_ALE; - rtc_writel(sa1100_rtc, RTSR, rtsr); + int ret; - spin_unlock_irq(&sa1100_rtc->lock); + spin_lock_irq(&sa1100_rtc_lock); + ret = rtc_update_alarm(&alrm->time); + if (ret == 0) { + if (alrm->enabled) + RTSR |= RTSR_ALE; + else + RTSR &= ~RTSR_ALE; + } + spin_unlock_irq(&sa1100_rtc_lock); - return 0; + return ret; } static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq) { - struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev); + seq_printf(seq, "trim/divider\t\t: 0x%08x\n", (u32) RTTR); + seq_printf(seq, "RTSR\t\t\t: 0x%08x\n", (u32)RTSR); - seq_printf(seq, "trim/divider\t\t: 0x%08x\n", - rtc_readl(sa1100_rtc, RTTR)); - seq_printf(seq, "RTSR\t\t\t: 0x%08x\n", - rtc_readl(sa1100_rtc, RTSR)); return 0; } @@ -282,51 +270,7 @@ static const struct rtc_class_ops sa1100_rtc_ops = { static int sa1100_rtc_probe(struct platform_device *pdev) { - struct sa1100_rtc *sa1100_rtc; - unsigned int rttr; - int ret; - - sa1100_rtc = kzalloc(sizeof(struct sa1100_rtc), GFP_KERNEL); - if (!sa1100_rtc) - return -ENOMEM; - - spin_lock_init(&sa1100_rtc->lock); - platform_set_drvdata(pdev, sa1100_rtc); - - ret = -ENXIO; - sa1100_rtc->ress = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!sa1100_rtc->ress) { - dev_err(&pdev->dev, "No I/O memory resource defined\n"); - goto err_ress; - } - - sa1100_rtc->irq_1Hz = platform_get_irq(pdev, 0); - if (sa1100_rtc->irq_1Hz < 0) { - dev_err(&pdev->dev, "No 1Hz IRQ resource defined\n"); - goto err_ress; - } - sa1100_rtc->irq_Alrm = platform_get_irq(pdev, 1); - if (sa1100_rtc->irq_Alrm < 0) { - dev_err(&pdev->dev, "No alarm IRQ resource defined\n"); - goto err_ress; - } - - ret = -ENOMEM; - sa1100_rtc->base = ioremap(sa1100_rtc->ress->start, - resource_size(sa1100_rtc->ress)); - if (!sa1100_rtc->base) { - dev_err(&pdev->dev, "Unable to map pxa RTC I/O memory\n"); - goto err_map; - } - - sa1100_rtc->clk = clk_get(&pdev->dev, NULL); - if (IS_ERR(sa1100_rtc->clk)) { - dev_err(&pdev->dev, "failed to find rtc clock source\n"); - ret = PTR_ERR(sa1100_rtc->clk); - goto err_clk; - } - clk_prepare(sa1100_rtc->clk); - clk_enable(sa1100_rtc->clk); + struct rtc_device *rtc; /* * According to the manual we should be able to let RTTR be zero @@ -335,24 +279,24 @@ static int sa1100_rtc_probe(struct platform_device *pdev) * If the clock divider is uninitialized then reset it to the * default value to get the 1Hz clock. 
*/ - if (rtc_readl(sa1100_rtc, RTTR) == 0) { - rttr = RTC_DEF_DIVIDER + (RTC_DEF_TRIM << 16); - rtc_writel(sa1100_rtc, RTTR, rttr); - dev_warn(&pdev->dev, "warning: initializing default clock" - " divider/trim value\n"); + if (RTTR == 0) { + RTTR = RTC_DEF_DIVIDER + (RTC_DEF_TRIM << 16); + dev_warn(&pdev->dev, "warning: " + "initializing default clock divider/trim value\n"); /* The current RTC value probably doesn't make sense either */ - rtc_writel(sa1100_rtc, RCNR, 0); + RCNR = 0; } device_init_wakeup(&pdev->dev, 1); - sa1100_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, - &sa1100_rtc_ops, THIS_MODULE); - if (IS_ERR(sa1100_rtc->rtc)) { - dev_err(&pdev->dev, "Failed to register RTC device -> %d\n", - ret); - goto err_rtc_reg; - } + rtc = rtc_device_register(pdev->name, &pdev->dev, &sa1100_rtc_ops, + THIS_MODULE); + + if (IS_ERR(rtc)) + return PTR_ERR(rtc); + + platform_set_drvdata(pdev, rtc); + /* Fix for a nasty initialization problem the in SA11xx RTSR register. * See also the comments in sa1100_rtc_interrupt(). * @@ -375,46 +319,33 @@ static int sa1100_rtc_probe(struct platform_device *pdev) * * Notice that clearing bit 1 and 0 is accomplished by writting ONES to * the corresponding bits in RTSR. */ - rtc_writel(sa1100_rtc, RTSR, (RTSR_AL | RTSR_HZ)); + RTSR = RTSR_AL | RTSR_HZ; return 0; - -err_rtc_reg: -err_clk: - iounmap(sa1100_rtc->base); -err_ress: -err_map: - kfree(sa1100_rtc); - return ret; } static int sa1100_rtc_remove(struct platform_device *pdev) { - struct sa1100_rtc *sa1100_rtc = platform_get_drvdata(pdev); + struct rtc_device *rtc = platform_get_drvdata(pdev); + + if (rtc) + rtc_device_unregister(rtc); - rtc_device_unregister(sa1100_rtc->rtc); - clk_disable(sa1100_rtc->clk); - clk_unprepare(sa1100_rtc->clk); - iounmap(sa1100_rtc->base); return 0; } #ifdef CONFIG_PM static int sa1100_rtc_suspend(struct device *dev) { - struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev); - if (device_may_wakeup(dev)) - enable_irq_wake(sa1100_rtc->irq_Alrm); + enable_irq_wake(IRQ_RTCAlrm); return 0; } static int sa1100_rtc_resume(struct device *dev) { - struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev); - if (device_may_wakeup(dev)) - disable_irq_wake(sa1100_rtc->irq_Alrm); + disable_irq_wake(IRQ_RTCAlrm); return 0; } diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index eef27a1..110137e 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -3261,6 +3261,12 @@ void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) device->path_data.tbvpm |= eventlpm; dasd_schedule_device_bh(device); } + if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) { + DBF_DEV_EVENT(DBF_WARNING, device, "%s", + "Pathgroup re-established\n"); + if (device->discipline->kick_validate) + device->discipline->kick_validate(device); + } } dasd_put_device(device); } diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index 553b3c5..b3beed5 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c @@ -189,14 +189,12 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device) unsigned long flags; struct alias_server *server, *newserver; struct alias_lcu *lcu, *newlcu; - int is_lcu_known; struct dasd_uid uid; private = (struct dasd_eckd_private *) device->private; device->discipline->get_uid(device, &uid); spin_lock_irqsave(&aliastree.lock, flags); - is_lcu_known = 1; server = _find_server(&uid); if (!server) { spin_unlock_irqrestore(&aliastree.lock, flags); @@ -208,7 +206,6 @@ int 
dasd_alias_make_device_known_to_lcu(struct dasd_device *device) if (!server) { list_add(&newserver->server, &aliastree.serverlist); server = newserver; - is_lcu_known = 0; } else { /* someone was faster */ _free_server(newserver); @@ -226,12 +223,10 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device) if (!lcu) { list_add(&newlcu->lcu, &server->lculist); lcu = newlcu; - is_lcu_known = 0; } else { /* someone was faster */ _free_lcu(newlcu); } - is_lcu_known = 0; } spin_lock(&lcu->lock); list_add(&device->alias_list, &lcu->inactive_devices); @@ -239,64 +234,7 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device) spin_unlock(&lcu->lock); spin_unlock_irqrestore(&aliastree.lock, flags); - return is_lcu_known; -} - -/* - * The first device to be registered on an LCU will have to do - * some additional setup steps to configure that LCU on the - * storage server. All further devices should wait with their - * initialization until the first device is done. - * To synchronize this work, the first device will call - * dasd_alias_lcu_setup_complete when it is done, and all - * other devices will wait for it with dasd_alias_wait_for_lcu_setup. - */ -void dasd_alias_lcu_setup_complete(struct dasd_device *device) -{ - unsigned long flags; - struct alias_server *server; - struct alias_lcu *lcu; - struct dasd_uid uid; - - device->discipline->get_uid(device, &uid); - lcu = NULL; - spin_lock_irqsave(&aliastree.lock, flags); - server = _find_server(&uid); - if (server) - lcu = _find_lcu(server, &uid); - spin_unlock_irqrestore(&aliastree.lock, flags); - if (!lcu) { - DBF_EVENT_DEVID(DBF_ERR, device->cdev, - "could not find lcu for %04x %02x", - uid.ssid, uid.real_unit_addr); - WARN_ON(1); - return; - } - complete_all(&lcu->lcu_setup); -} - -void dasd_alias_wait_for_lcu_setup(struct dasd_device *device) -{ - unsigned long flags; - struct alias_server *server; - struct alias_lcu *lcu; - struct dasd_uid uid; - - device->discipline->get_uid(device, &uid); - lcu = NULL; - spin_lock_irqsave(&aliastree.lock, flags); - server = _find_server(&uid); - if (server) - lcu = _find_lcu(server, &uid); - spin_unlock_irqrestore(&aliastree.lock, flags); - if (!lcu) { - DBF_EVENT_DEVID(DBF_ERR, device->cdev, - "could not find lcu for %04x %02x", - uid.ssid, uid.real_unit_addr); - WARN_ON(1); - return; - } - wait_for_completion(&lcu->lcu_setup); + return 0; } /* diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index bbcd5e9..70880be 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -1534,6 +1534,10 @@ static void dasd_eckd_validate_server(struct dasd_device *device) struct dasd_eckd_private *private; int enable_pav; + private = (struct dasd_eckd_private *) device->private; + if (private->uid.type == UA_BASE_PAV_ALIAS || + private->uid.type == UA_HYPER_PAV_ALIAS) + return; if (dasd_nopav || MACHINE_IS_VM) enable_pav = 0; else @@ -1542,11 +1546,28 @@ static void dasd_eckd_validate_server(struct dasd_device *device) /* may be requested feature is not available on server, * therefore just report error and go ahead */ - private = (struct dasd_eckd_private *) device->private; DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x " "returned rc=%d", private->uid.ssid, rc); } +/* + * worker to do a validate server in case of a lost pathgroup + */ +static void dasd_eckd_do_validate_server(struct work_struct *work) +{ + struct dasd_device *device = container_of(work, struct dasd_device, + kick_validate); + 
dasd_eckd_validate_server(device); + dasd_put_device(device); +} + +static void dasd_eckd_kick_validate_server(struct dasd_device *device) +{ + dasd_get_device(device); + /* queue call to do_validate_server to the kernel event daemon. */ + schedule_work(&device->kick_validate); +} + static u32 get_fcx_max_data(struct dasd_device *device) { #if defined(CONFIG_64BIT) @@ -1588,10 +1609,13 @@ dasd_eckd_check_characteristics(struct dasd_device *device) struct dasd_eckd_private *private; struct dasd_block *block; struct dasd_uid temp_uid; - int is_known, rc, i; + int rc, i; int readonly; unsigned long value; + /* setup work queue for validate server*/ + INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server); + if (!ccw_device_is_pathgroup(device->cdev)) { dev_warn(&device->cdev->dev, "A channel path group could not be established\n"); @@ -1651,22 +1675,12 @@ dasd_eckd_check_characteristics(struct dasd_device *device) block->base = device; } - /* register lcu with alias handling, enable PAV if this is a new lcu */ - is_known = dasd_alias_make_device_known_to_lcu(device); - if (is_known < 0) { - rc = is_known; + /* register lcu with alias handling, enable PAV */ + rc = dasd_alias_make_device_known_to_lcu(device); + if (rc) goto out_err2; - } - /* - * dasd_eckd_validate_server is done on the first device that - * is found for an LCU. All later other devices have to wait - * for it, so they will read the correct feature codes. - */ - if (!is_known) { - dasd_eckd_validate_server(device); - dasd_alias_lcu_setup_complete(device); - } else - dasd_alias_wait_for_lcu_setup(device); + + dasd_eckd_validate_server(device); /* device may report different configuration data after LCU setup */ rc = dasd_eckd_read_conf(device); @@ -4098,7 +4112,7 @@ static int dasd_eckd_restore_device(struct dasd_device *device) { struct dasd_eckd_private *private; struct dasd_eckd_characteristics temp_rdc_data; - int is_known, rc; + int rc; struct dasd_uid temp_uid; unsigned long flags; @@ -4121,14 +4135,10 @@ static int dasd_eckd_restore_device(struct dasd_device *device) goto out_err; /* register lcu with alias handling, enable PAV if this is a new lcu */ - is_known = dasd_alias_make_device_known_to_lcu(device); - if (is_known < 0) - return is_known; - if (!is_known) { - dasd_eckd_validate_server(device); - dasd_alias_lcu_setup_complete(device); - } else - dasd_alias_wait_for_lcu_setup(device); + rc = dasd_alias_make_device_known_to_lcu(device); + if (rc) + return rc; + dasd_eckd_validate_server(device); /* RE-Read Configuration Data */ rc = dasd_eckd_read_conf(device); @@ -4270,6 +4280,7 @@ static struct dasd_discipline dasd_eckd_discipline = { .restore = dasd_eckd_restore_device, .reload = dasd_eckd_reload_device, .get_uid = dasd_eckd_get_uid, + .kick_validate = dasd_eckd_kick_validate_server, }; static int __init diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index afe8c33..33a6743 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -355,6 +355,7 @@ struct dasd_discipline { int (*reload) (struct dasd_device *); int (*get_uid) (struct dasd_device *, struct dasd_uid *); + void (*kick_validate) (struct dasd_device *); }; extern struct dasd_discipline *dasd_diag_discipline_pointer; @@ -455,6 +456,7 @@ struct dasd_device { struct work_struct kick_work; struct work_struct restore_device; struct work_struct reload_device; + struct work_struct kick_validate; struct timer_list timer; debug_info_t *debug_area; diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig 
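The dasd hunks above move the validate-server step out of the path-event path by packaging it as a work item: dasd_eckd_kick_validate_server() pins the device and calls schedule_work(), and the worker recovers its device with container_of(), runs the validation in process context, then drops the reference. A stripped-down sketch of that defer-to-workqueue pattern follows; struct my_device, my_dev_get(), my_dev_put() and my_validate() are hypothetical stand-ins for the dasd equivalents, not code from this patch.

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Hypothetical device; the embedded work_struct mirrors the kick_validate
 * member added to struct dasd_device above. */
struct my_device {
	struct work_struct kick_validate;
	/* reference count, state, ... */
};

/* Stand-ins for dasd_get_device()/dasd_put_device() and the validation. */
extern void my_dev_get(struct my_device *dev);
extern void my_dev_put(struct my_device *dev);
extern void my_validate(struct my_device *dev);

/* Runs later, in process context, on the shared system workqueue. */
static void my_do_validate(struct work_struct *work)
{
	struct my_device *dev =
		container_of(work, struct my_device, kick_validate);

	my_validate(dev);
	my_dev_put(dev);	/* drop the reference taken by the kicker */
}

/* Called from the path-event handler: just pin the device and queue the
 * heavy lifting; nothing slow happens in the caller's context. */
static void my_kick_validate(struct my_device *dev)
{
	my_dev_get(dev);
	schedule_work(&dev->kick_validate);
}

/* One-time setup, as done in dasd_eckd_check_characteristics() above. */
static void my_setup(struct my_device *dev)
{
	INIT_WORK(&dev->kick_validate, my_do_validate);
}

Taking the reference before schedule_work() is the important part: it guarantees the device cannot go away between the path event and the moment the worker actually runs.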
index 06ea3bc..16570aa 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -830,16 +830,11 @@ config SCSI_ISCI tristate "Intel(R) C600 Series Chipset SAS Controller" depends on PCI && SCSI depends on X86 - # (temporary): known alpha quality driver - depends on EXPERIMENTAL select SCSI_SAS_LIBSAS - select SCSI_SAS_HOST_SMP ---help--- This driver supports the 6Gb/s SAS capabilities of the storage control unit found in the Intel(R) C600 series chipset. - The experimental tag will be removed after the driver exits alpha - config SCSI_GENERIC_NCR5380 tristate "Generic NCR5380/53c400 SCSI PIO support" depends on ISA && SCSI diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h index 78963be..cb07c62 100644 --- a/drivers/scsi/bfa/bfa_defs_svc.h +++ b/drivers/scsi/bfa/bfa_defs_svc.h @@ -673,12 +673,7 @@ struct bfa_itnim_iostats_s { u32 tm_iocdowns; /* TM cleaned-up due to IOC down */ u32 tm_cleanups; /* TM cleanup requests */ u32 tm_cleanup_comps; /* TM cleanup completions */ - u32 lm_lun_across_sg; /* LM lun is across sg data buf */ - u32 lm_lun_not_sup; /* LM lun not supported */ - u32 lm_rpl_data_changed; /* LM report-lun data changed */ - u32 lm_wire_residue_changed; /* LM report-lun rsp residue changed */ - u32 lm_small_buf_addresidue; /* LM buf smaller than reported cnt */ - u32 lm_lun_not_rdy; /* LM lun not ready */ + u32 rsvd[6]; }; /* Modify char* port_stt[] in bfal_port.c if a new state was added */ diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h index 50b6a1c..8d0b88f 100644 --- a/drivers/scsi/bfa/bfa_fc.h +++ b/drivers/scsi/bfa/bfa_fc.h @@ -56,161 +56,6 @@ struct scsi_cdb_s { #define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocarion length */ -#define SCSI_SENSE_CUR_ERR 0x70 -#define SCSI_SENSE_DEF_ERR 0x71 - -/* - * SCSI additional sense codes - */ -#define SCSI_ASC_LUN_NOT_READY 0x04 -#define SCSI_ASC_LUN_NOT_SUPPORTED 0x25 -#define SCSI_ASC_TOCC 0x3F - -/* - * SCSI additional sense code qualifiers - */ -#define SCSI_ASCQ_MAN_INTR_REQ 0x03 /* manual intervention req */ -#define SCSI_ASCQ_RL_DATA_CHANGED 0x0E /* report luns data changed */ - -/* - * Methods of reporting informational exceptions - */ -#define SCSI_MP_IEC_UNIT_ATTN 0x2 /* generate unit attention */ - -struct scsi_report_luns_data_s { - u32 lun_list_length; /* length of LUN list length */ - u32 reserved; - struct scsi_lun lun[1]; /* first LUN in lun list */ -}; - -struct scsi_inquiry_vendor_s { - u8 vendor_id[8]; -}; - -struct scsi_inquiry_prodid_s { - u8 product_id[16]; -}; - -struct scsi_inquiry_prodrev_s { - u8 product_rev[4]; -}; - -struct scsi_inquiry_data_s { -#ifdef __BIG_ENDIAN - u8 peripheral_qual:3; /* peripheral qualifier */ - u8 device_type:5; /* peripheral device type */ - u8 rmb:1; /* removable medium bit */ - u8 device_type_mod:7; /* device type modifier */ - u8 version; - u8 aenc:1; /* async evt notification capability */ - u8 trm_iop:1; /* terminate I/O process */ - u8 norm_aca:1; /* normal ACA supported */ - u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */ - u8 rsp_data_format:4; - u8 additional_len; - u8 sccs:1; - u8 reserved1:7; - u8 reserved2:1; - u8 enc_serv:1; /* enclosure service component */ - u8 reserved3:1; - u8 multi_port:1; /* multi-port device */ - u8 m_chngr:1; /* device in medium transport element */ - u8 ack_req_q:1; /* SIP specific bit */ - u8 addr32:1; /* SIP specific bit */ - u8 addr16:1; /* SIP specific bit */ - u8 rel_adr:1; /* relative address */ - u8 w_bus32:1; - u8 w_bus16:1; - u8 synchronous:1; - u8 linked_commands:1; - u8 
trans_dis:1; - u8 cmd_queue:1; /* command queueing supported */ - u8 soft_reset:1; /* soft reset alternative (VS) */ -#else - u8 device_type:5; /* peripheral device type */ - u8 peripheral_qual:3; /* peripheral qualifier */ - u8 device_type_mod:7; /* device type modifier */ - u8 rmb:1; /* removable medium bit */ - u8 version; - u8 rsp_data_format:4; - u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */ - u8 norm_aca:1; /* normal ACA supported */ - u8 terminate_iop:1;/* terminate I/O process */ - u8 aenc:1; /* async evt notification capability */ - u8 additional_len; - u8 reserved1:7; - u8 sccs:1; - u8 addr16:1; /* SIP specific bit */ - u8 addr32:1; /* SIP specific bit */ - u8 ack_req_q:1; /* SIP specific bit */ - u8 m_chngr:1; /* device in medium transport element */ - u8 multi_port:1; /* multi-port device */ - u8 reserved3:1; /* TBD - Vendor Specific */ - u8 enc_serv:1; /* enclosure service component */ - u8 reserved2:1; - u8 soft_seset:1; /* soft reset alternative (VS) */ - u8 cmd_queue:1; /* command queueing supported */ - u8 trans_dis:1; - u8 linked_commands:1; - u8 synchronous:1; - u8 w_bus16:1; - u8 w_bus32:1; - u8 rel_adr:1; /* relative address */ -#endif - struct scsi_inquiry_vendor_s vendor_id; - struct scsi_inquiry_prodid_s product_id; - struct scsi_inquiry_prodrev_s product_rev; - u8 vendor_specific[20]; - u8 reserved4[40]; -}; - -/* - * SCSI sense data format - */ -struct scsi_sense_s { -#ifdef __BIG_ENDIAN - u8 valid:1; - u8 rsp_code:7; -#else - u8 rsp_code:7; - u8 valid:1; -#endif - u8 seg_num; -#ifdef __BIG_ENDIAN - u8 file_mark:1; - u8 eom:1; /* end of media */ - u8 ili:1; /* incorrect length indicator */ - u8 reserved:1; - u8 sense_key:4; -#else - u8 sense_key:4; - u8 reserved:1; - u8 ili:1; /* incorrect length indicator */ - u8 eom:1; /* end of media */ - u8 file_mark:1; -#endif - u8 information[4]; /* device-type or cmd specific info */ - u8 add_sense_length; /* additional sense length */ - u8 command_info[4];/* command specific information */ - u8 asc; /* additional sense code */ - u8 ascq; /* additional sense code qualifier */ - u8 fru_code; /* field replaceable unit code */ -#ifdef __BIG_ENDIAN - u8 sksv:1; /* sense key specific valid */ - u8 c_d:1; /* command/data bit */ - u8 res1:2; - u8 bpv:1; /* bit pointer valid */ - u8 bpointer:3; /* bit pointer */ -#else - u8 bpointer:3; /* bit pointer */ - u8 bpv:1; /* bit pointer valid */ - u8 res1:2; - u8 c_d:1; /* command/data bit */ - u8 sksv:1; /* sense key specific valid */ -#endif - u8 fpointer[2]; /* field pointer */ -}; - /* * Fibre Channel Header Structure (FCHS) definition */ diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c index e07bd47..f0f80e2 100644 --- a/drivers/scsi/bfa/bfa_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcpim.c @@ -24,8 +24,6 @@ BFA_TRC_FILE(HAL, FCPIM); * BFA ITNIM Related definitions */ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim); -static bfa_boolean_t bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim); -static bfa_boolean_t bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim); static void bfa_ioim_lm_init(struct bfa_s *bfa); #define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \ @@ -60,14 +58,6 @@ static void bfa_ioim_lm_init(struct bfa_s *bfa); } \ } while (0) -#define bfa_ioim_rp_wwn(__ioim) \ - (((struct bfa_fcs_rport_s *) \ - (__ioim)->itnim->rport->rport_drv)->pwwn) - -#define bfa_ioim_lp_wwn(__ioim) \ - ((BFA_LPS_FROM_TAG(BFA_LPS_MOD((__ioim)->bfa), \ - (__ioim)->itnim->rport->rport_info.lp_tag))->pwwn) \ - #define bfa_itnim_sler_cb(__itnim) do { \ if 
((__itnim)->bfa->fcs) \ bfa_cb_itnim_sler((__itnim)->ditn); \ @@ -77,13 +67,6 @@ static void bfa_ioim_lm_init(struct bfa_s *bfa); } \ } while (0) -enum bfa_ioim_lm_status { - BFA_IOIM_LM_PRESENT = 1, - BFA_IOIM_LM_LUN_NOT_SUP = 2, - BFA_IOIM_LM_RPL_DATA_CHANGED = 3, - BFA_IOIM_LM_LUN_NOT_RDY = 4, -}; - enum bfa_ioim_lm_ua_status { BFA_IOIM_LM_UA_RESET = 0, BFA_IOIM_LM_UA_SET = 1, @@ -145,9 +128,6 @@ enum bfa_ioim_event { BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */ BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */ BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */ - BFA_IOIM_SM_LM_LUN_NOT_SUP = 19,/* lunmask lun not supported */ - BFA_IOIM_SM_LM_RPL_DC = 20, /* lunmask report-lun data changed */ - BFA_IOIM_SM_LM_LUN_NOT_RDY = 21,/* lunmask lun not ready */ }; @@ -245,9 +225,6 @@ static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete); static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete); static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete); static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim); -static void __bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete); -static void __bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete); -static void __bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete); /* * forward declaration of BFA IO state machine @@ -445,12 +422,6 @@ bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats, bfa_fcpim_add_iostats(lstats, rstats, output_reqs); bfa_fcpim_add_iostats(lstats, rstats, rd_throughput); bfa_fcpim_add_iostats(lstats, rstats, wr_throughput); - bfa_fcpim_add_iostats(lstats, rstats, lm_lun_across_sg); - bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_sup); - bfa_fcpim_add_iostats(lstats, rstats, lm_rpl_data_changed); - bfa_fcpim_add_iostats(lstats, rstats, lm_wire_residue_changed); - bfa_fcpim_add_iostats(lstats, rstats, lm_small_buf_addresidue); - bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_rdy); } bfa_status_t @@ -1580,27 +1551,6 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) __bfa_cb_ioim_abort, ioim); break; - case BFA_IOIM_SM_LM_LUN_NOT_SUP: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - bfa_ioim_move_to_comp_q(ioim); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, - __bfa_cb_ioim_lm_lun_not_sup, ioim); - break; - - case BFA_IOIM_SM_LM_RPL_DC: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - bfa_ioim_move_to_comp_q(ioim); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, - __bfa_cb_ioim_lm_rpl_dc, ioim); - break; - - case BFA_IOIM_SM_LM_LUN_NOT_RDY: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - bfa_ioim_move_to_comp_q(ioim); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, - __bfa_cb_ioim_lm_lun_not_rdy, ioim); - break; - default: bfa_sm_fault(ioim->bfa, event); } @@ -2160,243 +2110,6 @@ bfa_ioim_lm_init(struct bfa_s *bfa) } } -/* - * Validate LUN for LUN masking - */ -static enum bfa_ioim_lm_status -bfa_ioim_lm_check(struct bfa_ioim_s *ioim, struct bfa_lps_s *lps, - struct bfa_rport_s *rp, struct scsi_lun lun) -{ - u8 i; - struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa); - struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio; - struct scsi_cdb_s *cdb = (struct scsi_cdb_s *)cmnd->cmnd; - - if ((cdb->scsi_cdb[0] == REPORT_LUNS) && - (scsilun_to_int((struct scsi_lun *)&lun) == 0)) { - ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data; - return BFA_IOIM_LM_PRESENT; - } - - for (i = 0; i < MAX_LUN_MASK_CFG; i++) { - - if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE) - continue; - - if ((scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) 
== - scsilun_to_int((struct scsi_lun *)&lun)) - && (rp->rport_tag == lun_list[i].rp_tag) - && ((u8)ioim->itnim->rport->rport_info.lp_tag == - lun_list[i].lp_tag)) { - bfa_trc(ioim->bfa, lun_list[i].rp_tag); - bfa_trc(ioim->bfa, lun_list[i].lp_tag); - bfa_trc(ioim->bfa, scsilun_to_int( - (struct scsi_lun *)&lun_list[i].lun)); - - if ((lun_list[i].ua == BFA_IOIM_LM_UA_SET) && - ((cdb->scsi_cdb[0] != INQUIRY) || - (cdb->scsi_cdb[0] != REPORT_LUNS))) { - lun_list[i].ua = BFA_IOIM_LM_UA_RESET; - return BFA_IOIM_LM_RPL_DATA_CHANGED; - } - - if (cdb->scsi_cdb[0] == REPORT_LUNS) - ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data; - - return BFA_IOIM_LM_PRESENT; - } - } - - if ((cdb->scsi_cdb[0] == INQUIRY) && - (scsilun_to_int((struct scsi_lun *)&lun) == 0)) { - ioim->proc_rsp_data = bfa_ioim_lm_proc_inq_data; - return BFA_IOIM_LM_PRESENT; - } - - if (cdb->scsi_cdb[0] == TEST_UNIT_READY) - return BFA_IOIM_LM_LUN_NOT_RDY; - - return BFA_IOIM_LM_LUN_NOT_SUP; -} - -static bfa_boolean_t -bfa_ioim_lm_proc_rsp_data_dummy(struct bfa_ioim_s *ioim) -{ - return BFA_TRUE; -} - -static void -bfa_ioim_lm_fetch_lun(struct bfa_ioim_s *ioim, u8 *rl_data, int offset, - int buf_lun_cnt) -{ - struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa); - struct scsi_lun *lun_data = (struct scsi_lun *)(rl_data + offset); - struct scsi_lun lun; - int i, j; - - bfa_trc(ioim->bfa, buf_lun_cnt); - for (j = 0; j < buf_lun_cnt; j++) { - lun = *((struct scsi_lun *)(lun_data + j)); - for (i = 0; i < MAX_LUN_MASK_CFG; i++) { - if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE) - continue; - if ((lun_list[i].rp_wwn == bfa_ioim_rp_wwn(ioim)) && - (lun_list[i].lp_wwn == bfa_ioim_lp_wwn(ioim)) && - (scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) - == scsilun_to_int((struct scsi_lun *)&lun))) { - lun_list[i].state = BFA_IOIM_LUN_MASK_FETCHED; - break; - } - } /* next lun in mask DB */ - } /* next lun in buf */ -} - -static int -bfa_ioim_lm_update_lun_sg(struct bfa_ioim_s *ioim, u32 *pgdlen, - struct scsi_report_luns_data_s *rl) -{ - struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio; - struct scatterlist *sg = scsi_sglist(cmnd); - struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa); - struct scsi_lun *prev_rl_data = NULL, *base_rl_data; - int i, j, sgeid, lun_fetched_cnt = 0, prev_sg_len = 0, base_count; - int lun_across_sg_bytes, bytes_from_next_buf; - u64 last_lun, temp_last_lun; - - /* fetch luns from the first sg element */ - bfa_ioim_lm_fetch_lun(ioim, (u8 *)(rl->lun), 0, - (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1); - - /* fetch luns from multiple sg elements */ - scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), sgeid) { - if (sgeid == 0) { - prev_sg_len = sg_dma_len(sg); - prev_rl_data = (struct scsi_lun *) - phys_to_virt(sg_dma_address(sg)); - continue; - } - - /* if the buf is having more data */ - lun_across_sg_bytes = prev_sg_len % sizeof(struct scsi_lun); - if (lun_across_sg_bytes) { - bfa_trc(ioim->bfa, lun_across_sg_bytes); - bfa_stats(ioim->itnim, lm_lun_across_sg); - bytes_from_next_buf = sizeof(struct scsi_lun) - - lun_across_sg_bytes; - - /* from next buf take higher bytes */ - temp_last_lun = *((u64 *) - phys_to_virt(sg_dma_address(sg))); - last_lun |= temp_last_lun >> - (lun_across_sg_bytes * BITS_PER_BYTE); - - /* from prev buf take higher bytes */ - temp_last_lun = *((u64 *)(prev_rl_data + - (prev_sg_len - lun_across_sg_bytes))); - temp_last_lun >>= bytes_from_next_buf * BITS_PER_BYTE; - last_lun = last_lun | (temp_last_lun << - (bytes_from_next_buf * BITS_PER_BYTE)); - - 
bfa_ioim_lm_fetch_lun(ioim, (u8 *)&last_lun, 0, 1); - } else - bytes_from_next_buf = 0; - - *pgdlen += sg_dma_len(sg); - prev_sg_len = sg_dma_len(sg); - prev_rl_data = (struct scsi_lun *) - phys_to_virt(sg_dma_address(sg)); - bfa_ioim_lm_fetch_lun(ioim, (u8 *)prev_rl_data, - bytes_from_next_buf, - sg_dma_len(sg) / sizeof(struct scsi_lun)); - } - - /* update the report luns data - based on fetched luns */ - sg = scsi_sglist(cmnd); - base_rl_data = (struct scsi_lun *)rl->lun; - base_count = (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1; - for (i = 0, j = 0; i < MAX_LUN_MASK_CFG; i++) { - if (lun_list[i].state == BFA_IOIM_LUN_MASK_FETCHED) { - base_rl_data[j] = lun_list[i].lun; - lun_list[i].state = BFA_IOIM_LUN_MASK_ACTIVE; - j++; - lun_fetched_cnt++; - } - - if (j > base_count) { - j = 0; - sg = sg_next(sg); - base_rl_data = (struct scsi_lun *) - phys_to_virt(sg_dma_address(sg)); - base_count = sg_dma_len(sg) / sizeof(struct scsi_lun); - } - } - - bfa_trc(ioim->bfa, lun_fetched_cnt); - return lun_fetched_cnt; -} - -static bfa_boolean_t -bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim) -{ - struct scsi_inquiry_data_s *inq; - struct scatterlist *sg = scsi_sglist((struct scsi_cmnd *)ioim->dio); - - ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy; - inq = (struct scsi_inquiry_data_s *)phys_to_virt(sg_dma_address(sg)); - - bfa_trc(ioim->bfa, inq->device_type); - inq->peripheral_qual = SCSI_INQ_PQ_NOT_CON; - return 0; -} - -static bfa_boolean_t -bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim) -{ - struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio; - struct scatterlist *sg = scsi_sglist(cmnd); - struct bfi_ioim_rsp_s *m; - struct scsi_report_luns_data_s *rl = NULL; - int lun_count = 0, lun_fetched_cnt = 0; - u32 residue, pgdlen = 0; - - ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy; - if (bfa_get_lun_mask_status(ioim->bfa) != BFA_LUNMASK_ENABLED) - return BFA_TRUE; - - m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg; - if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION) - return BFA_TRUE; - - pgdlen = sg_dma_len(sg); - bfa_trc(ioim->bfa, pgdlen); - rl = (struct scsi_report_luns_data_s *)phys_to_virt(sg_dma_address(sg)); - lun_count = cpu_to_be32(rl->lun_list_length) / sizeof(struct scsi_lun); - lun_fetched_cnt = bfa_ioim_lm_update_lun_sg(ioim, &pgdlen, rl); - - if (lun_count == lun_fetched_cnt) - return BFA_TRUE; - - bfa_trc(ioim->bfa, lun_count); - bfa_trc(ioim->bfa, lun_fetched_cnt); - bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length)); - - if (be32_to_cpu(rl->lun_list_length) <= pgdlen) - rl->lun_list_length = be32_to_cpu(lun_fetched_cnt) * - sizeof(struct scsi_lun); - else - bfa_stats(ioim->itnim, lm_small_buf_addresidue); - - bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length)); - bfa_trc(ioim->bfa, be32_to_cpu(m->residue)); - - residue = be32_to_cpu(m->residue); - residue += (lun_count - lun_fetched_cnt) * sizeof(struct scsi_lun); - bfa_stats(ioim->itnim, lm_wire_residue_changed); - m->residue = be32_to_cpu(residue); - bfa_trc(ioim->bfa, ioim->nsges); - return BFA_FALSE; -} - static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete) { @@ -2454,83 +2167,6 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete) m->scsi_status, sns_len, snsinfo, residue); } -static void -__bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete) -{ - struct bfa_ioim_s *ioim = cbarg; - int sns_len = 0xD; - u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio); - struct scsi_sense_s *snsinfo; - - if (!complete) { - bfa_sm_send_event(ioim, 
BFA_IOIM_SM_HCB); - return; - } - - snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG( - ioim->fcpim->fcp, ioim->iotag); - snsinfo->rsp_code = SCSI_SENSE_CUR_ERR; - snsinfo->add_sense_length = 0xa; - snsinfo->asc = SCSI_ASC_LUN_NOT_SUPPORTED; - snsinfo->sense_key = ILLEGAL_REQUEST; - bfa_trc(ioim->bfa, residue); - bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK, - SCSI_STATUS_CHECK_CONDITION, sns_len, - (u8 *)snsinfo, residue); -} - -static void -__bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete) -{ - struct bfa_ioim_s *ioim = cbarg; - int sns_len = 0xD; - u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio); - struct scsi_sense_s *snsinfo; - - if (!complete) { - bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); - return; - } - - snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp, - ioim->iotag); - snsinfo->rsp_code = SCSI_SENSE_CUR_ERR; - snsinfo->sense_key = SCSI_MP_IEC_UNIT_ATTN; - snsinfo->asc = SCSI_ASC_TOCC; - snsinfo->add_sense_length = 0x6; - snsinfo->ascq = SCSI_ASCQ_RL_DATA_CHANGED; - bfa_trc(ioim->bfa, residue); - bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK, - SCSI_STATUS_CHECK_CONDITION, sns_len, - (u8 *)snsinfo, residue); -} - -static void -__bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete) -{ - struct bfa_ioim_s *ioim = cbarg; - int sns_len = 0xD; - u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio); - struct scsi_sense_s *snsinfo; - - if (!complete) { - bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); - return; - } - - snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG( - ioim->fcpim->fcp, ioim->iotag); - snsinfo->rsp_code = SCSI_SENSE_CUR_ERR; - snsinfo->add_sense_length = 0xa; - snsinfo->sense_key = NOT_READY; - snsinfo->asc = SCSI_ASC_LUN_NOT_READY; - snsinfo->ascq = SCSI_ASCQ_MAN_INTR_REQ; - bfa_trc(ioim->bfa, residue); - bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK, - SCSI_STATUS_CHECK_CONDITION, sns_len, - (u8 *)snsinfo, residue); -} - void bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn, u16 rp_tag, u8 lp_tag) @@ -2647,7 +2283,8 @@ bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn, if (port) { *pwwn = port->port_cfg.pwwn; rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn); - rp = rp_fcs->bfa_rport; + if (rp_fcs) + rp = rp_fcs->bfa_rport; } lunm_list = bfa_get_lun_mask_list(bfa); @@ -2715,7 +2352,8 @@ bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn, if (port) { *pwwn = port->port_cfg.pwwn; rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn); - rp = rp_fcs->bfa_rport; + if (rp_fcs) + rp = rp_fcs->bfa_rport; } } @@ -2757,7 +2395,6 @@ __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete) return; } - ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy; bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED, 0, 0, NULL, 0); } @@ -2773,7 +2410,6 @@ __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete) return; } - ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy; bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV, 0, 0, NULL, 0); } @@ -2788,7 +2424,6 @@ __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete) return; } - ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy; bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio); } @@ -3132,7 +2767,6 @@ bfa_ioim_attach(struct bfa_fcpim_s *fcpim) ioim->bfa = fcpim->bfa; ioim->fcpim = fcpim; ioim->iosp = iosp; - ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy; INIT_LIST_HEAD(&ioim->sgpg_q); bfa_reqq_winit(&ioim->iosp->reqq_wait, 
bfa_ioim_qresume, ioim); @@ -3170,7 +2804,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) evt = BFA_IOIM_SM_DONE; else evt = BFA_IOIM_SM_COMP; - ioim->proc_rsp_data(ioim); break; case BFI_IOIM_STS_TIMEDOUT: @@ -3206,7 +2839,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) if (rsp->abort_tag != ioim->abort_tag) { bfa_trc(ioim->bfa, rsp->abort_tag); bfa_trc(ioim->bfa, ioim->abort_tag); - ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy; return; } @@ -3225,7 +2857,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) WARN_ON(1); } - ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy; bfa_sm_send_event(ioim, evt); } @@ -3244,15 +2875,7 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m) bfa_ioim_cb_profile_comp(fcpim, ioim); - if (bfa_get_lun_mask_status(bfa) != BFA_LUNMASK_ENABLED) { - bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD); - return; - } - - if (ioim->proc_rsp_data(ioim) == BFA_TRUE) - bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD); - else - bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP); + bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD); } /* @@ -3364,35 +2987,6 @@ bfa_ioim_free(struct bfa_ioim_s *ioim) void bfa_ioim_start(struct bfa_ioim_s *ioim) { - struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio; - struct bfa_lps_s *lps; - enum bfa_ioim_lm_status status; - struct scsi_lun scsilun; - - if (bfa_get_lun_mask_status(ioim->bfa) == BFA_LUNMASK_ENABLED) { - lps = BFA_IOIM_TO_LPS(ioim); - int_to_scsilun(cmnd->device->lun, &scsilun); - status = bfa_ioim_lm_check(ioim, lps, - ioim->itnim->rport, scsilun); - if (status == BFA_IOIM_LM_LUN_NOT_RDY) { - bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_RDY); - bfa_stats(ioim->itnim, lm_lun_not_rdy); - return; - } - - if (status == BFA_IOIM_LM_LUN_NOT_SUP) { - bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_SUP); - bfa_stats(ioim->itnim, lm_lun_not_sup); - return; - } - - if (status == BFA_IOIM_LM_RPL_DATA_CHANGED) { - bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_RPL_DC); - bfa_stats(ioim->itnim, lm_rpl_data_changed); - return; - } - } - bfa_ioim_cb_profile_start(ioim->fcpim, ioim); /* diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h index 1080bcb..36f26da 100644 --- a/drivers/scsi/bfa/bfa_fcpim.h +++ b/drivers/scsi/bfa/bfa_fcpim.h @@ -110,7 +110,6 @@ struct bfad_ioim_s; struct bfad_tskim_s; typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim); -typedef bfa_boolean_t (*bfa_ioim_lm_proc_rsp_data_t) (struct bfa_ioim_s *ioim); struct bfa_fcpim_s { struct bfa_s *bfa; @@ -124,7 +123,6 @@ struct bfa_fcpim_s { u32 path_tov; u16 q_depth; u8 reqq; /* Request queue to be used */ - u8 lun_masking_pending; struct list_head itnim_q; /* queue of active itnim */ struct list_head ioim_resfree_q; /* IOs waiting for f/w */ struct list_head ioim_comp_q; /* IO global comp Q */ @@ -181,7 +179,6 @@ struct bfa_ioim_s { u8 reqq; /* Request queue for I/O */ u8 mode; /* IO is passthrough or not */ u64 start_time; /* IO's Profile start val */ - bfa_ioim_lm_proc_rsp_data_t proc_rsp_data; /* RSP data adjust */ }; struct bfa_ioim_sp_s { @@ -261,10 +258,6 @@ struct bfa_itnim_s { (__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET; \ } while (0) -#define BFA_IOIM_TO_LPS(__ioim) \ - BFA_LPS_FROM_TAG(BFA_LPS_MOD(__ioim->bfa), \ - __ioim->itnim->rport->rport_info.lp_tag) - static inline bfa_boolean_t bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim) { diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h index 95adb86..b52cbb6 100644 --- a/drivers/scsi/bfa/bfa_svc.h +++ 
b/drivers/scsi/bfa/bfa_svc.h @@ -582,11 +582,6 @@ void bfa_cb_rport_qos_scn_prio(void *rport, #define BFA_LP_TAG_INVALID 0xff void bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp); void bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp); -bfa_boolean_t bfa_rport_lunmask_active(struct bfa_rport_s *rp); -wwn_t bfa_rport_get_pwwn(struct bfa_s *bfa, struct bfa_rport_s *rp); -struct bfa_rport_s *bfa_rport_get_by_wwn(struct bfa_s *bfa, u16 vf_id, - wwn_t *lpwwn, wwn_t rpwwn); -void *bfa_cb_get_rp_by_wwn(void *arg, u16 vf_id, wwn_t *lpwwn, wwn_t rpwwn); /* * bfa fcxp API functions diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 66fb725..404fd10 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -674,6 +674,7 @@ bfad_vport_create(struct bfad_s *bfad, u16 vf_id, spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcs_vport_start(&vport->fcs_vport); + list_add_tail(&vport->list_entry, &bfad->vport_list); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return BFA_STATUS_OK; @@ -1404,6 +1405,7 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) bfad->ref_count = 0; bfad->pport.bfad = bfad; INIT_LIST_HEAD(&bfad->pbc_vport_list); + INIT_LIST_HEAD(&bfad->vport_list); /* Setup the debugfs node for this bfad */ if (bfa_debugfs_enable) diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c index 9d95844..1938fe0 100644 --- a/drivers/scsi/bfa/bfad_attr.c +++ b/drivers/scsi/bfa/bfad_attr.c @@ -491,7 +491,7 @@ bfad_im_vport_delete(struct fc_vport *fc_vport) free_scsi_host: bfad_scsi_host_free(bfad, im_port); - + list_del(&vport->list_entry); kfree(vport); return 0; diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c index 06fc00c..530de2b 100644 --- a/drivers/scsi/bfa/bfad_bsg.c +++ b/drivers/scsi/bfa/bfad_bsg.c @@ -2394,6 +2394,21 @@ out: return 0; } +/* Function to reset the LUN SCAN mode */ +static void +bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg) +{ + struct bfad_im_port_s *pport_im = bfad->pport.im_port; + struct bfad_vport_s *vport = NULL; + + /* Set the scsi device LUN SCAN flags for base port */ + bfad_reset_sdev_bflags(pport_im, lunmask_cfg); + + /* Set the scsi device LUN SCAN flags for the vports */ + list_for_each_entry(vport, &bfad->vport_list, list_entry) + bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg); +} + int bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd) { @@ -2401,11 +2416,17 @@ bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd) unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); - if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) + if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) { iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE); - else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) + /* Set the LUN Scanning mode to be Sequential scan */ + if (iocmd->status == BFA_STATUS_OK) + bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE); + } else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) { iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE); - else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR) + /* Set the LUN Scanning mode to default REPORT_LUNS scan */ + if (iocmd->status == BFA_STATUS_OK) + bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE); + } else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR) iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; diff --git a/drivers/scsi/bfa/bfad_drv.h 
b/drivers/scsi/bfa/bfad_drv.h index 5e19a5f..dc5b9d9 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h @@ -43,6 +43,7 @@ #include <scsi/scsi_transport_fc.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_bsg_fc.h> +#include <scsi/scsi_devinfo.h> #include "bfa_modules.h" #include "bfa_fcs.h" @@ -227,6 +228,7 @@ struct bfad_s { struct list_head active_aen_q; struct bfa_aen_entry_s aen_list[BFA_AEN_MAX_ENTRY]; spinlock_t bfad_aen_spinlock; + struct list_head vport_list; }; /* BFAD state machine events */ diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index e5db649..3153923 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c @@ -918,16 +918,70 @@ bfad_get_itnim(struct bfad_im_port_s *im_port, int id) } /* + * Function is invoked from the SCSI Host Template slave_alloc() entry point. + * Has the logic to query the LUN Mask database to check if this LUN needs to + * be made visible to the SCSI mid-layer or not. + * + * Returns BFA_STATUS_OK if this LUN needs to be added to the OS stack. + * Returns -ENXIO to notify SCSI mid-layer to not add this LUN to the OS stack. + */ +static int +bfad_im_check_if_make_lun_visible(struct scsi_device *sdev, + struct fc_rport *rport) +{ + struct bfad_itnim_data_s *itnim_data = + (struct bfad_itnim_data_s *) rport->dd_data; + struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa; + struct bfa_rport_s *bfa_rport = itnim_data->itnim->bfa_itnim->rport; + struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(bfa); + int i = 0, ret = -ENXIO; + + for (i = 0; i < MAX_LUN_MASK_CFG; i++) { + if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE && + scsilun_to_int(&lun_list[i].lun) == sdev->lun && + lun_list[i].rp_tag == bfa_rport->rport_tag && + lun_list[i].lp_tag == (u8)bfa_rport->rport_info.lp_tag) { + ret = BFA_STATUS_OK; + break; + } + } + return ret; +} + +/* * Scsi_Host template entry slave_alloc */ static int bfad_im_slave_alloc(struct scsi_device *sdev) { struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + struct bfad_itnim_data_s *itnim_data = + (struct bfad_itnim_data_s *) rport->dd_data; + struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa; if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; + if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED) { + /* + * We should not mask LUN 0 - since this will translate + * to no LUN / TARGET for SCSI ml resulting no scan. + */ + if (sdev->lun == 0) { + sdev->sdev_bflags |= BLIST_NOREPORTLUN | + BLIST_SPARSELUN; + goto done; + } + + /* + * Query LUN Mask configuration - to expose this LUN + * to the SCSI mid-layer or to mask it. 
+ */ + if (bfad_im_check_if_make_lun_visible(sdev, rport) != + BFA_STATUS_OK) + return -ENXIO; + } +done: sdev->hostdata = rport->dd_data; return 0; @@ -1037,6 +1091,8 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim) && (fc_rport->scsi_target_id < MAX_FCP_TARGET)) itnim->scsi_tgt_id = fc_rport->scsi_target_id; + itnim->channel = fc_rport->channel; + return; } diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h index 004b6cf..0814367 100644 --- a/drivers/scsi/bfa/bfad_im.h +++ b/drivers/scsi/bfa/bfad_im.h @@ -91,6 +91,7 @@ struct bfad_itnim_s { struct fc_rport *fc_rport; struct bfa_itnim_s *bfa_itnim; u16 scsi_tgt_id; + u16 channel; u16 queue_work; unsigned long last_ramp_up_time; unsigned long last_queue_full_time; @@ -166,4 +167,30 @@ irqreturn_t bfad_intx(int irq, void *dev_id); int bfad_im_bsg_request(struct fc_bsg_job *job); int bfad_im_bsg_timeout(struct fc_bsg_job *job); +/* + * Macro to set the SCSI device sdev_bflags - sdev_bflags are used by the + * SCSI mid-layer to choose LUN Scanning mode REPORT_LUNS vs. Sequential Scan + * + * Internally iterate's over all the ITNIM's part of the im_port & set's the + * sdev_bflags for the scsi_device associated with LUN #0. + */ +#define bfad_reset_sdev_bflags(__im_port, __lunmask_cfg) do { \ + struct scsi_device *__sdev = NULL; \ + struct bfad_itnim_s *__itnim = NULL; \ + u32 scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN; \ + list_for_each_entry(__itnim, &((__im_port)->itnim_mapped_list), \ + list_entry) { \ + __sdev = scsi_device_lookup((__im_port)->shost, \ + __itnim->channel, \ + __itnim->scsi_tgt_id, 0); \ + if (__sdev) { \ + if ((__lunmask_cfg) == BFA_TRUE) \ + __sdev->sdev_bflags |= scan_flags; \ + else \ + __sdev->sdev_bflags &= ~scan_flags; \ + scsi_device_put(__sdev); \ + } \ + } \ +} while (0) + #endif diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index c5360ff..d3ff9cd 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -1868,8 +1868,9 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode) tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC); if (!tdata->skb) { - pr_warn("alloc skb %u+%u, opcode 0x%x failed.\n", - cdev->skb_tx_rsvd, headroom, opcode); + struct cxgbi_sock *csk = cconn->cep->csk; + struct net_device *ndev = cdev->ports[csk->port_id]; + ndev->stats.tx_dropped++; return -ENOMEM; } diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 4ef0212..04c5cea 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -466,6 +466,11 @@ static int alua_check_sense(struct scsi_device *sdev, * Power On, Reset, or Bus Device Reset, just retry. 
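For reference, the LUN-scan switch wired up above works by toggling scsi_device BLIST hints: when LUN masking is enabled, bfad_reset_sdev_bflags() sets BLIST_NOREPORTLUN | BLIST_SPARSELUN on the LUN 0 device of every mapped target so the midlayer falls back to a sparse, sequential LUN probe instead of trusting REPORT LUNS; disabling masking clears the flags again. A minimal sketch of that toggle in isolation (the helper name and its bool parameter are illustrative, not part of the patch):

#include <linux/types.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_devinfo.h>

/* Illustrative helper: flip the LUN-scan hints on one scsi_device. */
static void set_lun_scan_mode(struct scsi_device *sdev, bool sequential_scan)
{
	unsigned int scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN;

	if (sequential_scan)
		sdev->sdev_bflags |= scan_flags;	/* ignore REPORT LUNS, probe LUN by LUN */
	else
		sdev->sdev_bflags &= ~scan_flags;	/* default REPORT_LUNS based scan */
}

The bfad_reset_sdev_bflags() macro above applies exactly this to the LUN 0 scsi_device of each mapped ITNIM found via scsi_device_lookup(), dropping the reference with scsi_device_put() afterwards.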
*/ return ADD_TO_MLQUEUE; + if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01) + /* + * Mode Parameters Changed + */ + return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) /* * ALUA state changed diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 841ebf4..53a31c7 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -953,6 +953,8 @@ static int __init rdac_init(void) if (!kmpath_rdacd) { scsi_unregister_device_handler(&rdac_dh); printk(KERN_ERR "kmpath_rdacd creation failed.\n"); + + r = -EINVAL; } done: return r; diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 8d67467..e959960 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -58,7 +58,11 @@ module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \ "Direct Data Placement (DDP)."); -DEFINE_MUTEX(fcoe_config_mutex); +unsigned int fcoe_debug_logging; +module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); + +static DEFINE_MUTEX(fcoe_config_mutex); static struct workqueue_struct *fcoe_wq; @@ -67,8 +71,8 @@ static DECLARE_COMPLETION(fcoe_flush_completion); /* fcoe host list */ /* must only by accessed under the RTNL mutex */ -LIST_HEAD(fcoe_hostlist); -DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu); +static LIST_HEAD(fcoe_hostlist); +static DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu); /* Function Prototypes */ static int fcoe_reset(struct Scsi_Host *); @@ -157,7 +161,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = { .lport_set_port_id = fcoe_set_port_id, }; -struct fc_function_template fcoe_nport_fc_functions = { +static struct fc_function_template fcoe_nport_fc_functions = { .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, @@ -197,7 +201,7 @@ struct fc_function_template fcoe_nport_fc_functions = { .bsg_request = fc_lport_bsg_request, }; -struct fc_function_template fcoe_vport_fc_functions = { +static struct fc_function_template fcoe_vport_fc_functions = { .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, @@ -433,7 +437,7 @@ static inline void fcoe_interface_put(struct fcoe_interface *fcoe) * * Caller must be holding the RTNL mutex */ -void fcoe_interface_cleanup(struct fcoe_interface *fcoe) +static void fcoe_interface_cleanup(struct fcoe_interface *fcoe) { struct net_device *netdev = fcoe->netdev; struct fcoe_ctlr *fip = &fcoe->ctlr; @@ -748,7 +752,7 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev) * * Returns: True for read types I/O, otherwise returns false. 
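The scsi_dh_alua hunk above adds ASC/ASCQ 0x2a/0x01 (Mode Parameters Changed) to the unit-attention codes that are simply requeued, next to the existing 0x29 (power on / reset) and 0x2a/0x06 (ALUA state changed) cases. A condensed sketch of that dispatch shape, assuming the usual <scsi/scsi.h> disposition values (the helper itself is illustrative, not the driver's code):

#include <linux/types.h>
#include <scsi/scsi.h>

/* Illustrative: map a subset of UNIT ATTENTION additional sense codes to a
 * midlayer disposition, mirroring the checks in alua_check_sense(). */
static int unit_attention_disposition(u8 asc, u8 ascq)
{
	if (asc == 0x29)			/* power on, reset, or bus device reset */
		return ADD_TO_MLQUEUE;
	if (asc == 0x2a && ascq == 0x01)	/* mode parameters changed */
		return ADD_TO_MLQUEUE;
	if (asc == 0x2a && ascq == 0x06)	/* ALUA state changed */
		return ADD_TO_MLQUEUE;
	return SCSI_RETURN_NOT_HANDLED;		/* let normal error handling run */
}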
*/ -bool fcoe_oem_match(struct fc_frame *fp) +static bool fcoe_oem_match(struct fc_frame *fp) { struct fc_frame_header *fh = fc_frame_header_get(fp); struct fcp_cmnd *fcp; @@ -756,11 +760,12 @@ bool fcoe_oem_match(struct fc_frame *fp) if (fc_fcp_is_read(fr_fsp(fp)) && (fr_fsp(fp)->data_len > fcoe_ddp_min)) return true; - else if (!(ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)) { + else if ((fr_fsp(fp) == NULL) && + (fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) && + (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)) { fcp = fc_frame_payload_get(fp, sizeof(*fcp)); - if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN && - fcp && (ntohl(fcp->fc_dl) > fcoe_ddp_min) && - (fcp->fc_flags & FCP_CFL_WRDATA)) + if ((fcp->fc_flags & FCP_CFL_WRDATA) && + (ntohl(fcp->fc_dl) > fcoe_ddp_min)) return true; } return false; @@ -1106,7 +1111,7 @@ static int __init fcoe_if_init(void) * * Returns: 0 on success */ -int __exit fcoe_if_exit(void) +static int __exit fcoe_if_exit(void) { fc_release_transport(fcoe_nport_scsi_transport); fc_release_transport(fcoe_vport_scsi_transport); @@ -1295,7 +1300,7 @@ static inline unsigned int fcoe_select_cpu(void) * * Returns: 0 for success */ -int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, +static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, struct packet_type *ptype, struct net_device *olddev) { struct fc_lport *lport; @@ -1451,7 +1456,7 @@ static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen) * * Return: 0 for success */ -int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) +static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) { int wlen; u32 crc; @@ -1671,8 +1676,7 @@ static void fcoe_recv_frame(struct sk_buff *skb) skb->dev ? skb->dev->name : "<NULL>"); port = lport_priv(lport); - if (skb_is_nonlinear(skb)) - skb_linearize(skb); /* not ideal */ + skb_linearize(skb); /* check for skb_is_nonlinear is within skb_linearize */ /* * Frame length checks and setting up the header pointers @@ -1728,7 +1732,7 @@ drop: * * Return: 0 for success */ -int fcoe_percpu_receive_thread(void *arg) +static int fcoe_percpu_receive_thread(void *arg) { struct fcoe_percpu_s *p = arg; struct sk_buff *skb; @@ -2146,7 +2150,7 @@ out_nortnl: * Returns: 0 if the ethtool query was successful * -1 if the ethtool query failed */ -int fcoe_link_speed_update(struct fc_lport *lport) +static int fcoe_link_speed_update(struct fc_lport *lport) { struct net_device *netdev = fcoe_netdev(lport); struct ethtool_cmd ecmd; @@ -2180,7 +2184,7 @@ int fcoe_link_speed_update(struct fc_lport *lport) * Returns: 0 if link is UP and OK, -1 if not * */ -int fcoe_link_ok(struct fc_lport *lport) +static int fcoe_link_ok(struct fc_lport *lport) { struct net_device *netdev = fcoe_netdev(lport); @@ -2200,7 +2204,7 @@ int fcoe_link_ok(struct fc_lport *lport) * there no packets that will be handled by the lport, but also that any * threads already handling packet have returned. 
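The fcoe_recv_frame() change above drops the open-coded skb_is_nonlinear() test because skb_linearize() already performs that check internally and is a no-op for linear skbs. A small sketch of the resulting call pattern (the wrapper name is illustrative):

#include <linux/skbuff.h>

/* skb_linearize() in <linux/skbuff.h> is roughly
 * "skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0", so callers do not
 * need their own pre-check; the illustrative wrapper below just calls it. */
static int rx_frame_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);	/* 0 if linear or linearized, -ENOMEM on failure */
}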
*/ -void fcoe_percpu_clean(struct fc_lport *lport) +static void fcoe_percpu_clean(struct fc_lport *lport) { struct fcoe_percpu_s *pp; struct fcoe_rcv_info *fr; @@ -2251,7 +2255,7 @@ void fcoe_percpu_clean(struct fc_lport *lport) * * Returns: Always 0 (return value required by FC transport template) */ -int fcoe_reset(struct Scsi_Host *shost) +static int fcoe_reset(struct Scsi_Host *shost) { struct fc_lport *lport = shost_priv(shost); struct fcoe_port *port = lport_priv(lport); diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index 6c6884b..bcc89e6 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -40,9 +40,7 @@ #define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */ #define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */ -unsigned int fcoe_debug_logging; -module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); +extern unsigned int fcoe_debug_logging; #define FCOE_LOGGING 0x01 /* General logging, not categorized */ #define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */ diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 5140f5d..b96962c 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -4271,7 +4271,9 @@ static void stop_controller_lockup_detector(struct ctlr_info *h) remove_ctlr_from_lockup_detector_list(h); /* If the list of ctlr's to monitor is empty, stop the thread */ if (list_empty(&hpsa_ctlr_list)) { + spin_unlock_irqrestore(&lockup_detector_lock, flags); kthread_stop(hpsa_lockup_detector); + spin_lock_irqsave(&lockup_detector_lock, flags); hpsa_lockup_detector = NULL; } spin_unlock_irqrestore(&lockup_detector_lock, flags); diff --git a/drivers/scsi/isci/firmware/Makefile b/drivers/scsi/isci/firmware/Makefile deleted file mode 100644 index 5f54461..0000000 --- a/drivers/scsi/isci/firmware/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -# Makefile for create_fw -# -CC=gcc -CFLAGS=-c -Wall -O2 -g -LDFLAGS= -SOURCES=create_fw.c -OBJECTS=$(SOURCES:.cpp=.o) -EXECUTABLE=create_fw - -all: $(SOURCES) $(EXECUTABLE) - -$(EXECUTABLE): $(OBJECTS) - $(CC) $(LDFLAGS) $(OBJECTS) -o $@ - -.c.o: - $(CC) $(CFLAGS) $< -O $@ - -clean: - rm -f *.o $(EXECUTABLE) diff --git a/drivers/scsi/isci/firmware/README b/drivers/scsi/isci/firmware/README deleted file mode 100644 index 8056d2b..0000000 --- a/drivers/scsi/isci/firmware/README +++ /dev/null @@ -1,36 +0,0 @@ -This defines the temporary binary blow we are to pass to the SCU -driver to emulate the binary firmware that we will eventually be -able to access via NVRAM on the SCU controller. - -The current size of the binary blob is expected to be 149 bytes or larger - -Header Types: -0x1: Phy Masks -0x2: Phy Gens -0x3: SAS Addrs -0xff: End of Data - -ID string - u8[12]: "#SCU MAGIC#\0" -Version - u8: 1 -SubVersion - u8: 0 - -Header Type - u8: 0x1 -Size - u8: 8 -Phy Mask - u32[8] - -Header Type - u8: 0x2 -Size - u8: 8 -Phy Gen - u32[8] - -Header Type - u8: 0x3 -Size - u8: 8 -Sas Addr - u64[8] - -Header Type - u8: 0xf - - -============================================================================== - -Place isci_firmware.bin in /lib/firmware -Be sure to recreate the initramfs image to include the firmware. 
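The hpsa hunk above releases lockup_detector_lock around kthread_stop(): stopping a kthread blocks until that thread exits, which can sleep and therefore must not happen under a spinlock taken with interrupts disabled. A self-contained sketch of the unlock/stop/relock pattern (lock, list and thread names are illustrative):

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(detector_lock);			/* illustrative */
static LIST_HEAD(monitored_ctlrs);			/* controllers being watched */
static struct task_struct *detector_thread;		/* assumed to be running */

static void stop_detector_if_idle(void)
{
	unsigned long flags;

	spin_lock_irqsave(&detector_lock, flags);
	if (list_empty(&monitored_ctlrs)) {
		/* kthread_stop() waits for the thread to exit and may sleep,
		 * so drop the IRQ-disabling spinlock around the call. */
		spin_unlock_irqrestore(&detector_lock, flags);
		kthread_stop(detector_thread);
		spin_lock_irqsave(&detector_lock, flags);
		detector_thread = NULL;
	}
	spin_unlock_irqrestore(&detector_lock, flags);
}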
- diff --git a/drivers/scsi/isci/firmware/create_fw.c b/drivers/scsi/isci/firmware/create_fw.c deleted file mode 100644 index c7a2887..0000000 --- a/drivers/scsi/isci/firmware/create_fw.c +++ /dev/null @@ -1,99 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> -#include <unistd.h> -#include <sys/types.h> -#include <sys/stat.h> -#include <fcntl.h> -#include <string.h> -#include <errno.h> -#include <asm/types.h> -#include <strings.h> -#include <stdint.h> - -#include "create_fw.h" -#include "../probe_roms.h" - -int write_blob(struct isci_orom *isci_orom) -{ - FILE *fd; - int err; - size_t count; - - fd = fopen(blob_name, "w+"); - if (!fd) { - perror("Open file for write failed"); - fclose(fd); - return -EIO; - } - - count = fwrite(isci_orom, sizeof(struct isci_orom), 1, fd); - if (count != 1) { - perror("Write data failed"); - fclose(fd); - return -EIO; - } - - fclose(fd); - - return 0; -} - -void set_binary_values(struct isci_orom *isci_orom) -{ - int ctrl_idx, phy_idx, port_idx; - - /* setting OROM signature */ - strncpy(isci_orom->hdr.signature, sig, strlen(sig)); - isci_orom->hdr.version = version; - isci_orom->hdr.total_block_length = sizeof(struct isci_orom); - isci_orom->hdr.hdr_length = sizeof(struct sci_bios_oem_param_block_hdr); - isci_orom->hdr.num_elements = num_elements; - - for (ctrl_idx = 0; ctrl_idx < 2; ctrl_idx++) { - isci_orom->ctrl[ctrl_idx].controller.mode_type = mode_type; - isci_orom->ctrl[ctrl_idx].controller.max_concurrent_dev_spin_up = - max_num_concurrent_dev_spin_up; - isci_orom->ctrl[ctrl_idx].controller.do_enable_ssc = - enable_ssc; - - for (port_idx = 0; port_idx < 4; port_idx++) - isci_orom->ctrl[ctrl_idx].ports[port_idx].phy_mask = - phy_mask[ctrl_idx][port_idx]; - - for (phy_idx = 0; phy_idx < 4; phy_idx++) { - isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.high = - (__u32)(sas_addr[ctrl_idx][phy_idx] >> 32); - isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.low = - (__u32)(sas_addr[ctrl_idx][phy_idx]); - - isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control0 = - afe_tx_amp_control0; - isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control1 = - afe_tx_amp_control1; - isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control2 = - afe_tx_amp_control2; - isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control3 = - afe_tx_amp_control3; - } - } -} - -int main(void) -{ - int err; - struct isci_orom *isci_orom; - - isci_orom = malloc(sizeof(struct isci_orom)); - memset(isci_orom, 0, sizeof(struct isci_orom)); - - set_binary_values(isci_orom); - - err = write_blob(isci_orom); - if (err < 0) { - free(isci_orom); - return err; - } - - free(isci_orom); - return 0; -} diff --git a/drivers/scsi/isci/firmware/create_fw.h b/drivers/scsi/isci/firmware/create_fw.h deleted file mode 100644 index 5f29882..0000000 --- a/drivers/scsi/isci/firmware/create_fw.h +++ /dev/null @@ -1,77 +0,0 @@ -#ifndef _CREATE_FW_H_ -#define _CREATE_FW_H_ -#include "../probe_roms.h" - - -/* we are configuring for 2 SCUs */ -static const int num_elements = 2; - -/* - * For all defined arrays: - * elements 0-3 are for SCU0, ports 0-3 - * elements 4-7 are for SCU1, ports 0-3 - * - * valid configurations for one SCU are: - * P0 P1 P2 P3 - * ---------------- - * 0xF,0x0,0x0,0x0 # 1 x4 port - * 0x3,0x0,0x4,0x8 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are each x1 - * # ports - * 0x1,0x2,0xC,0x0 # Phys 0 and 1 are each x1 ports, phy 2 and phy 3 are a x2 - * # port - * 0x3,0x0,0xC,0x0 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are a x2 port - * 0x1,0x2,0x4,0x8 # Each phy is a x1 
port (this is the default configuration) - * - * if there is a port/phy on which you do not wish to override the default - * values, use the value assigned to UNINIT_PARAM (255). - */ - -/* discovery mode type (port auto config mode by default ) */ - -/* - * if there is a port/phy on which you do not wish to override the default - * values, use the value "0000000000000000". SAS address of zero's is - * considered invalid and will not be used. - */ -#ifdef MPC -static const int mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE; -static const __u8 phy_mask[2][4] = { {1, 2, 4, 8}, - {1, 2, 4, 8} }; -static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFFF0000001ULL, - 0x5FCFFFFFF0000002ULL, - 0x5FCFFFFFF0000003ULL, - 0x5FCFFFFFF0000004ULL }, - { 0x5FCFFFFFF0000005ULL, - 0x5FCFFFFFF0000006ULL, - 0x5FCFFFFFF0000007ULL, - 0x5FCFFFFFF0000008ULL } }; -#else /* APC (default) */ -static const int mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE; -static const __u8 phy_mask[2][4]; -static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFF00000001ULL, - 0x5FCFFFFF00000001ULL, - 0x5FCFFFFF00000001ULL, - 0x5FCFFFFF00000001ULL }, - { 0x5FCFFFFF00000002ULL, - 0x5FCFFFFF00000002ULL, - 0x5FCFFFFF00000002ULL, - 0x5FCFFFFF00000002ULL } }; -#endif - -/* Maximum number of concurrent device spin up */ -static const int max_num_concurrent_dev_spin_up = 1; - -/* enable of ssc operation */ -static const int enable_ssc; - -/* AFE_TX_AMP_CONTROL */ -static const unsigned int afe_tx_amp_control0 = 0x000bdd08; -static const unsigned int afe_tx_amp_control1 = 0x000ffc00; -static const unsigned int afe_tx_amp_control2 = 0x000b7c09; -static const unsigned int afe_tx_amp_control3 = 0x000afc6e; - -static const char blob_name[] = "isci_firmware.bin"; -static const char sig[] = "ISCUOEMB"; -static const unsigned char version = 0x10; - -#endif diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index e7fe9c4..1a65d65 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c @@ -899,7 +899,8 @@ static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost) */ if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) || (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) || - (iphy->is_in_link_training == true && is_phy_starting(iphy))) { + (iphy->is_in_link_training == true && is_phy_starting(iphy)) || + (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask)) { is_controller_start_complete = false; break; } @@ -1666,6 +1667,9 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost /* Default to no SSC operation. */ ihost->oem_parameters.controller.do_enable_ssc = false; + /* Default to short cables on all phys. */ + ihost->oem_parameters.controller.cable_selection_mask = 0; + /* Initialize all of the port parameter information to narrow ports. */ for (index = 0; index < SCI_MAX_PORTS; index++) { ihost->oem_parameters.ports[index].phy_mask = 0; @@ -1673,8 +1677,9 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost /* Initialize all of the phy parameter information. */ for (index = 0; index < SCI_MAX_PHYS; index++) { - /* Default to 6G (i.e. Gen 3) for now. */ - ihost->user_parameters.phys[index].max_speed_generation = 3; + /* Default to 3G (i.e. Gen 2). 
*/ + ihost->user_parameters.phys[index].max_speed_generation = + SCIC_SDS_PARM_GEN2_SPEED; /* the frequencies cannot be 0 */ ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f; @@ -1694,7 +1699,7 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost ihost->user_parameters.ssp_inactivity_timeout = 5; ihost->user_parameters.stp_max_occupancy_timeout = 5; ihost->user_parameters.ssp_max_occupancy_timeout = 20; - ihost->user_parameters.no_outbound_task_timeout = 20; + ihost->user_parameters.no_outbound_task_timeout = 2; } static void controller_timeout(unsigned long data) @@ -1759,7 +1764,7 @@ static enum sci_status sci_controller_construct(struct isci_host *ihost, return sci_controller_reset(ihost); } -int sci_oem_parameters_validate(struct sci_oem_params *oem) +int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version) { int i; @@ -1791,18 +1796,61 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem) oem->controller.max_concurr_spin_up < 1) return -EINVAL; + if (oem->controller.do_enable_ssc) { + if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1) + return -EINVAL; + + if (version >= ISCI_ROM_VER_1_1) { + u8 test = oem->controller.ssc_sata_tx_spread_level; + + switch (test) { + case 0: + case 2: + case 3: + case 6: + case 7: + break; + default: + return -EINVAL; + } + + test = oem->controller.ssc_sas_tx_spread_level; + if (oem->controller.ssc_sas_tx_type == 0) { + switch (test) { + case 0: + case 2: + case 3: + break; + default: + return -EINVAL; + } + } else if (oem->controller.ssc_sas_tx_type == 1) { + switch (test) { + case 0: + case 3: + case 6: + break; + default: + return -EINVAL; + } + } + } + } + return 0; } static enum sci_status sci_oem_parameters_set(struct isci_host *ihost) { u32 state = ihost->sm.current_state_id; + struct isci_pci_info *pci_info = to_pci_info(ihost->pdev); if (state == SCIC_RESET || state == SCIC_INITIALIZING || state == SCIC_INITIALIZED) { - if (sci_oem_parameters_validate(&ihost->oem_parameters)) + if (sci_oem_parameters_validate(&ihost->oem_parameters, + pci_info->orom->hdr.version)) return SCI_FAILURE_INVALID_PARAMETER_VALUE; return SCI_SUCCESS; @@ -1857,6 +1905,31 @@ static void power_control_timeout(unsigned long data) ihost->power_control.phys_waiting--; ihost->power_control.phys_granted_power++; sci_phy_consume_power_handler(iphy); + + if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) { + u8 j; + + for (j = 0; j < SCI_MAX_PHYS; j++) { + struct isci_phy *requester = ihost->power_control.requesters[j]; + + /* + * Search the power_control queue to see if there are other phys + * attached to the same remote device. If found, take all of + * them out of await_sas_power state. + */ + if (requester != NULL && requester != iphy) { + u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr, + iphy->frame_rcvd.iaf.sas_addr, + sizeof(requester->frame_rcvd.iaf.sas_addr)); + + if (other == 0) { + ihost->power_control.requesters[j] = NULL; + ihost->power_control.phys_waiting--; + sci_phy_consume_power_handler(requester); + } + } + } + } } /* @@ -1891,9 +1964,34 @@ void sci_controller_power_control_queue_insert(struct isci_host *ihost, ihost->power_control.timer_started = true; } else { - /* Add the phy in the waiting list */ - ihost->power_control.requesters[iphy->phy_index] = iphy; - ihost->power_control.phys_waiting++; + /* + * There are phys, attached to the same sas address as this phy, are + * already in READY state, this phy don't need wait. 
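The hunk continuing below grants spin-up power immediately when another phy attached to the same remote SAS address is already in the READY state, comparing the 8-byte SAS addresses captured from the received IDENTIFY frames with memcmp(). A condensed, standalone sketch of that test (plain arrays stand in for the isci phy table; names are illustrative):

#include <linux/string.h>
#include <linux/types.h>

/* Illustrative: ready_addrs[] holds the attached SAS address of each phy
 * that is already READY; return true if the new phy must queue and wait
 * for power, false if a phy on the same remote device was already granted. */
static bool must_wait_for_power(const u8 (*ready_addrs)[8], int nr_ready,
				const u8 new_addr[8])
{
	int i;

	for (i = 0; i < nr_ready; i++)
		if (!memcmp(ready_addrs[i], new_addr, 8))
			return false;
	return true;
}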
+ */ + u8 i; + struct isci_phy *current_phy; + + for (i = 0; i < SCI_MAX_PHYS; i++) { + u8 other; + current_phy = &ihost->phys[i]; + + other = memcmp(current_phy->frame_rcvd.iaf.sas_addr, + iphy->frame_rcvd.iaf.sas_addr, + sizeof(current_phy->frame_rcvd.iaf.sas_addr)); + + if (current_phy->sm.current_state_id == SCI_PHY_READY && + current_phy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS && + other == 0) { + sci_phy_consume_power_handler(iphy); + break; + } + } + + if (i == SCI_MAX_PHYS) { + /* Add the phy in the waiting list */ + ihost->power_control.requesters[iphy->phy_index] = iphy; + ihost->power_control.phys_waiting++; + } } } @@ -1908,162 +2006,250 @@ void sci_controller_power_control_queue_remove(struct isci_host *ihost, ihost->power_control.requesters[iphy->phy_index] = NULL; } +static int is_long_cable(int phy, unsigned char selection_byte) +{ + return !!(selection_byte & (1 << phy)); +} + +static int is_medium_cable(int phy, unsigned char selection_byte) +{ + return !!(selection_byte & (1 << (phy + 4))); +} + +static enum cable_selections decode_selection_byte( + int phy, + unsigned char selection_byte) +{ + return ((selection_byte & (1 << phy)) ? 1 : 0) + + (selection_byte & (1 << (phy + 4)) ? 2 : 0); +} + +static unsigned char *to_cable_select(struct isci_host *ihost) +{ + if (is_cable_select_overridden()) + return ((unsigned char *)&cable_selection_override) + + ihost->id; + else + return &ihost->oem_parameters.controller.cable_selection_mask; +} + +enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy) +{ + return decode_selection_byte(phy, *to_cable_select(ihost)); +} + +char *lookup_cable_names(enum cable_selections selection) +{ + static char *cable_names[] = { + [short_cable] = "short", + [long_cable] = "long", + [medium_cable] = "medium", + [undefined_cable] = "<undefined, assumed long>" /* bit 0==1 */ + }; + return (selection <= undefined_cable) ? cable_names[selection] + : cable_names[undefined_cable]; +} + #define AFE_REGISTER_WRITE_DELAY 10 -/* Initialize the AFE for this phy index. 
We need to read the AFE setup from - * the OEM parameters - */ static void sci_controller_afe_initialization(struct isci_host *ihost) { + struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe; const struct sci_oem_params *oem = &ihost->oem_parameters; struct pci_dev *pdev = ihost->pdev; u32 afe_status; u32 phy_id; + unsigned char cable_selection_mask = *to_cable_select(ihost); /* Clear DFX Status registers */ - writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0); + writel(0x0081000f, &afe->afe_dfx_master_control0); udelay(AFE_REGISTER_WRITE_DELAY); - if (is_b0(pdev)) { + if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) { /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement - * Timer, PM Stagger Timer */ - writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2); + * Timer, PM Stagger Timer + */ + writel(0x0007FFFF, &afe->afe_pmsn_master_control2); udelay(AFE_REGISTER_WRITE_DELAY); } /* Configure bias currents to normal */ if (is_a2(pdev)) - writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control); + writel(0x00005A00, &afe->afe_bias_control); else if (is_b0(pdev) || is_c0(pdev)) - writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control); + writel(0x00005F00, &afe->afe_bias_control); + else if (is_c1(pdev)) + writel(0x00005500, &afe->afe_bias_control); udelay(AFE_REGISTER_WRITE_DELAY); /* Enable PLL */ - if (is_b0(pdev) || is_c0(pdev)) - writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0); - else - writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0); + if (is_a2(pdev)) + writel(0x80040908, &afe->afe_pll_control0); + else if (is_b0(pdev) || is_c0(pdev)) + writel(0x80040A08, &afe->afe_pll_control0); + else if (is_c1(pdev)) { + writel(0x80000B08, &afe->afe_pll_control0); + udelay(AFE_REGISTER_WRITE_DELAY); + writel(0x00000B08, &afe->afe_pll_control0); + udelay(AFE_REGISTER_WRITE_DELAY); + writel(0x80000B08, &afe->afe_pll_control0); + } udelay(AFE_REGISTER_WRITE_DELAY); /* Wait for the PLL to lock */ do { - afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status); + afe_status = readl(&afe->afe_common_block_status); udelay(AFE_REGISTER_WRITE_DELAY); } while ((afe_status & 0x00001000) == 0); if (is_a2(pdev)) { - /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */ - writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0); + /* Shorten SAS SNW lock time (RxLock timer value from 76 + * us to 50 us) + */ + writel(0x7bcc96ad, &afe->afe_pmsn_master_control0); udelay(AFE_REGISTER_WRITE_DELAY); } for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) { + struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_id]; const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id]; + int cable_length_long = + is_long_cable(phy_id, cable_selection_mask); + int cable_length_medium = + is_medium_cable(phy_id, cable_selection_mask); - if (is_b0(pdev)) { - /* Configure transmitter SSC parameters */ - writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control); + if (is_a2(pdev)) { + /* All defaults, except the Receive Word + * Alignament/Comma Detect Enable....(0xe800) + */ + writel(0x00004512, &xcvr->afe_xcvr_control0); + udelay(AFE_REGISTER_WRITE_DELAY); + + writel(0x0050100F, &xcvr->afe_xcvr_control1); + udelay(AFE_REGISTER_WRITE_DELAY); + } else if (is_b0(pdev)) { + /* Configure transmitter SSC parameters */ + writel(0x00030000, &xcvr->afe_tx_ssc_control); udelay(AFE_REGISTER_WRITE_DELAY); } else if (is_c0(pdev)) { - /* Configure transmitter SSC 
parameters */ - writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control); + /* Configure transmitter SSC parameters */ + writel(0x00010202, &xcvr->afe_tx_ssc_control); udelay(AFE_REGISTER_WRITE_DELAY); - /* - * All defaults, except the Receive Word Alignament/Comma Detect - * Enable....(0xe800) */ - writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); + /* All defaults, except the Receive Word + * Alignament/Comma Detect Enable....(0xe800) + */ + writel(0x00014500, &xcvr->afe_xcvr_control0); udelay(AFE_REGISTER_WRITE_DELAY); - } else { - /* - * All defaults, except the Receive Word Alignament/Comma Detect - * Enable....(0xe800) */ - writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); + } else if (is_c1(pdev)) { + /* Configure transmitter SSC parameters */ + writel(0x00010202, &xcvr->afe_tx_ssc_control); udelay(AFE_REGISTER_WRITE_DELAY); - writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1); + /* All defaults, except the Receive Word + * Alignament/Comma Detect Enable....(0xe800) + */ + writel(0x0001C500, &xcvr->afe_xcvr_control0); udelay(AFE_REGISTER_WRITE_DELAY); } - /* - * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) - * & increase TX int & ext bias 20%....(0xe85c) */ + /* Power up TX and RX out from power down (PWRDNTX and + * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c) + */ if (is_a2(pdev)) - writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); + writel(0x000003F0, &xcvr->afe_channel_control); else if (is_b0(pdev)) { - /* Power down TX and RX (PWRDNTX and PWRDNRX) */ - writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); + writel(0x000003D7, &xcvr->afe_channel_control); udelay(AFE_REGISTER_WRITE_DELAY); - /* - * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) - * & increase TX int & ext bias 20%....(0xe85c) */ - writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); - } else { - writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); + writel(0x000003D4, &xcvr->afe_channel_control); + } else if (is_c0(pdev)) { + writel(0x000001E7, &xcvr->afe_channel_control); udelay(AFE_REGISTER_WRITE_DELAY); - /* - * Power up TX and RX out from power down (PWRDNTX and PWRDNRX) - * & increase TX int & ext bias 20%....(0xe85c) */ - writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control); + writel(0x000001E4, &xcvr->afe_channel_control); + } else if (is_c1(pdev)) { + writel(cable_length_long ? 0x000002F7 : 0x000001F7, + &xcvr->afe_channel_control); + udelay(AFE_REGISTER_WRITE_DELAY); + + writel(cable_length_long ? 
0x000002F4 : 0x000001F4, + &xcvr->afe_channel_control); } udelay(AFE_REGISTER_WRITE_DELAY); if (is_a2(pdev)) { /* Enable TX equalization (0xe824) */ - writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); + writel(0x00040000, &xcvr->afe_tx_control); udelay(AFE_REGISTER_WRITE_DELAY); } - /* - * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On), - * RDD=0x0(RX Detect Enabled) ....(0xe800) */ - writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0); + if (is_a2(pdev) || is_b0(pdev)) + /* RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, + * TPD=0x0(TX Power On), RDD=0x0(RX Detect + * Enabled) ....(0xe800) + */ + writel(0x00004100, &xcvr->afe_xcvr_control0); + else if (is_c0(pdev)) + writel(0x00014100, &xcvr->afe_xcvr_control0); + else if (is_c1(pdev)) + writel(0x0001C100, &xcvr->afe_xcvr_control0); udelay(AFE_REGISTER_WRITE_DELAY); /* Leave DFE/FFE on */ if (is_a2(pdev)) - writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); + writel(0x3F11103F, &xcvr->afe_rx_ssc_control0); else if (is_b0(pdev)) { - writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); + writel(0x3F11103F, &xcvr->afe_rx_ssc_control0); udelay(AFE_REGISTER_WRITE_DELAY); /* Enable TX equalization (0xe824) */ - writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); - } else { - writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1); + writel(0x00040000, &xcvr->afe_tx_control); + } else if (is_c0(pdev)) { + writel(0x01400C0F, &xcvr->afe_rx_ssc_control1); + udelay(AFE_REGISTER_WRITE_DELAY); + + writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0); + udelay(AFE_REGISTER_WRITE_DELAY); + + /* Enable TX equalization (0xe824) */ + writel(0x00040000, &xcvr->afe_tx_control); + } else if (is_c1(pdev)) { + writel(cable_length_long ? 0x01500C0C : + cable_length_medium ? 0x01400C0D : 0x02400C0D, + &xcvr->afe_xcvr_control1); + udelay(AFE_REGISTER_WRITE_DELAY); + + writel(0x000003E0, &xcvr->afe_dfx_rx_control1); udelay(AFE_REGISTER_WRITE_DELAY); - writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0); + writel(cable_length_long ? 0x33091C1F : + cable_length_medium ? 
0x3315181F : 0x2B17161F, + &xcvr->afe_rx_ssc_control0); udelay(AFE_REGISTER_WRITE_DELAY); /* Enable TX equalization (0xe824) */ - writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control); + writel(0x00040000, &xcvr->afe_tx_control); } udelay(AFE_REGISTER_WRITE_DELAY); - writel(oem_phy->afe_tx_amp_control0, - &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0); + writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0); udelay(AFE_REGISTER_WRITE_DELAY); - writel(oem_phy->afe_tx_amp_control1, - &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1); + writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1); udelay(AFE_REGISTER_WRITE_DELAY); - writel(oem_phy->afe_tx_amp_control2, - &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2); + writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2); udelay(AFE_REGISTER_WRITE_DELAY); - writel(oem_phy->afe_tx_amp_control3, - &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3); + writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3); udelay(AFE_REGISTER_WRITE_DELAY); } /* Transfer control to the PEs */ - writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0); + writel(0x00010f00, &afe->afe_dfx_master_control0); udelay(AFE_REGISTER_WRITE_DELAY); } diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index 646051a..5477f0f 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h @@ -435,11 +435,36 @@ static inline bool is_b0(struct pci_dev *pdev) static inline bool is_c0(struct pci_dev *pdev) { - if (pdev->revision >= 5) + if (pdev->revision == 5) return true; return false; } +static inline bool is_c1(struct pci_dev *pdev) +{ + if (pdev->revision >= 6) + return true; + return false; +} + +enum cable_selections { + short_cable = 0, + long_cable = 1, + medium_cable = 2, + undefined_cable = 3 +}; + +#define CABLE_OVERRIDE_DISABLED (0x10000) + +static inline int is_cable_select_overridden(void) +{ + return cable_selection_override < CABLE_OVERRIDE_DISABLED; +} + +enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy); +void validate_cable_selections(struct isci_host *ihost); +char *lookup_cable_names(enum cable_selections); + /* set hw control for 'activity', even though active enclosures seem to drive * the activity led on their own. Skip setting FSENG control on 'status' due * to unexpected operation and 'error' due to not being a supported automatic diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index a97edab..17c4c2c 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -65,7 +65,7 @@ #include "probe_roms.h" #define MAJ 1 -#define MIN 0 +#define MIN 1 #define BUILD 0 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\ __stringify(BUILD) @@ -94,7 +94,7 @@ MODULE_DEVICE_TABLE(pci, isci_id_table); /* linux isci specific settings */ -unsigned char no_outbound_task_to = 20; +unsigned char no_outbound_task_to = 2; module_param(no_outbound_task_to, byte, 0); MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)"); @@ -114,7 +114,7 @@ u16 stp_inactive_to = 5; module_param(stp_inactive_to, ushort, 0); MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)"); -unsigned char phy_gen = 3; +unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED; module_param(phy_gen, byte, 0); MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)"); @@ -122,6 +122,14 @@ unsigned char max_concurr_spinup; module_param(max_concurr_spinup, byte, 0); MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup"); +uint cable_selection_override = CABLE_OVERRIDE_DISABLED; +module_param(cable_selection_override, uint, 0); + +MODULE_PARM_DESC(cable_selection_override, + "This field indicates length of the SAS/SATA cable between " + "host and device. If any bits > 15 are set (default) " + "indicates \"use platform defaults\""); + static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev); @@ -412,6 +420,14 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id) return NULL; isci_host->shost = shost; + dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: " + "{%s, %s, %s, %s}\n", + (is_cable_select_overridden() ? "* " : ""), isci_host->id, + lookup_cable_names(decode_cable_selection(isci_host, 3)), + lookup_cable_names(decode_cable_selection(isci_host, 2)), + lookup_cable_names(decode_cable_selection(isci_host, 1)), + lookup_cable_names(decode_cable_selection(isci_host, 0))); + err = isci_host_init(isci_host); if (err) goto err_shost; @@ -466,7 +482,8 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic orom = isci_request_oprom(pdev); for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) { - if (sci_oem_parameters_validate(&orom->ctrl[i])) { + if (sci_oem_parameters_validate(&orom->ctrl[i], + orom->hdr.version)) { dev_warn(&pdev->dev, "[%d]: invalid oem parameters detected, falling back to firmware\n", i); devm_kfree(&pdev->dev, orom); diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h index 8efeb6b..234ab46 100644 --- a/drivers/scsi/isci/isci.h +++ b/drivers/scsi/isci/isci.h @@ -480,6 +480,7 @@ extern u16 ssp_inactive_to; extern u16 stp_inactive_to; extern unsigned char phy_gen; extern unsigned char max_concurr_spinup; +extern uint cable_selection_override; irqreturn_t isci_msix_isr(int vec, void *data); irqreturn_t isci_intx_isr(int vec, void *data); diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c index 35f50c2..fe18acf 100644 --- a/drivers/scsi/isci/phy.c +++ b/drivers/scsi/isci/phy.c @@ -91,22 +91,23 @@ sci_phy_transport_layer_initialization(struct isci_phy *iphy, static enum sci_status sci_phy_link_layer_initialization(struct isci_phy *iphy, - struct scu_link_layer_registers __iomem *reg) + struct scu_link_layer_registers __iomem *llr) { struct isci_host *ihost = iphy->owning_port->owning_controller; + struct sci_phy_user_params *phy_user; + struct sci_phy_oem_params *phy_oem; int phy_idx = iphy->phy_index; - struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx]; - struct sci_phy_oem_params *phy_oem = - &ihost->oem_parameters.phys[phy_idx]; - u32 phy_configuration; 
struct sci_phy_cap phy_cap; + u32 phy_configuration; u32 parity_check = 0; u32 parity_count = 0; u32 llctl, link_rate; u32 clksm_value = 0; u32 sp_timeouts = 0; - iphy->link_layer_registers = reg; + phy_user = &ihost->user_parameters.phys[phy_idx]; + phy_oem = &ihost->oem_parameters.phys[phy_idx]; + iphy->link_layer_registers = llr; /* Set our IDENTIFY frame data */ #define SCI_END_DEVICE 0x01 @@ -116,32 +117,26 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy, SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) | SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) | SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE), - &iphy->link_layer_registers->transmit_identification); + &llr->transmit_identification); /* Write the device SAS Address */ - writel(0xFEDCBA98, - &iphy->link_layer_registers->sas_device_name_high); - writel(phy_idx, &iphy->link_layer_registers->sas_device_name_low); + writel(0xFEDCBA98, &llr->sas_device_name_high); + writel(phy_idx, &llr->sas_device_name_low); /* Write the source SAS Address */ - writel(phy_oem->sas_address.high, - &iphy->link_layer_registers->source_sas_address_high); - writel(phy_oem->sas_address.low, - &iphy->link_layer_registers->source_sas_address_low); + writel(phy_oem->sas_address.high, &llr->source_sas_address_high); + writel(phy_oem->sas_address.low, &llr->source_sas_address_low); /* Clear and Set the PHY Identifier */ - writel(0, &iphy->link_layer_registers->identify_frame_phy_id); - writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx), - &iphy->link_layer_registers->identify_frame_phy_id); + writel(0, &llr->identify_frame_phy_id); + writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx), &llr->identify_frame_phy_id); /* Change the initial state of the phy configuration register */ - phy_configuration = - readl(&iphy->link_layer_registers->phy_configuration); + phy_configuration = readl(&llr->phy_configuration); /* Hold OOB state machine in reset */ phy_configuration |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET); - writel(phy_configuration, - &iphy->link_layer_registers->phy_configuration); + writel(phy_configuration, &llr->phy_configuration); /* Configure the SNW capabilities */ phy_cap.all = 0; @@ -149,15 +144,64 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy, phy_cap.gen3_no_ssc = 1; phy_cap.gen2_no_ssc = 1; phy_cap.gen1_no_ssc = 1; - if (ihost->oem_parameters.controller.do_enable_ssc == true) { + if (ihost->oem_parameters.controller.do_enable_ssc) { + struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe; + struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_idx]; + struct isci_pci_info *pci_info = to_pci_info(ihost->pdev); + bool en_sas = false; + bool en_sata = false; + u32 sas_type = 0; + u32 sata_spread = 0x2; + u32 sas_spread = 0x2; + phy_cap.gen3_ssc = 1; phy_cap.gen2_ssc = 1; phy_cap.gen1_ssc = 1; + + if (pci_info->orom->hdr.version < ISCI_ROM_VER_1_1) + en_sas = en_sata = true; + else { + sata_spread = ihost->oem_parameters.controller.ssc_sata_tx_spread_level; + sas_spread = ihost->oem_parameters.controller.ssc_sas_tx_spread_level; + + if (sata_spread) + en_sata = true; + + if (sas_spread) { + en_sas = true; + sas_type = ihost->oem_parameters.controller.ssc_sas_tx_type; + } + + } + + if (en_sas) { + u32 reg; + + reg = readl(&xcvr->afe_xcvr_control0); + reg |= (0x00100000 | (sas_type << 19)); + writel(reg, &xcvr->afe_xcvr_control0); + + reg = readl(&xcvr->afe_tx_ssc_control); + reg |= sas_spread << 8; + writel(reg, &xcvr->afe_tx_ssc_control); + } + + if (en_sata) { + u32 reg; + + reg = readl(&xcvr->afe_tx_ssc_control); + reg |= sata_spread; + writel(reg, 
&xcvr->afe_tx_ssc_control); + + reg = readl(&llr->stp_control); + reg |= 1 << 12; + writel(reg, &llr->stp_control); + } } - /* - * The SAS specification indicates that the phy_capabilities that - * are transmitted shall have an even parity. Calculate the parity. */ + /* The SAS specification indicates that the phy_capabilities that + * are transmitted shall have an even parity. Calculate the parity. + */ parity_check = phy_cap.all; while (parity_check != 0) { if (parity_check & 0x1) @@ -165,20 +209,20 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy, parity_check >>= 1; } - /* - * If parity indicates there are an odd number of bits set, then - * set the parity bit to 1 in the phy capabilities. */ + /* If parity indicates there are an odd number of bits set, then + * set the parity bit to 1 in the phy capabilities. + */ if ((parity_count % 2) != 0) phy_cap.parity = 1; - writel(phy_cap.all, &iphy->link_layer_registers->phy_capabilities); + writel(phy_cap.all, &llr->phy_capabilities); /* Set the enable spinup period but disable the ability to send * notify enable spinup */ writel(SCU_ENSPINUP_GEN_VAL(COUNT, phy_user->notify_enable_spin_up_insertion_frequency), - &iphy->link_layer_registers->notify_enable_spinup_control); + &llr->notify_enable_spinup_control); /* Write the ALIGN Insertion Ferequency for connected phy and * inpendent of connected state @@ -189,11 +233,13 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy, clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL, phy_user->align_insertion_frequency); - writel(clksm_value, &iphy->link_layer_registers->clock_skew_management); + writel(clksm_value, &llr->clock_skew_management); - /* @todo Provide a way to write this register correctly */ - writel(0x02108421, - &iphy->link_layer_registers->afe_lookup_table_control); + if (is_c0(ihost->pdev) || is_c1(ihost->pdev)) { + writel(0x04210400, &llr->afe_lookup_table_control); + writel(0x020A7C05, &llr->sas_primitive_timeout); + } else + writel(0x02108421, &llr->afe_lookup_table_control); llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT, (u8)ihost->user_parameters.no_outbound_task_timeout); @@ -210,9 +256,9 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy, break; } llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate); - writel(llctl, &iphy->link_layer_registers->link_layer_control); + writel(llctl, &llr->link_layer_control); - sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts); + sp_timeouts = readl(&llr->sas_phy_timeouts); /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */ sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF); @@ -222,20 +268,23 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy, */ sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B); - writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts); + writel(sp_timeouts, &llr->sas_phy_timeouts); if (is_a2(ihost->pdev)) { - /* Program the max ARB time for the PHY to 700us so we inter-operate with - * the PMC expander which shuts down PHYs if the expander PHY generates too - * many breaks. This time value will guarantee that the initiator PHY will - * generate the break. + /* Program the max ARB time for the PHY to 700us so we + * inter-operate with the PMC expander which shuts down + * PHYs if the expander PHY generates too many breaks. + * This time value will guarantee that the initiator PHY + * will generate the break. 
*/ writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME, - &iphy->link_layer_registers->maximum_arbitration_wait_timer_timeout); + &llr->maximum_arbitration_wait_timer_timeout); } - /* Disable link layer hang detection, rely on the OS timeout for I/O timeouts. */ - writel(0, &iphy->link_layer_registers->link_layer_hang_detection_timeout); + /* Disable link layer hang detection, rely on the OS timeout for + * I/O timeouts. + */ + writel(0, &llr->link_layer_hang_detection_timeout); /* We can exit the initial state to the stopped state */ sci_change_state(&iphy->sm, SCI_PHY_STOPPED); @@ -1049,24 +1098,25 @@ static void scu_link_layer_stop_protocol_engine( writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control); } -/** - * - * - * This method will start the OOB/SN state machine for this struct isci_phy object. - */ -static void scu_link_layer_start_oob( - struct isci_phy *iphy) +static void scu_link_layer_start_oob(struct isci_phy *iphy) { - u32 scu_sas_pcfg_value; - - scu_sas_pcfg_value = - readl(&iphy->link_layer_registers->phy_configuration); - scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE); - scu_sas_pcfg_value &= - ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) | - SCU_SAS_PCFG_GEN_BIT(HARD_RESET)); - writel(scu_sas_pcfg_value, - &iphy->link_layer_registers->phy_configuration); + struct scu_link_layer_registers __iomem *ll = iphy->link_layer_registers; + u32 val; + + /** Reset OOB sequence - start */ + val = readl(&ll->phy_configuration); + val &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) | + SCU_SAS_PCFG_GEN_BIT(HARD_RESET)); + writel(val, &ll->phy_configuration); + readl(&ll->phy_configuration); /* flush */ + /** Reset OOB sequence - end */ + + /** Start OOB sequence - start */ + val = readl(&ll->phy_configuration); + val |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE); + writel(val, &ll->phy_configuration); + readl(&ll->phy_configuration); /* flush */ + /** Start OOB sequence - end */ } /** diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c index ac7f277..7c6ac58 100644 --- a/drivers/scsi/isci/port.c +++ b/drivers/scsi/isci/port.c @@ -114,7 +114,7 @@ static u32 sci_port_get_phys(struct isci_port *iport) * value is returned if the specified port is not valid. When this value is * returned, no data is copied to the properties output parameter. 
*/ -static enum sci_status sci_port_get_properties(struct isci_port *iport, +enum sci_status sci_port_get_properties(struct isci_port *iport, struct sci_port_properties *prop) { if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT) @@ -647,19 +647,26 @@ void sci_port_setup_transports(struct isci_port *iport, u32 device_id) } } -static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy, - bool do_notify_user) +static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy) +{ + sci_phy_resume(iphy); + iport->enabled_phy_mask |= 1 << iphy->phy_index; +} + +static void sci_port_activate_phy(struct isci_port *iport, + struct isci_phy *iphy, + u8 flags) { struct isci_host *ihost = iport->owning_controller; - if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) + if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA && (flags & PF_RESUME)) sci_phy_resume(iphy); iport->active_phy_mask |= 1 << iphy->phy_index; sci_controller_clear_invalid_phy(ihost, iphy); - if (do_notify_user == true) + if (flags & PF_NOTIFY) isci_port_link_up(ihost, iport, iphy); } @@ -669,14 +676,19 @@ void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy, struct isci_host *ihost = iport->owning_controller; iport->active_phy_mask &= ~(1 << iphy->phy_index); + iport->enabled_phy_mask &= ~(1 << iphy->phy_index); if (!iport->active_phy_mask) iport->last_active_phy = iphy->phy_index; iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN; - /* Re-assign the phy back to the LP as if it were a narrow port */ - writel(iphy->phy_index, - &iport->port_pe_configuration_register[iphy->phy_index]); + /* Re-assign the phy back to the LP as if it were a narrow port for APC + * mode. For MPC mode, the phy will remain in the port. + */ + if (iport->owning_controller->oem_parameters.controller.mode_type == + SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) + writel(iphy->phy_index, + &iport->port_pe_configuration_register[iphy->phy_index]); if (do_notify_user == true) isci_port_link_down(ihost, iphy, iport); @@ -701,18 +713,16 @@ static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *i * sci_port_general_link_up_handler - phy can be assigned to port? * @sci_port: sci_port object for which has a phy that has gone link up. * @sci_phy: This is the struct isci_phy object that has gone link up. - * @do_notify_user: This parameter specifies whether to inform the user (via - * sci_port_link_up()) as to the fact that a new phy as become ready. + * @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy * - * Determine if this phy can be assigned to this - * port . If the phy is not a valid PHY for - * this port then the function will notify the user. A PHY can only be - * part of a port if it's attached SAS ADDRESS is the same as all other PHYs in - * the same port. none + * Determine if this phy can be assigned to this port . If the phy is + * not a valid PHY for this port then the function will notify the user. + * A PHY can only be part of a port if it's attached SAS ADDRESS is the + * same as all other PHYs in the same port. 
*/ static void sci_port_general_link_up_handler(struct isci_port *iport, - struct isci_phy *iphy, - bool do_notify_user) + struct isci_phy *iphy, + u8 flags) { struct sci_sas_address port_sas_address; struct sci_sas_address phy_sas_address; @@ -730,7 +740,7 @@ static void sci_port_general_link_up_handler(struct isci_port *iport, iport->active_phy_mask == 0) { struct sci_base_state_machine *sm = &iport->sm; - sci_port_activate_phy(iport, iphy, do_notify_user); + sci_port_activate_phy(iport, iphy, flags); if (sm->current_state_id == SCI_PORT_RESETTING) port_state_machine_change(iport, SCI_PORT_READY); } else @@ -781,11 +791,16 @@ bool sci_port_link_detected( struct isci_phy *iphy) { if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) && - (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) && - sci_port_is_wide(iport)) { - sci_port_invalid_link_up(iport, iphy); - - return false; + (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA)) { + if (sci_port_is_wide(iport)) { + sci_port_invalid_link_up(iport, iphy); + return false; + } else { + struct isci_host *ihost = iport->owning_controller; + struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]); + writel(iphy->phy_index, + &dst_port->port_pe_configuration_register[iphy->phy_index]); + } } return true; @@ -975,6 +990,13 @@ static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine } } +static void scic_sds_port_ready_substate_waiting_exit( + struct sci_base_state_machine *sm) +{ + struct isci_port *iport = container_of(sm, typeof(*iport), sm); + sci_port_resume_port_task_scheduler(iport); +} + static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm) { u32 index; @@ -988,13 +1010,13 @@ static void sci_port_ready_substate_operational_enter(struct sci_base_state_mach writel(iport->physical_port_index, &iport->port_pe_configuration_register[ iport->phy_table[index]->phy_index]); + if (((iport->active_phy_mask^iport->enabled_phy_mask) & (1 << index)) != 0) + sci_port_resume_phy(iport, iport->phy_table[index]); } } sci_port_update_viit_entry(iport); - sci_port_resume_port_task_scheduler(iport); - /* * Post the dummy task for the port so the hardware can schedule * io correctly @@ -1061,20 +1083,9 @@ static void sci_port_ready_substate_configuring_enter(struct sci_base_state_mach if (iport->active_phy_mask == 0) { isci_port_not_ready(ihost, iport); - port_state_machine_change(iport, - SCI_PORT_SUB_WAITING); - } else if (iport->started_request_count == 0) - port_state_machine_change(iport, - SCI_PORT_SUB_OPERATIONAL); -} - -static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm) -{ - struct isci_port *iport = container_of(sm, typeof(*iport), sm); - - sci_port_suspend_port_task_scheduler(iport); - if (iport->ready_exit) - sci_port_invalidate_dummy_remote_node(iport); + port_state_machine_change(iport, SCI_PORT_SUB_WAITING); + } else + port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL); } enum sci_status sci_port_start(struct isci_port *iport) @@ -1252,7 +1263,7 @@ enum sci_status sci_port_add_phy(struct isci_port *iport, if (status != SCI_SUCCESS) return status; - sci_port_general_link_up_handler(iport, iphy, true); + sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME); iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING; port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING); @@ -1262,7 +1273,7 @@ enum sci_status sci_port_add_phy(struct isci_port *iport, if (status != SCI_SUCCESS) return status; - 
sci_port_general_link_up_handler(iport, iphy, true); + sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY); /* Re-enter the configuring state since this may be the last phy in * the port. @@ -1338,13 +1349,13 @@ enum sci_status sci_port_link_up(struct isci_port *iport, /* Since this is the first phy going link up for the port we * can just enable it and continue */ - sci_port_activate_phy(iport, iphy, true); + sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME); port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL); return SCI_SUCCESS; case SCI_PORT_SUB_OPERATIONAL: - sci_port_general_link_up_handler(iport, iphy, true); + sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME); return SCI_SUCCESS; case SCI_PORT_RESETTING: /* TODO We should make sure that the phy that has gone @@ -1361,7 +1372,7 @@ enum sci_status sci_port_link_up(struct isci_port *iport, /* In the resetting state we don't notify the user regarding * link up and link down notifications. */ - sci_port_general_link_up_handler(iport, iphy, false); + sci_port_general_link_up_handler(iport, iphy, PF_RESUME); return SCI_SUCCESS; default: dev_warn(sciport_to_dev(iport), @@ -1584,14 +1595,14 @@ static const struct sci_base_state sci_port_state_table[] = { }, [SCI_PORT_SUB_WAITING] = { .enter_state = sci_port_ready_substate_waiting_enter, + .exit_state = scic_sds_port_ready_substate_waiting_exit, }, [SCI_PORT_SUB_OPERATIONAL] = { .enter_state = sci_port_ready_substate_operational_enter, .exit_state = sci_port_ready_substate_operational_exit }, [SCI_PORT_SUB_CONFIGURING] = { - .enter_state = sci_port_ready_substate_configuring_enter, - .exit_state = sci_port_ready_substate_configuring_exit + .enter_state = sci_port_ready_substate_configuring_enter }, [SCI_PORT_RESETTING] = { .exit_state = sci_port_resetting_state_exit @@ -1609,6 +1620,7 @@ void sci_port_construct(struct isci_port *iport, u8 index, iport->logical_port_index = SCIC_SDS_DUMMY_PORT; iport->physical_port_index = index; iport->active_phy_mask = 0; + iport->enabled_phy_mask = 0; iport->last_active_phy = 0; iport->ready_exit = false; diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h index cb5ffbc..0811609 100644 --- a/drivers/scsi/isci/port.h +++ b/drivers/scsi/isci/port.h @@ -63,6 +63,9 @@ #define SCIC_SDS_DUMMY_PORT 0xFF +#define PF_NOTIFY (1 << 0) +#define PF_RESUME (1 << 1) + struct isci_phy; struct isci_host; @@ -83,6 +86,8 @@ enum isci_status { * @logical_port_index: software port index * @physical_port_index: hardware port index * @active_phy_mask: identifies phy members + * @enabled_phy_mask: phy mask for the port + * that are already part of the port * @reserved_tag: * @reserved_rni: reserver for port task scheduler workaround * @started_request_count: reference count for outstanding commands @@ -104,6 +109,7 @@ struct isci_port { u8 logical_port_index; u8 physical_port_index; u8 active_phy_mask; + u8 enabled_phy_mask; u8 last_active_phy; u16 reserved_rni; u16 reserved_tag; @@ -250,6 +256,10 @@ bool sci_port_link_detected( struct isci_port *iport, struct isci_phy *iphy); +enum sci_status sci_port_get_properties( + struct isci_port *iport, + struct sci_port_properties *prop); + enum sci_status sci_port_link_up(struct isci_port *iport, struct isci_phy *iphy); enum sci_status sci_port_link_down(struct isci_port *iport, diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c index 38a99d2..6d1e954 100644 --- a/drivers/scsi/isci/port_config.c +++ b/drivers/scsi/isci/port_config.c @@ -57,7 +57,7 @@ #define 
SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10) #define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10) -#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (100) +#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (250) enum SCIC_SDS_APC_ACTIVITY { SCIC_SDS_APC_SKIP_PHY, @@ -466,6 +466,23 @@ sci_apc_agent_validate_phy_configuration(struct isci_host *ihost, return sci_port_configuration_agent_validate_ports(ihost, port_agent); } +/* + * This routine will restart the automatic port configuration timeout + * timer for the next time period. This could be caused by either a link + * down event or a link up event where we can not yet tell to which a phy + * belongs. + */ +static void sci_apc_agent_start_timer( + struct sci_port_configuration_agent *port_agent, + u32 timeout) +{ + if (port_agent->timer_pending) + sci_del_timer(&port_agent->timer); + + port_agent->timer_pending = true; + sci_mod_timer(&port_agent->timer, timeout); +} + static void sci_apc_agent_configure_ports(struct isci_host *ihost, struct sci_port_configuration_agent *port_agent, struct isci_phy *iphy, @@ -565,17 +582,8 @@ static void sci_apc_agent_configure_ports(struct isci_host *ihost, break; case SCIC_SDS_APC_START_TIMER: - /* - * This can occur for either a link down event, or a link - * up event where we cannot yet tell the port to which a - * phy belongs. - */ - if (port_agent->timer_pending) - sci_del_timer(&port_agent->timer); - - port_agent->timer_pending = true; - sci_mod_timer(&port_agent->timer, - SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION); + sci_apc_agent_start_timer(port_agent, + SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION); break; case SCIC_SDS_APC_SKIP_PHY: @@ -607,7 +615,8 @@ static void sci_apc_agent_link_up(struct isci_host *ihost, if (!iport) { /* the phy is not the part of this port */ port_agent->phy_ready_mask |= 1 << phy_index; - sci_apc_agent_configure_ports(ihost, port_agent, iphy, true); + sci_apc_agent_start_timer(port_agent, + SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION); } else { /* the phy is already the part of the port */ u32 port_state = iport->sm.current_state_id; diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c index b5f4341..9b8117b 100644 --- a/drivers/scsi/isci/probe_roms.c +++ b/drivers/scsi/isci/probe_roms.c @@ -147,7 +147,7 @@ struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmw memcpy(orom, fw->data, fw->size); - if (is_c0(pdev)) + if (is_c0(pdev) || is_c1(pdev)) goto out; /* diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h index 2c75248..bb0e9d4 100644 --- a/drivers/scsi/isci/probe_roms.h +++ b/drivers/scsi/isci/probe_roms.h @@ -152,7 +152,7 @@ struct sci_user_parameters { #define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4 struct sci_oem_params; -int sci_oem_parameters_validate(struct sci_oem_params *oem); +int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version); struct isci_orom; struct isci_orom *isci_request_oprom(struct pci_dev *pdev); @@ -191,6 +191,11 @@ struct isci_oem_hdr { 0x1a, 0x04, 0xc6) #define ISCI_EFI_VAR_NAME "RstScuO" +#define ISCI_ROM_VER_1_0 0x10 +#define ISCI_ROM_VER_1_1 0x11 +#define ISCI_ROM_VER_1_3 0x13 +#define ISCI_ROM_VER_LATEST ISCI_ROM_VER_1_3 + /* Allowed PORT configuration modes APC Automatic PORT configuration mode is * defined by the OEM configuration parameters providing no PHY_MASK parameters * for any PORT. i.e. There are no phys assigned to any of the ports at start. 
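The probe_roms.h hunk above adds explicit OEM ROM layout versions (ISCI_ROM_VER_1_0/1_1/1_3) and threads a version argument into sci_oem_parameters_validate(). A minimal sketch of the version gate this enables, mirroring the orom->hdr.version < ISCI_ROM_VER_1_1 test used in the phy SSC setup earlier in this patch; the helper name below is an illustrative assumption, not part of the driver:

/* Illustrative sketch only (assumes the isci driver headers, e.g. probe_roms.h
 * and isci.h, are in scope): per-phy SAS/SATA SSC spread levels exist only in
 * the 1.1+ OEM layout, while older ROMs carry just the legacy do_enable_ssc
 * byte.
 */
static bool example_orom_has_ssc_levels(const struct isci_orom *orom)
{
	return orom->hdr.version >= ISCI_ROM_VER_1_1;
}

When such a check returns false, a caller would fall back to the pre-1.1 behaviour of enabling SSC for both SAS and SATA, which is exactly what the phy.c hunk earlier in this patch does.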
@@ -220,8 +225,86 @@ struct sci_oem_params { struct { uint8_t mode_type; uint8_t max_concurr_spin_up; - uint8_t do_enable_ssc; - uint8_t reserved; + /* + * This bitfield indicates the OEM's desired default Tx + * Spread Spectrum Clocking (SSC) settings for SATA and SAS. + * NOTE: Default SSC Modulation Frequency is 31.5KHz. + */ + union { + struct { + /* + * NOTE: Max spread for SATA is +0 / -5000 PPM. + * Down-spreading SSC (only method allowed for SATA): + * SATA SSC Tx Disabled = 0x0 + * SATA SSC Tx at +0 / -1419 PPM Spread = 0x2 + * SATA SSC Tx at +0 / -2129 PPM Spread = 0x3 + * SATA SSC Tx at +0 / -4257 PPM Spread = 0x6 + * SATA SSC Tx at +0 / -4967 PPM Spread = 0x7 + */ + uint8_t ssc_sata_tx_spread_level:4; + /* + * SAS SSC Tx Disabled = 0x0 + * + * NOTE: Max spread for SAS down-spreading +0 / + * -2300 PPM + * Down-spreading SSC: + * SAS SSC Tx at +0 / -1419 PPM Spread = 0x2 + * SAS SSC Tx at +0 / -2129 PPM Spread = 0x3 + * + * NOTE: Max spread for SAS center-spreading +2300 / + * -2300 PPM + * Center-spreading SSC: + * SAS SSC Tx at +1064 / -1064 PPM Spread = 0x3 + * SAS SSC Tx at +2129 / -2129 PPM Spread = 0x6 + */ + uint8_t ssc_sas_tx_spread_level:3; + /* + * NOTE: Refer to the SSC section of the SAS 2.x + * Specification for proper setting of this field. + * For standard SAS Initiator SAS PHY operation it + * should be 0 for Down-spreading. + * SAS SSC Tx spread type: + * Down-spreading SSC = 0 + * Center-spreading SSC = 1 + */ + uint8_t ssc_sas_tx_type:1; + }; + uint8_t do_enable_ssc; + }; + /* + * This field indicates length of the SAS/SATA cable between + * host and device. + * This field is used make relationship between analog + * parameters of the phy in the silicon and length of the cable. + * Supported cable attenuation levels: + * "short"- up to 3m, "medium"-3m to 6m, and "long"- more than + * 6m. + * + * This is bit mask field: + * + * BIT: (MSB) 7 6 5 4 + * ASSIGNMENT: <phy3><phy2><phy1><phy0> - Medium cable + * length assignment + * BIT: 3 2 1 0 (LSB) + * ASSIGNMENT: <phy3><phy2><phy1><phy0> - Long cable length + * assignment + * + * BITS 7-4 are set when the cable length is assigned to medium + * BITS 3-0 are set when the cable length is assigned to long + * + * The BIT positions are clear when the cable length is + * assigned to short. + * + * Setting the bits for both long and medium cable length is + * undefined. + * + * A value of 0x84 would assign + * phy3 - medium + * phy2 - long + * phy1 - short + * phy0 - short + */ + uint8_t cable_selection_mask; } controller; struct { diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index b207cd3..dd74b6c 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -53,6 +53,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <scsi/sas.h> +#include <linux/bitops.h> #include "isci.h" #include "port.h" #include "remote_device.h" @@ -1101,6 +1102,7 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport, struct isci_remote_device *idev) { enum sci_status status; + struct sci_port_properties properties; struct domain_device *dev = idev->domain_dev; sci_remote_device_construct(iport, idev); @@ -1110,6 +1112,11 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport, * entries will be needed to store the remote node. */ idev->is_direct_attached = true; + + sci_port_get_properties(iport, &properties); + /* Get accurate port width from port's phy mask for a DA device. 
*/ + idev->device_port_width = hweight32(properties.phy_mask); + status = sci_controller_allocate_remote_node_context(iport->owning_controller, idev, &idev->rnc.remote_node_index); @@ -1125,9 +1132,6 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport, idev->connection_rate = sci_port_get_max_allowed_speed(iport); - /* / @todo Should I assign the port width by reading all of the phys on the port? */ - idev->device_port_width = 1; - return SCI_SUCCESS; } diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 66ad3dc..f5a3f7d 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -496,7 +496,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost, } } - isci_print_tmf(tmf); + isci_print_tmf(ihost, tmf); if (tmf->status == SCI_SUCCESS) ret = TMF_RESP_FUNC_COMPLETE; diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h index bc78c0a..1b27b37 100644 --- a/drivers/scsi/isci/task.h +++ b/drivers/scsi/isci/task.h @@ -106,7 +106,6 @@ struct isci_tmf { } resp; unsigned char lun[8]; u16 io_tag; - struct isci_remote_device *device; enum isci_tmf_function_codes tmf_code; int status; @@ -120,10 +119,10 @@ struct isci_tmf { }; -static inline void isci_print_tmf(struct isci_tmf *tmf) +static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf) { if (SAS_PROTOCOL_SATA == tmf->proto) - dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev, + dev_dbg(&ihost->pdev->dev, "%s: status = %x\n" "tmf->resp.d2h_fis.status = %x\n" "tmf->resp.d2h_fis.error = %x\n", @@ -132,7 +131,7 @@ static inline void isci_print_tmf(struct isci_tmf *tmf) tmf->resp.d2h_fis.status, tmf->resp.d2h_fis.error); else - dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev, + dev_dbg(&ihost->pdev->dev, "%s: status = %x\n" "tmf->resp.resp_iu.data_present = %x\n" "tmf->resp.resp_iu.status = %x\n" diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 7269e92..1d1b0c9 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -61,7 +61,7 @@ static void fc_disc_restart(struct fc_disc *); * Locking Note: This function expects that the lport mutex is locked before * calling it. */ -void fc_disc_stop_rports(struct fc_disc *disc) +static void fc_disc_stop_rports(struct fc_disc *disc) { struct fc_lport *lport; struct fc_rport_priv *rdata; @@ -682,7 +682,7 @@ static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp) * fc_disc_stop() - Stop discovery for a given lport * @lport: The local port that discovery should stop on */ -void fc_disc_stop(struct fc_lport *lport) +static void fc_disc_stop(struct fc_lport *lport) { struct fc_disc *disc = &lport->disc; @@ -698,7 +698,7 @@ void fc_disc_stop(struct fc_lport *lport) * This function will block until discovery has been * completely stopped and all rports have been deleted. 
*/ -void fc_disc_stop_final(struct fc_lport *lport) +static void fc_disc_stop_final(struct fc_lport *lport) { fc_disc_stop(lport); lport->tt.rport_flush_queue(); diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c index fb9161d..e17a28d 100644 --- a/drivers/scsi/libfc/fc_elsct.c +++ b/drivers/scsi/libfc/fc_elsct.c @@ -28,6 +28,7 @@ #include <scsi/fc/fc_els.h> #include <scsi/libfc.h> #include <scsi/fc_encode.h> +#include "fc_libfc.h" /** * fc_elsct_send() - Send an ELS or CT frame diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 9de9db2..4d70d96 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -91,7 +91,7 @@ struct fc_exch_pool { * It manages the allocation of exchange IDs. */ struct fc_exch_mgr { - struct fc_exch_pool *pool; + struct fc_exch_pool __percpu *pool; mempool_t *ep_pool; enum fc_class class; struct kref kref; diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 221875e..f607314 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -155,6 +155,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp) fsp->xfer_ddp = FC_XID_UNKNOWN; atomic_set(&fsp->ref_cnt, 1); init_timer(&fsp->timer); + fsp->timer.data = (unsigned long)fsp; INIT_LIST_HEAD(&fsp->list); spin_lock_init(&fsp->scsi_pkt_lock); } @@ -1850,9 +1851,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd) } put_cpu(); - init_timer(&fsp->timer); - fsp->timer.data = (unsigned long)fsp; - /* * send it to the lower layer * if we get -1 return then put the request in the pending diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index e77094a..83750eb 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -677,7 +677,8 @@ EXPORT_SYMBOL(fc_set_mfs); * @lport: The local port receiving the event * @event: The discovery event */ -void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event) +static void fc_lport_disc_callback(struct fc_lport *lport, + enum fc_disc_event event) { switch (event) { case DISC_EV_SUCCESS: @@ -1568,7 +1569,7 @@ EXPORT_SYMBOL(fc_lport_flogi_resp); * Locking Note: The lport lock is expected to be held before calling * this routine. */ -void fc_lport_enter_flogi(struct fc_lport *lport) +static void fc_lport_enter_flogi(struct fc_lport *lport) { struct fc_frame *fp; diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index b9e4348..83aa1ef 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -391,7 +391,7 @@ static void fc_rport_work(struct work_struct *work) * If it appears we are already logged in, ADISC is used to verify * the setup. */ -int fc_rport_login(struct fc_rport_priv *rdata) +static int fc_rport_login(struct fc_rport_priv *rdata) { mutex_lock(&rdata->rp_mutex); @@ -451,7 +451,7 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata, * function will hold the rport lock, call an _enter_* * function and then unlock the rport. 
*/ -int fc_rport_logoff(struct fc_rport_priv *rdata) +static int fc_rport_logoff(struct fc_rport_priv *rdata) { mutex_lock(&rdata->rp_mutex); @@ -653,8 +653,8 @@ static int fc_rport_login_complete(struct fc_rport_priv *rdata, * @fp: The FLOGI response frame * @rp_arg: The remote port that received the FLOGI response */ -void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, - void *rp_arg) +static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, + void *rp_arg) { struct fc_rport_priv *rdata = rp_arg; struct fc_lport *lport = rdata->local_port; @@ -1520,7 +1520,7 @@ reject: * * Locking Note: Called with the lport lock held. */ -void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp) +static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp) { struct fc_seq_els_data els_data; diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c index 4ceeace..70eb1f7 100644 --- a/drivers/scsi/mac_esp.c +++ b/drivers/scsi/mac_esp.c @@ -565,8 +565,7 @@ static int __devinit esp_mac_probe(struct platform_device *dev) esp_chips[dev->id] = esp; mb(); if (esp_chips[!dev->id] == NULL) { - err = request_irq(host->irq, mac_scsi_esp_intr, 0, - "Mac ESP", NULL); + err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL); if (err < 0) { esp_chips[dev->id] = NULL; goto fail_free_priv; diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index ea2bde2..2bccfbe 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -339,9 +339,6 @@ static void mac_scsi_reset_boot(struct Scsi_Host *instance) printk(KERN_INFO "Macintosh SCSI: resetting the SCSI bus..." ); - /* switch off SCSI IRQ - catch an interrupt without IRQ bit set else */ - disable_irq(IRQ_MAC_SCSI); - /* get in phase */ NCR5380_write( TARGET_COMMAND_REG, PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) )); @@ -357,9 +354,6 @@ static void mac_scsi_reset_boot(struct Scsi_Host *instance) for( end = jiffies + AFTER_RESET_DELAY; time_before(jiffies, end); ) barrier(); - /* switch on SCSI IRQ again */ - enable_irq(IRQ_MAC_SCSI); - printk(KERN_INFO " done\n" ); } #endif diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 5c17764..15eefa1 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -306,19 +306,22 @@ mega_query_adapter(adapter_t *adapter) adapter->host->sg_tablesize = adapter->sglen; - /* use HP firmware and bios version encoding */ + /* use HP firmware and bios version encoding + Note: fw_version[0|1] and bios_version[0|1] were originally shifted + right 8 bits making them zero. This 0 value was hardcoded to fix + sparse warnings. 
*/ if (adapter->product_info.subsysvid == HP_SUBSYS_VID) { sprintf (adapter->fw_version, "%c%d%d.%d%d", adapter->product_info.fw_version[2], - adapter->product_info.fw_version[1] >> 8, + 0, adapter->product_info.fw_version[1] & 0x0f, - adapter->product_info.fw_version[0] >> 8, + 0, adapter->product_info.fw_version[0] & 0x0f); sprintf (adapter->bios_version, "%c%d%d.%d%d", adapter->product_info.bios_version[2], - adapter->product_info.bios_version[1] >> 8, + 0, adapter->product_info.bios_version[1] & 0x0f, - adapter->product_info.bios_version[0] >> 8, + 0, adapter->product_info.bios_version[0] & 0x0f); } else { memcpy(adapter->fw_version, diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index dd94c7d..e5f416f 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -33,9 +33,9 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "00.00.06.12-rc1" -#define MEGASAS_RELDATE "Oct. 5, 2011" -#define MEGASAS_EXT_VERSION "Wed. Oct. 5 17:00:00 PDT 2011" +#define MEGASAS_VERSION "00.00.06.14-rc1" +#define MEGASAS_RELDATE "Jan. 6, 2012" +#define MEGASAS_EXT_VERSION "Fri. Jan. 6 17:00:00 PDT 2012" /* * Device IDs @@ -773,7 +773,6 @@ struct megasas_ctrl_info { #define MFI_OB_INTR_STATUS_MASK 0x00000002 #define MFI_POLL_TIMEOUT_SECS 60 -#define MEGASAS_COMPLETION_TIMER_INTERVAL (HZ/10) #define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000 #define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001 @@ -1353,7 +1352,6 @@ struct megasas_instance { u32 mfiStatus; u32 last_seq_num; - struct timer_list io_completion_timer; struct list_head internal_reset_pending_q; /* Ptr to hba specific information */ diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 29a994f..8b300be 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -18,7 +18,7 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * FILE: megaraid_sas_base.c - * Version : v00.00.06.12-rc1 + * Version : v00.00.06.14-rc1 * * Authors: LSI Corporation * Sreenivas Bagalkote @@ -59,14 +59,6 @@ #include "megaraid_sas.h" /* - * poll_mode_io:1- schedule complete completion from q cmd - */ -static unsigned int poll_mode_io; -module_param_named(poll_mode_io, poll_mode_io, int, 0); -MODULE_PARM_DESC(poll_mode_io, - "Complete cmds from IO path, (default=0)"); - -/* * Number of sectors per IO command * Will be set in megasas_init_mfi if user does not provide */ @@ -1439,11 +1431,6 @@ megasas_build_and_issue_cmd(struct megasas_instance *instance, instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, cmd->frame_count-1, instance->reg_set); - /* - * Check if we have pend cmds to be completed - */ - if (poll_mode_io && atomic_read(&instance->fw_outstanding)) - tasklet_schedule(&instance->isr_tasklet); return 0; out_return_cmd: @@ -3370,47 +3357,6 @@ fail_fw_init: return -EINVAL; } -/** - * megasas_start_timer - Initializes a timer object - * @instance: Adapter soft state - * @timer: timer object to be initialized - * @fn: timer function - * @interval: time interval between timer function call - */ -static inline void -megasas_start_timer(struct megasas_instance *instance, - struct timer_list *timer, - void *fn, unsigned long interval) -{ - init_timer(timer); - timer->expires = jiffies + interval; - timer->data = (unsigned long)instance; - timer->function = fn; - add_timer(timer); -} - -/** - * megasas_io_completion_timer - Timer fn - * @instance_addr: 
Address of adapter soft state - * - * Schedules tasklet for cmd completion - * if poll_mode_io is set - */ -static void -megasas_io_completion_timer(unsigned long instance_addr) -{ - struct megasas_instance *instance = - (struct megasas_instance *)instance_addr; - - if (atomic_read(&instance->fw_outstanding)) - tasklet_schedule(&instance->isr_tasklet); - - /* Restart timer */ - if (poll_mode_io) - mod_timer(&instance->io_completion_timer, - jiffies + MEGASAS_COMPLETION_TIMER_INTERVAL); -} - static u32 megasas_init_adapter_mfi(struct megasas_instance *instance) { @@ -3638,11 +3584,6 @@ static int megasas_init_fw(struct megasas_instance *instance) tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, (unsigned long)instance); - /* Initialize the cmd completion timer */ - if (poll_mode_io) - megasas_start_timer(instance, &instance->io_completion_timer, - megasas_io_completion_timer, - MEGASAS_COMPLETION_TIMER_INTERVAL); return 0; fail_init_adapter: @@ -4369,9 +4310,6 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state) host = instance->host; instance->unload = 1; - if (poll_mode_io) - del_timer_sync(&instance->io_completion_timer); - megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); @@ -4511,12 +4449,6 @@ megasas_resume(struct pci_dev *pdev) } instance->instancet->enable_intr(instance->reg_set); - - /* Initialize the cmd completion timer */ - if (poll_mode_io) - megasas_start_timer(instance, &instance->io_completion_timer, - megasas_io_completion_timer, - MEGASAS_COMPLETION_TIMER_INTERVAL); instance->unload = 0; /* @@ -4570,9 +4502,6 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev) host = instance->host; fusion = instance->ctrl_context; - if (poll_mode_io) - del_timer_sync(&instance->io_completion_timer); - scsi_remove_host(instance->host); megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); @@ -4773,6 +4702,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); cmd->frame->hdr.context = cmd->index; cmd->frame->hdr.pad_0 = 0; + cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 | + MFI_FRAME_SENSE64); /* * The management interface between applications and the fw uses @@ -5219,60 +5150,6 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl, megasas_sysfs_set_dbg_lvl); -static ssize_t -megasas_sysfs_show_poll_mode_io(struct device_driver *dd, char *buf) -{ - return sprintf(buf, "%u\n", poll_mode_io); -} - -static ssize_t -megasas_sysfs_set_poll_mode_io(struct device_driver *dd, - const char *buf, size_t count) -{ - int retval = count; - int tmp = poll_mode_io; - int i; - struct megasas_instance *instance; - - if (sscanf(buf, "%u", &poll_mode_io) < 1) { - printk(KERN_ERR "megasas: could not set poll_mode_io\n"); - retval = -EINVAL; - } - - /* - * Check if poll_mode_io is already set or is same as previous value - */ - if ((tmp && poll_mode_io) || (tmp == poll_mode_io)) - goto out; - - if (poll_mode_io) { - /* - * Start timers for all adapters - */ - for (i = 0; i < megasas_mgmt_info.max_index; i++) { - instance = megasas_mgmt_info.instance[i]; - if (instance) { - megasas_start_timer(instance, - &instance->io_completion_timer, - megasas_io_completion_timer, - MEGASAS_COMPLETION_TIMER_INTERVAL); - } - } - } else { - /* - * Delete timers for all adapters - */ - for (i = 0; i < 
megasas_mgmt_info.max_index; i++) { - instance = megasas_mgmt_info.instance[i]; - if (instance) - del_timer_sync(&instance->io_completion_timer); - } - } - -out: - return retval; -} - static void megasas_aen_polling(struct work_struct *work) { @@ -5502,11 +5379,6 @@ megasas_aen_polling(struct work_struct *work) kfree(ev); } - -static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR, - megasas_sysfs_show_poll_mode_io, - megasas_sysfs_set_poll_mode_io); - /** * megasas_init - Driver load entry point */ @@ -5566,11 +5438,6 @@ static int __init megasas_init(void) if (rval) goto err_dcf_dbg_lvl; rval = driver_create_file(&megasas_pci_driver.driver, - &driver_attr_poll_mode_io); - if (rval) - goto err_dcf_poll_mode_io; - - rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_support_device_change); if (rval) goto err_dcf_support_device_change; @@ -5579,10 +5446,6 @@ static int __init megasas_init(void) err_dcf_support_device_change: driver_remove_file(&megasas_pci_driver.driver, - &driver_attr_poll_mode_io); - -err_dcf_poll_mode_io: - driver_remove_file(&megasas_pci_driver.driver, &driver_attr_dbg_lvl); err_dcf_dbg_lvl: driver_remove_file(&megasas_pci_driver.driver, @@ -5607,8 +5470,6 @@ err_pcidrv: static void __exit megasas_exit(void) { driver_remove_file(&megasas_pci_driver.driver, - &driver_attr_poll_mode_io); - driver_remove_file(&megasas_pci_driver.driver, &driver_attr_dbg_lvl); driver_remove_file(&megasas_pci_driver.driver, &driver_attr_support_poll_for_event); diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index 5255dd6..294abb0 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -282,7 +282,9 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, else { *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */ if ((raid->level >= 5) && - (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER)) + ((instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER && + raid->regTypeReqOnRead != REGION_TYPE_UNUSED))) pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE; else if (raid->level == 1) { /* Get alternate Pd. 
*/ diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index 22a3ff0..bfe6854 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h @@ -150,6 +150,8 @@ #define QL4_SESS_RECOVERY_TMO 120 /* iSCSI session */ /* recovery timeout */ +#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8)) +#define LSW(x) ((uint16_t)(x)) #define LSDW(x) ((u32)((u64)(x))) #define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16)) @@ -671,6 +673,7 @@ struct scsi_qla_host { uint16_t pri_ddb_idx; uint16_t sec_ddb_idx; int is_reset; + uint16_t temperature; }; struct ql4_task_data { diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index 1bdfa81..90614f3 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c @@ -697,6 +697,9 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha) writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), &ha->reg->ctrl_status); readl(&ha->reg->ctrl_status); + writel(set_rmask(CSR_SCSI_COMPLETION_INTR), + &ha->reg->ctrl_status); + readl(&ha->reg->ctrl_status); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) { DEBUG2(printk("scsi%ld: %s: Get firmware " diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index c259378..e1e66a4 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c @@ -219,6 +219,13 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, ha->mailbox_timeout_count++; mbx_sts[0] = (-1); set_bit(DPC_RESET_HA, &ha->dpc_flags); + if (is_qla8022(ha)) { + ql4_printk(KERN_INFO, ha, + "disabling pause transmit on port 0 & 1.\n"); + qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, + CRB_NIU_XG_PAUSE_CTL_P0 | + CRB_NIU_XG_PAUSE_CTL_P1); + } goto mbox_exit; } diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index 8d6bc1b..78f1111 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c @@ -1875,6 +1875,11 @@ exit: int qla4_8xxx_load_risc(struct scsi_qla_host *ha) { int retval; + + /* clear the interrupt */ + writel(0, &ha->qla4_8xxx_reg->host_int); + readl(&ha->qla4_8xxx_reg->host_int); + retval = qla4_8xxx_device_state_handler(ha); if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags)) diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h index 35376a1..dc45ac9 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.h +++ b/drivers/scsi/qla4xxx/ql4_nx.h @@ -19,12 +19,28 @@ #define PHAN_PEG_RCV_INITIALIZED 0xff01 /*CRB_RELATED*/ -#define QLA82XX_CRB_BASE QLA82XX_CAM_RAM(0x200) -#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X)) - +#define QLA82XX_CRB_BASE (QLA82XX_CAM_RAM(0x200)) +#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X)) #define CRB_CMDPEG_STATE QLA82XX_REG(0x50) #define CRB_RCVPEG_STATE QLA82XX_REG(0x13c) #define CRB_DMA_SHIFT QLA82XX_REG(0xcc) +#define CRB_TEMP_STATE QLA82XX_REG(0x1b4) + +#define qla82xx_get_temp_val(x) ((x) >> 16) +#define qla82xx_get_temp_state(x) ((x) & 0xffff) +#define qla82xx_encode_temp(val, state) (((val) << 16) | (state)) + +/* + * Temperature control. + */ +enum { + QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */ + QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */ + QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. 
*/ +}; + +#define CRB_NIU_XG_PAUSE_CTL_P0 0x1 +#define CRB_NIU_XG_PAUSE_CTL_P1 0x8 #define QLA82XX_HW_H0_CH_HUB_ADR 0x05 #define QLA82XX_HW_H1_CH_HUB_ADR 0x0E diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index ec393a00..ce6d3b7 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -35,43 +35,44 @@ static struct kmem_cache *srb_cachep; int ql4xdisablesysfsboot = 1; module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ql4xdisablesysfsboot, - "Set to disable exporting boot targets to sysfs\n" - " 0 - Export boot targets\n" - " 1 - Do not export boot targets (Default)"); + " Set to disable exporting boot targets to sysfs.\n" + "\t\t 0 - Export boot targets\n" + "\t\t 1 - Do not export boot targets (Default)"); int ql4xdontresethba = 0; module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ql4xdontresethba, - "Don't reset the HBA for driver recovery \n" - " 0 - It will reset HBA (Default)\n" - " 1 - It will NOT reset HBA"); + " Don't reset the HBA for driver recovery.\n" + "\t\t 0 - It will reset HBA (Default)\n" + "\t\t 1 - It will NOT reset HBA"); -int ql4xextended_error_logging = 0; /* 0 = off, 1 = log errors */ +int ql4xextended_error_logging; module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ql4xextended_error_logging, - "Option to enable extended error logging, " - "Default is 0 - no logging, 1 - debug logging"); + " Option to enable extended error logging.\n" + "\t\t 0 - no logging (Default)\n" + "\t\t 2 - debug logging"); int ql4xenablemsix = 1; module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql4xenablemsix, - "Set to enable MSI or MSI-X interrupt mechanism.\n" - " 0 = enable INTx interrupt mechanism.\n" - " 1 = enable MSI-X interrupt mechanism (Default).\n" - " 2 = enable MSI interrupt mechanism."); + " Set to enable MSI or MSI-X interrupt mechanism.\n" + "\t\t 0 = enable INTx interrupt mechanism.\n" + "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n" + "\t\t 2 = enable MSI interrupt mechanism."); #define QL4_DEF_QDEPTH 32 static int ql4xmaxqdepth = QL4_DEF_QDEPTH; module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ql4xmaxqdepth, - "Maximum queue depth to report for target devices.\n" - " Default: 32."); + " Maximum queue depth to report for target devices.\n" + "\t\t Default: 32."); static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO; module_param(ql4xsess_recovery_tmo, int, S_IRUGO); MODULE_PARM_DESC(ql4xsess_recovery_tmo, "Target Session Recovery Timeout.\n" - " Default: 120 sec."); + "\t\t Default: 120 sec."); static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha); /* @@ -1630,7 +1631,9 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha, /* Update timers after login */ ddb_entry->default_relogin_timeout = - le16_to_cpu(fw_ddb_entry->def_timeout); + (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) && + (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ? + le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV; ddb_entry->default_time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); @@ -1970,6 +1973,42 @@ mem_alloc_error_exit: } /** + * qla4_8xxx_check_temp - Check the ISP82XX temperature. + * @ha: adapter block pointer. + * + * Note: The caller should not hold the idc lock. 
+ **/ +static int qla4_8xxx_check_temp(struct scsi_qla_host *ha) +{ + uint32_t temp, temp_state, temp_val; + int status = QLA_SUCCESS; + + temp = qla4_8xxx_rd_32(ha, CRB_TEMP_STATE); + + temp_state = qla82xx_get_temp_state(temp); + temp_val = qla82xx_get_temp_val(temp); + + if (temp_state == QLA82XX_TEMP_PANIC) { + ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C" + " exceeds maximum allowed. Hardware has been shut" + " down.\n", temp_val); + status = QLA_ERROR; + } else if (temp_state == QLA82XX_TEMP_WARN) { + if (ha->temperature == QLA82XX_TEMP_NORMAL) + ql4_printk(KERN_WARNING, ha, "Device temperature %d" + " degrees C exceeds operating range." + " Immediate action needed.\n", temp_val); + } else { + if (ha->temperature == QLA82XX_TEMP_WARN) + ql4_printk(KERN_INFO, ha, "Device temperature is" + " now %d degrees C in normal range.\n", + temp_val); + } + ha->temperature = temp_state; + return status; +} + +/** * qla4_8xxx_check_fw_alive - Check firmware health * @ha: Pointer to host adapter structure. * @@ -2040,7 +2079,16 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha) test_bit(DPC_RESET_HA, &ha->dpc_flags) || test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) { dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); - if (dev_state == QLA82XX_DEV_NEED_RESET && + + if (qla4_8xxx_check_temp(ha)) { + ql4_printk(KERN_INFO, ha, "disabling pause" + " transmit on port 0 & 1.\n"); + qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, + CRB_NIU_XG_PAUSE_CTL_P0 | + CRB_NIU_XG_PAUSE_CTL_P1); + set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); + qla4xxx_wake_dpc(ha); + } else if (dev_state == QLA82XX_DEV_NEED_RESET && !test_bit(DPC_RESET_HA, &ha->dpc_flags)) { if (!ql4xdontresethba) { ql4_printk(KERN_INFO, ha, "%s: HW State: " @@ -2057,9 +2105,21 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha) } else { /* Check firmware health */ if (qla4_8xxx_check_fw_alive(ha)) { + ql4_printk(KERN_INFO, ha, "disabling pause" + " transmit on port 0 & 1.\n"); + qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, + CRB_NIU_XG_PAUSE_CTL_P0 | + CRB_NIU_XG_PAUSE_CTL_P1); halt_status = qla4_8xxx_rd_32(ha, QLA82XX_PEG_HALT_STATUS1); + if (LSW(MSB(halt_status)) == 0x67) + ql4_printk(KERN_ERR, ha, "%s:" + " Firmware aborted with" + " error code 0x00006700." 
+ " Device is being reset\n", + __func__); + /* Since we cannot change dev_state in interrupt * context, set appropriate DPC flag then wakeup * DPC */ @@ -2078,7 +2138,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha) } } -void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) +static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) { struct iscsi_session *sess; struct ddb_entry *ddb_entry; @@ -3826,16 +3886,14 @@ exit_check: return ret; } -static void qla4xxx_free_nt_list(struct list_head *list_nt) +static void qla4xxx_free_ddb_list(struct list_head *list_ddb) { - struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp; + struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; - /* Free up the normaltargets list */ - list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) { - list_del_init(&nt_ddb_idx->list); - vfree(nt_ddb_idx); + list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { + list_del_init(&ddb_idx->list); + vfree(ddb_idx); } - } static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha, @@ -3884,6 +3942,8 @@ static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx) static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry) { + uint16_t def_timeout; + ddb_entry->ddb_type = FLASH_DDB; ddb_entry->fw_ddb_index = INVALID_ENTRY; ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; @@ -3894,9 +3954,10 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha, atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); atomic_set(&ddb_entry->relogin_timer, 0); atomic_set(&ddb_entry->relogin_retry_count, 0); - + def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout); ddb_entry->default_relogin_timeout = - le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout); + (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ? + def_timeout : LOGIN_TOV; ddb_entry->default_time2wait = le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait); } @@ -3934,7 +3995,6 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha) ip_state == IP_ADDRSTATE_DEPRICATED || ip_state == IP_ADDRSTATE_DISABLING) ip_idx[idx] = -1; - } /* Break if all IP states checked */ @@ -3947,58 +4007,37 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha) } while (time_after(wtime, jiffies)); } -void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset) +static void qla4xxx_build_st_list(struct scsi_qla_host *ha, + struct list_head *list_st) { + struct qla_ddb_index *st_ddb_idx; int max_ddbs; + int fw_idx_size; + struct dev_db_entry *fw_ddb_entry; + dma_addr_t fw_ddb_dma; int ret; uint32_t idx = 0, next_idx = 0; uint32_t state = 0, conn_err = 0; - uint16_t conn_id; - struct dev_db_entry *fw_ddb_entry; - struct ddb_entry *ddb_entry = NULL; - dma_addr_t fw_ddb_dma; - struct iscsi_cls_session *cls_sess; - struct iscsi_session *sess; - struct iscsi_cls_conn *cls_conn; - struct iscsi_endpoint *ep; - uint16_t cmds_max = 32, tmo = 0; - uint32_t initial_cmdsn = 0; - struct list_head list_st, list_nt; /* List of sendtargets */ - struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp; - int fw_idx_size; - unsigned long wtime; - struct qla_ddb_index *nt_ddb_idx; - - if (!test_bit(AF_LINK_UP, &ha->flags)) { - set_bit(AF_BUILD_DDB_LIST, &ha->flags); - ha->is_reset = is_reset; - return; - } - max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : - MAX_DEV_DB_ENTRIES; + uint16_t conn_id = 0; fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, &fw_ddb_dma); if (fw_ddb_entry == NULL) { DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); - goto exit_ddb_list; + goto exit_st_list; } - INIT_LIST_HEAD(&list_st); - INIT_LIST_HEAD(&list_nt); + max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : + MAX_DEV_DB_ENTRIES; fw_idx_size = sizeof(struct qla_ddb_index); for (idx = 0; idx < max_ddbs; idx = next_idx) { - ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, - fw_ddb_dma, NULL, - &next_idx, &state, &conn_err, - NULL, &conn_id); + ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, + NULL, &next_idx, &state, + &conn_err, NULL, &conn_id); if (ret == QLA_ERROR) break; - if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS) - goto continue_next_st; - /* Check if ST, add to the list_st */ if (strlen((char *) fw_ddb_entry->iscsi_name) != 0) goto continue_next_st; @@ -4009,59 +4048,155 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset) st_ddb_idx->fw_ddb_idx = idx; - list_add_tail(&st_ddb_idx->list, &list_st); + list_add_tail(&st_ddb_idx->list, list_st); continue_next_st: if (next_idx == 0) break; } - /* Before issuing conn open mbox, ensure all IPs states are configured - * Note, conn open fails if IPs are not configured +exit_st_list: + if (fw_ddb_entry) + dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); +} + +/** + * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list + * @ha: pointer to adapter structure + * @list_ddb: List from which failed ddb to be removed + * + * Iterate over the list of DDBs and find and remove DDBs that are either in + * no connection active state or failed state + **/ +static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha, + struct list_head *list_ddb) +{ + struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; + uint32_t next_idx = 0; + uint32_t state = 0, conn_err = 0; + int ret; + + list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { + ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx, + NULL, 0, NULL, &next_idx, &state, + &conn_err, NULL, NULL); + if (ret == QLA_ERROR) + continue; + + if (state == DDB_DS_NO_CONNECTION_ACTIVE || + state == DDB_DS_SESSION_FAILED) { + list_del_init(&ddb_idx->list); + vfree(ddb_idx); + } + } +} + +static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + int is_reset) +{ + struct iscsi_cls_session *cls_sess; + struct iscsi_session *sess; + struct iscsi_cls_conn *cls_conn; + struct iscsi_endpoint *ep; + uint16_t cmds_max = 32; + uint16_t conn_id = 0; + uint32_t initial_cmdsn = 0; + int ret = QLA_SUCCESS; + + struct ddb_entry *ddb_entry = NULL; + + /* Create session object, with INVALID_ENTRY, + * the targer_id would get set when we issue the login */ - qla4xxx_wait_for_ip_configuration(ha); + cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host, + cmds_max, sizeof(struct ddb_entry), + sizeof(struct ql4_task_data), + initial_cmdsn, INVALID_ENTRY); + if (!cls_sess) { + ret = QLA_ERROR; + goto exit_setup; + } - /* Go thru the STs and fire the sendtargets by issuing conn open mbx */ - list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) { - qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx); + /* + * so calling module_put function to decrement the + * reference count. 
+ **/ + module_put(qla4xxx_iscsi_transport.owner); + sess = cls_sess->dd_data; + ddb_entry = sess->dd_data; + ddb_entry->sess = cls_sess; + + cls_sess->recovery_tmo = ql4xsess_recovery_tmo; + memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry, + sizeof(struct dev_db_entry)); + + qla4xxx_setup_flash_ddb_entry(ha, ddb_entry); + + cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id); + + if (!cls_conn) { + ret = QLA_ERROR; + goto exit_setup; } - /* Wait to ensure all sendtargets are done for min 12 sec wait */ - tmo = ((ha->def_timeout < LOGIN_TOV) ? LOGIN_TOV : ha->def_timeout); - DEBUG2(ql4_printk(KERN_INFO, ha, - "Default time to wait for build ddb %d\n", tmo)); + ddb_entry->conn = cls_conn; - wtime = jiffies + (HZ * tmo); - do { - list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, - list) { - ret = qla4xxx_get_fwddb_entry(ha, - st_ddb_idx->fw_ddb_idx, - NULL, 0, NULL, &next_idx, - &state, &conn_err, NULL, - NULL); - if (ret == QLA_ERROR) - continue; + /* Setup ep, for displaying attributes in sysfs */ + ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry); + if (ep) { + ep->conn = cls_conn; + cls_conn->ep = ep; + } else { + DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n")); + ret = QLA_ERROR; + goto exit_setup; + } - if (state == DDB_DS_NO_CONNECTION_ACTIVE || - state == DDB_DS_SESSION_FAILED) { - list_del_init(&st_ddb_idx->list); - vfree(st_ddb_idx); - } - } - schedule_timeout_uninterruptible(HZ / 10); - } while (time_after(wtime, jiffies)); + /* Update sess/conn params */ + qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); - /* Free up the sendtargets list */ - list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) { - list_del_init(&st_ddb_idx->list); - vfree(st_ddb_idx); + if (is_reset == RESET_ADAPTER) { + iscsi_block_session(cls_sess); + /* Use the relogin path to discover new devices + * by short-circuting the logic of setting + * timer to relogin - instead set the flags + * to initiate login right away. + */ + set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); + set_bit(DF_RELOGIN, &ddb_entry->flags); } +exit_setup: + return ret; +} + +static void qla4xxx_build_nt_list(struct scsi_qla_host *ha, + struct list_head *list_nt, int is_reset) +{ + struct dev_db_entry *fw_ddb_entry; + dma_addr_t fw_ddb_dma; + int max_ddbs; + int fw_idx_size; + int ret; + uint32_t idx = 0, next_idx = 0; + uint32_t state = 0, conn_err = 0; + uint16_t conn_id = 0; + struct qla_ddb_index *nt_ddb_idx; + + fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, + &fw_ddb_dma); + if (fw_ddb_entry == NULL) { + DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); + goto exit_nt_list; + } + max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : + MAX_DEV_DB_ENTRIES; + fw_idx_size = sizeof(struct qla_ddb_index); + for (idx = 0; idx < max_ddbs; idx = next_idx) { - ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, - fw_ddb_dma, NULL, - &next_idx, &state, &conn_err, - NULL, &conn_id); + ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, + NULL, &next_idx, &state, + &conn_err, NULL, &conn_id); if (ret == QLA_ERROR) break; @@ -4072,107 +4207,113 @@ continue_next_st: if (strlen((char *) fw_ddb_entry->iscsi_name) == 0) goto continue_next_nt; - if (state == DDB_DS_NO_CONNECTION_ACTIVE || - state == DDB_DS_SESSION_FAILED) { - DEBUG2(ql4_printk(KERN_INFO, ha, - "Adding DDB to session = 0x%x\n", - idx)); - if (is_reset == INIT_ADAPTER) { - nt_ddb_idx = vmalloc(fw_idx_size); - if (!nt_ddb_idx) - break; - - nt_ddb_idx->fw_ddb_idx = idx; - - memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry, - sizeof(struct dev_db_entry)); - - if (qla4xxx_is_flash_ddb_exists(ha, &list_nt, - fw_ddb_entry) == QLA_SUCCESS) { - vfree(nt_ddb_idx); - goto continue_next_nt; - } - list_add_tail(&nt_ddb_idx->list, &list_nt); - } else if (is_reset == RESET_ADAPTER) { - if (qla4xxx_is_session_exists(ha, - fw_ddb_entry) == QLA_SUCCESS) - goto continue_next_nt; - } + if (!(state == DDB_DS_NO_CONNECTION_ACTIVE || + state == DDB_DS_SESSION_FAILED)) + goto continue_next_nt; - /* Create session object, with INVALID_ENTRY, - * the targer_id would get set when we issue the login - */ - cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, - ha->host, cmds_max, - sizeof(struct ddb_entry), - sizeof(struct ql4_task_data), - initial_cmdsn, INVALID_ENTRY); - if (!cls_sess) - goto exit_ddb_list; + DEBUG2(ql4_printk(KERN_INFO, ha, + "Adding DDB to session = 0x%x\n", idx)); + if (is_reset == INIT_ADAPTER) { + nt_ddb_idx = vmalloc(fw_idx_size); + if (!nt_ddb_idx) + break; - /* - * iscsi_session_setup increments the driver reference - * count which wouldn't let the driver to be unloaded. - * so calling module_put function to decrement the - * reference count. - **/ - module_put(qla4xxx_iscsi_transport.owner); - sess = cls_sess->dd_data; - ddb_entry = sess->dd_data; - ddb_entry->sess = cls_sess; + nt_ddb_idx->fw_ddb_idx = idx; - cls_sess->recovery_tmo = ql4xsess_recovery_tmo; - memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry, + memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry, sizeof(struct dev_db_entry)); - qla4xxx_setup_flash_ddb_entry(ha, ddb_entry); - - cls_conn = iscsi_conn_setup(cls_sess, - sizeof(struct qla_conn), - conn_id); - if (!cls_conn) - goto exit_ddb_list; - - ddb_entry->conn = cls_conn; - - /* Setup ep, for displaying attributes in sysfs */ - ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry); - if (ep) { - ep->conn = cls_conn; - cls_conn->ep = ep; - } else { - DEBUG2(ql4_printk(KERN_ERR, ha, - "Unable to get ep\n")); - } - - /* Update sess/conn params */ - qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, - cls_conn); - - if (is_reset == RESET_ADAPTER) { - iscsi_block_session(cls_sess); - /* Use the relogin path to discover new devices - * by short-circuting the logic of setting - * timer to relogin - instead set the flags - * to initiate login right away. 
- */ - set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags); - set_bit(DF_RELOGIN, &ddb_entry->flags); + if (qla4xxx_is_flash_ddb_exists(ha, list_nt, + fw_ddb_entry) == QLA_SUCCESS) { + vfree(nt_ddb_idx); + goto continue_next_nt; } + list_add_tail(&nt_ddb_idx->list, list_nt); + } else if (is_reset == RESET_ADAPTER) { + if (qla4xxx_is_session_exists(ha, fw_ddb_entry) == + QLA_SUCCESS) + goto continue_next_nt; } + + ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset); + if (ret == QLA_ERROR) + goto exit_nt_list; + continue_next_nt: if (next_idx == 0) break; } -exit_ddb_list: - qla4xxx_free_nt_list(&list_nt); + +exit_nt_list: if (fw_ddb_entry) dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); +} + +/** + * qla4xxx_build_ddb_list - Build ddb list and setup sessions + * @ha: pointer to adapter structure + * @is_reset: Is this init path or reset path + * + * Create a list of sendtargets (st) from firmware DDBs, issue send targets + * using connection open, then create the list of normal targets (nt) + * from firmware DDBs. Based on the list of nt setup session and connection + * objects. + **/ +void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset) +{ + uint16_t tmo = 0; + struct list_head list_st, list_nt; + struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp; + unsigned long wtime; + + if (!test_bit(AF_LINK_UP, &ha->flags)) { + set_bit(AF_BUILD_DDB_LIST, &ha->flags); + ha->is_reset = is_reset; + return; + } + + INIT_LIST_HEAD(&list_st); + INIT_LIST_HEAD(&list_nt); + + qla4xxx_build_st_list(ha, &list_st); + + /* Before issuing conn open mbox, ensure all IPs states are configured + * Note, conn open fails if IPs are not configured + */ + qla4xxx_wait_for_ip_configuration(ha); + + /* Go thru the STs and fire the sendtargets by issuing conn open mbx */ + list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) { + qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx); + } + + /* Wait to ensure all sendtargets are done for min 12 sec wait */ + tmo = ((ha->def_timeout > LOGIN_TOV) && + (ha->def_timeout < LOGIN_TOV * 10) ? + ha->def_timeout : LOGIN_TOV); + + DEBUG2(ql4_printk(KERN_INFO, ha, + "Default time to wait for build ddb %d\n", tmo)); + + wtime = jiffies + (HZ * tmo); + do { + if (list_empty(&list_st)) + break; + + qla4xxx_remove_failed_ddb(ha, &list_st); + schedule_timeout_uninterruptible(HZ / 10); + } while (time_after(wtime, jiffies)); + + /* Free up the sendtargets list */ + qla4xxx_free_ddb_list(&list_st); + + qla4xxx_build_nt_list(ha, &list_nt, is_reset); + + qla4xxx_free_ddb_list(&list_nt); qla4xxx_free_ddb_index(ha); } - /** * qla4xxx_probe_adapter - callback function to probe HBA * @pdev: pointer to pci_dev structure diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h index 26a3fa3..133989b 100644 --- a/drivers/scsi/qla4xxx/ql4_version.h +++ b/drivers/scsi/qla4xxx/ql4_version.h @@ -5,4 +5,4 @@ * See LICENSE.qla4xxx for copyright and licensing details. 
*/ -#define QLA4XXX_DRIVER_VERSION "5.02.00-k10" +#define QLA4XXX_DRIVER_VERSION "5.02.00-k12" diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index f85cfa6..b2c95db 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1316,15 +1316,10 @@ static inline int scsi_target_queue_ready(struct Scsi_Host *shost, } if (scsi_target_is_busy(starget)) { - if (list_empty(&sdev->starved_entry)) - list_add_tail(&sdev->starved_entry, - &shost->starved_list); + list_move_tail(&sdev->starved_entry, &shost->starved_list); return 0; } - /* We're OK to process the command, so we can't be starved */ - if (!list_empty(&sdev->starved_entry)) - list_del_init(&sdev->starved_entry); return 1; } diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 1b21491..f59d4a0 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -3048,7 +3048,8 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles) spin_lock_irqsave(shost->host_lock, flags); rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT | - FC_RPORT_DEVLOSS_PENDING); + FC_RPORT_DEVLOSS_PENDING | + FC_RPORT_DEVLOSS_CALLBK_DONE); spin_unlock_irqrestore(shost->host_lock, flags); /* ensure any stgt delete functions are done */ diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 02d9998..eacd46b 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -2368,16 +2368,15 @@ static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer, size_t count, loff_t *off) { - int num; - char buff[11]; + int err; + unsigned long num; if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; - num = (count < 10) ? count : 10; - if (copy_from_user(buff, buffer, num)) - return -EFAULT; - buff[num] = '\0'; - sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0; + err = kstrtoul_from_user(buffer, count, 0, &num); + if (err) + return err; + sg_allow_dio = num ? 1 : 0; return count; } @@ -2390,17 +2389,15 @@ static ssize_t sg_proc_write_dressz(struct file *filp, const char __user *buffer, size_t count, loff_t *off) { - int num; + int err; unsigned long k = ULONG_MAX; - char buff[11]; if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; - num = (count < 10) ? count : 10; - if (copy_from_user(buff, buffer, num)) - return -EFAULT; - buff[num] = '\0'; - k = simple_strtoul(buff, NULL, 10); + + err = kstrtoul_from_user(buffer, count, 0, &k); + if (err) + return err; if (k <= 1048576) { /* limit "big buff" to 1 MB */ sg_big_buff = k; return count; diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index b4543f5..36d1ed7 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c @@ -839,6 +839,10 @@ static void sym53c8xx_slave_destroy(struct scsi_device *sdev) struct sym_lcb *lp = sym_lp(tp, sdev->lun); unsigned long flags; + /* if slave_alloc returned before allocating a sym_lcb, return */ + if (!lp) + return; + spin_lock_irqsave(np->s.host->host_lock, flags); if (lp->busy_itlq || lp->busy_itl) { diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 3f9a47e..8293658 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -299,7 +299,7 @@ config SPI_S3C24XX_FIQ config SPI_S3C64XX tristate "Samsung S3C64XX series type SPI" - depends on (ARCH_S3C64XX || ARCH_S5P64X0) + depends on (ARCH_S3C64XX || ARCH_S5P64X0 || ARCH_EXYNOS) select S3C64XX_DMA if ARCH_S3C64XX help SPI driver for Samsung S3C64XX and newer SoCs. 
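The spi driver hunks that follow (spi-dw-mid, spi-ep93xx, spi-pl022 and spi-topcliff-pch) all make the same conversion in their slave-DMA paths: the old enum dma_data_direction values DMA_TO_DEVICE / DMA_FROM_DEVICE are replaced by the enum dma_transfer_direction values DMA_MEM_TO_DEV / DMA_DEV_TO_MEM, both in struct dma_slave_config and in the direction argument of device_prep_slave_sg(). A minimal sketch of a TX transfer prepared with the new constants is shown below; it only mirrors the call pattern used in these hunks, and the function name, burst size and local variables are illustrative rather than taken from any of the drivers touched here.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Illustrative sketch: a memory-to-device slave transfer using the
 * dma_transfer_direction constants adopted throughout the hunks below. */
static struct dma_async_tx_descriptor *
example_prep_tx(struct dma_chan *txchan, struct scatterlist *sgl,
		unsigned int nents, dma_addr_t fifo_addr)
{
	struct dma_slave_config txconf = {
		.direction	= DMA_MEM_TO_DEV,	/* was DMA_TO_DEVICE */
		.dst_addr	= fifo_addr,		/* peripheral FIFO bus address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,			/* arbitrary example burst */
	};

	if (dmaengine_slave_config(txchan, &txconf))
		return NULL;

	/* The direction argument is now an enum dma_transfer_direction. */
	return txchan->device->device_prep_slave_sg(txchan, sgl, nents,
						    DMA_MEM_TO_DEV,
						    DMA_PREP_INTERRUPT |
						    DMA_CTRL_ACK);
}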
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index e743a45..8418eb0 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c @@ -131,7 +131,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) rxchan = dws->rxchan; /* 2. Prepare the TX dma transfer */ - txconf.direction = DMA_TO_DEVICE; + txconf.direction = DMA_MEM_TO_DEV; txconf.dst_addr = dws->dma_addr; txconf.dst_maxburst = LNW_DMA_MSIZE_16; txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; @@ -147,13 +147,13 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) txdesc = txchan->device->device_prep_slave_sg(txchan, &dws->tx_sgl, 1, - DMA_TO_DEVICE, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); txdesc->callback = dw_spi_dma_done; txdesc->callback_param = dws; /* 3. Prepare the RX dma transfer */ - rxconf.direction = DMA_FROM_DEVICE; + rxconf.direction = DMA_DEV_TO_MEM; rxconf.src_addr = dws->dma_addr; rxconf.src_maxburst = LNW_DMA_MSIZE_16; rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; @@ -169,7 +169,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) rxdesc = rxchan->device->device_prep_slave_sg(rxchan, &dws->rx_sgl, 1, - DMA_FROM_DEVICE, + DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); rxdesc->callback = dw_spi_dma_done; rxdesc->callback_param = dws; diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c index 0a282e5..d46e55c 100644 --- a/drivers/spi/spi-ep93xx.c +++ b/drivers/spi/spi-ep93xx.c @@ -551,6 +551,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir) struct dma_async_tx_descriptor *txd; enum dma_slave_buswidth buswidth; struct dma_slave_config conf; + enum dma_transfer_direction slave_dirn; struct scatterlist *sg; struct sg_table *sgt; struct dma_chan *chan; @@ -573,6 +574,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir) conf.src_addr = espi->sspdr_phys; conf.src_addr_width = buswidth; + slave_dirn = DMA_DEV_TO_MEM; } else { chan = espi->dma_tx; buf = t->tx_buf; @@ -580,6 +582,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir) conf.dst_addr = espi->sspdr_phys; conf.dst_addr_width = buswidth; + slave_dirn = DMA_MEM_TO_DEV; } ret = dmaengine_slave_config(chan, &conf); @@ -631,7 +634,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir) return ERR_PTR(-ENOMEM); txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents, - dir, DMA_CTRL_ACK); + slave_dirn, DMA_CTRL_ACK); if (!txd) { dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); return ERR_PTR(-ENOMEM); @@ -979,7 +982,7 @@ static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi) dma_cap_set(DMA_SLAVE, mask); espi->dma_rx_data.port = EP93XX_DMA_SSP; - espi->dma_rx_data.direction = DMA_FROM_DEVICE; + espi->dma_rx_data.direction = DMA_DEV_TO_MEM; espi->dma_rx_data.name = "ep93xx-spi-rx"; espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter, @@ -990,7 +993,7 @@ static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi) } espi->dma_tx_data.port = EP93XX_DMA_SSP; - espi->dma_tx_data.direction = DMA_TO_DEVICE; + espi->dma_tx_data.direction = DMA_MEM_TO_DEV; espi->dma_tx_data.name = "ep93xx-spi-tx"; espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter, diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index f1f5efb..2f9cb43 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -900,11 +900,11 @@ static int configure_dma(struct pl022 *pl022) { struct 
dma_slave_config rx_conf = { .src_addr = SSP_DR(pl022->phybase), - .direction = DMA_FROM_DEVICE, + .direction = DMA_DEV_TO_MEM, }; struct dma_slave_config tx_conf = { .dst_addr = SSP_DR(pl022->phybase), - .direction = DMA_TO_DEVICE, + .direction = DMA_MEM_TO_DEV, }; unsigned int pages; int ret; @@ -1041,7 +1041,7 @@ static int configure_dma(struct pl022 *pl022) rxdesc = rxchan->device->device_prep_slave_sg(rxchan, pl022->sgt_rx.sgl, rx_sglen, - DMA_FROM_DEVICE, + DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!rxdesc) goto err_rxdesc; @@ -1049,7 +1049,7 @@ static int configure_dma(struct pl022 *pl022) txdesc = txchan->device->device_prep_slave_sg(txchan, pl022->sgt_tx.sgl, tx_sglen, - DMA_TO_DEVICE, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!txdesc) goto err_txdesc; diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index 7086583..10182eb 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c @@ -1079,7 +1079,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) } sg = dma->sg_rx_p; desc_rx = dma->chan_rx->device->device_prep_slave_sg(dma->chan_rx, sg, - num, DMA_FROM_DEVICE, + num, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_rx) { dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n", @@ -1124,7 +1124,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) } sg = dma->sg_tx_p; desc_tx = dma->chan_tx->device->device_prep_slave_sg(dma->chan_tx, - sg, num, DMA_TO_DEVICE, + sg, num, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_tx) { dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n", @@ -1720,7 +1720,7 @@ static int pch_spi_resume(struct pci_dev *pdev) #endif -static struct pci_driver pch_spi_pcidev = { +static struct pci_driver pch_spi_pcidev_driver = { .name = "pch_spi", .id_table = pch_spi_pcidev_id, .probe = pch_spi_probe, @@ -1736,7 +1736,7 @@ static int __init pch_spi_init(void) if (ret) return ret; - ret = pci_register_driver(&pch_spi_pcidev); + ret = pci_register_driver(&pch_spi_pcidev_driver); if (ret) return ret; @@ -1746,7 +1746,7 @@ module_init(pch_spi_init); static void __exit pch_spi_exit(void) { - pci_unregister_driver(&pch_spi_pcidev); + pci_unregister_driver(&pch_spi_pcidev_driver); platform_driver_unregister(&pch_spi_pd_driver); } module_exit(pch_spi_exit); diff --git a/drivers/staging/media/go7007/go7007-usb.c b/drivers/staging/media/go7007/go7007-usb.c index 70e006b..5443e25 100644 --- a/drivers/staging/media/go7007/go7007-usb.c +++ b/drivers/staging/media/go7007/go7007-usb.c @@ -1279,3 +1279,4 @@ static struct usb_driver go7007_usb_driver = { }; module_usb_driver(go7007_usb_driver); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 8599545..4426290 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -27,8 +27,7 @@ #include <scsi/scsi_device.h> #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> -#include <target/target_core_tmr.h> -#include <target/target_core_transport.h> +#include <target/target_core_fabric.h> #include "iscsi_target_core.h" #include "iscsi_target_parameters.h" @@ -284,8 +283,8 @@ static struct iscsi_np *iscsit_get_np( sock_in6 = (struct sockaddr_in6 *)sockaddr; sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr; - if (!memcmp((void *)&sock_in6->sin6_addr.in6_u, - (void *)&sock_in6_e->sin6_addr.in6_u, + if (!memcmp(&sock_in6->sin6_addr.in6_u, + 
&sock_in6_e->sin6_addr.in6_u, sizeof(struct in6_addr))) ip_match = 1; @@ -1062,7 +1061,7 @@ attach_cmd: if (ret < 0) return iscsit_add_reject_from_cmd( ISCSI_REASON_BOOKMARK_NO_RESOURCES, - 1, 1, buf, cmd); + 1, 0, buf, cmd); /* * Check the CmdSN against ExpCmdSN/MaxCmdSN here if * the Immediate Bit is not set, and no Immediate @@ -1225,7 +1224,7 @@ static void iscsit_do_crypto_hash_buf( crypto_hash_init(hash); - sg_init_one(&sg, (u8 *)buf, payload_length); + sg_init_one(&sg, buf, payload_length); crypto_hash_update(hash, &sg, payload_length); if (padding) { @@ -1603,7 +1602,7 @@ static int iscsit_handle_nop_out( /* * Attach ping data to struct iscsi_cmd->buf_ptr. */ - cmd->buf_ptr = (void *)ping_data; + cmd->buf_ptr = ping_data; cmd->buf_ptr_size = payload_length; pr_debug("Got %u bytes of NOPOUT ping" @@ -3165,6 +3164,30 @@ static int iscsit_send_task_mgt_rsp( return 0; } +static bool iscsit_check_inaddr_any(struct iscsi_np *np) +{ + bool ret = false; + + if (np->np_sockaddr.ss_family == AF_INET6) { + const struct sockaddr_in6 sin6 = { + .sin6_addr = IN6ADDR_ANY_INIT }; + struct sockaddr_in6 *sock_in6 = + (struct sockaddr_in6 *)&np->np_sockaddr; + + if (!memcmp(sock_in6->sin6_addr.s6_addr, + sin6.sin6_addr.s6_addr, 16)) + ret = true; + } else { + struct sockaddr_in * sock_in = + (struct sockaddr_in *)&np->np_sockaddr; + + if (sock_in->sin_addr.s_addr == INADDR_ANY) + ret = true; + } + + return ret; +} + static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) { char *payload = NULL; @@ -3197,7 +3220,7 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) end_of_buf = 1; goto eob; } - memcpy((void *)payload + payload_len, buf, len); + memcpy(payload + payload_len, buf, len); payload_len += len; spin_lock(&tiqn->tiqn_tpg_lock); @@ -3214,12 +3237,17 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) spin_lock(&tpg->tpg_np_lock); list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) { + struct iscsi_np *np = tpg_np->tpg_np; + bool inaddr_any = iscsit_check_inaddr_any(np); + len = sprintf(buf, "TargetAddress=" "%s%s%s:%hu,%hu", - (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ? - "[" : "", tpg_np->tpg_np->np_ip, - (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ? - "]" : "", tpg_np->tpg_np->np_port, + (np->np_sockaddr.ss_family == AF_INET6) ? + "[" : "", (inaddr_any == false) ? + np->np_ip : conn->local_ip, + (np->np_sockaddr.ss_family == AF_INET6) ? + "]" : "", (inaddr_any == false) ? + np->np_port : conn->local_port, tpg->tpgt); len += 1; @@ -3229,7 +3257,7 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) end_of_buf = 1; goto eob; } - memcpy((void *)payload + payload_len, buf, len); + memcpy(payload + payload_len, buf, len); payload_len += len; } spin_unlock(&tpg->tpg_np_lock); @@ -3486,7 +3514,7 @@ int iscsi_target_tx_thread(void *arg) struct iscsi_conn *conn; struct iscsi_queue_req *qr = NULL; struct se_cmd *se_cmd; - struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg; + struct iscsi_thread_set *ts = arg; /* * Allow ourselves to be interrupted by SIGINT so that a * connection recovery / failure event can be triggered externally. 
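In the sendtargets hunk just above, the choice of which address to advertise ends up folded into nested ternaries inside a single sprintf(), which is easy to misread. Restated as a sketch, assuming the surrounding iscsi_target.c definitions (the helper name below is illustrative and not a function added by this patch): when the network portal is bound to the wildcard address, as detected by the new iscsit_check_inaddr_any(), the response advertises the address and port the connection actually arrived on (conn->local_ip, conn->local_port); otherwise it advertises the portal's configured np_ip and np_port, wrapping IPv6 literals in brackets.

/* Illustrative restatement of the TargetAddress selection above; relies on
 * the struct iscsi_np / struct iscsi_conn definitions from this driver.
 * build_target_address() itself is not part of the patch. */
static int build_target_address(char *buf, size_t len, struct iscsi_np *np,
				struct iscsi_conn *conn, u16 tpgt)
{
	bool inaddr_any = iscsit_check_inaddr_any(np);
	bool ipv6 = (np->np_sockaddr.ss_family == AF_INET6);

	return snprintf(buf, len, "TargetAddress=%s%s%s:%hu,%hu",
			ipv6 ? "[" : "",
			inaddr_any ? conn->local_ip : np->np_ip,
			ipv6 ? "]" : "",
			inaddr_any ? conn->local_port : np->np_port,
			tpgt);
}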
@@ -3775,7 +3803,7 @@ int iscsi_target_rx_thread(void *arg) u8 buffer[ISCSI_HDR_LEN], opcode; u32 checksum = 0, digest = 0; struct iscsi_conn *conn = NULL; - struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg; + struct iscsi_thread_set *ts = arg; struct kvec iov; /* * Allow ourselves to be interrupted by SIGINT so that a diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index 1cd6ce3..db0cf7c 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -82,7 +82,7 @@ static void chap_gen_challenge( unsigned int *c_len) { unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1]; - struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol; + struct iscsi_chap *chap = conn->auth_protocol; memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1); @@ -120,7 +120,7 @@ static struct iscsi_chap *chap_server_open( if (!conn->auth_protocol) return NULL; - chap = (struct iscsi_chap *) conn->auth_protocol; + chap = conn->auth_protocol; /* * We only support MD5 MDA presently. */ @@ -165,14 +165,15 @@ static int chap_server_compute_md5( unsigned int *nr_out_len) { char *endptr; - unsigned char id, digest[MD5_SIGNATURE_SIZE]; + unsigned long id; + unsigned char digest[MD5_SIGNATURE_SIZE]; unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2]; unsigned char identifier[10], *challenge = NULL; unsigned char *challenge_binhex = NULL; unsigned char client_digest[MD5_SIGNATURE_SIZE]; unsigned char server_digest[MD5_SIGNATURE_SIZE]; unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH]; - struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol; + struct iscsi_chap *chap = conn->auth_protocol; struct crypto_hash *tfm; struct hash_desc desc; struct scatterlist sg; @@ -246,7 +247,7 @@ static int chap_server_compute_md5( goto out; } - sg_init_one(&sg, (void *)&chap->id, 1); + sg_init_one(&sg, &chap->id, 1); ret = crypto_hash_update(&desc, &sg, 1); if (ret < 0) { pr_err("crypto_hash_update() failed for id\n"); @@ -254,7 +255,7 @@ static int chap_server_compute_md5( goto out; } - sg_init_one(&sg, (void *)&auth->password, strlen(auth->password)); + sg_init_one(&sg, &auth->password, strlen(auth->password)); ret = crypto_hash_update(&desc, &sg, strlen(auth->password)); if (ret < 0) { pr_err("crypto_hash_update() failed for password\n"); @@ -262,7 +263,7 @@ static int chap_server_compute_md5( goto out; } - sg_init_one(&sg, (void *)chap->challenge, CHAP_CHALLENGE_LENGTH); + sg_init_one(&sg, chap->challenge, CHAP_CHALLENGE_LENGTH); ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH); if (ret < 0) { pr_err("crypto_hash_update() failed for challenge\n"); @@ -305,14 +306,17 @@ static int chap_server_compute_md5( } if (type == HEX) - id = (unsigned char)simple_strtoul((char *)&identifier[2], - &endptr, 0); + id = simple_strtoul(&identifier[2], &endptr, 0); else - id = (unsigned char)simple_strtoul(identifier, &endptr, 0); + id = simple_strtoul(identifier, &endptr, 0); + if (id > 255) { + pr_err("chap identifier: %lu greater than 255\n", id); + goto out; + } /* * RFC 1994 says Identifier is no more than octet (8 bits). */ - pr_debug("[server] Got CHAP_I=%d\n", id); + pr_debug("[server] Got CHAP_I=%lu\n", id); /* * Get CHAP_C. 
*/ @@ -351,7 +355,7 @@ static int chap_server_compute_md5( goto out; } - sg_init_one(&sg, (void *)&id, 1); + sg_init_one(&sg, &id, 1); ret = crypto_hash_update(&desc, &sg, 1); if (ret < 0) { pr_err("crypto_hash_update() failed for id\n"); @@ -359,7 +363,7 @@ static int chap_server_compute_md5( goto out; } - sg_init_one(&sg, (void *)auth->password_mutual, + sg_init_one(&sg, auth->password_mutual, strlen(auth->password_mutual)); ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual)); if (ret < 0) { @@ -371,7 +375,7 @@ static int chap_server_compute_md5( /* * Convert received challenge to binary hex. */ - sg_init_one(&sg, (void *)challenge_binhex, challenge_len); + sg_init_one(&sg, challenge_binhex, challenge_len); ret = crypto_hash_update(&desc, &sg, challenge_len); if (ret < 0) { pr_err("crypto_hash_update() failed for ma challenge\n"); @@ -414,7 +418,7 @@ static int chap_got_response( char *nr_out_ptr, unsigned int *nr_out_len) { - struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol; + struct iscsi_chap *chap = conn->auth_protocol; switch (chap->digest_type) { case CHAP_DIGEST_MD5: @@ -437,7 +441,7 @@ u32 chap_main_loop( int *in_len, int *out_len) { - struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol; + struct iscsi_chap *chap = conn->auth_protocol; if (!chap) { chap = chap_server_open(conn, auth, in_text, out_text, out_len); diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index db32784..6b35b37 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -21,13 +21,10 @@ #include <linux/configfs.h> #include <linux/export.h> +#include <linux/inet.h> #include <target/target_core_base.h> -#include <target/target_core_transport.h> -#include <target/target_core_fabric_ops.h> +#include <target/target_core_fabric.h> #include <target/target_core_fabric_configfs.h> -#include <target/target_core_fabric_lib.h> -#include <target/target_core_device.h> -#include <target/target_core_tpg.h> #include <target/target_core_configfs.h> #include <target/configfs_macros.h> @@ -56,8 +53,7 @@ struct iscsi_portal_group *lio_get_tpg_from_tpg_item( { struct se_portal_group *se_tpg = container_of(to_config_group(item), struct se_portal_group, tpg_group); - struct iscsi_portal_group *tpg = - (struct iscsi_portal_group *)se_tpg->se_tpg_fabric_ptr; + struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; int ret; if (!tpg) { @@ -1225,7 +1221,7 @@ struct se_portal_group *lio_target_tiqn_addtpg( ret = core_tpg_register( &lio_target_fabric_configfs->tf_ops, - wwn, &tpg->tpg_se_tpg, (void *)tpg, + wwn, &tpg->tpg_se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); if (ret < 0) return NULL; diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h index f1a02da..0ec3b77 100644 --- a/drivers/target/iscsi/iscsi_target_core.h +++ b/drivers/target/iscsi/iscsi_target_core.h @@ -508,6 +508,7 @@ struct iscsi_conn { u16 cid; /* Remote TCP Port */ u16 login_port; + u16 local_port; int net_size; u32 auth_id; #define CONNFLAG_SCTP_STRUCT_FILE 0x01 @@ -527,6 +528,7 @@ struct iscsi_conn { unsigned char bad_hdr[ISCSI_HDR_LEN]; #define IPV6_ADDRESS_SPACE 48 unsigned char login_ip[IPV6_ADDRESS_SPACE]; + unsigned char local_ip[IPV6_ADDRESS_SPACE]; int conn_usage_count; int conn_waiting_on_uc; atomic_t check_immediate_queue; @@ -561,8 +563,8 @@ struct iscsi_conn { struct hash_desc conn_tx_hash; /* Used for scheduling TX and RX connection kthreads */ 
cpumask_var_t conn_cpumask; - int conn_rx_reset_cpumask:1; - int conn_tx_reset_cpumask:1; + unsigned int conn_rx_reset_cpumask:1; + unsigned int conn_tx_reset_cpumask:1; /* list_head of struct iscsi_cmd for this connection */ struct list_head conn_cmd_list; struct list_head immed_queue_list; diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c index a19fa5e..f63ea35 100644 --- a/drivers/target/iscsi/iscsi_target_device.c +++ b/drivers/target/iscsi/iscsi_target_device.c @@ -21,8 +21,7 @@ #include <scsi/scsi_device.h> #include <target/target_core_base.h> -#include <target/target_core_device.h> -#include <target/target_core_transport.h> +#include <target/target_core_fabric.h> #include "iscsi_target_core.h" #include "iscsi_target_device.h" diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index b7ffc3c..4784511 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c @@ -21,7 +21,7 @@ #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> -#include <target/target_core_transport.h> +#include <target/target_core_fabric.h> #include "iscsi_target_core.h" #include "iscsi_target_seq_pdu_list.h" diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c index 101b1be..27901e3 100644 --- a/drivers/target/iscsi/iscsi_target_erl1.c +++ b/drivers/target/iscsi/iscsi_target_erl1.c @@ -21,7 +21,7 @@ #include <linux/list.h> #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> -#include <target/target_core_transport.h> +#include <target/target_core_fabric.h> #include "iscsi_target_core.h" #include "iscsi_target_seq_pdu_list.h" @@ -1238,7 +1238,7 @@ void iscsit_mod_dataout_timer(struct iscsi_cmd *cmd) { struct iscsi_conn *conn = cmd->conn; struct iscsi_session *sess = conn->sess; - struct iscsi_node_attrib *na = na = iscsit_tpg_get_node_attrib(sess); + struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess); spin_lock_bh(&cmd->dataout_timeout_lock); if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) { @@ -1261,7 +1261,7 @@ void iscsit_start_dataout_timer( struct iscsi_conn *conn) { struct iscsi_session *sess = conn->sess; - struct iscsi_node_attrib *na = na = iscsit_tpg_get_node_attrib(sess); + struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess); if (cmd->dataout_timer_flags & ISCSI_TF_RUNNING) return; diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c index 0b8404c..1af1f21 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.c +++ b/drivers/target/iscsi/iscsi_target_erl2.c @@ -21,7 +21,7 @@ #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> -#include <target/target_core_transport.h> +#include <target/target_core_fabric.h> #include "iscsi_target_core.h" #include "iscsi_target_datain_values.h" diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index d734bde..38cb7ce 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -23,7 +23,7 @@ #include <linux/crypto.h> #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> -#include <target/target_core_transport.h> +#include <target/target_core_fabric.h> #include "iscsi_target_core.h" #include "iscsi_target_tq.h" @@ -143,7 +143,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list, sess_list) { 
- sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr; + sess_p = se_sess->fabric_sess_ptr; spin_lock(&sess_p->conn_lock); if (atomic_read(&sess_p->session_fall_back_to_erl0) || atomic_read(&sess_p->session_logout) || @@ -151,9 +151,9 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) spin_unlock(&sess_p->conn_lock); continue; } - if (!memcmp((void *)sess_p->isid, (void *)conn->sess->isid, 6) && - (!strcmp((void *)sess_p->sess_ops->InitiatorName, - (void *)initiatorname_param->value) && + if (!memcmp(sess_p->isid, conn->sess->isid, 6) && + (!strcmp(sess_p->sess_ops->InitiatorName, + initiatorname_param->value) && (sess_p->sess_ops->SessionType == sessiontype))) { atomic_set(&sess_p->session_reinstatement, 1); spin_unlock(&sess_p->conn_lock); @@ -229,7 +229,7 @@ static int iscsi_login_zero_tsih_s1( iscsi_login_set_conn_values(sess, conn, pdu->cid); sess->init_task_tag = pdu->itt; - memcpy((void *)&sess->isid, (void *)pdu->isid, 6); + memcpy(&sess->isid, pdu->isid, 6); sess->exp_cmd_sn = pdu->cmdsn; INIT_LIST_HEAD(&sess->sess_conn_list); INIT_LIST_HEAD(&sess->sess_ooo_cmdsn_list); @@ -440,8 +440,7 @@ static int iscsi_login_non_zero_tsih_s2( atomic_read(&sess_p->session_logout) || (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) continue; - if (!memcmp((const void *)sess_p->isid, - (const void *)pdu->isid, 6) && + if (!memcmp(sess_p->isid, pdu->isid, 6) && (sess_p->tsih == pdu->tsih)) { iscsit_inc_session_usage_count(sess_p); iscsit_stop_time2retain_timer(sess_p); @@ -616,8 +615,8 @@ static int iscsi_post_login_handler( } pr_debug("iSCSI Login successful on CID: %hu from %s to" - " %s:%hu,%hu\n", conn->cid, conn->login_ip, np->np_ip, - np->np_port, tpg->tpgt); + " %s:%hu,%hu\n", conn->cid, conn->login_ip, + conn->local_ip, conn->local_port, tpg->tpgt); list_add_tail(&conn->conn_list, &sess->sess_conn_list); atomic_inc(&sess->nconn); @@ -654,12 +653,13 @@ static int iscsi_post_login_handler( spin_lock_bh(&se_tpg->session_lock); __transport_register_session(&sess->tpg->tpg_se_tpg, - se_sess->se_node_acl, se_sess, (void *)sess); + se_sess->se_node_acl, se_sess, sess); pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n"); sess->session_state = TARG_SESS_STATE_LOGGED_IN; pr_debug("iSCSI Login successful on CID: %hu from %s to %s:%hu,%hu\n", - conn->cid, conn->login_ip, np->np_ip, np->np_port, tpg->tpgt); + conn->cid, conn->login_ip, conn->local_ip, conn->local_port, + tpg->tpgt); spin_lock_bh(&sess->conn_lock); list_add_tail(&conn->conn_list, &sess->sess_conn_list); @@ -811,7 +811,7 @@ int iscsi_target_setup_login_socket( * Setup the np->np_sockaddr from the passed sockaddr setup * in iscsi_target_configfs.c code.. */ - memcpy((void *)&np->np_sockaddr, (void *)sockaddr, + memcpy(&np->np_sockaddr, sockaddr, sizeof(struct __kernel_sockaddr_storage)); if (sockaddr->ss_family == AF_INET6) @@ -821,6 +821,7 @@ int iscsi_target_setup_login_socket( /* * Set SO_REUSEADDR, and disable Nagel Algorithm with TCP_NODELAY. 
*/ + /* FIXME: Someone please explain why this is endian-safe */ opt = 1; if (np->np_network_transport == ISCSI_TCP) { ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, @@ -832,6 +833,7 @@ int iscsi_target_setup_login_socket( } } + /* FIXME: Someone please explain why this is endian-safe */ ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (char *)&opt, sizeof(opt)); if (ret < 0) { @@ -840,6 +842,14 @@ int iscsi_target_setup_login_socket( goto fail; } + ret = kernel_setsockopt(sock, IPPROTO_IP, IP_FREEBIND, + (char *)&opt, sizeof(opt)); + if (ret < 0) { + pr_err("kernel_setsockopt() for IP_FREEBIND" + " failed\n"); + goto fail; + } + ret = kernel_bind(sock, (struct sockaddr *)&np->np_sockaddr, len); if (ret < 0) { pr_err("kernel_bind() failed: %d\n", ret); @@ -1019,6 +1029,18 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c", &sock_in6.sin6_addr.in6_u); conn->login_port = ntohs(sock_in6.sin6_port); + + if (conn->sock->ops->getname(conn->sock, + (struct sockaddr *)&sock_in6, &err, 0) < 0) { + pr_err("sock_ops->getname() failed.\n"); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_TARGET_ERROR); + goto new_sess_out; + } + snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c", + &sock_in6.sin6_addr.in6_u); + conn->local_port = ntohs(sock_in6.sin6_port); + } else { memset(&sock_in, 0, sizeof(struct sockaddr_in)); @@ -1031,6 +1053,16 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) } sprintf(conn->login_ip, "%pI4", &sock_in.sin_addr.s_addr); conn->login_port = ntohs(sock_in.sin_port); + + if (conn->sock->ops->getname(conn->sock, + (struct sockaddr *)&sock_in, &err, 0) < 0) { + pr_err("sock_ops->getname() failed.\n"); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_TARGET_ERROR); + goto new_sess_out; + } + sprintf(conn->local_ip, "%pI4", &sock_in.sin_addr.s_addr); + conn->local_port = ntohs(sock_in.sin_port); } conn->network_transport = np->np_network_transport; @@ -1038,7 +1070,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) pr_debug("Received iSCSI login request from %s on %s Network" " Portal %s:%hu\n", conn->login_ip, (conn->network_transport == ISCSI_TCP) ? 
"TCP" : "SCTP", - np->np_ip, np->np_port); + conn->local_ip, conn->local_port); pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n"); conn->conn_state = TARG_CONN_STATE_IN_LOGIN; @@ -1206,7 +1238,7 @@ out: int iscsi_target_login_thread(void *arg) { - struct iscsi_np *np = (struct iscsi_np *)arg; + struct iscsi_np *np = arg; int ret; allow_signal(SIGINT); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 98936cb..e89fa74 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -21,7 +21,7 @@ #include <linux/ctype.h> #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> -#include <target/target_core_tpg.h> +#include <target/target_core_fabric.h> #include "iscsi_target_core.h" #include "iscsi_target_parameters.h" @@ -732,7 +732,7 @@ static void iscsi_initiatorname_tolower( u32 iqn_size = strlen(param_buf), i; for (i = 0; i < iqn_size; i++) { - c = (char *)¶m_buf[i]; + c = ¶m_buf[i]; if (!isupper(*c)) continue; diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c index aeafbe0..b3c699c 100644 --- a/drivers/target/iscsi/iscsi_target_nodeattrib.c +++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c @@ -19,7 +19,6 @@ ******************************************************************************/ #include <target/target_core_base.h> -#include <target/target_core_transport.h> #include "iscsi_target_core.h" #include "iscsi_target_device.h" @@ -135,7 +134,7 @@ extern int iscsit_na_nopin_timeout( spin_lock_bh(&se_nacl->nacl_sess_lock); se_sess = se_nacl->nacl_sess; if (se_sess) { - sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; + sess = se_sess->fabric_sess_ptr; spin_lock(&sess->conn_lock); list_for_each_entry(conn, &sess->sess_conn_list, diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c index f1db830..421d694 100644 --- a/drivers/target/iscsi/iscsi_target_stat.c +++ b/drivers/target/iscsi/iscsi_target_stat.c @@ -23,7 +23,6 @@ #include <linux/export.h> #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> -#include <target/target_core_transport.h> #include <target/configfs_macros.h> #include "iscsi_target_core.h" @@ -746,7 +745,7 @@ static ssize_t iscsi_stat_sess_show_attr_node( spin_lock_bh(&se_nacl->nacl_sess_lock); se_sess = se_nacl->nacl_sess; if (se_sess) { - sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; + sess = se_sess->fabric_sess_ptr; if (sess) ret = snprintf(page, PAGE_SIZE, "%u\n", sess->sess_ops->SessionType ? 
0 : ISCSI_NODE_INDEX); @@ -770,7 +769,7 @@ static ssize_t iscsi_stat_sess_show_attr_indx( spin_lock_bh(&se_nacl->nacl_sess_lock); se_sess = se_nacl->nacl_sess; if (se_sess) { - sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; + sess = se_sess->fabric_sess_ptr; if (sess) ret = snprintf(page, PAGE_SIZE, "%u\n", sess->session_index); @@ -794,7 +793,7 @@ static ssize_t iscsi_stat_sess_show_attr_cmd_pdus( spin_lock_bh(&se_nacl->nacl_sess_lock); se_sess = se_nacl->nacl_sess; if (se_sess) { - sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; + sess = se_sess->fabric_sess_ptr; if (sess) ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus); } @@ -817,7 +816,7 @@ static ssize_t iscsi_stat_sess_show_attr_rsp_pdus( spin_lock_bh(&se_nacl->nacl_sess_lock); se_sess = se_nacl->nacl_sess; if (se_sess) { - sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; + sess = se_sess->fabric_sess_ptr; if (sess) ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus); } @@ -840,7 +839,7 @@ static ssize_t iscsi_stat_sess_show_attr_txdata_octs( spin_lock_bh(&se_nacl->nacl_sess_lock); se_sess = se_nacl->nacl_sess; if (se_sess) { - sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; + sess = se_sess->fabric_sess_ptr; if (sess) ret = snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)sess->tx_data_octets); @@ -864,7 +863,7 @@ static ssize_t iscsi_stat_sess_show_attr_rxdata_octs( spin_lock_bh(&se_nacl->nacl_sess_lock); se_sess = se_nacl->nacl_sess; if (se_sess) { - sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; + sess = se_sess->fabric_sess_ptr; if (sess) ret = snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)sess->rx_data_octets); @@ -888,7 +887,7 @@ static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors( spin_lock_bh(&se_nacl->nacl_sess_lock); se_sess = se_nacl->nacl_sess; if (se_sess) { - sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; + sess = se_sess->fabric_sess_ptr; if (sess) ret = snprintf(page, PAGE_SIZE, "%u\n", sess->conn_digest_errors); @@ -912,7 +911,7 @@ static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors( spin_lock_bh(&se_nacl->nacl_sess_lock); se_sess = se_nacl->nacl_sess; if (se_sess) { - sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; + sess = se_sess->fabric_sess_ptr; if (sess) ret = snprintf(page, PAGE_SIZE, "%u\n", sess->conn_timeout_errors); diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c index 490207e..255ed35 100644 --- a/drivers/target/iscsi/iscsi_target_tmr.c +++ b/drivers/target/iscsi/iscsi_target_tmr.c @@ -21,7 +21,7 @@ #include <asm/unaligned.h> #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> -#include <target/target_core_transport.h> +#include <target/target_core_fabric.h> #include "iscsi_target_core.h" #include "iscsi_target_seq_pdu_list.h" diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index d4cf2cd..879d8d0 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -19,10 +19,8 @@ ******************************************************************************/ #include <target/target_core_base.h> -#include <target/target_core_transport.h> -#include <target/target_core_fabric_ops.h> +#include <target/target_core_fabric.h> #include <target/target_core_configfs.h> -#include <target/target_core_tpg.h> #include "iscsi_target_core.h" #include "iscsi_target_erl0.h" @@ -72,7 +70,7 @@ int iscsit_load_discovery_tpg(void) ret = core_tpg_register( 
&lio_target_fabric_configfs->tf_ops, - NULL, &tpg->tpg_se_tpg, (void *)tpg, + NULL, &tpg->tpg_se_tpg, tpg, TRANSPORT_TPG_TYPE_DISCOVERY); if (ret < 0) { kfree(tpg); diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 02348f7..11287e1 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -22,9 +22,7 @@ #include <scsi/scsi_tcq.h> #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> -#include <target/target_core_transport.h> -#include <target/target_core_tmr.h> -#include <target/target_core_fabric_ops.h> +#include <target/target_core_fabric.h> #include <target/target_core_configfs.h> #include "iscsi_target_core.h" @@ -289,7 +287,7 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr( } se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, - (void *)cmd->tmr_req, tcm_function, + cmd->tmr_req, tcm_function, GFP_KERNEL); if (!se_cmd->se_tmr_req) goto out; @@ -851,6 +849,17 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd) case ISCSI_OP_SCSI_TMFUNC: transport_generic_free_cmd(&cmd->se_cmd, 1); break; + case ISCSI_OP_REJECT: + /* + * Handle special case for REJECT when iscsi_add_reject*() has + * overwritten the original iscsi_opcode assignment, and the + * associated cmd->se_cmd needs to be released. + */ + if (cmd->se_cmd.se_tfo != NULL) { + transport_generic_free_cmd(&cmd->se_cmd, 1); + break; + } + /* Fall-through */ default: iscsit_release_cmd(cmd); break; @@ -1066,7 +1075,7 @@ static void iscsit_handle_nopin_response_timeout(unsigned long data) if (tiqn) { spin_lock_bh(&tiqn->sess_err_stats.lock); strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name, - (void *)conn->sess->sess_ops->InitiatorName); + conn->sess->sess_ops->InitiatorName); tiqn->sess_err_stats.last_sess_failure_type = ISCSI_SESS_ERR_CXN_TIMEOUT; tiqn->sess_err_stats.cxn_timeout_errors++; diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 81d5832..c47ff7f 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -33,14 +33,9 @@ #include <scsi/scsi_cmnd.h> #include <target/target_core_base.h> -#include <target/target_core_transport.h> -#include <target/target_core_fabric_ops.h> +#include <target/target_core_fabric.h> #include <target/target_core_fabric_configfs.h> -#include <target/target_core_fabric_lib.h> #include <target/target_core_configfs.h> -#include <target/target_core_device.h> -#include <target/target_core_tpg.h> -#include <target/target_core_tmr.h> #include "tcm_loop.h" @@ -421,11 +416,11 @@ static struct scsi_host_template tcm_loop_driver_template = { .queuecommand = tcm_loop_queuecommand, .change_queue_depth = tcm_loop_change_queue_depth, .eh_device_reset_handler = tcm_loop_device_reset, - .can_queue = TL_SCSI_CAN_QUEUE, + .can_queue = 1024, .this_id = -1, - .sg_tablesize = TL_SCSI_SG_TABLESIZE, - .cmd_per_lun = TL_SCSI_CMD_PER_LUN, - .max_sectors = TL_SCSI_MAX_SECTORS, + .sg_tablesize = 256, + .cmd_per_lun = 1024, + .max_sectors = 0xFFFF, .use_clustering = DISABLE_CLUSTERING, .slave_alloc = tcm_loop_slave_alloc, .slave_configure = tcm_loop_slave_configure, @@ -564,8 +559,7 @@ static char *tcm_loop_get_fabric_name(void) static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg) { - struct tcm_loop_tpg *tl_tpg = - (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; + struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr; struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; /* * tl_proto_id is set at 
tcm_loop_configfs.c:tcm_loop_make_scsi_hba() @@ -592,8 +586,7 @@ static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg) static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg) { - struct tcm_loop_tpg *tl_tpg = - (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; + struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr; /* * Return the passed NAA identifier for the SAS Target Port */ @@ -602,8 +595,7 @@ static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg) static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg) { - struct tcm_loop_tpg *tl_tpg = - (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; + struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr; /* * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83 * to represent the SCSI Target Port. @@ -623,8 +615,7 @@ static u32 tcm_loop_get_pr_transport_id( int *format_code, unsigned char *buf) { - struct tcm_loop_tpg *tl_tpg = - (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; + struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr; struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; switch (tl_hba->tl_proto_id) { @@ -653,8 +644,7 @@ static u32 tcm_loop_get_pr_transport_id_len( struct t10_pr_registration *pr_reg, int *format_code) { - struct tcm_loop_tpg *tl_tpg = - (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; + struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr; struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; switch (tl_hba->tl_proto_id) { @@ -687,8 +677,7 @@ static char *tcm_loop_parse_pr_out_transport_id( u32 *out_tid_len, char **port_nexus_ptr) { - struct tcm_loop_tpg *tl_tpg = - (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr; + struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr; struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; switch (tl_hba->tl_proto_id) { diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h index 6b76c7a..15a0364 100644 --- a/drivers/target/loopback/tcm_loop.h +++ b/drivers/target/loopback/tcm_loop.h @@ -1,16 +1,7 @@ #define TCM_LOOP_VERSION "v2.1-rc1" #define TL_WWN_ADDR_LEN 256 #define TL_TPGS_PER_HBA 32 -/* - * Defaults for struct scsi_host_template tcm_loop_driver_template - * - * We use large can_queue and cmd_per_lun here and let TCM enforce - * the underlying se_device_t->queue_depth. 
- */ -#define TL_SCSI_CAN_QUEUE 1024 -#define TL_SCSI_CMD_PER_LUN 1024 -#define TL_SCSI_MAX_SECTORS 1024 -#define TL_SCSI_SG_TABLESIZE 256 + /* * Used in tcm_loop_driver_probe() for struct Scsi_Host->max_cmd_len */ diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 1dcbef4..01a2691 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -32,13 +32,12 @@ #include <scsi/scsi_cmnd.h> #include <target/target_core_base.h> -#include <target/target_core_device.h> -#include <target/target_core_transport.h> -#include <target/target_core_fabric_ops.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> #include <target/target_core_configfs.h> +#include "target_core_internal.h" #include "target_core_alua.h" -#include "target_core_hba.h" #include "target_core_ua.h" static int core_alua_check_transition(int state, int *primary); @@ -79,7 +78,7 @@ int target_emulate_report_target_port_groups(struct se_task *task) return -EINVAL; } - buf = transport_kmap_first_data_page(cmd); + buf = transport_kmap_data_sg(cmd); spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list, @@ -164,7 +163,7 @@ int target_emulate_report_target_port_groups(struct se_task *task) buf[2] = ((rd_len >> 8) & 0xff); buf[3] = (rd_len & 0xff); - transport_kunmap_first_data_page(cmd); + transport_kunmap_data_sg(cmd); task->task_scsi_status = GOOD; transport_complete_task(task, 1); @@ -195,7 +194,7 @@ int target_emulate_set_target_port_groups(struct se_task *task) cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; return -EINVAL; } - buf = transport_kmap_first_data_page(cmd); + buf = transport_kmap_data_sg(cmd); /* * Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed @@ -352,7 +351,7 @@ int target_emulate_set_target_port_groups(struct se_task *task) } out: - transport_kunmap_first_data_page(cmd); + transport_kunmap_data_sg(cmd); task->task_scsi_status = GOOD; transport_complete_task(task, 1); return 0; diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 831468b..f3d71fa 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -29,10 +29,11 @@ #include <scsi/scsi.h> #include <target/target_core_base.h> -#include <target/target_core_transport.h> -#include <target/target_core_fabric_ops.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> + +#include "target_core_internal.h" #include "target_core_ua.h" -#include "target_core_cdb.h" static void target_fill_alua_data(struct se_port *port, unsigned char *buf) @@ -82,7 +83,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd) return -EINVAL; } - buf = transport_kmap_first_data_page(cmd); + buf = transport_kmap_data_sg(cmd); if (dev == tpg->tpg_virt_lun0.lun_se_dev) { buf[0] = 0x3f; /* Not connected */ @@ -94,6 +95,18 @@ target_emulate_inquiry_std(struct se_cmd *cmd) buf[2] = dev->transport->get_device_rev(dev); /* + * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2 + * + * SPC4 says: + * A RESPONSE DATA FORMAT field set to 2h indicates that the + * standard INQUIRY data is in the format defined in this + * standard. Response data format values less than 2h are + * obsolete. Response data format values greater than 2h are + * reserved. 
+ */ + buf[3] = 2; + + /* * Enable SCCS and TPGS fields for Emulated ALUA */ if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) @@ -115,15 +128,13 @@ target_emulate_inquiry_std(struct se_cmd *cmd) goto out; } - snprintf((unsigned char *)&buf[8], 8, "LIO-ORG"); - snprintf((unsigned char *)&buf[16], 16, "%s", - &dev->se_sub_dev->t10_wwn.model[0]); - snprintf((unsigned char *)&buf[32], 4, "%s", - &dev->se_sub_dev->t10_wwn.revision[0]); + snprintf(&buf[8], 8, "LIO-ORG"); + snprintf(&buf[16], 16, "%s", dev->se_sub_dev->t10_wwn.model); + snprintf(&buf[32], 4, "%s", dev->se_sub_dev->t10_wwn.revision); buf[4] = 31; /* Set additional length to 31 */ out: - transport_kunmap_first_data_page(cmd); + transport_kunmap_data_sg(cmd); return 0; } @@ -138,8 +149,7 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) SDF_EMULATED_VPD_UNIT_SERIAL) { u32 unit_serial_len; - unit_serial_len = - strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]); + unit_serial_len = strlen(dev->se_sub_dev->t10_wwn.unit_serial); unit_serial_len++; /* For NULL Terminator */ if (((len + 4) + unit_serial_len) > cmd->data_length) { @@ -148,8 +158,8 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) buf[3] = (len & 0xff); return 0; } - len += sprintf((unsigned char *)&buf[4], "%s", - &dev->se_sub_dev->t10_wwn.unit_serial[0]); + len += sprintf(&buf[4], "%s", + dev->se_sub_dev->t10_wwn.unit_serial); len++; /* Extra Byte for NULL Terminator */ buf[3] = len; } @@ -279,14 +289,13 @@ check_t10_vend_desc: len += (prod_len + unit_serial_len); goto check_port; } - id_len += sprintf((unsigned char *)&buf[off+12], - "%s:%s", prod, + id_len += sprintf(&buf[off+12], "%s:%s", prod, &dev->se_sub_dev->t10_wwn.unit_serial[0]); } buf[off] = 0x2; /* ASCII */ buf[off+1] = 0x1; /* T10 Vendor ID */ buf[off+2] = 0x0; - memcpy((unsigned char *)&buf[off+4], "LIO-ORG", 8); + memcpy(&buf[off+4], "LIO-ORG", 8); /* Extra Byte for NULL Terminator */ id_len++; /* Identifier Length */ @@ -689,6 +698,13 @@ int target_emulate_inquiry(struct se_task *task) int p, ret; if (!(cdb[1] & 0x1)) { + if (cdb[2]) { + pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n", + cdb[2]); + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + return -EINVAL; + } + ret = target_emulate_inquiry_std(cmd); goto out; } @@ -707,7 +723,7 @@ int target_emulate_inquiry(struct se_task *task) return -EINVAL; } - buf = transport_kmap_first_data_page(cmd); + buf = transport_kmap_data_sg(cmd); buf[0] = dev->transport->get_device_type(dev); @@ -720,11 +736,11 @@ int target_emulate_inquiry(struct se_task *task) } pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]); - cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; ret = -EINVAL; out_unmap: - transport_kunmap_first_data_page(cmd); + transport_kunmap_data_sg(cmd); out: if (!ret) { task->task_scsi_status = GOOD; @@ -746,7 +762,7 @@ int target_emulate_readcapacity(struct se_task *task) else blocks = (u32)blocks_long; - buf = transport_kmap_first_data_page(cmd); + buf = transport_kmap_data_sg(cmd); buf[0] = (blocks >> 24) & 0xff; buf[1] = (blocks >> 16) & 0xff; @@ -762,7 +778,7 @@ int target_emulate_readcapacity(struct se_task *task) if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) put_unaligned_be32(0xFFFFFFFF, &buf[0]); - transport_kunmap_first_data_page(cmd); + transport_kunmap_data_sg(cmd); task->task_scsi_status = GOOD; transport_complete_task(task, 1); @@ -776,7 +792,7 @@ int target_emulate_readcapacity_16(struct se_task 
*task) unsigned char *buf; unsigned long long blocks = dev->transport->get_blocks(dev); - buf = transport_kmap_first_data_page(cmd); + buf = transport_kmap_data_sg(cmd); buf[0] = (blocks >> 56) & 0xff; buf[1] = (blocks >> 48) & 0xff; @@ -797,7 +813,7 @@ int target_emulate_readcapacity_16(struct se_task *task) if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) buf[14] = 0x80; - transport_kunmap_first_data_page(cmd); + transport_kunmap_data_sg(cmd); task->task_scsi_status = GOOD; transport_complete_task(task, 1); @@ -1010,9 +1026,9 @@ int target_emulate_modesense(struct se_task *task) offset = cmd->data_length; } - rbuf = transport_kmap_first_data_page(cmd); + rbuf = transport_kmap_data_sg(cmd); memcpy(rbuf, buf, offset); - transport_kunmap_first_data_page(cmd); + transport_kunmap_data_sg(cmd); task->task_scsi_status = GOOD; transport_complete_task(task, 1); @@ -1034,7 +1050,7 @@ int target_emulate_request_sense(struct se_task *task) return -ENOSYS; } - buf = transport_kmap_first_data_page(cmd); + buf = transport_kmap_data_sg(cmd); if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) { /* @@ -1042,11 +1058,8 @@ int target_emulate_request_sense(struct se_task *task) */ buf[0] = 0x70; buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; - /* - * Make sure request data length is enough for additional - * sense data. - */ - if (cmd->data_length <= 18) { + + if (cmd->data_length < 18) { buf[7] = 0x00; err = -EINVAL; goto end; @@ -1063,11 +1076,8 @@ int target_emulate_request_sense(struct se_task *task) */ buf[0] = 0x70; buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE; - /* - * Make sure request data length is enough for additional - * sense data. - */ - if (cmd->data_length <= 18) { + + if (cmd->data_length < 18) { buf[7] = 0x00; err = -EINVAL; goto end; @@ -1080,7 +1090,7 @@ int target_emulate_request_sense(struct se_task *task) } end: - transport_kunmap_first_data_page(cmd); + transport_kunmap_data_sg(cmd); task->task_scsi_status = GOOD; transport_complete_task(task, 1); return 0; @@ -1114,7 +1124,7 @@ int target_emulate_unmap(struct se_task *task) dl = get_unaligned_be16(&cdb[0]); bd_dl = get_unaligned_be16(&cdb[2]); - buf = transport_kmap_first_data_page(cmd); + buf = transport_kmap_data_sg(cmd); ptr = &buf[offset]; pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu" @@ -1138,7 +1148,7 @@ int target_emulate_unmap(struct se_task *task) } err: - transport_kunmap_first_data_page(cmd); + transport_kunmap_data_sg(cmd); if (!ret) { task->task_scsi_status = GOOD; transport_complete_task(task, 1); diff --git a/drivers/target/target_core_cdb.h b/drivers/target/target_core_cdb.h deleted file mode 100644 index ad6b1e3..0000000 --- a/drivers/target/target_core_cdb.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef TARGET_CORE_CDB_H -#define TARGET_CORE_CDB_H - -int target_emulate_inquiry(struct se_task *task); -int target_emulate_readcapacity(struct se_task *task); -int target_emulate_readcapacity_16(struct se_task *task); -int target_emulate_modesense(struct se_task *task); -int target_emulate_request_sense(struct se_task *task); -int target_emulate_unmap(struct se_task *task); -int target_emulate_write_same(struct se_task *task); -int target_emulate_synchronize_cache(struct se_task *task); -int target_emulate_noop(struct se_task *task); - -#endif /* TARGET_CORE_CDB_H */ diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 93d4f6a..6e043ee 100644 --- a/drivers/target/target_core_configfs.c +++ 
b/drivers/target/target_core_configfs.c @@ -39,18 +39,16 @@ #include <linux/spinlock.h> #include <target/target_core_base.h> -#include <target/target_core_device.h> -#include <target/target_core_transport.h> -#include <target/target_core_fabric_ops.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> #include <target/target_core_fabric_configfs.h> #include <target/target_core_configfs.h> #include <target/configfs_macros.h> +#include "target_core_internal.h" #include "target_core_alua.h" -#include "target_core_hba.h" #include "target_core_pr.h" #include "target_core_rd.h" -#include "target_core_stat.h" extern struct t10_alua_lu_gp *default_lu_gp; @@ -1452,7 +1450,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( return -ENOMEM; orig = opts; - while ((ptr = strsep(&opts, ",")) != NULL) { + while ((ptr = strsep(&opts, ",\n")) != NULL) { if (!*ptr) continue; @@ -1631,7 +1629,7 @@ static struct config_item_type target_core_dev_pr_cit = { static ssize_t target_core_show_dev_info(void *p, char *page) { - struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; + struct se_subsystem_dev *se_dev = p; struct se_hba *hba = se_dev->se_dev_hba; struct se_subsystem_api *t = hba->transport; int bl = 0; @@ -1659,7 +1657,7 @@ static ssize_t target_core_store_dev_control( const char *page, size_t count) { - struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; + struct se_subsystem_dev *se_dev = p; struct se_hba *hba = se_dev->se_dev_hba; struct se_subsystem_api *t = hba->transport; @@ -1682,7 +1680,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_control = { static ssize_t target_core_show_dev_alias(void *p, char *page) { - struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; + struct se_subsystem_dev *se_dev = p; if (!(se_dev->su_dev_flags & SDF_USING_ALIAS)) return 0; @@ -1695,7 +1693,7 @@ static ssize_t target_core_store_dev_alias( const char *page, size_t count) { - struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; + struct se_subsystem_dev *se_dev = p; struct se_hba *hba = se_dev->se_dev_hba; ssize_t read_bytes; @@ -1706,9 +1704,14 @@ static ssize_t target_core_store_dev_alias( return -EINVAL; } - se_dev->su_dev_flags |= SDF_USING_ALIAS; read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page); + if (!read_bytes) + return -EINVAL; + if (se_dev->se_dev_alias[read_bytes - 1] == '\n') + se_dev->se_dev_alias[read_bytes - 1] = '\0'; + + se_dev->su_dev_flags |= SDF_USING_ALIAS; pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n", config_item_name(&hba->hba_group.cg_item), @@ -1728,7 +1731,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_alias = { static ssize_t target_core_show_dev_udev_path(void *p, char *page) { - struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; + struct se_subsystem_dev *se_dev = p; if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) return 0; @@ -1741,7 +1744,7 @@ static ssize_t target_core_store_dev_udev_path( const char *page, size_t count) { - struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; + struct se_subsystem_dev *se_dev = p; struct se_hba *hba = se_dev->se_dev_hba; ssize_t read_bytes; @@ -1752,9 +1755,14 @@ static ssize_t target_core_store_dev_udev_path( return -EINVAL; } - se_dev->su_dev_flags |= SDF_USING_UDEV_PATH; read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN, "%s", page); + if (!read_bytes) + return -EINVAL; + if (se_dev->se_dev_udev_path[read_bytes - 1] == '\n') + 
se_dev->se_dev_udev_path[read_bytes - 1] = '\0'; + + se_dev->su_dev_flags |= SDF_USING_UDEV_PATH; pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n", config_item_name(&hba->hba_group.cg_item), @@ -1777,7 +1785,7 @@ static ssize_t target_core_store_dev_enable( const char *page, size_t count) { - struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p; + struct se_subsystem_dev *se_dev = p; struct se_device *dev; struct se_hba *hba = se_dev->se_dev_hba; struct se_subsystem_api *t = hba->transport; @@ -1822,7 +1830,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_enable = { static ssize_t target_core_show_alua_lu_gp(void *p, char *page) { struct se_device *dev; - struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p; + struct se_subsystem_dev *su_dev = p; struct config_item *lu_ci; struct t10_alua_lu_gp *lu_gp; struct t10_alua_lu_gp_member *lu_gp_mem; @@ -1860,7 +1868,7 @@ static ssize_t target_core_store_alua_lu_gp( size_t count) { struct se_device *dev; - struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p; + struct se_subsystem_dev *su_dev = p; struct se_hba *hba = su_dev->se_dev_hba; struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL; struct t10_alua_lu_gp_member *lu_gp_mem; diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 9b86394..edbcabb 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -42,13 +42,11 @@ #include <scsi/scsi_device.h> #include <target/target_core_base.h> -#include <target/target_core_device.h> -#include <target/target_core_tpg.h> -#include <target/target_core_transport.h> -#include <target/target_core_fabric_ops.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> +#include "target_core_internal.h" #include "target_core_alua.h" -#include "target_core_hba.h" #include "target_core_pr.h" #include "target_core_ua.h" @@ -322,11 +320,12 @@ int core_free_device_list_for_node( void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd) { struct se_dev_entry *deve; + unsigned long flags; - spin_lock_irq(&se_nacl->device_list_lock); + spin_lock_irqsave(&se_nacl->device_list_lock, flags); deve = &se_nacl->device_list[se_cmd->orig_fe_lun]; deve->deve_cmds--; - spin_unlock_irq(&se_nacl->device_list_lock); + spin_unlock_irqrestore(&se_nacl->device_list_lock, flags); } void core_update_device_list_access( @@ -658,7 +657,7 @@ int target_report_luns(struct se_task *se_task) unsigned char *buf; u32 cdb_offset = 0, lun_count = 0, offset = 8, i; - buf = transport_kmap_first_data_page(se_cmd); + buf = (unsigned char *) transport_kmap_data_sg(se_cmd); /* * If no struct se_session pointer is present, this struct se_cmd is @@ -696,7 +695,7 @@ int target_report_luns(struct se_task *se_task) * See SPC3 r07, page 159. 
*/ done: - transport_kunmap_first_data_page(se_cmd); + transport_kunmap_data_sg(se_cmd); lun_count *= 8; buf[0] = ((lun_count >> 24) & 0xff); buf[1] = ((lun_count >> 16) & 0xff); @@ -1134,8 +1133,6 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag) */ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) { - u32 orig_queue_depth = dev->queue_depth; - if (atomic_read(&dev->dev_export_obj.obj_access_count)) { pr_err("dev[%p]: Unable to change SE Device TCQ while" " dev_export_obj: %d count exists\n", dev, @@ -1169,11 +1166,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) } dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth; - if (queue_depth > orig_queue_depth) - atomic_add(queue_depth - orig_queue_depth, &dev->depth_left); - else if (queue_depth < orig_queue_depth) - atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left); - pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, queue_depth); return 0; @@ -1303,24 +1295,26 @@ struct se_lun *core_dev_add_lun( { struct se_lun *lun_p; u32 lun_access = 0; + int rc; if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) { pr_err("Unable to export struct se_device while dev_access_obj: %d\n", atomic_read(&dev->dev_access_obj.obj_access_count)); - return NULL; + return ERR_PTR(-EACCES); } lun_p = core_tpg_pre_addlun(tpg, lun); - if ((IS_ERR(lun_p)) || !lun_p) - return NULL; + if (IS_ERR(lun_p)) + return lun_p; if (dev->dev_flags & DF_READ_ONLY) lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; else lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; - if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0) - return NULL; + rc = core_tpg_post_addlun(tpg, lun_p, lun_access, dev); + if (rc < 0) + return ERR_PTR(rc); pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), @@ -1357,11 +1351,10 @@ int core_dev_del_lun( u32 unpacked_lun) { struct se_lun *lun; - int ret = 0; - lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret); - if (!lun) - return ret; + lun = core_tpg_pre_dellun(tpg, unpacked_lun); + if (IS_ERR(lun)) + return PTR_ERR(lun); core_tpg_post_dellun(tpg, lun); diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 09b6f87..9a2ce11 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -36,18 +36,14 @@ #include <linux/configfs.h> #include <target/target_core_base.h> -#include <target/target_core_device.h> -#include <target/target_core_tpg.h> -#include <target/target_core_transport.h> -#include <target/target_core_fabric_ops.h> +#include <target/target_core_fabric.h> #include <target/target_core_fabric_configfs.h> #include <target/target_core_configfs.h> #include <target/configfs_macros.h> +#include "target_core_internal.h" #include "target_core_alua.h" -#include "target_core_hba.h" #include "target_core_pr.h" -#include "target_core_stat.h" #define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \ static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \ @@ -770,9 +766,9 @@ static int target_fabric_port_link( lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev, lun->unpacked_lun); - if (IS_ERR(lun_p) || !lun_p) { + if (IS_ERR(lun_p)) { pr_err("core_dev_add_lun() failed\n"); - ret = -EINVAL; + ret = PTR_ERR(lun_p); goto out; } diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c index ec4249b..283a36e 100644 --- 
a/drivers/target/target_core_fabric_lib.c +++ b/drivers/target/target_core_fabric_lib.c @@ -34,13 +34,10 @@ #include <scsi/scsi_cmnd.h> #include <target/target_core_base.h> -#include <target/target_core_device.h> -#include <target/target_core_transport.h> -#include <target/target_core_fabric_lib.h> -#include <target/target_core_fabric_ops.h> +#include <target/target_core_fabric.h> #include <target/target_core_configfs.h> -#include "target_core_hba.h" +#include "target_core_internal.h" #include "target_core_pr.h" /* @@ -402,7 +399,7 @@ char *iscsi_parse_pr_out_transport_id( add_len = ((buf[2] >> 8) & 0xff); add_len |= (buf[3] & 0xff); - tid_len = strlen((char *)&buf[4]); + tid_len = strlen(&buf[4]); tid_len += 4; /* Add four bytes for iSCSI Transport ID header */ tid_len += 1; /* Add one byte for NULL terminator */ padding = ((-tid_len) & 3); @@ -423,11 +420,11 @@ char *iscsi_parse_pr_out_transport_id( * format. */ if (format_code == 0x40) { - p = strstr((char *)&buf[4], ",i,0x"); + p = strstr(&buf[4], ",i,0x"); if (!p) { pr_err("Unable to locate \",i,0x\" seperator" " for Initiator port identifier: %s\n", - (char *)&buf[4]); + &buf[4]); return NULL; } *p = '\0'; /* Terminate iSCSI Name */ diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index b4864fb..7ed58e2 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -37,8 +37,7 @@ #include <scsi/scsi_host.h> #include <target/target_core_base.h> -#include <target/target_core_device.h> -#include <target/target_core_transport.h> +#include <target/target_core_backend.h> #include "target_core_file.h" @@ -86,7 +85,7 @@ static void fd_detach_hba(struct se_hba *hba) static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name) { struct fd_dev *fd_dev; - struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr; + struct fd_host *fd_host = hba->hba_ptr; fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL); if (!fd_dev) { @@ -114,8 +113,8 @@ static struct se_device *fd_create_virtdevice( struct se_device *dev; struct se_dev_limits dev_limits; struct queue_limits *limits; - struct fd_dev *fd_dev = (struct fd_dev *) p; - struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr; + struct fd_dev *fd_dev = p; + struct fd_host *fd_host = hba->hba_ptr; mm_segment_t old_fs; struct file *file; struct inode *inode = NULL; @@ -240,7 +239,7 @@ fail: */ static void fd_free_device(void *p) { - struct fd_dev *fd_dev = (struct fd_dev *) p; + struct fd_dev *fd_dev = p; if (fd_dev->fd_file) { filp_close(fd_dev->fd_file, NULL); @@ -498,7 +497,7 @@ static ssize_t fd_set_configfs_dev_params( orig = opts; - while ((ptr = strsep(&opts, ",")) != NULL) { + while ((ptr = strsep(&opts, ",\n")) != NULL) { if (!*ptr) continue; @@ -559,7 +558,7 @@ out: static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev) { - struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr; + struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) { pr_err("Missing fd_dev_name=\n"); diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c index c68019d..3dd1bd4 100644 --- a/drivers/target/target_core_hba.c +++ b/drivers/target/target_core_hba.c @@ -37,11 +37,10 @@ #include <net/tcp.h> #include <target/target_core_base.h> -#include <target/target_core_device.h> -#include <target/target_core_tpg.h> -#include <target/target_core_transport.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> 
-#include "target_core_hba.h" +#include "target_core_internal.h" static LIST_HEAD(subsystem_list); static DEFINE_MUTEX(subsystem_mutex); diff --git a/drivers/target/target_core_hba.h b/drivers/target/target_core_hba.h deleted file mode 100644 index bb0fea5..0000000 --- a/drivers/target/target_core_hba.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef TARGET_CORE_HBA_H -#define TARGET_CORE_HBA_H - -extern struct se_hba *core_alloc_hba(const char *, u32, u32); -extern int core_delete_hba(struct se_hba *); - -#endif /* TARGET_CORE_HBA_H */ diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 4aa9922..8572eae 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -42,8 +42,7 @@ #include <scsi/scsi_host.h> #include <target/target_core_base.h> -#include <target/target_core_device.h> -#include <target/target_core_transport.h> +#include <target/target_core_backend.h> #include "target_core_iblock.h" @@ -130,7 +129,7 @@ static struct se_device *iblock_create_virtdevice( /* * These settings need to be made tunable.. */ - ib_dev->ibd_bio_set = bioset_create(32, 64); + ib_dev->ibd_bio_set = bioset_create(32, 0); if (!ib_dev->ibd_bio_set) { pr_err("IBLOCK: Unable to create bioset()\n"); return ERR_PTR(-ENOMEM); @@ -182,7 +181,7 @@ static struct se_device *iblock_create_virtdevice( */ dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1; dev->se_sub_dev->se_dev_attrib.unmap_granularity = - q->limits.discard_granularity; + q->limits.discard_granularity >> 9; dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = q->limits.discard_alignment; @@ -391,7 +390,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, orig = opts; - while ((ptr = strsep(&opts, ",")) != NULL) { + while ((ptr = strsep(&opts, ",\n")) != NULL) { if (!*ptr) continue; @@ -465,7 +464,7 @@ static ssize_t iblock_show_configfs_dev_params( if (bd) { bl += sprintf(b + bl, "Major: %d Minor: %d %s\n", MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ? - "" : (bd->bd_holder == (struct iblock_dev *)ibd) ? + "" : (bd->bd_holder == ibd) ? "CLAIMED: IBLOCK" : "CLAIMED: OS"); } else { bl += sprintf(b + bl, "Major: 0 Minor: 0\n"); @@ -489,6 +488,13 @@ iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num) struct iblock_req *ib_req = IBLOCK_REQ(task); struct bio *bio; + /* + * Only allocate as many vector entries as the bio code allows us to, + * we'll loop later on until we have handled the whole request. 
+ */ + if (sg_num > BIO_MAX_PAGES) + sg_num = BIO_MAX_PAGES; + bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); if (!bio) { pr_err("Unable to allocate memory for bio\n"); diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h new file mode 100644 index 0000000..4500136 --- /dev/null +++ b/drivers/target/target_core_internal.h @@ -0,0 +1,123 @@ +#ifndef TARGET_CORE_INTERNAL_H +#define TARGET_CORE_INTERNAL_H + +/* target_core_alua.c */ +extern struct t10_alua_lu_gp *default_lu_gp; + +/* target_core_cdb.c */ +int target_emulate_inquiry(struct se_task *task); +int target_emulate_readcapacity(struct se_task *task); +int target_emulate_readcapacity_16(struct se_task *task); +int target_emulate_modesense(struct se_task *task); +int target_emulate_request_sense(struct se_task *task); +int target_emulate_unmap(struct se_task *task); +int target_emulate_write_same(struct se_task *task); +int target_emulate_synchronize_cache(struct se_task *task); +int target_emulate_noop(struct se_task *task); + +/* target_core_device.c */ +struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); +int core_free_device_list_for_node(struct se_node_acl *, + struct se_portal_group *); +void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *); +void core_update_device_list_access(u32, u32, struct se_node_acl *); +int core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *, + u32, u32, struct se_node_acl *, struct se_portal_group *, int); +void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *); +int core_dev_export(struct se_device *, struct se_portal_group *, + struct se_lun *); +void core_dev_unexport(struct se_device *, struct se_portal_group *, + struct se_lun *); +int target_report_luns(struct se_task *); +void se_release_device_for_hba(struct se_device *); +void se_release_vpd_for_dev(struct se_device *); +int se_free_virtual_device(struct se_device *, struct se_hba *); +int se_dev_check_online(struct se_device *); +int se_dev_check_shutdown(struct se_device *); +void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *); +int se_dev_set_task_timeout(struct se_device *, u32); +int se_dev_set_max_unmap_lba_count(struct se_device *, u32); +int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32); +int se_dev_set_unmap_granularity(struct se_device *, u32); +int se_dev_set_unmap_granularity_alignment(struct se_device *, u32); +int se_dev_set_emulate_dpo(struct se_device *, int); +int se_dev_set_emulate_fua_write(struct se_device *, int); +int se_dev_set_emulate_fua_read(struct se_device *, int); +int se_dev_set_emulate_write_cache(struct se_device *, int); +int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int); +int se_dev_set_emulate_tas(struct se_device *, int); +int se_dev_set_emulate_tpu(struct se_device *, int); +int se_dev_set_emulate_tpws(struct se_device *, int); +int se_dev_set_enforce_pr_isids(struct se_device *, int); +int se_dev_set_is_nonrot(struct se_device *, int); +int se_dev_set_emulate_rest_reord(struct se_device *dev, int); +int se_dev_set_queue_depth(struct se_device *, u32); +int se_dev_set_max_sectors(struct se_device *, u32); +int se_dev_set_optimal_sectors(struct se_device *, u32); +int se_dev_set_block_size(struct se_device *, u32); +struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *, + struct se_device *, u32); +int core_dev_del_lun(struct se_portal_group *, u32); +struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, 
u32); +struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *, + u32, char *, int *); +int core_dev_add_initiator_node_lun_acl(struct se_portal_group *, + struct se_lun_acl *, u32, u32); +int core_dev_del_initiator_node_lun_acl(struct se_portal_group *, + struct se_lun *, struct se_lun_acl *); +void core_dev_free_initiator_node_lun_acl(struct se_portal_group *, + struct se_lun_acl *lacl); +int core_dev_setup_virtual_lun0(void); +void core_dev_release_virtual_lun0(void); + +/* target_core_hba.c */ +struct se_hba *core_alloc_hba(const char *, u32, u32); +int core_delete_hba(struct se_hba *); + +/* target_core_tmr.c */ +int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *, + struct list_head *, struct se_cmd *); + +/* target_core_tpg.c */ +extern struct se_device *g_lun0_dev; + +struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, + const char *); +struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, + unsigned char *); +void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *); +void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *); +struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32); +int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *, + u32, void *); +struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun); +int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *); + +/* target_core_transport.c */ +extern struct kmem_cache *se_tmr_req_cache; + +int init_se_kmem_caches(void); +void release_se_kmem_caches(void); +u32 scsi_get_new_index(scsi_index_t); +void transport_subsystem_check_init(void); +void transport_cmd_finish_abort(struct se_cmd *, int); +void __transport_remove_task_from_execute_queue(struct se_task *, + struct se_device *); +unsigned char *transport_dump_cmd_direction(struct se_cmd *); +void transport_dump_dev_state(struct se_device *, char *, int *); +void transport_dump_dev_info(struct se_device *, struct se_lun *, + unsigned long long, char *, int *); +void transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, int); +int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int); +int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int); +int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int); +bool target_stop_task(struct se_task *task, unsigned long *flags); +int transport_clear_lun_from_sessions(struct se_lun *); +void transport_send_task_abort(struct se_cmd *); + +/* target_core_stat.c */ +void target_stat_setup_dev_default_groups(struct se_subsystem_dev *); +void target_stat_setup_port_default_groups(struct se_lun *); +void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *); + +#endif /* TARGET_CORE_INTERNAL_H */ diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 95dee70..b7c7793 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -33,14 +33,11 @@ #include <asm/unaligned.h> #include <target/target_core_base.h> -#include <target/target_core_device.h> -#include <target/target_core_tmr.h> -#include <target/target_core_tpg.h> -#include <target/target_core_transport.h> -#include <target/target_core_fabric_ops.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> #include <target/target_core_configfs.h> -#include "target_core_hba.h" +#include "target_core_internal.h" #include "target_core_pr.h" #include "target_core_ua.h" @@ -481,6 +478,7 @@ 
static int core_scsi3_pr_seq_non_holder( case READ_MEDIA_SERIAL_NUMBER: case REPORT_LUNS: case REQUEST_SENSE: + case PERSISTENT_RESERVE_IN: ret = 0; /*/ Allowed CDBs */ break; default: @@ -1537,7 +1535,7 @@ static int core_scsi3_decode_spec_i_port( tidh_new->dest_local_nexus = 1; list_add_tail(&tidh_new->dest_list, &tid_dest_list); - buf = transport_kmap_first_data_page(cmd); + buf = transport_kmap_data_sg(cmd); /* * For a PERSISTENT RESERVE OUT specify initiator ports payload, * first extract TransportID Parameter Data Length, and make sure @@ -1788,7 +1786,7 @@ static int core_scsi3_decode_spec_i_port( } - transport_kunmap_first_data_page(cmd); + transport_kunmap_data_sg(cmd); /* * Go ahead and create a registrations from tid_dest_list for the @@ -1836,7 +1834,7 @@ static int core_scsi3_decode_spec_i_port( return 0; out: - transport_kunmap_first_data_page(cmd); + transport_kunmap_data_sg(cmd); /* * For the failure case, release everything from tid_dest_list * including *dest_pr_reg and the configfs dependances.. @@ -2984,21 +2982,6 @@ static void core_scsi3_release_preempt_and_abort( } } -int core_scsi3_check_cdb_abort_and_preempt( - struct list_head *preempt_and_abort_list, - struct se_cmd *cmd) -{ - struct t10_pr_registration *pr_reg, *pr_reg_tmp; - - list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list, - pr_reg_abort_list) { - if (pr_reg->pr_res_key == cmd->pr_res_key) - return 0; - } - - return 1; -} - static int core_scsi3_pro_preempt( struct se_cmd *cmd, int type, @@ -3138,7 +3121,7 @@ static int core_scsi3_pro_preempt( if (!calling_it_nexus) core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A, - ASCQ_2AH_RESERVATIONS_PREEMPTED); + ASCQ_2AH_REGISTRATIONS_PREEMPTED); } spin_unlock(&pr_tmpl->registration_lock); /* @@ -3251,7 +3234,7 @@ static int core_scsi3_pro_preempt( * additional sense code set to REGISTRATIONS PREEMPTED; */ core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A, - ASCQ_2AH_RESERVATIONS_PREEMPTED); + ASCQ_2AH_REGISTRATIONS_PREEMPTED); } spin_unlock(&pr_tmpl->registration_lock); /* @@ -3428,14 +3411,14 @@ static int core_scsi3_emulate_pro_register_and_move( * will be moved to for the TransportID containing SCSI initiator WWN * information. 
*/ - buf = transport_kmap_first_data_page(cmd); + buf = transport_kmap_data_sg(cmd); rtpi = (buf[18] & 0xff) << 8; rtpi |= buf[19] & 0xff; tid_len = (buf[20] & 0xff) << 24; tid_len |= (buf[21] & 0xff) << 16; tid_len |= (buf[22] & 0xff) << 8; tid_len |= buf[23] & 0xff; - transport_kunmap_first_data_page(cmd); + transport_kunmap_data_sg(cmd); buf = NULL; if ((tid_len + 24) != cmd->data_length) { @@ -3487,7 +3470,7 @@ static int core_scsi3_emulate_pro_register_and_move( return -EINVAL; } - buf = transport_kmap_first_data_page(cmd); + buf = transport_kmap_data_sg(cmd); proto_ident = (buf[24] & 0x0f); #if 0 pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:" @@ -3521,7 +3504,7 @@ static int core_scsi3_emulate_pro_register_and_move( goto out; } - transport_kunmap_first_data_page(cmd); + transport_kunmap_data_sg(cmd); buf = NULL; pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s" @@ -3786,13 +3769,13 @@ after_iport_check: " REGISTER_AND_MOVE\n"); } - transport_kunmap_first_data_page(cmd); + transport_kunmap_data_sg(cmd); core_scsi3_put_pr_reg(dest_pr_reg); return 0; out: if (buf) - transport_kunmap_first_data_page(cmd); + transport_kunmap_data_sg(cmd); if (dest_se_deve) core_scsi3_lunacl_undepend_item(dest_se_deve); if (dest_node_acl) @@ -3866,7 +3849,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task) scope = (cdb[2] & 0xf0); type = (cdb[2] & 0x0f); - buf = transport_kmap_first_data_page(cmd); + buf = transport_kmap_data_sg(cmd); /* * From PERSISTENT_RESERVE_OUT parameter list (payload) */ @@ -3884,7 +3867,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task) aptpl = (buf[17] & 0x01); unreg = (buf[17] & 0x02); } - transport_kunmap_first_data_page(cmd); + transport_kunmap_data_sg(cmd); buf = NULL; /* @@ -3984,7 +3967,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) return -EINVAL; } - buf = transport_kmap_first_data_page(cmd); + buf = transport_kmap_data_sg(cmd); buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); @@ -4018,7 +4001,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) buf[6] = ((add_len >> 8) & 0xff); buf[7] = (add_len & 0xff); - transport_kunmap_first_data_page(cmd); + transport_kunmap_data_sg(cmd); return 0; } @@ -4044,7 +4027,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) return -EINVAL; } - buf = transport_kmap_first_data_page(cmd); + buf = transport_kmap_data_sg(cmd); buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); @@ -4103,7 +4086,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) err: spin_unlock(&se_dev->dev_reservation_lock); - transport_kunmap_first_data_page(cmd); + transport_kunmap_data_sg(cmd); return 0; } @@ -4127,7 +4110,7 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) return -EINVAL; } - buf = transport_kmap_first_data_page(cmd); + buf = transport_kmap_data_sg(cmd); buf[0] = ((add_len << 8) & 0xff); buf[1] = (add_len & 0xff); @@ -4159,7 +4142,7 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */ buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */ - transport_kunmap_first_data_page(cmd); + transport_kunmap_data_sg(cmd); return 0; } @@ -4189,7 +4172,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) return -EINVAL; } - buf = 
transport_kmap_first_data_page(cmd); + buf = transport_kmap_data_sg(cmd); buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); @@ -4310,7 +4293,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) buf[6] = ((add_len >> 8) & 0xff); buf[7] = (add_len & 0xff); - transport_kunmap_first_data_page(cmd); + transport_kunmap_data_sg(cmd); return 0; } diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h index b97f694..7a233fe 100644 --- a/drivers/target/target_core_pr.h +++ b/drivers/target/target_core_pr.h @@ -60,8 +60,6 @@ extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *, struct se_node_acl *); extern void core_scsi3_free_all_registrations(struct se_device *); extern unsigned char *core_scsi3_pr_dump_type(int); -extern int core_scsi3_check_cdb_abort_and_preempt(struct list_head *, - struct se_cmd *); extern int target_scsi3_emulate_pr_in(struct se_task *task); extern int target_scsi3_emulate_pr_out(struct se_task *task); diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 8b15e56..8d4def3 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -44,8 +44,7 @@ #include <scsi/scsi_tcq.h> #include <target/target_core_base.h> -#include <target/target_core_device.h> -#include <target/target_core_transport.h> +#include <target/target_core_backend.h> #include "target_core_pscsi.h" @@ -105,7 +104,7 @@ static void pscsi_detach_hba(struct se_hba *hba) static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) { - struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr; + struct pscsi_hba_virt *phv = hba->hba_ptr; struct Scsi_Host *sh = phv->phv_lld_host; /* * Release the struct Scsi_Host @@ -351,7 +350,6 @@ static struct se_device *pscsi_add_device_to_list( * scsi_device_put() and the pdv->pdv_sd cleared. 
*/ pdv->pdv_sd = sd; - dev = transport_add_device_to_core_hba(hba, &pscsi_template, se_dev, dev_flags, pdv, &dev_limits, NULL, NULL); @@ -406,7 +404,7 @@ static struct se_device *pscsi_create_type_disk( __releases(sh->host_lock) { struct se_device *dev; - struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; + struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr; struct Scsi_Host *sh = sd->host; struct block_device *bd; u32 dev_flags = 0; @@ -454,7 +452,7 @@ static struct se_device *pscsi_create_type_rom( __releases(sh->host_lock) { struct se_device *dev; - struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; + struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr; struct Scsi_Host *sh = sd->host; u32 dev_flags = 0; @@ -489,7 +487,7 @@ static struct se_device *pscsi_create_type_other( __releases(sh->host_lock) { struct se_device *dev; - struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr; + struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr; struct Scsi_Host *sh = sd->host; u32 dev_flags = 0; @@ -510,10 +508,10 @@ static struct se_device *pscsi_create_virtdevice( struct se_subsystem_dev *se_dev, void *p) { - struct pscsi_dev_virt *pdv = (struct pscsi_dev_virt *)p; + struct pscsi_dev_virt *pdv = p; struct se_device *dev; struct scsi_device *sd; - struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr; + struct pscsi_hba_virt *phv = hba->hba_ptr; struct Scsi_Host *sh = phv->phv_lld_host; int legacy_mode_enable = 0; @@ -695,7 +693,7 @@ static int pscsi_transport_complete(struct se_task *task) if (task->task_se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) { - unsigned char *buf = transport_kmap_first_data_page(task->task_se_cmd); + unsigned char *buf = transport_kmap_data_sg(task->task_se_cmd); if (cdb[0] == MODE_SENSE_10) { if (!(buf[3] & 0x80)) @@ -705,7 +703,7 @@ static int pscsi_transport_complete(struct se_task *task) buf[2] |= 0x80; } - transport_kunmap_first_data_page(task->task_se_cmd); + transport_kunmap_data_sg(task->task_se_cmd); } } after_mode_sense: @@ -818,7 +816,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba, orig = opts; - while ((ptr = strsep(&opts, ",")) != NULL) { + while ((ptr = strsep(&opts, ",\n")) != NULL) { if (!*ptr) continue; @@ -1144,7 +1142,7 @@ static unsigned char *pscsi_get_sense_buffer(struct se_task *task) { struct pscsi_plugin_task *pt = PSCSI_TASK(task); - return (unsigned char *)&pt->pscsi_sense[0]; + return pt->pscsi_sense; } /* pscsi_get_device_rev(): diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 02e51fa..8b68f7b 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -37,9 +37,7 @@ #include <scsi/scsi_host.h> #include <target/target_core_base.h> -#include <target/target_core_device.h> -#include <target/target_core_transport.h> -#include <target/target_core_fabric_ops.h> +#include <target/target_core_backend.h> #include "target_core_rd.h" @@ -474,7 +472,7 @@ static ssize_t rd_set_configfs_dev_params( orig = opts; - while ((ptr = strsep(&opts, ",")) != NULL) { + while ((ptr = strsep(&opts, ",\n")) != NULL) { if (!*ptr) continue; diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index 874152a..f8c2d2c 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c @@ -43,12 +43,12 @@ #include <scsi/scsi_host.h> #include <target/target_core_base.h> -#include <target/target_core_transport.h> -#include 
<target/target_core_fabric_ops.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> #include <target/target_core_configfs.h> #include <target/configfs_macros.h> -#include "target_core_hba.h" +#include "target_core_internal.h" #ifndef INITIAL_JIFFIES #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) @@ -1755,8 +1755,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident( /* scsiAttIntrPortName+scsiAttIntrPortIdentifier */ memset(buf, 0, 64); if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) - tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, - (unsigned char *)&buf[0], 64); + tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, buf, 64); ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf); spin_unlock_irq(&nacl->nacl_sess_lock); diff --git a/drivers/target/target_core_stat.h b/drivers/target/target_core_stat.h deleted file mode 100644 index 86c252f..0000000 --- a/drivers/target/target_core_stat.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef TARGET_CORE_STAT_H -#define TARGET_CORE_STAT_H - -extern void target_stat_setup_dev_default_groups(struct se_subsystem_dev *); -extern void target_stat_setup_port_default_groups(struct se_lun *); -extern void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *); - -#endif /*** TARGET_CORE_STAT_H ***/ diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 6845228..dcb0618 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -32,12 +32,11 @@ #include <scsi/scsi_cmnd.h> #include <target/target_core_base.h> -#include <target/target_core_device.h> -#include <target/target_core_tmr.h> -#include <target/target_core_transport.h> -#include <target/target_core_fabric_ops.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> #include <target/target_core_configfs.h> +#include "target_core_internal.h" #include "target_core_alua.h" #include "target_core_pr.h" @@ -101,6 +100,21 @@ static void core_tmr_handle_tas_abort( transport_cmd_finish_abort(cmd, 0); } +static int target_check_cdb_and_preempt(struct list_head *list, + struct se_cmd *cmd) +{ + struct t10_pr_registration *reg; + + if (!list) + return 0; + list_for_each_entry(reg, list, pr_reg_abort_list) { + if (reg->pr_res_key == cmd->pr_res_key) + return 0; + } + + return 1; +} + static void core_tmr_drain_tmr_list( struct se_device *dev, struct se_tmr_req *tmr, @@ -132,9 +146,7 @@ static void core_tmr_drain_tmr_list( * parameter (eg: for PROUT PREEMPT_AND_ABORT service action * skip non regisration key matching TMRs. */ - if (preempt_and_abort_list && - (core_scsi3_check_cdb_abort_and_preempt( - preempt_and_abort_list, cmd) != 0)) + if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd)) continue; spin_lock(&cmd->t_state_lock); @@ -211,9 +223,7 @@ static void core_tmr_drain_task_list( * For PREEMPT_AND_ABORT usage, only process commands * with a matching reservation key. */ - if (preempt_and_abort_list && - (core_scsi3_check_cdb_abort_and_preempt( - preempt_and_abort_list, cmd) != 0)) + if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd)) continue; /* * Not aborting PROUT PREEMPT_AND_ABORT CDB.. 
@@ -222,7 +232,7 @@ static void core_tmr_drain_task_list( continue; list_move_tail(&task->t_state_list, &drain_task_list); - atomic_set(&task->task_state_active, 0); + task->t_state_active = false; /* * Remove from task execute list before processing drain_task_list */ @@ -321,9 +331,7 @@ static void core_tmr_drain_cmd_list( * For PREEMPT_AND_ABORT usage, only process commands * with a matching reservation key. */ - if (preempt_and_abort_list && - (core_scsi3_check_cdb_abort_and_preempt( - preempt_and_abort_list, cmd) != 0)) + if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd)) continue; /* * Not aborting PROUT PREEMPT_AND_ABORT CDB.. diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 8ddd133..06336ec 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -39,13 +39,10 @@ #include <scsi/scsi_cmnd.h> #include <target/target_core_base.h> -#include <target/target_core_device.h> -#include <target/target_core_tpg.h> -#include <target/target_core_transport.h> -#include <target/target_core_fabric_ops.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> -#include "target_core_hba.h" -#include "target_core_stat.h" +#include "target_core_internal.h" extern struct se_device *g_lun0_dev; @@ -810,8 +807,7 @@ static void core_tpg_shutdown_lun( struct se_lun *core_tpg_pre_dellun( struct se_portal_group *tpg, - u32 unpacked_lun, - int *ret) + u32 unpacked_lun) { struct se_lun *lun; diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 0257658..58cea07 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -45,16 +45,12 @@ #include <scsi/scsi_tcq.h> #include <target/target_core_base.h> -#include <target/target_core_device.h> -#include <target/target_core_tmr.h> -#include <target/target_core_tpg.h> -#include <target/target_core_transport.h> -#include <target/target_core_fabric_ops.h> +#include <target/target_core_backend.h> +#include <target/target_core_fabric.h> #include <target/target_core_configfs.h> +#include "target_core_internal.h" #include "target_core_alua.h" -#include "target_core_cdb.h" -#include "target_core_hba.h" #include "target_core_pr.h" #include "target_core_ua.h" @@ -72,7 +68,7 @@ struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; static int transport_generic_write_pending(struct se_cmd *); static int transport_processing_thread(void *param); -static int __transport_execute_tasks(struct se_device *dev); +static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *); static void transport_complete_task_attr(struct se_cmd *cmd); static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev); @@ -212,14 +208,13 @@ u32 scsi_get_new_index(scsi_index_t type) return new_index; } -void transport_init_queue_obj(struct se_queue_obj *qobj) +static void transport_init_queue_obj(struct se_queue_obj *qobj) { atomic_set(&qobj->queue_cnt, 0); INIT_LIST_HEAD(&qobj->qobj_list); init_waitqueue_head(&qobj->thread_wq); spin_lock_init(&qobj->cmd_queue_lock); } -EXPORT_SYMBOL(transport_init_queue_obj); void transport_subsystem_check_init(void) { @@ -426,18 +421,18 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) if (task->task_flags & TF_ACTIVE) continue; - if (!atomic_read(&task->task_state_active)) - continue; - spin_lock_irqsave(&dev->execute_task_lock, flags); - list_del(&task->t_state_list); - pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n", - 
cmd->se_tfo->get_task_tag(cmd), dev, task); - spin_unlock_irqrestore(&dev->execute_task_lock, flags); + if (task->t_state_active) { + pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n", + cmd->se_tfo->get_task_tag(cmd), dev, task); - atomic_set(&task->task_state_active, 0); - atomic_dec(&cmd->t_task_cdbs_ex_left); + list_del(&task->t_state_list); + atomic_dec(&cmd->t_task_cdbs_ex_left); + task->t_state_active = false; + } + spin_unlock_irqrestore(&dev->execute_task_lock, flags); } + } /* transport_cmd_check_stop(): @@ -696,12 +691,6 @@ void transport_complete_task(struct se_task *task, int success) struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; unsigned long flags; -#if 0 - pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task, - cmd->t_task_cdb[0], dev); -#endif - if (dev) - atomic_inc(&dev->depth_left); spin_lock_irqsave(&cmd->t_state_lock, flags); task->task_flags &= ~TF_ACTIVE; @@ -714,7 +703,7 @@ void transport_complete_task(struct se_task *task, int success) if (dev && dev->transport->transport_complete) { if (dev->transport->transport_complete(task) != 0) { cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; - task->task_sense = 1; + task->task_flags |= TF_HAS_SENSE; success = 1; } } @@ -743,13 +732,7 @@ void transport_complete_task(struct se_task *task, int success) } if (cmd->t_tasks_failed) { - if (!task->task_error_status) { - task->task_error_status = - TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - cmd->scsi_sense_reason = - TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - } - + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; INIT_WORK(&cmd->work, target_complete_failure_work); } else { atomic_set(&cmd->t_transport_complete, 1); @@ -824,7 +807,7 @@ static void __transport_add_task_to_execute_queue( head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev); atomic_inc(&dev->execute_tasks); - if (atomic_read(&task->task_state_active)) + if (task->t_state_active) return; /* * Determine if this task needs to go to HEAD_OF_QUEUE for the @@ -838,7 +821,7 @@ static void __transport_add_task_to_execute_queue( else list_add_tail(&task->t_state_list, &dev->state_task_list); - atomic_set(&task->task_state_active, 1); + task->t_state_active = true; pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd), @@ -853,29 +836,26 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) spin_lock_irqsave(&cmd->t_state_lock, flags); list_for_each_entry(task, &cmd->t_task_list, t_list) { - if (atomic_read(&task->task_state_active)) - continue; - spin_lock(&dev->execute_task_lock); - list_add_tail(&task->t_state_list, &dev->state_task_list); - atomic_set(&task->task_state_active, 1); - - pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", - task->task_se_cmd->se_tfo->get_task_tag( - task->task_se_cmd), task, dev); - + if (!task->t_state_active) { + list_add_tail(&task->t_state_list, + &dev->state_task_list); + task->t_state_active = true; + + pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", + task->task_se_cmd->se_tfo->get_task_tag( + task->task_se_cmd), task, dev); + } spin_unlock(&dev->execute_task_lock); } spin_unlock_irqrestore(&cmd->t_state_lock, flags); } -static void transport_add_tasks_from_cmd(struct se_cmd *cmd) +static void __transport_add_tasks_from_cmd(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; struct se_task *task, *task_prev = NULL; - unsigned long flags; - spin_lock_irqsave(&dev->execute_task_lock, flags); list_for_each_entry(task, &cmd->t_task_list, t_list) 
{ if (!list_empty(&task->t_execute_list)) continue; @@ -886,6 +866,15 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd) __transport_add_task_to_execute_queue(task, task_prev, dev); task_prev = task; } +} + +static void transport_add_tasks_from_cmd(struct se_cmd *cmd) +{ + unsigned long flags; + struct se_device *dev = cmd->se_dev; + + spin_lock_irqsave(&dev->execute_task_lock, flags); + __transport_add_tasks_from_cmd(cmd); spin_unlock_irqrestore(&dev->execute_task_lock, flags); } @@ -896,7 +885,7 @@ void __transport_remove_task_from_execute_queue(struct se_task *task, atomic_dec(&dev->execute_tasks); } -void transport_remove_task_from_execute_queue( +static void transport_remove_task_from_execute_queue( struct se_task *task, struct se_device *dev) { @@ -983,9 +972,8 @@ void transport_dump_dev_state( break; } - *bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d", - atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left), - dev->queue_depth); + *bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d", + atomic_read(&dev->execute_tasks), dev->queue_depth); *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n", dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors); *bl += sprintf(b + *bl, " "); @@ -1267,32 +1255,34 @@ static void core_setup_task_attr_emulation(struct se_device *dev) static void scsi_dump_inquiry(struct se_device *dev) { struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn; + char buf[17]; int i, device_type; /* * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer */ - pr_debug(" Vendor: "); for (i = 0; i < 8; i++) if (wwn->vendor[i] >= 0x20) - pr_debug("%c", wwn->vendor[i]); + buf[i] = wwn->vendor[i]; else - pr_debug(" "); + buf[i] = ' '; + buf[i] = '\0'; + pr_debug(" Vendor: %s\n", buf); - pr_debug(" Model: "); for (i = 0; i < 16; i++) if (wwn->model[i] >= 0x20) - pr_debug("%c", wwn->model[i]); + buf[i] = wwn->model[i]; else - pr_debug(" "); + buf[i] = ' '; + buf[i] = '\0'; + pr_debug(" Model: %s\n", buf); - pr_debug(" Revision: "); for (i = 0; i < 4; i++) if (wwn->revision[i] >= 0x20) - pr_debug("%c", wwn->revision[i]); + buf[i] = wwn->revision[i]; else - pr_debug(" "); - - pr_debug("\n"); + buf[i] = ' '; + buf[i] = '\0'; + pr_debug(" Revision: %s\n", buf); device_type = dev->transport->get_device_type(dev); pr_debug(" Type: %s ", scsi_device_type(device_type)); @@ -1340,9 +1330,6 @@ struct se_device *transport_add_device_to_core_hba( spin_lock_init(&dev->se_port_lock); spin_lock_init(&dev->se_tmr_lock); spin_lock_init(&dev->qf_cmd_lock); - - dev->queue_depth = dev_limits->queue_depth; - atomic_set(&dev->depth_left, dev->queue_depth); atomic_set(&dev->dev_ordered_id, 0); se_dev_set_default_attribs(dev, dev_limits); @@ -1654,6 +1641,81 @@ int transport_handle_cdb_direct( } EXPORT_SYMBOL(transport_handle_cdb_direct); +/** + * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd + * + * @se_cmd: command descriptor to submit + * @se_sess: associated se_sess for endpoint + * @cdb: pointer to SCSI CDB + * @sense: pointer to SCSI sense buffer + * @unpacked_lun: unpacked LUN to reference for struct se_lun + * @data_length: fabric expected data transfer length + * @task_addr: SAM task attribute + * @data_dir: DMA data direction + * @flags: flags for command submission from target_sc_flags_tables + * + * This may only be called from process context, and also currently + * assumes internal allocation of fabric payload buffer by target-core. 
+ **/ +void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, + unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, + u32 data_length, int task_attr, int data_dir, int flags) +{ + struct se_portal_group *se_tpg; + int rc; + + se_tpg = se_sess->se_tpg; + BUG_ON(!se_tpg); + BUG_ON(se_cmd->se_tfo || se_cmd->se_sess); + BUG_ON(in_interrupt()); + /* + * Initialize se_cmd for target operation. From this point + * exceptions are handled by sending exception status via + * target_core_fabric_ops->queue_status() callback + */ + transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, + data_length, data_dir, task_attr, sense); + /* + * Obtain struct se_cmd->cmd_kref reference and add new cmd to + * se_sess->sess_cmd_list. A second kref_get here is necessary + * for fabrics using TARGET_SCF_ACK_KREF that expect a second + * kref_put() to happen during fabric packet acknowledgement. + */ + target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); + /* + * Signal bidirectional data payloads to target-core + */ + if (flags & TARGET_SCF_BIDI_OP) + se_cmd->se_cmd_flags |= SCF_BIDI; + /* + * Locate se_lun pointer and attach it to struct se_cmd + */ + if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) { + transport_send_check_condition_and_sense(se_cmd, + se_cmd->scsi_sense_reason, 0); + target_put_sess_cmd(se_sess, se_cmd); + return; + } + /* + * Sanitize CDBs via transport_generic_cmd_sequencer() and + * allocate the necessary tasks to complete the received CDB+data + */ + rc = transport_generic_allocate_tasks(se_cmd, cdb); + if (rc != 0) { + transport_generic_request_failure(se_cmd); + return; + } + /* + * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend + * for immediate execution of READs, otherwise wait for + * transport_generic_handle_data() to be called for WRITEs + * when fabric has filled the incoming buffer. + */ + transport_handle_cdb_direct(se_cmd); + return; +} +EXPORT_SYMBOL(target_submit_cmd); + /* * Used by fabric module frontends defining a TFO->new_cmd_map() caller * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to @@ -1920,18 +1982,6 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); } -static inline int transport_tcq_window_closed(struct se_device *dev) -{ - if (dev->dev_tcq_window_closed++ < - PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) { - msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT); - } else - msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG); - - wake_up_interruptible(&dev->dev_queue_obj.thread_wq); - return 0; -} - /* * Called from Fabric Module context from transport_execute_tasks() * @@ -2014,13 +2064,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd) static int transport_execute_tasks(struct se_cmd *cmd) { int add_tasks; - - if (se_dev_check_online(cmd->se_dev) != 0) { - cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - transport_generic_request_failure(cmd); - return 0; - } - + struct se_device *se_dev = cmd->se_dev; /* * Call transport_cmd_check_stop() to see if a fabric exception * has occurred that prevents execution. @@ -2034,19 +2078,16 @@ static int transport_execute_tasks(struct se_cmd *cmd) if (!add_tasks) goto execute_tasks; /* - * This calls transport_add_tasks_from_cmd() to handle - * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation - * (if enabled) in __transport_add_task_to_execute_queue() and - * transport_add_task_check_sam_attr(). 
+ * __transport_execute_tasks() -> __transport_add_tasks_from_cmd() + * adds associated se_tasks while holding dev->execute_task_lock + * before I/O dispath to avoid a double spinlock access. */ - transport_add_tasks_from_cmd(cmd); + __transport_execute_tasks(se_dev, cmd); + return 0; } - /* - * Kick the execution queue for the cmd associated struct se_device - * storage object. - */ + execute_tasks: - __transport_execute_tasks(cmd->se_dev); + __transport_execute_tasks(se_dev, NULL); return 0; } @@ -2056,24 +2097,18 @@ execute_tasks: * * Called from transport_processing_thread() */ -static int __transport_execute_tasks(struct se_device *dev) +static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd) { int error; struct se_cmd *cmd = NULL; struct se_task *task = NULL; unsigned long flags; - /* - * Check if there is enough room in the device and HBA queue to send - * struct se_tasks to the selected transport. - */ check_depth: - if (!atomic_read(&dev->depth_left)) - return transport_tcq_window_closed(dev); - - dev->dev_tcq_window_closed = 0; - spin_lock_irq(&dev->execute_task_lock); + if (new_cmd != NULL) + __transport_add_tasks_from_cmd(new_cmd); + if (list_empty(&dev->execute_task_list)) { spin_unlock_irq(&dev->execute_task_lock); return 0; @@ -2083,10 +2118,7 @@ check_depth: __transport_remove_task_from_execute_queue(task, dev); spin_unlock_irq(&dev->execute_task_lock); - atomic_dec(&dev->depth_left); - cmd = task->task_se_cmd; - spin_lock_irqsave(&cmd->t_state_lock, flags); task->task_flags |= (TF_ACTIVE | TF_SENT); atomic_inc(&cmd->t_task_cdbs_sent); @@ -2107,10 +2139,10 @@ check_depth: spin_unlock_irqrestore(&cmd->t_state_lock, flags); atomic_set(&cmd->t_transport_sent, 0); transport_stop_tasks_for_cmd(cmd); - atomic_inc(&dev->depth_left); transport_generic_request_failure(cmd); } + new_cmd = NULL; goto check_depth; return 0; @@ -2351,7 +2383,7 @@ static int transport_get_sense_data(struct se_cmd *cmd) list_for_each_entry_safe(task, task_tmp, &cmd->t_task_list, t_list) { - if (!task->task_sense) + if (!(task->task_flags & TF_HAS_SENSE)) continue; if (!dev->transport->get_sense_buffer) { @@ -2665,7 +2697,7 @@ static int transport_generic_cmd_sequencer( cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; if (target_check_write_same_discard(&cdb[10], dev) < 0) - goto out_invalid_cdb_field; + goto out_unsupported_cdb; if (!passthrough) cmd->execute_task = target_emulate_write_same; break; @@ -2948,7 +2980,7 @@ static int transport_generic_cmd_sequencer( cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; if (target_check_write_same_discard(&cdb[1], dev) < 0) - goto out_invalid_cdb_field; + goto out_unsupported_cdb; if (!passthrough) cmd->execute_task = target_emulate_write_same; break; @@ -2971,7 +3003,7 @@ static int transport_generic_cmd_sequencer( * of byte 1 bit 3 UNMAP instead of original reserved field */ if (target_check_write_same_discard(&cdb[1], dev) < 0) - goto out_invalid_cdb_field; + goto out_unsupported_cdb; if (!passthrough) cmd->execute_task = target_emulate_write_same; break; @@ -3053,11 +3085,6 @@ static int transport_generic_cmd_sequencer( (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) goto out_unsupported_cdb; - /* Let's limit control cdbs to a page, for simplicity's sake. 
*/ - if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) && - size > PAGE_SIZE) - goto out_invalid_cdb_field; - transport_set_supported_SAM_opcode(cmd); return ret; @@ -3346,6 +3373,32 @@ static inline void transport_free_pages(struct se_cmd *cmd) } /** + * transport_release_cmd - free a command + * @cmd: command to free + * + * This routine unconditionally frees a command, and reference counting + * or list removal must be done in the caller. + */ +static void transport_release_cmd(struct se_cmd *cmd) +{ + BUG_ON(!cmd->se_tfo); + + if (cmd->se_tmr_req) + core_tmr_release_req(cmd->se_tmr_req); + if (cmd->t_task_cdb != cmd->__t_task_cdb) + kfree(cmd->t_task_cdb); + /* + * If this cmd has been setup with target_get_sess_cmd(), drop + * the kref and call ->release_cmd() in kref callback. + */ + if (cmd->check_release != 0) { + target_put_sess_cmd(cmd->se_sess, cmd); + return; + } + cmd->se_tfo->release_cmd(cmd); +} + +/** * transport_put_cmd - release a reference to a command * @cmd: command to release * @@ -3435,9 +3488,11 @@ int transport_generic_map_mem_to_cmd( } EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); -void *transport_kmap_first_data_page(struct se_cmd *cmd) +void *transport_kmap_data_sg(struct se_cmd *cmd) { struct scatterlist *sg = cmd->t_data_sg; + struct page **pages; + int i; BUG_ON(!sg); /* @@ -3445,15 +3500,41 @@ void *transport_kmap_first_data_page(struct se_cmd *cmd) * tcm_loop who may be using a contig buffer from the SCSI midlayer for * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() */ - return kmap(sg_page(sg)) + sg->offset; + if (!cmd->t_data_nents) + return NULL; + else if (cmd->t_data_nents == 1) + return kmap(sg_page(sg)) + sg->offset; + + /* >1 page. use vmap */ + pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL); + if (!pages) + return NULL; + + /* convert sg[] to pages[] */ + for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { + pages[i] = sg_page(sg); + } + + cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); + kfree(pages); + if (!cmd->t_data_vmap) + return NULL; + + return cmd->t_data_vmap + cmd->t_data_sg[0].offset; } -EXPORT_SYMBOL(transport_kmap_first_data_page); +EXPORT_SYMBOL(transport_kmap_data_sg); -void transport_kunmap_first_data_page(struct se_cmd *cmd) +void transport_kunmap_data_sg(struct se_cmd *cmd) { - kunmap(sg_page(cmd->t_data_sg)); + if (!cmd->t_data_nents) + return; + else if (cmd->t_data_nents == 1) + kunmap(sg_page(cmd->t_data_sg)); + + vunmap(cmd->t_data_vmap); + cmd->t_data_vmap = NULL; } -EXPORT_SYMBOL(transport_kunmap_first_data_page); +EXPORT_SYMBOL(transport_kunmap_data_sg); static int transport_generic_get_mem(struct se_cmd *cmd) @@ -3461,6 +3542,7 @@ transport_generic_get_mem(struct se_cmd *cmd) u32 length = cmd->data_length; unsigned int nents; struct page *page; + gfp_t zero_flag; int i = 0; nents = DIV_ROUND_UP(length, PAGE_SIZE); @@ -3471,9 +3553,11 @@ transport_generic_get_mem(struct se_cmd *cmd) cmd->t_data_nents = nents; sg_init_table(cmd->t_data_sg, nents); + zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 
0 : __GFP_ZERO; + while (length) { u32 page_len = min_t(u32, length, PAGE_SIZE); - page = alloc_page(GFP_KERNEL | __GFP_ZERO); + page = alloc_page(GFP_KERNEL | zero_flag); if (!page) goto out; @@ -3701,6 +3785,11 @@ transport_allocate_control_task(struct se_cmd *cmd) struct se_task *task; unsigned long flags; + /* Workaround for handling zero-length control CDBs */ + if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) && + !cmd->data_length) + return 0; + task = transport_generic_get_task(cmd, cmd->data_direction); if (!task) return -ENOMEM; @@ -3772,6 +3861,14 @@ int transport_generic_new_cmd(struct se_cmd *cmd) else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { cmd->t_state = TRANSPORT_COMPLETE; atomic_set(&cmd->t_transport_active, 1); + + if (cmd->t_task_cdb[0] == REQUEST_SENSE) { + u8 ua_asc = 0, ua_ascq = 0; + + core_scsi3_ua_clear_for_request_sense(cmd, + &ua_asc, &ua_ascq); + } + INIT_WORK(&cmd->work, target_complete_ok_work); queue_work(target_completion_wq, &cmd->work); return 0; @@ -3870,33 +3967,6 @@ queue_full: return 0; } -/** - * transport_release_cmd - free a command - * @cmd: command to free - * - * This routine unconditionally frees a command, and reference counting - * or list removal must be done in the caller. - */ -void transport_release_cmd(struct se_cmd *cmd) -{ - BUG_ON(!cmd->se_tfo); - - if (cmd->se_tmr_req) - core_tmr_release_req(cmd->se_tmr_req); - if (cmd->t_task_cdb != cmd->__t_task_cdb) - kfree(cmd->t_task_cdb); - /* - * Check if target_wait_for_sess_cmds() is expecting to - * release se_cmd directly here.. - */ - if (cmd->check_release != 0 && cmd->se_tfo->check_release_cmd) - if (cmd->se_tfo->check_release_cmd(cmd) != 0) - return; - - cmd->se_tfo->release_cmd(cmd); -} -EXPORT_SYMBOL(transport_release_cmd); - void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) { if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { @@ -3923,11 +3993,22 @@ EXPORT_SYMBOL(transport_generic_free_cmd); /* target_get_sess_cmd - Add command to active ->sess_cmd_list * @se_sess: session to reference * @se_cmd: command descriptor to add + * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() */ -void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) +void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, + bool ack_kref) { unsigned long flags; + kref_init(&se_cmd->cmd_kref); + /* + * Add a second kref if the fabric caller is expecting to handle + * fabric acknowledgement that requires two target_put_sess_cmd() + * invocations before se_cmd descriptor release. 
+ */ + if (ack_kref == true) + kref_get(&se_cmd->cmd_kref); + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); se_cmd->check_release = 1; @@ -3935,30 +4016,36 @@ void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) } EXPORT_SYMBOL(target_get_sess_cmd); -/* target_put_sess_cmd - Check for active I/O shutdown or list delete - * @se_sess: session to reference - * @se_cmd: command descriptor to drop - */ -int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) +static void target_release_cmd_kref(struct kref *kref) { + struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); + struct se_session *se_sess = se_cmd->se_sess; unsigned long flags; spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); if (list_empty(&se_cmd->se_cmd_list)) { spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); WARN_ON(1); - return 0; + return; } - if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); complete(&se_cmd->cmd_wait_comp); - return 1; + return; } list_del(&se_cmd->se_cmd_list); spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); - return 0; + se_cmd->se_tfo->release_cmd(se_cmd); +} + +/* target_put_sess_cmd - Check for active I/O shutdown via kref_put + * @se_sess: session to reference + * @se_cmd: command descriptor to drop + */ +int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) +{ + return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); } EXPORT_SYMBOL(target_put_sess_cmd); @@ -4174,7 +4261,7 @@ check_cond: static int transport_clear_lun_thread(void *p) { - struct se_lun *lun = (struct se_lun *)p; + struct se_lun *lun = p; __transport_clear_lun_from_sessions(lun); complete(&lun->lun_shutdown_comp); @@ -4353,6 +4440,7 @@ int transport_send_check_condition_and_sense( case TCM_NON_EXISTENT_LUN: /* CURRENT ERROR */ buffer[offset] = 0x70; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; /* ILLEGAL REQUEST */ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; /* LOGICAL UNIT NOT SUPPORTED */ @@ -4362,6 +4450,7 @@ int transport_send_check_condition_and_sense( case TCM_SECTOR_COUNT_TOO_MANY: /* CURRENT ERROR */ buffer[offset] = 0x70; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; /* ILLEGAL REQUEST */ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; /* INVALID COMMAND OPERATION CODE */ @@ -4370,6 +4459,7 @@ int transport_send_check_condition_and_sense( case TCM_UNKNOWN_MODE_PAGE: /* CURRENT ERROR */ buffer[offset] = 0x70; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; /* ILLEGAL REQUEST */ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; /* INVALID FIELD IN CDB */ @@ -4378,6 +4468,7 @@ int transport_send_check_condition_and_sense( case TCM_CHECK_CONDITION_ABORT_CMD: /* CURRENT ERROR */ buffer[offset] = 0x70; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; /* ABORTED COMMAND */ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; /* BUS DEVICE RESET FUNCTION OCCURRED */ @@ -4387,6 +4478,7 @@ int transport_send_check_condition_and_sense( case TCM_INCORRECT_AMOUNT_OF_DATA: /* CURRENT ERROR */ buffer[offset] = 0x70; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; /* ABORTED COMMAND */ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; /* WRITE ERROR */ @@ -4397,22 +4489,25 @@ int transport_send_check_condition_and_sense( case TCM_INVALID_CDB_FIELD: /* CURRENT ERROR */ buffer[offset] = 0x70; - /* ABORTED COMMAND */ - buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; + 
buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; + /* ILLEGAL REQUEST */ + buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; /* INVALID FIELD IN CDB */ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; break; case TCM_INVALID_PARAMETER_LIST: /* CURRENT ERROR */ buffer[offset] = 0x70; - /* ABORTED COMMAND */ - buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; + /* ILLEGAL REQUEST */ + buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; /* INVALID FIELD IN PARAMETER LIST */ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26; break; case TCM_UNEXPECTED_UNSOLICITED_DATA: /* CURRENT ERROR */ buffer[offset] = 0x70; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; /* ABORTED COMMAND */ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; /* WRITE ERROR */ @@ -4423,6 +4518,7 @@ int transport_send_check_condition_and_sense( case TCM_SERVICE_CRC_ERROR: /* CURRENT ERROR */ buffer[offset] = 0x70; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; /* ABORTED COMMAND */ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; /* PROTOCOL SERVICE CRC ERROR */ @@ -4433,6 +4529,7 @@ int transport_send_check_condition_and_sense( case TCM_SNACK_REJECTED: /* CURRENT ERROR */ buffer[offset] = 0x70; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; /* ABORTED COMMAND */ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; /* READ ERROR */ @@ -4443,6 +4540,7 @@ int transport_send_check_condition_and_sense( case TCM_WRITE_PROTECTED: /* CURRENT ERROR */ buffer[offset] = 0x70; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; /* DATA PROTECT */ buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; /* WRITE PROTECTED */ @@ -4451,6 +4549,7 @@ int transport_send_check_condition_and_sense( case TCM_CHECK_CONDITION_UNIT_ATTENTION: /* CURRENT ERROR */ buffer[offset] = 0x70; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; /* UNIT ATTENTION */ buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); @@ -4460,6 +4559,7 @@ int transport_send_check_condition_and_sense( case TCM_CHECK_CONDITION_NOT_READY: /* CURRENT ERROR */ buffer[offset] = 0x70; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; /* Not Ready */ buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY; transport_get_sense_codes(cmd, &asc, &ascq); @@ -4470,6 +4570,7 @@ int transport_send_check_condition_and_sense( default: /* CURRENT ERROR */ buffer[offset] = 0x70; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; /* ILLEGAL REQUEST */ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; /* LOGICAL UNIT COMMUNICATION FAILURE */ @@ -4545,11 +4646,7 @@ void transport_send_task_abort(struct se_cmd *cmd) cmd->se_tfo->queue_status(cmd); } -/* transport_generic_do_tmr(): - * - * - */ -int transport_generic_do_tmr(struct se_cmd *cmd) +static int transport_generic_do_tmr(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; struct se_tmr_req *tmr = cmd->se_tmr_req; @@ -4597,7 +4694,7 @@ static int transport_processing_thread(void *param) { int ret; struct se_cmd *cmd; - struct se_device *dev = (struct se_device *) param; + struct se_device *dev = param; while (!kthread_should_stop()) { ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq, @@ -4607,8 +4704,6 @@ static int transport_processing_thread(void *param) goto out; get_cmd: - __transport_execute_tasks(dev); - cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj); if (!cmd) continue; diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c index 50a480d..3e12f6b 100644 --- a/drivers/target/target_core_ua.c 
+++ b/drivers/target/target_core_ua.c @@ -30,13 +30,11 @@ #include <scsi/scsi_cmnd.h> #include <target/target_core_base.h> -#include <target/target_core_device.h> -#include <target/target_core_transport.h> -#include <target/target_core_fabric_ops.h> +#include <target/target_core_fabric.h> #include <target/target_core_configfs.h> +#include "target_core_internal.h" #include "target_core_alua.h" -#include "target_core_hba.h" #include "target_core_pr.h" #include "target_core_ua.h" diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 71fc9ce..9e7e26c 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -39,12 +39,8 @@ #include <scsi/fc_encode.h> #include <target/target_core_base.h> -#include <target/target_core_transport.h> -#include <target/target_core_fabric_ops.h> -#include <target/target_core_device.h> -#include <target/target_core_tpg.h> +#include <target/target_core_fabric.h> #include <target/target_core_configfs.h> -#include <target/target_core_tmr.h> #include <target/configfs_macros.h> #include "tcm_fc.h" @@ -367,6 +363,11 @@ static void ft_send_tm(struct ft_cmd *cmd) struct ft_sess *sess; u8 tm_func; + transport_init_se_cmd(&cmd->se_cmd, &ft_configfs->tf_ops, + cmd->sess->se_sess, 0, DMA_NONE, 0, + &cmd->ft_sense_buffer[0]); + target_get_sess_cmd(cmd->sess->se_sess, &cmd->se_cmd, false); + fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp)); switch (fcp->fc_tm_flags) { @@ -420,7 +421,6 @@ static void ft_send_tm(struct ft_cmd *cmd) sess = cmd->sess; transport_send_check_condition_and_sense(&cmd->se_cmd, cmd->se_cmd.scsi_sense_reason, 0); - transport_generic_free_cmd(&cmd->se_cmd, 0); ft_sess_put(sess); return; } @@ -536,12 +536,10 @@ static void ft_send_work(struct work_struct *work) { struct ft_cmd *cmd = container_of(work, struct ft_cmd, work); struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame); - struct se_cmd *se_cmd; struct fcp_cmnd *fcp; int data_dir = 0; u32 data_len; int task_attr; - int ret; fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp)); if (!fcp) @@ -591,15 +589,6 @@ static void ft_send_work(struct work_struct *work) data_len = ntohl(fcp->fc_dl); cmd->cdb = fcp->fc_cdb; } - - se_cmd = &cmd->se_cmd; - /* - * Initialize struct se_cmd descriptor from target_core_mod - * infrastructure - */ - transport_init_se_cmd(se_cmd, &ft_configfs->tf_ops, cmd->sess->se_sess, - data_len, data_dir, task_attr, - &cmd->ft_sense_buffer[0]); /* * Check for FCP task management flags */ @@ -607,39 +596,16 @@ static void ft_send_work(struct work_struct *work) ft_send_tm(cmd); return; } - fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd); - cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun); - ret = transport_lookup_cmd_lun(&cmd->se_cmd, cmd->lun); - if (ret < 0) { - ft_dump_cmd(cmd, __func__); - transport_send_check_condition_and_sense(&cmd->se_cmd, - cmd->se_cmd.scsi_sense_reason, 0); - return; - } - - ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb); - - pr_debug("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret); - ft_dump_cmd(cmd, __func__); - - if (ret == -ENOMEM) { - transport_send_check_condition_and_sense(se_cmd, - TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); - transport_generic_free_cmd(se_cmd, 0); - return; - } - if (ret == -EINVAL) { - if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) - ft_queue_status(se_cmd); - else - transport_send_check_condition_and_sense(se_cmd, - se_cmd->scsi_sense_reason, 0); - transport_generic_free_cmd(se_cmd, 0); - return; - } 
- transport_handle_cdb_direct(se_cmd); + /* + * Use a single se_cmd->cmd_kref as we expect to release se_cmd + * directly from ft_check_stop_free callback in response path. + */ + target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, cmd->cdb, + &cmd->ft_sense_buffer[0], cmd->lun, data_len, + task_attr, data_dir, 0); + pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl); return; err: diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 9402b73..73852fb 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -41,12 +41,8 @@ #include <scsi/libfc.h> #include <target/target_core_base.h> -#include <target/target_core_transport.h> -#include <target/target_core_fabric_ops.h> +#include <target/target_core_fabric.h> #include <target/target_core_fabric_configfs.h> -#include <target/target_core_fabric_lib.h> -#include <target/target_core_device.h> -#include <target/target_core_tpg.h> #include <target/target_core_configfs.h> #include <target/configfs_macros.h> diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index 1369b1c..d8cabc2 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c @@ -48,10 +48,7 @@ #include <scsi/fc_encode.h> #include <target/target_core_base.h> -#include <target/target_core_transport.h> -#include <target/target_core_fabric_ops.h> -#include <target/target_core_device.h> -#include <target/target_core_tpg.h> +#include <target/target_core_fabric.h> #include <target/target_core_configfs.h> #include <target/configfs_macros.h> diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index 3269213..4c0507c 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c @@ -40,10 +40,7 @@ #include <scsi/libfc.h> #include <target/target_core_base.h> -#include <target/target_core_transport.h> -#include <target/target_core_fabric_ops.h> -#include <target/target_core_device.h> -#include <target/target_core_tpg.h> +#include <target/target_core_fabric.h> #include <target/target_core_configfs.h> #include <target/configfs_macros.h> diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c index dd9a5743..220ce7e 100644 --- a/drivers/thermal/thermal_sys.c +++ b/drivers/thermal/thermal_sys.c @@ -1304,7 +1304,7 @@ static struct genl_multicast_group thermal_event_mcgrp = { .name = THERMAL_GENL_MCAST_GROUP_NAME, }; -int generate_netlink_event(u32 orig, enum events event) +int thermal_generate_netlink_event(u32 orig, enum events event) { struct sk_buff *skb; struct nlattr *attr; @@ -1363,7 +1363,7 @@ int generate_netlink_event(u32 orig, enum events event) return result; } -EXPORT_SYMBOL(generate_netlink_event); +EXPORT_SYMBOL(thermal_generate_netlink_event); static int genetlink_init(void) { diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250/8250.c index 9f50c4e..9f50c4e 100644 --- a/drivers/tty/serial/8250.c +++ b/drivers/tty/serial/8250/8250.c diff --git a/drivers/tty/serial/8250.h b/drivers/tty/serial/8250/8250.h index ae027be..ae027be 100644 --- a/drivers/tty/serial/8250.h +++ b/drivers/tty/serial/8250/8250.h diff --git a/drivers/tty/serial/8250_accent.c b/drivers/tty/serial/8250/8250_accent.c index 34b51c6..34b51c6 100644 --- a/drivers/tty/serial/8250_accent.c +++ b/drivers/tty/serial/8250/8250_accent.c diff --git a/drivers/tty/serial/8250_acorn.c b/drivers/tty/serial/8250/8250_acorn.c index b0ce8c5..b0ce8c5 100644 --- a/drivers/tty/serial/8250_acorn.c +++ b/drivers/tty/serial/8250/8250_acorn.c 
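The tcm_fc hunk above drops the open-coded init/lookup/allocate sequence in favour of target_submit_cmd(), which in turn depends on the kref-based se_cmd lifetime added to target_core_transport.c earlier in this diff (kref_init() in target_get_sess_cmd(), an extra kref_get() when ack_kref is set, and release through target_release_cmd_kref() on the final target_put_sess_cmd()). The sketch below is a minimal userspace analogue of that two-reference pattern, offered only to illustrate the lifetime rule; all names are hypothetical, and the real kernel code additionally holds sess_cmd_lock, tracks the command on the per-session list, and completes cmd_wait_comp during session teardown, none of which is modelled here.

/*
 * Userspace analogue of the two-reference command lifetime: one reference
 * taken at setup (kref_init), an optional second one for the fabric "ack"
 * (kref_get when ack_kref is true), and release via a callback when the
 * last reference is dropped (kref_put).  Illustrative names only.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_cmd {
	atomic_int refcount;                 /* stands in for se_cmd->cmd_kref   */
	void (*release)(struct demo_cmd *);  /* stands in for ->release_cmd()    */
};

static void demo_release(struct demo_cmd *cmd)
{
	printf("releasing command %p\n", (void *)cmd);
	free(cmd);
}

/* Set up a command; ack_kref adds the second reference for the fabric ack. */
static struct demo_cmd *demo_get_cmd(bool ack_kref)
{
	struct demo_cmd *cmd = calloc(1, sizeof(*cmd));

	if (!cmd)
		return NULL;
	atomic_init(&cmd->refcount, 1);               /* kref_init()            */
	if (ack_kref)
		atomic_fetch_add(&cmd->refcount, 1);  /* kref_get()             */
	cmd->release = demo_release;
	return cmd;
}

/* Drop one reference; the final put runs the release callback. */
static int demo_put_cmd(struct demo_cmd *cmd)
{
	if (atomic_fetch_sub(&cmd->refcount, 1) == 1) {
		cmd->release(cmd);
		return 1;
	}
	return 0;
}

int main(void)
{
	struct demo_cmd *cmd = demo_get_cmd(true);

	if (!cmd)
		return 1;
	demo_put_cmd(cmd);   /* completion path: command still pinned       */
	demo_put_cmd(cmd);   /* fabric acknowledgement: last put frees it   */
	return 0;
}

With ack_kref set, two puts are required before the descriptor is freed, which mirrors the comment in the patch about fabric acknowledgement needing two target_put_sess_cmd() invocations; a fabric such as tcm_fc that releases the command directly from its check_stop_free path passes false and needs only one.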
diff --git a/drivers/tty/serial/8250_boca.c b/drivers/tty/serial/8250/8250_boca.c index d125dc1..d125dc1 100644 --- a/drivers/tty/serial/8250_boca.c +++ b/drivers/tty/serial/8250/8250_boca.c diff --git a/drivers/tty/serial/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index f574eef..f574eef 100644 --- a/drivers/tty/serial/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c diff --git a/drivers/tty/serial/8250_early.c b/drivers/tty/serial/8250/8250_early.c index eaafb98..eaafb98 100644 --- a/drivers/tty/serial/8250_early.c +++ b/drivers/tty/serial/8250/8250_early.c diff --git a/drivers/tty/serial/8250_exar_st16c554.c b/drivers/tty/serial/8250/8250_exar_st16c554.c index bf53aab..bf53aab 100644 --- a/drivers/tty/serial/8250_exar_st16c554.c +++ b/drivers/tty/serial/8250/8250_exar_st16c554.c diff --git a/drivers/tty/serial/8250_fourport.c b/drivers/tty/serial/8250/8250_fourport.c index be15826..be15826 100644 --- a/drivers/tty/serial/8250_fourport.c +++ b/drivers/tty/serial/8250/8250_fourport.c diff --git a/drivers/tty/serial/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c index f4d3c47..f4d3c47 100644 --- a/drivers/tty/serial/8250_fsl.c +++ b/drivers/tty/serial/8250/8250_fsl.c diff --git a/drivers/tty/serial/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c index d8c0ffb..d8c0ffb 100644 --- a/drivers/tty/serial/8250_gsc.c +++ b/drivers/tty/serial/8250/8250_gsc.c diff --git a/drivers/tty/serial/8250_hp300.c b/drivers/tty/serial/8250/8250_hp300.c index c13438c9..c13438c9 100644 --- a/drivers/tty/serial/8250_hp300.c +++ b/drivers/tty/serial/8250/8250_hp300.c diff --git a/drivers/tty/serial/8250_hub6.c b/drivers/tty/serial/8250/8250_hub6.c index a5c778e..a5c778e 100644 --- a/drivers/tty/serial/8250_hub6.c +++ b/drivers/tty/serial/8250/8250_hub6.c diff --git a/drivers/tty/serial/8250_mca.c b/drivers/tty/serial/8250/8250_mca.c index d20abf0..d20abf0 100644 --- a/drivers/tty/serial/8250_mca.c +++ b/drivers/tty/serial/8250/8250_mca.c diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index da2b0b0..da2b0b0 100644 --- a/drivers/tty/serial/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c diff --git a/drivers/tty/serial/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c index a2f2365..a2f2365 100644 --- a/drivers/tty/serial/8250_pnp.c +++ b/drivers/tty/serial/8250/8250_pnp.c diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig new file mode 100644 index 0000000..591f801 --- /dev/null +++ b/drivers/tty/serial/8250/Kconfig @@ -0,0 +1,280 @@ +# +# The 8250/16550 serial drivers. You shouldn't be in this list unless +# you somehow have an implicit or explicit dependency on SERIAL_8250. +# + +config SERIAL_8250 + tristate "8250/16550 and compatible serial support" + select SERIAL_CORE + ---help--- + This selects whether you want to include the driver for the standard + serial ports. The standard answer is Y. People who might say N + here are those that are setting up dedicated Ethernet WWW/FTP + servers, or users that have one of the various bus mice instead of a + serial mouse and don't intend to use their machine's standard serial + port for anything. (Note that the Cyclades and Stallion multi + serial port drivers do not need this driver built in for them to + work.) + + To compile this driver as a module, choose M here: the + module will be called 8250. + [WARNING: Do not compile this driver as a module if you are using + non-standard serial ports, since the configuration information will + be lost when the driver is unloaded. 
This limitation may be lifted + in the future.] + + BTW1: If you have a mouseman serial mouse which is not recognized by + the X window system, try running gpm first. + + BTW2: If you intend to use a software modem (also called Winmodem) + under Linux, forget it. These modems are crippled and require + proprietary drivers which are only available under Windows. + + Most people will say Y or M here, so that they can use serial mice, + modems and similar devices connecting to the standard serial ports. + +config SERIAL_8250_CONSOLE + bool "Console on 8250/16550 and compatible serial port" + depends on SERIAL_8250=y + select SERIAL_CORE_CONSOLE + ---help--- + If you say Y here, it will be possible to use a serial port as the + system console (the system console is the device which receives all + kernel messages and warnings and which allows logins in single user + mode). This could be useful if some terminal or printer is connected + to that serial port. + + Even if you say Y here, the currently visible virtual console + (/dev/tty0) will still be used as the system console by default, but + you can alter that using a kernel command line option such as + "console=ttyS1". (Try "man bootparam" or see the documentation of + your boot loader (grub or lilo or loadlin) about how to pass options + to the kernel at boot time.) + + If you don't have a VGA card installed and you say Y here, the + kernel will automatically use the first serial line, /dev/ttyS0, as + system console. + + You can set that using a kernel command line option such as + "console=uart8250,io,0x3f8,9600n8" + "console=uart8250,mmio,0xff5e0000,115200n8". + and it will switch to normal serial console when the corresponding + port is ready. + "earlycon=uart8250,io,0x3f8,9600n8" + "earlycon=uart8250,mmio,0xff5e0000,115200n8". + it will not only setup early console. + + If unsure, say N. + +config FIX_EARLYCON_MEM + bool + depends on X86 + default y + +config SERIAL_8250_GSC + tristate + depends on SERIAL_8250 && GSC + default SERIAL_8250 + +config SERIAL_8250_PCI + tristate "8250/16550 PCI device support" if EXPERT + depends on SERIAL_8250 && PCI + default SERIAL_8250 + help + This builds standard PCI serial support. You may be able to + disable this feature if you only need legacy serial support. + Saves about 9K. + +config SERIAL_8250_PNP + tristate "8250/16550 PNP device support" if EXPERT + depends on SERIAL_8250 && PNP + default SERIAL_8250 + help + This builds standard PNP serial support. You may be able to + disable this feature if you only need legacy serial support. + +config SERIAL_8250_HP300 + tristate + depends on SERIAL_8250 && HP300 + default SERIAL_8250 + +config SERIAL_8250_CS + tristate "8250/16550 PCMCIA device support" + depends on PCMCIA && SERIAL_8250 + ---help--- + Say Y here to enable support for 16-bit PCMCIA serial devices, + including serial port cards, modems, and the modem functions of + multi-function Ethernet/modem cards. (PCMCIA- or PC-cards are + credit-card size devices often used with laptops.) + + To compile this driver as a module, choose M here: the + module will be called serial_cs. + + If unsure, say N. + +config SERIAL_8250_NR_UARTS + int "Maximum number of 8250/16550 serial ports" + depends on SERIAL_8250 + default "4" + help + Set this to the number of serial ports you want the driver + to support. This includes any ports discovered via ACPI or + PCI enumeration and any ports that may be added at run-time + via hot-plug, or any ISA multi-port serial cards. 
+ +config SERIAL_8250_RUNTIME_UARTS + int "Number of 8250/16550 serial ports to register at runtime" + depends on SERIAL_8250 + range 0 SERIAL_8250_NR_UARTS + default "4" + help + Set this to the maximum number of serial ports you want + the kernel to register at boot time. This can be overridden + with the module parameter "nr_uarts", or boot-time parameter + 8250.nr_uarts + +config SERIAL_8250_EXTENDED + bool "Extended 8250/16550 serial driver options" + depends on SERIAL_8250 + help + If you wish to use any non-standard features of the standard "dumb" + driver, say Y here. This includes HUB6 support, shared serial + interrupts, special multiport support, support for more than the + four COM 1/2/3/4 boards, etc. + + Note that the answer to this question won't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about serial driver options. If unsure, say N. + +config SERIAL_8250_MANY_PORTS + bool "Support more than 4 legacy serial ports" + depends on SERIAL_8250_EXTENDED && !IA64 + help + Say Y here if you have dumb serial boards other than the four + standard COM 1/2/3/4 ports. This may happen if you have an AST + FourPort, Accent Async, Boca (read the Boca mini-HOWTO, available + from <http://www.tldp.org/docs.html#howto>), or other custom + serial port hardware which acts similar to standard serial port + hardware. If you only use the standard COM 1/2/3/4 ports, you can + say N here to save some memory. You can also say Y if you have an + "intelligent" multiport card such as Cyclades, Digiboards, etc. + +# +# Multi-port serial cards +# + +config SERIAL_8250_FOURPORT + tristate "Support Fourport cards" + depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS + help + Say Y here if you have an AST FourPort serial board. + + To compile this driver as a module, choose M here: the module + will be called 8250_fourport. + +config SERIAL_8250_ACCENT + tristate "Support Accent cards" + depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS + help + Say Y here if you have an Accent Async serial board. + + To compile this driver as a module, choose M here: the module + will be called 8250_accent. + +config SERIAL_8250_BOCA + tristate "Support Boca cards" + depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS + help + Say Y here if you have a Boca serial board. Please read the Boca + mini-HOWTO, available from <http://www.tldp.org/docs.html#howto> + + To compile this driver as a module, choose M here: the module + will be called 8250_boca. + +config SERIAL_8250_EXAR_ST16C554 + tristate "Support Exar ST16C554/554D Quad UART" + depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS + help + The Uplogix Envoy TU301 uses this Exar Quad UART. If you are + tinkering with your Envoy TU301, or have a machine with this UART, + say Y here. + + To compile this driver as a module, choose M here: the module + will be called 8250_exar_st16c554. + +config SERIAL_8250_HUB6 + tristate "Support Hub6 cards" + depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS + help + Say Y here if you have a HUB6 serial board. + + To compile this driver as a module, choose M here: the module + will be called 8250_hub6. + +# +# Misc. options/drivers. +# + +config SERIAL_8250_SHARE_IRQ + bool "Support for sharing serial interrupts" + depends on SERIAL_8250_EXTENDED + help + Some serial boards have hardware support which allows multiple dumb + serial ports on the same board to share a single IRQ. 
To enable + support for this in the serial driver, say Y here. + +config SERIAL_8250_DETECT_IRQ + bool "Autodetect IRQ on standard ports (unsafe)" + depends on SERIAL_8250_EXTENDED + help + Say Y here if you want the kernel to try to guess which IRQ + to use for your serial port. + + This is considered unsafe; it is far better to configure the IRQ in + a boot script using the setserial command. + + If unsure, say N. + +config SERIAL_8250_RSA + bool "Support RSA serial ports" + depends on SERIAL_8250_EXTENDED + help + ::: To be written ::: + +config SERIAL_8250_MCA + tristate "Support 8250-type ports on MCA buses" + depends on SERIAL_8250 != n && MCA + help + Say Y here if you have a MCA serial ports. + + To compile this driver as a module, choose M here: the module + will be called 8250_mca. + +config SERIAL_8250_ACORN + tristate "Acorn expansion card serial port support" + depends on ARCH_ACORN && SERIAL_8250 + help + If you have an Atomwide Serial card or Serial Port card for an Acorn + system, say Y to this option. The driver can handle 1, 2, or 3 port + cards. If unsure, say N. + +config SERIAL_8250_RM9K + bool "Support for MIPS RM9xxx integrated serial port" + depends on SERIAL_8250 != n && SERIAL_RM9000 + select SERIAL_8250_SHARE_IRQ + help + Selecting this option will add support for the integrated serial + port hardware found on MIPS RM9122 and similar processors. + If unsure, say N. + +config SERIAL_8250_FSL + bool + depends on SERIAL_8250_CONSOLE && PPC_UDBG_16550 + default PPC + +config SERIAL_8250_DW + tristate "Support for Synopsys DesignWare 8250 quirks" + depends on SERIAL_8250 && OF + help + Selecting this option will enable handling of the extra features + present in the Synopsys DesignWare APB UART. diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile new file mode 100644 index 0000000..867bba7 --- /dev/null +++ b/drivers/tty/serial/8250/Makefile @@ -0,0 +1,20 @@ +# +# Makefile for the 8250 serial device drivers. 
+# + +obj-$(CONFIG_SERIAL_8250) += 8250.o +obj-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o +obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o +obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o +obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o +obj-$(CONFIG_SERIAL_8250_CS) += serial_cs.o +obj-$(CONFIG_SERIAL_8250_ACORN) += 8250_acorn.o +obj-$(CONFIG_SERIAL_8250_CONSOLE) += 8250_early.o +obj-$(CONFIG_SERIAL_8250_FOURPORT) += 8250_fourport.o +obj-$(CONFIG_SERIAL_8250_ACCENT) += 8250_accent.o +obj-$(CONFIG_SERIAL_8250_BOCA) += 8250_boca.o +obj-$(CONFIG_SERIAL_8250_EXAR_ST16C554) += 8250_exar_st16c554.o +obj-$(CONFIG_SERIAL_8250_HUB6) += 8250_hub6.o +obj-$(CONFIG_SERIAL_8250_MCA) += 8250_mca.o +obj-$(CONFIG_SERIAL_8250_FSL) += 8250_fsl.o +obj-$(CONFIG_SERIAL_8250_DW) += 8250_dw.o diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/8250/m32r_sio.c index 94a6792..94a6792 100644 --- a/drivers/tty/serial/m32r_sio.c +++ b/drivers/tty/serial/8250/m32r_sio.c diff --git a/drivers/tty/serial/m32r_sio.h b/drivers/tty/serial/8250/m32r_sio.h index e9b7e11..e9b7e11 100644 --- a/drivers/tty/serial/m32r_sio.h +++ b/drivers/tty/serial/8250/m32r_sio.h diff --git a/drivers/tty/serial/m32r_sio_reg.h b/drivers/tty/serial/8250/m32r_sio_reg.h index 4671473..4671473 100644 --- a/drivers/tty/serial/m32r_sio_reg.h +++ b/drivers/tty/serial/8250/m32r_sio_reg.h diff --git a/drivers/tty/serial/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c index 8609060..8609060 100644 --- a/drivers/tty/serial/serial_cs.c +++ b/drivers/tty/serial/8250/serial_cs.c diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index aca2386..2de9924 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -5,279 +5,7 @@ menu "Serial drivers" depends on HAS_IOMEM -# -# The new 8250/16550 serial drivers -config SERIAL_8250 - tristate "8250/16550 and compatible serial support" - select SERIAL_CORE - ---help--- - This selects whether you want to include the driver for the standard - serial ports. The standard answer is Y. People who might say N - here are those that are setting up dedicated Ethernet WWW/FTP - servers, or users that have one of the various bus mice instead of a - serial mouse and don't intend to use their machine's standard serial - port for anything. (Note that the Cyclades and Stallion multi - serial port drivers do not need this driver built in for them to - work.) - - To compile this driver as a module, choose M here: the - module will be called 8250. - [WARNING: Do not compile this driver as a module if you are using - non-standard serial ports, since the configuration information will - be lost when the driver is unloaded. This limitation may be lifted - in the future.] - - BTW1: If you have a mouseman serial mouse which is not recognized by - the X window system, try running gpm first. - - BTW2: If you intend to use a software modem (also called Winmodem) - under Linux, forget it. These modems are crippled and require - proprietary drivers which are only available under Windows. - - Most people will say Y or M here, so that they can use serial mice, - modems and similar devices connecting to the standard serial ports. - -config SERIAL_8250_CONSOLE - bool "Console on 8250/16550 and compatible serial port" - depends on SERIAL_8250=y - select SERIAL_CORE_CONSOLE - ---help--- - If you say Y here, it will be possible to use a serial port as the - system console (the system console is the device which receives all - kernel messages and warnings and which allows logins in single user - mode). 
This could be useful if some terminal or printer is connected - to that serial port. - - Even if you say Y here, the currently visible virtual console - (/dev/tty0) will still be used as the system console by default, but - you can alter that using a kernel command line option such as - "console=ttyS1". (Try "man bootparam" or see the documentation of - your boot loader (grub or lilo or loadlin) about how to pass options - to the kernel at boot time.) - - If you don't have a VGA card installed and you say Y here, the - kernel will automatically use the first serial line, /dev/ttyS0, as - system console. - - You can set that using a kernel command line option such as - "console=uart8250,io,0x3f8,9600n8" - "console=uart8250,mmio,0xff5e0000,115200n8". - and it will switch to normal serial console when the corresponding - port is ready. - "earlycon=uart8250,io,0x3f8,9600n8" - "earlycon=uart8250,mmio,0xff5e0000,115200n8". - it will not only setup early console. - - If unsure, say N. - -config FIX_EARLYCON_MEM - bool - depends on X86 - default y - -config SERIAL_8250_GSC - tristate - depends on SERIAL_8250 && GSC - default SERIAL_8250 - -config SERIAL_8250_PCI - tristate "8250/16550 PCI device support" if EXPERT - depends on SERIAL_8250 && PCI - default SERIAL_8250 - help - This builds standard PCI serial support. You may be able to - disable this feature if you only need legacy serial support. - Saves about 9K. - -config SERIAL_8250_PNP - tristate "8250/16550 PNP device support" if EXPERT - depends on SERIAL_8250 && PNP - default SERIAL_8250 - help - This builds standard PNP serial support. You may be able to - disable this feature if you only need legacy serial support. - -config SERIAL_8250_FSL - bool - depends on SERIAL_8250_CONSOLE && PPC_UDBG_16550 - default PPC - -config SERIAL_8250_HP300 - tristate - depends on SERIAL_8250 && HP300 - default SERIAL_8250 - -config SERIAL_8250_CS - tristate "8250/16550 PCMCIA device support" - depends on PCMCIA && SERIAL_8250 - ---help--- - Say Y here to enable support for 16-bit PCMCIA serial devices, - including serial port cards, modems, and the modem functions of - multi-function Ethernet/modem cards. (PCMCIA- or PC-cards are - credit-card size devices often used with laptops.) - - To compile this driver as a module, choose M here: the - module will be called serial_cs. - - If unsure, say N. - -config SERIAL_8250_NR_UARTS - int "Maximum number of 8250/16550 serial ports" - depends on SERIAL_8250 - default "4" - help - Set this to the number of serial ports you want the driver - to support. This includes any ports discovered via ACPI or - PCI enumeration and any ports that may be added at run-time - via hot-plug, or any ISA multi-port serial cards. - -config SERIAL_8250_RUNTIME_UARTS - int "Number of 8250/16550 serial ports to register at runtime" - depends on SERIAL_8250 - range 0 SERIAL_8250_NR_UARTS - default "4" - help - Set this to the maximum number of serial ports you want - the kernel to register at boot time. This can be overridden - with the module parameter "nr_uarts", or boot-time parameter - 8250.nr_uarts - -config SERIAL_8250_EXTENDED - bool "Extended 8250/16550 serial driver options" - depends on SERIAL_8250 - help - If you wish to use any non-standard features of the standard "dumb" - driver, say Y here. This includes HUB6 support, shared serial - interrupts, special multiport support, support for more than the - four COM 1/2/3/4 boards, etc. 
- - Note that the answer to this question won't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about serial driver options. If unsure, say N. - -config SERIAL_8250_MANY_PORTS - bool "Support more than 4 legacy serial ports" - depends on SERIAL_8250_EXTENDED && !IA64 - help - Say Y here if you have dumb serial boards other than the four - standard COM 1/2/3/4 ports. This may happen if you have an AST - FourPort, Accent Async, Boca (read the Boca mini-HOWTO, available - from <http://www.tldp.org/docs.html#howto>), or other custom - serial port hardware which acts similar to standard serial port - hardware. If you only use the standard COM 1/2/3/4 ports, you can - say N here to save some memory. You can also say Y if you have an - "intelligent" multiport card such as Cyclades, Digiboards, etc. - -# -# Multi-port serial cards -# - -config SERIAL_8250_FOURPORT - tristate "Support Fourport cards" - depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS - help - Say Y here if you have an AST FourPort serial board. - - To compile this driver as a module, choose M here: the module - will be called 8250_fourport. - -config SERIAL_8250_ACCENT - tristate "Support Accent cards" - depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS - help - Say Y here if you have an Accent Async serial board. - - To compile this driver as a module, choose M here: the module - will be called 8250_accent. - -config SERIAL_8250_BOCA - tristate "Support Boca cards" - depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS - help - Say Y here if you have a Boca serial board. Please read the Boca - mini-HOWTO, available from <http://www.tldp.org/docs.html#howto> - - To compile this driver as a module, choose M here: the module - will be called 8250_boca. - -config SERIAL_8250_EXAR_ST16C554 - tristate "Support Exar ST16C554/554D Quad UART" - depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS - help - The Uplogix Envoy TU301 uses this Exar Quad UART. If you are - tinkering with your Envoy TU301, or have a machine with this UART, - say Y here. - - To compile this driver as a module, choose M here: the module - will be called 8250_exar_st16c554. - -config SERIAL_8250_HUB6 - tristate "Support Hub6 cards" - depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS - help - Say Y here if you have a HUB6 serial board. - - To compile this driver as a module, choose M here: the module - will be called 8250_hub6. - -config SERIAL_8250_SHARE_IRQ - bool "Support for sharing serial interrupts" - depends on SERIAL_8250_EXTENDED - help - Some serial boards have hardware support which allows multiple dumb - serial ports on the same board to share a single IRQ. To enable - support for this in the serial driver, say Y here. - -config SERIAL_8250_DETECT_IRQ - bool "Autodetect IRQ on standard ports (unsafe)" - depends on SERIAL_8250_EXTENDED - help - Say Y here if you want the kernel to try to guess which IRQ - to use for your serial port. - - This is considered unsafe; it is far better to configure the IRQ in - a boot script using the setserial command. - - If unsure, say N. - -config SERIAL_8250_RSA - bool "Support RSA serial ports" - depends on SERIAL_8250_EXTENDED - help - ::: To be written ::: - -config SERIAL_8250_MCA - tristate "Support 8250-type ports on MCA buses" - depends on SERIAL_8250 != n && MCA - help - Say Y here if you have a MCA serial ports. - - To compile this driver as a module, choose M here: the module - will be called 8250_mca. 
- -config SERIAL_8250_ACORN - tristate "Acorn expansion card serial port support" - depends on ARCH_ACORN && SERIAL_8250 - help - If you have an Atomwide Serial card or Serial Port card for an Acorn - system, say Y to this option. The driver can handle 1, 2, or 3 port - cards. If unsure, say N. - -config SERIAL_8250_RM9K - bool "Support for MIPS RM9xxx integrated serial port" - depends on SERIAL_8250 != n && SERIAL_RM9000 - select SERIAL_8250_SHARE_IRQ - help - Selecting this option will add support for the integrated serial - port hardware found on MIPS RM9122 and similar processors. - If unsure, say N. - -config SERIAL_8250_DW - tristate "Support for Synopsys DesignWare 8250 quirks" - depends on SERIAL_8250 && OF - help - Selecting this option will enable handling of the extra features - present in the Synopsys DesignWare APB UART. +source "drivers/tty/serial/8250/Kconfig" comment "Non-8250 serial port support" @@ -536,15 +264,6 @@ config SERIAL_MAX3107 help MAX3107 chip support -config SERIAL_MAX3107_AAVA - tristate "MAX3107 AAVA platform support" - depends on X86_MRST && SERIAL_MAX3107 && GPIOLIB - select SERIAL_CORE - help - Support for the MAX3107 chip configuration found on the AAVA - platform. Includes the extra initialisation and GPIO support - neded for this device. - config SERIAL_DZ bool "DECstation DZ serial driver" depends on MACH_DECSTATION && 32BIT diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index f5b01f2..fef32e1 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile @@ -14,22 +14,9 @@ obj-$(CONFIG_SERIAL_SUNZILOG) += sunzilog.o obj-$(CONFIG_SERIAL_SUNSU) += sunsu.o obj-$(CONFIG_SERIAL_SUNSAB) += sunsab.o -obj-$(CONFIG_SERIAL_8250) += 8250.o -obj-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o -obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o -obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o -obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o -obj-$(CONFIG_SERIAL_8250_CS) += serial_cs.o -obj-$(CONFIG_SERIAL_8250_ACORN) += 8250_acorn.o -obj-$(CONFIG_SERIAL_8250_CONSOLE) += 8250_early.o -obj-$(CONFIG_SERIAL_8250_FOURPORT) += 8250_fourport.o -obj-$(CONFIG_SERIAL_8250_ACCENT) += 8250_accent.o -obj-$(CONFIG_SERIAL_8250_BOCA) += 8250_boca.o -obj-$(CONFIG_SERIAL_8250_EXAR_ST16C554) += 8250_exar_st16c554.o -obj-$(CONFIG_SERIAL_8250_HUB6) += 8250_hub6.o -obj-$(CONFIG_SERIAL_8250_MCA) += 8250_mca.o -obj-$(CONFIG_SERIAL_8250_FSL) += 8250_fsl.o -obj-$(CONFIG_SERIAL_8250_DW) += 8250_dw.o +# Now bring in any enabled 8250/16450/16550 type drivers. 
+obj-$(CONFIG_SERIAL_8250) += 8250/ + obj-$(CONFIG_SERIAL_AMBA_PL010) += amba-pl010.o obj-$(CONFIG_SERIAL_AMBA_PL011) += amba-pl011.o obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o @@ -42,7 +29,6 @@ obj-$(CONFIG_SERIAL_BFIN_SPORT) += bfin_sport_uart.o obj-$(CONFIG_SERIAL_SAMSUNG) += samsung.o obj-$(CONFIG_SERIAL_MAX3100) += max3100.o obj-$(CONFIG_SERIAL_MAX3107) += max3107.o -obj-$(CONFIG_SERIAL_MAX3107_AAVA) += max3107-aava.o obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o obj-$(CONFIG_SERIAL_MUX) += mux.o obj-$(CONFIG_SERIAL_68328) += 68328serial.o diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 6958594..6800f5f 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -159,6 +159,7 @@ struct uart_amba_port { unsigned int fifosize; /* vendor-specific */ unsigned int lcrh_tx; /* vendor-specific */ unsigned int lcrh_rx; /* vendor-specific */ + unsigned int old_cr; /* state during shutdown */ bool autorts; char type[12]; bool interrupt_may_hang; /* vendor-specific */ @@ -268,7 +269,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap) struct dma_slave_config tx_conf = { .dst_addr = uap->port.mapbase + UART01x_DR, .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, - .direction = DMA_TO_DEVICE, + .direction = DMA_MEM_TO_DEV, .dst_maxburst = uap->fifosize >> 1, }; struct dma_chan *chan; @@ -301,7 +302,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap) struct dma_slave_config rx_conf = { .src_addr = uap->port.mapbase + UART01x_DR, .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, - .direction = DMA_FROM_DEVICE, + .direction = DMA_DEV_TO_MEM, .src_maxburst = uap->fifosize >> 1, }; @@ -480,7 +481,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap) return -EBUSY; } - desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_TO_DEVICE, + desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) { dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE); @@ -676,7 +677,7 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap) &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a; dma_dev = rxchan->device; desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1, - DMA_FROM_DEVICE, + DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); /* * If the DMA engine is busy and cannot prepare a @@ -1411,7 +1412,9 @@ static int pl011_startup(struct uart_port *port) while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY) barrier(); - cr = UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE; + /* restore RTS and DTR */ + cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR); + cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE; writew(cr, uap->port.membase + UART011_CR); /* Clear pending error interrupts */ @@ -1469,6 +1472,7 @@ static void pl011_shutdown_channel(struct uart_amba_port *uap, static void pl011_shutdown(struct uart_port *port) { struct uart_amba_port *uap = (struct uart_amba_port *)port; + unsigned int cr; /* * disable all interrupts @@ -1488,9 +1492,16 @@ static void pl011_shutdown(struct uart_port *port) /* * disable the port + * disable the port. It should not disable RTS and DTR. + * Also RTS and DTR state should be preserved to restore + * it during startup(). 
*/ uap->autorts = false; - writew(UART01x_CR_UARTEN | UART011_CR_TXE, uap->port.membase + UART011_CR); + cr = readw(uap->port.membase + UART011_CR); + uap->old_cr = cr; + cr &= UART011_CR_RTS | UART011_CR_DTR; + cr |= UART01x_CR_UARTEN | UART011_CR_TXE; + writew(cr, uap->port.membase + UART011_CR); /* * disable break condition and fifos @@ -1740,9 +1751,19 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) { struct uart_amba_port *uap = amba_ports[co->index]; unsigned int status, old_cr, new_cr; + unsigned long flags; + int locked = 1; clk_enable(uap->clk); + local_irq_save(flags); + if (uap->port.sysrq) + locked = 0; + else if (oops_in_progress) + locked = spin_trylock(&uap->port.lock); + else + spin_lock(&uap->port.lock); + /* * First save the CR then disable the interrupts */ @@ -1762,6 +1783,10 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) } while (status & UART01x_FR_BUSY); writew(old_cr, uap->port.membase + UART011_CR); + if (locked) + spin_unlock(&uap->port.lock); + local_irq_restore(flags); + clk_disable(uap->clk); } @@ -1905,6 +1930,7 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id) uap->vendor = vendor; uap->lcrh_rx = vendor->lcrh_rx; uap->lcrh_tx = vendor->lcrh_tx; + uap->old_cr = 0; uap->fifosize = vendor->fifosize; uap->interrupt_may_hang = vendor->interrupt_may_hang; uap->port.dev = &dev->dev; diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c index 7c867a0..7545fe1 100644 --- a/drivers/tty/serial/jsm/jsm_driver.c +++ b/drivers/tty/serial/jsm/jsm_driver.c @@ -251,6 +251,7 @@ static void jsm_io_resume(struct pci_dev *pdev) struct jsm_board *brd = pci_get_drvdata(pdev); pci_restore_state(pdev); + pci_save_state(pdev); jsm_uart_port_init(brd); } diff --git a/drivers/tty/serial/max3107-aava.c b/drivers/tty/serial/max3107-aava.c deleted file mode 100644 index aae772a..0000000 --- a/drivers/tty/serial/max3107-aava.c +++ /dev/null @@ -1,344 +0,0 @@ -/* - * max3107.c - spi uart protocol driver for Maxim 3107 - * Based on max3100.c - * by Christian Pellegrin <chripell@evolware.org> - * and max3110.c - * by Feng Tang <feng.tang@intel.com> - * - * Copyright (C) Aavamobile 2009 - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - */ - -#include <linux/delay.h> -#include <linux/device.h> -#include <linux/serial_core.h> -#include <linux/serial.h> -#include <linux/spi/spi.h> -#include <linux/freezer.h> -#include <linux/platform_device.h> -#include <linux/gpio.h> -#include <linux/sfi.h> -#include <linux/module.h> -#include <asm/mrst.h> -#include "max3107.h" - -/* GPIO direction to input function */ -static int max3107_gpio_direction_in(struct gpio_chip *chip, unsigned offset) -{ - struct max3107_port *s = container_of(chip, struct max3107_port, chip); - u16 buf[1]; /* Buffer for SPI transfer */ - - if (offset >= MAX3107_GPIO_COUNT) { - dev_err(&s->spi->dev, "Invalid GPIO\n"); - return -EINVAL; - } - - /* Read current GPIO configuration register */ - buf[0] = MAX3107_GPIOCFG_REG; - /* Perform SPI transfer */ - if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) { - dev_err(&s->spi->dev, "SPI transfer GPIO read failed\n"); - return -EIO; - } - buf[0] &= MAX3107_SPI_RX_DATA_MASK; - - /* Set GPIO to input */ - buf[0] &= ~(0x0001 << offset); - - /* Write new GPIO configuration register value */ - buf[0] |= (MAX3107_WRITE_BIT | MAX3107_GPIOCFG_REG); - /* Perform SPI transfer */ - if (max3107_rw(s, (u8 *)buf, NULL, 2)) { - dev_err(&s->spi->dev, "SPI transfer GPIO write failed\n"); - return -EIO; - } - return 0; -} - -/* GPIO direction to output function */ -static int max3107_gpio_direction_out(struct gpio_chip *chip, unsigned offset, - int value) -{ - struct max3107_port *s = container_of(chip, struct max3107_port, chip); - u16 buf[2]; /* Buffer for SPI transfers */ - - if (offset >= MAX3107_GPIO_COUNT) { - dev_err(&s->spi->dev, "Invalid GPIO\n"); - return -EINVAL; - } - - /* Read current GPIO configuration and data registers */ - buf[0] = MAX3107_GPIOCFG_REG; - buf[1] = MAX3107_GPIODATA_REG; - /* Perform SPI transfer */ - if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 4)) { - dev_err(&s->spi->dev, "SPI transfer gpio failed\n"); - return -EIO; - } - buf[0] &= MAX3107_SPI_RX_DATA_MASK; - buf[1] &= MAX3107_SPI_RX_DATA_MASK; - - /* Set GPIO to output */ - buf[0] |= (0x0001 << offset); - /* Set value */ - if (value) - buf[1] |= (0x0001 << offset); - else - buf[1] &= ~(0x0001 << offset); - - /* Write new GPIO configuration and data register values */ - buf[0] |= (MAX3107_WRITE_BIT | MAX3107_GPIOCFG_REG); - buf[1] |= (MAX3107_WRITE_BIT | MAX3107_GPIODATA_REG); - /* Perform SPI transfer */ - if (max3107_rw(s, (u8 *)buf, NULL, 4)) { - dev_err(&s->spi->dev, - "SPI transfer for GPIO conf data w failed\n"); - return -EIO; - } - return 0; -} - -/* GPIO value query function */ -static int max3107_gpio_get(struct gpio_chip *chip, unsigned offset) -{ - struct max3107_port *s = container_of(chip, struct max3107_port, chip); - u16 buf[1]; /* Buffer for SPI transfer */ - - if (offset >= MAX3107_GPIO_COUNT) { - dev_err(&s->spi->dev, "Invalid GPIO\n"); - return -EINVAL; - } - - /* Read current GPIO data register */ - buf[0] = MAX3107_GPIODATA_REG; - /* Perform SPI transfer */ - if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) { - dev_err(&s->spi->dev, "SPI transfer GPIO data r failed\n"); - return -EIO; - } - buf[0] &= MAX3107_SPI_RX_DATA_MASK; - - /* Return value */ - return buf[0] & (0x0001 << offset); -} - -/* GPIO value set function */ -static void 
max3107_gpio_set(struct gpio_chip *chip, unsigned offset, int value) -{ - struct max3107_port *s = container_of(chip, struct max3107_port, chip); - u16 buf[2]; /* Buffer for SPI transfers */ - - if (offset >= MAX3107_GPIO_COUNT) { - dev_err(&s->spi->dev, "Invalid GPIO\n"); - return; - } - - /* Read current GPIO configuration registers*/ - buf[0] = MAX3107_GPIODATA_REG; - buf[1] = MAX3107_GPIOCFG_REG; - /* Perform SPI transfer */ - if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 4)) { - dev_err(&s->spi->dev, - "SPI transfer for GPIO data and config read failed\n"); - return; - } - buf[0] &= MAX3107_SPI_RX_DATA_MASK; - buf[1] &= MAX3107_SPI_RX_DATA_MASK; - - if (!(buf[1] & (0x0001 << offset))) { - /* Configured as input, can't set value */ - dev_warn(&s->spi->dev, - "Trying to set value for input GPIO\n"); - return; - } - - /* Set value */ - if (value) - buf[0] |= (0x0001 << offset); - else - buf[0] &= ~(0x0001 << offset); - - /* Write new GPIO data register value */ - buf[0] |= (MAX3107_WRITE_BIT | MAX3107_GPIODATA_REG); - /* Perform SPI transfer */ - if (max3107_rw(s, (u8 *)buf, NULL, 2)) - dev_err(&s->spi->dev, "SPI transfer GPIO data w failed\n"); -} - -/* GPIO chip data */ -static struct gpio_chip max3107_gpio_chip = { - .owner = THIS_MODULE, - .direction_input = max3107_gpio_direction_in, - .direction_output = max3107_gpio_direction_out, - .get = max3107_gpio_get, - .set = max3107_gpio_set, - .can_sleep = 1, - .base = MAX3107_GPIO_BASE, - .ngpio = MAX3107_GPIO_COUNT, -}; - -/** - * max3107_aava_reset - reset on AAVA systems - * @spi: The SPI device we are probing - * - * Reset the device ready for probing. - */ - -static int max3107_aava_reset(struct spi_device *spi) -{ - /* Reset the chip */ - if (gpio_request(MAX3107_RESET_GPIO, "max3107")) { - pr_err("Requesting RESET GPIO failed\n"); - return -EIO; - } - if (gpio_direction_output(MAX3107_RESET_GPIO, 0)) { - pr_err("Setting RESET GPIO to 0 failed\n"); - gpio_free(MAX3107_RESET_GPIO); - return -EIO; - } - msleep(MAX3107_RESET_DELAY); - if (gpio_direction_output(MAX3107_RESET_GPIO, 1)) { - pr_err("Setting RESET GPIO to 1 failed\n"); - gpio_free(MAX3107_RESET_GPIO); - return -EIO; - } - gpio_free(MAX3107_RESET_GPIO); - msleep(MAX3107_WAKEUP_DELAY); - return 0; -} - -static int max3107_aava_configure(struct max3107_port *s) -{ - int retval; - - /* Initialize GPIO chip data */ - s->chip = max3107_gpio_chip; - s->chip.label = s->spi->modalias; - s->chip.dev = &s->spi->dev; - - /* Add GPIO chip */ - retval = gpiochip_add(&s->chip); - if (retval) { - dev_err(&s->spi->dev, "Adding GPIO chip failed\n"); - return retval; - } - - /* Temporary fix for EV2 boot problems, set modem reset to 0 */ - max3107_gpio_direction_out(&s->chip, 3, 0); - return 0; -} - -#if 0 -/* This will get enabled once we have the board stuff merged for this - specific case */ - -static const struct baud_table brg13_ext[] = { - { 300, MAX3107_BRG13_B300 }, - { 600, MAX3107_BRG13_B600 }, - { 1200, MAX3107_BRG13_B1200 }, - { 2400, MAX3107_BRG13_B2400 }, - { 4800, MAX3107_BRG13_B4800 }, - { 9600, MAX3107_BRG13_B9600 }, - { 19200, MAX3107_BRG13_B19200 }, - { 57600, MAX3107_BRG13_B57600 }, - { 115200, MAX3107_BRG13_B115200 }, - { 230400, MAX3107_BRG13_B230400 }, - { 460800, MAX3107_BRG13_B460800 }, - { 921600, MAX3107_BRG13_B921600 }, - { 0, 0 } -}; - -static void max3107_aava_init(struct max3107_port *s) -{ - /*override for AAVA SC specific*/ - if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC) { - if (get_koski_build_id() <= KOSKI_EV2) - if (s->ext_clk) { - s->brg_cfg = 
MAX3107_BRG13_B9600; - s->baud_tbl = (struct baud_table *)brg13_ext; - } - } -} -#endif - -static int __devexit max3107_aava_remove(struct spi_device *spi) -{ - struct max3107_port *s = dev_get_drvdata(&spi->dev); - - /* Remove GPIO chip */ - if (gpiochip_remove(&s->chip)) - dev_warn(&spi->dev, "Removing GPIO chip failed\n"); - - /* Then do the default remove */ - return max3107_remove(spi); -} - -/* Platform data */ -static struct max3107_plat aava_plat_data = { - .loopback = 0, - .ext_clk = 1, -/* .init = max3107_aava_init, */ - .configure = max3107_aava_configure, - .hw_suspend = max3107_hw_susp, - .polled_mode = 0, - .poll_time = 0, -}; - - -static int __devinit max3107_probe_aava(struct spi_device *spi) -{ - int err = max3107_aava_reset(spi); - if (err < 0) - return err; - return max3107_probe(spi, &aava_plat_data); -} - -/* Spi driver data */ -static struct spi_driver max3107_driver = { - .driver = { - .name = "aava-max3107", - .owner = THIS_MODULE, - }, - .probe = max3107_probe_aava, - .remove = __devexit_p(max3107_aava_remove), - .suspend = max3107_suspend, - .resume = max3107_resume, -}; - -/* Driver init function */ -static int __init max3107_init(void) -{ - return spi_register_driver(&max3107_driver); -} - -/* Driver exit function */ -static void __exit max3107_exit(void) -{ - spi_unregister_driver(&max3107_driver); -} - -module_init(max3107_init); -module_exit(max3107_exit); - -MODULE_DESCRIPTION("MAX3107 driver"); -MODULE_AUTHOR("Aavamobile"); -MODULE_ALIAS("spi:aava-max3107"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index d192dcb..1c24269 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -1160,7 +1160,7 @@ static struct uart_driver serial_omap_reg = { .cons = OMAP_CONSOLE, }; -#ifdef CONFIG_SUSPEND +#ifdef CONFIG_PM_SLEEP static int serial_omap_suspend(struct device *dev) { struct uart_omap_port *up = dev_get_drvdata(dev); @@ -1521,6 +1521,7 @@ static void serial_omap_mdr1_errataset(struct uart_omap_port *up, u8 mdr1) } } +#ifdef CONFIG_PM_RUNTIME static void serial_omap_restore_context(struct uart_omap_port *up) { if (up->errata & UART_ERRATA_i202_MDR1_ACCESS) @@ -1550,7 +1551,6 @@ static void serial_omap_restore_context(struct uart_omap_port *up) serial_out(up, UART_OMAP_MDR1, up->mdr1); } -#ifdef CONFIG_PM_RUNTIME static int serial_omap_runtime_suspend(struct device *dev) { struct uart_omap_port *up = dev_get_drvdata(dev); diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index de0f613..17ae657 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -764,7 +764,7 @@ static int dma_handle_rx(struct eg20t_port *priv) sg_dma_address(sg) = priv->rx_buf_dma; desc = priv->chan_rx->device->device_prep_slave_sg(priv->chan_rx, - sg, 1, DMA_FROM_DEVICE, + sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) @@ -923,7 +923,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv) } desc = priv->chan_tx->device->device_prep_slave_sg(priv->chan_tx, - priv->sg_tx_p, nent, DMA_TO_DEVICE, + priv->sg_tx_p, nent, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) { dev_err(priv->port.dev, "%s:device_prep_slave_sg Failed\n", diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index c7bf31a..1305618 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -2348,11 +2348,11 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port 
*uport) */ tty_dev = tty_register_device(drv->tty_driver, uport->line, uport->dev); if (likely(!IS_ERR(tty_dev))) { - device_init_wakeup(tty_dev, 1); - device_set_wakeup_enable(tty_dev, 0); - } else + device_set_wakeup_capable(tty_dev, 1); + } else { printk(KERN_ERR "Cannot register tty device on line %d\n", uport->line); + } /* * Ensure UPF_DEAD is not set. diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 9e62349..7508579 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -1339,7 +1339,7 @@ static void sci_submit_rx(struct sci_port *s) struct dma_async_tx_descriptor *desc; desc = chan->device->device_prep_slave_sg(chan, - sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT); + sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); if (desc) { s->desc_rx[i] = desc; @@ -1454,7 +1454,7 @@ static void work_fn_tx(struct work_struct *work) BUG_ON(!sg_dma_len(sg)); desc = chan->device->device_prep_slave_sg(chan, - sg, s->sg_len_tx, DMA_TO_DEVICE, + sg, s->sg_len_tx, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) { /* switch to PIO */ diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c index ef9dd62..bf6e238 100644 --- a/drivers/tty/tty_port.c +++ b/drivers/tty/tty_port.c @@ -227,7 +227,6 @@ int tty_port_block_til_ready(struct tty_port *port, int do_clocal = 0, retval; unsigned long flags; DEFINE_WAIT(wait); - int cd; /* block if port is in the process of being closed */ if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) { @@ -284,11 +283,14 @@ int tty_port_block_til_ready(struct tty_port *port, retval = -ERESTARTSYS; break; } - /* Probe the carrier. For devices with no carrier detect this - will always return true */ - cd = tty_port_carrier_raised(port); + /* + * Probe the carrier. For devices with no carrier detect + * tty_port_carrier_raised will always return true. + * Never ask drivers if CLOCAL is set, this causes troubles + * on some hardware. 
+ */ if (!(port->flags & ASYNC_CLOSING) && - (do_clocal || cd)) + (do_clocal || tty_port_carrier_raised(port))) break; if (signal_pending(current)) { retval = -ERESTARTSYS; diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index 5e096f4..65447c5 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -1463,7 +1463,6 @@ compat_kdfontop_ioctl(struct compat_console_font_op __user *fontop, if (!perm && op->op != KD_FONT_OP_GET) return -EPERM; op->data = compat_ptr(((struct compat_console_font_op *)op)->data); - op->flags |= KD_FONT_FLAG_OLD; i = con_font_op(vc, op); if (i) return i; diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 1c50baf..d2b3cff 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -57,6 +57,8 @@ MODULE_DEVICE_TABLE (usb, wdm_ids); #define WDM_MAX 16 +/* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 decimal (0x100)" */ +#define WDM_DEFAULT_BUFSIZE 256 static DEFINE_MUTEX(wdm_mutex); @@ -88,7 +90,8 @@ struct wdm_device { int count; dma_addr_t shandle; dma_addr_t ihandle; - struct mutex lock; + struct mutex wlock; + struct mutex rlock; wait_queue_head_t wait; struct work_struct rxwork; int werr; @@ -323,7 +326,7 @@ static ssize_t wdm_write } /* concurrent writes and disconnect */ - r = mutex_lock_interruptible(&desc->lock); + r = mutex_lock_interruptible(&desc->wlock); rv = -ERESTARTSYS; if (r) { kfree(buf); @@ -386,7 +389,7 @@ static ssize_t wdm_write out: usb_autopm_put_interface(desc->intf); outnp: - mutex_unlock(&desc->lock); + mutex_unlock(&desc->wlock); outnl: return rv < 0 ? rv : count; } @@ -399,7 +402,7 @@ static ssize_t wdm_read struct wdm_device *desc = file->private_data; - rv = mutex_lock_interruptible(&desc->lock); /*concurrent reads */ + rv = mutex_lock_interruptible(&desc->rlock); /*concurrent reads */ if (rv < 0) return -ERESTARTSYS; @@ -467,14 +470,16 @@ retry: for (i = 0; i < desc->length - cntr; i++) desc->ubuf[i] = desc->ubuf[i + cntr]; + spin_lock_irq(&desc->iuspin); desc->length -= cntr; + spin_unlock_irq(&desc->iuspin); /* in case we had outstanding data */ if (!desc->length) clear_bit(WDM_READ, &desc->flags); rv = cntr; err: - mutex_unlock(&desc->lock); + mutex_unlock(&desc->rlock); return rv; } @@ -540,7 +545,8 @@ static int wdm_open(struct inode *inode, struct file *file) } intf->needs_remote_wakeup = 1; - mutex_lock(&desc->lock); + /* using write lock to protect desc->count */ + mutex_lock(&desc->wlock); if (!desc->count++) { desc->werr = 0; desc->rerr = 0; @@ -553,7 +559,7 @@ static int wdm_open(struct inode *inode, struct file *file) } else { rv = 0; } - mutex_unlock(&desc->lock); + mutex_unlock(&desc->wlock); usb_autopm_put_interface(desc->intf); out: mutex_unlock(&wdm_mutex); @@ -565,9 +571,11 @@ static int wdm_release(struct inode *inode, struct file *file) struct wdm_device *desc = file->private_data; mutex_lock(&wdm_mutex); - mutex_lock(&desc->lock); + + /* using write lock to protect desc->count */ + mutex_lock(&desc->wlock); desc->count--; - mutex_unlock(&desc->lock); + mutex_unlock(&desc->wlock); if (!desc->count) { dev_dbg(&desc->intf->dev, "wdm_release: cleanup"); @@ -630,7 +638,7 @@ static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id) struct usb_cdc_dmm_desc *dmhd; u8 *buffer = intf->altsetting->extra; int buflen = intf->altsetting->extralen; - u16 maxcom = 0; + u16 maxcom = WDM_DEFAULT_BUFSIZE; if (!buffer) goto out; @@ -665,7 +673,8 @@ next_desc: desc = kzalloc(sizeof(struct wdm_device), GFP_KERNEL); if (!desc) 
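The cdc-wdm hunks above and continuing below split the single desc->lock into separate rlock and wlock mutexes, so a blocked reader no longer stalls writers and vice versa. Paths that must quiesce both directions (disconnect, suspend, pre-reset) take rlock before wlock in a fixed order to avoid deadlock, and open/release reuse wlock to protect desc->count. The pthread sketch below illustrates that pattern under assumed names; it is not the driver's code.

/* Userspace sketch of the read/write lock split with a fixed ordering
 * (rlock before wlock) for paths that must stop both directions. */
#include <pthread.h>
#include <stdio.h>

struct wdm_like {
	pthread_mutex_t rlock;   /* serialises concurrent readers        */
	pthread_mutex_t wlock;   /* serialises writers and the open count */
	int count;
};

static void do_read(struct wdm_like *d)
{
	pthread_mutex_lock(&d->rlock);
	/* ... copy buffered response data to the caller ... */
	pthread_mutex_unlock(&d->rlock);
}

static void do_write(struct wdm_like *d)
{
	pthread_mutex_lock(&d->wlock);
	/* ... submit a command ... */
	pthread_mutex_unlock(&d->wlock);
}

static void quiesce(struct wdm_like *d)   /* disconnect/suspend/pre-reset style */
{
	pthread_mutex_lock(&d->rlock);        /* always rlock first ...  */
	pthread_mutex_lock(&d->wlock);        /* ... then wlock          */
	/* ... kill URBs, cancel deferred work ... */
	pthread_mutex_unlock(&d->wlock);      /* release in reverse order */
	pthread_mutex_unlock(&d->rlock);
}

int main(void)
{
	struct wdm_like d;

	pthread_mutex_init(&d.rlock, NULL);
	pthread_mutex_init(&d.wlock, NULL);
	d.count = 0;

	do_write(&d);
	do_read(&d);
	quiesce(&d);
	printf("lock ordering exercised without deadlock\n");
	return 0;
}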
goto out; - mutex_init(&desc->lock); + mutex_init(&desc->rlock); + mutex_init(&desc->wlock); spin_lock_init(&desc->iuspin); init_waitqueue_head(&desc->wait); desc->wMaxCommand = maxcom; @@ -716,7 +725,7 @@ next_desc: goto err; desc->inbuf = usb_alloc_coherent(interface_to_usbdev(intf), - desc->bMaxPacketSize0, + desc->wMaxCommand, GFP_KERNEL, &desc->response->transfer_dma); if (!desc->inbuf) @@ -779,11 +788,13 @@ static void wdm_disconnect(struct usb_interface *intf) /* to terminate pending flushes */ clear_bit(WDM_IN_USE, &desc->flags); spin_unlock_irqrestore(&desc->iuspin, flags); - mutex_lock(&desc->lock); + wake_up_all(&desc->wait); + mutex_lock(&desc->rlock); + mutex_lock(&desc->wlock); kill_urbs(desc); cancel_work_sync(&desc->rxwork); - mutex_unlock(&desc->lock); - wake_up_all(&desc->wait); + mutex_unlock(&desc->wlock); + mutex_unlock(&desc->rlock); if (!desc->count) cleanup(desc); mutex_unlock(&wdm_mutex); @@ -798,8 +809,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message) dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor); /* if this is an autosuspend the caller does the locking */ - if (!PMSG_IS_AUTO(message)) - mutex_lock(&desc->lock); + if (!PMSG_IS_AUTO(message)) { + mutex_lock(&desc->rlock); + mutex_lock(&desc->wlock); + } spin_lock_irq(&desc->iuspin); if (PMSG_IS_AUTO(message) && @@ -815,8 +828,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message) kill_urbs(desc); cancel_work_sync(&desc->rxwork); } - if (!PMSG_IS_AUTO(message)) - mutex_unlock(&desc->lock); + if (!PMSG_IS_AUTO(message)) { + mutex_unlock(&desc->wlock); + mutex_unlock(&desc->rlock); + } return rv; } @@ -854,7 +869,8 @@ static int wdm_pre_reset(struct usb_interface *intf) { struct wdm_device *desc = usb_get_intfdata(intf); - mutex_lock(&desc->lock); + mutex_lock(&desc->rlock); + mutex_lock(&desc->wlock); kill_urbs(desc); /* @@ -876,7 +892,8 @@ static int wdm_post_reset(struct usb_interface *intf) int rv; rv = recover_from_urb_loss(desc); - mutex_unlock(&desc->lock); + mutex_unlock(&desc->wlock); + mutex_unlock(&desc->rlock); return 0; } diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 2f51de5..c8df1dd 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -126,7 +126,6 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep, struct dwc3_request *req) { struct dwc3 *dwc = dep->dwc; - u32 type; int ret = 0; req->request.actual = 0; @@ -149,20 +148,14 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep, direction = !!(dep->flags & DWC3_EP0_DIR_IN); - if (dwc->ep0state == EP0_STATUS_PHASE) { - type = dwc->three_stage_setup - ? 
DWC3_TRBCTL_CONTROL_STATUS3 - : DWC3_TRBCTL_CONTROL_STATUS2; - } else if (dwc->ep0state == EP0_DATA_PHASE) { - type = DWC3_TRBCTL_CONTROL_DATA; - } else { - /* should never happen */ - WARN_ON(1); + if (dwc->ep0state != EP0_DATA_PHASE) { + dev_WARN(dwc->dev, "Unexpected pending request\n"); return 0; } ret = dwc3_ep0_start_trans(dwc, direction, - req->request.dma, req->request.length, type); + req->request.dma, req->request.length, + DWC3_TRBCTL_CONTROL_DATA); dep->flags &= ~(DWC3_EP_PENDING_REQUEST | DWC3_EP0_DIR_IN); } else if (dwc->delayed_status) { diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index a696bde..064b6e2 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -101,7 +101,7 @@ void dwc3_unmap_buffer_from_dma(struct dwc3_request *req) if (req->request.num_mapped_sgs) { req->request.dma = DMA_ADDR_INVALID; dma_unmap_sg(dwc->dev, req->request.sg, - req->request.num_sgs, + req->request.num_mapped_sgs, req->direction ? DMA_TO_DEVICE : DMA_FROM_DEVICE); diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index a95de6a..baaebf2 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -175,13 +175,12 @@ ep_found: _ep->comp_desc = comp_desc; if (g->speed == USB_SPEED_SUPER) { switch (usb_endpoint_type(_ep->desc)) { - case USB_ENDPOINT_XFER_BULK: - case USB_ENDPOINT_XFER_INT: - _ep->maxburst = comp_desc->bMaxBurst; - break; case USB_ENDPOINT_XFER_ISOC: /* mult: bits 1:0 of bmAttributes */ _ep->mult = comp_desc->bmAttributes & 0x3; + case USB_ENDPOINT_XFER_BULK: + case USB_ENDPOINT_XFER_INT: + _ep->maxburst = comp_desc->bMaxBurst; break; default: /* Do nothing for control endpoints */ diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c index 753aa06..e0e6375 100644 --- a/drivers/usb/gadget/epautoconf.c +++ b/drivers/usb/gadget/epautoconf.c @@ -126,7 +126,7 @@ ep_matches ( * descriptor and see if the EP matches it */ if (usb_endpoint_xfer_bulk(desc)) { - if (ep_comp) { + if (ep_comp && gadget->max_speed >= USB_SPEED_SUPER) { num_req_streams = ep_comp->bmAttributes & 0x1f; if (num_req_streams > ep->max_streams) return 0; diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c index 6353eca..ee8ceec 100644 --- a/drivers/usb/gadget/f_mass_storage.c +++ b/drivers/usb/gadget/f_mass_storage.c @@ -3123,15 +3123,15 @@ fsg_add(struct usb_composite_dev *cdev, struct usb_configuration *c, struct fsg_module_parameters { char *file[FSG_MAX_LUNS]; - int ro[FSG_MAX_LUNS]; - int removable[FSG_MAX_LUNS]; - int cdrom[FSG_MAX_LUNS]; - int nofua[FSG_MAX_LUNS]; + bool ro[FSG_MAX_LUNS]; + bool removable[FSG_MAX_LUNS]; + bool cdrom[FSG_MAX_LUNS]; + bool nofua[FSG_MAX_LUNS]; unsigned int file_count, ro_count, removable_count, cdrom_count; unsigned int nofua_count; unsigned int luns; /* nluns */ - int stall; /* can_stall */ + bool stall; /* can_stall */ }; #define _FSG_MODULE_PARAM_ARRAY(prefix, params, name, type, desc) \ diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c index d7ea6c0..b04712f 100644 --- a/drivers/usb/gadget/fsl_udc_core.c +++ b/drivers/usb/gadget/fsl_udc_core.c @@ -1430,7 +1430,7 @@ static void setup_received_irq(struct fsl_udc *udc, int pipe = get_pipe_by_windex(wIndex); struct fsl_ep *ep; - if (wValue != 0 || wLength != 0 || pipe > udc->max_ep) + if (wValue != 0 || wLength != 0 || pipe >= udc->max_ep) break; ep = get_ep_by_pipe(udc, pipe); @@ -1673,7 +1673,7 @@ static void dtd_complete_irq(struct fsl_udc *udc) 
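In the composite.c hunk above, the ISOC case moves ahead of the BULK/INT cases and loses its break, so an isochronous endpoint first records mult from bmAttributes and then falls through to pick up bMaxBurst like the other transfer types. Deliberate fallthrough is easy to misread, so here is a small standalone C sketch of the same control flow; the struct is a simplified stand-in, not the USB descriptor layout.

/* Sketch of the rearranged switch: ISOC sets mult, then falls through
 * so every non-control endpoint type also picks up maxburst. */
#include <stdio.h>

enum xfer { XFER_CONTROL, XFER_ISOC, XFER_BULK, XFER_INT };

struct ep_caps { unsigned mult, maxburst; };

static void apply_comp_desc(struct ep_caps *ep, enum xfer type,
			    unsigned bmAttributes, unsigned bMaxBurst)
{
	switch (type) {
	case XFER_ISOC:
		ep->mult = bmAttributes & 0x3;  /* mult: bits 1:0 */
		/* fall through: ISOC also wants maxburst */
	case XFER_BULK:
	case XFER_INT:
		ep->maxburst = bMaxBurst;
		break;
	default:
		break;                          /* control endpoints: nothing */
	}
}

int main(void)
{
	struct ep_caps iso = { 0, 0 }, bulk = { 0, 0 };

	apply_comp_desc(&iso, XFER_ISOC, 0x02, 3);
	apply_comp_desc(&bulk, XFER_BULK, 0x00, 15);
	printf("isoc: mult=%u maxburst=%u\n", iso.mult, iso.maxburst);
	printf("bulk: mult=%u maxburst=%u\n", bulk.mult, bulk.maxburst);
	return 0;
}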
if (!bit_pos) return; - for (i = 0; i < udc->max_ep * 2; i++) { + for (i = 0; i < udc->max_ep; i++) { ep_num = i >> 1; direction = i % 2; diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c index fa0fcc1..e2293c1 100644 --- a/drivers/usb/gadget/langwell_udc.c +++ b/drivers/usb/gadget/langwell_udc.c @@ -11,11 +11,6 @@ /* #undef DEBUG */ /* #undef VERBOSE_DEBUG */ -#if defined(CONFIG_USB_LANGWELL_OTG) -#define OTG_TRANSCEIVER -#endif - - #include <linux/module.h> #include <linux/pci.h> #include <linux/dma-mapping.h> @@ -1522,8 +1517,7 @@ static void langwell_udc_stop(struct langwell_udc *dev) /* stop all USB activities */ -static void stop_activity(struct langwell_udc *dev, - struct usb_gadget_driver *driver) +static void stop_activity(struct langwell_udc *dev) { struct langwell_ep *ep; dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__); @@ -1535,9 +1529,9 @@ static void stop_activity(struct langwell_udc *dev, } /* report disconnect; the driver is already quiesced */ - if (driver) { + if (dev->driver) { spin_unlock(&dev->lock); - driver->disconnect(&dev->gadget); + dev->driver->disconnect(&dev->gadget); spin_lock(&dev->lock); } @@ -1925,11 +1919,10 @@ static int langwell_stop(struct usb_gadget *g, /* stop all usb activities */ dev->gadget.speed = USB_SPEED_UNKNOWN; - stop_activity(dev, driver); - spin_unlock_irqrestore(&dev->lock, flags); - dev->gadget.dev.driver = NULL; dev->driver = NULL; + stop_activity(dev); + spin_unlock_irqrestore(&dev->lock, flags); device_remove_file(&dev->pdev->dev, &dev_attr_function); @@ -2315,13 +2308,9 @@ static void handle_setup_packet(struct langwell_udc *dev, if (!gadget_is_otg(&dev->gadget)) break; - else if (setup->bRequest == USB_DEVICE_B_HNP_ENABLE) { + else if (setup->bRequest == USB_DEVICE_B_HNP_ENABLE) dev->gadget.b_hnp_enable = 1; -#ifdef OTG_TRANSCEIVER - if (!dev->lotg->otg.default_a) - dev->lotg->hsm.b_hnp_enable = 1; -#endif - } else if (setup->bRequest == USB_DEVICE_A_HNP_SUPPORT) + else if (setup->bRequest == USB_DEVICE_A_HNP_SUPPORT) dev->gadget.a_hnp_support = 1; else if (setup->bRequest == USB_DEVICE_A_ALT_HNP_SUPPORT) @@ -2733,7 +2722,7 @@ static void handle_usb_reset(struct langwell_udc *dev) dev->bus_reset = 1; /* reset all the queues, stop all USB activities */ - stop_activity(dev, dev->driver); + stop_activity(dev); dev->usb_state = USB_STATE_DEFAULT; } else { dev_vdbg(&dev->pdev->dev, "device controller reset\n"); @@ -2741,7 +2730,7 @@ static void handle_usb_reset(struct langwell_udc *dev) langwell_udc_reset(dev); /* reset all the queues, stop all USB activities */ - stop_activity(dev, dev->driver); + stop_activity(dev); /* reset ep0 dQH and endptctrl */ ep0_reset(dev); @@ -2752,12 +2741,6 @@ static void handle_usb_reset(struct langwell_udc *dev) dev->usb_state = USB_STATE_ATTACHED; } -#ifdef OTG_TRANSCEIVER - /* refer to USB OTG 6.6.2.3 b_hnp_en is cleared */ - if (!dev->lotg->otg.default_a) - dev->lotg->hsm.b_hnp_enable = 0; -#endif - dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__); } @@ -2770,29 +2753,6 @@ static void handle_bus_suspend(struct langwell_udc *dev) dev->resume_state = dev->usb_state; dev->usb_state = USB_STATE_SUSPENDED; -#ifdef OTG_TRANSCEIVER - if (dev->lotg->otg.default_a) { - if (dev->lotg->hsm.b_bus_suspend_vld == 1) { - dev->lotg->hsm.b_bus_suspend = 1; - /* notify transceiver the state changes */ - if (spin_trylock(&dev->lotg->wq_lock)) { - langwell_update_transceiver(); - spin_unlock(&dev->lotg->wq_lock); - } - } - dev->lotg->hsm.b_bus_suspend_vld++; - } else { - if 
(!dev->lotg->hsm.a_bus_suspend) { - dev->lotg->hsm.a_bus_suspend = 1; - /* notify transceiver the state changes */ - if (spin_trylock(&dev->lotg->wq_lock)) { - langwell_update_transceiver(); - spin_unlock(&dev->lotg->wq_lock); - } - } - } -#endif - /* report suspend to the driver */ if (dev->driver) { if (dev->driver->suspend) { @@ -2823,11 +2783,6 @@ static void handle_bus_resume(struct langwell_udc *dev) if (dev->pdev->device != 0x0829) langwell_phy_low_power(dev, 0); -#ifdef OTG_TRANSCEIVER - if (dev->lotg->otg.default_a == 0) - dev->lotg->hsm.a_bus_suspend = 0; -#endif - /* report resume to the driver */ if (dev->driver) { if (dev->driver->resume) { @@ -3020,7 +2975,6 @@ static void langwell_udc_remove(struct pci_dev *pdev) dev->done = &done; -#ifndef OTG_TRANSCEIVER /* free dTD dma_pool and dQH */ if (dev->dtd_pool) dma_pool_destroy(dev->dtd_pool); @@ -3032,7 +2986,6 @@ static void langwell_udc_remove(struct pci_dev *pdev) /* release SRAM caching */ if (dev->has_sram && dev->got_sram) sram_deinit(dev); -#endif if (dev->status_req) { kfree(dev->status_req->req.buf); @@ -3045,7 +2998,6 @@ static void langwell_udc_remove(struct pci_dev *pdev) if (dev->got_irq) free_irq(pdev->irq, dev); -#ifndef OTG_TRANSCEIVER if (dev->cap_regs) iounmap(dev->cap_regs); @@ -3055,13 +3007,6 @@ static void langwell_udc_remove(struct pci_dev *pdev) if (dev->enabled) pci_disable_device(pdev); -#else - if (dev->transceiver) { - otg_put_transceiver(dev->transceiver); - dev->transceiver = NULL; - dev->lotg = NULL; - } -#endif dev->cap_regs = NULL; @@ -3072,9 +3017,7 @@ static void langwell_udc_remove(struct pci_dev *pdev) device_remove_file(&pdev->dev, &dev_attr_langwell_udc); device_remove_file(&pdev->dev, &dev_attr_remote_wakeup); -#ifndef OTG_TRANSCEIVER pci_set_drvdata(pdev, NULL); -#endif /* free dev, wait for the release() finished */ wait_for_completion(&done); @@ -3089,9 +3032,7 @@ static int langwell_udc_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct langwell_udc *dev; -#ifndef OTG_TRANSCEIVER unsigned long resource, len; -#endif void __iomem *base = NULL; size_t size; int retval; @@ -3109,16 +3050,6 @@ static int langwell_udc_probe(struct pci_dev *pdev, dev->pdev = pdev; dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__); -#ifdef OTG_TRANSCEIVER - /* PCI device is already enabled by otg_transceiver driver */ - dev->enabled = 1; - - /* mem region and register base */ - dev->region = 1; - dev->transceiver = otg_get_transceiver(); - dev->lotg = otg_to_langwell(dev->transceiver); - base = dev->lotg->regs; -#else pci_set_drvdata(pdev, dev); /* now all the pci goodies ... 
*/ @@ -3139,7 +3070,6 @@ static int langwell_udc_probe(struct pci_dev *pdev, dev->region = 1; base = ioremap_nocache(resource, len); -#endif if (base == NULL) { dev_err(&dev->pdev->dev, "can't map memory\n"); retval = -EFAULT; @@ -3163,7 +3093,6 @@ static int langwell_udc_probe(struct pci_dev *pdev, dev->got_sram = 0; dev_vdbg(&dev->pdev->dev, "dev->has_sram: %d\n", dev->has_sram); -#ifndef OTG_TRANSCEIVER /* enable SRAM caching if detected */ if (dev->has_sram && !dev->got_sram) sram_init(dev); @@ -3182,7 +3111,6 @@ static int langwell_udc_probe(struct pci_dev *pdev, goto error; } dev->got_irq = 1; -#endif /* set stopped bit */ dev->stopped = 1; @@ -3257,10 +3185,8 @@ static int langwell_udc_probe(struct pci_dev *pdev, dev->remote_wakeup = 0; dev->dev_status = 1 << USB_DEVICE_SELF_POWERED; -#ifndef OTG_TRANSCEIVER /* reset device controller */ langwell_udc_reset(dev); -#endif /* initialize gadget structure */ dev->gadget.ops = &langwell_ops; /* usb_gadget_ops */ @@ -3268,9 +3194,6 @@ static int langwell_udc_probe(struct pci_dev *pdev, INIT_LIST_HEAD(&dev->gadget.ep_list); /* ep_list */ dev->gadget.speed = USB_SPEED_UNKNOWN; /* speed */ dev->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */ -#ifdef OTG_TRANSCEIVER - dev->gadget.is_otg = 1; /* support otg mode */ -#endif /* the "gadget" abstracts/virtualizes the controller */ dev_set_name(&dev->gadget.dev, "gadget"); @@ -3282,10 +3205,8 @@ static int langwell_udc_probe(struct pci_dev *pdev, /* controller endpoints reinit */ eps_reinit(dev); -#ifndef OTG_TRANSCEIVER /* reset ep0 dQH and endptctrl */ ep0_reset(dev); -#endif /* create dTD dma_pool resource */ dev->dtd_pool = dma_pool_create("langwell_dtd", @@ -3367,7 +3288,7 @@ static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state) spin_lock_irq(&dev->lock); /* stop all usb activities */ - stop_activity(dev, dev->driver); + stop_activity(dev); spin_unlock_irq(&dev->lock); /* free dTD dma_pool and dQH */ @@ -3525,22 +3446,14 @@ static struct pci_driver langwell_pci_driver = { static int __init init(void) { -#ifdef OTG_TRANSCEIVER - return langwell_register_peripheral(&langwell_pci_driver); -#else return pci_register_driver(&langwell_pci_driver); -#endif } module_init(init); static void __exit cleanup(void) { -#ifdef OTG_TRANSCEIVER - return langwell_unregister_peripheral(&langwell_pci_driver); -#else pci_unregister_driver(&langwell_pci_driver); -#endif } module_exit(cleanup); diff --git a/drivers/usb/gadget/langwell_udc.h b/drivers/usb/gadget/langwell_udc.h index ef79e24..d6e78ac 100644 --- a/drivers/usb/gadget/langwell_udc.h +++ b/drivers/usb/gadget/langwell_udc.h @@ -8,7 +8,6 @@ */ #include <linux/usb/langwell_udc.h> -#include <linux/usb/langwell_otg.h> /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c index c7f291a..85ea14e 100644 --- a/drivers/usb/gadget/storage_common.c +++ b/drivers/usb/gadget/storage_common.c @@ -598,16 +598,16 @@ static __maybe_unused struct usb_ss_cap_descriptor fsg_ss_cap_desc = { | USB_5GBPS_OPERATION), .bFunctionalitySupport = USB_LOW_SPEED_OPERATION, .bU1devExitLat = USB_DEFAULT_U1_DEV_EXIT_LAT, - .bU2DevExitLat = USB_DEFAULT_U2_DEV_EXIT_LAT, + .bU2DevExitLat = cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT), }; static __maybe_unused struct usb_bos_descriptor fsg_bos_desc = { .bLength = USB_DT_BOS_SIZE, .bDescriptorType = USB_DT_BOS, - .wTotalLength = USB_DT_BOS_SIZE + .wTotalLength = cpu_to_le16(USB_DT_BOS_SIZE + 
USB_DT_USB_EXT_CAP_SIZE - + USB_DT_USB_SS_CAP_SIZE, + + USB_DT_USB_SS_CAP_SIZE), .bNumDeviceCaps = 2, }; diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c index e90344a..b556a72 100644 --- a/drivers/usb/host/ehci-fsl.c +++ b/drivers/usb/host/ehci-fsl.c @@ -125,7 +125,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver, */ if (pdata->init && pdata->init(pdev)) { retval = -ENODEV; - goto err3; + goto err4; } /* Enable USB controller, 83xx or 8536 */ diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index f4b627d..01bb7241d 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -276,6 +276,9 @@ static int ehci_pci_setup(struct usb_hcd *hcd) /* Serial Bus Release Number is at PCI 0x60 offset */ pci_read_config_byte(pdev, 0x60, &ehci->sbrn); + if (pdev->vendor == PCI_VENDOR_ID_STMICRO + && pdev->device == PCI_DEVICE_ID_STMICRO_USB_HOST) + ehci->sbrn = 0x20; /* ConneXT has no sbrn register */ /* Keep this around for a while just in case some EHCI * implementation uses legacy PCI PM support. This test @@ -526,6 +529,9 @@ static const struct pci_device_id pci_ids [] = { { /* handle any USB 2.0 EHCI controller */ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_EHCI, ~0), .driver_data = (unsigned long) &ehci_pci_hc_driver, + }, { + PCI_VDEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_USB_HOST), + .driver_data = (unsigned long) &ehci_pci_hc_driver, }, { /* end: all zeroes */ } }; diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c index 32793ce..9c2cc46 100644 --- a/drivers/usb/host/ehci-xilinx-of.c +++ b/drivers/usb/host/ehci-xilinx-of.c @@ -183,7 +183,7 @@ static int __devinit ehci_hcd_xilinx_of_probe(struct platform_device *op) } irq = irq_of_parse_and_map(dn, 0); - if (irq == NO_IRQ) { + if (!irq) { printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__); rv = -EBUSY; goto err_irq; diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index 5df0b0e..77afabc 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c @@ -139,8 +139,23 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver, } iclk = clk_get(&pdev->dev, "ohci_clk"); + if (IS_ERR(iclk)) { + dev_err(&pdev->dev, "failed to get ohci_clk\n"); + retval = PTR_ERR(iclk); + goto err3; + } fclk = clk_get(&pdev->dev, "uhpck"); + if (IS_ERR(fclk)) { + dev_err(&pdev->dev, "failed to get uhpck\n"); + retval = PTR_ERR(fclk); + goto err4; + } hclk = clk_get(&pdev->dev, "hclk"); + if (IS_ERR(hclk)) { + dev_err(&pdev->dev, "failed to get hclk\n"); + retval = PTR_ERR(hclk); + goto err5; + } at91_start_hc(pdev); ohci_hcd_init(hcd_to_ohci(hcd)); @@ -153,9 +168,12 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver, at91_stop_hc(pdev); clk_put(hclk); + err5: clk_put(fclk); + err4: clk_put(iclk); + err3: iounmap(hcd->regs); err2: @@ -226,7 +244,8 @@ static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int if (!gpio_is_valid(pdata->vbus_pin[port])) return; - gpio_set_value(pdata->vbus_pin[port], !pdata->vbus_pin_inverted ^ enable); + gpio_set_value(pdata->vbus_pin[port], + !pdata->vbus_pin_active_low[port] ^ enable); } static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port) @@ -237,7 +256,8 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port) if (!gpio_is_valid(pdata->vbus_pin[port])) return -EINVAL; - return gpio_get_value(pdata->vbus_pin[port]) ^ !pdata->vbus_pin_inverted; + return gpio_get_value(pdata->vbus_pin[port]) ^ + 
!pdata->vbus_pin_active_low[port]; } /* diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c index 5179fcd..e4bcb62 100644 --- a/drivers/usb/host/ohci-dbg.c +++ b/drivers/usb/host/ohci-dbg.c @@ -82,6 +82,14 @@ urb_print(struct urb * urb, char * str, int small, int status) ohci_dbg(ohci,format, ## arg ); \ } while (0); +/* Version for use where "next" is the address of a local variable */ +#define ohci_dbg_nosw(ohci, next, size, format, arg...) \ + do { \ + unsigned s_len; \ + s_len = scnprintf(*next, *size, format, ## arg); \ + *size -= s_len; *next += s_len; \ + } while (0); + static void ohci_dump_intr_mask ( struct ohci_hcd *ohci, @@ -653,7 +661,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf) /* dump driver info, then registers in spec order */ - ohci_dbg_sw (ohci, &next, &size, + ohci_dbg_nosw(ohci, &next, &size, "bus %s, device %s\n" "%s\n" "%s\n", @@ -672,7 +680,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf) /* hcca */ if (ohci->hcca) - ohci_dbg_sw (ohci, &next, &size, + ohci_dbg_nosw(ohci, &next, &size, "hcca frame 0x%04x\n", ohci_frame_no(ohci)); /* other registers mostly affect frame timings */ diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index 6109810..1843bb6 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c @@ -397,6 +397,10 @@ static const struct pci_device_id pci_ids [] = { { /* handle any USB OHCI controller */ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_OHCI, ~0), .driver_data = (unsigned long) &ohci_pci_hc_driver, + }, { + /* The device in the ConneXT I/O hub has no class reg */ + PCI_VDEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_USB_OHCI), + .driver_data = (unsigned long) &ohci_pci_hc_driver, }, { /* end: all zeroes */ } }; MODULE_DEVICE_TABLE (pci, pci_ids); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index b90e138..b62037b 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1204,6 +1204,7 @@ static void handle_vendor_event(struct xhci_hcd *xhci, * * Returns a zero-based port number, which is suitable for indexing into each of * the split roothubs' port arrays and bus state arrays. + * Add one to it in order to call xhci_find_slot_id_by_port. 
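The xhci-ring comment above spells out that find_faked_portnum_from_hw_portnum() returns a zero-based index into the per-roothub port arrays, while xhci_find_slot_id_by_port() expects the one-based port number, hence the faked_port_index + 1 in the hunk that follows. A tiny sketch of that off-by-one convention; find_slot_by_port() here is a hypothetical stand-in, not the xHCI helper itself.

/* Sketch: zero-based array index vs one-based lookup API. */
#include <stdio.h>

#define NPORTS 4

static int slot_by_port[NPORTS + 1] = { 0, 7, 0, 9, 0 };  /* index 0 unused */

static int find_slot_by_port(int port_1based)
{
	if (port_1based < 1 || port_1based > NPORTS)
		return 0;                     /* 0 means "no slot" */
	return slot_by_port[port_1based];
}

int main(void)
{
	int faked_port_index = 2;             /* zero-based, indexes port arrays */

	/* Wrong: the zero-based index looks up the previous port. */
	printf("without +1: slot %d\n", find_slot_by_port(faked_port_index));
	/* Right: convert to the one-based port number first. */
	printf("with    +1: slot %d\n", find_slot_by_port(faked_port_index + 1));
	return 0;
}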
*/ static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd, struct xhci_hcd *xhci, u32 port_id) @@ -1324,7 +1325,7 @@ static void handle_port_status(struct xhci_hcd *xhci, xhci_set_link_state(xhci, port_array, faked_port_index, XDEV_U0); slot_id = xhci_find_slot_id_by_port(hcd, xhci, - faked_port_index); + faked_port_index + 1); if (!slot_id) { xhci_dbg(xhci, "slot_id is zero\n"); goto cleanup; @@ -3323,7 +3324,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, /* Check TD length */ if (running_total != td_len) { xhci_err(xhci, "ISOC TD length unmatch\n"); - return -EINVAL; + ret = -EINVAL; + goto cleanup; } } diff --git a/drivers/usb/misc/emi26.c b/drivers/usb/misc/emi26.c index d9b6a03..da97dce 100644 --- a/drivers/usb/misc/emi26.c +++ b/drivers/usb/misc/emi26.c @@ -37,9 +37,6 @@ static int emi26_set_reset(struct usb_device *dev, unsigned char reset_bit); static int emi26_load_firmware (struct usb_device *dev); static int emi26_probe(struct usb_interface *intf, const struct usb_device_id *id); static void emi26_disconnect(struct usb_interface *intf); -static int __init emi26_init (void); -static void __exit emi26_exit (void); - /* thanks to drivers/usb/serial/keyspan_pda.c code */ static int emi26_writememory (struct usb_device *dev, int address, diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c index 9f39062..4e0f167 100644 --- a/drivers/usb/misc/emi62.c +++ b/drivers/usb/misc/emi62.c @@ -46,9 +46,6 @@ static int emi62_set_reset(struct usb_device *dev, unsigned char reset_bit); static int emi62_load_firmware (struct usb_device *dev); static int emi62_probe(struct usb_interface *intf, const struct usb_device_id *id); static void emi62_disconnect(struct usb_interface *intf); -static int __init emi62_init (void); -static void __exit emi62_exit (void); - /* thanks to drivers/usb/serial/keyspan_pda.c code */ static int emi62_writememory(struct usb_device *dev, int address, diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c index 107bf13..b2d82b9 100644 --- a/drivers/usb/misc/usbsevseg.c +++ b/drivers/usb/misc/usbsevseg.c @@ -24,7 +24,7 @@ #define VENDOR_ID 0x0fc5 #define PRODUCT_ID 0x1227 -#define MAXLEN 6 +#define MAXLEN 8 /* table of devices that work with this driver */ static const struct usb_device_id id_table[] = { diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c index f9a3f62..7c569f5 100644 --- a/drivers/usb/musb/davinci.c +++ b/drivers/usb/musb/davinci.c @@ -33,9 +33,6 @@ #include <linux/platform_device.h> #include <linux/dma-mapping.h> -#include <mach/hardware.h> -#include <mach/memory.h> -#include <asm/gpio.h> #include <mach/cputype.h> #include <asm/mach-types.h> diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 56cf024..3d11cf64 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -981,6 +981,9 @@ static void musb_shutdown(struct platform_device *pdev) unsigned long flags; pm_runtime_get_sync(musb->controller); + + musb_gadget_cleanup(musb); + spin_lock_irqsave(&musb->lock, flags); musb_platform_disable(musb); musb_generic_disable(musb); @@ -1827,8 +1830,6 @@ static void musb_free(struct musb *musb) sysfs_remove_group(&musb->controller->kobj, &musb_attr_group); #endif - musb_gadget_cleanup(musb); - if (musb->nIrq >= 0) { if (musb->irq_wake) disable_irq_wake(musb->nIrq); diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index c27bbbf..df719ea 100644 --- a/drivers/usb/musb/omap2430.c +++ 
b/drivers/usb/musb/omap2430.c @@ -222,7 +222,6 @@ static inline void omap2430_low_level_init(struct musb *musb) musb_writel(musb->mregs, OTG_FORCESTDBY, l); } -/* blocking notifier support */ static int musb_otg_notifications(struct notifier_block *nb, unsigned long event, void *unused) { @@ -231,7 +230,7 @@ static int musb_otg_notifications(struct notifier_block *nb, musb->xceiv_event = event; schedule_work(&musb->otg_notifier_work); - return 0; + return NOTIFY_OK; } static void musb_otg_notifier_work(struct work_struct *data_notifier_work) @@ -386,6 +385,7 @@ static void omap2430_musb_disable(struct musb *musb) static int omap2430_musb_exit(struct musb *musb) { del_timer_sync(&musb_idle_timer); + cancel_work_sync(&musb->otg_notifier_work); omap2430_low_level_exit(musb); otg_put_transceiver(musb->xceiv); diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c index a163632..97cb459 100644 --- a/drivers/usb/musb/ux500_dma.c +++ b/drivers/usb/musb/ux500_dma.c @@ -84,7 +84,7 @@ static bool ux500_configure_channel(struct dma_channel *channel, struct musb_hw_ep *hw_ep = ux500_channel->hw_ep; struct dma_chan *dma_chan = ux500_channel->dma_chan; struct dma_async_tx_descriptor *dma_desc; - enum dma_data_direction direction; + enum dma_transfer_direction direction; struct scatterlist sg; struct dma_slave_config slave_conf; enum dma_slave_buswidth addr_width; @@ -104,7 +104,7 @@ static bool ux500_configure_channel(struct dma_channel *channel, sg_dma_address(&sg) = dma_addr; sg_dma_len(&sg) = len; - direction = ux500_channel->is_tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE; + direction = ux500_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; addr_width = (len & 0x3) ? DMA_SLAVE_BUSWIDTH_1_BYTE : DMA_SLAVE_BUSWIDTH_4_BYTES; diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig index 2a25955..76d6293 100644 --- a/drivers/usb/otg/Kconfig +++ b/drivers/usb/otg/Kconfig @@ -86,20 +86,6 @@ config NOP_USB_XCEIV built-in with usb ip or which are autonomous and doesn't require any phy programming such as ISP1x04 etc. -config USB_LANGWELL_OTG - tristate "Intel Langwell USB OTG dual-role support" - depends on USB && PCI && INTEL_SCU_IPC - select USB_OTG - select USB_OTG_UTILS - help - Say Y here if you want to build Intel Langwell USB OTG - transciever driver in kernel. This driver implements role - switch between EHCI host driver and Langwell USB OTG - client driver. - - To compile this driver as a module, choose M here: the - module will be called langwell_otg. 
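The omap2430 hunk above has musb_otg_notifications() return NOTIFY_OK instead of a bare 0; in the notifier convention 0 reads as NOTIFY_DONE ("don't care"), so the explicit value states that the event was consumed. The sketch below only illustrates that return-code convention; the constants mirror include/linux/notifier.h from memory and the event id is hypothetical.

/* Sketch of notifier-style return codes (values assumed, not quoted). */
#include <stdio.h>

#define NOTIFY_DONE 0x0000   /* don't care */
#define NOTIFY_OK   0x0001   /* suits me   */

static int otg_event_cb(unsigned long event, void *data)
{
	(void)data;
	/* queue deferred work for the event, then report it was handled */
	printf("handling OTG event %lu\n", event);
	return NOTIFY_OK;      /* a bare 'return 0' would read as NOTIFY_DONE */
}

int main(void)
{
	int ret = otg_event_cb(1 /* hypothetical VBUS event id */, NULL);

	printf("callback returned %s\n",
	       ret == NOTIFY_OK ? "NOTIFY_OK" : "NOTIFY_DONE");
	return 0;
}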
- config USB_MSM_OTG tristate "OTG support for Qualcomm on-chip USB controller" depends on (USB || USB_GADGET) && ARCH_MSM @@ -124,7 +110,7 @@ config AB8500_USB config FSL_USB2_OTG bool "Freescale USB OTG Transceiver Driver" - depends on USB_EHCI_FSL && USB_GADGET_FSL_USB2 + depends on USB_EHCI_FSL && USB_GADGET_FSL_USB2 && USB_SUSPEND select USB_OTG select USB_OTG_UTILS help @@ -132,7 +118,7 @@ config FSL_USB2_OTG config USB_MV_OTG tristate "Marvell USB OTG support" - depends on USB_MV_UDC + depends on USB_MV_UDC && USB_SUSPEND select USB_OTG select USB_OTG_UTILS help diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile index b2c5a95..41aa509 100644 --- a/drivers/usb/otg/Makefile +++ b/drivers/usb/otg/Makefile @@ -13,7 +13,6 @@ obj-$(CONFIG_USB_GPIO_VBUS) += gpio_vbus.o obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o obj-$(CONFIG_TWL4030_USB) += twl4030-usb.o obj-$(CONFIG_TWL6030_USB) += twl6030-usb.o -obj-$(CONFIG_USB_LANGWELL_OTG) += langwell_otg.o obj-$(CONFIG_NOP_USB_XCEIV) += nop-usb-xceiv.o obj-$(CONFIG_USB_ULPI) += ulpi.o obj-$(CONFIG_USB_ULPI_VIEWPORT) += ulpi_viewport.o diff --git a/drivers/usb/otg/langwell_otg.c b/drivers/usb/otg/langwell_otg.c deleted file mode 100644 index f08f784..0000000 --- a/drivers/usb/otg/langwell_otg.c +++ /dev/null @@ -1,2347 +0,0 @@ -/* - * Intel Langwell USB OTG transceiver driver - * Copyright (C) 2008 - 2010, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * - */ -/* This driver helps to switch Langwell OTG controller function between host - * and peripheral. It works with EHCI driver and Langwell client controller - * driver together. 
- */ -#include <linux/module.h> -#include <linux/init.h> -#include <linux/pci.h> -#include <linux/errno.h> -#include <linux/interrupt.h> -#include <linux/kernel.h> -#include <linux/device.h> -#include <linux/moduleparam.h> -#include <linux/usb/ch9.h> -#include <linux/usb/gadget.h> -#include <linux/usb.h> -#include <linux/usb/otg.h> -#include <linux/usb/hcd.h> -#include <linux/notifier.h> -#include <linux/delay.h> -#include <asm/intel_scu_ipc.h> - -#include <linux/usb/langwell_otg.h> - -#define DRIVER_DESC "Intel Langwell USB OTG transceiver driver" -#define DRIVER_VERSION "July 10, 2010" - -MODULE_DESCRIPTION(DRIVER_DESC); -MODULE_AUTHOR("Henry Yuan <hang.yuan@intel.com>, Hao Wu <hao.wu@intel.com>"); -MODULE_VERSION(DRIVER_VERSION); -MODULE_LICENSE("GPL"); - -static const char driver_name[] = "langwell_otg"; - -static int langwell_otg_probe(struct pci_dev *pdev, - const struct pci_device_id *id); -static void langwell_otg_remove(struct pci_dev *pdev); -static int langwell_otg_suspend(struct pci_dev *pdev, pm_message_t message); -static int langwell_otg_resume(struct pci_dev *pdev); - -static int langwell_otg_set_host(struct otg_transceiver *otg, - struct usb_bus *host); -static int langwell_otg_set_peripheral(struct otg_transceiver *otg, - struct usb_gadget *gadget); -static int langwell_otg_start_srp(struct otg_transceiver *otg); - -static const struct pci_device_id pci_ids[] = {{ - .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), - .class_mask = ~0, - .vendor = 0x8086, - .device = 0x0811, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, -}, { /* end: all zeroes */ } -}; - -static struct pci_driver otg_pci_driver = { - .name = (char *) driver_name, - .id_table = pci_ids, - - .probe = langwell_otg_probe, - .remove = langwell_otg_remove, - - .suspend = langwell_otg_suspend, - .resume = langwell_otg_resume, -}; - -/* HSM timers */ -static inline struct langwell_otg_timer *otg_timer_initializer -(void (*function)(unsigned long), unsigned long expires, unsigned long data) -{ - struct langwell_otg_timer *timer; - timer = kmalloc(sizeof(struct langwell_otg_timer), GFP_KERNEL); - if (timer == NULL) - return timer; - - timer->function = function; - timer->expires = expires; - timer->data = data; - return timer; -} - -static struct langwell_otg_timer *a_wait_vrise_tmr, *a_aidl_bdis_tmr, - *b_se0_srp_tmr, *b_srp_init_tmr; - -static struct list_head active_timers; - -static struct langwell_otg *the_transceiver; - -/* host/client notify transceiver when event affects HNP state */ -void langwell_update_transceiver(void) -{ - struct langwell_otg *lnw = the_transceiver; - - dev_dbg(lnw->dev, "transceiver is updated\n"); - - if (!lnw->qwork) - return ; - - queue_work(lnw->qwork, &lnw->work); -} -EXPORT_SYMBOL(langwell_update_transceiver); - -static int langwell_otg_set_host(struct otg_transceiver *otg, - struct usb_bus *host) -{ - otg->host = host; - - return 0; -} - -static int langwell_otg_set_peripheral(struct otg_transceiver *otg, - struct usb_gadget *gadget) -{ - otg->gadget = gadget; - - return 0; -} - -static int langwell_otg_set_power(struct otg_transceiver *otg, - unsigned mA) -{ - return 0; -} - -/* A-device drives vbus, controlled through IPC commands */ -static int langwell_otg_set_vbus(struct otg_transceiver *otg, bool enabled) -{ - struct langwell_otg *lnw = the_transceiver; - u8 sub_id; - - dev_dbg(lnw->dev, "%s <--- %s\n", __func__, enabled ? 
"on" : "off"); - - if (enabled) - sub_id = 0x8; /* Turn on the VBus */ - else - sub_id = 0x9; /* Turn off the VBus */ - - if (intel_scu_ipc_simple_command(0xef, sub_id)) { - dev_dbg(lnw->dev, "Failed to set Vbus via IPC commands\n"); - return -EBUSY; - } - - dev_dbg(lnw->dev, "%s --->\n", __func__); - - return 0; -} - -/* charge vbus or discharge vbus through a resistor to ground */ -static void langwell_otg_chrg_vbus(int on) -{ - struct langwell_otg *lnw = the_transceiver; - u32 val; - - val = readl(lnw->iotg.base + CI_OTGSC); - - if (on) - writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_VC, - lnw->iotg.base + CI_OTGSC); - else - writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_VD, - lnw->iotg.base + CI_OTGSC); -} - -/* Start SRP */ -static int langwell_otg_start_srp(struct otg_transceiver *otg) -{ - struct langwell_otg *lnw = the_transceiver; - struct intel_mid_otg_xceiv *iotg = &lnw->iotg; - u32 val; - - dev_dbg(lnw->dev, "%s --->\n", __func__); - - val = readl(iotg->base + CI_OTGSC); - - writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HADP, - iotg->base + CI_OTGSC); - - /* Check if the data plus is finished or not */ - msleep(8); - val = readl(iotg->base + CI_OTGSC); - if (val & (OTGSC_HADP | OTGSC_DP)) - dev_dbg(lnw->dev, "DataLine SRP Error\n"); - - /* Disable interrupt - b_sess_vld */ - val = readl(iotg->base + CI_OTGSC); - val &= (~(OTGSC_BSVIE | OTGSC_BSEIE)); - writel(val, iotg->base + CI_OTGSC); - - /* Start VBus SRP, drive vbus to generate VBus pulse */ - iotg->otg.set_vbus(&iotg->otg, true); - msleep(15); - iotg->otg.set_vbus(&iotg->otg, false); - - /* Enable interrupt - b_sess_vld*/ - val = readl(iotg->base + CI_OTGSC); - dev_dbg(lnw->dev, "after VBUS pulse otgsc = %x\n", val); - - val |= (OTGSC_BSVIE | OTGSC_BSEIE); - writel(val, iotg->base + CI_OTGSC); - - /* If Vbus is valid, then update the hsm */ - if (val & OTGSC_BSV) { - dev_dbg(lnw->dev, "no b_sess_vld interrupt\n"); - - lnw->iotg.hsm.b_sess_vld = 1; - langwell_update_transceiver(); - } - - dev_dbg(lnw->dev, "%s <---\n", __func__); - return 0; -} - -/* stop SOF via bus_suspend */ -static void langwell_otg_loc_sof(int on) -{ - struct langwell_otg *lnw = the_transceiver; - struct usb_hcd *hcd; - int err; - - dev_dbg(lnw->dev, "%s ---> %s\n", __func__, on ? 
"suspend" : "resume"); - - hcd = bus_to_hcd(lnw->iotg.otg.host); - if (on) - err = hcd->driver->bus_resume(hcd); - else - err = hcd->driver->bus_suspend(hcd); - - if (err) - dev_dbg(lnw->dev, "Fail to resume/suspend USB bus - %d\n", err); - - dev_dbg(lnw->dev, "%s <---\n", __func__); -} - -static int langwell_otg_check_otgsc(void) -{ - struct langwell_otg *lnw = the_transceiver; - u32 otgsc, usbcfg; - - dev_dbg(lnw->dev, "check sync OTGSC and USBCFG registers\n"); - - otgsc = readl(lnw->iotg.base + CI_OTGSC); - usbcfg = readl(lnw->usbcfg); - - dev_dbg(lnw->dev, "OTGSC = %08x, USBCFG = %08x\n", - otgsc, usbcfg); - dev_dbg(lnw->dev, "OTGSC_AVV = %d\n", !!(otgsc & OTGSC_AVV)); - dev_dbg(lnw->dev, "USBCFG.VBUSVAL = %d\n", - !!(usbcfg & USBCFG_VBUSVAL)); - dev_dbg(lnw->dev, "OTGSC_ASV = %d\n", !!(otgsc & OTGSC_ASV)); - dev_dbg(lnw->dev, "USBCFG.AVALID = %d\n", - !!(usbcfg & USBCFG_AVALID)); - dev_dbg(lnw->dev, "OTGSC_BSV = %d\n", !!(otgsc & OTGSC_BSV)); - dev_dbg(lnw->dev, "USBCFG.BVALID = %d\n", - !!(usbcfg & USBCFG_BVALID)); - dev_dbg(lnw->dev, "OTGSC_BSE = %d\n", !!(otgsc & OTGSC_BSE)); - dev_dbg(lnw->dev, "USBCFG.SESEND = %d\n", - !!(usbcfg & USBCFG_SESEND)); - - /* Check USBCFG VBusValid/AValid/BValid/SessEnd */ - if (!!(otgsc & OTGSC_AVV) ^ !!(usbcfg & USBCFG_VBUSVAL)) { - dev_dbg(lnw->dev, "OTGSC.AVV != USBCFG.VBUSVAL\n"); - goto err; - } - if (!!(otgsc & OTGSC_ASV) ^ !!(usbcfg & USBCFG_AVALID)) { - dev_dbg(lnw->dev, "OTGSC.ASV != USBCFG.AVALID\n"); - goto err; - } - if (!!(otgsc & OTGSC_BSV) ^ !!(usbcfg & USBCFG_BVALID)) { - dev_dbg(lnw->dev, "OTGSC.BSV != USBCFG.BVALID\n"); - goto err; - } - if (!!(otgsc & OTGSC_BSE) ^ !!(usbcfg & USBCFG_SESEND)) { - dev_dbg(lnw->dev, "OTGSC.BSE != USBCFG.SESSEN\n"); - goto err; - } - - dev_dbg(lnw->dev, "OTGSC and USBCFG are synced\n"); - - return 0; - -err: - dev_warn(lnw->dev, "OTGSC isn't equal to USBCFG\n"); - return -EPIPE; -} - - -static void langwell_otg_phy_low_power(int on) -{ - struct langwell_otg *lnw = the_transceiver; - struct intel_mid_otg_xceiv *iotg = &lnw->iotg; - u8 val, phcd; - int retval; - - dev_dbg(lnw->dev, "%s ---> %s mode\n", - __func__, on ? "Low power" : "Normal"); - - phcd = 0x40; - - val = readb(iotg->base + CI_HOSTPC1 + 2); - - if (on) { - /* Due to hardware issue, after set PHCD, sync will failed - * between USBCFG and OTGSC, so before set PHCD, check if - * sync is in process now. If the answer is "yes", then do - * not touch PHCD bit */ - retval = langwell_otg_check_otgsc(); - if (retval) { - dev_dbg(lnw->dev, "Skip PHCD programming..\n"); - return ; - } - - writeb(val | phcd, iotg->base + CI_HOSTPC1 + 2); - } else - writeb(val & ~phcd, iotg->base + CI_HOSTPC1 + 2); - - dev_dbg(lnw->dev, "%s <--- done\n", __func__); -} - -/* After drv vbus, add 5 ms delay to set PHCD */ -static void langwell_otg_phy_low_power_wait(int on) -{ - struct langwell_otg *lnw = the_transceiver; - - dev_dbg(lnw->dev, "add 5ms delay before programing PHCD\n"); - - mdelay(5); - langwell_otg_phy_low_power(on); -} - -/* Enable/Disable OTG interrupt */ -static void langwell_otg_intr(int on) -{ - struct langwell_otg *lnw = the_transceiver; - struct intel_mid_otg_xceiv *iotg = &lnw->iotg; - u32 val; - - dev_dbg(lnw->dev, "%s ---> %s\n", __func__, on ? 
"on" : "off"); - - val = readl(iotg->base + CI_OTGSC); - - /* OTGSC_INT_MASK doesn't contains 1msInt */ - if (on) { - val = val | (OTGSC_INT_MASK); - writel(val, iotg->base + CI_OTGSC); - } else { - val = val & ~(OTGSC_INT_MASK); - writel(val, iotg->base + CI_OTGSC); - } - - dev_dbg(lnw->dev, "%s <---\n", __func__); -} - -/* set HAAR: Hardware Assist Auto-Reset */ -static void langwell_otg_HAAR(int on) -{ - struct langwell_otg *lnw = the_transceiver; - struct intel_mid_otg_xceiv *iotg = &lnw->iotg; - u32 val; - - dev_dbg(lnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off"); - - val = readl(iotg->base + CI_OTGSC); - if (on) - writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HAAR, - iotg->base + CI_OTGSC); - else - writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HAAR, - iotg->base + CI_OTGSC); - - dev_dbg(lnw->dev, "%s <---\n", __func__); -} - -/* set HABA: Hardware Assist B-Disconnect to A-Connect */ -static void langwell_otg_HABA(int on) -{ - struct langwell_otg *lnw = the_transceiver; - struct intel_mid_otg_xceiv *iotg = &lnw->iotg; - u32 val; - - dev_dbg(lnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off"); - - val = readl(iotg->base + CI_OTGSC); - if (on) - writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HABA, - iotg->base + CI_OTGSC); - else - writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HABA, - iotg->base + CI_OTGSC); - - dev_dbg(lnw->dev, "%s <---\n", __func__); -} - -static int langwell_otg_check_se0_srp(int on) -{ - struct langwell_otg *lnw = the_transceiver; - int delay_time = TB_SE0_SRP * 10; - u32 val; - - dev_dbg(lnw->dev, "%s --->\n", __func__); - - do { - udelay(100); - if (!delay_time--) - break; - val = readl(lnw->iotg.base + CI_PORTSC1); - val &= PORTSC_LS; - } while (!val); - - dev_dbg(lnw->dev, "%s <---\n", __func__); - return val; -} - -/* The timeout callback function to set time out bit */ -static void set_tmout(unsigned long indicator) -{ - *(int *)indicator = 1; -} - -void langwell_otg_nsf_msg(unsigned long indicator) -{ - struct langwell_otg *lnw = the_transceiver; - - switch (indicator) { - case 2: - case 4: - case 6: - case 7: - dev_warn(lnw->dev, - "OTG:NSF-%lu - deivce not responding\n", indicator); - break; - case 3: - dev_warn(lnw->dev, - "OTG:NSF-%lu - deivce not supported\n", indicator); - break; - default: - dev_warn(lnw->dev, "Do not have this kind of NSF\n"); - break; - } -} - -/* Initialize timers */ -static int langwell_otg_init_timers(struct otg_hsm *hsm) -{ - /* HSM used timers */ - a_wait_vrise_tmr = otg_timer_initializer(&set_tmout, TA_WAIT_VRISE, - (unsigned long)&hsm->a_wait_vrise_tmout); - if (a_wait_vrise_tmr == NULL) - return -ENOMEM; - a_aidl_bdis_tmr = otg_timer_initializer(&set_tmout, TA_AIDL_BDIS, - (unsigned long)&hsm->a_aidl_bdis_tmout); - if (a_aidl_bdis_tmr == NULL) - return -ENOMEM; - b_se0_srp_tmr = otg_timer_initializer(&set_tmout, TB_SE0_SRP, - (unsigned long)&hsm->b_se0_srp); - if (b_se0_srp_tmr == NULL) - return -ENOMEM; - b_srp_init_tmr = otg_timer_initializer(&set_tmout, TB_SRP_INIT, - (unsigned long)&hsm->b_srp_init_tmout); - if (b_srp_init_tmr == NULL) - return -ENOMEM; - - return 0; -} - -/* Free timers */ -static void langwell_otg_free_timers(void) -{ - kfree(a_wait_vrise_tmr); - kfree(a_aidl_bdis_tmr); - kfree(b_se0_srp_tmr); - kfree(b_srp_init_tmr); -} - -/* The timeout callback function to set time out bit */ -static void langwell_otg_timer_fn(unsigned long indicator) -{ - struct langwell_otg *lnw = the_transceiver; - - *(int *)indicator = 1; - - dev_dbg(lnw->dev, "kernel timer - timeout\n"); - - langwell_update_transceiver(); -} - -/* 
kernel timer used instead of HW based interrupt */ -static void langwell_otg_add_ktimer(enum langwell_otg_timer_type timers) -{ - struct langwell_otg *lnw = the_transceiver; - struct intel_mid_otg_xceiv *iotg = &lnw->iotg; - unsigned long j = jiffies; - unsigned long data, time; - - switch (timers) { - case TA_WAIT_VRISE_TMR: - iotg->hsm.a_wait_vrise_tmout = 0; - data = (unsigned long)&iotg->hsm.a_wait_vrise_tmout; - time = TA_WAIT_VRISE; - break; - case TA_WAIT_BCON_TMR: - iotg->hsm.a_wait_bcon_tmout = 0; - data = (unsigned long)&iotg->hsm.a_wait_bcon_tmout; - time = TA_WAIT_BCON; - break; - case TA_AIDL_BDIS_TMR: - iotg->hsm.a_aidl_bdis_tmout = 0; - data = (unsigned long)&iotg->hsm.a_aidl_bdis_tmout; - time = TA_AIDL_BDIS; - break; - case TB_ASE0_BRST_TMR: - iotg->hsm.b_ase0_brst_tmout = 0; - data = (unsigned long)&iotg->hsm.b_ase0_brst_tmout; - time = TB_ASE0_BRST; - break; - case TB_SRP_INIT_TMR: - iotg->hsm.b_srp_init_tmout = 0; - data = (unsigned long)&iotg->hsm.b_srp_init_tmout; - time = TB_SRP_INIT; - break; - case TB_SRP_FAIL_TMR: - iotg->hsm.b_srp_fail_tmout = 0; - data = (unsigned long)&iotg->hsm.b_srp_fail_tmout; - time = TB_SRP_FAIL; - break; - case TB_BUS_SUSPEND_TMR: - iotg->hsm.b_bus_suspend_tmout = 0; - data = (unsigned long)&iotg->hsm.b_bus_suspend_tmout; - time = TB_BUS_SUSPEND; - break; - default: - dev_dbg(lnw->dev, "unknown timer, cannot enable it\n"); - return; - } - - lnw->hsm_timer.data = data; - lnw->hsm_timer.function = langwell_otg_timer_fn; - lnw->hsm_timer.expires = j + time * HZ / 1000; /* milliseconds */ - - add_timer(&lnw->hsm_timer); - - dev_dbg(lnw->dev, "add timer successfully\n"); -} - -/* Add timer to timer list */ -static void langwell_otg_add_timer(void *gtimer) -{ - struct langwell_otg_timer *timer = (struct langwell_otg_timer *)gtimer; - struct langwell_otg_timer *tmp_timer; - struct intel_mid_otg_xceiv *iotg = &the_transceiver->iotg; - u32 val32; - - /* Check if the timer is already in the active list, - * if so update timer count - */ - list_for_each_entry(tmp_timer, &active_timers, list) - if (tmp_timer == timer) { - timer->count = timer->expires; - return; - } - timer->count = timer->expires; - - if (list_empty(&active_timers)) { - val32 = readl(iotg->base + CI_OTGSC); - writel(val32 | OTGSC_1MSE, iotg->base + CI_OTGSC); - } - - list_add_tail(&timer->list, &active_timers); -} - -/* Remove timer from the timer list; clear timeout status */ -static void langwell_otg_del_timer(void *gtimer) -{ - struct langwell_otg *lnw = the_transceiver; - struct langwell_otg_timer *timer = (struct langwell_otg_timer *)gtimer; - struct langwell_otg_timer *tmp_timer, *del_tmp; - u32 val32; - - list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list) - if (tmp_timer == timer) - list_del(&timer->list); - - if (list_empty(&active_timers)) { - val32 = readl(lnw->iotg.base + CI_OTGSC); - writel(val32 & ~OTGSC_1MSE, lnw->iotg.base + CI_OTGSC); - } -} - -/* Reduce timer count by 1, and find timeout conditions.*/ -static int langwell_otg_tick_timer(u32 *int_sts) -{ - struct langwell_otg *lnw = the_transceiver; - struct langwell_otg_timer *tmp_timer, *del_tmp; - int expired = 0; - - list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list) { - tmp_timer->count--; - /* check if timer expires */ - if (!tmp_timer->count) { - list_del(&tmp_timer->list); - tmp_timer->function(tmp_timer->data); - expired = 1; - } - } - - if (list_empty(&active_timers)) { - dev_dbg(lnw->dev, "tick timer: disable 1ms int\n"); - *int_sts = *int_sts & ~OTGSC_1MSE; - } - return 
expired; -} - -static void reset_otg(void) -{ - struct langwell_otg *lnw = the_transceiver; - int delay_time = 1000; - u32 val; - - dev_dbg(lnw->dev, "reseting OTG controller ...\n"); - val = readl(lnw->iotg.base + CI_USBCMD); - writel(val | USBCMD_RST, lnw->iotg.base + CI_USBCMD); - do { - udelay(100); - if (!delay_time--) - dev_dbg(lnw->dev, "reset timeout\n"); - val = readl(lnw->iotg.base + CI_USBCMD); - val &= USBCMD_RST; - } while (val != 0); - dev_dbg(lnw->dev, "reset done.\n"); -} - -static void set_host_mode(void) -{ - struct langwell_otg *lnw = the_transceiver; - u32 val; - - reset_otg(); - val = readl(lnw->iotg.base + CI_USBMODE); - val = (val & (~USBMODE_CM)) | USBMODE_HOST; - writel(val, lnw->iotg.base + CI_USBMODE); -} - -static void set_client_mode(void) -{ - struct langwell_otg *lnw = the_transceiver; - u32 val; - - reset_otg(); - val = readl(lnw->iotg.base + CI_USBMODE); - val = (val & (~USBMODE_CM)) | USBMODE_DEVICE; - writel(val, lnw->iotg.base + CI_USBMODE); -} - -static void init_hsm(void) -{ - struct langwell_otg *lnw = the_transceiver; - struct intel_mid_otg_xceiv *iotg = &lnw->iotg; - u32 val32; - - /* read OTGSC after reset */ - val32 = readl(lnw->iotg.base + CI_OTGSC); - dev_dbg(lnw->dev, "%s: OTGSC init value = 0x%x\n", __func__, val32); - - /* set init state */ - if (val32 & OTGSC_ID) { - iotg->hsm.id = 1; - iotg->otg.default_a = 0; - set_client_mode(); - iotg->otg.state = OTG_STATE_B_IDLE; - } else { - iotg->hsm.id = 0; - iotg->otg.default_a = 1; - set_host_mode(); - iotg->otg.state = OTG_STATE_A_IDLE; - } - - /* set session indicator */ - if (val32 & OTGSC_BSE) - iotg->hsm.b_sess_end = 1; - if (val32 & OTGSC_BSV) - iotg->hsm.b_sess_vld = 1; - if (val32 & OTGSC_ASV) - iotg->hsm.a_sess_vld = 1; - if (val32 & OTGSC_AVV) - iotg->hsm.a_vbus_vld = 1; - - /* defautly power the bus */ - iotg->hsm.a_bus_req = 1; - iotg->hsm.a_bus_drop = 0; - /* defautly don't request bus as B device */ - iotg->hsm.b_bus_req = 0; - /* no system error */ - iotg->hsm.a_clr_err = 0; - - langwell_otg_phy_low_power_wait(1); -} - -static void update_hsm(void) -{ - struct langwell_otg *lnw = the_transceiver; - struct intel_mid_otg_xceiv *iotg = &lnw->iotg; - u32 val32; - - /* read OTGSC */ - val32 = readl(lnw->iotg.base + CI_OTGSC); - dev_dbg(lnw->dev, "%s: OTGSC value = 0x%x\n", __func__, val32); - - iotg->hsm.id = !!(val32 & OTGSC_ID); - iotg->hsm.b_sess_end = !!(val32 & OTGSC_BSE); - iotg->hsm.b_sess_vld = !!(val32 & OTGSC_BSV); - iotg->hsm.a_sess_vld = !!(val32 & OTGSC_ASV); - iotg->hsm.a_vbus_vld = !!(val32 & OTGSC_AVV); -} - -static irqreturn_t otg_dummy_irq(int irq, void *_dev) -{ - struct langwell_otg *lnw = the_transceiver; - void __iomem *reg_base = _dev; - u32 val; - u32 int_mask = 0; - - val = readl(reg_base + CI_USBMODE); - if ((val & USBMODE_CM) != USBMODE_DEVICE) - return IRQ_NONE; - - val = readl(reg_base + CI_USBSTS); - int_mask = val & INTR_DUMMY_MASK; - - if (int_mask == 0) - return IRQ_NONE; - - /* clear hsm.b_conn here since host driver can't detect it - * otg_dummy_irq called means B-disconnect happened. 
- */ - if (lnw->iotg.hsm.b_conn) { - lnw->iotg.hsm.b_conn = 0; - if (spin_trylock(&lnw->wq_lock)) { - langwell_update_transceiver(); - spin_unlock(&lnw->wq_lock); - } - } - - /* Clear interrupts */ - writel(int_mask, reg_base + CI_USBSTS); - return IRQ_HANDLED; -} - -static irqreturn_t otg_irq(int irq, void *_dev) -{ - struct langwell_otg *lnw = _dev; - struct intel_mid_otg_xceiv *iotg = &lnw->iotg; - u32 int_sts, int_en; - u32 int_mask = 0; - int flag = 0; - - int_sts = readl(lnw->iotg.base + CI_OTGSC); - int_en = (int_sts & OTGSC_INTEN_MASK) >> 8; - int_mask = int_sts & int_en; - if (int_mask == 0) - return IRQ_NONE; - - if (int_mask & OTGSC_IDIS) { - dev_dbg(lnw->dev, "%s: id change int\n", __func__); - iotg->hsm.id = (int_sts & OTGSC_ID) ? 1 : 0; - dev_dbg(lnw->dev, "id = %d\n", iotg->hsm.id); - flag = 1; - } - if (int_mask & OTGSC_DPIS) { - dev_dbg(lnw->dev, "%s: data pulse int\n", __func__); - iotg->hsm.a_srp_det = (int_sts & OTGSC_DPS) ? 1 : 0; - dev_dbg(lnw->dev, "data pulse = %d\n", iotg->hsm.a_srp_det); - flag = 1; - } - if (int_mask & OTGSC_BSEIS) { - dev_dbg(lnw->dev, "%s: b session end int\n", __func__); - iotg->hsm.b_sess_end = (int_sts & OTGSC_BSE) ? 1 : 0; - dev_dbg(lnw->dev, "b_sess_end = %d\n", iotg->hsm.b_sess_end); - flag = 1; - } - if (int_mask & OTGSC_BSVIS) { - dev_dbg(lnw->dev, "%s: b session valid int\n", __func__); - iotg->hsm.b_sess_vld = (int_sts & OTGSC_BSV) ? 1 : 0; - dev_dbg(lnw->dev, "b_sess_vld = %d\n", iotg->hsm.b_sess_end); - flag = 1; - } - if (int_mask & OTGSC_ASVIS) { - dev_dbg(lnw->dev, "%s: a session valid int\n", __func__); - iotg->hsm.a_sess_vld = (int_sts & OTGSC_ASV) ? 1 : 0; - dev_dbg(lnw->dev, "a_sess_vld = %d\n", iotg->hsm.a_sess_vld); - flag = 1; - } - if (int_mask & OTGSC_AVVIS) { - dev_dbg(lnw->dev, "%s: a vbus valid int\n", __func__); - iotg->hsm.a_vbus_vld = (int_sts & OTGSC_AVV) ? 
1 : 0; - dev_dbg(lnw->dev, "a_vbus_vld = %d\n", iotg->hsm.a_vbus_vld); - flag = 1; - } - - if (int_mask & OTGSC_1MSS) { - /* need to schedule otg_work if any timer is expired */ - if (langwell_otg_tick_timer(&int_sts)) - flag = 1; - } - - writel((int_sts & ~OTGSC_INTSTS_MASK) | int_mask, - lnw->iotg.base + CI_OTGSC); - if (flag) - langwell_update_transceiver(); - - return IRQ_HANDLED; -} - -static int langwell_otg_iotg_notify(struct notifier_block *nb, - unsigned long action, void *data) -{ - struct langwell_otg *lnw = the_transceiver; - struct intel_mid_otg_xceiv *iotg = data; - int flag = 0; - - if (iotg == NULL) - return NOTIFY_BAD; - - if (lnw == NULL) - return NOTIFY_BAD; - - switch (action) { - case MID_OTG_NOTIFY_CONNECT: - dev_dbg(lnw->dev, "Lnw OTG Notify Connect Event\n"); - if (iotg->otg.default_a == 1) - iotg->hsm.b_conn = 1; - else - iotg->hsm.a_conn = 1; - flag = 1; - break; - case MID_OTG_NOTIFY_DISCONN: - dev_dbg(lnw->dev, "Lnw OTG Notify Disconnect Event\n"); - if (iotg->otg.default_a == 1) - iotg->hsm.b_conn = 0; - else - iotg->hsm.a_conn = 0; - flag = 1; - break; - case MID_OTG_NOTIFY_HSUSPEND: - dev_dbg(lnw->dev, "Lnw OTG Notify Host Bus suspend Event\n"); - if (iotg->otg.default_a == 1) - iotg->hsm.a_suspend_req = 1; - else - iotg->hsm.b_bus_req = 0; - flag = 1; - break; - case MID_OTG_NOTIFY_HRESUME: - dev_dbg(lnw->dev, "Lnw OTG Notify Host Bus resume Event\n"); - if (iotg->otg.default_a == 1) - iotg->hsm.b_bus_resume = 1; - flag = 1; - break; - case MID_OTG_NOTIFY_CSUSPEND: - dev_dbg(lnw->dev, "Lnw OTG Notify Client Bus suspend Event\n"); - if (iotg->otg.default_a == 1) { - if (iotg->hsm.b_bus_suspend_vld == 2) { - iotg->hsm.b_bus_suspend = 1; - iotg->hsm.b_bus_suspend_vld = 0; - flag = 1; - } else { - iotg->hsm.b_bus_suspend_vld++; - flag = 0; - } - } else { - if (iotg->hsm.a_bus_suspend == 0) { - iotg->hsm.a_bus_suspend = 1; - flag = 1; - } - } - break; - case MID_OTG_NOTIFY_CRESUME: - dev_dbg(lnw->dev, "Lnw OTG Notify Client Bus resume Event\n"); - if (iotg->otg.default_a == 0) - iotg->hsm.a_bus_suspend = 0; - flag = 0; - break; - case MID_OTG_NOTIFY_HOSTADD: - dev_dbg(lnw->dev, "Lnw OTG Nofity Host Driver Add\n"); - flag = 1; - break; - case MID_OTG_NOTIFY_HOSTREMOVE: - dev_dbg(lnw->dev, "Lnw OTG Nofity Host Driver remove\n"); - flag = 1; - break; - case MID_OTG_NOTIFY_CLIENTADD: - dev_dbg(lnw->dev, "Lnw OTG Nofity Client Driver Add\n"); - flag = 1; - break; - case MID_OTG_NOTIFY_CLIENTREMOVE: - dev_dbg(lnw->dev, "Lnw OTG Nofity Client Driver remove\n"); - flag = 1; - break; - default: - dev_dbg(lnw->dev, "Lnw OTG Nofity unknown notify message\n"); - return NOTIFY_DONE; - } - - if (flag) - langwell_update_transceiver(); - - return NOTIFY_OK; -} - -static void langwell_otg_work(struct work_struct *work) -{ - struct langwell_otg *lnw; - struct intel_mid_otg_xceiv *iotg; - int retval; - struct pci_dev *pdev; - - lnw = container_of(work, struct langwell_otg, work); - iotg = &lnw->iotg; - pdev = to_pci_dev(lnw->dev); - - dev_dbg(lnw->dev, "%s: old state = %s\n", __func__, - otg_state_string(iotg->otg.state)); - - switch (iotg->otg.state) { - case OTG_STATE_UNDEFINED: - case OTG_STATE_B_IDLE: - if (!iotg->hsm.id) { - langwell_otg_del_timer(b_srp_init_tmr); - del_timer_sync(&lnw->hsm_timer); - - iotg->otg.default_a = 1; - iotg->hsm.a_srp_det = 0; - - langwell_otg_chrg_vbus(0); - set_host_mode(); - langwell_otg_phy_low_power(1); - - iotg->otg.state = OTG_STATE_A_IDLE; - langwell_update_transceiver(); - } else if (iotg->hsm.b_sess_vld) { - 
langwell_otg_del_timer(b_srp_init_tmr); - del_timer_sync(&lnw->hsm_timer); - iotg->hsm.b_sess_end = 0; - iotg->hsm.a_bus_suspend = 0; - langwell_otg_chrg_vbus(0); - - if (lnw->iotg.start_peripheral) { - lnw->iotg.start_peripheral(&lnw->iotg); - iotg->otg.state = OTG_STATE_B_PERIPHERAL; - } else - dev_dbg(lnw->dev, "client driver not loaded\n"); - - } else if (iotg->hsm.b_srp_init_tmout) { - iotg->hsm.b_srp_init_tmout = 0; - dev_warn(lnw->dev, "SRP init timeout\n"); - } else if (iotg->hsm.b_srp_fail_tmout) { - iotg->hsm.b_srp_fail_tmout = 0; - iotg->hsm.b_bus_req = 0; - - /* No silence failure */ - langwell_otg_nsf_msg(6); - } else if (iotg->hsm.b_bus_req && iotg->hsm.b_sess_end) { - del_timer_sync(&lnw->hsm_timer); - /* workaround for b_se0_srp detection */ - retval = langwell_otg_check_se0_srp(0); - if (retval) { - iotg->hsm.b_bus_req = 0; - dev_dbg(lnw->dev, "LS isn't SE0, try later\n"); - } else { - /* clear the PHCD before start srp */ - langwell_otg_phy_low_power(0); - - /* Start SRP */ - langwell_otg_add_timer(b_srp_init_tmr); - iotg->otg.start_srp(&iotg->otg); - langwell_otg_del_timer(b_srp_init_tmr); - langwell_otg_add_ktimer(TB_SRP_FAIL_TMR); - - /* reset PHY low power mode here */ - langwell_otg_phy_low_power_wait(1); - } - } - break; - case OTG_STATE_B_SRP_INIT: - if (!iotg->hsm.id) { - iotg->otg.default_a = 1; - iotg->hsm.a_srp_det = 0; - - /* Turn off VBus */ - iotg->otg.set_vbus(&iotg->otg, false); - langwell_otg_chrg_vbus(0); - set_host_mode(); - langwell_otg_phy_low_power(1); - iotg->otg.state = OTG_STATE_A_IDLE; - langwell_update_transceiver(); - } else if (iotg->hsm.b_sess_vld) { - langwell_otg_chrg_vbus(0); - if (lnw->iotg.start_peripheral) { - lnw->iotg.start_peripheral(&lnw->iotg); - iotg->otg.state = OTG_STATE_B_PERIPHERAL; - } else - dev_dbg(lnw->dev, "client driver not loaded\n"); - } - break; - case OTG_STATE_B_PERIPHERAL: - if (!iotg->hsm.id) { - iotg->otg.default_a = 1; - iotg->hsm.a_srp_det = 0; - - langwell_otg_chrg_vbus(0); - - if (lnw->iotg.stop_peripheral) - lnw->iotg.stop_peripheral(&lnw->iotg); - else - dev_dbg(lnw->dev, - "client driver has been removed.\n"); - - set_host_mode(); - langwell_otg_phy_low_power(1); - iotg->otg.state = OTG_STATE_A_IDLE; - langwell_update_transceiver(); - } else if (!iotg->hsm.b_sess_vld) { - iotg->hsm.b_hnp_enable = 0; - - if (lnw->iotg.stop_peripheral) - lnw->iotg.stop_peripheral(&lnw->iotg); - else - dev_dbg(lnw->dev, - "client driver has been removed.\n"); - - iotg->otg.state = OTG_STATE_B_IDLE; - } else if (iotg->hsm.b_bus_req && iotg->otg.gadget && - iotg->otg.gadget->b_hnp_enable && - iotg->hsm.a_bus_suspend) { - - if (lnw->iotg.stop_peripheral) - lnw->iotg.stop_peripheral(&lnw->iotg); - else - dev_dbg(lnw->dev, - "client driver has been removed.\n"); - - langwell_otg_HAAR(1); - iotg->hsm.a_conn = 0; - - if (lnw->iotg.start_host) { - lnw->iotg.start_host(&lnw->iotg); - iotg->otg.state = OTG_STATE_B_WAIT_ACON; - } else - dev_dbg(lnw->dev, - "host driver not loaded.\n"); - - iotg->hsm.a_bus_resume = 0; - langwell_otg_add_ktimer(TB_ASE0_BRST_TMR); - } - break; - - case OTG_STATE_B_WAIT_ACON: - if (!iotg->hsm.id) { - /* delete hsm timer for b_ase0_brst_tmr */ - del_timer_sync(&lnw->hsm_timer); - - iotg->otg.default_a = 1; - iotg->hsm.a_srp_det = 0; - - langwell_otg_chrg_vbus(0); - - langwell_otg_HAAR(0); - if (lnw->iotg.stop_host) - lnw->iotg.stop_host(&lnw->iotg); - else - dev_dbg(lnw->dev, - "host driver has been removed.\n"); - - set_host_mode(); - langwell_otg_phy_low_power(1); - iotg->otg.state = OTG_STATE_A_IDLE; - 
langwell_update_transceiver(); - } else if (!iotg->hsm.b_sess_vld) { - /* delete hsm timer for b_ase0_brst_tmr */ - del_timer_sync(&lnw->hsm_timer); - - iotg->hsm.b_hnp_enable = 0; - iotg->hsm.b_bus_req = 0; - - langwell_otg_chrg_vbus(0); - langwell_otg_HAAR(0); - - if (lnw->iotg.stop_host) - lnw->iotg.stop_host(&lnw->iotg); - else - dev_dbg(lnw->dev, - "host driver has been removed.\n"); - - set_client_mode(); - langwell_otg_phy_low_power(1); - iotg->otg.state = OTG_STATE_B_IDLE; - } else if (iotg->hsm.a_conn) { - /* delete hsm timer for b_ase0_brst_tmr */ - del_timer_sync(&lnw->hsm_timer); - - langwell_otg_HAAR(0); - iotg->otg.state = OTG_STATE_B_HOST; - langwell_update_transceiver(); - } else if (iotg->hsm.a_bus_resume || - iotg->hsm.b_ase0_brst_tmout) { - /* delete hsm timer for b_ase0_brst_tmr */ - del_timer_sync(&lnw->hsm_timer); - - langwell_otg_HAAR(0); - langwell_otg_nsf_msg(7); - - if (lnw->iotg.stop_host) - lnw->iotg.stop_host(&lnw->iotg); - else - dev_dbg(lnw->dev, - "host driver has been removed.\n"); - - iotg->hsm.a_bus_suspend = 0; - iotg->hsm.b_bus_req = 0; - - if (lnw->iotg.start_peripheral) - lnw->iotg.start_peripheral(&lnw->iotg); - else - dev_dbg(lnw->dev, - "client driver not loaded.\n"); - - iotg->otg.state = OTG_STATE_B_PERIPHERAL; - } - break; - - case OTG_STATE_B_HOST: - if (!iotg->hsm.id) { - iotg->otg.default_a = 1; - iotg->hsm.a_srp_det = 0; - - langwell_otg_chrg_vbus(0); - - if (lnw->iotg.stop_host) - lnw->iotg.stop_host(&lnw->iotg); - else - dev_dbg(lnw->dev, - "host driver has been removed.\n"); - - set_host_mode(); - langwell_otg_phy_low_power(1); - iotg->otg.state = OTG_STATE_A_IDLE; - langwell_update_transceiver(); - } else if (!iotg->hsm.b_sess_vld) { - iotg->hsm.b_hnp_enable = 0; - iotg->hsm.b_bus_req = 0; - - langwell_otg_chrg_vbus(0); - if (lnw->iotg.stop_host) - lnw->iotg.stop_host(&lnw->iotg); - else - dev_dbg(lnw->dev, - "host driver has been removed.\n"); - - set_client_mode(); - langwell_otg_phy_low_power(1); - iotg->otg.state = OTG_STATE_B_IDLE; - } else if ((!iotg->hsm.b_bus_req) || - (!iotg->hsm.a_conn)) { - iotg->hsm.b_bus_req = 0; - langwell_otg_loc_sof(0); - - if (lnw->iotg.stop_host) - lnw->iotg.stop_host(&lnw->iotg); - else - dev_dbg(lnw->dev, - "host driver has been removed.\n"); - - iotg->hsm.a_bus_suspend = 0; - - if (lnw->iotg.start_peripheral) - lnw->iotg.start_peripheral(&lnw->iotg); - else - dev_dbg(lnw->dev, - "client driver not loaded.\n"); - - iotg->otg.state = OTG_STATE_B_PERIPHERAL; - } - break; - - case OTG_STATE_A_IDLE: - iotg->otg.default_a = 1; - if (iotg->hsm.id) { - iotg->otg.default_a = 0; - iotg->hsm.b_bus_req = 0; - iotg->hsm.vbus_srp_up = 0; - - langwell_otg_chrg_vbus(0); - set_client_mode(); - langwell_otg_phy_low_power(1); - iotg->otg.state = OTG_STATE_B_IDLE; - langwell_update_transceiver(); - } else if (!iotg->hsm.a_bus_drop && - (iotg->hsm.a_srp_det || iotg->hsm.a_bus_req)) { - langwell_otg_phy_low_power(0); - - /* Turn on VBus */ - iotg->otg.set_vbus(&iotg->otg, true); - - iotg->hsm.vbus_srp_up = 0; - iotg->hsm.a_wait_vrise_tmout = 0; - langwell_otg_add_timer(a_wait_vrise_tmr); - iotg->otg.state = OTG_STATE_A_WAIT_VRISE; - langwell_update_transceiver(); - } else if (!iotg->hsm.a_bus_drop && iotg->hsm.a_sess_vld) { - iotg->hsm.vbus_srp_up = 1; - } else if (!iotg->hsm.a_sess_vld && iotg->hsm.vbus_srp_up) { - msleep(10); - langwell_otg_phy_low_power(0); - - /* Turn on VBus */ - iotg->otg.set_vbus(&iotg->otg, true); - iotg->hsm.a_srp_det = 1; - iotg->hsm.vbus_srp_up = 0; - iotg->hsm.a_wait_vrise_tmout = 0; - 
langwell_otg_add_timer(a_wait_vrise_tmr); - iotg->otg.state = OTG_STATE_A_WAIT_VRISE; - langwell_update_transceiver(); - } else if (!iotg->hsm.a_sess_vld && - !iotg->hsm.vbus_srp_up) { - langwell_otg_phy_low_power(1); - } - break; - case OTG_STATE_A_WAIT_VRISE: - if (iotg->hsm.id) { - langwell_otg_del_timer(a_wait_vrise_tmr); - iotg->hsm.b_bus_req = 0; - iotg->otg.default_a = 0; - - /* Turn off VBus */ - iotg->otg.set_vbus(&iotg->otg, false); - set_client_mode(); - langwell_otg_phy_low_power_wait(1); - iotg->otg.state = OTG_STATE_B_IDLE; - } else if (iotg->hsm.a_vbus_vld) { - langwell_otg_del_timer(a_wait_vrise_tmr); - iotg->hsm.b_conn = 0; - if (lnw->iotg.start_host) - lnw->iotg.start_host(&lnw->iotg); - else { - dev_dbg(lnw->dev, "host driver not loaded.\n"); - break; - } - - langwell_otg_add_ktimer(TA_WAIT_BCON_TMR); - iotg->otg.state = OTG_STATE_A_WAIT_BCON; - } else if (iotg->hsm.a_wait_vrise_tmout) { - iotg->hsm.b_conn = 0; - if (iotg->hsm.a_vbus_vld) { - if (lnw->iotg.start_host) - lnw->iotg.start_host(&lnw->iotg); - else { - dev_dbg(lnw->dev, - "host driver not loaded.\n"); - break; - } - langwell_otg_add_ktimer(TA_WAIT_BCON_TMR); - iotg->otg.state = OTG_STATE_A_WAIT_BCON; - } else { - - /* Turn off VBus */ - iotg->otg.set_vbus(&iotg->otg, false); - langwell_otg_phy_low_power_wait(1); - iotg->otg.state = OTG_STATE_A_VBUS_ERR; - } - } - break; - case OTG_STATE_A_WAIT_BCON: - if (iotg->hsm.id) { - /* delete hsm timer for a_wait_bcon_tmr */ - del_timer_sync(&lnw->hsm_timer); - - iotg->otg.default_a = 0; - iotg->hsm.b_bus_req = 0; - - if (lnw->iotg.stop_host) - lnw->iotg.stop_host(&lnw->iotg); - else - dev_dbg(lnw->dev, - "host driver has been removed.\n"); - - /* Turn off VBus */ - iotg->otg.set_vbus(&iotg->otg, false); - set_client_mode(); - langwell_otg_phy_low_power_wait(1); - iotg->otg.state = OTG_STATE_B_IDLE; - langwell_update_transceiver(); - } else if (!iotg->hsm.a_vbus_vld) { - /* delete hsm timer for a_wait_bcon_tmr */ - del_timer_sync(&lnw->hsm_timer); - - if (lnw->iotg.stop_host) - lnw->iotg.stop_host(&lnw->iotg); - else - dev_dbg(lnw->dev, - "host driver has been removed.\n"); - - /* Turn off VBus */ - iotg->otg.set_vbus(&iotg->otg, false); - langwell_otg_phy_low_power_wait(1); - iotg->otg.state = OTG_STATE_A_VBUS_ERR; - } else if (iotg->hsm.a_bus_drop || - (iotg->hsm.a_wait_bcon_tmout && - !iotg->hsm.a_bus_req)) { - /* delete hsm timer for a_wait_bcon_tmr */ - del_timer_sync(&lnw->hsm_timer); - - if (lnw->iotg.stop_host) - lnw->iotg.stop_host(&lnw->iotg); - else - dev_dbg(lnw->dev, - "host driver has been removed.\n"); - - /* Turn off VBus */ - iotg->otg.set_vbus(&iotg->otg, false); - iotg->otg.state = OTG_STATE_A_WAIT_VFALL; - } else if (iotg->hsm.b_conn) { - /* delete hsm timer for a_wait_bcon_tmr */ - del_timer_sync(&lnw->hsm_timer); - - iotg->hsm.a_suspend_req = 0; - iotg->otg.state = OTG_STATE_A_HOST; - if (iotg->hsm.a_srp_det && iotg->otg.host && - !iotg->otg.host->b_hnp_enable) { - /* SRP capable peripheral-only device */ - iotg->hsm.a_bus_req = 1; - iotg->hsm.a_srp_det = 0; - } else if (!iotg->hsm.a_bus_req && iotg->otg.host && - iotg->otg.host->b_hnp_enable) { - /* It is not safe enough to do a fast - * transition from A_WAIT_BCON to - * A_SUSPEND */ - msleep(10000); - if (iotg->hsm.a_bus_req) - break; - - if (request_irq(pdev->irq, - otg_dummy_irq, IRQF_SHARED, - driver_name, iotg->base) != 0) { - dev_dbg(lnw->dev, - "request interrupt %d fail\n", - pdev->irq); - } - - langwell_otg_HABA(1); - iotg->hsm.b_bus_resume = 0; - iotg->hsm.a_aidl_bdis_tmout = 0; - - 
langwell_otg_loc_sof(0); - /* clear PHCD to enable HW timer */ - langwell_otg_phy_low_power(0); - langwell_otg_add_timer(a_aidl_bdis_tmr); - iotg->otg.state = OTG_STATE_A_SUSPEND; - } else if (!iotg->hsm.a_bus_req && iotg->otg.host && - !iotg->otg.host->b_hnp_enable) { - if (lnw->iotg.stop_host) - lnw->iotg.stop_host(&lnw->iotg); - else - dev_dbg(lnw->dev, - "host driver removed.\n"); - - /* Turn off VBus */ - iotg->otg.set_vbus(&iotg->otg, false); - iotg->otg.state = OTG_STATE_A_WAIT_VFALL; - } - } - break; - case OTG_STATE_A_HOST: - if (iotg->hsm.id) { - iotg->otg.default_a = 0; - iotg->hsm.b_bus_req = 0; - - if (lnw->iotg.stop_host) - lnw->iotg.stop_host(&lnw->iotg); - else - dev_dbg(lnw->dev, - "host driver has been removed.\n"); - - /* Turn off VBus */ - iotg->otg.set_vbus(&iotg->otg, false); - set_client_mode(); - langwell_otg_phy_low_power_wait(1); - iotg->otg.state = OTG_STATE_B_IDLE; - langwell_update_transceiver(); - } else if (iotg->hsm.a_bus_drop || - (iotg->otg.host && - !iotg->otg.host->b_hnp_enable && - !iotg->hsm.a_bus_req)) { - if (lnw->iotg.stop_host) - lnw->iotg.stop_host(&lnw->iotg); - else - dev_dbg(lnw->dev, - "host driver has been removed.\n"); - - /* Turn off VBus */ - iotg->otg.set_vbus(&iotg->otg, false); - iotg->otg.state = OTG_STATE_A_WAIT_VFALL; - } else if (!iotg->hsm.a_vbus_vld) { - if (lnw->iotg.stop_host) - lnw->iotg.stop_host(&lnw->iotg); - else - dev_dbg(lnw->dev, - "host driver has been removed.\n"); - - /* Turn off VBus */ - iotg->otg.set_vbus(&iotg->otg, false); - langwell_otg_phy_low_power_wait(1); - iotg->otg.state = OTG_STATE_A_VBUS_ERR; - } else if (iotg->otg.host && - iotg->otg.host->b_hnp_enable && - !iotg->hsm.a_bus_req) { - /* Set HABA to enable hardware assistance to signal - * A-connect after receiver B-disconnect. Hardware - * will then set client mode and enable URE, SLE and - * PCE after the assistance. otg_dummy_irq is used to - * clean these ints when client driver is not resumed. 
- */ - if (request_irq(pdev->irq, otg_dummy_irq, IRQF_SHARED, - driver_name, iotg->base) != 0) { - dev_dbg(lnw->dev, - "request interrupt %d failed\n", - pdev->irq); - } - - /* set HABA */ - langwell_otg_HABA(1); - iotg->hsm.b_bus_resume = 0; - iotg->hsm.a_aidl_bdis_tmout = 0; - langwell_otg_loc_sof(0); - /* clear PHCD to enable HW timer */ - langwell_otg_phy_low_power(0); - langwell_otg_add_timer(a_aidl_bdis_tmr); - iotg->otg.state = OTG_STATE_A_SUSPEND; - } else if (!iotg->hsm.b_conn || !iotg->hsm.a_bus_req) { - langwell_otg_add_ktimer(TA_WAIT_BCON_TMR); - iotg->otg.state = OTG_STATE_A_WAIT_BCON; - } - break; - case OTG_STATE_A_SUSPEND: - if (iotg->hsm.id) { - langwell_otg_del_timer(a_aidl_bdis_tmr); - langwell_otg_HABA(0); - free_irq(pdev->irq, iotg->base); - iotg->otg.default_a = 0; - iotg->hsm.b_bus_req = 0; - - if (lnw->iotg.stop_host) - lnw->iotg.stop_host(&lnw->iotg); - else - dev_dbg(lnw->dev, - "host driver has been removed.\n"); - - /* Turn off VBus */ - iotg->otg.set_vbus(&iotg->otg, false); - set_client_mode(); - langwell_otg_phy_low_power(1); - iotg->otg.state = OTG_STATE_B_IDLE; - langwell_update_transceiver(); - } else if (iotg->hsm.a_bus_req || - iotg->hsm.b_bus_resume) { - langwell_otg_del_timer(a_aidl_bdis_tmr); - langwell_otg_HABA(0); - free_irq(pdev->irq, iotg->base); - iotg->hsm.a_suspend_req = 0; - langwell_otg_loc_sof(1); - iotg->otg.state = OTG_STATE_A_HOST; - } else if (iotg->hsm.a_aidl_bdis_tmout || - iotg->hsm.a_bus_drop) { - langwell_otg_del_timer(a_aidl_bdis_tmr); - langwell_otg_HABA(0); - free_irq(pdev->irq, iotg->base); - if (lnw->iotg.stop_host) - lnw->iotg.stop_host(&lnw->iotg); - else - dev_dbg(lnw->dev, - "host driver has been removed.\n"); - - /* Turn off VBus */ - iotg->otg.set_vbus(&iotg->otg, false); - iotg->otg.state = OTG_STATE_A_WAIT_VFALL; - } else if (!iotg->hsm.b_conn && iotg->otg.host && - iotg->otg.host->b_hnp_enable) { - langwell_otg_del_timer(a_aidl_bdis_tmr); - langwell_otg_HABA(0); - free_irq(pdev->irq, iotg->base); - - if (lnw->iotg.stop_host) - lnw->iotg.stop_host(&lnw->iotg); - else - dev_dbg(lnw->dev, - "host driver has been removed.\n"); - - iotg->hsm.b_bus_suspend = 0; - iotg->hsm.b_bus_suspend_vld = 0; - - /* msleep(200); */ - if (lnw->iotg.start_peripheral) - lnw->iotg.start_peripheral(&lnw->iotg); - else - dev_dbg(lnw->dev, - "client driver not loaded.\n"); - - langwell_otg_add_ktimer(TB_BUS_SUSPEND_TMR); - iotg->otg.state = OTG_STATE_A_PERIPHERAL; - break; - } else if (!iotg->hsm.a_vbus_vld) { - langwell_otg_del_timer(a_aidl_bdis_tmr); - langwell_otg_HABA(0); - free_irq(pdev->irq, iotg->base); - if (lnw->iotg.stop_host) - lnw->iotg.stop_host(&lnw->iotg); - else - dev_dbg(lnw->dev, - "host driver has been removed.\n"); - - /* Turn off VBus */ - iotg->otg.set_vbus(&iotg->otg, false); - langwell_otg_phy_low_power_wait(1); - iotg->otg.state = OTG_STATE_A_VBUS_ERR; - } - break; - case OTG_STATE_A_PERIPHERAL: - if (iotg->hsm.id) { - /* delete hsm timer for b_bus_suspend_tmr */ - del_timer_sync(&lnw->hsm_timer); - iotg->otg.default_a = 0; - iotg->hsm.b_bus_req = 0; - if (lnw->iotg.stop_peripheral) - lnw->iotg.stop_peripheral(&lnw->iotg); - else - dev_dbg(lnw->dev, - "client driver has been removed.\n"); - - /* Turn off VBus */ - iotg->otg.set_vbus(&iotg->otg, false); - set_client_mode(); - langwell_otg_phy_low_power_wait(1); - iotg->otg.state = OTG_STATE_B_IDLE; - langwell_update_transceiver(); - } else if (!iotg->hsm.a_vbus_vld) { - /* delete hsm timer for b_bus_suspend_tmr */ - del_timer_sync(&lnw->hsm_timer); - - if 
(lnw->iotg.stop_peripheral) - lnw->iotg.stop_peripheral(&lnw->iotg); - else - dev_dbg(lnw->dev, - "client driver has been removed.\n"); - - /* Turn off VBus */ - iotg->otg.set_vbus(&iotg->otg, false); - langwell_otg_phy_low_power_wait(1); - iotg->otg.state = OTG_STATE_A_VBUS_ERR; - } else if (iotg->hsm.a_bus_drop) { - /* delete hsm timer for b_bus_suspend_tmr */ - del_timer_sync(&lnw->hsm_timer); - - if (lnw->iotg.stop_peripheral) - lnw->iotg.stop_peripheral(&lnw->iotg); - else - dev_dbg(lnw->dev, - "client driver has been removed.\n"); - - /* Turn off VBus */ - iotg->otg.set_vbus(&iotg->otg, false); - iotg->otg.state = OTG_STATE_A_WAIT_VFALL; - } else if (iotg->hsm.b_bus_suspend) { - /* delete hsm timer for b_bus_suspend_tmr */ - del_timer_sync(&lnw->hsm_timer); - - if (lnw->iotg.stop_peripheral) - lnw->iotg.stop_peripheral(&lnw->iotg); - else - dev_dbg(lnw->dev, - "client driver has been removed.\n"); - - if (lnw->iotg.start_host) - lnw->iotg.start_host(&lnw->iotg); - else - dev_dbg(lnw->dev, - "host driver not loaded.\n"); - langwell_otg_add_ktimer(TA_WAIT_BCON_TMR); - iotg->otg.state = OTG_STATE_A_WAIT_BCON; - } else if (iotg->hsm.b_bus_suspend_tmout) { - u32 val; - val = readl(lnw->iotg.base + CI_PORTSC1); - if (!(val & PORTSC_SUSP)) - break; - - if (lnw->iotg.stop_peripheral) - lnw->iotg.stop_peripheral(&lnw->iotg); - else - dev_dbg(lnw->dev, - "client driver has been removed.\n"); - - if (lnw->iotg.start_host) - lnw->iotg.start_host(&lnw->iotg); - else - dev_dbg(lnw->dev, - "host driver not loaded.\n"); - langwell_otg_add_ktimer(TA_WAIT_BCON_TMR); - iotg->otg.state = OTG_STATE_A_WAIT_BCON; - } - break; - case OTG_STATE_A_VBUS_ERR: - if (iotg->hsm.id) { - iotg->otg.default_a = 0; - iotg->hsm.a_clr_err = 0; - iotg->hsm.a_srp_det = 0; - set_client_mode(); - langwell_otg_phy_low_power(1); - iotg->otg.state = OTG_STATE_B_IDLE; - langwell_update_transceiver(); - } else if (iotg->hsm.a_clr_err) { - iotg->hsm.a_clr_err = 0; - iotg->hsm.a_srp_det = 0; - reset_otg(); - init_hsm(); - if (iotg->otg.state == OTG_STATE_A_IDLE) - langwell_update_transceiver(); - } else { - /* FW will clear PHCD bit when any VBus - * event detected. 
Reset PHCD to 1 again */ - langwell_otg_phy_low_power(1); - } - break; - case OTG_STATE_A_WAIT_VFALL: - if (iotg->hsm.id) { - iotg->otg.default_a = 0; - set_client_mode(); - langwell_otg_phy_low_power(1); - iotg->otg.state = OTG_STATE_B_IDLE; - langwell_update_transceiver(); - } else if (iotg->hsm.a_bus_req) { - - /* Turn on VBus */ - iotg->otg.set_vbus(&iotg->otg, true); - iotg->hsm.a_wait_vrise_tmout = 0; - langwell_otg_add_timer(a_wait_vrise_tmr); - iotg->otg.state = OTG_STATE_A_WAIT_VRISE; - } else if (!iotg->hsm.a_sess_vld) { - iotg->hsm.a_srp_det = 0; - set_host_mode(); - langwell_otg_phy_low_power(1); - iotg->otg.state = OTG_STATE_A_IDLE; - } - break; - default: - ; - } - - dev_dbg(lnw->dev, "%s: new state = %s\n", __func__, - otg_state_string(iotg->otg.state)); -} - -static ssize_t -show_registers(struct device *_dev, struct device_attribute *attr, char *buf) -{ - struct langwell_otg *lnw = the_transceiver; - char *next; - unsigned size, t; - - next = buf; - size = PAGE_SIZE; - - t = scnprintf(next, size, - "\n" - "USBCMD = 0x%08x\n" - "USBSTS = 0x%08x\n" - "USBINTR = 0x%08x\n" - "ASYNCLISTADDR = 0x%08x\n" - "PORTSC1 = 0x%08x\n" - "HOSTPC1 = 0x%08x\n" - "OTGSC = 0x%08x\n" - "USBMODE = 0x%08x\n", - readl(lnw->iotg.base + 0x30), - readl(lnw->iotg.base + 0x34), - readl(lnw->iotg.base + 0x38), - readl(lnw->iotg.base + 0x48), - readl(lnw->iotg.base + 0x74), - readl(lnw->iotg.base + 0xb4), - readl(lnw->iotg.base + 0xf4), - readl(lnw->iotg.base + 0xf8) - ); - size -= t; - next += t; - - return PAGE_SIZE - size; -} -static DEVICE_ATTR(registers, S_IRUGO, show_registers, NULL); - -static ssize_t -show_hsm(struct device *_dev, struct device_attribute *attr, char *buf) -{ - struct langwell_otg *lnw = the_transceiver; - struct intel_mid_otg_xceiv *iotg = &lnw->iotg; - char *next; - unsigned size, t; - - next = buf; - size = PAGE_SIZE; - - if (iotg->otg.host) - iotg->hsm.a_set_b_hnp_en = iotg->otg.host->b_hnp_enable; - - if (iotg->otg.gadget) - iotg->hsm.b_hnp_enable = iotg->otg.gadget->b_hnp_enable; - - t = scnprintf(next, size, - "\n" - "current state = %s\n" - "a_bus_resume = \t%d\n" - "a_bus_suspend = \t%d\n" - "a_conn = \t%d\n" - "a_sess_vld = \t%d\n" - "a_srp_det = \t%d\n" - "a_vbus_vld = \t%d\n" - "b_bus_resume = \t%d\n" - "b_bus_suspend = \t%d\n" - "b_conn = \t%d\n" - "b_se0_srp = \t%d\n" - "b_sess_end = \t%d\n" - "b_sess_vld = \t%d\n" - "id = \t%d\n" - "a_set_b_hnp_en = \t%d\n" - "b_srp_done = \t%d\n" - "b_hnp_enable = \t%d\n" - "a_wait_vrise_tmout = \t%d\n" - "a_wait_bcon_tmout = \t%d\n" - "a_aidl_bdis_tmout = \t%d\n" - "b_ase0_brst_tmout = \t%d\n" - "a_bus_drop = \t%d\n" - "a_bus_req = \t%d\n" - "a_clr_err = \t%d\n" - "a_suspend_req = \t%d\n" - "b_bus_req = \t%d\n" - "b_bus_suspend_tmout = \t%d\n" - "b_bus_suspend_vld = \t%d\n", - otg_state_string(iotg->otg.state), - iotg->hsm.a_bus_resume, - iotg->hsm.a_bus_suspend, - iotg->hsm.a_conn, - iotg->hsm.a_sess_vld, - iotg->hsm.a_srp_det, - iotg->hsm.a_vbus_vld, - iotg->hsm.b_bus_resume, - iotg->hsm.b_bus_suspend, - iotg->hsm.b_conn, - iotg->hsm.b_se0_srp, - iotg->hsm.b_sess_end, - iotg->hsm.b_sess_vld, - iotg->hsm.id, - iotg->hsm.a_set_b_hnp_en, - iotg->hsm.b_srp_done, - iotg->hsm.b_hnp_enable, - iotg->hsm.a_wait_vrise_tmout, - iotg->hsm.a_wait_bcon_tmout, - iotg->hsm.a_aidl_bdis_tmout, - iotg->hsm.b_ase0_brst_tmout, - iotg->hsm.a_bus_drop, - iotg->hsm.a_bus_req, - iotg->hsm.a_clr_err, - iotg->hsm.a_suspend_req, - iotg->hsm.b_bus_req, - iotg->hsm.b_bus_suspend_tmout, - iotg->hsm.b_bus_suspend_vld - ); - size -= t; - next += t; - - return 
PAGE_SIZE - size; -} -static DEVICE_ATTR(hsm, S_IRUGO, show_hsm, NULL); - -static ssize_t -get_a_bus_req(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct langwell_otg *lnw = the_transceiver; - char *next; - unsigned size, t; - - next = buf; - size = PAGE_SIZE; - - t = scnprintf(next, size, "%d", lnw->iotg.hsm.a_bus_req); - size -= t; - next += t; - - return PAGE_SIZE - size; -} - -static ssize_t -set_a_bus_req(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct langwell_otg *lnw = the_transceiver; - struct intel_mid_otg_xceiv *iotg = &lnw->iotg; - - if (!iotg->otg.default_a) - return -1; - if (count > 2) - return -1; - - if (buf[0] == '0') { - iotg->hsm.a_bus_req = 0; - dev_dbg(lnw->dev, "User request: a_bus_req = 0\n"); - } else if (buf[0] == '1') { - /* If a_bus_drop is TRUE, a_bus_req can't be set */ - if (iotg->hsm.a_bus_drop) - return -1; - iotg->hsm.a_bus_req = 1; - dev_dbg(lnw->dev, "User request: a_bus_req = 1\n"); - } - if (spin_trylock(&lnw->wq_lock)) { - langwell_update_transceiver(); - spin_unlock(&lnw->wq_lock); - } - return count; -} -static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUSR, get_a_bus_req, set_a_bus_req); - -static ssize_t -get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct langwell_otg *lnw = the_transceiver; - char *next; - unsigned size, t; - - next = buf; - size = PAGE_SIZE; - - t = scnprintf(next, size, "%d", lnw->iotg.hsm.a_bus_drop); - size -= t; - next += t; - - return PAGE_SIZE - size; -} - -static ssize_t -set_a_bus_drop(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct langwell_otg *lnw = the_transceiver; - struct intel_mid_otg_xceiv *iotg = &lnw->iotg; - - if (!iotg->otg.default_a) - return -1; - if (count > 2) - return -1; - - if (buf[0] == '0') { - iotg->hsm.a_bus_drop = 0; - dev_dbg(lnw->dev, "User request: a_bus_drop = 0\n"); - } else if (buf[0] == '1') { - iotg->hsm.a_bus_drop = 1; - iotg->hsm.a_bus_req = 0; - dev_dbg(lnw->dev, "User request: a_bus_drop = 1\n"); - dev_dbg(lnw->dev, "User request: and a_bus_req = 0\n"); - } - if (spin_trylock(&lnw->wq_lock)) { - langwell_update_transceiver(); - spin_unlock(&lnw->wq_lock); - } - return count; -} -static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUSR, get_a_bus_drop, set_a_bus_drop); - -static ssize_t -get_b_bus_req(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct langwell_otg *lnw = the_transceiver; - char *next; - unsigned size, t; - - next = buf; - size = PAGE_SIZE; - - t = scnprintf(next, size, "%d", lnw->iotg.hsm.b_bus_req); - size -= t; - next += t; - - return PAGE_SIZE - size; -} - -static ssize_t -set_b_bus_req(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct langwell_otg *lnw = the_transceiver; - struct intel_mid_otg_xceiv *iotg = &lnw->iotg; - - if (iotg->otg.default_a) - return -1; - - if (count > 2) - return -1; - - if (buf[0] == '0') { - iotg->hsm.b_bus_req = 0; - dev_dbg(lnw->dev, "User request: b_bus_req = 0\n"); - } else if (buf[0] == '1') { - iotg->hsm.b_bus_req = 1; - dev_dbg(lnw->dev, "User request: b_bus_req = 1\n"); - } - if (spin_trylock(&lnw->wq_lock)) { - langwell_update_transceiver(); - spin_unlock(&lnw->wq_lock); - } - return count; -} -static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUSR, get_b_bus_req, set_b_bus_req); - -static ssize_t -set_a_clr_err(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct langwell_otg *lnw = 
the_transceiver; - struct intel_mid_otg_xceiv *iotg = &lnw->iotg; - - if (!iotg->otg.default_a) - return -1; - if (count > 2) - return -1; - - if (buf[0] == '1') { - iotg->hsm.a_clr_err = 1; - dev_dbg(lnw->dev, "User request: a_clr_err = 1\n"); - } - if (spin_trylock(&lnw->wq_lock)) { - langwell_update_transceiver(); - spin_unlock(&lnw->wq_lock); - } - return count; -} -static DEVICE_ATTR(a_clr_err, S_IWUSR, NULL, set_a_clr_err); - -static struct attribute *inputs_attrs[] = { - &dev_attr_a_bus_req.attr, - &dev_attr_a_bus_drop.attr, - &dev_attr_b_bus_req.attr, - &dev_attr_a_clr_err.attr, - NULL, -}; - -static struct attribute_group debug_dev_attr_group = { - .name = "inputs", - .attrs = inputs_attrs, -}; - -static int langwell_otg_probe(struct pci_dev *pdev, - const struct pci_device_id *id) -{ - unsigned long resource, len; - void __iomem *base = NULL; - int retval; - u32 val32; - struct langwell_otg *lnw; - char qname[] = "langwell_otg_queue"; - - retval = 0; - dev_dbg(&pdev->dev, "\notg controller is detected.\n"); - if (pci_enable_device(pdev) < 0) { - retval = -ENODEV; - goto done; - } - - lnw = kzalloc(sizeof *lnw, GFP_KERNEL); - if (lnw == NULL) { - retval = -ENOMEM; - goto done; - } - the_transceiver = lnw; - - /* control register: BAR 0 */ - resource = pci_resource_start(pdev, 0); - len = pci_resource_len(pdev, 0); - if (!request_mem_region(resource, len, driver_name)) { - retval = -EBUSY; - goto err; - } - lnw->region = 1; - - base = ioremap_nocache(resource, len); - if (base == NULL) { - retval = -EFAULT; - goto err; - } - lnw->iotg.base = base; - - if (!request_mem_region(USBCFG_ADDR, USBCFG_LEN, driver_name)) { - retval = -EBUSY; - goto err; - } - lnw->cfg_region = 1; - - /* For the SCCB.USBCFG register */ - base = ioremap_nocache(USBCFG_ADDR, USBCFG_LEN); - if (base == NULL) { - retval = -EFAULT; - goto err; - } - lnw->usbcfg = base; - - if (!pdev->irq) { - dev_dbg(&pdev->dev, "No IRQ.\n"); - retval = -ENODEV; - goto err; - } - - lnw->qwork = create_singlethread_workqueue(qname); - if (!lnw->qwork) { - dev_dbg(&pdev->dev, "cannot create workqueue %s\n", qname); - retval = -ENOMEM; - goto err; - } - INIT_WORK(&lnw->work, langwell_otg_work); - - /* OTG common part */ - lnw->dev = &pdev->dev; - lnw->iotg.otg.dev = lnw->dev; - lnw->iotg.otg.label = driver_name; - lnw->iotg.otg.set_host = langwell_otg_set_host; - lnw->iotg.otg.set_peripheral = langwell_otg_set_peripheral; - lnw->iotg.otg.set_power = langwell_otg_set_power; - lnw->iotg.otg.set_vbus = langwell_otg_set_vbus; - lnw->iotg.otg.start_srp = langwell_otg_start_srp; - lnw->iotg.otg.state = OTG_STATE_UNDEFINED; - - if (otg_set_transceiver(&lnw->iotg.otg)) { - dev_dbg(lnw->dev, "can't set transceiver\n"); - retval = -EBUSY; - goto err; - } - - reset_otg(); - init_hsm(); - - spin_lock_init(&lnw->lock); - spin_lock_init(&lnw->wq_lock); - INIT_LIST_HEAD(&active_timers); - retval = langwell_otg_init_timers(&lnw->iotg.hsm); - if (retval) { - dev_dbg(&pdev->dev, "Failed to init timers\n"); - goto err; - } - - init_timer(&lnw->hsm_timer); - ATOMIC_INIT_NOTIFIER_HEAD(&lnw->iotg.iotg_notifier); - - lnw->iotg_notifier.notifier_call = langwell_otg_iotg_notify; - - retval = intel_mid_otg_register_notifier(&lnw->iotg, - &lnw->iotg_notifier); - if (retval) { - dev_dbg(lnw->dev, "Failed to register notifier\n"); - goto err; - } - - if (request_irq(pdev->irq, otg_irq, IRQF_SHARED, - driver_name, lnw) != 0) { - dev_dbg(lnw->dev, "request interrupt %d failed\n", pdev->irq); - retval = -EBUSY; - goto err; - } - - /* enable OTGSC int */ - val32 = 
OTGSC_DPIE | OTGSC_BSEIE | OTGSC_BSVIE | - OTGSC_ASVIE | OTGSC_AVVIE | OTGSC_IDIE | OTGSC_IDPU; - writel(val32, lnw->iotg.base + CI_OTGSC); - - retval = device_create_file(&pdev->dev, &dev_attr_registers); - if (retval < 0) { - dev_dbg(lnw->dev, - "Can't register sysfs attribute: %d\n", retval); - goto err; - } - - retval = device_create_file(&pdev->dev, &dev_attr_hsm); - if (retval < 0) { - dev_dbg(lnw->dev, "Can't hsm sysfs attribute: %d\n", retval); - goto err; - } - - retval = sysfs_create_group(&pdev->dev.kobj, &debug_dev_attr_group); - if (retval < 0) { - dev_dbg(lnw->dev, - "Can't register sysfs attr group: %d\n", retval); - goto err; - } - - if (lnw->iotg.otg.state == OTG_STATE_A_IDLE) - langwell_update_transceiver(); - - return 0; - -err: - if (the_transceiver) - langwell_otg_remove(pdev); -done: - return retval; -} - -static void langwell_otg_remove(struct pci_dev *pdev) -{ - struct langwell_otg *lnw = the_transceiver; - - if (lnw->qwork) { - flush_workqueue(lnw->qwork); - destroy_workqueue(lnw->qwork); - } - intel_mid_otg_unregister_notifier(&lnw->iotg, &lnw->iotg_notifier); - langwell_otg_free_timers(); - - /* disable OTGSC interrupt as OTGSC doesn't change in reset */ - writel(0, lnw->iotg.base + CI_OTGSC); - - if (pdev->irq) - free_irq(pdev->irq, lnw); - if (lnw->usbcfg) - iounmap(lnw->usbcfg); - if (lnw->cfg_region) - release_mem_region(USBCFG_ADDR, USBCFG_LEN); - if (lnw->iotg.base) - iounmap(lnw->iotg.base); - if (lnw->region) - release_mem_region(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); - - otg_set_transceiver(NULL); - pci_disable_device(pdev); - sysfs_remove_group(&pdev->dev.kobj, &debug_dev_attr_group); - device_remove_file(&pdev->dev, &dev_attr_hsm); - device_remove_file(&pdev->dev, &dev_attr_registers); - kfree(lnw); - lnw = NULL; -} - -static void transceiver_suspend(struct pci_dev *pdev) -{ - pci_save_state(pdev); - pci_set_power_state(pdev, PCI_D3hot); - langwell_otg_phy_low_power(1); -} - -static int langwell_otg_suspend(struct pci_dev *pdev, pm_message_t message) -{ - struct langwell_otg *lnw = the_transceiver; - struct intel_mid_otg_xceiv *iotg = &lnw->iotg; - int ret = 0; - - /* Disbale OTG interrupts */ - langwell_otg_intr(0); - - if (pdev->irq) - free_irq(pdev->irq, lnw); - - /* Prevent more otg_work */ - flush_workqueue(lnw->qwork); - destroy_workqueue(lnw->qwork); - lnw->qwork = NULL; - - /* start actions */ - switch (iotg->otg.state) { - case OTG_STATE_A_WAIT_VFALL: - iotg->otg.state = OTG_STATE_A_IDLE; - case OTG_STATE_A_IDLE: - case OTG_STATE_B_IDLE: - case OTG_STATE_A_VBUS_ERR: - transceiver_suspend(pdev); - break; - case OTG_STATE_A_WAIT_VRISE: - langwell_otg_del_timer(a_wait_vrise_tmr); - iotg->hsm.a_srp_det = 0; - - /* Turn off VBus */ - iotg->otg.set_vbus(&iotg->otg, false); - iotg->otg.state = OTG_STATE_A_IDLE; - transceiver_suspend(pdev); - break; - case OTG_STATE_A_WAIT_BCON: - del_timer_sync(&lnw->hsm_timer); - if (lnw->iotg.stop_host) - lnw->iotg.stop_host(&lnw->iotg); - else - dev_dbg(&pdev->dev, "host driver has been removed.\n"); - - iotg->hsm.a_srp_det = 0; - - /* Turn off VBus */ - iotg->otg.set_vbus(&iotg->otg, false); - iotg->otg.state = OTG_STATE_A_IDLE; - transceiver_suspend(pdev); - break; - case OTG_STATE_A_HOST: - if (lnw->iotg.stop_host) - lnw->iotg.stop_host(&lnw->iotg); - else - dev_dbg(&pdev->dev, "host driver has been removed.\n"); - - iotg->hsm.a_srp_det = 0; - - /* Turn off VBus */ - iotg->otg.set_vbus(&iotg->otg, false); - - iotg->otg.state = OTG_STATE_A_IDLE; - transceiver_suspend(pdev); - break; - case 
OTG_STATE_A_SUSPEND: - langwell_otg_del_timer(a_aidl_bdis_tmr); - langwell_otg_HABA(0); - if (lnw->iotg.stop_host) - lnw->iotg.stop_host(&lnw->iotg); - else - dev_dbg(lnw->dev, "host driver has been removed.\n"); - iotg->hsm.a_srp_det = 0; - - /* Turn off VBus */ - iotg->otg.set_vbus(&iotg->otg, false); - iotg->otg.state = OTG_STATE_A_IDLE; - transceiver_suspend(pdev); - break; - case OTG_STATE_A_PERIPHERAL: - del_timer_sync(&lnw->hsm_timer); - - if (lnw->iotg.stop_peripheral) - lnw->iotg.stop_peripheral(&lnw->iotg); - else - dev_dbg(&pdev->dev, - "client driver has been removed.\n"); - iotg->hsm.a_srp_det = 0; - - /* Turn off VBus */ - iotg->otg.set_vbus(&iotg->otg, false); - iotg->otg.state = OTG_STATE_A_IDLE; - transceiver_suspend(pdev); - break; - case OTG_STATE_B_HOST: - if (lnw->iotg.stop_host) - lnw->iotg.stop_host(&lnw->iotg); - else - dev_dbg(&pdev->dev, "host driver has been removed.\n"); - iotg->hsm.b_bus_req = 0; - iotg->otg.state = OTG_STATE_B_IDLE; - transceiver_suspend(pdev); - break; - case OTG_STATE_B_PERIPHERAL: - if (lnw->iotg.stop_peripheral) - lnw->iotg.stop_peripheral(&lnw->iotg); - else - dev_dbg(&pdev->dev, - "client driver has been removed.\n"); - iotg->otg.state = OTG_STATE_B_IDLE; - transceiver_suspend(pdev); - break; - case OTG_STATE_B_WAIT_ACON: - /* delete hsm timer for b_ase0_brst_tmr */ - del_timer_sync(&lnw->hsm_timer); - - langwell_otg_HAAR(0); - - if (lnw->iotg.stop_host) - lnw->iotg.stop_host(&lnw->iotg); - else - dev_dbg(&pdev->dev, "host driver has been removed.\n"); - iotg->hsm.b_bus_req = 0; - iotg->otg.state = OTG_STATE_B_IDLE; - transceiver_suspend(pdev); - break; - default: - dev_dbg(lnw->dev, "error state before suspend\n"); - break; - } - - return ret; -} - -static void transceiver_resume(struct pci_dev *pdev) -{ - pci_restore_state(pdev); - pci_set_power_state(pdev, PCI_D0); -} - -static int langwell_otg_resume(struct pci_dev *pdev) -{ - struct langwell_otg *lnw = the_transceiver; - int ret = 0; - - transceiver_resume(pdev); - - lnw->qwork = create_singlethread_workqueue("langwell_otg_queue"); - if (!lnw->qwork) { - dev_dbg(&pdev->dev, "cannot create langwell otg workqueuen"); - ret = -ENOMEM; - goto error; - } - - if (request_irq(pdev->irq, otg_irq, IRQF_SHARED, - driver_name, lnw) != 0) { - dev_dbg(&pdev->dev, "request interrupt %d failed\n", pdev->irq); - ret = -EBUSY; - goto error; - } - - /* enable OTG interrupts */ - langwell_otg_intr(1); - - update_hsm(); - - langwell_update_transceiver(); - - return ret; -error: - langwell_otg_intr(0); - transceiver_suspend(pdev); - return ret; -} - -static int __init langwell_otg_init(void) -{ - return pci_register_driver(&otg_pci_driver); -} -module_init(langwell_otg_init); - -static void __exit langwell_otg_cleanup(void) -{ - pci_unregister_driver(&otg_pci_driver); -} -module_exit(langwell_otg_cleanup); diff --git a/drivers/usb/otg/mv_otg.c b/drivers/usb/otg/mv_otg.c index db0d4fc..b5fbe14 100644 --- a/drivers/usb/otg/mv_otg.c +++ b/drivers/usb/otg/mv_otg.c @@ -202,6 +202,7 @@ static void mv_otg_init_irq(struct mv_otg *mvotg) static void mv_otg_start_host(struct mv_otg *mvotg, int on) { +#ifdef CONFIG_USB struct otg_transceiver *otg = &mvotg->otg; struct usb_hcd *hcd; @@ -216,6 +217,7 @@ static void mv_otg_start_host(struct mv_otg *mvotg, int on) usb_add_hcd(hcd, hcd->irq, IRQF_SHARED); else usb_remove_hcd(hcd); +#endif /* CONFIG_USB */ } static void mv_otg_start_periphrals(struct mv_otg *mvotg, int on) diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 
b51fcd8..72339bd 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -772,10 +772,10 @@ static void usbhsf_dma_prepare_tasklet(unsigned long data) struct dma_async_tx_descriptor *desc; struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt); struct device *dev = usbhs_priv_to_dev(priv); - enum dma_data_direction dir; + enum dma_transfer_direction dir; dma_cookie_t cookie; - dir = usbhs_pipe_is_dir_in(pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; + dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV; sg_init_table(&sg, 1); sg_set_page(&sg, virt_to_page(pkt->dma), diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index 528691d5f..7542aa9 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c @@ -425,7 +425,7 @@ static int usbhsg_recip_run_handle(struct usbhs_priv *priv, struct usbhs_pipe *pipe; int recip = ctrl->bRequestType & USB_RECIP_MASK; int nth = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK; - int ret; + int ret = 0; int (*func)(struct usbhs_priv *priv, struct usbhsg_uep *uep, struct usb_ctrlrequest *ctrl); char *msg; diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index fba1147..8dbf51a 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -39,6 +39,8 @@ static void cp210x_get_termios(struct tty_struct *, struct usb_serial_port *port); static void cp210x_get_termios_port(struct usb_serial_port *port, unsigned int *cflagp, unsigned int *baudp); +static void cp210x_change_speed(struct tty_struct *, struct usb_serial_port *, + struct ktermios *); static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *, struct ktermios*); static int cp210x_tiocmget(struct tty_struct *); @@ -138,6 +140,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ + { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */ { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */ { } /* Terminating Entry */ }; @@ -201,6 +204,8 @@ static struct usb_serial_driver cp210x_device = { #define CP210X_EMBED_EVENTS 0x15 #define CP210X_GET_EVENTSTATE 0x16 #define CP210X_SET_CHARS 0x19 +#define CP210X_GET_BAUDRATE 0x1D +#define CP210X_SET_BAUDRATE 0x1E /* CP210X_IFC_ENABLE */ #define UART_ENABLE 0x0001 @@ -360,8 +365,8 @@ static inline int cp210x_set_config_single(struct usb_serial_port *port, * Quantises the baud rate as per AN205 Table 1 */ static unsigned int cp210x_quantise_baudrate(unsigned int baud) { - if (baud <= 56) baud = 0; - else if (baud <= 300) baud = 300; + if (baud <= 300) + baud = 300; else if (baud <= 600) baud = 600; else if (baud <= 1200) baud = 1200; else if (baud <= 1800) baud = 1800; @@ -389,10 +394,10 @@ static unsigned int cp210x_quantise_baudrate(unsigned int baud) { else if (baud <= 491520) baud = 460800; else if (baud <= 567138) baud = 500000; else if (baud <= 670254) baud = 576000; - else if (baud <= 1053257) baud = 921600; - else if (baud <= 1474560) baud = 1228800; - else if (baud <= 2457600) baud = 1843200; - else baud = 3686400; + else if (baud < 1000000) + baud = 921600; + else if (baud > 2000000) + baud = 2000000; return baud; } @@ -409,13 +414,14 @@ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port) return result; } - result = usb_serial_generic_open(tty, port); 
- if (result) - return result; - /* Configure the termios structure */ cp210x_get_termios(tty, port); - return 0; + + /* The baud rate must be initialised on cp2104 */ + if (tty) + cp210x_change_speed(tty, port, NULL); + + return usb_serial_generic_open(tty, port); } static void cp210x_close(struct usb_serial_port *port) @@ -467,10 +473,7 @@ static void cp210x_get_termios_port(struct usb_serial_port *port, dbg("%s - port %d", __func__, port->number); - cp210x_get_config(port, CP210X_GET_BAUDDIV, &baud, 2); - /* Convert to baudrate */ - if (baud) - baud = cp210x_quantise_baudrate((BAUD_RATE_GEN_FREQ + baud/2)/ baud); + cp210x_get_config(port, CP210X_GET_BAUDRATE, &baud, 4); dbg("%s - baud rate = %d", __func__, baud); *baudp = baud; @@ -579,11 +582,64 @@ static void cp210x_get_termios_port(struct usb_serial_port *port, *cflagp = cflag; } +/* + * CP2101 supports the following baud rates: + * + * 300, 600, 1200, 1800, 2400, 4800, 7200, 9600, 14400, 19200, 28800, + * 38400, 56000, 57600, 115200, 128000, 230400, 460800, 921600 + * + * CP2102 and CP2103 support the following additional rates: + * + * 4000, 16000, 51200, 64000, 76800, 153600, 250000, 256000, 500000, + * 576000 + * + * The device will map a requested rate to a supported one, but the result + * of requests for rates greater than 1053257 is undefined (see AN205). + * + * CP2104, CP2105 and CP2110 support most rates up to 2M, 921k and 1M baud, + * respectively, with an error less than 1%. The actual rates are determined + * by + * + * div = round(freq / (2 x prescale x request)) + * actual = freq / (2 x prescale x div) + * + * For CP2104 and CP2105 freq is 48Mhz and prescale is 4 for request <= 365bps + * or 1 otherwise. + * For CP2110 freq is 24Mhz and prescale is 4 for request <= 300bps or 1 + * otherwise. + */ +static void cp210x_change_speed(struct tty_struct *tty, + struct usb_serial_port *port, struct ktermios *old_termios) +{ + u32 baud; + + baud = tty->termios->c_ospeed; + + /* This maps the requested rate to a rate valid on cp2102 or cp2103, + * or to an arbitrary rate in [1M,2M]. + * + * NOTE: B0 is not implemented. 
+ */ + baud = cp210x_quantise_baudrate(baud); + + dbg("%s - setting baud rate to %u", __func__, baud); + if (cp210x_set_config(port, CP210X_SET_BAUDRATE, &baud, + sizeof(baud))) { + dev_warn(&port->dev, "failed to set baud rate to %u\n", baud); + if (old_termios) + baud = old_termios->c_ospeed; + else + baud = 9600; + } + + tty_encode_baud_rate(tty, baud, baud); +} + static void cp210x_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old_termios) { unsigned int cflag, old_cflag; - unsigned int baud = 0, bits; + unsigned int bits; unsigned int modem_ctl[4]; dbg("%s - port %d", __func__, port->number); @@ -593,20 +649,9 @@ static void cp210x_set_termios(struct tty_struct *tty, cflag = tty->termios->c_cflag; old_cflag = old_termios->c_cflag; - baud = cp210x_quantise_baudrate(tty_get_baud_rate(tty)); - - /* If the baud rate is to be updated*/ - if (baud != tty_termios_baud_rate(old_termios) && baud != 0) { - dbg("%s - Setting baud rate to %d baud", __func__, - baud); - if (cp210x_set_config_single(port, CP210X_SET_BAUDDIV, - ((BAUD_RATE_GEN_FREQ + baud/2) / baud))) { - dbg("Baud rate requested not supported by device"); - baud = tty_termios_baud_rate(old_termios); - } - } - /* Report back the resulting baud rate */ - tty_encode_baud_rate(tty, baud, baud); + + if (tty->termios->c_ospeed != old_termios->c_ospeed) + cp210x_change_speed(tty, port, old_termios); /* If the number of data bits is to be updated */ if ((cflag & CSIZE) != (old_cflag & CSIZE)) { diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 01b6404..ad654f8 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -797,6 +797,7 @@ static struct usb_device_id id_table_combined [] = { .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(ADI_VID, ADI_GNICEPLUS_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(HORNBY_VID, HORNBY_ELITE_PID) }, { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) }, { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, @@ -805,6 +806,8 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) }, { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(FTDI_VID, TI_XDS100V2_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) }, { USB_DEVICE(FTDI_VID, HAMEG_HO720_PID) }, { USB_DEVICE(FTDI_VID, HAMEG_HO730_PID) }, @@ -841,6 +844,7 @@ static struct usb_device_id id_table_combined [] = { .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(ST_VID, ST_STMCLT1030_PID), .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk }, + { USB_DEVICE(FTDI_VID, FTDI_RF_R106) }, { }, /* Optional parameter entry */ { } /* Terminating entry */ }; @@ -1333,8 +1337,7 @@ static int set_serial_info(struct tty_struct *tty, goto check_and_exit; } - if ((new_serial.baud_base != priv->baud_base) && - (new_serial.baud_base < 9600)) { + if (new_serial.baud_base != priv->baud_base) { mutex_unlock(&priv->cfg_lock); return -EINVAL; } @@ -1824,6 +1827,7 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port) static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port) { + struct ktermios dummy; struct usb_device *dev = port->serial->dev; struct ftdi_private *priv = usb_get_serial_port_data(port); int result; @@ -1842,8 +1846,10 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port) This 
is same behaviour as serial.c/rs_open() - Kuba */ /* ftdi_set_termios will send usb control messages */ - if (tty) - ftdi_set_termios(tty, port, tty->termios); + if (tty) { + memset(&dummy, 0, sizeof(dummy)); + ftdi_set_termios(tty, port, &dummy); + } /* Start reading from the device */ result = usb_serial_generic_open(tty, port); diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index df1d7da..f994503 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -39,6 +39,13 @@ /* www.candapter.com Ewert Energy Systems CANdapter device */ #define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */ +/* + * Texas Instruments XDS100v2 JTAG / BeagleBone A3 + * http://processors.wiki.ti.com/index.php/XDS100 + * http://beagleboard.org/bone + */ +#define TI_XDS100V2_PID 0xa6d0 + #define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */ /* US Interface Navigator (http://www.usinterface.com/) */ @@ -525,6 +532,12 @@ #define ADI_GNICEPLUS_PID 0xF001 /* + * Hornby Elite + */ +#define HORNBY_VID 0x04D8 +#define HORNBY_ELITE_PID 0x000A + +/* * RATOC REX-USB60F */ #define RATOC_VENDOR_ID 0x0584 @@ -1168,3 +1181,9 @@ */ /* TagTracer MIFARE*/ #define FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID 0xF7C0 + +/* + * Rainforest Automation + */ +/* ZigBee controller */ +#define FTDI_RF_R106 0x8A28 diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index 65bf06a..5818bfc 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c @@ -2657,15 +2657,7 @@ cleanup: static void edge_disconnect(struct usb_serial *serial) { - int i; - struct edgeport_port *edge_port; - dbg("%s", __func__); - - for (i = 0; i < serial->num_ports; ++i) { - edge_port = usb_get_serial_port_data(serial->port[i]); - edge_remove_sysfs_attrs(edge_port->port); - } } static void edge_release(struct usb_serial *serial) @@ -2744,6 +2736,7 @@ static struct usb_serial_driver edgeport_1port_device = { .disconnect = edge_disconnect, .release = edge_release, .port_probe = edge_create_sysfs_attrs, + .port_remove = edge_remove_sysfs_attrs, .ioctl = edge_ioctl, .set_termios = edge_set_termios, .tiocmget = edge_tiocmget, @@ -2775,6 +2768,7 @@ static struct usb_serial_driver edgeport_2port_device = { .disconnect = edge_disconnect, .release = edge_release, .port_probe = edge_create_sysfs_attrs, + .port_remove = edge_remove_sysfs_attrs, .ioctl = edge_ioctl, .set_termios = edge_set_termios, .tiocmget = edge_tiocmget, diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c index 5d3beee..a92a3ef 100644 --- a/drivers/usb/serial/kobil_sct.c +++ b/drivers/usb/serial/kobil_sct.c @@ -38,7 +38,7 @@ #include <linux/ioctl.h> #include "kobil_sct.h" -static int debug; +static bool debug; /* Version Information */ #define DRIVER_VERSION "21/05/2004" diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 420d985..ea126a4 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -480,6 +480,10 @@ static void option_instat_callback(struct urb *urb); #define ZD_VENDOR_ID 0x0685 #define ZD_PRODUCT_7000 0x7000 +/* LG products */ +#define LG_VENDOR_ID 0x1004 +#define LG_PRODUCT_L02C 0x618f + /* some devices interfaces need special handling due to a number of reasons */ enum option_blacklist_reason { OPTION_BLACKLIST_NONE = 0, @@ -1183,6 +1187,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) }, { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 
0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) }, + { USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c index 30b73e6..a348198 100644 --- a/drivers/usb/serial/qcaux.c +++ b/drivers/usb/serial/qcaux.c @@ -36,6 +36,7 @@ #define UTSTARCOM_PRODUCT_UM175_V1 0x3712 #define UTSTARCOM_PRODUCT_UM175_V2 0x3714 #define UTSTARCOM_PRODUCT_UM175_ALLTEL 0x3715 +#define PANTECH_PRODUCT_UML190_VZW 0x3716 #define PANTECH_PRODUCT_UML290_VZW 0x3718 /* CMOTECH devices */ @@ -67,7 +68,11 @@ static struct usb_device_id id_table[] = { { USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) }, - { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML190_VZW, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML190_VZW, 0xff, 0xfe, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xfd, 0xff) }, /* NMEA */ + { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xfe, 0xff) }, /* WMC */ + { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) }, /* DIAG */ { }, }; MODULE_DEVICE_TABLE(usb, id_table); diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c index 1f62723..d32f720 100644 --- a/drivers/usb/storage/realtek_cr.c +++ b/drivers/usb/storage/realtek_cr.c @@ -789,7 +789,7 @@ static void rts51x_suspend_timer_fn(unsigned long data) rts51x_set_stat(chip, RTS51X_STAT_SS); /* ignore mass storage interface's children */ pm_suspend_ignore_children(&us->pusb_intf->dev, true); - usb_autopm_put_interface(us->pusb_intf); + usb_autopm_put_interface_async(us->pusb_intf); US_DEBUGP("%s: RTS51X_STAT_SS 01," "intf->pm_usage_cnt:%d, power.usage:%d\n", __func__, diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c index 8efeae2..b4a7167 100644 --- a/drivers/usb/usb-skeleton.c +++ b/drivers/usb/usb-skeleton.c @@ -27,8 +27,6 @@ #define USB_SKEL_VENDOR_ID 0xfff0 #define USB_SKEL_PRODUCT_ID 0xfff0 -static DEFINE_MUTEX(skel_mutex); - /* table of devices that work with this driver */ static const struct usb_device_id skel_table[] = { { USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) }, @@ -101,25 +99,18 @@ static int skel_open(struct inode *inode, struct file *file) goto exit; } - mutex_lock(&skel_mutex); dev = usb_get_intfdata(interface); if (!dev) { - mutex_unlock(&skel_mutex); retval = -ENODEV; goto exit; } /* increment our usage count for the device */ kref_get(&dev->kref); - mutex_unlock(&skel_mutex); /* lock the device to allow correctly handling errors * in resumption */ mutex_lock(&dev->io_mutex); - if (!dev->interface) { - retval = -ENODEV; - goto out_err; - } retval = usb_autopm_get_interface(interface); if (retval) @@ -127,11 +118,7 @@ static int skel_open(struct inode *inode, struct file *file) /* save our object in the file's private structure */ file->private_data = dev; - -out_err: mutex_unlock(&dev->io_mutex); - if (retval) - kref_put(&dev->kref, skel_delete); exit: return 
retval;
@@ -611,6 +598,7 @@ static void skel_disconnect(struct usb_interface *interface)
 	int minor = interface->minor;
 	dev = usb_get_intfdata(interface);
+	usb_set_intfdata(interface, NULL);
 	/* give back our minor */
 	usb_deregister_dev(interface, &skel_class);
@@ -622,12 +610,8 @@ static void skel_disconnect(struct usb_interface *interface)
 	usb_kill_anchored_urbs(&dev->submitted);
-	mutex_lock(&skel_mutex);
-	usb_set_intfdata(interface, NULL);
-
 	/* decrement our usage count */
 	kref_put(&dev->kref, skel_delete);
-	mutex_unlock(&skel_mutex);
 	dev_info(&interface->dev, "USB Skeleton #%d now disconnected", minor);
 }
diff --git a/drivers/usb/wusbcore/Kconfig b/drivers/usb/wusbcore/Kconfig
index 0ead882..f29fdd7 100644
--- a/drivers/usb/wusbcore/Kconfig
+++ b/drivers/usb/wusbcore/Kconfig
@@ -6,7 +6,7 @@ config USB_WUSB
 	depends on EXPERIMENTAL
 	depends on USB
 	depends on PCI
-	select UWB
+	depends on UWB
 	select CRYPTO
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_CBC
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 882a51f..9dab1f5 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -856,9 +856,9 @@ static const struct file_operations vhost_net_fops = {
 };
 static struct miscdevice vhost_net_misc = {
-	MISC_DYNAMIC_MINOR,
-	"vhost-net",
-	&vhost_net_fops,
+	.minor = VHOST_NET_MINOR,
+	.name = "vhost-net",
+	.fops = &vhost_net_fops,
 };
 static int vhost_net_init(void)
@@ -879,3 +879,5 @@ MODULE_VERSION("0.0.1");
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Michael S. Tsirkin");
 MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
+MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
+MODULE_ALIAS("devname:vhost-net");
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 0d7b20d..e40c00f 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -1108,7 +1108,7 @@ static int atmel_lcdfb_suspend(struct platform_device *pdev, pm_message_t mesg)
 	 */
 	lcdc_writel(sinfo, ATMEL_LCDC_IDR, ~0UL);
-	sinfo->saved_lcdcon = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
+	sinfo->saved_lcdcon = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_CTR);
 	lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, 0);
 	if (sinfo->atmel_lcdfb_power_control)
 		sinfo->atmel_lcdfb_power_control(0);
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 66bc74d..378276c 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -146,7 +146,7 @@ static int adp8860_set_bits(struct i2c_client *client, int reg, uint8_t bit_mask
 	ret = adp8860_read(client, reg, &reg_val);
-	if (!ret && ((reg_val & bit_mask) == 0)) {
+	if (!ret && ((reg_val & bit_mask) != bit_mask)) {
 		reg_val |= bit_mask;
 		ret = adp8860_write(client, reg, reg_val);
 	}
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 6c68a68..6735059 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -160,7 +160,7 @@ static int adp8870_set_bits(struct i2c_client *client, int reg, uint8_t bit_mask
 	ret = adp8870_read(client, reg, &reg_val);
-	if (!ret && ((reg_val & bit_mask) == 0)) {
+	if (!ret && ((reg_val & bit_mask) != bit_mask)) {
 		reg_val |= bit_mask;
 		ret = adp8870_write(client, reg, reg_val);
 	}
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index 4f5d1c4..27d1d7a 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -190,6 +190,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
 	priv->io_reg =
regulator_get(&spi->dev, "vdd"); if (IS_ERR(priv->io_reg)) { + ret = PTR_ERR(priv->io_reg); dev_err(&spi->dev, "%s: Unable to get the IO regulator\n", __func__); goto err3; @@ -197,6 +198,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi) priv->core_reg = regulator_get(&spi->dev, "vcore"); if (IS_ERR(priv->core_reg)) { + ret = PTR_ERR(priv->core_reg); dev_err(&spi->dev, "%s: Unable to get the core regulator\n", __func__); goto err4; diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c index acf292b..6af3f16 100644 --- a/drivers/video/fsl-diu-fb.c +++ b/drivers/video/fsl-diu-fb.c @@ -1432,7 +1432,7 @@ static int fsl_diu_suspend(struct platform_device *ofdev, pm_message_t state) struct fsl_diu_data *data; data = dev_get_drvdata(&ofdev->dev); - disable_lcdc(data->fsl_diu_info[0]); + disable_lcdc(data->fsl_diu_info); return 0; } @@ -1442,7 +1442,7 @@ static int fsl_diu_resume(struct platform_device *ofdev) struct fsl_diu_data *data; data = dev_get_drvdata(&ofdev->dev); - enable_lcdc(data->fsl_diu_info[0]); + enable_lcdc(data->fsl_diu_info); return 0; } diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c index c6afa33..02fd226 100644 --- a/drivers/video/intelfb/intelfbdrv.c +++ b/drivers/video/intelfb/intelfbdrv.c @@ -529,7 +529,6 @@ static int __devinit intelfb_pci_register(struct pci_dev *pdev, if (fb_alloc_cmap(&info->cmap, 256, 1) < 0) { ERR_MSG("Could not allocate cmap for intelfb_info.\n"); goto err_out_cmap; - return -ENODEV; } dinfo = info->par; diff --git a/drivers/video/macfb.c b/drivers/video/macfb.c index 43207cc..fe01add 100644 --- a/drivers/video/macfb.c +++ b/drivers/video/macfb.c @@ -592,12 +592,12 @@ static int __init macfb_init(void) if (!fb_info.screen_base) return -ENODEV; - printk("macfb: framebuffer at 0x%08lx, mapped to 0x%p, size %dk\n", - macfb_fix.smem_start, fb_info.screen_base, - macfb_fix.smem_len / 1024); - printk("macfb: mode is %dx%dx%d, linelength=%d\n", - macfb_defined.xres, macfb_defined.yres, - macfb_defined.bits_per_pixel, macfb_fix.line_length); + pr_info("macfb: framebuffer at 0x%08lx, mapped to 0x%p, size %dk\n", + macfb_fix.smem_start, fb_info.screen_base, + macfb_fix.smem_len / 1024); + pr_info("macfb: mode is %dx%dx%d, linelength=%d\n", + macfb_defined.xres, macfb_defined.yres, + macfb_defined.bits_per_pixel, macfb_fix.line_length); /* Fill in the available video resolution */ macfb_defined.xres_virtual = macfb_defined.xres; @@ -613,14 +613,10 @@ static int __init macfb_init(void) switch (macfb_defined.bits_per_pixel) { case 1: - /* - * XXX: I think this will catch any program that tries - * to do FBIO_PUTCMAP when the visual is monochrome. 
- */ macfb_defined.red.length = macfb_defined.bits_per_pixel; macfb_defined.green.length = macfb_defined.bits_per_pixel; macfb_defined.blue.length = macfb_defined.bits_per_pixel; - video_cmap_len = 0; + video_cmap_len = 2; macfb_fix.visual = FB_VISUAL_MONO01; break; case 2: @@ -660,11 +656,10 @@ static int __init macfb_init(void) macfb_fix.visual = FB_VISUAL_TRUECOLOR; break; default: - video_cmap_len = 0; - macfb_fix.visual = FB_VISUAL_MONO01; - printk("macfb: unknown or unsupported bit depth: %d\n", + pr_err("macfb: unknown or unsupported bit depth: %d\n", macfb_defined.bits_per_pixel); - break; + err = -EINVAL; + goto fail_unmap; } /* @@ -734,8 +729,8 @@ static int __init macfb_init(void) case MAC_MODEL_Q950: strcpy(macfb_fix.id, "DAFB"); macfb_setpalette = dafb_setpalette; - macfb_defined.activate = FB_ACTIVATE_NOW; dafb_cmap_regs = ioremap(DAFB_BASE, 0x1000); + macfb_defined.activate = FB_ACTIVATE_NOW; break; /* @@ -744,8 +739,8 @@ static int __init macfb_init(void) case MAC_MODEL_LCII: strcpy(macfb_fix.id, "V8"); macfb_setpalette = v8_brazil_setpalette; - macfb_defined.activate = FB_ACTIVATE_NOW; v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000); + macfb_defined.activate = FB_ACTIVATE_NOW; break; /* @@ -758,8 +753,8 @@ static int __init macfb_init(void) case MAC_MODEL_P600: strcpy(macfb_fix.id, "Brazil"); macfb_setpalette = v8_brazil_setpalette; - macfb_defined.activate = FB_ACTIVATE_NOW; v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000); + macfb_defined.activate = FB_ACTIVATE_NOW; break; /* @@ -773,10 +768,10 @@ static int __init macfb_init(void) case MAC_MODEL_P520: case MAC_MODEL_P550: case MAC_MODEL_P460: - macfb_setpalette = v8_brazil_setpalette; - macfb_defined.activate = FB_ACTIVATE_NOW; strcpy(macfb_fix.id, "Sonora"); + macfb_setpalette = v8_brazil_setpalette; v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000); + macfb_defined.activate = FB_ACTIVATE_NOW; break; /* @@ -786,10 +781,10 @@ static int __init macfb_init(void) */ case MAC_MODEL_IICI: case MAC_MODEL_IISI: - macfb_setpalette = rbv_setpalette; - macfb_defined.activate = FB_ACTIVATE_NOW; strcpy(macfb_fix.id, "RBV"); + macfb_setpalette = rbv_setpalette; rbv_cmap_regs = ioremap(DAC_BASE, 0x1000); + macfb_defined.activate = FB_ACTIVATE_NOW; break; /* @@ -797,10 +792,10 @@ static int __init macfb_init(void) */ case MAC_MODEL_Q840: case MAC_MODEL_C660: - macfb_setpalette = civic_setpalette; - macfb_defined.activate = FB_ACTIVATE_NOW; strcpy(macfb_fix.id, "Civic"); + macfb_setpalette = civic_setpalette; civic_cmap_regs = ioremap(CIVIC_BASE, 0x1000); + macfb_defined.activate = FB_ACTIVATE_NOW; break; @@ -809,26 +804,26 @@ static int __init macfb_init(void) * We think this may be like the LC II */ case MAC_MODEL_LC: + strcpy(macfb_fix.id, "LC"); if (vidtest) { macfb_setpalette = v8_brazil_setpalette; - macfb_defined.activate = FB_ACTIVATE_NOW; v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000); + macfb_defined.activate = FB_ACTIVATE_NOW; } - strcpy(macfb_fix.id, "LC"); break; /* * We think this may be like the LC II */ case MAC_MODEL_CCL: + strcpy(macfb_fix.id, "Color Classic"); if (vidtest) { macfb_setpalette = v8_brazil_setpalette; - macfb_defined.activate = FB_ACTIVATE_NOW; v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000); + macfb_defined.activate = FB_ACTIVATE_NOW; } - strcpy(macfb_fix.id, "Color Classic"); break; /* @@ -893,10 +888,10 @@ static int __init macfb_init(void) case MAC_MODEL_PB270C: case MAC_MODEL_PB280: case MAC_MODEL_PB280C: - macfb_setpalette = csc_setpalette; - macfb_defined.activate = FB_ACTIVATE_NOW; 
strcpy(macfb_fix.id, "CSC"); + macfb_setpalette = csc_setpalette; csc_cmap_regs = ioremap(CSC_BASE, 0x1000); + macfb_defined.activate = FB_ACTIVATE_NOW; break; default: @@ -918,8 +913,9 @@ static int __init macfb_init(void) if (err) goto fail_dealloc; - printk("fb%d: %s frame buffer device\n", - fb_info.node, fb_info.fix.id); + pr_info("fb%d: %s frame buffer device\n", + fb_info.node, fb_info.fix.id); + return 0; fail_dealloc: diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c index e3406ab..727a514 100644 --- a/drivers/video/mx3fb.c +++ b/drivers/video/mx3fb.c @@ -245,6 +245,7 @@ struct mx3fb_data { uint32_t h_start_width; uint32_t v_start_width; + enum disp_data_mapping disp_data_fmt; }; struct dma_chan_request { @@ -287,11 +288,14 @@ static void mx3fb_write_reg(struct mx3fb_data *mx3fb, u32 value, unsigned long r __raw_writel(value, mx3fb->reg_base + reg); } -static const uint32_t di_mappings[] = { - 0x1600AAAA, 0x00E05555, 0x00070000, 3, /* RGB888 */ - 0x0005000F, 0x000B000F, 0x0011000F, 1, /* RGB666 */ - 0x0011000F, 0x000B000F, 0x0005000F, 1, /* BGR666 */ - 0x0004003F, 0x000A000F, 0x000F003F, 1 /* RGB565 */ +struct di_mapping { + uint32_t b0, b1, b2; +}; + +static const struct di_mapping di_mappings[] = { + [IPU_DISP_DATA_MAPPING_RGB666] = { 0x0005000f, 0x000b000f, 0x0011000f }, + [IPU_DISP_DATA_MAPPING_RGB565] = { 0x0004003f, 0x000a000f, 0x000f003f }, + [IPU_DISP_DATA_MAPPING_RGB888] = { 0x00070000, 0x000f0000, 0x00170000 }, }; static void sdc_fb_init(struct mx3fb_info *fbi) @@ -334,7 +338,7 @@ static void sdc_enable_channel(struct mx3fb_info *mx3_fbi) /* This enables the channel */ if (mx3_fbi->cookie < 0) { mx3_fbi->txd = dma_chan->device->device_prep_slave_sg(dma_chan, - &mx3_fbi->sg[0], 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT); + &mx3_fbi->sg[0], 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); if (!mx3_fbi->txd) { dev_err(mx3fb->dev, "Cannot allocate descriptor on %d\n", dma_chan->chan_id); @@ -425,7 +429,6 @@ static int sdc_set_window_pos(struct mx3fb_data *mx3fb, enum ipu_channel channel * @pixel_clk: desired pixel clock frequency in Hz. * @width: width of panel in pixels. * @height: height of panel in pixels. - * @pixel_fmt: pixel format of buffer as FOURCC ASCII code. * @h_start_width: number of pixel clocks between the HSYNC signal pulse * and the start of valid data. * @h_sync_width: width of the HSYNC signal in units of pixel clocks. 
@@ -442,7 +445,6 @@ static int sdc_set_window_pos(struct mx3fb_data *mx3fb, enum ipu_channel channel static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel, uint32_t pixel_clk, uint16_t width, uint16_t height, - enum pixel_fmt pixel_fmt, uint16_t h_start_width, uint16_t h_sync_width, uint16_t h_end_width, uint16_t v_start_width, uint16_t v_sync_width, uint16_t v_end_width, @@ -453,6 +455,7 @@ static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel, uint32_t old_conf; uint32_t div; struct clk *ipu_clk; + const struct di_mapping *map; dev_dbg(mx3fb->dev, "panel size = %d x %d", width, height); @@ -540,36 +543,10 @@ static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel, sig.Vsync_pol << DI_D3_VSYNC_POL_SHIFT; mx3fb_write_reg(mx3fb, old_conf, DI_DISP_SIG_POL); - switch (pixel_fmt) { - case IPU_PIX_FMT_RGB24: - mx3fb_write_reg(mx3fb, di_mappings[0], DI_DISP3_B0_MAP); - mx3fb_write_reg(mx3fb, di_mappings[1], DI_DISP3_B1_MAP); - mx3fb_write_reg(mx3fb, di_mappings[2], DI_DISP3_B2_MAP); - mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) | - ((di_mappings[3] - 1) << 12), DI_DISP_ACC_CC); - break; - case IPU_PIX_FMT_RGB666: - mx3fb_write_reg(mx3fb, di_mappings[4], DI_DISP3_B0_MAP); - mx3fb_write_reg(mx3fb, di_mappings[5], DI_DISP3_B1_MAP); - mx3fb_write_reg(mx3fb, di_mappings[6], DI_DISP3_B2_MAP); - mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) | - ((di_mappings[7] - 1) << 12), DI_DISP_ACC_CC); - break; - case IPU_PIX_FMT_BGR666: - mx3fb_write_reg(mx3fb, di_mappings[8], DI_DISP3_B0_MAP); - mx3fb_write_reg(mx3fb, di_mappings[9], DI_DISP3_B1_MAP); - mx3fb_write_reg(mx3fb, di_mappings[10], DI_DISP3_B2_MAP); - mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) | - ((di_mappings[11] - 1) << 12), DI_DISP_ACC_CC); - break; - default: - mx3fb_write_reg(mx3fb, di_mappings[12], DI_DISP3_B0_MAP); - mx3fb_write_reg(mx3fb, di_mappings[13], DI_DISP3_B1_MAP); - mx3fb_write_reg(mx3fb, di_mappings[14], DI_DISP3_B2_MAP); - mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) | - ((di_mappings[15] - 1) << 12), DI_DISP_ACC_CC); - break; - } + map = &di_mappings[mx3fb->disp_data_fmt]; + mx3fb_write_reg(mx3fb, map->b0, DI_DISP3_B0_MAP); + mx3fb_write_reg(mx3fb, map->b1, DI_DISP3_B1_MAP); + mx3fb_write_reg(mx3fb, map->b2, DI_DISP3_B2_MAP); spin_unlock_irqrestore(&mx3fb->lock, lock_flags); @@ -780,8 +757,6 @@ static int __set_par(struct fb_info *fbi, bool lock) if (sdc_init_panel(mx3fb, mode, (PICOS2KHZ(fbi->var.pixclock)) * 1000UL, fbi->var.xres, fbi->var.yres, - (fbi->var.sync & FB_SYNC_SWAP_RGB) ? 
- IPU_PIX_FMT_BGR666 : IPU_PIX_FMT_RGB666, fbi->var.left_margin, fbi->var.hsync_len, fbi->var.right_margin + @@ -1117,7 +1092,7 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var, async_tx_ack(mx3_fbi->txd); txd = dma_chan->device->device_prep_slave_sg(dma_chan, sg + - mx3_fbi->cur_ipu_buf, 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT); + mx3_fbi->cur_ipu_buf, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); if (!txd) { dev_err(fbi->device, "Error preparing a DMA transaction descriptor.\n"); @@ -1349,6 +1324,12 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan) const struct fb_videomode *mode; int ret, num_modes; + if (mx3fb_pdata->disp_data_fmt >= ARRAY_SIZE(di_mappings)) { + dev_err(dev, "Illegal display data format %d\n", + mx3fb_pdata->disp_data_fmt); + return -EINVAL; + } + ichan->client = mx3fb; irq = ichan->eof_irq; @@ -1402,6 +1383,8 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan) mx3fbi->mx3fb = mx3fb; mx3fbi->blank = FB_BLANK_NORMAL; + mx3fb->disp_data_fmt = mx3fb_pdata->disp_data_fmt; + init_completion(&mx3fbi->flip_cmpl); disable_irq(ichan->eof_irq); dev_dbg(mx3fb->dev, "disabling irq %d\n", ichan->eof_irq); diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c index a5ec7f3..e1626a1 100644 --- a/drivers/video/omap2/dss/dispc.c +++ b/drivers/video/omap2/dss/dispc.c @@ -401,7 +401,7 @@ void dispc_runtime_put(void) DSSDBG("dispc_runtime_put\n"); - r = pm_runtime_put(&dispc.pdev->dev); + r = pm_runtime_put_sync(&dispc.pdev->dev); WARN_ON(r < 0); } diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c index d4d676c..52f36ec 100644 --- a/drivers/video/omap2/dss/dsi.c +++ b/drivers/video/omap2/dss/dsi.c @@ -1079,7 +1079,7 @@ void dsi_runtime_put(struct platform_device *dsidev) DSSDBG("dsi_runtime_put\n"); - r = pm_runtime_put(&dsi->pdev->dev); + r = pm_runtime_put_sync(&dsi->pdev->dev); WARN_ON(r < 0); } diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c index 1703345..77c2b5a 100644 --- a/drivers/video/omap2/dss/dss.c +++ b/drivers/video/omap2/dss/dss.c @@ -720,7 +720,7 @@ void dss_runtime_put(void) DSSDBG("dss_runtime_put\n"); - r = pm_runtime_put(&dss.pdev->dev); + r = pm_runtime_put_sync(&dss.pdev->dev); WARN_ON(r < 0); } diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c index b4c270e..d7aa3b0 100644 --- a/drivers/video/omap2/dss/hdmi.c +++ b/drivers/video/omap2/dss/hdmi.c @@ -176,7 +176,7 @@ static void hdmi_runtime_put(void) DSSDBG("hdmi_runtime_put\n"); - r = pm_runtime_put(&hdmi.pdev->dev); + r = pm_runtime_put_sync(&hdmi.pdev->dev); WARN_ON(r < 0); } @@ -497,6 +497,7 @@ bool omapdss_hdmi_detect(void) int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev) { + struct omap_dss_hdmi_data *priv = dssdev->data; int r = 0; DSSDBG("ENTER hdmi_display_enable\n"); @@ -509,6 +510,8 @@ int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev) goto err0; } + hdmi.ip_data.hpd_gpio = priv->hpd_gpio; + r = omap_dss_start_device(dssdev); if (r) { DSSERR("failed to start device\n"); diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c index 814bb95..55f3980 100644 --- a/drivers/video/omap2/dss/rfbi.c +++ b/drivers/video/omap2/dss/rfbi.c @@ -140,7 +140,7 @@ static void rfbi_runtime_put(void) DSSDBG("rfbi_runtime_put\n"); - r = pm_runtime_put(&rfbi.pdev->dev); + r = pm_runtime_put_sync(&rfbi.pdev->dev); WARN_ON(r < 0); } diff --git a/drivers/video/omap2/dss/ti_hdmi.h b/drivers/video/omap2/dss/ti_hdmi.h 
index 7503f7f..50dadba 100644 --- a/drivers/video/omap2/dss/ti_hdmi.h +++ b/drivers/video/omap2/dss/ti_hdmi.h @@ -126,6 +126,10 @@ struct hdmi_ip_data { const struct ti_hdmi_ip_ops *ops; struct hdmi_config cfg; struct hdmi_pll_info pll_data; + + /* ti_hdmi_4xxx_ip private data. These should be in a separate struct */ + int hpd_gpio; + bool phy_tx_enabled; }; int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data); void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data); diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c index 9af81f1..2d72334 100644 --- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c +++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c @@ -28,6 +28,7 @@ #include <linux/delay.h> #include <linux/string.h> #include <linux/seq_file.h> +#include <linux/gpio.h> #include "ti_hdmi_4xxx_ip.h" #include "dss.h" @@ -223,6 +224,49 @@ void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data) hdmi_set_pll_pwr(ip_data, HDMI_PLLPWRCMD_ALLOFF); } +static int hdmi_check_hpd_state(struct hdmi_ip_data *ip_data) +{ + unsigned long flags; + bool hpd; + int r; + /* this should be in ti_hdmi_4xxx_ip private data */ + static DEFINE_SPINLOCK(phy_tx_lock); + + spin_lock_irqsave(&phy_tx_lock, flags); + + hpd = gpio_get_value(ip_data->hpd_gpio); + + if (hpd == ip_data->phy_tx_enabled) { + spin_unlock_irqrestore(&phy_tx_lock, flags); + return 0; + } + + if (hpd) + r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_TXON); + else + r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_LDOON); + + if (r) { + DSSERR("Failed to %s PHY TX power\n", + hpd ? "enable" : "disable"); + goto err; + } + + ip_data->phy_tx_enabled = hpd; +err: + spin_unlock_irqrestore(&phy_tx_lock, flags); + return r; +} + +static irqreturn_t hpd_irq_handler(int irq, void *data) +{ + struct hdmi_ip_data *ip_data = data; + + hdmi_check_hpd_state(ip_data); + + return IRQ_HANDLED; +} + int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data) { u16 r = 0; @@ -232,10 +276,6 @@ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data) if (r) return r; - r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_TXON); - if (r) - return r; - /* * Read address 0 in order to get the SCP reset done completed * Dummy access performed to make sure reset is done @@ -257,12 +297,32 @@ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data) /* Write to phy address 3 to change the polarity control */ REG_FLD_MOD(phy_base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27); + r = request_threaded_irq(gpio_to_irq(ip_data->hpd_gpio), + NULL, hpd_irq_handler, + IRQF_DISABLED | IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING, "hpd", ip_data); + if (r) { + DSSERR("HPD IRQ request failed\n"); + hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF); + return r; + } + + r = hdmi_check_hpd_state(ip_data); + if (r) { + free_irq(gpio_to_irq(ip_data->hpd_gpio), ip_data); + hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF); + return r; + } + return 0; } void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data) { + free_irq(gpio_to_irq(ip_data->hpd_gpio), ip_data); + hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF); + ip_data->phy_tx_enabled = false; } static int hdmi_core_ddc_init(struct hdmi_ip_data *ip_data) diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c index b3e9f90..5c3d0f9 100644 --- a/drivers/video/omap2/dss/venc.c +++ b/drivers/video/omap2/dss/venc.c @@ -401,7 +401,7 @@ static void venc_runtime_put(void) DSSDBG("venc_runtime_put\n"); - r = pm_runtime_put(&venc.pdev->dev); + r = pm_runtime_put_sync(&venc.pdev->dev); WARN_ON(r < 0); } 
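[Editor's note] The ti_hdmi_4xxx_ip.c hunks above move PHY TX power control out of the unconditional phy_enable path and into a threaded interrupt on the hot-plug-detect GPIO, so the PHY follows the cable state at runtime. Below is a minimal, self-contained sketch of that pattern, not the driver's actual code; my_hpd_data, my_apply_hpd_state and the my_hpd_* helpers are hypothetical names.

#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/printk.h>

struct my_hpd_data {
	int hpd_gpio;			/* hot-plug detect GPIO number */
};

/* placeholder for whatever the driver does on a state change,
 * e.g. switching PHY TX power as in the patch above */
static void my_apply_hpd_state(struct my_hpd_data *d, bool plugged)
{
	pr_info("hpd: cable %s\n", plugged ? "connected" : "disconnected");
}

static irqreturn_t my_hpd_irq_thread(int irq, void *arg)
{
	struct my_hpd_data *d = arg;

	/* re-read the pin level; the edge that fired may already be stale */
	my_apply_hpd_state(d, gpio_get_value(d->hpd_gpio));
	return IRQ_HANDLED;
}

static int my_hpd_enable(struct my_hpd_data *d)
{
	/* trigger on both edges so plug and unplug are both seen */
	return request_threaded_irq(gpio_to_irq(d->hpd_gpio), NULL,
				    my_hpd_irq_thread,
				    IRQF_TRIGGER_RISING |
				    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				    "my-hpd", d);
}

static void my_hpd_disable(struct my_hpd_data *d)
{
	free_irq(gpio_to_irq(d->hpd_gpio), d);
}

The actual patch additionally serializes the power switch with a spinlock and tracks phy_tx_enabled so redundant transitions are skipped.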
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 79e1b29..5aa43c3 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -35,7 +35,7 @@ #define virtio_rmb(vq) \ do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while(0) #define virtio_wmb(vq) \ - do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while(0) + do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while(0) #else /* We must force memory ordering even if guest is UP since host could be * running on another CPU, but SMP barriers are defined to barrier() in that @@ -308,9 +308,9 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq) bool needs_kick; START_USE(vq); - /* Descriptors and available array need to be set before we expose the - * new available array entries. */ - virtio_wmb(vq); + /* We need to expose available array entries before checking avail + * event. */ + virtio_mb(vq); old = vq->vring.avail->idx - vq->num_added; new = vq->vring.avail->idx; diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c index 1b0e3dd..63d7b58 100644 --- a/drivers/watchdog/dw_wdt.c +++ b/drivers/watchdog/dw_wdt.c @@ -300,11 +300,7 @@ static int __devinit dw_wdt_drv_probe(struct platform_device *pdev) if (!mem) return -EINVAL; - if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem), - "dw_wdt")) - return -ENOMEM; - - dw_wdt.regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); + dw_wdt.regs = devm_request_and_ioremap(&pdev->dev, mem); if (!dw_wdt.regs) return -ENOMEM; diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index 99796c5..bdf401b 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c @@ -36,6 +36,7 @@ * document number TBD : Patsburg (PBG) * document number TBD : DH89xxCC * document number TBD : Panther Point + * document number TBD : Lynx Point */ /* @@ -126,6 +127,7 @@ enum iTCO_chipsets { TCO_PBG, /* Patsburg */ TCO_DH89XXCC, /* DH89xxCC */ TCO_PPT, /* Panther Point */ + TCO_LPT, /* Lynx Point */ }; static struct { @@ -189,6 +191,7 @@ static struct { {"Patsburg", 2}, {"DH89xxCC", 2}, {"Panther Point", 2}, + {"Lynx Point", 2}, {NULL, 0} }; @@ -331,6 +334,38 @@ static DEFINE_PCI_DEVICE_TABLE(iTCO_wdt_pci_tbl) = { { PCI_VDEVICE(INTEL, 0x1e5d), TCO_PPT}, { PCI_VDEVICE(INTEL, 0x1e5e), TCO_PPT}, { PCI_VDEVICE(INTEL, 0x1e5f), TCO_PPT}, + { PCI_VDEVICE(INTEL, 0x8c40), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c41), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c42), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c43), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c44), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c45), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c46), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c47), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c48), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c49), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c4a), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c4b), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c4c), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c4d), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c4e), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c4f), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c50), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c51), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c52), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c53), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c54), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c55), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c56), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c57), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c58), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c59), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c5a), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c5b), TCO_LPT}, + { 
PCI_VDEVICE(INTEL, 0x8c5c), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c5d), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c5e), TCO_LPT}, + { PCI_VDEVICE(INTEL, 0x8c5f), TCO_LPT}, { 0, }, /* End of list */ }; MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl); diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c index b8ef2c6..c44c333 100644 --- a/drivers/watchdog/imx2_wdt.c +++ b/drivers/watchdog/imx2_wdt.c @@ -247,7 +247,6 @@ static struct miscdevice imx2_wdt_miscdev = { static int __init imx2_wdt_probe(struct platform_device *pdev) { int ret; - int res_size; struct resource *res; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -256,15 +255,7 @@ static int __init imx2_wdt_probe(struct platform_device *pdev) return -ENODEV; } - res_size = resource_size(res); - if (!devm_request_mem_region(&pdev->dev, res->start, res_size, - res->name)) { - dev_err(&pdev->dev, "can't allocate %d bytes at %d address\n", - res_size, res->start); - return -ENOMEM; - } - - imx2_wdt.base = devm_ioremap_nocache(&pdev->dev, res->start, res_size); + imx2_wdt.base = devm_request_and_ioremap(&pdev->dev, res); if (!imx2_wdt.base) { dev_err(&pdev->dev, "ioremap failed\n"); return -ENOMEM; diff --git a/drivers/watchdog/nuc900_wdt.c b/drivers/watchdog/nuc900_wdt.c index 50359ba..529085b 100644 --- a/drivers/watchdog/nuc900_wdt.c +++ b/drivers/watchdog/nuc900_wdt.c @@ -72,7 +72,7 @@ struct nuc900_wdt { }; static unsigned long nuc900wdt_busy; -struct nuc900_wdt *nuc900_wdt; +static struct nuc900_wdt *nuc900_wdt; static inline void nuc900_wdt_keepalive(void) { @@ -287,7 +287,8 @@ static int __devinit nuc900wdt_probe(struct platform_device *pdev) setup_timer(&nuc900_wdt->timer, nuc900_wdt_timer_ping, 0); - if (misc_register(&nuc900wdt_miscdev)) { + ret = misc_register(&nuc900wdt_miscdev); + if (ret) { dev_err(&pdev->dev, "err register miscdev on minor=%d (%d)\n", WATCHDOG_MINOR, ret); goto err_clk; diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c index 4b33e3f..d19ff51 100644 --- a/drivers/watchdog/omap_wdt.c +++ b/drivers/watchdog/omap_wdt.c @@ -339,6 +339,7 @@ static int __devinit omap_wdt_probe(struct platform_device *pdev) return 0; err_misc: + pm_runtime_disable(wdev->dev); platform_set_drvdata(pdev, NULL); iounmap(wdev->base); @@ -371,6 +372,7 @@ static int __devexit omap_wdt_remove(struct platform_device *pdev) struct omap_wdt_dev *wdev = platform_get_drvdata(pdev); struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pm_runtime_disable(wdev->dev); if (!res) return -ENOENT; diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c index bd143c9..8e210aa 100644 --- a/drivers/watchdog/pnx4008_wdt.c +++ b/drivers/watchdog/pnx4008_wdt.c @@ -226,7 +226,7 @@ static long pnx4008_wdt_ioctl(struct file *file, unsigned int cmd, static int pnx4008_wdt_release(struct inode *inode, struct file *file) { if (!test_bit(WDT_OK_TO_CLOSE, &wdt_status)) - printk(KERN_WARNING "WATCHDOG: Device closed unexpectdly\n"); + printk(KERN_WARNING "WATCHDOG: Device closed unexpectedly\n"); wdt_disable(); clk_disable(wdt_clk); diff --git a/drivers/watchdog/stmp3xxx_wdt.c b/drivers/watchdog/stmp3xxx_wdt.c index 4c2a4e8..e37d811 100644 --- a/drivers/watchdog/stmp3xxx_wdt.c +++ b/drivers/watchdog/stmp3xxx_wdt.c @@ -174,7 +174,7 @@ static int stmp3xxx_wdt_release(struct inode *inode, struct file *file) if (!nowayout) { if (!test_bit(WDT_OK_TO_CLOSE, &wdt_status)) { wdt_ping(); - pr_debug("%s: Device closed unexpectdly\n", __func__); + pr_debug("%s: Device closed unexpectedly\n", 
__func__); ret = -EINVAL; } else { wdt_disable(); diff --git a/drivers/watchdog/via_wdt.c b/drivers/watchdog/via_wdt.c index 026b4bb..8f07dd4 100644 --- a/drivers/watchdog/via_wdt.c +++ b/drivers/watchdog/via_wdt.c @@ -124,8 +124,6 @@ static int wdt_stop(struct watchdog_device *wdd) static int wdt_set_timeout(struct watchdog_device *wdd, unsigned int new_timeout) { - if (new_timeout < 1 || new_timeout > WDT_TIMEOUT_MAX) - return -EINVAL; writel(new_timeout, wdt_mem + VIA_WDT_COUNT); timeout = new_timeout; return 0; @@ -150,6 +148,8 @@ static const struct watchdog_ops wdt_ops = { static struct watchdog_device wdt_dev = { .info = &wdt_info, .ops = &wdt_ops, + .min_timeout = 1, + .max_timeout = WDT_TIMEOUT_MAX, }; static int __devinit wdt_probe(struct pci_dev *pdev, @@ -233,7 +233,7 @@ static void __devexit wdt_remove(struct pci_dev *pdev) pci_disable_device(pdev); } -DEFINE_PCI_DEVICE_TABLE(wdt_pci_table) = { +static DEFINE_PCI_DEVICE_TABLE(wdt_pci_table) = { { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700) }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX800) }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX855) }, diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c index 42e940c..c3c3188 100644 --- a/drivers/watchdog/wafer5823wdt.c +++ b/drivers/watchdog/wafer5823wdt.c @@ -152,12 +152,12 @@ static long wafwdt_ioctl(struct file *file, unsigned int cmd, return -EFAULT; if (options & WDIOS_DISABLECARD) { - wafwdt_start(); + wafwdt_stop(); retval = 0; } if (options & WDIOS_ENABLECARD) { - wafwdt_stop(); + wafwdt_start(); retval = 0; } diff --git a/drivers/watchdog/wm8350_wdt.c b/drivers/watchdog/wm8350_wdt.c index 909c786..5d7113c 100644 --- a/drivers/watchdog/wm8350_wdt.c +++ b/drivers/watchdog/wm8350_wdt.c @@ -212,10 +212,10 @@ static long wm8350_wdt_ioctl(struct file *file, unsigned int cmd, /* Setting both simultaneously means at least one must fail */ if (options == WDIOS_DISABLECARD) - ret = wm8350_wdt_start(wm8350); + ret = wm8350_wdt_stop(wm8350); if (options == WDIOS_ENABLECARD) - ret = wm8350_wdt_stop(wm8350); + ret = wm8350_wdt_start(wm8350); break; } diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c index ba6eda4..0edb91c 100644 --- a/drivers/xen/biomerge.c +++ b/drivers/xen/biomerge.c @@ -1,5 +1,6 @@ #include <linux/bio.h> #include <linux/io.h> +#include <linux/export.h> #include <xen/page.h> bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, @@ -11,3 +12,4 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) && ((mfn1 == mfn2) || ((mfn1+1) == mfn2)); } +EXPORT_SYMBOL(xen_biovec_phys_mergeable); diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 1cd94da..b4d4eac 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -948,9 +948,12 @@ static void gnttab_request_version(void) int rc; struct gnttab_set_version gsv; - gsv.version = 2; + if (xen_hvm_domain()) + gsv.version = 1; + else + gsv.version = 2; rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1); - if (rc == 0) { + if (rc == 0 && gsv.version == 2) { grant_table_version = 2; gnttab_interface = &gnttab_v2_ops; } else if (grant_table_version == 2) { diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c index 3832e30..596e6a7 100644 --- a/drivers/xen/xen-balloon.c +++ b/drivers/xen/xen-balloon.c @@ -221,7 +221,7 @@ static int register_balloon(struct device *dev) { int i, error; - error = bus_register(&balloon_subsys); + error = 
subsys_system_register(&balloon_subsys, NULL); if (error) return error;
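[Editor's note] The xen-balloon hunk above replaces a bare bus_register() with subsys_system_register(), which registers the subsystem and creates its root device under /sys/devices/system/ in one call. A minimal sketch of the same conversion, assuming a hypothetical my_subsys bus and my_dev device (the real register_balloon() keeps its own device registration code):

#include <linux/device.h>
#include <linux/init.h>

static struct bus_type my_subsys = {
	.name = "my_subsys",
	.dev_name = "my_subsys",
};

static struct device my_dev = {
	.id = 0,
	.bus = &my_subsys,
};

static int __init my_subsys_init(void)
{
	int error;

	/* creates /sys/devices/system/my_subsys and registers the bus */
	error = subsys_system_register(&my_subsys, NULL);
	if (error)
		return error;

	/* per-instance devices are then added under the subsystem as before */
	error = device_register(&my_dev);
	if (error)
		bus_unregister(&my_subsys);
	return error;
}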