38 files changed, 384 insertions(+), 336 deletions(-)
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py index 230ce71..2b47704 100755 --- a/Documentation/target/tcm_mod_builder.py +++ b/Documentation/target/tcm_mod_builder.py @@ -389,9 +389,6 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name): buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n" buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n" buf += " .close_session = " + fabric_mod_name + "_close_session,\n" - buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n" - buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n" - buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n" buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n" buf += " .sess_get_initiator_sid = NULL,\n" buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n" @@ -402,7 +399,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name): buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n" buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n" buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n" - buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n" + buf += " .aborted_task = " + fabric_mod_name + "_aborted_task,\n" buf += " /*\n" buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n" buf += " */\n" @@ -428,7 +425,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name): buf += " /*\n" buf += " * Register the top level struct config_item_type with TCM core\n" buf += " */\n" - buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n" + buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name + "\");\n" buf += " if (IS_ERR(fabric)) {\n" buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n" buf += " return PTR_ERR(fabric);\n" @@ -595,7 +592,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name): if re.search('get_fabric_name', fo): buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n" buf += "{\n" - buf += " return \"" + fabric_mod_name[4:] + "\";\n" + buf += " return \"" + fabric_mod_name + "\";\n" buf += "}\n\n" bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n" continue @@ -820,27 +817,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name): buf += "}\n\n" bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n" - if re.search('stop_session\)\(', fo): - buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n" - buf += "{\n" - buf += " return;\n" - buf += "}\n\n" - bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n" - - if re.search('fall_back_to_erl0\)\(', fo): - buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n" - buf += "{\n" - buf += " return;\n" - buf += "}\n\n" - bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n" - - if re.search('sess_logged_in\)\(', fo): - buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n" - buf += "{\n" - buf += " return 0;\n" - buf += "}\n\n" - bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n" - if re.search('sess_get_index\)\(', fo): buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n" 
buf += "{\n" @@ -898,19 +874,18 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name): bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n" if re.search('queue_tm_rsp\)\(', fo): - buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n" + buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n" buf += "{\n" - buf += " return 0;\n" + buf += " return;\n" buf += "}\n\n" - bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n" + bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n" - if re.search('is_state_remove\)\(', fo): - buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n" + if re.search('aborted_task\)\(', fo): + buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n" buf += "{\n" - buf += " return 0;\n" + buf += " return;\n" buf += "}\n\n" - bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n" - + bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n" ret = p.write(buf) if ret: @@ -1018,11 +993,11 @@ def main(modname, proto_ident): tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name) tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name) - input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ") + input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ") if input == "yes" or input == "y": tcm_mod_add_kbuild(tcm_dir, fabric_mod_name) - input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ") + input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ") if input == "yes" or input == "y": tcm_mod_add_kconfig(tcm_dir, fabric_mod_name) diff --git a/MAINTAINERS b/MAINTAINERS index 3589d67..8bed045 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5280,6 +5280,15 @@ W: www.open-iscsi.org Q: http://patchwork.kernel.org/project/linux-rdma/list/ F: drivers/infiniband/ulp/iser/ +ISCSI EXTENSIONS FOR RDMA (ISER) TARGET +M: Sagi Grimberg <sagig@mellanox.com> +T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master +L: linux-rdma@vger.kernel.org +L: target-devel@vger.kernel.org +S: Supported +W: http://www.linux-iscsi.org +F: drivers/infiniband/ulp/isert + ISDN SUBSYSTEM M: Karsten Keil <isdn@linux-pingi.de> L: isdn4linux@listserv.isdn4linux.de (subscribers-only) diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 6bf3a13..78a881b 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -40,6 +40,7 @@ #include <xen/interface/physdev.h> #include <xen/interface/vcpu.h> #include <xen/interface/memory.h> +#include <xen/interface/nmi.h> #include <xen/interface/xen-mca.h> #include <xen/features.h> #include <xen/page.h> @@ -66,6 +67,7 @@ #include <asm/reboot.h> #include <asm/stackprotector.h> #include <asm/hypervisor.h> +#include <asm/mach_traps.h> #include <asm/mwait.h> #include <asm/pci_x86.h> #include <asm/pat.h> @@ -1351,6 +1353,21 @@ static const struct machine_ops xen_machine_ops __initconst = { .emergency_restart = xen_emergency_restart, }; +static unsigned char xen_get_nmi_reason(void) +{ + unsigned char reason = 0; + + /* Construct a value which looks like it came from port 0x61. 
*/ + if (test_bit(_XEN_NMIREASON_io_error, + &HYPERVISOR_shared_info->arch.nmi_reason)) + reason |= NMI_REASON_IOCHK; + if (test_bit(_XEN_NMIREASON_pci_serr, + &HYPERVISOR_shared_info->arch.nmi_reason)) + reason |= NMI_REASON_SERR; + + return reason; +} + static void __init xen_boot_params_init_edd(void) { #if IS_ENABLED(CONFIG_EDD) @@ -1535,9 +1552,12 @@ asmlinkage __visible void __init xen_start_kernel(void) pv_info = xen_info; pv_init_ops = xen_init_ops; pv_apic_ops = xen_apic_ops; - if (!xen_pvh_domain()) + if (!xen_pvh_domain()) { pv_cpu_ops = xen_cpu_ops; + x86_platform.get_nmi_reason = xen_get_nmi_reason; + } + if (xen_feature(XENFEAT_auto_translated_physmap)) x86_init.resources.memory_setup = xen_auto_xlated_memory_setup; else diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index edbc7a6..70fb507 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -167,10 +167,13 @@ static void * __ref alloc_p2m_page(void) return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT); } -/* Only to be called in case of a race for a page just allocated! */ -static void free_p2m_page(void *p) +static void __ref free_p2m_page(void *p) { - BUG_ON(!slab_is_available()); + if (unlikely(!slab_is_available())) { + free_bootmem((unsigned long)p, PAGE_SIZE); + return; + } + free_page((unsigned long)p); } @@ -375,7 +378,7 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m) p2m_missing_pte : p2m_identity_pte; for (i = 0; i < PMDS_PER_MID_PAGE; i++) { pmdp = populate_extra_pmd( - (unsigned long)(p2m + pfn + i * PTRS_PER_PTE)); + (unsigned long)(p2m + pfn) + i * PMD_SIZE); set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE)); } } @@ -436,10 +439,9 @@ EXPORT_SYMBOL_GPL(get_phys_to_machine); * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by a individual * pmd. In case of PAE/x86-32 there are multiple pmds to allocate! 
*/ -static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg) +static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg) { pte_t *ptechk; - pte_t *pteret = ptep; pte_t *pte_newpg[PMDS_PER_MID_PAGE]; pmd_t *pmdp; unsigned int level; @@ -473,8 +475,6 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg) if (ptechk == pte_pg) { set_pmd(pmdp, __pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE)); - if (vaddr == (addr & ~(PMD_SIZE - 1))) - pteret = pte_offset_kernel(pmdp, addr); pte_newpg[i] = NULL; } @@ -488,7 +488,7 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg) vaddr += PMD_SIZE; } - return pteret; + return lookup_address(addr, &level); } /* @@ -517,7 +517,7 @@ static bool alloc_p2m(unsigned long pfn) if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) { /* PMD level is missing, allocate a new one */ - ptep = alloc_p2m_pmd(addr, ptep, pte_pg); + ptep = alloc_p2m_pmd(addr, pte_pg); if (!ptep) return false; } diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index dfd77de..865e56c 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -140,7 +140,7 @@ static void __init xen_del_extra_mem(u64 start, u64 size) unsigned long __ref xen_chk_extra_mem(unsigned long pfn) { int i; - unsigned long addr = PFN_PHYS(pfn); + phys_addr_t addr = PFN_PHYS(pfn); for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { if (addr >= xen_extra_mem[i].start && @@ -160,6 +160,8 @@ void __init xen_inv_extra_mem(void) int i; for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { + if (!xen_extra_mem[i].size) + continue; pfn_s = PFN_DOWN(xen_extra_mem[i].start); pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size); for (pfn = pfn_s; pfn < pfn_e; pfn++) @@ -229,15 +231,14 @@ static int __init xen_free_mfn(unsigned long mfn) * as a fallback if the remapping fails. */ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn, - unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity, - unsigned long *released) + unsigned long end_pfn, unsigned long nr_pages, unsigned long *released) { - unsigned long len = 0; unsigned long pfn, end; int ret; WARN_ON(start_pfn > end_pfn); + /* Release pages first. */ end = min(end_pfn, nr_pages); for (pfn = start_pfn; pfn < end; pfn++) { unsigned long mfn = pfn_to_mfn(pfn); @@ -250,16 +251,14 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn, WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret); if (ret == 1) { + (*released)++; if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY)) break; - len++; } else break; } - /* Need to release pages first */ - *released += len; - *identity += set_phys_range_identity(start_pfn, end_pfn); + set_phys_range_identity(start_pfn, end_pfn); } /* @@ -287,7 +286,7 @@ static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn) } /* Update kernel mapping, but not for highmem. 
*/ - if ((pfn << PAGE_SHIFT) >= __pa(high_memory)) + if (pfn >= PFN_UP(__pa(high_memory - 1))) return; if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT), @@ -318,7 +317,6 @@ static void __init xen_do_set_identity_and_remap_chunk( unsigned long ident_pfn_iter, remap_pfn_iter; unsigned long ident_end_pfn = start_pfn + size; unsigned long left = size; - unsigned long ident_cnt = 0; unsigned int i, chunk; WARN_ON(size == 0); @@ -347,8 +345,7 @@ static void __init xen_do_set_identity_and_remap_chunk( xen_remap_mfn = mfn; /* Set identity map */ - ident_cnt += set_phys_range_identity(ident_pfn_iter, - ident_pfn_iter + chunk); + set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk); left -= chunk; } @@ -371,7 +368,7 @@ static void __init xen_do_set_identity_and_remap_chunk( static unsigned long __init xen_set_identity_and_remap_chunk( const struct e820entry *list, size_t map_size, unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn, - unsigned long *identity, unsigned long *released) + unsigned long *released, unsigned long *remapped) { unsigned long pfn; unsigned long i = 0; @@ -386,8 +383,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk( /* Do not remap pages beyond the current allocation */ if (cur_pfn >= nr_pages) { /* Identity map remaining pages */ - *identity += set_phys_range_identity(cur_pfn, - cur_pfn + size); + set_phys_range_identity(cur_pfn, cur_pfn + size); break; } if (cur_pfn + size > nr_pages) @@ -398,7 +394,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk( if (!remap_range_size) { pr_warning("Unable to find available pfn range, not remapping identity pages\n"); xen_set_identity_and_release_chunk(cur_pfn, - cur_pfn + left, nr_pages, identity, released); + cur_pfn + left, nr_pages, released); break; } /* Adjust size to fit in current e820 RAM region */ @@ -410,7 +406,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk( /* Update variables to reflect new mappings. */ i += size; remap_pfn += size; - *identity += size; + *remapped += size; } /* @@ -427,13 +423,13 @@ static unsigned long __init xen_set_identity_and_remap_chunk( static void __init xen_set_identity_and_remap( const struct e820entry *list, size_t map_size, unsigned long nr_pages, - unsigned long *released) + unsigned long *released, unsigned long *remapped) { phys_addr_t start = 0; - unsigned long identity = 0; unsigned long last_pfn = nr_pages; const struct e820entry *entry; unsigned long num_released = 0; + unsigned long num_remapped = 0; int i; /* @@ -460,14 +456,14 @@ static void __init xen_set_identity_and_remap( last_pfn = xen_set_identity_and_remap_chunk( list, map_size, start_pfn, end_pfn, nr_pages, last_pfn, - &identity, &num_released); + &num_released, &num_remapped); start = end; } } *released = num_released; + *remapped = num_remapped; - pr_info("Set %ld page(s) to 1-1 mapping\n", identity); pr_info("Released %ld page(s)\n", num_released); } @@ -586,6 +582,7 @@ char * __init xen_memory_setup(void) struct xen_memory_map memmap; unsigned long max_pages; unsigned long extra_pages = 0; + unsigned long remapped_pages; int i; int op; @@ -635,9 +632,10 @@ char * __init xen_memory_setup(void) * underlying RAM. 
*/ xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn, - &xen_released_pages); + &xen_released_pages, &remapped_pages); extra_pages += xen_released_pages; + extra_pages += remapped_pages; /* * Clamp the amount of extra memory to a EXTRA_MEM_RATIO diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index f473d26..6908734 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -391,7 +391,7 @@ static const struct clock_event_device *xen_clockevent = struct xen_clock_event_device { struct clock_event_device evt; - char *name; + char name[16]; }; static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 }; @@ -420,46 +420,38 @@ void xen_teardown_timer(int cpu) if (evt->irq >= 0) { unbind_from_irqhandler(evt->irq, NULL); evt->irq = -1; - kfree(per_cpu(xen_clock_events, cpu).name); - per_cpu(xen_clock_events, cpu).name = NULL; } } void xen_setup_timer(int cpu) { - char *name; - struct clock_event_device *evt; + struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu); + struct clock_event_device *evt = &xevt->evt; int irq; - evt = &per_cpu(xen_clock_events, cpu).evt; WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu); if (evt->irq >= 0) xen_teardown_timer(cpu); printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu); - name = kasprintf(GFP_KERNEL, "timer%d", cpu); - if (!name) - name = "<timer kasprintf failed>"; + snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu); irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER| IRQF_FORCE_RESUME|IRQF_EARLY_RESUME, - name, NULL); + xevt->name, NULL); (void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX); memcpy(evt, xen_clockevent, sizeof(*evt)); evt->cpumask = cpumask_of(cpu); evt->irq = irq; - per_cpu(xen_clock_events, cpu).name = name; } void xen_setup_cpu_clockevents(void) { - BUG_ON(preemptible()); - clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt)); } diff --git a/drivers/acpi/int340x_thermal.c b/drivers/acpi/int340x_thermal.c index a27d31d..9dcf836 100644 --- a/drivers/acpi/int340x_thermal.c +++ b/drivers/acpi/int340x_thermal.c @@ -14,10 +14,10 @@ #include "internal.h" -#define DO_ENUMERATION 0x01 +#define INT3401_DEVICE 0X01 static const struct acpi_device_id int340x_thermal_device_ids[] = { - {"INT3400", DO_ENUMERATION }, - {"INT3401"}, + {"INT3400"}, + {"INT3401", INT3401_DEVICE}, {"INT3402"}, {"INT3403"}, {"INT3404"}, @@ -34,7 +34,10 @@ static int int340x_thermal_handler_attach(struct acpi_device *adev, const struct acpi_device_id *id) { #if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE) - if (id->driver_data == DO_ENUMERATION) + acpi_create_platform_device(adev); +#elif defined(INTEL_SOC_DTS_THERMAL) || defined(INTEL_SOC_DTS_THERMAL_MODULE) + /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */ + if (id->driver_data == INT3401_DEVICE) acpi_create_platform_device(adev); #endif return 1; diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c index 978b51e..ce3c155 100644 --- a/drivers/gpio/gpio-dln2.c +++ b/drivers/gpio/gpio-dln2.c @@ -47,13 +47,6 @@ #define DLN2_GPIO_MAX_PINS 32 -struct dln2_irq_work { - struct work_struct work; - struct dln2_gpio *dln2; - int pin; - int type; -}; - struct dln2_gpio { struct platform_device *pdev; struct gpio_chip gpio; @@ -64,10 +57,12 @@ struct dln2_gpio { */ DECLARE_BITMAP(output_enabled, DLN2_GPIO_MAX_PINS); - DECLARE_BITMAP(irqs_masked, DLN2_GPIO_MAX_PINS); - DECLARE_BITMAP(irqs_enabled, 
DLN2_GPIO_MAX_PINS); - DECLARE_BITMAP(irqs_pending, DLN2_GPIO_MAX_PINS); - struct dln2_irq_work *irq_work; + /* active IRQs - not synced to hardware */ + DECLARE_BITMAP(unmasked_irqs, DLN2_GPIO_MAX_PINS); + /* active IRQS - synced to hardware */ + DECLARE_BITMAP(enabled_irqs, DLN2_GPIO_MAX_PINS); + int irq_type[DLN2_GPIO_MAX_PINS]; + struct mutex irq_lock; }; struct dln2_gpio_pin { @@ -141,16 +136,16 @@ static int dln2_gpio_pin_get_out_val(struct dln2_gpio *dln2, unsigned int pin) return !!ret; } -static void dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2, - unsigned int pin, int value) +static int dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2, + unsigned int pin, int value) { struct dln2_gpio_pin_val req = { .pin = cpu_to_le16(pin), .value = value, }; - dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req, - sizeof(req)); + return dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req, + sizeof(req)); } #define DLN2_GPIO_DIRECTION_IN 0 @@ -267,6 +262,13 @@ static int dln2_gpio_direction_input(struct gpio_chip *chip, unsigned offset) static int dln2_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value) { + struct dln2_gpio *dln2 = container_of(chip, struct dln2_gpio, gpio); + int ret; + + ret = dln2_gpio_pin_set_out_val(dln2, offset, value); + if (ret < 0) + return ret; + return dln2_gpio_set_direction(chip, offset, DLN2_GPIO_DIRECTION_OUT); } @@ -297,36 +299,13 @@ static int dln2_gpio_set_event_cfg(struct dln2_gpio *dln2, unsigned pin, &req, sizeof(req)); } -static void dln2_irq_work(struct work_struct *w) -{ - struct dln2_irq_work *iw = container_of(w, struct dln2_irq_work, work); - struct dln2_gpio *dln2 = iw->dln2; - u8 type = iw->type & DLN2_GPIO_EVENT_MASK; - - if (test_bit(iw->pin, dln2->irqs_enabled)) - dln2_gpio_set_event_cfg(dln2, iw->pin, type, 0); - else - dln2_gpio_set_event_cfg(dln2, iw->pin, DLN2_GPIO_EVENT_NONE, 0); -} - -static void dln2_irq_enable(struct irq_data *irqd) -{ - struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); - struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); - int pin = irqd_to_hwirq(irqd); - - set_bit(pin, dln2->irqs_enabled); - schedule_work(&dln2->irq_work[pin].work); -} - -static void dln2_irq_disable(struct irq_data *irqd) +static void dln2_irq_unmask(struct irq_data *irqd) { struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); int pin = irqd_to_hwirq(irqd); - clear_bit(pin, dln2->irqs_enabled); - schedule_work(&dln2->irq_work[pin].work); + set_bit(pin, dln2->unmasked_irqs); } static void dln2_irq_mask(struct irq_data *irqd) @@ -335,27 +314,7 @@ static void dln2_irq_mask(struct irq_data *irqd) struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); int pin = irqd_to_hwirq(irqd); - set_bit(pin, dln2->irqs_masked); -} - -static void dln2_irq_unmask(struct irq_data *irqd) -{ - struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); - struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); - struct device *dev = dln2->gpio.dev; - int pin = irqd_to_hwirq(irqd); - - if (test_and_clear_bit(pin, dln2->irqs_pending)) { - int irq; - - irq = irq_find_mapping(dln2->gpio.irqdomain, pin); - if (!irq) { - dev_err(dev, "pin %d not mapped to IRQ\n", pin); - return; - } - - generic_handle_irq(irq); - } + clear_bit(pin, dln2->unmasked_irqs); } static int dln2_irq_set_type(struct irq_data *irqd, unsigned type) @@ -366,19 +325,19 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type) switch (type) { 
case IRQ_TYPE_LEVEL_HIGH: - dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_HIGH; + dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_HIGH; break; case IRQ_TYPE_LEVEL_LOW: - dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_LOW; + dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_LOW; break; case IRQ_TYPE_EDGE_BOTH: - dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE; + dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE; break; case IRQ_TYPE_EDGE_RISING: - dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_RISING; + dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_RISING; break; case IRQ_TYPE_EDGE_FALLING: - dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_FALLING; + dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_FALLING; break; default: return -EINVAL; @@ -387,13 +346,50 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type) return 0; } +static void dln2_irq_bus_lock(struct irq_data *irqd) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); + struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); + + mutex_lock(&dln2->irq_lock); +} + +static void dln2_irq_bus_unlock(struct irq_data *irqd) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); + struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); + int pin = irqd_to_hwirq(irqd); + int enabled, unmasked; + unsigned type; + int ret; + + enabled = test_bit(pin, dln2->enabled_irqs); + unmasked = test_bit(pin, dln2->unmasked_irqs); + + if (enabled != unmasked) { + if (unmasked) { + type = dln2->irq_type[pin] & DLN2_GPIO_EVENT_MASK; + set_bit(pin, dln2->enabled_irqs); + } else { + type = DLN2_GPIO_EVENT_NONE; + clear_bit(pin, dln2->enabled_irqs); + } + + ret = dln2_gpio_set_event_cfg(dln2, pin, type, 0); + if (ret) + dev_err(dln2->gpio.dev, "failed to set event\n"); + } + + mutex_unlock(&dln2->irq_lock); +} + static struct irq_chip dln2_gpio_irqchip = { .name = "dln2-irq", - .irq_enable = dln2_irq_enable, - .irq_disable = dln2_irq_disable, .irq_mask = dln2_irq_mask, .irq_unmask = dln2_irq_unmask, .irq_set_type = dln2_irq_set_type, + .irq_bus_lock = dln2_irq_bus_lock, + .irq_bus_sync_unlock = dln2_irq_bus_unlock, }; static void dln2_gpio_event(struct platform_device *pdev, u16 echo, @@ -425,14 +421,7 @@ static void dln2_gpio_event(struct platform_device *pdev, u16 echo, return; } - if (!test_bit(pin, dln2->irqs_enabled)) - return; - if (test_bit(pin, dln2->irqs_masked)) { - set_bit(pin, dln2->irqs_pending); - return; - } - - switch (dln2->irq_work[pin].type) { + switch (dln2->irq_type[pin]) { case DLN2_GPIO_EVENT_CHANGE_RISING: if (event->value) generic_handle_irq(irq); @@ -451,7 +440,7 @@ static int dln2_gpio_probe(struct platform_device *pdev) struct dln2_gpio *dln2; struct device *dev = &pdev->dev; int pins; - int i, ret; + int ret; pins = dln2_gpio_get_pin_count(pdev); if (pins < 0) { @@ -467,15 +456,7 @@ static int dln2_gpio_probe(struct platform_device *pdev) if (!dln2) return -ENOMEM; - dln2->irq_work = devm_kcalloc(&pdev->dev, pins, - sizeof(struct dln2_irq_work), GFP_KERNEL); - if (!dln2->irq_work) - return -ENOMEM; - for (i = 0; i < pins; i++) { - INIT_WORK(&dln2->irq_work[i].work, dln2_irq_work); - dln2->irq_work[i].pin = i; - dln2->irq_work[i].dln2 = dln2; - } + mutex_init(&dln2->irq_lock); dln2->pdev = pdev; @@ -529,11 +510,8 @@ out: static int dln2_gpio_remove(struct platform_device *pdev) { struct dln2_gpio *dln2 = platform_get_drvdata(pdev); - int i; dln2_unregister_event_cb(pdev, DLN2_GPIO_CONDITION_MET_EV); - for (i = 0; i < dln2->gpio.ngpio; i++) - flush_work(&dln2->irq_work[i].work); 
gpiochip_remove(&dln2->gpio); return 0; diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c index 09daaf2..3a5a710 100644 --- a/drivers/gpio/gpio-grgpio.c +++ b/drivers/gpio/gpio-grgpio.c @@ -441,7 +441,8 @@ static int grgpio_probe(struct platform_device *ofdev) err = gpiochip_add(gc); if (err) { dev_err(&ofdev->dev, "Could not add gpiochip\n"); - irq_domain_remove(priv->domain); + if (priv->domain) + irq_domain_remove(priv->domain); return err; } diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index e3e56d3..970314e 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -247,6 +247,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = { { "INT33BB" , "3" , &sdhci_acpi_slot_int_sd }, { "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio }, { "INT3436" , NULL, &sdhci_acpi_slot_int_sdio }, + { "INT344D" , NULL, &sdhci_acpi_slot_int_sdio }, { "PNP0D40" }, { }, }; @@ -257,6 +258,7 @@ static const struct acpi_device_id sdhci_acpi_ids[] = { { "INT33BB" }, { "INT33C6" }, { "INT3436" }, + { "INT344D" }, { "PNP0D40" }, { }, }; diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index 0342775..4f38554 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c @@ -993,6 +993,31 @@ static const struct pci_device_id pci_ids[] = { .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_intel_mrfl_mmc, }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_SPT_EMMC, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_SPT_SDIO, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_SPT_SD, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd, + }, + { .vendor = PCI_VENDOR_ID_O2, .device = PCI_DEVICE_ID_O2_8120, diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h index d57c3d1..1ec684d 100644 --- a/drivers/mmc/host/sdhci-pci.h +++ b/drivers/mmc/host/sdhci-pci.h @@ -21,6 +21,9 @@ #define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5 #define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6 #define PCI_DEVICE_ID_INTEL_QRK_SD 0x08A7 +#define PCI_DEVICE_ID_INTEL_SPT_EMMC 0x9d2b +#define PCI_DEVICE_ID_INTEL_SPT_SDIO 0x9d2c +#define PCI_DEVICE_ID_INTEL_SPT_SD 0x9d2d /* * PCI registers diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index 4523887..ca3424e 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -300,13 +300,6 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) if (IS_ERR(host)) return PTR_ERR(host); - if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) { - ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info()); - if (ret < 0) - goto err_mbus_win; - } - - pltfm_host = sdhci_priv(host); pltfm_host->priv = pxa; @@ -325,6 +318,12 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) if (!IS_ERR(pxa->clk_core)) clk_prepare_enable(pxa->clk_core); + if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) { + ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info()); + if (ret < 0) + goto err_mbus_win; + } + /* enable 1/8V DDR capable */ host->mmc->caps |= MMC_CAP_1_8V_DDR; @@ -396,11 +395,11 @@ err_add_host: pm_runtime_disable(&pdev->dev); err_of_parse: err_cd_req: 
+err_mbus_win: clk_disable_unprepare(pxa->clk_io); if (!IS_ERR(pxa->clk_core)) clk_disable_unprepare(pxa->clk_core); err_clk_get: -err_mbus_win: sdhci_pltfm_free(pdev); return ret; } diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index cbb245b..1453cd1 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -259,8 +259,6 @@ static void sdhci_reinit(struct sdhci_host *host) del_timer_sync(&host->tuning_timer); host->flags &= ~SDHCI_NEEDS_RETUNING; - host->mmc->max_blk_count = - (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; } sdhci_enable_card_detection(host); } @@ -1353,6 +1351,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) sdhci_runtime_pm_get(host); + present = mmc_gpio_get_cd(host->mmc); + spin_lock_irqsave(&host->lock, flags); WARN_ON(host->mrq != NULL); @@ -1381,7 +1381,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) * zero: cd-gpio is used, and card is removed * one: cd-gpio is used, and card is present */ - present = mmc_gpio_get_cd(host->mmc); if (present < 0) { /* If polling, assume that the card is always present. */ if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) @@ -1880,6 +1879,18 @@ static int sdhci_card_busy(struct mmc_host *mmc) return !(present_state & SDHCI_DATA_LVL_MASK); } +static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + host->flags |= SDHCI_HS400_TUNING; + spin_unlock_irqrestore(&host->lock, flags); + + return 0; +} + static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) { struct sdhci_host *host = mmc_priv(mmc); @@ -1887,10 +1898,18 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) int tuning_loop_counter = MAX_TUNING_LOOP; int err = 0; unsigned long flags; + unsigned int tuning_count = 0; + bool hs400_tuning; sdhci_runtime_pm_get(host); spin_lock_irqsave(&host->lock, flags); + hs400_tuning = host->flags & SDHCI_HS400_TUNING; + host->flags &= ~SDHCI_HS400_TUNING; + + if (host->tuning_mode == SDHCI_TUNING_MODE_1) + tuning_count = host->tuning_count; + /* * The Host Controller needs tuning only in case of SDR104 mode * and for SDR50 mode when Use Tuning for SDR50 is set in the @@ -1899,8 +1918,20 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) * tuning function has to be executed. */ switch (host->timing) { + /* HS400 tuning is done in HS200 mode */ case MMC_TIMING_MMC_HS400: + err = -EINVAL; + goto out_unlock; + case MMC_TIMING_MMC_HS200: + /* + * Periodic re-tuning for HS400 is not expected to be needed, so + * disable it here. + */ + if (hs400_tuning) + tuning_count = 0; + break; + case MMC_TIMING_UHS_SDR104: break; @@ -1911,9 +1942,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) /* FALLTHROUGH */ default: - spin_unlock_irqrestore(&host->lock, flags); - sdhci_runtime_pm_put(host); - return 0; + goto out_unlock; } if (host->ops->platform_execute_tuning) { @@ -2037,24 +2066,11 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) } out: - /* - * If this is the very first time we are here, we start the retuning - * timer. Since only during the first time, SDHCI_NEEDS_RETUNING - * flag won't be set, we check this condition before actually starting - * the timer. 
- */ - if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count && - (host->tuning_mode == SDHCI_TUNING_MODE_1)) { + host->flags &= ~SDHCI_NEEDS_RETUNING; + + if (tuning_count) { host->flags |= SDHCI_USING_RETUNING_TIMER; - mod_timer(&host->tuning_timer, jiffies + - host->tuning_count * HZ); - /* Tuning mode 1 limits the maximum data length to 4MB */ - mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size; - } else if (host->flags & SDHCI_USING_RETUNING_TIMER) { - host->flags &= ~SDHCI_NEEDS_RETUNING; - /* Reload the new initial value for timer */ - mod_timer(&host->tuning_timer, jiffies + - host->tuning_count * HZ); + mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ); } /* @@ -2070,6 +2086,7 @@ out: sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); +out_unlock: spin_unlock_irqrestore(&host->lock, flags); sdhci_runtime_pm_put(host); @@ -2110,15 +2127,18 @@ static void sdhci_card_event(struct mmc_host *mmc) { struct sdhci_host *host = mmc_priv(mmc); unsigned long flags; + int present; /* First check if client has provided their own card event */ if (host->ops->card_event) host->ops->card_event(host); + present = sdhci_do_get_cd(host); + spin_lock_irqsave(&host->lock, flags); /* Check host->mrq first in case we are runtime suspended */ - if (host->mrq && !sdhci_do_get_cd(host)) { + if (host->mrq && !present) { pr_err("%s: Card removed during transfer!\n", mmc_hostname(host->mmc)); pr_err("%s: Resetting controller.\n", @@ -2142,6 +2162,7 @@ static const struct mmc_host_ops sdhci_ops = { .hw_reset = sdhci_hw_reset, .enable_sdio_irq = sdhci_enable_sdio_irq, .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, + .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, .execute_tuning = sdhci_execute_tuning, .card_event = sdhci_card_event, .card_busy = sdhci_card_busy, @@ -3260,8 +3281,9 @@ int sdhci_add_host(struct sdhci_host *host) mmc->max_segs = SDHCI_MAX_SEGS; /* - * Maximum number of sectors in one transfer. Limited by DMA boundary - * size (512KiB). + * Maximum number of sectors in one transfer. Limited by SDMA boundary + * size (512KiB). Note some tuning modes impose a 4MiB limit, but this + * is less anyway. */ mmc->max_req_size = 524288; diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 55f6774..aebde32 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -2027,10 +2027,10 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, goto reject; } if (!strncmp("=All", text_ptr, 4)) { - cmd->cmd_flags |= IFC_SENDTARGETS_ALL; + cmd->cmd_flags |= ICF_SENDTARGETS_ALL; } else if (!strncmp("=iqn.", text_ptr, 5) || !strncmp("=eui.", text_ptr, 5)) { - cmd->cmd_flags |= IFC_SENDTARGETS_SINGLE; + cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE; } else { pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr); goto reject; @@ -3415,10 +3415,10 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd, return -ENOMEM; } /* - * Locate pointer to iqn./eui. string for IFC_SENDTARGETS_SINGLE + * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE * explicit case.. 
*/ - if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) { + if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) { text_ptr = strchr(text_in, '='); if (!text_ptr) { pr_err("Unable to locate '=' string in text_in:" @@ -3434,7 +3434,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd, spin_lock(&tiqn_lock); list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) { - if ((cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) && + if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) && strcmp(tiqn->tiqn, text_ptr)) { continue; } @@ -3512,7 +3512,7 @@ eob: if (end_of_buf) break; - if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) + if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) break; } spin_unlock(&tiqn_lock); diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h index 09a522b..cbcff38 100644 --- a/drivers/target/iscsi/iscsi_target_core.h +++ b/drivers/target/iscsi/iscsi_target_core.h @@ -135,8 +135,8 @@ enum cmd_flags_table { ICF_CONTIG_MEMORY = 0x00000020, ICF_ATTACHED_TO_RQUEUE = 0x00000040, ICF_OOO_CMDSN = 0x00000080, - IFC_SENDTARGETS_ALL = 0x00000100, - IFC_SENDTARGETS_SINGLE = 0x00000200, + ICF_SENDTARGETS_ALL = 0x00000100, + ICF_SENDTARGETS_SINGLE = 0x00000200, }; /* struct iscsi_cmd->i_state */ diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 7653cfb..58f49ff 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -1103,51 +1103,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) } EXPORT_SYMBOL(se_dev_set_queue_depth); -int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) -{ - int block_size = dev->dev_attrib.block_size; - - if (dev->export_count) { - pr_err("dev[%p]: Unable to change SE Device" - " fabric_max_sectors while export_count is %d\n", - dev, dev->export_count); - return -EINVAL; - } - if (!fabric_max_sectors) { - pr_err("dev[%p]: Illegal ZERO value for" - " fabric_max_sectors\n", dev); - return -EINVAL; - } - if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) { - pr_err("dev[%p]: Passed fabric_max_sectors: %u less than" - " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors, - DA_STATUS_MAX_SECTORS_MIN); - return -EINVAL; - } - if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) { - pr_err("dev[%p]: Passed fabric_max_sectors: %u" - " greater than DA_STATUS_MAX_SECTORS_MAX:" - " %u\n", dev, fabric_max_sectors, - DA_STATUS_MAX_SECTORS_MAX); - return -EINVAL; - } - /* - * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() - */ - if (!block_size) { - block_size = 512; - pr_warn("Defaulting to 512 for zero block_size\n"); - } - fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors, - block_size); - - dev->dev_attrib.fabric_max_sectors = fabric_max_sectors; - pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", - dev, fabric_max_sectors); - return 0; -} -EXPORT_SYMBOL(se_dev_set_fabric_max_sectors); - int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) { if (dev->export_count) { @@ -1156,10 +1111,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) dev, dev->export_count); return -EINVAL; } - if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) { + if (optimal_sectors > dev->dev_attrib.hw_max_sectors) { pr_err("dev[%p]: Passed optimal_sectors %u cannot be" - " greater than fabric_max_sectors: %u\n", dev, - optimal_sectors, dev->dev_attrib.fabric_max_sectors); + " greater than hw_max_sectors: %u\n", dev, + optimal_sectors, 
dev->dev_attrib.hw_max_sectors); return -EINVAL; } @@ -1553,8 +1508,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) dev->dev_attrib.unmap_granularity_alignment = DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN; - dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS; - dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS; xcopy_lun = &dev->xcopy_lun; xcopy_lun->lun_se_dev = dev; @@ -1595,6 +1548,7 @@ int target_configure_device(struct se_device *dev) dev->dev_attrib.hw_max_sectors = se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors, dev->dev_attrib.hw_block_size); + dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors; dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); dev->creation_time = get_jiffies_64(); diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index c2aea09..d836de2 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -621,7 +621,16 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, struct fd_prot fd_prot; sense_reason_t rc; int ret = 0; - + /* + * We are currently limited by the number of iovecs (2048) per + * single vfs_[writev,readv] call. + */ + if (cmd->data_length > FD_MAX_BYTES) { + pr_err("FILEIO: Not able to process I/O of %u bytes due to" + "FD_MAX_BYTES: %u iovec count limitiation\n", + cmd->data_length, FD_MAX_BYTES); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } /* * Call vectorized fileio functions to map struct scatterlist * physical memory addresses to struct iovec virtual memory. @@ -959,7 +968,6 @@ static struct configfs_attribute *fileio_backend_dev_attrs[] = { &fileio_dev_attrib_hw_block_size.attr, &fileio_dev_attrib_block_size.attr, &fileio_dev_attrib_hw_max_sectors.attr, - &fileio_dev_attrib_fabric_max_sectors.attr, &fileio_dev_attrib_optimal_sectors.attr, &fileio_dev_attrib_hw_queue_depth.attr, &fileio_dev_attrib_queue_depth.attr, diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 3efff94..78346b8 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -124,7 +124,7 @@ static int iblock_configure_device(struct se_device *dev) q = bdev_get_queue(bd); dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd); - dev->dev_attrib.hw_max_sectors = UINT_MAX; + dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); dev->dev_attrib.hw_queue_depth = q->nr_requests; /* @@ -883,7 +883,6 @@ static struct configfs_attribute *iblock_backend_dev_attrs[] = { &iblock_dev_attrib_hw_block_size.attr, &iblock_dev_attrib_block_size.attr, &iblock_dev_attrib_hw_max_sectors.attr, - &iblock_dev_attrib_fabric_max_sectors.attr, &iblock_dev_attrib_optimal_sectors.attr, &iblock_dev_attrib_hw_queue_depth.attr, &iblock_dev_attrib_queue_depth.attr, diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index d56f2aa..283cf78 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -528,6 +528,18 @@ static int core_scsi3_pr_seq_non_holder( return 0; } + } else if (we && registered_nexus) { + /* + * Reads are allowed for Write Exclusive locks + * from all registrants. 
+ */ + if (cmd->data_direction == DMA_FROM_DEVICE) { + pr_debug("Allowing READ CDB: 0x%02x for %s" + " reservation\n", cdb[0], + core_scsi3_pr_dump_type(pr_reg_type)); + + return 0; + } } pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x" " for %s reservation\n", transport_dump_cmd_direction(cmd), diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 60ebd17..98e83ac 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -657,7 +657,6 @@ static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = { &rd_mcp_dev_attrib_hw_block_size.attr, &rd_mcp_dev_attrib_block_size.attr, &rd_mcp_dev_attrib_hw_max_sectors.attr, - &rd_mcp_dev_attrib_fabric_max_sectors.attr, &rd_mcp_dev_attrib_optimal_sectors.attr, &rd_mcp_dev_attrib_hw_queue_depth.attr, &rd_mcp_dev_attrib_queue_depth.attr, diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 11bea19..cd4bed7 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -953,21 +953,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { unsigned long long end_lba; - - if (sectors > dev->dev_attrib.fabric_max_sectors) { - printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" - " big sectors %u exceeds fabric_max_sectors:" - " %u\n", cdb[0], sectors, - dev->dev_attrib.fabric_max_sectors); - return TCM_INVALID_CDB_FIELD; - } - if (sectors > dev->dev_attrib.hw_max_sectors) { - printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" - " big sectors %u exceeds backend hw_max_sectors:" - " %u\n", cdb[0], sectors, - dev->dev_attrib.hw_max_sectors); - return TCM_INVALID_CDB_FIELD; - } check_lba: end_lba = dev->transport->get_blocks(dev) + 1; if (cmd->t_task_lba + sectors > end_lba) { diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 1307600..4c71657 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -505,7 +505,6 @@ static sense_reason_t spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) { struct se_device *dev = cmd->se_dev; - u32 max_sectors; int have_tp = 0; int opt, min; @@ -539,9 +538,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) /* * Set MAXIMUM TRANSFER LENGTH */ - max_sectors = min(dev->dev_attrib.fabric_max_sectors, - dev->dev_attrib.hw_max_sectors); - put_unaligned_be32(max_sectors, &buf[8]); + put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]); /* * Set OPTIMAL TRANSFER LENGTH diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 8bfa61c..1157b55 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -1118,7 +1118,6 @@ static struct configfs_attribute *tcmu_backend_dev_attrs[] = { &tcmu_dev_attrib_hw_block_size.attr, &tcmu_dev_attrib_block_size.attr, &tcmu_dev_attrib_hw_max_sectors.attr, - &tcmu_dev_attrib_fabric_max_sectors.attr, &tcmu_dev_attrib_optimal_sectors.attr, &tcmu_dev_attrib_hw_queue_depth.attr, &tcmu_dev_attrib_queue_depth.attr, diff --git a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c index 231cabc..2c2ec76 100644 --- a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c +++ b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c @@ -119,15 +119,11 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp, continue; result = acpi_bus_get_device(trt->source, &adev); - if (!result) - acpi_create_platform_device(adev); - 
else + if (result) pr_warn("Failed to get source ACPI device\n"); result = acpi_bus_get_device(trt->target, &adev); - if (!result) - acpi_create_platform_device(adev); - else + if (result) pr_warn("Failed to get target ACPI device\n"); } @@ -206,16 +202,12 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp, if (art->source) { result = acpi_bus_get_device(art->source, &adev); - if (!result) - acpi_create_platform_device(adev); - else + if (result) pr_warn("Failed to get source ACPI device\n"); } if (art->target) { result = acpi_bus_get_device(art->target, &adev); - if (!result) - acpi_create_platform_device(adev); - else + if (result) pr_warn("Failed to get source ACPI device\n"); } } diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c index 31bb553..0fe5dbb 100644 --- a/drivers/thermal/int340x_thermal/processor_thermal_device.c +++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c @@ -130,6 +130,8 @@ static int proc_thermal_add(struct device *dev, int ret; adev = ACPI_COMPANION(dev); + if (!adev) + return -ENODEV; status = acpi_evaluate_object(adev->handle, "PPCC", NULL, &buf); if (ACPI_FAILURE(status)) diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 01c01cb..d695b16 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -911,6 +911,23 @@ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd, return 0; } +static int vhost_scsi_to_tcm_attr(int attr) +{ + switch (attr) { + case VIRTIO_SCSI_S_SIMPLE: + return TCM_SIMPLE_TAG; + case VIRTIO_SCSI_S_ORDERED: + return TCM_ORDERED_TAG; + case VIRTIO_SCSI_S_HEAD: + return TCM_HEAD_TAG; + case VIRTIO_SCSI_S_ACA: + return TCM_ACA_TAG; + default: + break; + } + return TCM_SIMPLE_TAG; +} + static void tcm_vhost_submission_work(struct work_struct *work) { struct tcm_vhost_cmd *cmd = @@ -936,9 +953,10 @@ static void tcm_vhost_submission_work(struct work_struct *work) rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess, cmd->tvc_cdb, &cmd->tvc_sense_buf[0], cmd->tvc_lun, cmd->tvc_exp_data_len, - cmd->tvc_task_attr, cmd->tvc_data_direction, - TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count, - NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count); + vhost_scsi_to_tcm_attr(cmd->tvc_task_attr), + cmd->tvc_data_direction, TARGET_SCF_ACK_KREF, + sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr, + cmd->tvc_prot_sgl_count); if (rc < 0) { transport_send_check_condition_and_sense(se_cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 0884805..db284bf 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -136,8 +136,12 @@ static inline void __tlb_adjust_range(struct mmu_gather *tlb, static inline void __tlb_reset_range(struct mmu_gather *tlb) { - tlb->start = TASK_SIZE; - tlb->end = 0; + if (tlb->fullmm) { + tlb->start = tlb->end = ~0; + } else { + tlb->start = TASK_SIZE; + tlb->end = 0; + } } /* diff --git a/include/linux/compiler.h b/include/linux/compiler.h index a1c81f8..33063f8 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -215,7 +215,7 @@ static __always_inline void __read_once_size(volatile void *p, void *res, int si } } -static __always_inline void __assign_once_size(volatile void *p, void *res, int size) +static __always_inline void __write_once_size(volatile void *p, void *res, int size) { switch (size) { case 1: *(volatile __u8 *)p = *(__u8 *)res; break; @@ -235,15 +235,15 @@ static __always_inline 
void __assign_once_size(volatile void *p, void *res, int /* * Prevent the compiler from merging or refetching reads or writes. The * compiler is also forbidden from reordering successive instances of - * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the + * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the * compiler is aware of some particular ordering. One way to make the * compiler aware of ordering is to put the two invocations of READ_ONCE, - * ASSIGN_ONCE or ACCESS_ONCE() in different C statements. + * WRITE_ONCE or ACCESS_ONCE() in different C statements. * * In contrast to ACCESS_ONCE these two macros will also work on aggregate * data types like structs or unions. If the size of the accessed data * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) - * READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a + * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a * compile-time warning. * * Their two major use cases are: (1) Mediating communication between @@ -257,8 +257,8 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int #define READ_ONCE(x) \ ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; }) -#define ASSIGN_ONCE(val, x) \ - ({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; }) +#define WRITE_ONCE(x, val) \ + ({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; }) #endif /* __KERNEL__ */ diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h index 375af80..f767a0d 100644 --- a/include/linux/mmc/sdhci.h +++ b/include/linux/mmc/sdhci.h @@ -137,6 +137,7 @@ struct sdhci_host { #define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */ #define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */ #define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */ +#define SDHCI_HS400_TUNING (1<<13) /* Tuning for HS400 */ unsigned int version; /* SDHCI spec. 
version */ diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 430cfaf..db81c65 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -135,7 +135,6 @@ int se_dev_set_is_nonrot(struct se_device *, int); int se_dev_set_emulate_rest_reord(struct se_device *dev, int); int se_dev_set_queue_depth(struct se_device *, u32); int se_dev_set_max_sectors(struct se_device *, u32); -int se_dev_set_fabric_max_sectors(struct se_device *, u32); int se_dev_set_optimal_sectors(struct se_device *, u32); int se_dev_set_block_size(struct se_device *, u32); diff --git a/include/target/target_core_backend_configfs.h b/include/target/target_core_backend_configfs.h index 3247d75..186f7a9 100644 --- a/include/target/target_core_backend_configfs.h +++ b/include/target/target_core_backend_configfs.h @@ -98,8 +98,6 @@ static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR); \ DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors); \ TB_DEV_ATTR_RO(_backend, hw_max_sectors); \ - DEF_TB_DEV_ATTRIB(_backend, fabric_max_sectors); \ - TB_DEV_ATTR(_backend, fabric_max_sectors, S_IRUGO | S_IWUSR); \ DEF_TB_DEV_ATTRIB(_backend, optimal_sectors); \ TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR); \ DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth); \ diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 397fb63..4a8795a 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -77,8 +77,6 @@ #define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0 /* Default max_write_same_len, disabled by default */ #define DA_MAX_WRITE_SAME_LEN 0 -/* Default max transfer length */ -#define DA_FABRIC_MAX_SECTORS 8192 /* Use a model alias based on the configfs backend device name */ #define DA_EMULATE_MODEL_ALIAS 0 /* Emulation for Direct Page Out */ @@ -694,7 +692,6 @@ struct se_dev_attrib { u32 hw_block_size; u32 block_size; u32 hw_max_sectors; - u32 fabric_max_sectors; u32 optimal_sectors; u32 hw_queue_depth; u32 queue_depth; diff --git a/include/xen/interface/nmi.h b/include/xen/interface/nmi.h new file mode 100644 index 0000000..b47d9d0 --- /dev/null +++ b/include/xen/interface/nmi.h @@ -0,0 +1,51 @@ +/****************************************************************************** + * nmi.h + * + * NMI callback registration and reason codes. + * + * Copyright (c) 2005, Keir Fraser <keir@xensource.com> + */ + +#ifndef __XEN_PUBLIC_NMI_H__ +#define __XEN_PUBLIC_NMI_H__ + +#include <xen/interface/xen.h> + +/* + * NMI reason codes: + * Currently these are x86-specific, stored in arch_shared_info.nmi_reason. + */ + /* I/O-check error reported via ISA port 0x61, bit 6. */ +#define _XEN_NMIREASON_io_error 0 +#define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error) + /* PCI SERR reported via ISA port 0x61, bit 7. */ +#define _XEN_NMIREASON_pci_serr 1 +#define XEN_NMIREASON_pci_serr (1UL << _XEN_NMIREASON_pci_serr) + /* Unknown hardware-generated NMI. */ +#define _XEN_NMIREASON_unknown 2 +#define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown) + +/* + * long nmi_op(unsigned int cmd, void *arg) + * NB. All ops return zero on success, else a negative error code. + */ + +/* + * Register NMI callback for this (calling) VCPU. Currently this only makes + * sense for domain 0, vcpu 0. All other callers will be returned EINVAL. + * arg == pointer to xennmi_callback structure. 
+ */ +#define XENNMI_register_callback 0 +struct xennmi_callback { + unsigned long handler_address; + unsigned long pad; +}; +DEFINE_GUEST_HANDLE_STRUCT(xennmi_callback); + +/* + * Deregister NMI callback for this (calling) VCPU. + * arg == NULL. + */ +#define XENNMI_unregister_callback 1 + +#endif /* __XEN_PUBLIC_NMI_H__ */ diff --git a/mm/memory.c b/mm/memory.c index c6565f0..54f3a9b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -235,6 +235,9 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) { + if (!tlb->end) + return; + tlb_flush(tlb); mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end); #ifdef CONFIG_HAVE_RCU_TABLE_FREE @@ -247,7 +250,7 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb) { struct mmu_gather_batch *batch; - for (batch = &tlb->local; batch; batch = batch->next) { + for (batch = &tlb->local; batch && batch->nr; batch = batch->next) { free_pages_and_swap_cache(batch->pages, batch->nr); batch->nr = 0; } @@ -256,9 +259,6 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb) void tlb_flush_mmu(struct mmu_gather *tlb) { - if (!tlb->end) - return; - tlb_flush_mmu_tlbonly(tlb); tlb_flush_mmu_free(tlb); } diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c index d273624..e238c95 100644 --- a/tools/testing/selftests/exec/execveat.c +++ b/tools/testing/selftests/exec/execveat.c @@ -62,7 +62,7 @@ static int _check_execveat_fail(int fd, const char *path, int flags, } static int check_execveat_invoked_rc(int fd, const char *path, int flags, - int expected_rc) + int expected_rc, int expected_rc2) { int status; int rc; @@ -98,9 +98,10 @@ static int check_execveat_invoked_rc(int fd, const char *path, int flags, child, status); return 1; } - if (WEXITSTATUS(status) != expected_rc) { - printf("[FAIL] (child %d exited with %d not %d)\n", - child, WEXITSTATUS(status), expected_rc); + if ((WEXITSTATUS(status) != expected_rc) && + (WEXITSTATUS(status) != expected_rc2)) { + printf("[FAIL] (child %d exited with %d not %d nor %d)\n", + child, WEXITSTATUS(status), expected_rc, expected_rc2); return 1; } printf("[OK]\n"); @@ -109,7 +110,7 @@ static int check_execveat_invoked_rc(int fd, const char *path, int flags, static int check_execveat(int fd, const char *path, int flags) { - return check_execveat_invoked_rc(fd, path, flags, 99); + return check_execveat_invoked_rc(fd, path, flags, 99, 99); } static char *concat(const char *left, const char *right) @@ -192,9 +193,15 @@ static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script) * Execute as a long pathname relative to ".". If this is a script, * the interpreter will launch but fail to open the script because its * name ("/dev/fd/5/xxx....") is bigger than PATH_MAX. + * + * The failure code is usually 127 (POSIX: "If a command is not found, + * the exit status shall be 127."), but some systems give 126 (POSIX: + * "If the command name is found, but it is not an executable utility, + * the exit status shall be 126."), so allow either. 
*/ if (is_script) - fail += check_execveat_invoked_rc(dot_dfd, longpath, 0, 127); + fail += check_execveat_invoked_rc(dot_dfd, longpath, 0, + 127, 126); else fail += check_execveat(dot_dfd, longpath, 0); diff --git a/tools/testing/selftests/mqueue/mq_perf_tests.c b/tools/testing/selftests/mqueue/mq_perf_tests.c index 94dae65..8519e9e 100644 --- a/tools/testing/selftests/mqueue/mq_perf_tests.c +++ b/tools/testing/selftests/mqueue/mq_perf_tests.c @@ -536,10 +536,9 @@ int main(int argc, char *argv[]) { struct mq_attr attr; char *option, *next_option; - int i, cpu; + int i, cpu, rc; struct sigaction sa; poptContext popt_context; - char rc; void *retval; main_thread = pthread_self(); diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile index 4c4b1f6..077828c 100644 --- a/tools/testing/selftests/vm/Makefile +++ b/tools/testing/selftests/vm/Makefile @@ -7,7 +7,7 @@ BINARIES += transhuge-stress all: $(BINARIES) %: %.c - $(CC) $(CFLAGS) -o $@ $^ + $(CC) $(CFLAGS) -o $@ $^ -lrt run_tests: all @/bin/sh ./run_vmtests || (echo "vmtests: [FAIL]"; exit 1) |
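Postscript on the READ_ONCE()/WRITE_ONCE() change in include/linux/compiler.h above: the commit renames ASSIGN_ONCE(val, x) to WRITE_ONCE(x, val), putting the lvalue first so the macro reads like an assignment and mirrors READ_ONCE(x). Below is a minimal userspace sketch of the usage pattern the pair supports; the simplified macros are illustrative stand-ins (a plain volatile access), not the kernel definitions, which additionally fall back to memcpy for accesses wider than the machine word:

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-ins: the volatile access forces the compiler to
 * emit exactly one load/store and keeps it from tearing, merging or
 * refetching the access. Uses GNU C typeof, as the kernel does. */
#define WRITE_ONCE(x, val) (*(volatile typeof(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile typeof(x) *)&(x))

static int flag;	/* shared between the two threads */

static void *producer(void *arg)
{
	WRITE_ONCE(flag, 1);	/* one untorn store, never split by the compiler */
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	while (!READ_ONCE(flag))
		;		/* reloaded every iteration, never hoisted out of the loop */
	pthread_join(t, NULL);
	printf("flag observed\n");
	return 0;
}

As the comment block in that hunk notes, ordering between two such accesses is only guaranteed when they appear in different C statements; READ_ONCE()/WRITE_ONCE() prevent compiler tearing and refetching, not reordering by the CPU.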