From 7a9bdd8842d0847f3b15068550a3632a2d259057 Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Tue, 5 Apr 2005 18:05:00 -0700 Subject: [IA64] Add config SCHED_SMT Now that we have MC/MT detection patches in, appended patch allows us to configure MT scheduler optimizations. For now, we will this option off by default. There is some discussion going on lkml about setting up sched-domains which are absolutely needed (like for example, we shouldn't setup SMT domain for non MT processors). Once that patch goes in, we can enable this option by default. Signed-off-by: Suresh Siddha Signed-off-by: Tony Luck --- arch/ia64/Kconfig | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch/ia64') diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 468dbe8..cad8346 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -261,6 +261,15 @@ config HOTPLUG_CPU can be controlled through /sys/devices/system/cpu/cpu#. Say N if you want to disable CPU hotplug. +config SCHED_SMT + bool "SMT scheduler support" + depends on SMP + default off + help + Improves the CPU scheduler's decision making when dealing with + Intel IA64 chips with MultiThreading at a cost of slightly increased + overhead in some places. If unsure say N here. + config PREEMPT bool "Preemptible Kernel" help -- cgit v1.1 From 512f64295f2f0049515dcc4e97c1f1ae0df9629c Mon Sep 17 00:00:00 2001 From: Andreas Schwab Date: Tue, 26 Apr 2005 23:00:00 -0700 Subject: [IA64] Fix memcpy_mck.S for current binutils The current ia64 assembler complains about mismatching .proc/.endp pairs. (Same patch also sent by H.J. Lu) Signed-off-by: Andreas Schwab Signed-off-by: Tony Luck --- arch/ia64/lib/memcpy_mck.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/lib/memcpy_mck.S b/arch/ia64/lib/memcpy_mck.S index 3c2cd2f..6f308e6 100644 --- a/arch/ia64/lib/memcpy_mck.S +++ b/arch/ia64/lib/memcpy_mck.S @@ -75,6 +75,7 @@ GLOBAL_ENTRY(memcpy) mov f6=f0 br.cond.sptk .common_code ;; +END(memcpy) GLOBAL_ENTRY(__copy_user) .prologue // check dest alignment @@ -524,7 +525,6 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \ #undef B #undef C #undef D -END(memcpy) /* * Due to lack of local tag support in gcc 2.x assembler, it is not clear which -- cgit v1.1 From 9df6f705c0934a49d0f0a3468a5b5044c8aec4f1 Mon Sep 17 00:00:00 2001 From: David Mosberger-Tang Date: Fri, 25 Mar 2005 00:16:00 -0700 Subject: [IA64] fix typos caught by new assembler Patch below fixes 3 trivial typos which are caught by the new assembler (v2.169.90). Please apply. [Note: fix to memcpy that was also part of this patch was separately applied from patches by H.J. and Andreas ... so the delta here only has the other two fixes. 
-Tony] Signed-off-by: David Mosberger-Tang Signed-off-by: Tony Luck --- arch/ia64/kernel/entry.S | 2 +- arch/ia64/lib/memset.S | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index d3f0938..81c45d4 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -782,7 +782,7 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve) st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit .mem.offset 8,0 st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit -END(ia64_ret_from_ia32_execve_syscall) +END(ia64_ret_from_ia32_execve) // fall through #endif /* CONFIG_IA32_SUPPORT */ GLOBAL_ENTRY(ia64_leave_kernel) diff --git a/arch/ia64/lib/memset.S b/arch/ia64/lib/memset.S index bd8cf90..f26c16a 100644 --- a/arch/ia64/lib/memset.S +++ b/arch/ia64/lib/memset.S @@ -57,10 +57,10 @@ GLOBAL_ENTRY(memset) { .mmi .prologue alloc tmp = ar.pfs, 3, 0, 0, 0 - .body lfetch.nt1 [dest] // .save ar.lc, save_lc mov.i save_lc = ar.lc + .body } { .mmi mov ret0 = dest // return value cmp.ne p_nz, p_zr = value, r0 // use stf.spill if value is zero -- cgit v1.1 From 2074615a13a4f250e0a4e3f6ec8e3733b950a783 Mon Sep 17 00:00:00 2001 From: David Mosberger-Tang Date: Fri, 18 Feb 2005 19:09:00 -0700 Subject: [IA64] use fc.i for fluch_icache_range() This is a small patch to switch fluch_icache_range() to use fc.i instead of fc. This would save time on processors which can establish i-cache coherency without flushing the cache-line out to memory (not that any current processors do). On existing processors, fc.i behaves like fc. The only caveat is that very old assemblers may not know about fc.i yet. Signed-off-by: David Mosberger-Tang Signed-off-by: Tony Luck --- arch/ia64/lib/flush.S | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/lib/flush.S b/arch/ia64/lib/flush.S index 29c802b..a1af914 100644 --- a/arch/ia64/lib/flush.S +++ b/arch/ia64/lib/flush.S @@ -1,8 +1,8 @@ /* * Cache flushing routines. * - * Copyright (C) 1999-2001 Hewlett-Packard Co - * Copyright (C) 1999-2001 David Mosberger-Tang + * Copyright (C) 1999-2001, 2005 Hewlett-Packard Co + * David Mosberger-Tang */ #include #include @@ -26,7 +26,7 @@ GLOBAL_ENTRY(flush_icache_range) mov ar.lc=r8 ;; -.Loop: fc in0 // issuable on M0 only +.Loop: fc.i in0 // issuable on M2 only add in0=32,in0 br.cloop.sptk.few .Loop ;; -- cgit v1.1 From c2d1d65ad441c8abe624bdb1c2cff2e47c8c1ee1 Mon Sep 17 00:00:00 2001 From: Bruce Losure Date: Thu, 24 Mar 2005 12:28:00 -0700 Subject: [IA64-SGI] Altix only: Remove hubdev SAL call Hi Tony, This patch against ia64-test-2.6.12 fixes a bug where the tiocx code was inadvertently un-doing some address modifications done in earlier fixup code. This patch just removes the offending code. 
Signed-off-by: Bruce Losure Signed-off-by: Tony Luck --- arch/ia64/sn/kernel/tiocx.c | 19 ------------------- 1 file changed, 19 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c index 66190d7..128ccf2 100644 --- a/arch/ia64/sn/kernel/tiocx.c +++ b/arch/ia64/sn/kernel/tiocx.c @@ -330,19 +330,6 @@ EXPORT_SYMBOL(tiocx_bus_type); EXPORT_SYMBOL(tiocx_dma_addr); EXPORT_SYMBOL(tiocx_swin_base); -static uint64_t tiocx_get_hubdev_info(u64 handle, u64 address) -{ - - struct ia64_sal_retval ret_stuff; - ret_stuff.status = 0; - ret_stuff.v0 = 0; - - ia64_sal_oemcall_nolock(&ret_stuff, - SN_SAL_IOIF_GET_HUBDEV_INFO, - handle, address, 0, 0, 0, 0, 0); - return ret_stuff.v0; -} - static void tio_conveyor_set(nasid_t nasid, int enable_flag) { uint64_t ice_frz; @@ -477,18 +464,12 @@ static int __init tiocx_init(void) if (nasid & 0x1) { /* TIO's are always odd */ struct hubdev_info *hubdev; - uint64_t status; struct xwidget_info *widgetp; DBG("Found TIO at nasid 0x%x\n", nasid); hubdev = (struct hubdev_info *)(NODEPDA(cnodeid)->pdinfo); - status = - tiocx_get_hubdev_info(nasid, - (uint64_t) __pa(hubdev)); - if (status) - continue; widgetp = &hubdev->hdi_xwidget_info[TIOCX_CORELET]; -- cgit v1.1 From ce0a3956b32650e229b68964c4400bbdc5ad3ca1 Mon Sep 17 00:00:00 2001 From: Bruce Losure Date: Mon, 25 Apr 2005 19:41:00 -0700 Subject: [IA64-SGI] Altix patch to add bricktype knowledge to tiocx Here is a patch to enable the SGI tiocx bus driver to distingush between FPGA-attached h/w and non-FPGA-attached h/w. Signed-off-by: Bruce Losure Signed-off-by: Tony Luck --- arch/ia64/sn/kernel/tiocx.c | 41 ++++++++++++++++++++++++++++++++--------- 1 file changed, 32 insertions(+), 9 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c index 128ccf2..ab9b5f3 100644 --- a/arch/ia64/sn/kernel/tiocx.c +++ b/arch/ia64/sn/kernel/tiocx.c @@ -21,6 +21,8 @@ #include #include #include +#include +#include #include "tio.h" #include "xtalk/xwidgetdev.h" #include "xtalk/hubdev.h" @@ -308,14 +310,12 @@ void tiocx_irq_free(struct sn_irq_info *sn_irq_info) } } -uint64_t -tiocx_dma_addr(uint64_t addr) +uint64_t tiocx_dma_addr(uint64_t addr) { return PHYS_TO_TIODMA(addr); } -uint64_t -tiocx_swin_base(int nasid) +uint64_t tiocx_swin_base(int nasid) { return TIO_SWIN_BASE(nasid, TIOCX_CORELET); } @@ -366,7 +366,29 @@ static void tio_corelet_reset(nasid_t nasid, int corelet) udelay(2000); } -static int fpga_attached(nasid_t nasid) +static int tiocx_btchar_get(int nasid) +{ + moduleid_t module_id; + geoid_t geoid; + int cnodeid; + + cnodeid = nasid_to_cnodeid(nasid); + geoid = cnodeid_get_geoid(cnodeid); + module_id = geo_module(geoid); + return MODULE_GET_BTCHAR(module_id); +} + +static int is_fpga_brick(int nasid) +{ + switch (tiocx_btchar_get(nasid)) { + case L1_BRICKTYPE_SA: + case L1_BRICKTYPE_ATHENA: + return 1; + } + return 0; +} + +static int bitstream_loaded(nasid_t nasid) { uint64_t cx_credits; @@ -383,7 +405,7 @@ static int tiocx_reload(struct cx_dev *cx_dev) int mfg_num = CX_DEV_NONE; nasid_t nasid = cx_dev->cx_id.nasid; - if (fpga_attached(nasid)) { + if (bitstream_loaded(nasid)) { uint64_t cx_id; cx_id = @@ -414,9 +436,10 @@ static ssize_t show_cxdev_control(struct device *dev, char *buf) { struct cx_dev *cx_dev = to_cx_dev(dev); - return sprintf(buf, "0x%x 0x%x 0x%x\n", + return sprintf(buf, "0x%x 0x%x 0x%x %d\n", cx_dev->cx_id.nasid, - cx_dev->cx_id.part_num, cx_dev->cx_id.mfg_num); + cx_dev->cx_id.part_num, 
cx_dev->cx_id.mfg_num, + tiocx_btchar_get(cx_dev->cx_id.nasid)); } static ssize_t store_cxdev_control(struct device *dev, const char *buf, @@ -462,7 +485,7 @@ static int __init tiocx_init(void) if ((nasid = cnodeid_to_nasid(cnodeid)) < 0) break; /* No more nasids .. bail out of loop */ - if (nasid & 0x1) { /* TIO's are always odd */ + if ((nasid & 0x1) && is_fpga_brick(nasid)) { struct hubdev_info *hubdev; struct xwidget_info *widgetp; -- cgit v1.1 From 2e34f07ff0c944399a6456e2d91cf0ca1d9a497c Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Mon, 21 Mar 2005 19:41:00 -0700 Subject: [PATCH] move cnodeid_to_nasid_table out of pda Another step in the effort to eliminate the SN pda structure. This patch moves the cnodeid_to_nasid_table field out of the pda, making it a standalone per-cpu data item, and exports it so it can be accessed by kernel modules. Signed-off-by: Dean Nelson Signed-off-by: Tony Luck --- arch/ia64/sn/kernel/setup.c | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index d35f2a6..fea71ee 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c @@ -3,7 +3,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 1999,2001-2004 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved. */ #include @@ -73,6 +73,9 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second); DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); EXPORT_PER_CPU_SYMBOL(__sn_hub_info); +DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]); +EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid); + partid_t sn_partid = -1; EXPORT_SYMBOL(sn_partid); char sn_system_serial_number_string[128]; @@ -373,11 +376,11 @@ static void __init sn_init_pdas(char **cmdline_p) { cnodeid_t cnode; - memset(pda->cnodeid_to_nasid_table, -1, - sizeof(pda->cnodeid_to_nasid_table)); + memset(sn_cnodeid_to_nasid, -1, + sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid))); for_each_online_node(cnode) - pda->cnodeid_to_nasid_table[cnode] = - pxm_to_nasid(nid_to_pxm_map[cnode]); + sn_cnodeid_to_nasid[cnode] = + pxm_to_nasid(nid_to_pxm_map[cnode]); numionodes = num_online_nodes(); scan_for_ionodes(); @@ -486,15 +489,18 @@ void __init sn_cpu_init(void) pda->idle_flag = 0; if (cpuid != 0) { - memcpy(pda->cnodeid_to_nasid_table, - pdacpu(0)->cnodeid_to_nasid_table, - sizeof(pda->cnodeid_to_nasid_table)); + /* copy cpu 0's sn_cnodeid_to_nasid table to this cpu's */ + memcpy(sn_cnodeid_to_nasid, + (&per_cpu(__sn_cnodeid_to_nasid, 0)), + sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid))); } /* * Check for WARs. * Only needs to be done once, on BSP. - * Has to be done after loop above, because it uses pda.cnodeid_to_nasid_table[i]. + * Has to be done after loop above, because it uses this cpu's + * sn_cnodeid_to_nasid table which was just initialized if this + * isn't cpu 0. * Has to be done before assignment below. 
*/ if (!wars_have_been_checked) { @@ -580,8 +586,7 @@ static void __init scan_for_ionodes(void) brd = find_lboard_any(brd, KLTYPE_SNIA); while (brd) { - pda->cnodeid_to_nasid_table[numionodes] = - brd->brd_nasid; + sn_cnodeid_to_nasid[numionodes] = brd->brd_nasid; physical_node_map[brd->brd_nasid] = numionodes; root_lboard[numionodes] = brd; numionodes++; @@ -602,8 +607,7 @@ static void __init scan_for_ionodes(void) root_lboard[nasid_to_cnodeid(nasid)], KLTYPE_TIO); while (brd) { - pda->cnodeid_to_nasid_table[numionodes] = - brd->brd_nasid; + sn_cnodeid_to_nasid[numionodes] = brd->brd_nasid; physical_node_map[brd->brd_nasid] = numionodes; root_lboard[numionodes] = brd; numionodes++; @@ -614,7 +618,6 @@ static void __init scan_for_ionodes(void) brd = find_lboard_any(brd, KLTYPE_TIO); } } - } int -- cgit v1.1 From b0d82bd5df874f7dadbeced1b0163473387da37c Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Wed, 23 Mar 2005 19:46:00 -0700 Subject: [IA64-SGI] SGI Altix cross partition functionality (2nd This patch contains the shim module (XP) which interfaces between the communication module (XPC) and the functional support modules (like XPNET). Signed-off-by: Dean Nelson Signed-off-by: Tony Luck --- arch/ia64/Kconfig | 10 ++ arch/ia64/sn/kernel/xp_main.c | 289 +++++++++++++++++++++++++++++++++++++++ arch/ia64/sn/kernel/xp_nofault.S | 31 +++++ 3 files changed, 330 insertions(+) create mode 100644 arch/ia64/sn/kernel/xp_main.c create mode 100644 arch/ia64/sn/kernel/xp_nofault.S (limited to 'arch/ia64') diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index cad8346..ce13ad6 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -217,6 +217,16 @@ config IA64_SGI_SN_SIM If you are compiling a kernel that will run under SGI's IA-64 simulator (Medusa) then say Y, otherwise say N. +config IA64_SGI_SN_XP + tristate "Support communication between SGI SSIs" + depends on MSPEC + help + An SGI machine can be divided into multiple Single System + Images which act independently of each other and have + hardware based memory protection from the others. Enabling + this feature will allow for direct communication between SSIs + based on a network adapter and DMA messaging. + config FORCE_MAX_ZONEORDER int default "18" diff --git a/arch/ia64/sn/kernel/xp_main.c b/arch/ia64/sn/kernel/xp_main.c new file mode 100644 index 0000000..3be52a3 --- /dev/null +++ b/arch/ia64/sn/kernel/xp_main.c @@ -0,0 +1,289 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. + */ + + +/* + * Cross Partition (XP) base. + * + * XP provides a base from which its users can interact + * with XPC, yet not be dependent on XPC. + * + */ + + +#include +#include +#include +#include +#include +#include + + +/* + * Target of nofault PIO read. + */ +u64 xp_nofault_PIOR_target; + + +/* + * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level + * users of XPC. + */ +struct xpc_registration xpc_registrations[XPC_NCHANNELS]; + + +/* + * Initialize the XPC interface to indicate that XPC isn't loaded. 
+ */ +static enum xpc_retval xpc_notloaded(void) { return xpcNotLoaded; } + +struct xpc_interface xpc_interface = { + (void (*)(int)) xpc_notloaded, + (void (*)(int)) xpc_notloaded, + (enum xpc_retval (*)(partid_t, int, u32, void **)) xpc_notloaded, + (enum xpc_retval (*)(partid_t, int, void *)) xpc_notloaded, + (enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, void *)) + xpc_notloaded, + (void (*)(partid_t, int, void *)) xpc_notloaded, + (enum xpc_retval (*)(partid_t, void *)) xpc_notloaded +}; + + +/* + * XPC calls this when it (the XPC module) has been loaded. + */ +void +xpc_set_interface(void (*connect)(int), + void (*disconnect)(int), + enum xpc_retval (*allocate)(partid_t, int, u32, void **), + enum xpc_retval (*send)(partid_t, int, void *), + enum xpc_retval (*send_notify)(partid_t, int, void *, + xpc_notify_func, void *), + void (*received)(partid_t, int, void *), + enum xpc_retval (*partid_to_nasids)(partid_t, void *)) +{ + xpc_interface.connect = connect; + xpc_interface.disconnect = disconnect; + xpc_interface.allocate = allocate; + xpc_interface.send = send; + xpc_interface.send_notify = send_notify; + xpc_interface.received = received; + xpc_interface.partid_to_nasids = partid_to_nasids; +} + + +/* + * XPC calls this when it (the XPC module) is being unloaded. + */ +void +xpc_clear_interface(void) +{ + xpc_interface.connect = (void (*)(int)) xpc_notloaded; + xpc_interface.disconnect = (void (*)(int)) xpc_notloaded; + xpc_interface.allocate = (enum xpc_retval (*)(partid_t, int, u32, + void **)) xpc_notloaded; + xpc_interface.send = (enum xpc_retval (*)(partid_t, int, void *)) + xpc_notloaded; + xpc_interface.send_notify = (enum xpc_retval (*)(partid_t, int, void *, + xpc_notify_func, void *)) xpc_notloaded; + xpc_interface.received = (void (*)(partid_t, int, void *)) + xpc_notloaded; + xpc_interface.partid_to_nasids = (enum xpc_retval (*)(partid_t, void *)) + xpc_notloaded; +} + + +/* + * Register for automatic establishment of a channel connection whenever + * a partition comes up. + * + * Arguments: + * + * ch_number - channel # to register for connection. + * func - function to call for asynchronous notification of channel + * state changes (i.e., connection, disconnection, error) and + * the arrival of incoming messages. + * key - pointer to optional user-defined value that gets passed back + * to the user on any callouts made to func. + * payload_size - size in bytes of the XPC message's payload area which + * contains a user-defined message. The user should make + * this large enough to hold their largest message. + * nentries - max #of XPC message entries a message queue can contain. + * The actual number, which is determined when a connection + * is established and may be less then requested, will be + * passed to the user via the xpcConnected callout. + * assigned_limit - max number of kthreads allowed to be processing + * messages (per connection) at any given instant. + * idle_limit - max number of kthreads allowed to be idle at any given + * instant. 
+ */ +enum xpc_retval +xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size, + u16 nentries, u32 assigned_limit, u32 idle_limit) +{ + struct xpc_registration *registration; + + + DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); + DBUG_ON(payload_size == 0 || nentries == 0); + DBUG_ON(func == NULL); + DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit); + + registration = &xpc_registrations[ch_number]; + + if (down_interruptible(®istration->sema) != 0) { + return xpcInterrupted; + } + + /* if XPC_CHANNEL_REGISTERED(ch_number) */ + if (registration->func != NULL) { + up(®istration->sema); + return xpcAlreadyRegistered; + } + + /* register the channel for connection */ + registration->msg_size = XPC_MSG_SIZE(payload_size); + registration->nentries = nentries; + registration->assigned_limit = assigned_limit; + registration->idle_limit = idle_limit; + registration->key = key; + registration->func = func; + + up(®istration->sema); + + xpc_interface.connect(ch_number); + + return xpcSuccess; +} + + +/* + * Remove the registration for automatic connection of the specified channel + * when a partition comes up. + * + * Before returning this xpc_disconnect() will wait for all connections on the + * specified channel have been closed/torndown. So the caller can be assured + * that they will not be receiving any more callouts from XPC to their + * function registered via xpc_connect(). + * + * Arguments: + * + * ch_number - channel # to unregister. + */ +void +xpc_disconnect(int ch_number) +{ + struct xpc_registration *registration; + + + DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); + + registration = &xpc_registrations[ch_number]; + + /* + * We've decided not to make this a down_interruptible(), since we + * figured XPC's users will just turn around and call xpc_disconnect() + * again anyways, so we might as well wait, if need be. + */ + down(®istration->sema); + + /* if !XPC_CHANNEL_REGISTERED(ch_number) */ + if (registration->func == NULL) { + up(®istration->sema); + return; + } + + /* remove the connection registration for the specified channel */ + registration->func = NULL; + registration->key = NULL; + registration->nentries = 0; + registration->msg_size = 0; + registration->assigned_limit = 0; + registration->idle_limit = 0; + + xpc_interface.disconnect(ch_number); + + up(®istration->sema); + + return; +} + + +int __init +xp_init(void) +{ + int ret, ch_number; + u64 func_addr = *(u64 *) xp_nofault_PIOR; + u64 err_func_addr = *(u64 *) xp_error_PIOR; + + + if (!ia64_platform_is("sn2")) { + return -ENODEV; + } + + /* + * Register a nofault code region which performs a cross-partition + * PIO read. If the PIO read times out, the MCA handler will consume + * the error and return to a kernel-provided instruction to indicate + * an error. This PIO read exists because it is guaranteed to timeout + * if the destination is down (AMO operations do not timeout on at + * least some CPUs on Shubs <= v1.2, which unfortunately we have to + * work around). + */ + if ((ret = sn_register_nofault_code(func_addr, err_func_addr, + err_func_addr, 1, 1)) != 0) { + printk(KERN_ERR "XP: can't register nofault code, error=%d\n", + ret); + } + /* + * Setup the nofault PIO read target. (There is no special reason why + * SH_IPI_ACCESS was selected.) 
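/*
 * [Editorial example, not part of the patch above.]  A minimal sketch of how
 * a kernel-level user of XP might register a channel with xpc_connect() and
 * tear it down again with xpc_disconnect(), per the argument description in
 * the comment block preceding xpc_connect().  The channel number, payload
 * size, entry count and kthread limits are made-up illustrative values, and
 * the xpc_channel_func callback signature is assumed from the XP header; it
 * is not defined in this hunk.
 */
#define EXAMPLE_CHANNEL		3	/* hypothetical channel number */

static void
example_channel_func(enum xpc_retval reason, partid_t partid, int ch_number,
		     void *data, void *key)
{
	/* called on connect/disconnect/error and on arrival of messages */
}

static int __init
example_init(void)
{
	enum xpc_retval ret;

	ret = xpc_connect(EXAMPLE_CHANNEL, example_channel_func, NULL,
			  128,	/* payload size in bytes */
			  64,	/* requested #of message queue entries */
			  4,	/* assigned kthread limit */
			  2);	/* idle kthread limit (<= assigned limit) */
	return (ret == xpcSuccess) ? 0 : -EBUSY;
}

static void __exit
example_exit(void)
{
	/* waits until all connections on the channel have been torn down */
	xpc_disconnect(EXAMPLE_CHANNEL);
}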
+ */ + if (is_shub2()) { + xp_nofault_PIOR_target = SH2_IPI_ACCESS0; + } else { + xp_nofault_PIOR_target = SH1_IPI_ACCESS; + } + + /* initialize the connection registration semaphores */ + for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) { + sema_init(&xpc_registrations[ch_number].sema, 1); /* mutex */ + } + + return 0; +} +module_init(xp_init); + + +void __exit +xp_exit(void) +{ + u64 func_addr = *(u64 *) xp_nofault_PIOR; + u64 err_func_addr = *(u64 *) xp_error_PIOR; + + + /* unregister the PIO read nofault code region */ + (void) sn_register_nofault_code(func_addr, err_func_addr, + err_func_addr, 1, 0); +} +module_exit(xp_exit); + + +MODULE_AUTHOR("Silicon Graphics, Inc."); +MODULE_DESCRIPTION("Cross Partition (XP) base"); +MODULE_LICENSE("GPL"); + +EXPORT_SYMBOL(xp_nofault_PIOR); +EXPORT_SYMBOL(xp_nofault_PIOR_target); +EXPORT_SYMBOL(xpc_registrations); +EXPORT_SYMBOL(xpc_interface); +EXPORT_SYMBOL(xpc_clear_interface); +EXPORT_SYMBOL(xpc_set_interface); +EXPORT_SYMBOL(xpc_connect); +EXPORT_SYMBOL(xpc_disconnect); + diff --git a/arch/ia64/sn/kernel/xp_nofault.S b/arch/ia64/sn/kernel/xp_nofault.S new file mode 100644 index 0000000..b772543 --- /dev/null +++ b/arch/ia64/sn/kernel/xp_nofault.S @@ -0,0 +1,31 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. + */ + + +/* + * The xp_nofault_PIOR function takes a pointer to a remote PIO register + * and attempts to load and consume a value from it. This function + * will be registered as a nofault code block. In the event that the + * PIO read fails, the MCA handler will force the error to look + * corrected and vector to the xp_error_PIOR which will return an error. + * + * extern int xp_nofault_PIOR(void *remote_register); + */ + + .global xp_nofault_PIOR +xp_nofault_PIOR: + mov r8=r0 // Stage a success return value + ld8.acq r9=[r32];; // PIO Read the specified register + adds r9=1,r9 // Add to force a consume + br.ret.sptk.many b0;; // Return success + + .global xp_error_PIOR +xp_error_PIOR: + mov r8=1 // Return value of 1 + br.ret.sptk.many b0;; // Return failure + -- cgit v1.1 From 21223a9e78050919499d3d9039170e608eb939cc Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Tue, 3 May 2005 12:25:50 -0700 Subject: [IA64] manually apply changes to arch/ia64/sn/kernel/Makefile cg-patch couldn't apply the patch to Makefile, and my dumb script rushed on and ran cg-commit without this change. Signed-off-by: Tony Luck --- arch/ia64/sn/kernel/Makefile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile index 4f381fb..b1a4a23 100644 --- a/arch/ia64/sn/kernel/Makefile +++ b/arch/ia64/sn/kernel/Makefile @@ -4,10 +4,12 @@ # License. See the file "COPYING" in the main directory of this archive # for more details. # -# Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved. +# Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All Rights Reserved. 
# obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \ huberror.o io_init.o iomv.o klconflib.o sn2/ obj-$(CONFIG_IA64_GENERIC) += machvec.o obj-$(CONFIG_SGI_TIOCX) += tiocx.o +obj-$(CONFIG_IA64_SGI_SN_XP) += xp.o +xp-y := xp_main.o xp_nofault.o -- cgit v1.1 From 89eb8eb927e324366c3ac0458998aaf9953fc5cd Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Wed, 23 Mar 2005 19:50:00 -0700 Subject: [IA64-SGI] SGI Altix cross partition functionality [2/3] This patch contains the communication module (XPC) for cross partition communication on a partitioned SGI Altix. Signed-off-by: Dean Nelson Signed-off-by: Tony Luck --- arch/ia64/sn/kernel/Makefile | 2 + arch/ia64/sn/kernel/xpc.h | 991 +++++++++++++++ arch/ia64/sn/kernel/xpc_channel.c | 2297 +++++++++++++++++++++++++++++++++++ arch/ia64/sn/kernel/xpc_main.c | 1064 ++++++++++++++++ arch/ia64/sn/kernel/xpc_partition.c | 971 +++++++++++++++ 5 files changed, 5325 insertions(+) create mode 100644 arch/ia64/sn/kernel/xpc.h create mode 100644 arch/ia64/sn/kernel/xpc_channel.c create mode 100644 arch/ia64/sn/kernel/xpc_main.c create mode 100644 arch/ia64/sn/kernel/xpc_partition.c (limited to 'arch/ia64') diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile index b1a4a23..6959736 100644 --- a/arch/ia64/sn/kernel/Makefile +++ b/arch/ia64/sn/kernel/Makefile @@ -13,3 +13,5 @@ obj-$(CONFIG_IA64_GENERIC) += machvec.o obj-$(CONFIG_SGI_TIOCX) += tiocx.o obj-$(CONFIG_IA64_SGI_SN_XP) += xp.o xp-y := xp_main.o xp_nofault.o +obj-$(CONFIG_IA64_SGI_SN_XP) += xpc.o +xpc-y := xpc_main.o xpc_channel.o xpc_partition.o diff --git a/arch/ia64/sn/kernel/xpc.h b/arch/ia64/sn/kernel/xpc.h new file mode 100644 index 0000000..1a0aed8 --- /dev/null +++ b/arch/ia64/sn/kernel/xpc.h @@ -0,0 +1,991 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. + */ + + +/* + * Cross Partition Communication (XPC) structures and macros. + */ + +#ifndef _IA64_SN_KERNEL_XPC_H +#define _IA64_SN_KERNEL_XPC_H + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* + * XPC Version numbers consist of a major and minor number. XPC can always + * talk to versions with same major #, and never talk to versions with a + * different major #. + */ +#define _XPC_VERSION(_maj, _min) (((_maj) << 4) | ((_min) & 0xf)) +#define XPC_VERSION_MAJOR(_v) ((_v) >> 4) +#define XPC_VERSION_MINOR(_v) ((_v) & 0xf) + + +/* + * The next macros define word or bit representations for given + * C-brick nasid in either the SAL provided bit array representing + * nasids in the partition/machine or the AMO_t array used for + * inter-partition initiation communications. + * + * For SN2 machines, C-Bricks are alway even numbered NASIDs. As + * such, some space will be saved by insisting that nasid information + * passed from SAL always be packed for C-Bricks and the + * cross-partition interrupts use the same packing scheme. 
+ */ +#define XPC_NASID_W_INDEX(_n) (((_n) / 64) / 2) +#define XPC_NASID_B_INDEX(_n) (((_n) / 2) & (64 - 1)) +#define XPC_NASID_IN_ARRAY(_n, _p) ((_p)[XPC_NASID_W_INDEX(_n)] & \ + (1UL << XPC_NASID_B_INDEX(_n))) +#define XPC_NASID_FROM_W_B(_w, _b) (((_w) * 64 + (_b)) * 2) + +#define XPC_HB_DEFAULT_INTERVAL 5 /* incr HB every x secs */ +#define XPC_HB_CHECK_DEFAULT_TIMEOUT 20 /* check HB every x secs */ + +/* define the process name of HB checker and the CPU it is pinned to */ +#define XPC_HB_CHECK_THREAD_NAME "xpc_hb" +#define XPC_HB_CHECK_CPU 0 + +/* define the process name of the discovery thread */ +#define XPC_DISCOVERY_THREAD_NAME "xpc_discovery" + + +#define XPC_HB_ALLOWED(_p, _v) ((_v)->heartbeating_to_mask & (1UL << (_p))) +#define XPC_ALLOW_HB(_p, _v) (_v)->heartbeating_to_mask |= (1UL << (_p)) +#define XPC_DISALLOW_HB(_p, _v) (_v)->heartbeating_to_mask &= (~(1UL << (_p))) + + +/* + * Reserved Page provided by SAL. + * + * SAL provides one page per partition of reserved memory. When SAL + * initialization is complete, SAL_signature, SAL_version, partid, + * part_nasids, and mach_nasids are set. + * + * Note: Until vars_pa is set, the partition XPC code has not been initialized. + */ +struct xpc_rsvd_page { + u64 SAL_signature; /* SAL unique signature */ + u64 SAL_version; /* SAL specified version */ + u8 partid; /* partition ID from SAL */ + u8 version; + u8 pad[6]; /* pad to u64 align */ + u64 vars_pa; + u64 part_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned; + u64 mach_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned; +}; +#define XPC_RP_VERSION _XPC_VERSION(1,0) /* version 1.0 of the reserved page */ + +#define XPC_RSVD_PAGE_ALIGNED_SIZE \ + (L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))) + + +/* + * Define the structures by which XPC variables can be exported to other + * partitions. (There are two: struct xpc_vars and struct xpc_vars_part) + */ + +/* + * The following structure describes the partition generic variables + * needed by other partitions in order to properly initialize. + * + * struct xpc_vars version number also applies to struct xpc_vars_part. + * Changes to either structure and/or related functionality should be + * reflected by incrementing either the major or minor version numbers + * of struct xpc_vars. + */ +struct xpc_vars { + u8 version; + u64 heartbeat; + u64 heartbeating_to_mask; + u64 kdb_status; /* 0 = machine running */ + int act_nasid; + int act_phys_cpuid; + u64 vars_part_pa; + u64 amos_page_pa; /* paddr of page of AMOs from MSPEC driver */ + AMO_t *amos_page; /* vaddr of page of AMOs from MSPEC driver */ + AMO_t *act_amos; /* pointer to the first activation AMO */ +}; +#define XPC_V_VERSION _XPC_VERSION(3,0) /* version 3.0 of the cross vars */ + +#define XPC_VARS_ALIGNED_SIZE (L1_CACHE_ALIGN(sizeof(struct xpc_vars))) + +/* + * The following structure describes the per partition specific variables. + * + * An array of these structures, one per partition, will be defined. As a + * partition becomes active XPC will copy the array entry corresponding to + * itself from that partition. It is desirable that the size of this + * structure evenly divide into a cacheline, such that none of the entries + * in this array crosses a cacheline boundary. As it is now, each entry + * occupies half a cacheline. 
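/*
 * [Editorial example, not part of the patch above.]  A worked example of the
 * nasid packing macros defined above, using nasid 250 (an even, C-brick
 * style nasid):
 *
 *   XPC_NASID_W_INDEX(250)    = (250 / 64) / 2       = 1
 *   XPC_NASID_B_INDEX(250)    = (250 / 2) & 63       = 125 & 63 = 61
 *   XPC_NASID_FROM_W_B(1, 61) = ((1 * 64) + 61) * 2  = 250
 *
 * so word 1, bit 61 of a packed nasid array corresponds to nasid 250, and
 * the round trip through the macros recovers the original nasid.
 */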
+ */ +struct xpc_vars_part { + u64 magic; + + u64 openclose_args_pa; /* physical address of open and close args */ + u64 GPs_pa; /* physical address of Get/Put values */ + + u64 IPI_amo_pa; /* physical address of IPI AMO_t structure */ + int IPI_nasid; /* nasid of where to send IPIs */ + int IPI_phys_cpuid; /* physical CPU ID of where to send IPIs */ + + u8 nchannels; /* #of defined channels supported */ + + u8 reserved[23]; /* pad to a full 64 bytes */ +}; + +/* + * The vars_part MAGIC numbers play a part in the first contact protocol. + * + * MAGIC1 indicates that the per partition specific variables for a remote + * partition have been initialized by this partition. + * + * MAGIC2 indicates that this partition has pulled the remote partititions + * per partition variables that pertain to this partition. + */ +#define XPC_VP_MAGIC1 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */ +#define XPC_VP_MAGIC2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */ + + + +/* + * Functions registered by add_timer() or called by kernel_thread() only + * allow for a single 64-bit argument. The following macros can be used to + * pack and unpack two (32-bit, 16-bit or 8-bit) arguments into or out from + * the passed argument. + */ +#define XPC_PACK_ARGS(_arg1, _arg2) \ + ((((u64) _arg1) & 0xffffffff) | \ + ((((u64) _arg2) & 0xffffffff) << 32)) + +#define XPC_UNPACK_ARG1(_args) (((u64) _args) & 0xffffffff) +#define XPC_UNPACK_ARG2(_args) ((((u64) _args) >> 32) & 0xffffffff) + + + +/* + * Define a Get/Put value pair (pointers) used with a message queue. + */ +struct xpc_gp { + s64 get; /* Get value */ + s64 put; /* Put value */ +}; + +#define XPC_GP_SIZE \ + L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS) + + + +/* + * Define a structure that contains arguments associated with opening and + * closing a channel. + */ +struct xpc_openclose_args { + u16 reason; /* reason why channel is closing */ + u16 msg_size; /* sizeof each message entry */ + u16 remote_nentries; /* #of message entries in remote msg queue */ + u16 local_nentries; /* #of message entries in local msg queue */ + u64 local_msgqueue_pa; /* physical address of local message queue */ +}; + +#define XPC_OPENCLOSE_ARGS_SIZE \ + L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS) + + + +/* struct xpc_msg flags */ + +#define XPC_M_DONE 0x01 /* msg has been received/consumed */ +#define XPC_M_READY 0x02 /* msg is ready to be sent */ +#define XPC_M_INTERRUPT 0x04 /* send interrupt when msg consumed */ + + +#define XPC_MSG_ADDRESS(_payload) \ + ((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET)) + + + +/* + * Defines notify entry. + * + * This is used to notify a message's sender that their message was received + * and consumed by the intended recipient. + */ +struct xpc_notify { + struct semaphore sema; /* notify semaphore */ + u8 type; /* type of notification */ + + /* the following two fields are only used if type == XPC_N_CALL */ + xpc_notify_func func; /* user's notify function */ + void *key; /* pointer to user's key */ +}; + +/* struct xpc_notify type of notification */ + +#define XPC_N_CALL 0x01 /* notify function provided by user */ + + + +/* + * Define the structure that manages all the stuff required by a channel. In + * particular, they are used to manage the messages sent across the channel. + * + * This structure is private to a partition, and is NOT shared across the + * partition boundary. + * + * There is an array of these structures for each remote partition. 
It is + * allocated at the time a partition becomes active. The array contains one + * of these structures for each potential channel connection to that partition. + * + * Each of these structures manages two message queues (circular buffers). + * They are allocated at the time a channel connection is made. One of + * these message queues (local_msgqueue) holds the locally created messages + * that are destined for the remote partition. The other of these message + * queues (remote_msgqueue) is a locally cached copy of the remote partition's + * own local_msgqueue. + * + * The following is a description of the Get/Put pointers used to manage these + * two message queues. Consider the local_msgqueue to be on one partition + * and the remote_msgqueue to be its cached copy on another partition. A + * description of what each of the lettered areas contains is included. + * + * + * local_msgqueue remote_msgqueue + * + * |/////////| |/////////| + * w_remote_GP.get --> +---------+ |/////////| + * | F | |/////////| + * remote_GP.get --> +---------+ +---------+ <-- local_GP->get + * | | | | + * | | | E | + * | | | | + * | | +---------+ <-- w_local_GP.get + * | B | |/////////| + * | | |////D////| + * | | |/////////| + * | | +---------+ <-- w_remote_GP.put + * | | |////C////| + * local_GP->put --> +---------+ +---------+ <-- remote_GP.put + * | | |/////////| + * | A | |/////////| + * | | |/////////| + * w_local_GP.put --> +---------+ |/////////| + * |/////////| |/////////| + * + * + * ( remote_GP.[get|put] are cached copies of the remote + * partition's local_GP->[get|put], and thus their values can + * lag behind their counterparts on the remote partition. ) + * + * + * A - Messages that have been allocated, but have not yet been sent to the + * remote partition. + * + * B - Messages that have been sent, but have not yet been acknowledged by the + * remote partition as having been received. + * + * C - Area that needs to be prepared for the copying of sent messages, by + * the clearing of the message flags of any previously received messages. + * + * D - Area into which sent messages are to be copied from the remote + * partition's local_msgqueue and then delivered to their intended + * recipients. [ To allow for a multi-message copy, another pointer + * (next_msg_to_pull) has been added to keep track of the next message + * number needing to be copied (pulled). It chases after w_remote_GP.put. + * Any messages lying between w_local_GP.get and next_msg_to_pull have + * been copied and are ready to be delivered. ] + * + * E - Messages that have been copied and delivered, but have not yet been + * acknowledged by the recipient as having been received. + * + * F - Messages that have been acknowledged, but XPC has not yet notified the + * sender that the message was received by its intended recipient. + * This is also an area that needs to be prepared for the allocating of + * new messages, by the clearing of the message flags of the acknowledged + * messages. 
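/*
 * [Editorial example, not part of the patch above.]  A small numeric
 * illustration of the Get/Put areas in the diagram below, for a channel
 * with local_nentries = 8.  Suppose, for the local_msgqueue:
 *
 *   remote_GP.get  = 10   messages 0..9 acknowledged by the remote side
 *   local_GP->put  = 13   messages 10..12 sent but unacknowledged (area B)
 *   w_local_GP.put = 15   messages 13..14 allocated, not yet sent (area A)
 *
 * Assuming the usual modulo mapping of message number to queue slot (not
 * shown in this hunk), message 13 occupies slot 13 % 8 = 5.  Because the
 * queue has only 8 slots, message 18 could not be allocated until the get
 * side advances past message 10, whose slot it would reuse.
 */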
+ */ +struct xpc_channel { + partid_t partid; /* ID of remote partition connected */ + spinlock_t lock; /* lock for updating this structure */ + u32 flags; /* general flags */ + + enum xpc_retval reason; /* reason why channel is disconnect'g */ + int reason_line; /* line# disconnect initiated from */ + + u16 number; /* channel # */ + + u16 msg_size; /* sizeof each msg entry */ + u16 local_nentries; /* #of msg entries in local msg queue */ + u16 remote_nentries; /* #of msg entries in remote msg queue*/ + + void *local_msgqueue_base; /* base address of kmalloc'd space */ + struct xpc_msg *local_msgqueue; /* local message queue */ + void *remote_msgqueue_base; /* base address of kmalloc'd space */ + struct xpc_msg *remote_msgqueue;/* cached copy of remote partition's */ + /* local message queue */ + u64 remote_msgqueue_pa; /* phys addr of remote partition's */ + /* local message queue */ + + atomic_t references; /* #of external references to queues */ + + atomic_t n_on_msg_allocate_wq; /* #on msg allocation wait queue */ + wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */ + + /* queue of msg senders who want to be notified when msg received */ + + atomic_t n_to_notify; /* #of msg senders to notify */ + struct xpc_notify *notify_queue;/* notify queue for messages sent */ + + xpc_channel_func func; /* user's channel function */ + void *key; /* pointer to user's key */ + + struct semaphore msg_to_pull_sema; /* next msg to pull serialization */ + struct semaphore teardown_sema; /* wait for teardown completion */ + + struct xpc_openclose_args *local_openclose_args; /* args passed on */ + /* opening or closing of channel */ + + /* various flavors of local and remote Get/Put values */ + + struct xpc_gp *local_GP; /* local Get/Put values */ + struct xpc_gp remote_GP; /* remote Get/Put values */ + struct xpc_gp w_local_GP; /* working local Get/Put values */ + struct xpc_gp w_remote_GP; /* working remote Get/Put values */ + s64 next_msg_to_pull; /* Put value of next msg to pull */ + + /* kthread management related fields */ + +// >>> rethink having kthreads_assigned_limit and kthreads_idle_limit; perhaps +// >>> allow the assigned limit be unbounded and let the idle limit be dynamic +// >>> dependent on activity over the last interval of time + atomic_t kthreads_assigned; /* #of kthreads assigned to channel */ + u32 kthreads_assigned_limit; /* limit on #of kthreads assigned */ + atomic_t kthreads_idle; /* #of kthreads idle waiting for work */ + u32 kthreads_idle_limit; /* limit on #of kthreads idle */ + atomic_t kthreads_active; /* #of kthreads actively working */ + // >>> following field is temporary + u32 kthreads_created; /* total #of kthreads created */ + + wait_queue_head_t idle_wq; /* idle kthread wait queue */ + +} ____cacheline_aligned; + + +/* struct xpc_channel flags */ + +#define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */ + +#define XPC_C_ROPENREPLY 0x00000002 /* remote open channel reply */ +#define XPC_C_OPENREPLY 0x00000004 /* local open channel reply */ +#define XPC_C_ROPENREQUEST 0x00000008 /* remote open channel request */ +#define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */ + +#define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */ +#define XPC_C_CONNECTCALLOUT 0x00000040 /* channel connected callout made */ +#define XPC_C_CONNECTED 0x00000080 /* local channel is connected */ +#define XPC_C_CONNECTING 0x00000100 /* channel is being connected */ + +#define XPC_C_RCLOSEREPLY 0x00000200 /* remote close channel reply */ 
+#define XPC_C_CLOSEREPLY 0x00000400 /* local close channel reply */ +#define XPC_C_RCLOSEREQUEST 0x00000800 /* remote close channel request */ +#define XPC_C_CLOSEREQUEST 0x00001000 /* local close channel request */ + +#define XPC_C_DISCONNECTED 0x00002000 /* channel is disconnected */ +#define XPC_C_DISCONNECTING 0x00004000 /* channel is being disconnected */ + + + +/* + * Manages channels on a partition basis. There is one of these structures + * for each partition (a partition will never utilize the structure that + * represents itself). + */ +struct xpc_partition { + + /* XPC HB infrastructure */ + + u64 remote_rp_pa; /* phys addr of partition's rsvd pg */ + u64 remote_vars_pa; /* phys addr of partition's vars */ + u64 remote_vars_part_pa; /* phys addr of partition's vars part */ + u64 last_heartbeat; /* HB at last read */ + u64 remote_amos_page_pa; /* phys addr of partition's amos page */ + int remote_act_nasid; /* active part's act/deact nasid */ + int remote_act_phys_cpuid; /* active part's act/deact phys cpuid */ + u32 act_IRQ_rcvd; /* IRQs since activation */ + spinlock_t act_lock; /* protect updating of act_state */ + u8 act_state; /* from XPC HB viewpoint */ + enum xpc_retval reason; /* reason partition is deactivating */ + int reason_line; /* line# deactivation initiated from */ + int reactivate_nasid; /* nasid in partition to reactivate */ + + + /* XPC infrastructure referencing and teardown control */ + + u8 setup_state; /* infrastructure setup state */ + wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */ + atomic_t references; /* #of references to infrastructure */ + + + /* + * NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN + * XPC SETS UP THE NECESSARY INFRASTRUCTURE TO SUPPORT CROSS PARTITION + * COMMUNICATION. ALL OF THE FOLLOWING FIELDS WILL BE CLEARED. (THE + * 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.) 
+ */ + + + u8 nchannels; /* #of defined channels supported */ + atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */ + struct xpc_channel *channels;/* array of channel structures */ + + void *local_GPs_base; /* base address of kmalloc'd space */ + struct xpc_gp *local_GPs; /* local Get/Put values */ + void *remote_GPs_base; /* base address of kmalloc'd space */ + struct xpc_gp *remote_GPs;/* copy of remote partition's local Get/Put */ + /* values */ + u64 remote_GPs_pa; /* phys address of remote partition's local */ + /* Get/Put values */ + + + /* fields used to pass args when opening or closing a channel */ + + void *local_openclose_args_base; /* base address of kmalloc'd space */ + struct xpc_openclose_args *local_openclose_args; /* local's args */ + void *remote_openclose_args_base; /* base address of kmalloc'd space */ + struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */ + /* args */ + u64 remote_openclose_args_pa; /* phys addr of remote's args */ + + + /* IPI sending, receiving and handling related fields */ + + int remote_IPI_nasid; /* nasid of where to send IPIs */ + int remote_IPI_phys_cpuid; /* phys CPU ID of where to send IPIs */ + AMO_t *remote_IPI_amo_va; /* address of remote IPI AMO_t structure */ + + AMO_t *local_IPI_amo_va; /* address of IPI AMO_t structure */ + u64 local_IPI_amo; /* IPI amo flags yet to be handled */ + char IPI_owner[8]; /* IPI owner's name */ + struct timer_list dropped_IPI_timer; /* dropped IPI timer */ + + spinlock_t IPI_lock; /* IPI handler lock */ + + + /* channel manager related fields */ + + atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */ + wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */ + +} ____cacheline_aligned; + + +/* struct xpc_partition act_state values (for XPC HB) */ + +#define XPC_P_INACTIVE 0x00 /* partition is not active */ +#define XPC_P_ACTIVATION_REQ 0x01 /* created thread to activate */ +#define XPC_P_ACTIVATING 0x02 /* activation thread started */ +#define XPC_P_ACTIVE 0x03 /* xpc_partition_up() was called */ +#define XPC_P_DEACTIVATING 0x04 /* partition deactivation initiated */ + + +#define XPC_DEACTIVATE_PARTITION(_p, _reason) \ + xpc_deactivate_partition(__LINE__, (_p), (_reason)) + + +/* struct xpc_partition setup_state values */ + +#define XPC_P_UNSET 0x00 /* infrastructure was never setup */ +#define XPC_P_SETUP 0x01 /* infrastructure is setup */ +#define XPC_P_WTEARDOWN 0x02 /* waiting to teardown infrastructure */ +#define XPC_P_TORNDOWN 0x03 /* infrastructure is torndown */ + + +/* + * struct xpc_partition IPI_timer #of seconds to wait before checking for + * dropped IPIs. These occur whenever an IPI amo write doesn't complete until + * after the IPI was received. 
+ */ +#define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ) + + +#define XPC_PARTID(_p) ((partid_t) ((_p) - &xpc_partitions[0])) + + + +/* found in xp_main.c */ +extern struct xpc_registration xpc_registrations[]; + + +/* >>> found in xpc_main.c only */ +extern struct device *xpc_part; +extern struct device *xpc_chan; +extern irqreturn_t xpc_notify_IRQ_handler(int, void *, struct pt_regs *); +extern void xpc_dropped_IPI_check(struct xpc_partition *); +extern void xpc_activate_kthreads(struct xpc_channel *, int); +extern void xpc_create_kthreads(struct xpc_channel *, int); +extern void xpc_disconnect_wait(int); + + +/* found in xpc_main.c and efi-xpc.c */ +extern void xpc_activate_partition(struct xpc_partition *); + + +/* found in xpc_partition.c */ +extern int xpc_exiting; +extern int xpc_hb_interval; +extern int xpc_hb_check_interval; +extern struct xpc_vars *xpc_vars; +extern struct xpc_rsvd_page *xpc_rsvd_page; +extern struct xpc_vars_part *xpc_vars_part; +extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; +extern char xpc_remote_copy_buffer[]; +extern struct xpc_rsvd_page *xpc_rsvd_page_init(void); +extern void xpc_allow_IPI_ops(void); +extern void xpc_restrict_IPI_ops(void); +extern int xpc_identify_act_IRQ_sender(void); +extern enum xpc_retval xpc_mark_partition_active(struct xpc_partition *); +extern void xpc_mark_partition_inactive(struct xpc_partition *); +extern void xpc_discovery(void); +extern void xpc_check_remote_hb(void); +extern void xpc_deactivate_partition(const int, struct xpc_partition *, + enum xpc_retval); +extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *); + + +/* found in xpc_channel.c */ +extern void xpc_initiate_connect(int); +extern void xpc_initiate_disconnect(int); +extern enum xpc_retval xpc_initiate_allocate(partid_t, int, u32, void **); +extern enum xpc_retval xpc_initiate_send(partid_t, int, void *); +extern enum xpc_retval xpc_initiate_send_notify(partid_t, int, void *, + xpc_notify_func, void *); +extern void xpc_initiate_received(partid_t, int, void *); +extern enum xpc_retval xpc_setup_infrastructure(struct xpc_partition *); +extern enum xpc_retval xpc_pull_remote_vars_part(struct xpc_partition *); +extern void xpc_process_channel_activity(struct xpc_partition *); +extern void xpc_connected_callout(struct xpc_channel *); +extern void xpc_deliver_msg(struct xpc_channel *); +extern void xpc_disconnect_channel(const int, struct xpc_channel *, + enum xpc_retval, unsigned long *); +extern void xpc_disconnected_callout(struct xpc_channel *); +extern void xpc_partition_down(struct xpc_partition *, enum xpc_retval); +extern void xpc_teardown_infrastructure(struct xpc_partition *); + + + +static inline void +xpc_wakeup_channel_mgr(struct xpc_partition *part) +{ + if (atomic_inc_return(&part->channel_mgr_requests) == 1) { + wake_up(&part->channel_mgr_wq); + } +} + + + +/* + * These next two inlines are used to keep us from tearing down a channel's + * msg queues while a thread may be referencing them. 
+ */ +static inline void +xpc_msgqueue_ref(struct xpc_channel *ch) +{ + atomic_inc(&ch->references); +} + +static inline void +xpc_msgqueue_deref(struct xpc_channel *ch) +{ + s32 refs = atomic_dec_return(&ch->references); + + DBUG_ON(refs < 0); + if (refs == 0) { + xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]); + } +} + + + +#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \ + xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs) + + +/* + * These two inlines are used to keep us from tearing down a partition's + * setup infrastructure while a thread may be referencing it. + */ +static inline void +xpc_part_deref(struct xpc_partition *part) +{ + s32 refs = atomic_dec_return(&part->references); + + + DBUG_ON(refs < 0); + if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) { + wake_up(&part->teardown_wq); + } +} + +static inline int +xpc_part_ref(struct xpc_partition *part) +{ + int setup; + + + atomic_inc(&part->references); + setup = (part->setup_state == XPC_P_SETUP); + if (!setup) { + xpc_part_deref(part); + } + return setup; +} + + + +/* + * The following macro is to be used for the setting of the reason and + * reason_line fields in both the struct xpc_channel and struct xpc_partition + * structures. + */ +#define XPC_SET_REASON(_p, _reason, _line) \ + { \ + (_p)->reason = _reason; \ + (_p)->reason_line = _line; \ + } + + + +/* + * The following set of macros and inlines are used for the sending and + * receiving of IPIs (also known as IRQs). There are two flavors of IPIs, + * one that is associated with partition activity (SGI_XPC_ACTIVATE) and + * the other that is associated with channel activity (SGI_XPC_NOTIFY). + */ + +static inline u64 +xpc_IPI_receive(AMO_t *amo) +{ + return FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_CLEAR); +} + + +static inline enum xpc_retval +xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector) +{ + int ret = 0; + unsigned long irq_flags; + + + local_irq_save(irq_flags); + + FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, flag); + sn_send_IPI_phys(nasid, phys_cpuid, vector, 0); + + /* + * We must always use the nofault function regardless of whether we + * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we + * didn't, we'd never know that the other partition is down and would + * keep sending IPIs and AMOs to it until the heartbeat times out. + */ + ret = xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->variable), + xp_nofault_PIOR_target)); + + local_irq_restore(irq_flags); + + return ((ret == 0) ? xpcSuccess : xpcPioReadError); +} + + +/* + * IPIs associated with SGI_XPC_ACTIVATE IRQ. + */ + +/* + * Flag the appropriate AMO variable and send an IPI to the specified node. 
+ */ +static inline void +xpc_activate_IRQ_send(u64 amos_page, int from_nasid, int to_nasid, + int to_phys_cpuid) +{ + int w_index = XPC_NASID_W_INDEX(from_nasid); + int b_index = XPC_NASID_B_INDEX(from_nasid); + AMO_t *amos = (AMO_t *) __va(amos_page + + (XP_MAX_PARTITIONS * sizeof(AMO_t))); + + + (void) xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid, + to_phys_cpuid, SGI_XPC_ACTIVATE); +} + +static inline void +xpc_IPI_send_activate(struct xpc_vars *vars) +{ + xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0), + vars->act_nasid, vars->act_phys_cpuid); +} + +static inline void +xpc_IPI_send_activated(struct xpc_partition *part) +{ + xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0), + part->remote_act_nasid, part->remote_act_phys_cpuid); +} + +static inline void +xpc_IPI_send_reactivate(struct xpc_partition *part) +{ + xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid, + xpc_vars->act_nasid, xpc_vars->act_phys_cpuid); +} + + +/* + * IPIs associated with SGI_XPC_NOTIFY IRQ. + */ + +/* + * Send an IPI to the remote partition that is associated with the + * specified channel. + */ +#define XPC_NOTIFY_IRQ_SEND(_ch, _ipi_f, _irq_f) \ + xpc_notify_IRQ_send(_ch, _ipi_f, #_ipi_f, _irq_f) + +static inline void +xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string, + unsigned long *irq_flags) +{ + struct xpc_partition *part = &xpc_partitions[ch->partid]; + enum xpc_retval ret; + + + if (likely(part->act_state != XPC_P_DEACTIVATING)) { + ret = xpc_IPI_send(part->remote_IPI_amo_va, + (u64) ipi_flag << (ch->number * 8), + part->remote_IPI_nasid, + part->remote_IPI_phys_cpuid, + SGI_XPC_NOTIFY); + dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n", + ipi_flag_string, ch->partid, ch->number, ret); + if (unlikely(ret != xpcSuccess)) { + if (irq_flags != NULL) { + spin_unlock_irqrestore(&ch->lock, *irq_flags); + } + XPC_DEACTIVATE_PARTITION(part, ret); + if (irq_flags != NULL) { + spin_lock_irqsave(&ch->lock, *irq_flags); + } + } + } +} + + +/* + * Make it look like the remote partition, which is associated with the + * specified channel, sent us an IPI. This faked IPI will be handled + * by xpc_dropped_IPI_check(). + */ +#define XPC_NOTIFY_IRQ_SEND_LOCAL(_ch, _ipi_f) \ + xpc_notify_IRQ_send_local(_ch, _ipi_f, #_ipi_f) + +static inline void +xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag, + char *ipi_flag_string) +{ + struct xpc_partition *part = &xpc_partitions[ch->partid]; + + + FETCHOP_STORE_OP(TO_AMO((u64) &part->local_IPI_amo_va->variable), + FETCHOP_OR, ((u64) ipi_flag << (ch->number * 8))); + dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n", + ipi_flag_string, ch->partid, ch->number); +} + + +/* + * The sending and receiving of IPIs includes the setting of an AMO variable + * to indicate the reason the IPI was sent. The 64-bit variable is divided + * up into eight bytes, ordered from right to left. Byte zero pertains to + * channel 0, byte one to channel 1, and so on. Each byte is described by + * the following IPI flags. 
+ */ + +#define XPC_IPI_CLOSEREQUEST 0x01 +#define XPC_IPI_CLOSEREPLY 0x02 +#define XPC_IPI_OPENREQUEST 0x04 +#define XPC_IPI_OPENREPLY 0x08 +#define XPC_IPI_MSGREQUEST 0x10 + + +/* given an AMO variable and a channel#, get its associated IPI flags */ +#define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff)) + +#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0f) +#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & 0x1010101010101010) + + +static inline void +xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_openclose_args *args = ch->local_openclose_args; + + + args->reason = ch->reason; + + XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags); +} + +static inline void +xpc_IPI_send_closereply(struct xpc_channel *ch, unsigned long *irq_flags) +{ + XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREPLY, irq_flags); +} + +static inline void +xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_openclose_args *args = ch->local_openclose_args; + + + args->msg_size = ch->msg_size; + args->local_nentries = ch->local_nentries; + + XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREQUEST, irq_flags); +} + +static inline void +xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_openclose_args *args = ch->local_openclose_args; + + + args->remote_nentries = ch->remote_nentries; + args->local_nentries = ch->local_nentries; + args->local_msgqueue_pa = __pa(ch->local_msgqueue); + + XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREPLY, irq_flags); +} + +static inline void +xpc_IPI_send_msgrequest(struct xpc_channel *ch) +{ + XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_MSGREQUEST, NULL); +} + +static inline void +xpc_IPI_send_local_msgrequest(struct xpc_channel *ch) +{ + XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST); +} + + +/* + * Memory for XPC's AMO variables is allocated by the MSPEC driver. These + * pages are located in the lowest granule. The lowest granule uses 4k pages + * for cached references and an alternate TLB handler to never provide a + * cacheable mapping for the entire region. This will prevent speculative + * reading of cached copies of our lines from being issued which will cause + * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64 + * (XP_MAX_PARTITIONS) AMO variables for message notification (xpc_main.c) + * and an additional 16 AMO variables for partition activation (xpc_hb.c). 
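/*
 * [Editorial example, not part of the patch above.]  A worked example of the
 * per-channel IPI flag layout described above.  Suppose the AMO variable
 * currently holds the value 0x0000000000041000UL.  Byte 1 (channel 1) is
 * 0x10 and byte 2 (channel 2) is 0x04, so:
 *
 *   XPC_GET_IPI_FLAGS(0x41000UL, 1) = 0x10   XPC_IPI_MSGREQUEST
 *   XPC_GET_IPI_FLAGS(0x41000UL, 2) = 0x04   XPC_IPI_OPENREQUEST
 *
 * XPC_ANY_OPENCLOSE_IPI_FLAGS_SET() is non-zero for this value (the 0x04
 * nibble on channel 2 survives the 0x0f0f... mask), and so is
 * XPC_ANY_MSG_IPI_FLAGS_SET() (the 0x10 bit on channel 1 survives the
 * 0x1010... mask).
 */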
+ */ +static inline AMO_t * +xpc_IPI_init(partid_t partid) +{ + AMO_t *part_amo = xpc_vars->amos_page + partid; + + + xpc_IPI_receive(part_amo); + return part_amo; +} + + + +static inline enum xpc_retval +xpc_map_bte_errors(bte_result_t error) +{ + switch (error) { + case BTE_SUCCESS: return xpcSuccess; + case BTEFAIL_DIR: return xpcBteDirectoryError; + case BTEFAIL_POISON: return xpcBtePoisonError; + case BTEFAIL_WERR: return xpcBteWriteError; + case BTEFAIL_ACCESS: return xpcBteAccessError; + case BTEFAIL_PWERR: return xpcBtePWriteError; + case BTEFAIL_PRERR: return xpcBtePReadError; + case BTEFAIL_TOUT: return xpcBteTimeOutError; + case BTEFAIL_XTERR: return xpcBteXtalkError; + case BTEFAIL_NOTAVAIL: return xpcBteNotAvailable; + default: return xpcBteUnmappedError; + } +} + + + +static inline void * +xpc_kmalloc_cacheline_aligned(size_t size, int flags, void **base) +{ + /* see if kmalloc will give us cachline aligned memory by default */ + *base = kmalloc(size, flags); + if (*base == NULL) { + return NULL; + } + if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) { + return *base; + } + kfree(*base); + + /* nope, we'll have to do it ourselves */ + *base = kmalloc(size + L1_CACHE_BYTES, flags); + if (*base == NULL) { + return NULL; + } + return (void *) L1_CACHE_ALIGN((u64) *base); +} + + +/* + * Check to see if there is any channel activity to/from the specified + * partition. + */ +static inline void +xpc_check_for_channel_activity(struct xpc_partition *part) +{ + u64 IPI_amo; + unsigned long irq_flags; + + + IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va); + if (IPI_amo == 0) { + return; + } + + spin_lock_irqsave(&part->IPI_lock, irq_flags); + part->local_IPI_amo |= IPI_amo; + spin_unlock_irqrestore(&part->IPI_lock, irq_flags); + + dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n", + XPC_PARTID(part), IPI_amo); + + xpc_wakeup_channel_mgr(part); +} + + +#endif /* _IA64_SN_KERNEL_XPC_H */ + diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c new file mode 100644 index 0000000..0bf6fbc --- /dev/null +++ b/arch/ia64/sn/kernel/xpc_channel.c @@ -0,0 +1,2297 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. + */ + + +/* + * Cross Partition Communication (XPC) channel support. + * + * This is the part of XPC that manages the channels and + * sends/receives messages across them to/from other partitions. + * + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include "xpc.h" + + +/* + * Set up the initial values for the XPartition Communication channels. 
+ */ +static void +xpc_initialize_channels(struct xpc_partition *part, partid_t partid) +{ + int ch_number; + struct xpc_channel *ch; + + + for (ch_number = 0; ch_number < part->nchannels; ch_number++) { + ch = &part->channels[ch_number]; + + ch->partid = partid; + ch->number = ch_number; + ch->flags = XPC_C_DISCONNECTED; + + ch->local_GP = &part->local_GPs[ch_number]; + ch->local_openclose_args = + &part->local_openclose_args[ch_number]; + + atomic_set(&ch->kthreads_assigned, 0); + atomic_set(&ch->kthreads_idle, 0); + atomic_set(&ch->kthreads_active, 0); + + atomic_set(&ch->references, 0); + atomic_set(&ch->n_to_notify, 0); + + spin_lock_init(&ch->lock); + sema_init(&ch->msg_to_pull_sema, 1); /* mutex */ + + atomic_set(&ch->n_on_msg_allocate_wq, 0); + init_waitqueue_head(&ch->msg_allocate_wq); + init_waitqueue_head(&ch->idle_wq); + } +} + + +/* + * Setup the infrastructure necessary to support XPartition Communication + * between the specified remote partition and the local one. + */ +enum xpc_retval +xpc_setup_infrastructure(struct xpc_partition *part) +{ + int ret; + struct timer_list *timer; + partid_t partid = XPC_PARTID(part); + + + /* + * Zero out MOST of the entry for this partition. Only the fields + * starting with `nchannels' will be zeroed. The preceding fields must + * remain `viable' across partition ups and downs, since they may be + * referenced during this memset() operation. + */ + memset(&part->nchannels, 0, sizeof(struct xpc_partition) - + offsetof(struct xpc_partition, nchannels)); + + /* + * Allocate all of the channel structures as a contiguous chunk of + * memory. + */ + part->channels = kmalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS, + GFP_KERNEL); + if (part->channels == NULL) { + dev_err(xpc_chan, "can't get memory for channels\n"); + return xpcNoMemory; + } + memset(part->channels, 0, sizeof(struct xpc_channel) * XPC_NCHANNELS); + + part->nchannels = XPC_NCHANNELS; + + + /* allocate all the required GET/PUT values */ + + part->local_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE, + GFP_KERNEL, &part->local_GPs_base); + if (part->local_GPs == NULL) { + kfree(part->channels); + part->channels = NULL; + dev_err(xpc_chan, "can't get memory for local get/put " + "values\n"); + return xpcNoMemory; + } + memset(part->local_GPs, 0, XPC_GP_SIZE); + + part->remote_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE, + GFP_KERNEL, &part->remote_GPs_base); + if (part->remote_GPs == NULL) { + kfree(part->channels); + part->channels = NULL; + kfree(part->local_GPs_base); + part->local_GPs = NULL; + dev_err(xpc_chan, "can't get memory for remote get/put " + "values\n"); + return xpcNoMemory; + } + memset(part->remote_GPs, 0, XPC_GP_SIZE); + + + /* allocate all the required open and close args */ + + part->local_openclose_args = xpc_kmalloc_cacheline_aligned( + XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL, + &part->local_openclose_args_base); + if (part->local_openclose_args == NULL) { + kfree(part->channels); + part->channels = NULL; + kfree(part->local_GPs_base); + part->local_GPs = NULL; + kfree(part->remote_GPs_base); + part->remote_GPs = NULL; + dev_err(xpc_chan, "can't get memory for local connect args\n"); + return xpcNoMemory; + } + memset(part->local_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE); + + part->remote_openclose_args = xpc_kmalloc_cacheline_aligned( + XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL, + &part->remote_openclose_args_base); + if (part->remote_openclose_args == NULL) { + kfree(part->channels); + part->channels = NULL; + kfree(part->local_GPs_base); + part->local_GPs = 
NULL; + kfree(part->remote_GPs_base); + part->remote_GPs = NULL; + kfree(part->local_openclose_args_base); + part->local_openclose_args = NULL; + dev_err(xpc_chan, "can't get memory for remote connect args\n"); + return xpcNoMemory; + } + memset(part->remote_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE); + + + xpc_initialize_channels(part, partid); + + atomic_set(&part->nchannels_active, 0); + + + /* local_IPI_amo were set to 0 by an earlier memset() */ + + /* Initialize this partitions AMO_t structure */ + part->local_IPI_amo_va = xpc_IPI_init(partid); + + spin_lock_init(&part->IPI_lock); + + atomic_set(&part->channel_mgr_requests, 1); + init_waitqueue_head(&part->channel_mgr_wq); + + sprintf(part->IPI_owner, "xpc%02d", partid); + ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, SA_SHIRQ, + part->IPI_owner, (void *) (u64) partid); + if (ret != 0) { + kfree(part->channels); + part->channels = NULL; + kfree(part->local_GPs_base); + part->local_GPs = NULL; + kfree(part->remote_GPs_base); + part->remote_GPs = NULL; + kfree(part->local_openclose_args_base); + part->local_openclose_args = NULL; + kfree(part->remote_openclose_args_base); + part->remote_openclose_args = NULL; + dev_err(xpc_chan, "can't register NOTIFY IRQ handler, " + "errno=%d\n", -ret); + return xpcLackOfResources; + } + + /* Setup a timer to check for dropped IPIs */ + timer = &part->dropped_IPI_timer; + init_timer(timer); + timer->function = (void (*)(unsigned long)) xpc_dropped_IPI_check; + timer->data = (unsigned long) part; + timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT; + add_timer(timer); + + /* + * With the setting of the partition setup_state to XPC_P_SETUP, we're + * declaring that this partition is ready to go. + */ + (volatile u8) part->setup_state = XPC_P_SETUP; + + + /* + * Setup the per partition specific variables required by the + * remote partition to establish channel connections with us. + * + * The setting of the magic # indicates that these per partition + * specific variables are ready to be used. + */ + xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs); + xpc_vars_part[partid].openclose_args_pa = + __pa(part->local_openclose_args); + xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va); + xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(smp_processor_id()); + xpc_vars_part[partid].IPI_phys_cpuid = + cpu_physical_id(smp_processor_id()); + xpc_vars_part[partid].nchannels = part->nchannels; + (volatile u64) xpc_vars_part[partid].magic = XPC_VP_MAGIC1; + + return xpcSuccess; +} + + +/* + * Create a wrapper that hides the underlying mechanism for pulling a cacheline + * (or multiple cachelines) from a remote partition. + * + * src must be a cacheline aligned physical address on the remote partition. + * dst must be a cacheline aligned virtual address on this partition. 
+ * cnt must be a multiple of the cacheline size.
+ */
+static enum xpc_retval
+xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
+ const void *src, size_t cnt)
+{
+ bte_result_t bte_ret;
+
+
+ DBUG_ON((u64) src != L1_CACHE_ALIGN((u64) src));
+ DBUG_ON((u64) dst != L1_CACHE_ALIGN((u64) dst));
+ DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));
+
+ if (part->act_state == XPC_P_DEACTIVATING) {
+ return part->reason;
+ }
+
+ bte_ret = xp_bte_copy((u64) src, (u64) ia64_tpa((u64) dst),
+ (u64) cnt, (BTE_NORMAL | BTE_WACQUIRE), NULL);
+ if (bte_ret == BTE_SUCCESS) {
+ return xpcSuccess;
+ }
+
+ dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n",
+ XPC_PARTID(part), bte_ret);
+
+ return xpc_map_bte_errors(bte_ret);
+}
+
+
+/*
+ * Pull the remote per partition specific variables from the specified
+ * partition.
+ */
+enum xpc_retval
+xpc_pull_remote_vars_part(struct xpc_partition *part)
+{
+ u8 buffer[L1_CACHE_BYTES * 2];
+ struct xpc_vars_part *pulled_entry_cacheline =
+ (struct xpc_vars_part *) L1_CACHE_ALIGN((u64) buffer);
+ struct xpc_vars_part *pulled_entry;
+ u64 remote_entry_cacheline_pa, remote_entry_pa;
+ partid_t partid = XPC_PARTID(part);
+ enum xpc_retval ret;
+
+
+ /* pull the cacheline that contains the variables we're interested in */
+
+ DBUG_ON(part->remote_vars_part_pa !=
+ L1_CACHE_ALIGN(part->remote_vars_part_pa));
+ DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2);
+
+ remote_entry_pa = part->remote_vars_part_pa +
+ sn_partition_id * sizeof(struct xpc_vars_part);
+
+ remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));
+
+ pulled_entry = (struct xpc_vars_part *) ((u64) pulled_entry_cacheline +
+ (remote_entry_pa & (L1_CACHE_BYTES - 1)));
+
+ ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline,
+ (void *) remote_entry_cacheline_pa,
+ L1_CACHE_BYTES);
+ if (ret != xpcSuccess) {
+ dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
+ "partition %d, ret=%d\n", partid, ret);
+ return ret;
+ }
+
+
+ /* see if they've been set up yet */
+
+ if (pulled_entry->magic != XPC_VP_MAGIC1 &&
+ pulled_entry->magic != XPC_VP_MAGIC2) {
+
+ if (pulled_entry->magic != 0) {
+ dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
+ "partition %d has bad magic value (=0x%lx)\n",
+ partid, sn_partition_id, pulled_entry->magic);
+ return xpcBadMagic;
+ }
+
+ /* they've not been initialized yet */
+ return xpcRetry;
+ }
+
+ if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) {
+
+ /* validate the variables */
+
+ if (pulled_entry->GPs_pa == 0 ||
+ pulled_entry->openclose_args_pa == 0 ||
+ pulled_entry->IPI_amo_pa == 0) {
+
+ dev_err(xpc_chan, "partition %d's XPC vars_part for "
+ "partition %d are not valid\n", partid,
+ sn_partition_id);
+ return xpcInvalidAddress;
+ }
+
+ /* the variables we imported look to be valid */
+
+ part->remote_GPs_pa = pulled_entry->GPs_pa;
+ part->remote_openclose_args_pa =
+ pulled_entry->openclose_args_pa;
+ part->remote_IPI_amo_va =
+ (AMO_t *) __va(pulled_entry->IPI_amo_pa);
+ part->remote_IPI_nasid = pulled_entry->IPI_nasid;
+ part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid;
+
+ if (part->nchannels > pulled_entry->nchannels) {
+ part->nchannels = pulled_entry->nchannels;
+ }
+
+ /* let the other side know that we've pulled their variables */
+
+ (volatile u64) xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
+ }
+
+ if (pulled_entry->magic == XPC_VP_MAGIC1) {
+ return xpcRetry;
+ }
+
+ return xpcSuccess;
+}
+
+
+/*
+ * Get the IPI flags and pull the openclose args and/or remote GPs as needed.
+ */ +static u64 +xpc_get_IPI_flags(struct xpc_partition *part) +{ + unsigned long irq_flags; + u64 IPI_amo; + enum xpc_retval ret; + + + /* + * See if there are any IPI flags to be handled. + */ + + spin_lock_irqsave(&part->IPI_lock, irq_flags); + if ((IPI_amo = part->local_IPI_amo) != 0) { + part->local_IPI_amo = 0; + } + spin_unlock_irqrestore(&part->IPI_lock, irq_flags); + + + if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) { + ret = xpc_pull_remote_cachelines(part, + part->remote_openclose_args, + (void *) part->remote_openclose_args_pa, + XPC_OPENCLOSE_ARGS_SIZE); + if (ret != xpcSuccess) { + XPC_DEACTIVATE_PARTITION(part, ret); + + dev_dbg(xpc_chan, "failed to pull openclose args from " + "partition %d, ret=%d\n", XPC_PARTID(part), + ret); + + /* don't bother processing IPIs anymore */ + IPI_amo = 0; + } + } + + if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) { + ret = xpc_pull_remote_cachelines(part, part->remote_GPs, + (void *) part->remote_GPs_pa, + XPC_GP_SIZE); + if (ret != xpcSuccess) { + XPC_DEACTIVATE_PARTITION(part, ret); + + dev_dbg(xpc_chan, "failed to pull GPs from partition " + "%d, ret=%d\n", XPC_PARTID(part), ret); + + /* don't bother processing IPIs anymore */ + IPI_amo = 0; + } + } + + return IPI_amo; +} + + +/* + * Allocate the local message queue and the notify queue. + */ +static enum xpc_retval +xpc_allocate_local_msgqueue(struct xpc_channel *ch) +{ + unsigned long irq_flags; + int nentries; + size_t nbytes; + + + // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between + // >>> iterations of the for-loop, bail if set? + + // >>> should we impose a minumum #of entries? like 4 or 8? + for (nentries = ch->local_nentries; nentries > 0; nentries--) { + + nbytes = nentries * ch->msg_size; + ch->local_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes, + (GFP_KERNEL | GFP_DMA), + &ch->local_msgqueue_base); + if (ch->local_msgqueue == NULL) { + continue; + } + memset(ch->local_msgqueue, 0, nbytes); + + nbytes = nentries * sizeof(struct xpc_notify); + ch->notify_queue = kmalloc(nbytes, (GFP_KERNEL | GFP_DMA)); + if (ch->notify_queue == NULL) { + kfree(ch->local_msgqueue_base); + ch->local_msgqueue = NULL; + continue; + } + memset(ch->notify_queue, 0, nbytes); + + spin_lock_irqsave(&ch->lock, irq_flags); + if (nentries < ch->local_nentries) { + dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, " + "partid=%d, channel=%d\n", nentries, + ch->local_nentries, ch->partid, ch->number); + + ch->local_nentries = nentries; + } + spin_unlock_irqrestore(&ch->lock, irq_flags); + return xpcSuccess; + } + + dev_dbg(xpc_chan, "can't get memory for local message queue and notify " + "queue, partid=%d, channel=%d\n", ch->partid, ch->number); + return xpcNoMemory; +} + + +/* + * Allocate the cached remote message queue. + */ +static enum xpc_retval +xpc_allocate_remote_msgqueue(struct xpc_channel *ch) +{ + unsigned long irq_flags; + int nentries; + size_t nbytes; + + + DBUG_ON(ch->remote_nentries <= 0); + + // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between + // >>> iterations of the for-loop, bail if set? + + // >>> should we impose a minumum #of entries? like 4 or 8? 
+ for (nentries = ch->remote_nentries; nentries > 0; nentries--) { + + nbytes = nentries * ch->msg_size; + ch->remote_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes, + (GFP_KERNEL | GFP_DMA), + &ch->remote_msgqueue_base); + if (ch->remote_msgqueue == NULL) { + continue; + } + memset(ch->remote_msgqueue, 0, nbytes); + + spin_lock_irqsave(&ch->lock, irq_flags); + if (nentries < ch->remote_nentries) { + dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, " + "partid=%d, channel=%d\n", nentries, + ch->remote_nentries, ch->partid, ch->number); + + ch->remote_nentries = nentries; + } + spin_unlock_irqrestore(&ch->lock, irq_flags); + return xpcSuccess; + } + + dev_dbg(xpc_chan, "can't get memory for cached remote message queue, " + "partid=%d, channel=%d\n", ch->partid, ch->number); + return xpcNoMemory; +} + + +/* + * Allocate message queues and other stuff associated with a channel. + * + * Note: Assumes all of the channel sizes are filled in. + */ +static enum xpc_retval +xpc_allocate_msgqueues(struct xpc_channel *ch) +{ + unsigned long irq_flags; + int i; + enum xpc_retval ret; + + + DBUG_ON(ch->flags & XPC_C_SETUP); + + if ((ret = xpc_allocate_local_msgqueue(ch)) != xpcSuccess) { + return ret; + } + + if ((ret = xpc_allocate_remote_msgqueue(ch)) != xpcSuccess) { + kfree(ch->local_msgqueue_base); + ch->local_msgqueue = NULL; + kfree(ch->notify_queue); + ch->notify_queue = NULL; + return ret; + } + + for (i = 0; i < ch->local_nentries; i++) { + /* use a semaphore as an event wait queue */ + sema_init(&ch->notify_queue[i].sema, 0); + } + + sema_init(&ch->teardown_sema, 0); /* event wait */ + + spin_lock_irqsave(&ch->lock, irq_flags); + ch->flags |= XPC_C_SETUP; + spin_unlock_irqrestore(&ch->lock, irq_flags); + + return xpcSuccess; +} + + +/* + * Process a connect message from a remote partition. + * + * Note: xpc_process_connect() is expecting to be called with the + * spin_lock_irqsave held and will leave it locked upon return. + */ +static void +xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags) +{ + enum xpc_retval ret; + + + DBUG_ON(!spin_is_locked(&ch->lock)); + + if (!(ch->flags & XPC_C_OPENREQUEST) || + !(ch->flags & XPC_C_ROPENREQUEST)) { + /* nothing more to do for now */ + return; + } + DBUG_ON(!(ch->flags & XPC_C_CONNECTING)); + + if (!(ch->flags & XPC_C_SETUP)) { + spin_unlock_irqrestore(&ch->lock, *irq_flags); + ret = xpc_allocate_msgqueues(ch); + spin_lock_irqsave(&ch->lock, *irq_flags); + + if (ret != xpcSuccess) { + XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags); + } + if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) { + return; + } + + DBUG_ON(!(ch->flags & XPC_C_SETUP)); + DBUG_ON(ch->local_msgqueue == NULL); + DBUG_ON(ch->remote_msgqueue == NULL); + } + + if (!(ch->flags & XPC_C_OPENREPLY)) { + ch->flags |= XPC_C_OPENREPLY; + xpc_IPI_send_openreply(ch, irq_flags); + } + + if (!(ch->flags & XPC_C_ROPENREPLY)) { + return; + } + + DBUG_ON(ch->remote_msgqueue_pa == 0); + + ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */ + + dev_info(xpc_chan, "channel %d to partition %d connected\n", + ch->number, ch->partid); + + spin_unlock_irqrestore(&ch->lock, *irq_flags); + xpc_create_kthreads(ch, 1); + spin_lock_irqsave(&ch->lock, *irq_flags); +} + + +/* + * Free up message queues and other stuff that were allocated for the specified + * channel. + * + * Note: ch->reason and ch->reason_line are left set for debugging purposes, + * they're cleared when XPC_C_DISCONNECTED is cleared. 
+ */ +static void +xpc_free_msgqueues(struct xpc_channel *ch) +{ + DBUG_ON(!spin_is_locked(&ch->lock)); + DBUG_ON(atomic_read(&ch->n_to_notify) != 0); + + ch->remote_msgqueue_pa = 0; + ch->func = NULL; + ch->key = NULL; + ch->msg_size = 0; + ch->local_nentries = 0; + ch->remote_nentries = 0; + ch->kthreads_assigned_limit = 0; + ch->kthreads_idle_limit = 0; + + ch->local_GP->get = 0; + ch->local_GP->put = 0; + ch->remote_GP.get = 0; + ch->remote_GP.put = 0; + ch->w_local_GP.get = 0; + ch->w_local_GP.put = 0; + ch->w_remote_GP.get = 0; + ch->w_remote_GP.put = 0; + ch->next_msg_to_pull = 0; + + if (ch->flags & XPC_C_SETUP) { + ch->flags &= ~XPC_C_SETUP; + + dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n", + ch->flags, ch->partid, ch->number); + + kfree(ch->local_msgqueue_base); + ch->local_msgqueue = NULL; + kfree(ch->remote_msgqueue_base); + ch->remote_msgqueue = NULL; + kfree(ch->notify_queue); + ch->notify_queue = NULL; + + /* in case someone is waiting for the teardown to complete */ + up(&ch->teardown_sema); + } +} + + +/* + * spin_lock_irqsave() is expected to be held on entry. + */ +static void +xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_partition *part = &xpc_partitions[ch->partid]; + u32 ch_flags = ch->flags; + + + DBUG_ON(!spin_is_locked(&ch->lock)); + + if (!(ch->flags & XPC_C_DISCONNECTING)) { + return; + } + + DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); + + /* make sure all activity has settled down first */ + + if (atomic_read(&ch->references) > 0) { + return; + } + DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0); + + /* it's now safe to free the channel's message queues */ + + xpc_free_msgqueues(ch); + DBUG_ON(ch->flags & XPC_C_SETUP); + + if (part->act_state != XPC_P_DEACTIVATING) { + + /* as long as the other side is up do the full protocol */ + + if (!(ch->flags & XPC_C_RCLOSEREQUEST)) { + return; + } + + if (!(ch->flags & XPC_C_CLOSEREPLY)) { + ch->flags |= XPC_C_CLOSEREPLY; + xpc_IPI_send_closereply(ch, irq_flags); + } + + if (!(ch->flags & XPC_C_RCLOSEREPLY)) { + return; + } + } + + /* both sides are disconnected now */ + + ch->flags = XPC_C_DISCONNECTED; /* clear all flags, but this one */ + + atomic_dec(&part->nchannels_active); + + if (ch_flags & XPC_C_WASCONNECTED) { + dev_info(xpc_chan, "channel %d to partition %d disconnected, " + "reason=%d\n", ch->number, ch->partid, ch->reason); + } +} + + +/* + * Process a change in the channel's remote connection state. + */ +static void +xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, + u8 IPI_flags) +{ + unsigned long irq_flags; + struct xpc_openclose_args *args = + &part->remote_openclose_args[ch_number]; + struct xpc_channel *ch = &part->channels[ch_number]; + enum xpc_retval reason; + + + + spin_lock_irqsave(&ch->lock, irq_flags); + + + if (IPI_flags & XPC_IPI_CLOSEREQUEST) { + + dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received " + "from partid=%d, channel=%d\n", args->reason, + ch->partid, ch->number); + + /* + * If RCLOSEREQUEST is set, we're probably waiting for + * RCLOSEREPLY. We should find it and a ROPENREQUEST packed + * with this RCLOSEQREUQEST in the IPI_flags. 
+ */ + + if (ch->flags & XPC_C_RCLOSEREQUEST) { + DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING)); + DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); + DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY)); + DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY); + + DBUG_ON(!(IPI_flags & XPC_IPI_CLOSEREPLY)); + IPI_flags &= ~XPC_IPI_CLOSEREPLY; + ch->flags |= XPC_C_RCLOSEREPLY; + + /* both sides have finished disconnecting */ + xpc_process_disconnect(ch, &irq_flags); + } + + if (ch->flags & XPC_C_DISCONNECTED) { + // >>> explain this section + + if (!(IPI_flags & XPC_IPI_OPENREQUEST)) { + DBUG_ON(part->act_state != + XPC_P_DEACTIVATING); + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + + XPC_SET_REASON(ch, 0, 0); + ch->flags &= ~XPC_C_DISCONNECTED; + + atomic_inc(&part->nchannels_active); + ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST); + } + + IPI_flags &= ~(XPC_IPI_OPENREQUEST | XPC_IPI_OPENREPLY); + + /* + * The meaningful CLOSEREQUEST connection state fields are: + * reason = reason connection is to be closed + */ + + ch->flags |= XPC_C_RCLOSEREQUEST; + + if (!(ch->flags & XPC_C_DISCONNECTING)) { + reason = args->reason; + if (reason <= xpcSuccess || reason > xpcUnknownReason) { + reason = xpcUnknownReason; + } else if (reason == xpcUnregistering) { + reason = xpcOtherUnregistering; + } + + XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); + } else { + xpc_process_disconnect(ch, &irq_flags); + } + } + + + if (IPI_flags & XPC_IPI_CLOSEREPLY) { + + dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d," + " channel=%d\n", ch->partid, ch->number); + + if (ch->flags & XPC_C_DISCONNECTED) { + DBUG_ON(part->act_state != XPC_P_DEACTIVATING); + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + + DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); + DBUG_ON(!(ch->flags & XPC_C_RCLOSEREQUEST)); + + ch->flags |= XPC_C_RCLOSEREPLY; + + if (ch->flags & XPC_C_CLOSEREPLY) { + /* both sides have finished disconnecting */ + xpc_process_disconnect(ch, &irq_flags); + } + } + + + if (IPI_flags & XPC_IPI_OPENREQUEST) { + + dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, " + "local_nentries=%d) received from partid=%d, " + "channel=%d\n", args->msg_size, args->local_nentries, + ch->partid, ch->number); + + if ((ch->flags & XPC_C_DISCONNECTING) || + part->act_state == XPC_P_DEACTIVATING) { + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED | + XPC_C_OPENREQUEST))); + DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | + XPC_C_OPENREPLY | XPC_C_CONNECTED)); + + /* + * The meaningful OPENREQUEST connection state fields are: + * msg_size = size of channel's messages in bytes + * local_nentries = remote partition's local_nentries + */ + DBUG_ON(args->msg_size == 0); + DBUG_ON(args->local_nentries == 0); + + ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING); + ch->remote_nentries = args->local_nentries; + + + if (ch->flags & XPC_C_OPENREQUEST) { + if (args->msg_size != ch->msg_size) { + XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes, + &irq_flags); + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + } else { + ch->msg_size = args->msg_size; + + XPC_SET_REASON(ch, 0, 0); + ch->flags &= ~XPC_C_DISCONNECTED; + + atomic_inc(&part->nchannels_active); + } + + xpc_process_connect(ch, &irq_flags); + } + + + if (IPI_flags & XPC_IPI_OPENREPLY) { + + dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, " + "local_nentries=%d, remote_nentries=%d) received from " + "partid=%d, channel=%d\n", args->local_msgqueue_pa, + 
args->local_nentries, args->remote_nentries, + ch->partid, ch->number); + + if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) { + spin_unlock_irqrestore(&ch->lock, irq_flags); + return; + } + DBUG_ON(!(ch->flags & XPC_C_OPENREQUEST)); + DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST)); + DBUG_ON(ch->flags & XPC_C_CONNECTED); + + /* + * The meaningful OPENREPLY connection state fields are: + * local_msgqueue_pa = physical address of remote + * partition's local_msgqueue + * local_nentries = remote partition's local_nentries + * remote_nentries = remote partition's remote_nentries + */ + DBUG_ON(args->local_msgqueue_pa == 0); + DBUG_ON(args->local_nentries == 0); + DBUG_ON(args->remote_nentries == 0); + + ch->flags |= XPC_C_ROPENREPLY; + ch->remote_msgqueue_pa = args->local_msgqueue_pa; + + if (args->local_nentries < ch->remote_nentries) { + dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new " + "remote_nentries=%d, old remote_nentries=%d, " + "partid=%d, channel=%d\n", + args->local_nentries, ch->remote_nentries, + ch->partid, ch->number); + + ch->remote_nentries = args->local_nentries; + } + if (args->remote_nentries < ch->local_nentries) { + dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new " + "local_nentries=%d, old local_nentries=%d, " + "partid=%d, channel=%d\n", + args->remote_nentries, ch->local_nentries, + ch->partid, ch->number); + + ch->local_nentries = args->remote_nentries; + } + + xpc_process_connect(ch, &irq_flags); + } + + spin_unlock_irqrestore(&ch->lock, irq_flags); +} + + +/* + * Attempt to establish a channel connection to a remote partition. + */ +static enum xpc_retval +xpc_connect_channel(struct xpc_channel *ch) +{ + unsigned long irq_flags; + struct xpc_registration *registration = &xpc_registrations[ch->number]; + + + if (down_interruptible(®istration->sema) != 0) { + return xpcInterrupted; + } + + if (!XPC_CHANNEL_REGISTERED(ch->number)) { + up(®istration->sema); + return xpcUnregistered; + } + + spin_lock_irqsave(&ch->lock, irq_flags); + + DBUG_ON(ch->flags & XPC_C_CONNECTED); + DBUG_ON(ch->flags & XPC_C_OPENREQUEST); + + if (ch->flags & XPC_C_DISCONNECTING) { + spin_unlock_irqrestore(&ch->lock, irq_flags); + up(®istration->sema); + return ch->reason; + } + + + /* add info from the channel connect registration to the channel */ + + ch->kthreads_assigned_limit = registration->assigned_limit; + ch->kthreads_idle_limit = registration->idle_limit; + DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0); + DBUG_ON(atomic_read(&ch->kthreads_idle) != 0); + DBUG_ON(atomic_read(&ch->kthreads_active) != 0); + + ch->func = registration->func; + DBUG_ON(registration->func == NULL); + ch->key = registration->key; + + ch->local_nentries = registration->nentries; + + if (ch->flags & XPC_C_ROPENREQUEST) { + if (registration->msg_size != ch->msg_size) { + /* the local and remote sides aren't the same */ + + /* + * Because XPC_DISCONNECT_CHANNEL() can block we're + * forced to up the registration sema before we unlock + * the channel lock. But that's okay here because we're + * done with the part that required the registration + * sema. XPC_DISCONNECT_CHANNEL() requires that the + * channel lock be locked and will unlock and relock + * the channel lock as needed. 
+ */ + up(®istration->sema); + XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes, + &irq_flags); + spin_unlock_irqrestore(&ch->lock, irq_flags); + return xpcUnequalMsgSizes; + } + } else { + ch->msg_size = registration->msg_size; + + XPC_SET_REASON(ch, 0, 0); + ch->flags &= ~XPC_C_DISCONNECTED; + + atomic_inc(&xpc_partitions[ch->partid].nchannels_active); + } + + up(®istration->sema); + + + /* initiate the connection */ + + ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING); + xpc_IPI_send_openrequest(ch, &irq_flags); + + xpc_process_connect(ch, &irq_flags); + + spin_unlock_irqrestore(&ch->lock, irq_flags); + + return xpcSuccess; +} + + +/* + * Notify those who wanted to be notified upon delivery of their message. + */ +static void +xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put) +{ + struct xpc_notify *notify; + u8 notify_type; + s64 get = ch->w_remote_GP.get - 1; + + + while (++get < put && atomic_read(&ch->n_to_notify) > 0) { + + notify = &ch->notify_queue[get % ch->local_nentries]; + + /* + * See if the notify entry indicates it was associated with + * a message who's sender wants to be notified. It is possible + * that it is, but someone else is doing or has done the + * notification. + */ + notify_type = notify->type; + if (notify_type == 0 || + cmpxchg(¬ify->type, notify_type, 0) != + notify_type) { + continue; + } + + DBUG_ON(notify_type != XPC_N_CALL); + + atomic_dec(&ch->n_to_notify); + + if (notify->func != NULL) { + dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, " + "msg_number=%ld, partid=%d, channel=%d\n", + (void *) notify, get, ch->partid, ch->number); + + notify->func(reason, ch->partid, ch->number, + notify->key); + + dev_dbg(xpc_chan, "notify->func() returned, " + "notify=0x%p, msg_number=%ld, partid=%d, " + "channel=%d\n", (void *) notify, get, + ch->partid, ch->number); + } + } +} + + +/* + * Clear some of the msg flags in the local message queue. + */ +static inline void +xpc_clear_local_msgqueue_flags(struct xpc_channel *ch) +{ + struct xpc_msg *msg; + s64 get; + + + get = ch->w_remote_GP.get; + do { + msg = (struct xpc_msg *) ((u64) ch->local_msgqueue + + (get % ch->local_nentries) * ch->msg_size); + msg->flags = 0; + } while (++get < (volatile s64) ch->remote_GP.get); +} + + +/* + * Clear some of the msg flags in the remote message queue. + */ +static inline void +xpc_clear_remote_msgqueue_flags(struct xpc_channel *ch) +{ + struct xpc_msg *msg; + s64 put; + + + put = ch->w_remote_GP.put; + do { + msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + + (put % ch->remote_nentries) * ch->msg_size); + msg->flags = 0; + } while (++put < (volatile s64) ch->remote_GP.put); +} + + +static void +xpc_process_msg_IPI(struct xpc_partition *part, int ch_number) +{ + struct xpc_channel *ch = &part->channels[ch_number]; + int nmsgs_sent; + + + ch->remote_GP = part->remote_GPs[ch_number]; + + + /* See what, if anything, has changed for each connected channel */ + + xpc_msgqueue_ref(ch); + + if (ch->w_remote_GP.get == ch->remote_GP.get && + ch->w_remote_GP.put == ch->remote_GP.put) { + /* nothing changed since GPs were last pulled */ + xpc_msgqueue_deref(ch); + return; + } + + if (!(ch->flags & XPC_C_CONNECTED)){ + xpc_msgqueue_deref(ch); + return; + } + + + /* + * First check to see if messages recently sent by us have been + * received by the other side. (The remote GET value will have + * changed since we last looked at it.) 
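
For the Get/Put bookkeeping used throughout this function: each side publishes a Get (next entry it will consume) and a Put (next entry it will fill), and the freshly pulled remote_GP is compared against the cached working copy w_remote_GP. A standalone sketch, with invented numbers, of what those two deltas mean:

#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for the per-channel Get/Put pair */
struct gp {
	int64_t get;	/* next entry to be consumed */
	int64_t put;	/* next entry to be filled */
};

int main(void)
{
	struct gp w_remote_GP = { .get = 10, .put = 25 };	/* cached from the last look */
	struct gp remote_GP   = { .get = 14, .put = 30 };	/* just pulled from the peer */

	/* entries 10..13 that we sent have now been received by the other side */
	printf("messages newly received by peer: %lld\n",
	       (long long) (remote_GP.get - w_remote_GP.get));

	/* entries 25..29 are new messages the peer has queued for us to pull */
	printf("messages newly sent by peer: %lld\n",
	       (long long) (remote_GP.put - w_remote_GP.put));
	return 0;
}
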
+ */ + + if (ch->w_remote_GP.get != ch->remote_GP.get) { + + /* + * We need to notify any senders that want to be notified + * that their sent messages have been received by their + * intended recipients. We need to do this before updating + * w_remote_GP.get so that we don't allocate the same message + * queue entries prematurely (see xpc_allocate_msg()). + */ + if (atomic_read(&ch->n_to_notify) > 0) { + /* + * Notify senders that messages sent have been + * received and delivered by the other side. + */ + xpc_notify_senders(ch, xpcMsgDelivered, + ch->remote_GP.get); + } + + /* + * Clear msg->flags in previously sent messages, so that + * they're ready for xpc_allocate_msg(). + */ + xpc_clear_local_msgqueue_flags(ch); + + (volatile s64) ch->w_remote_GP.get = ch->remote_GP.get; + + dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, " + "channel=%d\n", ch->w_remote_GP.get, ch->partid, + ch->number); + + /* + * If anyone was waiting for message queue entries to become + * available, wake them up. + */ + if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) { + wake_up(&ch->msg_allocate_wq); + } + } + + + /* + * Now check for newly sent messages by the other side. (The remote + * PUT value will have changed since we last looked at it.) + */ + + if (ch->w_remote_GP.put != ch->remote_GP.put) { + /* + * Clear msg->flags in previously received messages, so that + * they're ready for xpc_get_deliverable_msg(). + */ + xpc_clear_remote_msgqueue_flags(ch); + + (volatile s64) ch->w_remote_GP.put = ch->remote_GP.put; + + dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, " + "channel=%d\n", ch->w_remote_GP.put, ch->partid, + ch->number); + + nmsgs_sent = ch->w_remote_GP.put - ch->w_local_GP.get; + if (nmsgs_sent > 0) { + dev_dbg(xpc_chan, "msgs waiting to be copied and " + "delivered=%d, partid=%d, channel=%d\n", + nmsgs_sent, ch->partid, ch->number); + + if (ch->flags & XPC_C_CONNECTCALLOUT) { + xpc_activate_kthreads(ch, nmsgs_sent); + } + } + } + + xpc_msgqueue_deref(ch); +} + + +void +xpc_process_channel_activity(struct xpc_partition *part) +{ + unsigned long irq_flags; + u64 IPI_amo, IPI_flags; + struct xpc_channel *ch; + int ch_number; + + + IPI_amo = xpc_get_IPI_flags(part); + + /* + * Initiate channel connections for registered channels. + * + * For each connected channel that has pending messages activate idle + * kthreads and/or create new kthreads as needed. + */ + + for (ch_number = 0; ch_number < part->nchannels; ch_number++) { + ch = &part->channels[ch_number]; + + + /* + * Process any open or close related IPI flags, and then deal + * with connecting or disconnecting the channel as required. 
+ */ + + IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number); + + if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags)) { + xpc_process_openclose_IPI(part, ch_number, IPI_flags); + } + + + if (ch->flags & XPC_C_DISCONNECTING) { + spin_lock_irqsave(&ch->lock, irq_flags); + xpc_process_disconnect(ch, &irq_flags); + spin_unlock_irqrestore(&ch->lock, irq_flags); + continue; + } + + if (part->act_state == XPC_P_DEACTIVATING) { + continue; + } + + if (!(ch->flags & XPC_C_CONNECTED)) { + if (!(ch->flags & XPC_C_OPENREQUEST)) { + DBUG_ON(ch->flags & XPC_C_SETUP); + (void) xpc_connect_channel(ch); + } else { + spin_lock_irqsave(&ch->lock, irq_flags); + xpc_process_connect(ch, &irq_flags); + spin_unlock_irqrestore(&ch->lock, irq_flags); + } + continue; + } + + + /* + * Process any message related IPI flags, this may involve the + * activation of kthreads to deliver any pending messages sent + * from the other partition. + */ + + if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags)) { + xpc_process_msg_IPI(part, ch_number); + } + } +} + + +/* + * XPC's heartbeat code calls this function to inform XPC that a partition has + * gone down. XPC responds by tearing down the XPartition Communication + * infrastructure used for the just downed partition. + * + * XPC's heartbeat code will never call this function and xpc_partition_up() + * at the same time. Nor will it ever make multiple calls to either function + * at the same time. + */ +void +xpc_partition_down(struct xpc_partition *part, enum xpc_retval reason) +{ + unsigned long irq_flags; + int ch_number; + struct xpc_channel *ch; + + + dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n", + XPC_PARTID(part), reason); + + if (!xpc_part_ref(part)) { + /* infrastructure for this partition isn't currently set up */ + return; + } + + + /* disconnect all channels associated with the downed partition */ + + for (ch_number = 0; ch_number < part->nchannels; ch_number++) { + ch = &part->channels[ch_number]; + + + xpc_msgqueue_ref(ch); + spin_lock_irqsave(&ch->lock, irq_flags); + + XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); + + spin_unlock_irqrestore(&ch->lock, irq_flags); + xpc_msgqueue_deref(ch); + } + + xpc_wakeup_channel_mgr(part); + + xpc_part_deref(part); +} + + +/* + * Teardown the infrastructure necessary to support XPartition Communication + * between the specified remote partition and the local one. + */ +void +xpc_teardown_infrastructure(struct xpc_partition *part) +{ + partid_t partid = XPC_PARTID(part); + + + /* + * We start off by making this partition inaccessible to local + * processes by marking it as no longer setup. Then we make it + * inaccessible to remote processes by clearing the XPC per partition + * specific variable's magic # (which indicates that these variables + * are no longer valid) and by ignoring all XPC notify IPIs sent to + * this partition. + */ + + DBUG_ON(atomic_read(&part->nchannels_active) != 0); + DBUG_ON(part->setup_state != XPC_P_SETUP); + part->setup_state = XPC_P_WTEARDOWN; + + xpc_vars_part[partid].magic = 0; + + + free_irq(SGI_XPC_NOTIFY, (void *) (u64) partid); + + + /* + * Before proceding with the teardown we have to wait until all + * existing references cease. + */ + wait_event(part->teardown_wq, (atomic_read(&part->references) == 0)); + + + /* now we can begin tearing down the infrastructure */ + + part->setup_state = XPC_P_TORNDOWN; + + /* in case we've still got outstanding timers registered... 
*/ + del_timer_sync(&part->dropped_IPI_timer); + + kfree(part->remote_openclose_args_base); + part->remote_openclose_args = NULL; + kfree(part->local_openclose_args_base); + part->local_openclose_args = NULL; + kfree(part->remote_GPs_base); + part->remote_GPs = NULL; + kfree(part->local_GPs_base); + part->local_GPs = NULL; + kfree(part->channels); + part->channels = NULL; + part->local_IPI_amo_va = NULL; +} + + +/* + * Called by XP at the time of channel connection registration to cause + * XPC to establish connections to all currently active partitions. + */ +void +xpc_initiate_connect(int ch_number) +{ + partid_t partid; + struct xpc_partition *part; + struct xpc_channel *ch; + + + DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); + + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + part = &xpc_partitions[partid]; + + if (xpc_part_ref(part)) { + ch = &part->channels[ch_number]; + + if (!(ch->flags & XPC_C_DISCONNECTING)) { + DBUG_ON(ch->flags & XPC_C_OPENREQUEST); + DBUG_ON(ch->flags & XPC_C_CONNECTED); + DBUG_ON(ch->flags & XPC_C_SETUP); + + /* + * Initiate the establishment of a connection + * on the newly registered channel to the + * remote partition. + */ + xpc_wakeup_channel_mgr(part); + } + + xpc_part_deref(part); + } + } +} + + +void +xpc_connected_callout(struct xpc_channel *ch) +{ + unsigned long irq_flags; + + + /* let the registerer know that a connection has been established */ + + if (ch->func != NULL) { + dev_dbg(xpc_chan, "ch->func() called, reason=xpcConnected, " + "partid=%d, channel=%d\n", ch->partid, ch->number); + + ch->func(xpcConnected, ch->partid, ch->number, + (void *) (u64) ch->local_nentries, ch->key); + + dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, " + "partid=%d, channel=%d\n", ch->partid, ch->number); + } + + spin_lock_irqsave(&ch->lock, irq_flags); + ch->flags |= XPC_C_CONNECTCALLOUT; + spin_unlock_irqrestore(&ch->lock, irq_flags); +} + + +/* + * Called by XP at the time of channel connection unregistration to cause + * XPC to teardown all current connections for the specified channel. + * + * Before returning xpc_initiate_disconnect() will wait until all connections + * on the specified channel have been closed/torndown. So the caller can be + * assured that they will not be receiving any more callouts from XPC to the + * function they registered via xpc_connect(). + * + * Arguments: + * + * ch_number - channel # to unregister. + */ +void +xpc_initiate_disconnect(int ch_number) +{ + unsigned long irq_flags; + partid_t partid; + struct xpc_partition *part; + struct xpc_channel *ch; + + + DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); + + /* initiate the channel disconnect for every active partition */ + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + part = &xpc_partitions[partid]; + + if (xpc_part_ref(part)) { + ch = &part->channels[ch_number]; + xpc_msgqueue_ref(ch); + + spin_lock_irqsave(&ch->lock, irq_flags); + + XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering, + &irq_flags); + + spin_unlock_irqrestore(&ch->lock, irq_flags); + + xpc_msgqueue_deref(ch); + xpc_part_deref(part); + } + } + + xpc_disconnect_wait(ch_number); +} + + +/* + * To disconnect a channel, and reflect it back to all who may be waiting. + * + * >>> An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by + * >>> xpc_free_msgqueues(). + * + * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN. 
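
A minimal sketch of the calling convention stated above, patterned on the callers earlier in this file (xpc_partition_down() and xpc_initiate_disconnect()); the reason value is only an example:

	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);

	/* may temporarily drop and re-take ch->lock internally,
	 * but returns with the lock held again */
	XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering, &irq_flags);

	spin_unlock_irqrestore(&ch->lock, irq_flags);
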
+ */ +void +xpc_disconnect_channel(const int line, struct xpc_channel *ch, + enum xpc_retval reason, unsigned long *irq_flags) +{ + u32 flags; + + + DBUG_ON(!spin_is_locked(&ch->lock)); + + if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) { + return; + } + DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED))); + + dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n", + reason, line, ch->partid, ch->number); + + XPC_SET_REASON(ch, reason, line); + + flags = ch->flags; + /* some of these may not have been set */ + ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY | + XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | + XPC_C_CONNECTING | XPC_C_CONNECTED); + + ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING); + xpc_IPI_send_closerequest(ch, irq_flags); + + if (flags & XPC_C_CONNECTED) { + ch->flags |= XPC_C_WASCONNECTED; + } + + if (atomic_read(&ch->kthreads_idle) > 0) { + /* wake all idle kthreads so they can exit */ + wake_up_all(&ch->idle_wq); + } + + spin_unlock_irqrestore(&ch->lock, *irq_flags); + + + /* wake those waiting to allocate an entry from the local msg queue */ + + if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) { + wake_up(&ch->msg_allocate_wq); + } + + /* wake those waiting for notify completion */ + + if (atomic_read(&ch->n_to_notify) > 0) { + xpc_notify_senders(ch, reason, ch->w_local_GP.put); + } + + spin_lock_irqsave(&ch->lock, *irq_flags); +} + + +void +xpc_disconnected_callout(struct xpc_channel *ch) +{ + /* + * Let the channel's registerer know that the channel is now + * disconnected. We don't want to do this if the registerer was never + * informed of a connection being made, unless the disconnect was for + * abnormal reasons. + */ + + if (ch->func != NULL) { + dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, " + "channel=%d\n", ch->reason, ch->partid, ch->number); + + ch->func(ch->reason, ch->partid, ch->number, NULL, ch->key); + + dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, " + "channel=%d\n", ch->reason, ch->partid, ch->number); + } +} + + +/* + * Wait for a message entry to become available for the specified channel, + * but don't wait any longer than 1 jiffy. + */ +static enum xpc_retval +xpc_allocate_msg_wait(struct xpc_channel *ch) +{ + enum xpc_retval ret; + + + if (ch->flags & XPC_C_DISCONNECTING) { + DBUG_ON(ch->reason == xpcInterrupted); // >>> Is this true? + return ch->reason; + } + + atomic_inc(&ch->n_on_msg_allocate_wq); + ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1); + atomic_dec(&ch->n_on_msg_allocate_wq); + + if (ch->flags & XPC_C_DISCONNECTING) { + ret = ch->reason; + DBUG_ON(ch->reason == xpcInterrupted); // >>> Is this true? + } else if (ret == 0) { + ret = xpcTimeout; + } else { + ret = xpcInterrupted; + } + + return ret; +} + + +/* + * Allocate an entry for a message from the message queue associated with the + * specified channel. + */ +static enum xpc_retval +xpc_allocate_msg(struct xpc_channel *ch, u32 flags, + struct xpc_msg **address_of_msg) +{ + struct xpc_msg *msg; + enum xpc_retval ret; + s64 put; + + + /* this reference will be dropped in xpc_send_msg() */ + xpc_msgqueue_ref(ch); + + if (ch->flags & XPC_C_DISCONNECTING) { + xpc_msgqueue_deref(ch); + return ch->reason; + } + if (!(ch->flags & XPC_C_CONNECTED)) { + xpc_msgqueue_deref(ch); + return xpcNotConnected; + } + + + /* + * Get the next available message entry from the local message queue. + * If none are available, we'll make sure that we grab the latest + * GP values. 
+ */ + ret = xpcTimeout; + + while (1) { + + put = (volatile s64) ch->w_local_GP.put; + if (put - (volatile s64) ch->w_remote_GP.get < + ch->local_nentries) { + + /* There are available message entries. We need to try + * to secure one for ourselves. We'll do this by trying + * to increment w_local_GP.put as long as someone else + * doesn't beat us to it. If they do, we'll have to + * try again. + */ + if (cmpxchg(&ch->w_local_GP.put, put, put + 1) == + put) { + /* we got the entry referenced by put */ + break; + } + continue; /* try again */ + } + + + /* + * There aren't any available msg entries at this time. + * + * In waiting for a message entry to become available, + * we set a timeout in case the other side is not + * sending completion IPIs. This lets us fake an IPI + * that will cause the IPI handler to fetch the latest + * GP values as if an IPI was sent by the other side. + */ + if (ret == xpcTimeout) { + xpc_IPI_send_local_msgrequest(ch); + } + + if (flags & XPC_NOWAIT) { + xpc_msgqueue_deref(ch); + return xpcNoWait; + } + + ret = xpc_allocate_msg_wait(ch); + if (ret != xpcInterrupted && ret != xpcTimeout) { + xpc_msgqueue_deref(ch); + return ret; + } + } + + + /* get the message's address and initialize it */ + msg = (struct xpc_msg *) ((u64) ch->local_msgqueue + + (put % ch->local_nentries) * ch->msg_size); + + + DBUG_ON(msg->flags != 0); + msg->number = put; + + dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, " + "msg_number=%ld, partid=%d, channel=%d\n", put + 1, + (void *) msg, msg->number, ch->partid, ch->number); + + *address_of_msg = msg; + + return xpcSuccess; +} + + +/* + * Allocate an entry for a message from the message queue associated with the + * specified channel. NOTE that this routine can sleep waiting for a message + * entry to become available. To not sleep, pass in the XPC_NOWAIT flag. + * + * Arguments: + * + * partid - ID of partition to which the channel is connected. + * ch_number - channel #. + * flags - see xpc.h for valid flags. + * payload - address of the allocated payload area pointer (filled in on + * return) in which the user-defined message is constructed. + */ +enum xpc_retval +xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload) +{ + struct xpc_partition *part = &xpc_partitions[partid]; + enum xpc_retval ret = xpcUnknownReason; + struct xpc_msg *msg; + + + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); + + *payload = NULL; + + if (xpc_part_ref(part)) { + ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg); + xpc_part_deref(part); + + if (msg != NULL) { + *payload = &msg->payload; + } + } + + return ret; +} + + +/* + * Now we actually send the messages that are ready to be sent by advancing + * the local message queue's Put value and then send an IPI to the recipient + * partition. 
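
The allocation path above hands out message entries lock-free: a sender reads the working Put value, checks against the peer's Get that the ring is not full, and claims the entry with a compare-and-swap, retrying if another sender won the race; the claimed entry then lives at (put % nentries) within the fixed-size queue. A rough userspace sketch of that claim pattern (all names invented; GCC's __sync_val_compare_and_swap() stands in for the kernel's cmpxchg()):

#include <stdint.h>

/* invented, simplified per-channel ring state */
struct ring {
	int64_t w_put;		/* working Put: next entry to hand out */
	int64_t w_remote_get;	/* peer's Get value as last seen */
	int64_t nentries;	/* number of entries in the ring */
};

/* returns the claimed entry number, or -1 if the ring is currently full */
static int64_t claim_entry(struct ring *r)
{
	for (;;) {
		int64_t put = r->w_put;

		if (put - r->w_remote_get >= r->nentries)
			return -1;	/* no free entries at the moment */

		/* only one concurrent claimant wins the swap for this value */
		if (__sync_val_compare_and_swap(&r->w_put, put, put + 1) == put)
			return put;	/* entry (put % nentries) is ours */
	}
}

xpc_send_msgs() below then advances the published local_GP->put only past the contiguous run of entries already marked XPC_M_READY, since entries can be claimed in order but filled and flagged out of order.
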
+ */ +static void +xpc_send_msgs(struct xpc_channel *ch, s64 initial_put) +{ + struct xpc_msg *msg; + s64 put = initial_put + 1; + int send_IPI = 0; + + + while (1) { + + while (1) { + if (put == (volatile s64) ch->w_local_GP.put) { + break; + } + + msg = (struct xpc_msg *) ((u64) ch->local_msgqueue + + (put % ch->local_nentries) * ch->msg_size); + + if (!(msg->flags & XPC_M_READY)) { + break; + } + + put++; + } + + if (put == initial_put) { + /* nothing's changed */ + break; + } + + if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) != + initial_put) { + /* someone else beat us to it */ + DBUG_ON((volatile s64) ch->local_GP->put < initial_put); + break; + } + + /* we just set the new value of local_GP->put */ + + dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, " + "channel=%d\n", put, ch->partid, ch->number); + + send_IPI = 1; + + /* + * We need to ensure that the message referenced by + * local_GP->put is not XPC_M_READY or that local_GP->put + * equals w_local_GP.put, so we'll go have a look. + */ + initial_put = put; + } + + if (send_IPI) { + xpc_IPI_send_msgrequest(ch); + } +} + + +/* + * Common code that does the actual sending of the message by advancing the + * local message queue's Put value and sends an IPI to the partition the + * message is being sent to. + */ +static enum xpc_retval +xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type, + xpc_notify_func func, void *key) +{ + enum xpc_retval ret = xpcSuccess; + struct xpc_notify *notify = NULL; // >>> to keep the compiler happy!! + s64 put, msg_number = msg->number; + + + DBUG_ON(notify_type == XPC_N_CALL && func == NULL); + DBUG_ON((((u64) msg - (u64) ch->local_msgqueue) / ch->msg_size) != + msg_number % ch->local_nentries); + DBUG_ON(msg->flags & XPC_M_READY); + + if (ch->flags & XPC_C_DISCONNECTING) { + /* drop the reference grabbed in xpc_allocate_msg() */ + xpc_msgqueue_deref(ch); + return ch->reason; + } + + if (notify_type != 0) { + /* + * Tell the remote side to send an ACK interrupt when the + * message has been delivered. + */ + msg->flags |= XPC_M_INTERRUPT; + + atomic_inc(&ch->n_to_notify); + + notify = &ch->notify_queue[msg_number % ch->local_nentries]; + notify->func = func; + notify->key = key; + (volatile u8) notify->type = notify_type; + + // >>> is a mb() needed here? + + if (ch->flags & XPC_C_DISCONNECTING) { + /* + * An error occurred between our last error check and + * this one. We will try to clear the type field from + * the notify entry. If we succeed then + * xpc_disconnect_channel() didn't already process + * the notify entry. + */ + if (cmpxchg(¬ify->type, notify_type, 0) == + notify_type) { + atomic_dec(&ch->n_to_notify); + ret = ch->reason; + } + + /* drop the reference grabbed in xpc_allocate_msg() */ + xpc_msgqueue_deref(ch); + return ret; + } + } + + msg->flags |= XPC_M_READY; + + /* + * The preceding store of msg->flags must occur before the following + * load of ch->local_GP->put. + */ + mb(); + + /* see if the message is next in line to be sent, if so send it */ + + put = ch->local_GP->put; + if (put == msg_number) { + xpc_send_msgs(ch, put); + } + + /* drop the reference grabbed in xpc_allocate_msg() */ + xpc_msgqueue_deref(ch); + return ret; +} + + +/* + * Send a message previously allocated using xpc_initiate_allocate() on the + * specified channel connected to the specified partition. + * + * This routine will not wait for the message to be received, nor will + * notification be given when it does happen. 
Once this routine has returned
+ * the message entry allocated via xpc_initiate_allocate() is no longer
+ * accessible to the caller.
+ *
+ * This routine, although called by users, does not call xpc_part_ref() to
+ * ensure that the partition infrastructure is in place. It relies on the
+ * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg().
+ *
+ * Arguments:
+ *
+ * partid - ID of partition to which the channel is connected.
+ * ch_number - channel # to send message on.
+ * payload - pointer to the payload area allocated via
+ * xpc_initiate_allocate().
+ */
+enum xpc_retval
+xpc_initiate_send(partid_t partid, int ch_number, void *payload)
+{
+ struct xpc_partition *part = &xpc_partitions[partid];
+ struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
+ enum xpc_retval ret;
+
+
+ dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
+ partid, ch_number);
+
+ DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
+ DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
+ DBUG_ON(msg == NULL);
+
+ ret = xpc_send_msg(&part->channels[ch_number], msg, 0, NULL, NULL);
+
+ return ret;
+}
+
+
+/*
+ * Send a message previously allocated using xpc_initiate_allocate() on the
+ * specified channel connected to the specified partition.
+ *
+ * This routine will not wait for the message to be sent. Once this routine
+ * has returned the message entry allocated via xpc_initiate_allocate() is no
+ * longer accessible to the caller.
+ *
+ * Once the remote end of the channel has received the message, the function
+ * passed as an argument to xpc_initiate_send_notify() will be called. This
+ * allows the sender to free up or re-use any buffers referenced by the
+ * message, but does NOT mean the message has been processed at the remote
+ * end by a receiver.
+ *
+ * If this routine returns an error, the caller's function will NOT be called.
+ *
+ * This routine, although called by users, does not call xpc_part_ref() to
+ * ensure that the partition infrastructure is in place. It relies on the
+ * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg().
+ *
+ * Arguments:
+ *
+ * partid - ID of partition to which the channel is connected.
+ * ch_number - channel # to send message on.
+ * payload - pointer to the payload area allocated via
+ * xpc_initiate_allocate().
+ * func - function to call with asynchronous notification of message
+ * receipt. THIS FUNCTION MUST BE NON-BLOCKING.
+ * key - user-defined key to be passed to the function when it's called.
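
A hypothetical sender built on the interfaces documented here (it assumes the xpc.h declarations from this patch and a channel already registered via xpc_connect(); MY_CHANNEL, struct my_payload, my_msg_delivered() and my_send() are invented for the example):

#define MY_CHANNEL	3		/* invented channel number */

struct my_payload {
	u64 cookie;			/* invented message contents */
};

/* runs when the remote side has received the message; must not block */
static void
my_msg_delivered(enum xpc_retval reason, partid_t partid, int ch_number,
		 void *key)
{
	/* the payload slot passed as 'key' may be reused by XPC from here on */
}

static enum xpc_retval
my_send(partid_t partid, u64 cookie)
{
	struct my_payload *payload;
	enum xpc_retval ret;

	/* flags=0: may sleep until an entry is free (XPC_NOWAIT would not) */
	ret = xpc_initiate_allocate(partid, MY_CHANNEL, 0, (void **) &payload);
	if (ret != xpcSuccess)
		return ret;

	payload->cookie = cookie;

	/* once this returns, the payload area must not be touched again */
	return xpc_initiate_send_notify(partid, MY_CHANNEL, payload,
					my_msg_delivered, payload);
}
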
+ */ +enum xpc_retval +xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload, + xpc_notify_func func, void *key) +{ + struct xpc_partition *part = &xpc_partitions[partid]; + struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); + enum xpc_retval ret; + + + dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg, + partid, ch_number); + + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); + DBUG_ON(msg == NULL); + DBUG_ON(func == NULL); + + ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL, + func, key); + return ret; +} + + +static struct xpc_msg * +xpc_pull_remote_msg(struct xpc_channel *ch, s64 get) +{ + struct xpc_partition *part = &xpc_partitions[ch->partid]; + struct xpc_msg *remote_msg, *msg; + u32 msg_index, nmsgs; + u64 msg_offset; + enum xpc_retval ret; + + + if (down_interruptible(&ch->msg_to_pull_sema) != 0) { + /* we were interrupted by a signal */ + return NULL; + } + + while (get >= ch->next_msg_to_pull) { + + /* pull as many messages as are ready and able to be pulled */ + + msg_index = ch->next_msg_to_pull % ch->remote_nentries; + + DBUG_ON(ch->next_msg_to_pull >= + (volatile s64) ch->w_remote_GP.put); + nmsgs = (volatile s64) ch->w_remote_GP.put - + ch->next_msg_to_pull; + if (msg_index + nmsgs > ch->remote_nentries) { + /* ignore the ones that wrap the msg queue for now */ + nmsgs = ch->remote_nentries - msg_index; + } + + msg_offset = msg_index * ch->msg_size; + msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + + msg_offset); + remote_msg = (struct xpc_msg *) (ch->remote_msgqueue_pa + + msg_offset); + + if ((ret = xpc_pull_remote_cachelines(part, msg, remote_msg, + nmsgs * ch->msg_size)) != xpcSuccess) { + + dev_dbg(xpc_chan, "failed to pull %d msgs starting with" + " msg %ld from partition %d, channel=%d, " + "ret=%d\n", nmsgs, ch->next_msg_to_pull, + ch->partid, ch->number, ret); + + XPC_DEACTIVATE_PARTITION(part, ret); + + up(&ch->msg_to_pull_sema); + return NULL; + } + + mb(); /* >>> this may not be needed, we're not sure */ + + ch->next_msg_to_pull += nmsgs; + } + + up(&ch->msg_to_pull_sema); + + /* return the message we were looking for */ + msg_offset = (get % ch->remote_nentries) * ch->msg_size; + msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + msg_offset); + + return msg; +} + + +/* + * Get a message to be delivered. + */ +static struct xpc_msg * +xpc_get_deliverable_msg(struct xpc_channel *ch) +{ + struct xpc_msg *msg = NULL; + s64 get; + + + do { + if ((volatile u32) ch->flags & XPC_C_DISCONNECTING) { + break; + } + + get = (volatile s64) ch->w_local_GP.get; + if (get == (volatile s64) ch->w_remote_GP.put) { + break; + } + + /* There are messages waiting to be pulled and delivered. + * We need to try to secure one for ourselves. We'll do this + * by trying to increment w_local_GP.get and hope that no one + * else beats us to it. If they do, we'll we'll simply have + * to try again for the next one. 
+ */ + + if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) { + /* we got the entry referenced by get */ + + dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, " + "partid=%d, channel=%d\n", get + 1, + ch->partid, ch->number); + + /* pull the message from the remote partition */ + + msg = xpc_pull_remote_msg(ch, get); + + DBUG_ON(msg != NULL && msg->number != get); + DBUG_ON(msg != NULL && (msg->flags & XPC_M_DONE)); + DBUG_ON(msg != NULL && !(msg->flags & XPC_M_READY)); + + break; + } + + } while (1); + + return msg; +} + + +/* + * Deliver a message to its intended recipient. + */ +void +xpc_deliver_msg(struct xpc_channel *ch) +{ + struct xpc_msg *msg; + + + if ((msg = xpc_get_deliverable_msg(ch)) != NULL) { + + /* + * This ref is taken to protect the payload itself from being + * freed before the user is finished with it, which the user + * indicates by calling xpc_initiate_received(). + */ + xpc_msgqueue_ref(ch); + + atomic_inc(&ch->kthreads_active); + + if (ch->func != NULL) { + dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, " + "msg_number=%ld, partid=%d, channel=%d\n", + (void *) msg, msg->number, ch->partid, + ch->number); + + /* deliver the message to its intended recipient */ + ch->func(xpcMsgReceived, ch->partid, ch->number, + &msg->payload, ch->key); + + dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, " + "msg_number=%ld, partid=%d, channel=%d\n", + (void *) msg, msg->number, ch->partid, + ch->number); + } + + atomic_dec(&ch->kthreads_active); + } +} + + +/* + * Now we actually acknowledge the messages that have been delivered and ack'd + * by advancing the cached remote message queue's Get value and if requested + * send an IPI to the message sender's partition. + */ +static void +xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags) +{ + struct xpc_msg *msg; + s64 get = initial_get + 1; + int send_IPI = 0; + + + while (1) { + + while (1) { + if (get == (volatile s64) ch->w_local_GP.get) { + break; + } + + msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + + (get % ch->remote_nentries) * ch->msg_size); + + if (!(msg->flags & XPC_M_DONE)) { + break; + } + + msg_flags |= msg->flags; + get++; + } + + if (get == initial_get) { + /* nothing's changed */ + break; + } + + if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) != + initial_get) { + /* someone else beat us to it */ + DBUG_ON((volatile s64) ch->local_GP->get <= + initial_get); + break; + } + + /* we just set the new value of local_GP->get */ + + dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, " + "channel=%d\n", get, ch->partid, ch->number); + + send_IPI = (msg_flags & XPC_M_INTERRUPT); + + /* + * We need to ensure that the message referenced by + * local_GP->get is not XPC_M_DONE or that local_GP->get + * equals w_local_GP.get, so we'll go have a look. + */ + initial_get = get; + } + + if (send_IPI) { + xpc_IPI_send_msgrequest(ch); + } +} + + +/* + * Acknowledge receipt of a delivered message. + * + * If a message has XPC_M_INTERRUPT set, send an interrupt to the partition + * that sent the message. + * + * This function, although called by users, does not call xpc_part_ref() to + * ensure that the partition infrastructure is in place. It relies on the + * fact that we called xpc_msgqueue_ref() in xpc_deliver_msg(). + * + * Arguments: + * + * partid - ID of partition to which the channel is connected. + * ch_number - channel # message received on. + * payload - pointer to the payload area allocated via + * xpc_initiate_allocate(). 
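/*
 * A hypothetical receiver-side sketch: a channel function of the kind
 * registered through xpc_connect(), invoked by xpc_deliver_msg() above
 * with reason xpcMsgReceived and a pointer to the message payload.  The
 * callback signature is an assumption; once the payload has been consumed,
 * the receiver hands it back with xpc_initiate_received() as documented.
 */
static void
my_channel_func(enum xpc_retval reason, partid_t partid, int ch_number,
		void *data, void *key)
{
	if (reason == xpcMsgReceived) {
		void *payload = data;	/* &msg->payload supplied by XPC */

		/* ... consume the payload ... */

		/* allow XPC to mark the message done and reuse its entry */
		xpc_initiate_received(partid, ch_number, payload);
	}
}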
+ */ +void +xpc_initiate_received(partid_t partid, int ch_number, void *payload) +{ + struct xpc_partition *part = &xpc_partitions[partid]; + struct xpc_channel *ch; + struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); + s64 get, msg_number = msg->number; + + + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); + + ch = &part->channels[ch_number]; + + dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n", + (void *) msg, msg_number, ch->partid, ch->number); + + DBUG_ON((((u64) msg - (u64) ch->remote_msgqueue) / ch->msg_size) != + msg_number % ch->remote_nentries); + DBUG_ON(msg->flags & XPC_M_DONE); + + msg->flags |= XPC_M_DONE; + + /* + * The preceding store of msg->flags must occur before the following + * load of ch->local_GP->get. + */ + mb(); + + /* + * See if this message is next in line to be acknowledged as having + * been delivered. + */ + get = ch->local_GP->get; + if (get == msg_number) { + xpc_acknowledge_msgs(ch, get, msg->flags); + } + + /* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */ + xpc_msgqueue_deref(ch); +} + diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c new file mode 100644 index 0000000..177ddb7 --- /dev/null +++ b/arch/ia64/sn/kernel/xpc_main.c @@ -0,0 +1,1064 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. + */ + + +/* + * Cross Partition Communication (XPC) support - standard version. + * + * XPC provides a message passing capability that crosses partition + * boundaries. This module is made up of two parts: + * + * partition This part detects the presence/absence of other + * partitions. It provides a heartbeat and monitors + * the heartbeats of other partitions. + * + * channel This part manages the channels and sends/receives + * messages across them to/from other partitions. + * + * There are a couple of additional functions residing in XP, which + * provide an interface to XPC for its users. + * + * + * Caveats: + * + * . We currently have no way to determine which nasid an IPI came + * from. Thus, xpc_IPI_send() does a remote AMO write followed by + * an IPI. The AMO indicates where data is to be pulled from, so + * after the IPI arrives, the remote partition checks the AMO word. + * The IPI can actually arrive before the AMO however, so other code + * must periodically check for this case. Also, remote AMO operations + * do not reliably time out. Thus we do a remote PIO read solely to + * know whether the remote partition is down and whether we should + * stop sending IPIs to it. This remote PIO read operation is set up + * in a special nofault region so SAL knows to ignore (and cleanup) + * any errors due to the remote AMO write, PIO read, and/or PIO + * write operations. + * + * If/when new hardware solves this IPI problem, we should abandon + * the current approach. 
+ * + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "xpc.h" + + +/* define two XPC debug device structures to be used with dev_dbg() et al */ + +struct device_driver xpc_dbg_name = { + .name = "xpc" +}; + +struct device xpc_part_dbg_subname = { + .bus_id = {0}, /* set to "part" at xpc_init() time */ + .driver = &xpc_dbg_name +}; + +struct device xpc_chan_dbg_subname = { + .bus_id = {0}, /* set to "chan" at xpc_init() time */ + .driver = &xpc_dbg_name +}; + +struct device *xpc_part = &xpc_part_dbg_subname; +struct device *xpc_chan = &xpc_chan_dbg_subname; + + +/* systune related variables for /proc/sys directories */ + +static int xpc_hb_min = 1; +static int xpc_hb_max = 10; + +static int xpc_hb_check_min = 10; +static int xpc_hb_check_max = 120; + +static ctl_table xpc_sys_xpc_hb_dir[] = { + { + 1, + "hb_interval", + &xpc_hb_interval, + sizeof(int), + 0644, + NULL, + &proc_dointvec_minmax, + &sysctl_intvec, + NULL, + &xpc_hb_min, &xpc_hb_max + }, + { + 2, + "hb_check_interval", + &xpc_hb_check_interval, + sizeof(int), + 0644, + NULL, + &proc_dointvec_minmax, + &sysctl_intvec, + NULL, + &xpc_hb_check_min, &xpc_hb_check_max + }, + {0} +}; +static ctl_table xpc_sys_xpc_dir[] = { + { + 1, + "hb", + NULL, + 0, + 0555, + xpc_sys_xpc_hb_dir + }, + {0} +}; +static ctl_table xpc_sys_dir[] = { + { + 1, + "xpc", + NULL, + 0, + 0555, + xpc_sys_xpc_dir + }, + {0} +}; +static struct ctl_table_header *xpc_sysctl; + + +/* #of IRQs received */ +static atomic_t xpc_act_IRQ_rcvd; + +/* IRQ handler notifies this wait queue on receipt of an IRQ */ +static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq); + +static unsigned long xpc_hb_check_timeout; + +/* xpc_hb_checker thread exited notification */ +static DECLARE_MUTEX_LOCKED(xpc_hb_checker_exited); + +/* xpc_discovery thread exited notification */ +static DECLARE_MUTEX_LOCKED(xpc_discovery_exited); + + +static struct timer_list xpc_hb_timer; + + +static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *); + + +/* + * Notify the heartbeat check thread that an IRQ has been received. + */ +static irqreturn_t +xpc_act_IRQ_handler(int irq, void *dev_id, struct pt_regs *regs) +{ + atomic_inc(&xpc_act_IRQ_rcvd); + wake_up_interruptible(&xpc_act_IRQ_wq); + return IRQ_HANDLED; +} + + +/* + * Timer to produce the heartbeat. The timer structures function is + * already set when this is initially called. A tunable is used to + * specify when the next timeout should occur. + */ +static void +xpc_hb_beater(unsigned long dummy) +{ + xpc_vars->heartbeat++; + + if (jiffies >= xpc_hb_check_timeout) { + wake_up_interruptible(&xpc_act_IRQ_wq); + } + + xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ); + add_timer(&xpc_hb_timer); +} + + +/* + * This thread is responsible for nearly all of the partition + * activation/deactivation. 
+ */ +static int +xpc_hb_checker(void *ignore) +{ + int last_IRQ_count = 0; + int new_IRQ_count; + int force_IRQ=0; + + + /* this thread was marked active by xpc_hb_init() */ + + daemonize(XPC_HB_CHECK_THREAD_NAME); + + set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU)); + + xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); + + while (!(volatile int) xpc_exiting) { + + /* wait for IRQ or timeout */ + (void) wait_event_interruptible(xpc_act_IRQ_wq, + (last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) || + jiffies >= xpc_hb_check_timeout || + (volatile int) xpc_exiting)); + + dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have " + "been received\n", + (int) (xpc_hb_check_timeout - jiffies), + atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count); + + + /* checking of remote heartbeats is skewed by IRQ handling */ + if (jiffies >= xpc_hb_check_timeout) { + dev_dbg(xpc_part, "checking remote heartbeats\n"); + xpc_check_remote_hb(); + + /* + * We need to periodically recheck to ensure no + * IPI/AMO pairs have been missed. That check + * must always reset xpc_hb_check_timeout. + */ + force_IRQ = 1; + } + + + new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd); + if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) { + force_IRQ = 0; + + dev_dbg(xpc_part, "found an IRQ to process; will be " + "resetting xpc_hb_check_timeout\n"); + + last_IRQ_count += xpc_identify_act_IRQ_sender(); + if (last_IRQ_count < new_IRQ_count) { + /* retry once to help avoid missing AMO */ + (void) xpc_identify_act_IRQ_sender(); + } + last_IRQ_count = new_IRQ_count; + + xpc_hb_check_timeout = jiffies + + (xpc_hb_check_interval * HZ); + } + } + + dev_dbg(xpc_part, "heartbeat checker is exiting\n"); + + + /* mark this thread as inactive */ + up(&xpc_hb_checker_exited); + return 0; +} + + +/* + * This thread will attempt to discover other partitions to activate + * based on info provided by SAL. This new thread is short lived and + * will exit once discovery is complete. + */ +static int +xpc_initiate_discovery(void *ignore) +{ + daemonize(XPC_DISCOVERY_THREAD_NAME); + + xpc_discovery(); + + dev_dbg(xpc_part, "discovery thread is exiting\n"); + + /* mark this thread as inactive */ + up(&xpc_discovery_exited); + return 0; +} + + +/* + * Establish first contact with the remote partititon. This involves pulling + * the XPC per partition variables from the remote partition and waiting for + * the remote partition to pull ours. + */ +static enum xpc_retval +xpc_make_first_contact(struct xpc_partition *part) +{ + enum xpc_retval ret; + + + while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) { + if (ret != xpcRetry) { + XPC_DEACTIVATE_PARTITION(part, ret); + return ret; + } + + dev_dbg(xpc_chan, "waiting to make first contact with " + "partition %d\n", XPC_PARTID(part)); + + /* wait a 1/4 of a second or so */ + set_current_state(TASK_INTERRUPTIBLE); + (void) schedule_timeout(0.25 * HZ); + + if (part->act_state == XPC_P_DEACTIVATING) { + return part->reason; + } + } + + return xpc_mark_partition_active(part); +} + + +/* + * The first kthread assigned to a newly activated partition is the one + * created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to + * that kthread until the partition is brought down, at which time that kthread + * returns back to XPC HB. (The return of that kthread will signify to XPC HB + * that XPC has dismantled all communication infrastructure for the associated + * partition.) This kthread becomes the channel manager for that partition. 
+ * + * Each active partition has a channel manager, who, besides connecting and + * disconnecting channels, will ensure that each of the partition's connected + * channels has the required number of assigned kthreads to get the work done. + */ +static void +xpc_channel_mgr(struct xpc_partition *part) +{ + while (part->act_state != XPC_P_DEACTIVATING || + atomic_read(&part->nchannels_active) > 0) { + + xpc_process_channel_activity(part); + + + /* + * Wait until we've been requested to activate kthreads or + * all of the channel's message queues have been torn down or + * a signal is pending. + * + * The channel_mgr_requests is set to 1 after being awakened, + * This is done to prevent the channel mgr from making one pass + * through the loop for each request, since he will + * be servicing all the requests in one pass. The reason it's + * set to 1 instead of 0 is so that other kthreads will know + * that the channel mgr is running and won't bother trying to + * wake him up. + */ + atomic_dec(&part->channel_mgr_requests); + (void) wait_event_interruptible(part->channel_mgr_wq, + (atomic_read(&part->channel_mgr_requests) > 0 || + (volatile u64) part->local_IPI_amo != 0 || + ((volatile u8) part->act_state == + XPC_P_DEACTIVATING && + atomic_read(&part->nchannels_active) == 0))); + atomic_set(&part->channel_mgr_requests, 1); + + // >>> Does it need to wakeup periodically as well? In case we + // >>> miscalculated the #of kthreads to wakeup or create? + } +} + + +/* + * When XPC HB determines that a partition has come up, it will create a new + * kthread and that kthread will call this function to attempt to set up the + * basic infrastructure used for Cross Partition Communication with the newly + * upped partition. + * + * The kthread that was created by XPC HB and which setup the XPC + * infrastructure will remain assigned to the partition until the partition + * goes down. At which time the kthread will teardown the XPC infrastructure + * and then exit. + * + * XPC HB will put the remote partition's XPC per partition specific variables + * physical address into xpc_partitions[partid].remote_vars_part_pa prior to + * calling xpc_partition_up(). + */ +static void +xpc_partition_up(struct xpc_partition *part) +{ + DBUG_ON(part->channels != NULL); + + dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part)); + + if (xpc_setup_infrastructure(part) != xpcSuccess) { + return; + } + + /* + * The kthread that XPC HB called us with will become the + * channel manager for this partition. It will not return + * back to XPC HB until the partition's XPC infrastructure + * has been dismantled. 
+ */ + + (void) xpc_part_ref(part); /* this will always succeed */ + + if (xpc_make_first_contact(part) == xpcSuccess) { + xpc_channel_mgr(part); + } + + xpc_part_deref(part); + + xpc_teardown_infrastructure(part); +} + + +static int +xpc_activating(void *__partid) +{ + partid_t partid = (u64) __partid; + struct xpc_partition *part = &xpc_partitions[partid]; + unsigned long irq_flags; + struct sched_param param = { sched_priority: MAX_USER_RT_PRIO - 1 }; + int ret; + + + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + + spin_lock_irqsave(&part->act_lock, irq_flags); + + if (part->act_state == XPC_P_DEACTIVATING) { + part->act_state = XPC_P_INACTIVE; + spin_unlock_irqrestore(&part->act_lock, irq_flags); + part->remote_rp_pa = 0; + return 0; + } + + /* indicate the thread is activating */ + DBUG_ON(part->act_state != XPC_P_ACTIVATION_REQ); + part->act_state = XPC_P_ACTIVATING; + + XPC_SET_REASON(part, 0, 0); + spin_unlock_irqrestore(&part->act_lock, irq_flags); + + dev_dbg(xpc_part, "bringing partition %d up\n", partid); + + daemonize("xpc%02d", partid); + + /* + * This thread needs to run at a realtime priority to prevent a + * significant performance degradation. + */ + ret = sched_setscheduler(current, SCHED_FIFO, ¶m); + if (ret != 0) { + dev_warn(xpc_part, "unable to set pid %d to a realtime " + "priority, ret=%d\n", current->pid, ret); + } + + /* allow this thread and its children to run on any CPU */ + set_cpus_allowed(current, CPU_MASK_ALL); + + /* + * Register the remote partition's AMOs with SAL so it can handle + * and cleanup errors within that address range should the remote + * partition go down. We don't unregister this range because it is + * difficult to tell when outstanding writes to the remote partition + * are finished and thus when it is safe to unregister. This should + * not result in wasted space in the SAL xp_addr_region table because + * we should get the same page for remote_amos_page_pa after module + * reloads and system reboots. + */ + if (sn_register_xp_addr_region(part->remote_amos_page_pa, + PAGE_SIZE, 1) < 0) { + dev_warn(xpc_part, "xpc_partition_up(%d) failed to register " + "xp_addr region\n", partid); + + spin_lock_irqsave(&part->act_lock, irq_flags); + part->act_state = XPC_P_INACTIVE; + XPC_SET_REASON(part, xpcPhysAddrRegFailed, __LINE__); + spin_unlock_irqrestore(&part->act_lock, irq_flags); + part->remote_rp_pa = 0; + return 0; + } + + XPC_ALLOW_HB(partid, xpc_vars); + xpc_IPI_send_activated(part); + + + /* + * xpc_partition_up() holds this thread and marks this partition as + * XPC_P_ACTIVE by calling xpc_hb_mark_active(). + */ + (void) xpc_partition_up(part); + + xpc_mark_partition_inactive(part); + + if (part->reason == xpcReactivating) { + /* interrupting ourselves results in activating partition */ + xpc_IPI_send_reactivate(part); + } + + return 0; +} + + +void +xpc_activate_partition(struct xpc_partition *part) +{ + partid_t partid = XPC_PARTID(part); + unsigned long irq_flags; + pid_t pid; + + + spin_lock_irqsave(&part->act_lock, irq_flags); + + pid = kernel_thread(xpc_activating, (void *) ((u64) partid), 0); + + DBUG_ON(part->act_state != XPC_P_INACTIVE); + + if (pid > 0) { + part->act_state = XPC_P_ACTIVATION_REQ; + XPC_SET_REASON(part, xpcCloneKThread, __LINE__); + } else { + XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__); + } + + spin_unlock_irqrestore(&part->act_lock, irq_flags); +} + + +/* + * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified + * partition actually sent it. 
Since SGI_XPC_NOTIFY IRQs may be shared by more + * than one partition, we use an AMO_t structure per partition to indicate + * whether a partition has sent an IPI or not. >>> If it has, then wake up the + * associated kthread to handle it. + * + * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC + * running on other partitions. + * + * Noteworthy Arguments: + * + * irq - Interrupt ReQuest number. NOT USED. + * + * dev_id - partid of IPI's potential sender. + * + * regs - processor's context before the processor entered + * interrupt code. NOT USED. + */ +irqreturn_t +xpc_notify_IRQ_handler(int irq, void *dev_id, struct pt_regs *regs) +{ + partid_t partid = (partid_t) (u64) dev_id; + struct xpc_partition *part = &xpc_partitions[partid]; + + + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + + if (xpc_part_ref(part)) { + xpc_check_for_channel_activity(part); + + xpc_part_deref(part); + } + return IRQ_HANDLED; +} + + +/* + * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor + * because the write to their associated IPI amo completed after the IRQ/IPI + * was received. + */ +void +xpc_dropped_IPI_check(struct xpc_partition *part) +{ + if (xpc_part_ref(part)) { + xpc_check_for_channel_activity(part); + + part->dropped_IPI_timer.expires = jiffies + + XPC_P_DROPPED_IPI_WAIT; + add_timer(&part->dropped_IPI_timer); + xpc_part_deref(part); + } +} + + +void +xpc_activate_kthreads(struct xpc_channel *ch, int needed) +{ + int idle = atomic_read(&ch->kthreads_idle); + int assigned = atomic_read(&ch->kthreads_assigned); + int wakeup; + + + DBUG_ON(needed <= 0); + + if (idle > 0) { + wakeup = (needed > idle) ? idle : needed; + needed -= wakeup; + + dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, " + "channel=%d\n", wakeup, ch->partid, ch->number); + + /* only wakeup the requested number of kthreads */ + wake_up_nr(&ch->idle_wq, wakeup); + } + + if (needed <= 0) { + return; + } + + if (needed + assigned > ch->kthreads_assigned_limit) { + needed = ch->kthreads_assigned_limit - assigned; + // >>>should never be less than 0 + if (needed <= 0) { + return; + } + } + + dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n", + needed, ch->partid, ch->number); + + xpc_create_kthreads(ch, needed); +} + + +/* + * This function is where XPC's kthreads wait for messages to deliver. 
+ */ +static void +xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch) +{ + do { + /* deliver messages to their intended recipients */ + + while ((volatile s64) ch->w_local_GP.get < + (volatile s64) ch->w_remote_GP.put && + !((volatile u32) ch->flags & + XPC_C_DISCONNECTING)) { + xpc_deliver_msg(ch); + } + + if (atomic_inc_return(&ch->kthreads_idle) > + ch->kthreads_idle_limit) { + /* too many idle kthreads on this channel */ + atomic_dec(&ch->kthreads_idle); + break; + } + + dev_dbg(xpc_chan, "idle kthread calling " + "wait_event_interruptible_exclusive()\n"); + + (void) wait_event_interruptible_exclusive(ch->idle_wq, + ((volatile s64) ch->w_local_GP.get < + (volatile s64) ch->w_remote_GP.put || + ((volatile u32) ch->flags & + XPC_C_DISCONNECTING))); + + atomic_dec(&ch->kthreads_idle); + + } while (!((volatile u32) ch->flags & XPC_C_DISCONNECTING)); +} + + +static int +xpc_daemonize_kthread(void *args) +{ + partid_t partid = XPC_UNPACK_ARG1(args); + u16 ch_number = XPC_UNPACK_ARG2(args); + struct xpc_partition *part = &xpc_partitions[partid]; + struct xpc_channel *ch; + int n_needed; + + + daemonize("xpc%02dc%d", partid, ch_number); + + dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n", + partid, ch_number); + + ch = &part->channels[ch_number]; + + if (!(ch->flags & XPC_C_DISCONNECTING)) { + DBUG_ON(!(ch->flags & XPC_C_CONNECTED)); + + /* let registerer know that connection has been established */ + + if (atomic_read(&ch->kthreads_assigned) == 1) { + xpc_connected_callout(ch); + + /* + * It is possible that while the callout was being + * made that the remote partition sent some messages. + * If that is the case, we may need to activate + * additional kthreads to help deliver them. We only + * need one less than total #of messages to deliver. + */ + n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1; + if (n_needed > 0 && + !(ch->flags & XPC_C_DISCONNECTING)) { + xpc_activate_kthreads(ch, n_needed); + } + } + + xpc_kthread_waitmsgs(part, ch); + } + + if (atomic_dec_return(&ch->kthreads_assigned) == 0 && + ((ch->flags & XPC_C_CONNECTCALLOUT) || + (ch->reason != xpcUnregistering && + ch->reason != xpcOtherUnregistering))) { + xpc_disconnected_callout(ch); + } + + + xpc_msgqueue_deref(ch); + + dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n", + partid, ch_number); + + xpc_part_deref(part); + return 0; +} + + +/* + * For each partition that XPC has established communications with, there is + * a minimum of one kernel thread assigned to perform any operation that + * may potentially sleep or block (basically the callouts to the asynchronous + * functions registered via xpc_connect()). + * + * Additional kthreads are created and destroyed by XPC as the workload + * demands. + * + * A kthread is assigned to one of the active channels that exists for a given + * partition. + */ +void +xpc_create_kthreads(struct xpc_channel *ch, int needed) +{ + unsigned long irq_flags; + pid_t pid; + u64 args = XPC_PACK_ARGS(ch->partid, ch->number); + + + while (needed-- > 0) { + pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0); + if (pid < 0) { + /* the fork failed */ + + if (atomic_read(&ch->kthreads_assigned) < + ch->kthreads_idle_limit) { + /* + * Flag this as an error only if we have an + * insufficient #of kthreads for the channel + * to function. + * + * No xpc_msgqueue_ref() is needed here since + * the channel mgr is doing this. 
+ */ + spin_lock_irqsave(&ch->lock, irq_flags); + XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources, + &irq_flags); + spin_unlock_irqrestore(&ch->lock, irq_flags); + } + break; + } + + /* + * The following is done on behalf of the newly created + * kthread. That kthread is responsible for doing the + * counterpart to the following before it exits. + */ + (void) xpc_part_ref(&xpc_partitions[ch->partid]); + xpc_msgqueue_ref(ch); + atomic_inc(&ch->kthreads_assigned); + ch->kthreads_created++; // >>> temporary debug only!!! + } +} + + +void +xpc_disconnect_wait(int ch_number) +{ + partid_t partid; + struct xpc_partition *part; + struct xpc_channel *ch; + + + /* now wait for all callouts to the caller's function to cease */ + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + part = &xpc_partitions[partid]; + + if (xpc_part_ref(part)) { + ch = &part->channels[ch_number]; + +// >>> how do we keep from falling into the window between our check and going +// >>> down and coming back up where sema is re-inited? + if (ch->flags & XPC_C_SETUP) { + (void) down(&ch->teardown_sema); + } + + xpc_part_deref(part); + } + } +} + + +static void +xpc_do_exit(void) +{ + partid_t partid; + int active_part_count; + struct xpc_partition *part; + + + /* now it's time to eliminate our heartbeat */ + del_timer_sync(&xpc_hb_timer); + xpc_vars->heartbeating_to_mask = 0; + + /* indicate to others that our reserved page is uninitialized */ + xpc_rsvd_page->vars_pa = 0; + + /* + * Ignore all incoming interrupts. Without interupts the heartbeat + * checker won't activate any new partitions that may come up. + */ + free_irq(SGI_XPC_ACTIVATE, NULL); + + /* + * Cause the heartbeat checker and the discovery threads to exit. + * We don't want them attempting to activate new partitions as we + * try to deactivate the existing ones. + */ + xpc_exiting = 1; + wake_up_interruptible(&xpc_act_IRQ_wq); + + /* wait for the heartbeat checker thread to mark itself inactive */ + down(&xpc_hb_checker_exited); + + /* wait for the discovery thread to mark itself inactive */ + down(&xpc_discovery_exited); + + + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(0.3 * HZ); + set_current_state(TASK_RUNNING); + + + /* wait for all partitions to become inactive */ + + do { + active_part_count = 0; + + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + part = &xpc_partitions[partid]; + if (part->act_state != XPC_P_INACTIVE) { + active_part_count++; + + XPC_DEACTIVATE_PARTITION(part, xpcUnloading); + } + } + + if (active_part_count) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(0.3 * HZ); + set_current_state(TASK_RUNNING); + } + + } while (active_part_count > 0); + + + /* close down protections for IPI operations */ + xpc_restrict_IPI_ops(); + + + /* clear the interface to XPC's functions */ + xpc_clear_interface(); + + if (xpc_sysctl) { + unregister_sysctl_table(xpc_sysctl); + } +} + + +int __init +xpc_init(void) +{ + int ret; + partid_t partid; + struct xpc_partition *part; + pid_t pid; + + + /* + * xpc_remote_copy_buffer is used as a temporary buffer for bte_copy'ng + * both a partition's reserved page and its XPC variables. Its size was + * based on the size of a reserved page. So we need to ensure that the + * XPC variables will fit as well. 
+ */ + if (XPC_VARS_ALIGNED_SIZE > XPC_RSVD_PAGE_ALIGNED_SIZE) { + dev_err(xpc_part, "xpc_remote_copy_buffer is not big enough\n"); + return -EPERM; + } + DBUG_ON((u64) xpc_remote_copy_buffer != + L1_CACHE_ALIGN((u64) xpc_remote_copy_buffer)); + + snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part"); + snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan"); + + xpc_sysctl = register_sysctl_table(xpc_sys_dir, 1); + + /* + * The first few fields of each entry of xpc_partitions[] need to + * be initialized now so that calls to xpc_connect() and + * xpc_disconnect() can be made prior to the activation of any remote + * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE + * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING + * PARTITION HAS BEEN ACTIVATED. + */ + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + part = &xpc_partitions[partid]; + + DBUG_ON((u64) part != L1_CACHE_ALIGN((u64) part)); + + part->act_IRQ_rcvd = 0; + spin_lock_init(&part->act_lock); + part->act_state = XPC_P_INACTIVE; + XPC_SET_REASON(part, 0, 0); + part->setup_state = XPC_P_UNSET; + init_waitqueue_head(&part->teardown_wq); + atomic_set(&part->references, 0); + } + + /* + * Open up protections for IPI operations (and AMO operations on + * Shub 1.1 systems). + */ + xpc_allow_IPI_ops(); + + /* + * Interrupts being processed will increment this atomic variable and + * awaken the heartbeat thread which will process the interrupts. + */ + atomic_set(&xpc_act_IRQ_rcvd, 0); + + /* + * This is safe to do before the xpc_hb_checker thread has started + * because the handler releases a wait queue. If an interrupt is + * received before the thread is waiting, it will not go to sleep, + * but rather immediately process the interrupt. + */ + ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0, + "xpc hb", NULL); + if (ret != 0) { + dev_err(xpc_part, "can't register ACTIVATE IRQ handler, " + "errno=%d\n", -ret); + + xpc_restrict_IPI_ops(); + + if (xpc_sysctl) { + unregister_sysctl_table(xpc_sysctl); + } + return -EBUSY; + } + + /* + * Fill the partition reserved page with the information needed by + * other partitions to discover we are alive and establish initial + * communications. + */ + xpc_rsvd_page = xpc_rsvd_page_init(); + if (xpc_rsvd_page == NULL) { + dev_err(xpc_part, "could not setup our reserved page\n"); + + free_irq(SGI_XPC_ACTIVATE, NULL); + xpc_restrict_IPI_ops(); + + if (xpc_sysctl) { + unregister_sysctl_table(xpc_sysctl); + } + return -EBUSY; + } + + + /* + * Set the beating to other partitions into motion. This is + * the last requirement for other partitions' discovery to + * initiate communications with us. + */ + init_timer(&xpc_hb_timer); + xpc_hb_timer.function = xpc_hb_beater; + xpc_hb_beater(0); + + + /* + * The real work-horse behind xpc. This processes incoming + * interrupts and monitors remote heartbeats. + */ + pid = kernel_thread(xpc_hb_checker, NULL, 0); + if (pid < 0) { + dev_err(xpc_part, "failed while forking hb check thread\n"); + + /* indicate to others that our reserved page is uninitialized */ + xpc_rsvd_page->vars_pa = 0; + + del_timer_sync(&xpc_hb_timer); + free_irq(SGI_XPC_ACTIVATE, NULL); + xpc_restrict_IPI_ops(); + + if (xpc_sysctl) { + unregister_sysctl_table(xpc_sysctl); + } + return -EBUSY; + } + + + /* + * Startup a thread that will attempt to discover other partitions to + * activate based on info provided by SAL. This new thread is short + * lived and will exit once discovery is complete. 
+ */ + pid = kernel_thread(xpc_initiate_discovery, NULL, 0); + if (pid < 0) { + dev_err(xpc_part, "failed while forking discovery thread\n"); + + /* mark this new thread as a non-starter */ + up(&xpc_discovery_exited); + + xpc_do_exit(); + return -EBUSY; + } + + + /* set the interface to point at XPC's functions */ + xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect, + xpc_initiate_allocate, xpc_initiate_send, + xpc_initiate_send_notify, xpc_initiate_received, + xpc_initiate_partid_to_nasids); + + return 0; +} +module_init(xpc_init); + + +void __exit +xpc_exit(void) +{ + xpc_do_exit(); +} +module_exit(xpc_exit); + + +MODULE_AUTHOR("Silicon Graphics, Inc."); +MODULE_DESCRIPTION("Cross Partition Communication (XPC) support"); +MODULE_LICENSE("GPL"); + +module_param(xpc_hb_interval, int, 0); +MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between " + "heartbeat increments."); + +module_param(xpc_hb_check_interval, int, 0); +MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between " + "heartbeat checks."); + diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c new file mode 100644 index 0000000..b31d9988 --- /dev/null +++ b/arch/ia64/sn/kernel/xpc_partition.c @@ -0,0 +1,971 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. + */ + + +/* + * Cross Partition Communication (XPC) partition support. + * + * This is the part of XPC that detects the presence/absence of + * other partitions. It provides a heartbeat and monitors the + * heartbeats of other partitions. + * + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "xpc.h" + + +/* XPC is exiting flag */ +int xpc_exiting; + + +/* SH_IPI_ACCESS shub register value on startup */ +static u64 xpc_sh1_IPI_access; +static u64 xpc_sh2_IPI_access0; +static u64 xpc_sh2_IPI_access1; +static u64 xpc_sh2_IPI_access2; +static u64 xpc_sh2_IPI_access3; + + +/* original protection values for each node */ +u64 xpc_prot_vec[MAX_COMPACT_NODES]; + + +/* this partition's reserved page */ +struct xpc_rsvd_page *xpc_rsvd_page; + +/* this partition's XPC variables (within the reserved page) */ +struct xpc_vars *xpc_vars; +struct xpc_vars_part *xpc_vars_part; + + +/* + * For performance reasons, each entry of xpc_partitions[] is cacheline + * aligned. And xpc_partitions[] is padded with an additional entry at the + * end so that the last legitimate entry doesn't share its cacheline with + * another variable. + */ +struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; + + +/* + * Generic buffer used to store a local copy of the remote partitions + * reserved page or XPC variables. + * + * xpc_discovery runs only once and is a seperate thread that is + * very likely going to be processing in parallel with receiving + * interrupts. + */ +char ____cacheline_aligned + xpc_remote_copy_buffer[XPC_RSVD_PAGE_ALIGNED_SIZE]; + + +/* systune related variables */ +int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL; +int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_TIMEOUT; + + +/* + * Given a nasid, get the physical address of the partition's reserved page + * for that nasid. This function returns 0 on any error. 
+ */ +static u64 +xpc_get_rsvd_page_pa(int nasid, u64 buf, u64 buf_size) +{ + bte_result_t bte_res; + s64 status; + u64 cookie = 0; + u64 rp_pa = nasid; /* seed with nasid */ + u64 len = 0; + + + while (1) { + + status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa, + &len); + + dev_dbg(xpc_part, "SAL returned with status=%li, cookie=" + "0x%016lx, address=0x%016lx, len=0x%016lx\n", + status, cookie, rp_pa, len); + + if (status != SALRET_MORE_PASSES) { + break; + } + + if (len > buf_size) { + dev_err(xpc_part, "len (=0x%016lx) > buf_size\n", len); + status = SALRET_ERROR; + break; + } + + bte_res = xp_bte_copy(rp_pa, ia64_tpa(buf), buf_size, + (BTE_NOTIFY | BTE_WACQUIRE), NULL); + if (bte_res != BTE_SUCCESS) { + dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res); + status = SALRET_ERROR; + break; + } + } + + if (status != SALRET_OK) { + rp_pa = 0; + } + dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa); + return rp_pa; +} + + +/* + * Fill the partition reserved page with the information needed by + * other partitions to discover we are alive and establish initial + * communications. + */ +struct xpc_rsvd_page * +xpc_rsvd_page_init(void) +{ + struct xpc_rsvd_page *rp; + AMO_t *amos_page; + u64 rp_pa, next_cl, nasid_array = 0; + int i, ret; + + + /* get the local reserved page's address */ + + rp_pa = xpc_get_rsvd_page_pa(cnodeid_to_nasid(0), + (u64) xpc_remote_copy_buffer, + XPC_RSVD_PAGE_ALIGNED_SIZE); + if (rp_pa == 0) { + dev_err(xpc_part, "SAL failed to locate the reserved page\n"); + return NULL; + } + rp = (struct xpc_rsvd_page *) __va(rp_pa); + + if (rp->partid != sn_partition_id) { + dev_err(xpc_part, "the reserved page's partid of %d should be " + "%d\n", rp->partid, sn_partition_id); + return NULL; + } + + rp->version = XPC_RP_VERSION; + + /* + * Place the XPC variables on the cache line following the + * reserved page structure. + */ + next_cl = (u64) rp + XPC_RSVD_PAGE_ALIGNED_SIZE; + xpc_vars = (struct xpc_vars *) next_cl; + + /* + * Before clearing xpc_vars, see if a page of AMOs had been previously + * allocated. If not we'll need to allocate one and set permissions + * so that cross-partition AMOs are allowed. + * + * The allocated AMO page needs MCA reporting to remain disabled after + * XPC has unloaded. To make this work, we keep a copy of the pointer + * to this page (i.e., amos_page) in the struct xpc_vars structure, + * which is pointed to by the reserved page, and re-use that saved copy + * on subsequent loads of XPC. This AMO page is never freed, and its + * memory protections are never restricted. + */ + if ((amos_page = xpc_vars->amos_page) == NULL) { + amos_page = (AMO_t *) mspec_kalloc_page(0); + if (amos_page == NULL) { + dev_err(xpc_part, "can't allocate page of AMOs\n"); + return NULL; + } + + /* + * Open up AMO-R/W to cpu. This is done for Shub 1.1 systems + * when xpc_allow_IPI_ops() is called via xpc_hb_init(). + */ + if (!enable_shub_wars_1_1()) { + ret = sn_change_memprotect(ia64_tpa((u64) amos_page), + PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1, + &nasid_array); + if (ret != 0) { + dev_err(xpc_part, "can't change memory " + "protections\n"); + mspec_kfree_page((unsigned long) amos_page); + return NULL; + } + } + } + + memset(xpc_vars, 0, sizeof(struct xpc_vars)); + + /* + * Place the XPC per partition specific variables on the cache line + * following the XPC variables structure. 
+ */ + next_cl += XPC_VARS_ALIGNED_SIZE; + memset((u64 *) next_cl, 0, sizeof(struct xpc_vars_part) * + XP_MAX_PARTITIONS); + xpc_vars_part = (struct xpc_vars_part *) next_cl; + xpc_vars->vars_part_pa = __pa(next_cl); + + xpc_vars->version = XPC_V_VERSION; + xpc_vars->act_nasid = cpuid_to_nasid(0); + xpc_vars->act_phys_cpuid = cpu_physical_id(0); + xpc_vars->amos_page = amos_page; /* save for next load of XPC */ + + + /* + * Initialize the activation related AMO variables. + */ + xpc_vars->act_amos = xpc_IPI_init(XP_MAX_PARTITIONS); + for (i = 1; i < XP_NASID_MASK_WORDS; i++) { + xpc_IPI_init(i + XP_MAX_PARTITIONS); + } + /* export AMO page's physical address to other partitions */ + xpc_vars->amos_page_pa = ia64_tpa((u64) xpc_vars->amos_page); + + /* + * This signifies to the remote partition that our reserved + * page is initialized. + */ + (volatile u64) rp->vars_pa = __pa(xpc_vars); + + return rp; +} + + +/* + * Change protections to allow IPI operations (and AMO operations on + * Shub 1.1 systems). + */ +void +xpc_allow_IPI_ops(void) +{ + int node; + int nasid; + + + // >>> Change SH_IPI_ACCESS code to use SAL call once it is available. + + if (is_shub2()) { + xpc_sh2_IPI_access0 = + (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS0)); + xpc_sh2_IPI_access1 = + (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS1)); + xpc_sh2_IPI_access2 = + (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS2)); + xpc_sh2_IPI_access3 = + (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS3)); + + for_each_online_node(node) { + nasid = cnodeid_to_nasid(node); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), + -1UL); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), + -1UL); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), + -1UL); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), + -1UL); + } + + } else { + xpc_sh1_IPI_access = + (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_IPI_ACCESS)); + + for_each_online_node(node) { + nasid = cnodeid_to_nasid(node); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), + -1UL); + + /* + * Since the BIST collides with memory operations on + * SHUB 1.1 sn_change_memprotect() cannot be used. + */ + if (enable_shub_wars_1_1()) { + /* open up everything */ + xpc_prot_vec[node] = (u64) HUB_L((u64 *) + GLOBAL_MMR_ADDR(nasid, + SH1_MD_DQLP_MMR_DIR_PRIVEC0)); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, + SH1_MD_DQLP_MMR_DIR_PRIVEC0), + -1UL); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, + SH1_MD_DQRP_MMR_DIR_PRIVEC0), + -1UL); + } + } + } +} + + +/* + * Restrict protections to disallow IPI operations (and AMO operations on + * Shub 1.1 systems). + */ +void +xpc_restrict_IPI_ops(void) +{ + int node; + int nasid; + + + // >>> Change SH_IPI_ACCESS code to use SAL call once it is available. 
+ + if (is_shub2()) { + + for_each_online_node(node) { + nasid = cnodeid_to_nasid(node); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), + xpc_sh2_IPI_access0); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), + xpc_sh2_IPI_access1); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), + xpc_sh2_IPI_access2); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), + xpc_sh2_IPI_access3); + } + + } else { + + for_each_online_node(node) { + nasid = cnodeid_to_nasid(node); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), + xpc_sh1_IPI_access); + + if (enable_shub_wars_1_1()) { + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, + SH1_MD_DQLP_MMR_DIR_PRIVEC0), + xpc_prot_vec[node]); + HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, + SH1_MD_DQRP_MMR_DIR_PRIVEC0), + xpc_prot_vec[node]); + } + } + } +} + + +/* + * At periodic intervals, scan through all active partitions and ensure + * their heartbeat is still active. If not, the partition is deactivated. + */ +void +xpc_check_remote_hb(void) +{ + struct xpc_vars *remote_vars; + struct xpc_partition *part; + partid_t partid; + bte_result_t bres; + + + remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer; + + for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + if (partid == sn_partition_id) { + continue; + } + + part = &xpc_partitions[partid]; + + if (part->act_state == XPC_P_INACTIVE || + part->act_state == XPC_P_DEACTIVATING) { + continue; + } + + /* pull the remote_hb cache line */ + bres = xp_bte_copy(part->remote_vars_pa, + ia64_tpa((u64) remote_vars), + XPC_VARS_ALIGNED_SIZE, + (BTE_NOTIFY | BTE_WACQUIRE), NULL); + if (bres != BTE_SUCCESS) { + XPC_DEACTIVATE_PARTITION(part, + xpc_map_bte_errors(bres)); + continue; + } + + dev_dbg(xpc_part, "partid = %d, heartbeat = %ld, last_heartbeat" + " = %ld, kdb_status = %ld, HB_mask = 0x%lx\n", partid, + remote_vars->heartbeat, part->last_heartbeat, + remote_vars->kdb_status, + remote_vars->heartbeating_to_mask); + + if (((remote_vars->heartbeat == part->last_heartbeat) && + (remote_vars->kdb_status == 0)) || + !XPC_HB_ALLOWED(sn_partition_id, remote_vars)) { + + XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat); + continue; + } + + part->last_heartbeat = remote_vars->heartbeat; + } +} + + +/* + * Get a copy of the remote partition's rsvd page. + * + * remote_rp points to a buffer that is cacheline aligned for BTE copies and + * assumed to be of size XPC_RSVD_PAGE_ALIGNED_SIZE. 
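/*
 * A hypothetical sketch of how such a buffer is obtained, mirroring the
 * xpc_kmalloc_cacheline_aligned() call made in xpc_discovery() below and
 * assuming that routine returns a pointer usable as the copy target.
 */
static struct xpc_rsvd_page *
my_alloc_remote_rp(void **base)
{
	/* '*base' receives the raw allocation that must later be kfree()'d */
	return xpc_kmalloc_cacheline_aligned(XPC_RSVD_PAGE_ALIGNED_SIZE,
					     GFP_KERNEL, base);
}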
+ */
+static enum xpc_retval
+xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
+ struct xpc_rsvd_page *remote_rp, u64 *remote_rsvd_page_pa)
+{
+ int bres, i;
+
+
+ /* get the reserved page's physical address */
+
+ *remote_rsvd_page_pa = xpc_get_rsvd_page_pa(nasid, (u64) remote_rp,
+ XPC_RSVD_PAGE_ALIGNED_SIZE);
+ if (*remote_rsvd_page_pa == 0) {
+ return xpcNoRsvdPageAddr;
+ }
+
+
+ /* pull over the reserved page structure */
+
+ bres = xp_bte_copy(*remote_rsvd_page_pa, ia64_tpa((u64) remote_rp),
+ XPC_RSVD_PAGE_ALIGNED_SIZE,
+ (BTE_NOTIFY | BTE_WACQUIRE), NULL);
+ if (bres != BTE_SUCCESS) {
+ return xpc_map_bte_errors(bres);
+ }
+
+
+ if (discovered_nasids != NULL) {
+ for (i = 0; i < XP_NASID_MASK_WORDS; i++) {
+ discovered_nasids[i] |= remote_rp->part_nasids[i];
+ }
+ }
+
+
+ /* check that the partid is for another partition */
+
+ if (remote_rp->partid < 1 ||
+ remote_rp->partid > (XP_MAX_PARTITIONS - 1)) {
+ return xpcInvalidPartid;
+ }
+
+ if (remote_rp->partid == sn_partition_id) {
+ return xpcLocalPartid;
+ }
+
+
+ if (XPC_VERSION_MAJOR(remote_rp->version) !=
+ XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
+ return xpcBadVersion;
+ }
+
+ return xpcSuccess;
+}
+
+
+/*
+ * Get a copy of the remote partition's XPC variables.
+ *
+ * remote_vars points to a buffer that is cacheline aligned for BTE copies and
+ * assumed to be of size XPC_VARS_ALIGNED_SIZE.
+ */
+static enum xpc_retval
+xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
+{
+ int bres;
+
+
+ if (remote_vars_pa == 0) {
+ return xpcVarsNotSet;
+ }
+
+
+ /* pull over the cross partition variables */
+
+ bres = xp_bte_copy(remote_vars_pa, ia64_tpa((u64) remote_vars),
+ XPC_VARS_ALIGNED_SIZE,
+ (BTE_NOTIFY | BTE_WACQUIRE), NULL);
+ if (bres != BTE_SUCCESS) {
+ return xpc_map_bte_errors(bres);
+ }
+
+ if (XPC_VERSION_MAJOR(remote_vars->version) !=
+ XPC_VERSION_MAJOR(XPC_V_VERSION)) {
+ return xpcBadVersion;
+ }
+
+ return xpcSuccess;
+}
+
+
+/*
+ * Prior code has determined the nasid which generated an IPI. Inspect
+ * that nasid to determine if its partition needs to be activated or
+ * deactivated.
+ *
+ * A partition is considered "awaiting activation" if our partition
+ * flags indicate it is not active and it has a heartbeat. A
+ * partition is considered "awaiting deactivation" if our partition
+ * flags indicate it is active but it has no heartbeat or it is not
+ * sending its heartbeat to us.
+ *
+ * To determine the heartbeat, the remote nasid must have a properly
+ * initialized reserved page.
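/*
 * A hypothetical condensation of the liveness rule described above,
 * mirroring the test made in xpc_check_remote_hb(); it assumes
 * XPC_HB_ALLOWED() reports whether the remote partition is currently
 * heartbeating to us.
 */
static int
my_remote_hb_alive(struct xpc_partition *part, struct xpc_vars *remote_vars)
{
	return (remote_vars->heartbeat != part->last_heartbeat ||
		remote_vars->kdb_status != 0) &&
	       XPC_HB_ALLOWED(sn_partition_id, remote_vars);
}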
+ */ +static void +xpc_identify_act_IRQ_req(int nasid) +{ + struct xpc_rsvd_page *remote_rp; + struct xpc_vars *remote_vars; + u64 remote_rsvd_page_pa; + u64 remote_vars_pa; + partid_t partid; + struct xpc_partition *part; + enum xpc_retval ret; + + + /* pull over the reserved page structure */ + + remote_rp = (struct xpc_rsvd_page *) xpc_remote_copy_buffer; + + ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rsvd_page_pa); + if (ret != xpcSuccess) { + dev_warn(xpc_part, "unable to get reserved page from nasid %d, " + "which sent interrupt, reason=%d\n", nasid, ret); + return; + } + + remote_vars_pa = remote_rp->vars_pa; + partid = remote_rp->partid; + part = &xpc_partitions[partid]; + + + /* pull over the cross partition variables */ + + remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer; + + ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); + if (ret != xpcSuccess) { + + dev_warn(xpc_part, "unable to get XPC variables from nasid %d, " + "which sent interrupt, reason=%d\n", nasid, ret); + + XPC_DEACTIVATE_PARTITION(part, ret); + return; + } + + + part->act_IRQ_rcvd++; + + dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = " + "%ld:0x%lx\n", (int) nasid, (int) partid, part->act_IRQ_rcvd, + remote_vars->heartbeat, remote_vars->heartbeating_to_mask); + + + if (part->act_state == XPC_P_INACTIVE) { + + part->remote_rp_pa = remote_rsvd_page_pa; + dev_dbg(xpc_part, " remote_rp_pa = 0x%016lx\n", + part->remote_rp_pa); + + part->remote_vars_pa = remote_vars_pa; + dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n", + part->remote_vars_pa); + + part->last_heartbeat = remote_vars->heartbeat; + dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n", + part->last_heartbeat); + + part->remote_vars_part_pa = remote_vars->vars_part_pa; + dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n", + part->remote_vars_part_pa); + + part->remote_act_nasid = remote_vars->act_nasid; + dev_dbg(xpc_part, " remote_act_nasid = 0x%x\n", + part->remote_act_nasid); + + part->remote_act_phys_cpuid = remote_vars->act_phys_cpuid; + dev_dbg(xpc_part, " remote_act_phys_cpuid = 0x%x\n", + part->remote_act_phys_cpuid); + + part->remote_amos_page_pa = remote_vars->amos_page_pa; + dev_dbg(xpc_part, " remote_amos_page_pa = 0x%lx\n", + part->remote_amos_page_pa); + + xpc_activate_partition(part); + + } else if (part->remote_amos_page_pa != remote_vars->amos_page_pa || + !XPC_HB_ALLOWED(sn_partition_id, remote_vars)) { + + part->reactivate_nasid = nasid; + XPC_DEACTIVATE_PARTITION(part, xpcReactivating); + } +} + + +/* + * Loop through the activation AMO variables and process any bits + * which are set. Each bit indicates a nasid sending a partition + * activation or deactivation request. + * + * Return #of IRQs detected. + */ +int +xpc_identify_act_IRQ_sender(void) +{ + int word, bit; + u64 nasid_mask; + u64 nasid; /* remote nasid */ + int n_IRQs_detected = 0; + AMO_t *act_amos; + struct xpc_rsvd_page *rp = (struct xpc_rsvd_page *) xpc_rsvd_page; + + + act_amos = xpc_vars->act_amos; + + + /* scan through act AMO variable looking for non-zero entries */ + for (word = 0; word < XP_NASID_MASK_WORDS; word++) { + + nasid_mask = xpc_IPI_receive(&act_amos[word]); + if (nasid_mask == 0) { + /* no IRQs from nasids in this variable */ + continue; + } + + dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word, + nasid_mask); + + + /* + * If this nasid has been added to the machine since + * our partition was reset, this will retain the + * remote nasid in our reserved pages machine mask. 
+ * This is used in the event of module reload.
+ */
+ rp->mach_nasids[word] |= nasid_mask;
+
+
+ /* locate the nasid(s) which sent interrupts */
+
+ for (bit = 0; bit < (8 * sizeof(u64)); bit++) {
+ if (nasid_mask & (1UL << bit)) {
+ n_IRQs_detected++;
+ nasid = XPC_NASID_FROM_W_B(word, bit);
+ dev_dbg(xpc_part, "interrupt from nasid %ld\n",
+ nasid);
+ xpc_identify_act_IRQ_req(nasid);
+ }
+ }
+ }
+ return n_IRQs_detected;
+}
+
+
+/*
+ * Mark specified partition as active.
+ */
+enum xpc_retval
+xpc_mark_partition_active(struct xpc_partition *part)
+{
+ unsigned long irq_flags;
+ enum xpc_retval ret;
+
+
+ dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));
+
+ spin_lock_irqsave(&part->act_lock, irq_flags);
+ if (part->act_state == XPC_P_ACTIVATING) {
+ part->act_state = XPC_P_ACTIVE;
+ ret = xpcSuccess;
+ } else {
+ DBUG_ON(part->reason == xpcSuccess);
+ ret = part->reason;
+ }
+ spin_unlock_irqrestore(&part->act_lock, irq_flags);
+
+ return ret;
+}
+
+
+/*
+ * Notify XPC that the partition is down.
+ */
+void
+xpc_deactivate_partition(const int line, struct xpc_partition *part,
+ enum xpc_retval reason)
+{
+ unsigned long irq_flags;
+ partid_t partid = XPC_PARTID(part);
+
+
+ spin_lock_irqsave(&part->act_lock, irq_flags);
+
+ if (part->act_state == XPC_P_INACTIVE) {
+ XPC_SET_REASON(part, reason, line);
+ spin_unlock_irqrestore(&part->act_lock, irq_flags);
+ if (reason == xpcReactivating) {
+ /* we interrupt ourselves to reactivate partition */
+ xpc_IPI_send_reactivate(part);
+ }
+ return;
+ }
+ if (part->act_state == XPC_P_DEACTIVATING) {
+ if ((part->reason == xpcUnloading && reason != xpcUnloading) ||
+ reason == xpcReactivating) {
+ XPC_SET_REASON(part, reason, line);
+ }
+ spin_unlock_irqrestore(&part->act_lock, irq_flags);
+ return;
+ }
+
+ part->act_state = XPC_P_DEACTIVATING;
+ XPC_SET_REASON(part, reason, line);
+
+ spin_unlock_irqrestore(&part->act_lock, irq_flags);
+
+ XPC_DISALLOW_HB(partid, xpc_vars);
+
+ dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n", partid,
+ reason);
+
+ xpc_partition_down(part, reason);
+}
+
+
+/*
+ * Mark specified partition as inactive.
+ */
+void
+xpc_mark_partition_inactive(struct xpc_partition *part)
+{
+ unsigned long irq_flags;
+
+
+ dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
+ XPC_PARTID(part));
+
+ spin_lock_irqsave(&part->act_lock, irq_flags);
+ part->act_state = XPC_P_INACTIVE;
+ spin_unlock_irqrestore(&part->act_lock, irq_flags);
+ part->remote_rp_pa = 0;
+}
+
+
+/*
+ * SAL has provided a partition and machine mask. The partition mask
+ * contains a bit for each even nasid in our partition. The machine
+ * mask contains a bit for each even nasid in the entire machine.
+ *
+ * Using those two bit arrays, we can determine which nasids are
+ * known in the machine. Each should also have a reserved page
+ * initialized if they are available for partitioning.
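/*
 * A hypothetical sketch of the mask test performed below via
 * XPC_NASID_IN_ARRAY(): it assumes each bit stands for one even nasid,
 * i.e. bit (nasid / 2) spread across XP_NASID_MASK_WORDS 64-bit words,
 * matching the inverse mapping used by XPC_NASID_FROM_W_B() above.
 */
static inline int
my_nasid_in_mask(int nasid, u64 *mask)
{
	int b = nasid / 2;

	return (mask[b / 64] & (1UL << (b % 64))) != 0;
}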
+ */ +void +xpc_discovery(void) +{ + void *remote_rp_base; + struct xpc_rsvd_page *remote_rp; + struct xpc_vars *remote_vars; + u64 remote_rsvd_page_pa; + u64 remote_vars_pa; + int region; + int max_regions; + int nasid; + struct xpc_rsvd_page *rp; + partid_t partid; + struct xpc_partition *part; + u64 *discovered_nasids; + enum xpc_retval ret; + + + remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RSVD_PAGE_ALIGNED_SIZE, + GFP_KERNEL, &remote_rp_base); + if (remote_rp == NULL) { + return; + } + remote_vars = (struct xpc_vars *) remote_rp; + + + discovered_nasids = kmalloc(sizeof(u64) * XP_NASID_MASK_WORDS, + GFP_KERNEL); + if (discovered_nasids == NULL) { + kfree(remote_rp_base); + return; + } + memset(discovered_nasids, 0, sizeof(u64) * XP_NASID_MASK_WORDS); + + rp = (struct xpc_rsvd_page *) xpc_rsvd_page; + + /* + * The term 'region' in this context refers to the minimum number of + * nodes that can comprise an access protection grouping. The access + * protection is in regards to memory, IOI and IPI. + */ +//>>> move the next two #defines into either include/asm-ia64/sn/arch.h or +//>>> include/asm-ia64/sn/addrs.h +#define SH1_MAX_REGIONS 64 +#define SH2_MAX_REGIONS 256 + max_regions = is_shub2() ? SH2_MAX_REGIONS : SH1_MAX_REGIONS; + + for (region = 0; region < max_regions; region++) { + + if ((volatile int) xpc_exiting) { + break; + } + + dev_dbg(xpc_part, "searching region %d\n", region); + + for (nasid = (region * sn_region_size * 2); + nasid < ((region + 1) * sn_region_size * 2); + nasid += 2) { + + if ((volatile int) xpc_exiting) { + break; + } + + dev_dbg(xpc_part, "checking nasid %d\n", nasid); + + + if (XPC_NASID_IN_ARRAY(nasid, rp->part_nasids)) { + dev_dbg(xpc_part, "PROM indicates Nasid %d is " + "part of the local partition; skipping " + "region\n", nasid); + break; + } + + if (!(XPC_NASID_IN_ARRAY(nasid, rp->mach_nasids))) { + dev_dbg(xpc_part, "PROM indicates Nasid %d was " + "not on Numa-Link network at reset\n", + nasid); + continue; + } + + if (XPC_NASID_IN_ARRAY(nasid, discovered_nasids)) { + dev_dbg(xpc_part, "Nasid %d is part of a " + "partition which was previously " + "discovered\n", nasid); + continue; + } + + + /* pull over the reserved page structure */ + + ret = xpc_get_remote_rp(nasid, discovered_nasids, + remote_rp, &remote_rsvd_page_pa); + if (ret != xpcSuccess) { + dev_dbg(xpc_part, "unable to get reserved page " + "from nasid %d, reason=%d\n", nasid, + ret); + + if (ret == xpcLocalPartid) { + break; + } + continue; + } + + remote_vars_pa = remote_rp->vars_pa; + + partid = remote_rp->partid; + part = &xpc_partitions[partid]; + + + /* pull over the cross partition variables */ + + ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); + if (ret != xpcSuccess) { + dev_dbg(xpc_part, "unable to get XPC variables " + "from nasid %d, reason=%d\n", nasid, + ret); + + XPC_DEACTIVATE_PARTITION(part, ret); + continue; + } + + if (part->act_state != XPC_P_INACTIVE) { + dev_dbg(xpc_part, "partition %d on nasid %d is " + "already activating\n", partid, nasid); + break; + } + + /* + * Register the remote partition's AMOs with SAL so it + * can handle and cleanup errors within that address + * range should the remote partition go down. We don't + * unregister this range because it is difficult to + * tell when outstanding writes to the remote partition + * are finished and thus when it is thus safe to + * unregister. 
This should not result in wasted space + * in the SAL xp_addr_region table because we should + * get the same page for remote_act_amos_pa after + * module reloads and system reboots. + */ + if (sn_register_xp_addr_region( + remote_vars->amos_page_pa, + PAGE_SIZE, 1) < 0) { + dev_dbg(xpc_part, "partition %d failed to " + "register xp_addr region 0x%016lx\n", + partid, remote_vars->amos_page_pa); + + XPC_SET_REASON(part, xpcPhysAddrRegFailed, + __LINE__); + break; + } + + /* + * The remote nasid is valid and available. + * Send an interrupt to that nasid to notify + * it that we are ready to begin activation. + */ + dev_dbg(xpc_part, "sending an interrupt to AMO 0x%lx, " + "nasid %d, phys_cpuid 0x%x\n", + remote_vars->amos_page_pa, + remote_vars->act_nasid, + remote_vars->act_phys_cpuid); + + xpc_IPI_send_activate(remote_vars); + } + } + + kfree(discovered_nasids); + kfree(remote_rp_base); +} + + +/* + * Given a partid, get the nasids owned by that partition from the + * remote partitions reserved page. + */ +enum xpc_retval +xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask) +{ + struct xpc_partition *part; + u64 part_nasid_pa; + int bte_res; + + + part = &xpc_partitions[partid]; + if (part->remote_rp_pa == 0) { + return xpcPartitionDown; + } + + part_nasid_pa = part->remote_rp_pa + + (u64) &((struct xpc_rsvd_page *) 0)->part_nasids; + + bte_res = xp_bte_copy(part_nasid_pa, ia64_tpa((u64) nasid_mask), + L1_CACHE_ALIGN(XP_NASID_MASK_BYTES), + (BTE_NOTIFY | BTE_WACQUIRE), NULL); + + return xpc_map_bte_errors(bte_res); +} + -- cgit v1.1 From a2d974da0afe659cff98913184a97c0ee686d02b Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Wed, 23 Mar 2005 20:50:00 -0700 Subject: [IA64-SGI] SGI Altix cross partition functionality [3/3] This patch contains the cross partition pseudo-ethernet driver (XPNET) functional support module. Signed-off-by: Dean Nelson Signed-off-by: Tony Luck --- arch/ia64/sn/kernel/Makefile | 1 + arch/ia64/sn/kernel/xpnet.c | 715 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 716 insertions(+) create mode 100644 arch/ia64/sn/kernel/xpnet.c (limited to 'arch/ia64') diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile index 6959736..4351c4f 100644 --- a/arch/ia64/sn/kernel/Makefile +++ b/arch/ia64/sn/kernel/Makefile @@ -15,3 +15,4 @@ obj-$(CONFIG_IA64_SGI_SN_XP) += xp.o xp-y := xp_main.o xp_nofault.o obj-$(CONFIG_IA64_SGI_SN_XP) += xpc.o xpc-y := xpc_main.o xpc_channel.o xpc_partition.o +obj-$(CONFIG_IA64_SGI_SN_XP) += xpnet.o diff --git a/arch/ia64/sn/kernel/xpnet.c b/arch/ia64/sn/kernel/xpnet.c new file mode 100644 index 0000000..78c13d6 --- /dev/null +++ b/arch/ia64/sn/kernel/xpnet.c @@ -0,0 +1,715 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved. + */ + + +/* + * Cross Partition Network Interface (XPNET) support + * + * XPNET provides a virtual network layered on top of the Cross + * Partition communication layer. + * + * XPNET provides direct point-to-point and broadcast-like support + * for an ethernet-like device. The ethernet broadcast medium is + * replaced with a point-to-point message structure which passes + * pointers to a DMA-capable block that a remote partition should + * retrieve and pass to the upper level networking layer. 
+ * + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* + * The message payload transferred by XPC. + * + * buf_pa is the physical address where the DMA should pull from. + * + * NOTE: for performance reasons, buf_pa should _ALWAYS_ begin on a + * cacheline boundary. To accomplish this, we record the number of + * bytes from the beginning of the first cacheline to the first useful + * byte of the skb (leadin_ignore) and the number of bytes from the + * last useful byte of the skb to the end of the last cacheline + * (tailout_ignore). + * + * size is the number of bytes to transfer which includes the skb->len + * (useful bytes of the senders skb) plus the leadin and tailout + */ +struct xpnet_message { + u16 version; /* Version for this message */ + u16 embedded_bytes; /* #of bytes embedded in XPC message */ + u32 magic; /* Special number indicating this is xpnet */ + u64 buf_pa; /* phys address of buffer to retrieve */ + u32 size; /* #of bytes in buffer */ + u8 leadin_ignore; /* #of bytes to ignore at the beginning */ + u8 tailout_ignore; /* #of bytes to ignore at the end */ + unsigned char data; /* body of small packets */ +}; + +/* + * Determine the size of our message, the cacheline aligned size, + * and then the number of message will request from XPC. + * + * XPC expects each message to exist in an individual cacheline. + */ +#define XPNET_MSG_SIZE (L1_CACHE_BYTES - XPC_MSG_PAYLOAD_OFFSET) +#define XPNET_MSG_DATA_MAX \ + (XPNET_MSG_SIZE - (u64)(&((struct xpnet_message *)0)->data)) +#define XPNET_MSG_ALIGNED_SIZE (L1_CACHE_ALIGN(XPNET_MSG_SIZE)) +#define XPNET_MSG_NENTRIES (PAGE_SIZE / XPNET_MSG_ALIGNED_SIZE) + + +#define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1) +#define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1) + +/* + * Version number of XPNET implementation. XPNET can always talk to versions + * with same major #, and never talk to versions with a different version. + */ +#define _XPNET_VERSION(_major, _minor) (((_major) << 4) | (_minor)) +#define XPNET_VERSION_MAJOR(_v) ((_v) >> 4) +#define XPNET_VERSION_MINOR(_v) ((_v) & 0xf) + +#define XPNET_VERSION _XPNET_VERSION(1,0) /* version 1.0 */ +#define XPNET_VERSION_EMBED _XPNET_VERSION(1,1) /* version 1.1 */ +#define XPNET_MAGIC 0x88786984 /* "XNET" */ + +#define XPNET_VALID_MSG(_m) \ + ((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \ + && (msg->magic == XPNET_MAGIC)) + +#define XPNET_DEVICE_NAME "xp0" + + +/* + * When messages are queued with xpc_send_notify, a kmalloc'd buffer + * of the following type is passed as a notification cookie. When the + * notification function is called, we use the cookie to decide + * whether all outstanding message sends have completed. The skb can + * then be released. + */ +struct xpnet_pending_msg { + struct list_head free_list; + struct sk_buff *skb; + atomic_t use_count; +}; + +/* driver specific structure pointed to by the device structure */ +struct xpnet_dev_private { + struct net_device_stats stats; +}; + +struct net_device *xpnet_device; + +/* + * When we are notified of other partitions activating, we add them to + * our bitmask of partitions to which we broadcast. 
+ */ +static u64 xpnet_broadcast_partitions; +/* protect above */ +static spinlock_t xpnet_broadcast_lock = SPIN_LOCK_UNLOCKED; + +/* + * Since the Block Transfer Engine (BTE) is being used for the transfer + * and it relies upon cache-line size transfers, we need to reserve at + * least one cache-line for head and tail alignment. The BTE is + * limited to 8MB transfers. + * + * Testing has shown that changing MTU to greater than 64KB has no effect + * on TCP as the two sides negotiate a Max Segment Size that is limited + * to 64K. Other protocols May use packets greater than this, but for + * now, the default is 64KB. + */ +#define XPNET_MAX_MTU (0x800000UL - L1_CACHE_BYTES) +/* 32KB has been determined to be the ideal */ +#define XPNET_DEF_MTU (0x8000UL) + + +/* + * The partition id is encapsulated in the MAC address. The following + * define locates the octet the partid is in. + */ +#define XPNET_PARTID_OCTET 1 +#define XPNET_LICENSE_OCTET 2 + + +/* + * Define the XPNET debug device structure that is to be used with dev_dbg(), + * dev_err(), dev_warn(), and dev_info(). + */ +struct device_driver xpnet_dbg_name = { + .name = "xpnet" +}; + +struct device xpnet_dbg_subname = { + .bus_id = {0}, /* set to "" */ + .driver = &xpnet_dbg_name +}; + +struct device *xpnet = &xpnet_dbg_subname; + +/* + * Packet was recevied by XPC and forwarded to us. + */ +static void +xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg) +{ + struct sk_buff *skb; + bte_result_t bret; + struct xpnet_dev_private *priv = + (struct xpnet_dev_private *) xpnet_device->priv; + + + if (!XPNET_VALID_MSG(msg)) { + /* + * Packet with a different XPC version. Ignore. + */ + xpc_received(partid, channel, (void *) msg); + + priv->stats.rx_errors++; + + return; + } + dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size, + msg->leadin_ignore, msg->tailout_ignore); + + + /* reserve an extra cache line */ + skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES); + if (!skb) { + dev_err(xpnet, "failed on dev_alloc_skb(%d)\n", + msg->size + L1_CACHE_BYTES); + + xpc_received(partid, channel, (void *) msg); + + priv->stats.rx_errors++; + + return; + } + + /* + * The allocated skb has some reserved space. + * In order to use bte_copy, we need to get the + * skb->data pointer moved forward. + */ + skb_reserve(skb, (L1_CACHE_BYTES - ((u64)skb->data & + (L1_CACHE_BYTES - 1)) + + msg->leadin_ignore)); + + /* + * Update the tail pointer to indicate data actually + * transferred. + */ + skb_put(skb, (msg->size - msg->leadin_ignore - msg->tailout_ignore)); + + /* + * Move the data over from the the other side. + */ + if ((XPNET_VERSION_MINOR(msg->version) == 1) && + (msg->embedded_bytes != 0)) { + dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, " + "%lu)\n", skb->data, &msg->data, + (size_t) msg->embedded_bytes); + + memcpy(skb->data, &msg->data, (size_t) msg->embedded_bytes); + } else { + dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t" + "bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa, + (void *)__pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)), + msg->size); + + bret = bte_copy(msg->buf_pa, + __pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)), + msg->size, (BTE_NOTIFY | BTE_WACQUIRE), NULL); + + if (bret != BTE_SUCCESS) { + // >>> Need better way of cleaning skb. Currently skb + // >>> appears in_use and we can't just call + // >>> dev_kfree_skb. 
+ dev_err(xpnet, "bte_copy(0x%p, 0x%p, 0x%hx) returned " + "error=0x%x\n", (void *)msg->buf_pa, + (void *)__pa((u64)skb->data & + ~(L1_CACHE_BYTES - 1)), + msg->size, bret); + + xpc_received(partid, channel, (void *) msg); + + priv->stats.rx_errors++; + + return; + } + } + + dev_dbg(xpnet, "head=0x%p skb->data=0x%p skb->tail=0x%p " + "skb->end=0x%p skb->len=%d\n", (void *) skb->head, + (void *) skb->data, (void *) skb->tail, (void *) skb->end, + skb->len); + + skb->dev = xpnet_device; + skb->protocol = eth_type_trans(skb, xpnet_device); + skb->ip_summed = CHECKSUM_UNNECESSARY; + + dev_dbg(xpnet, "passing skb to network layer; \n\tskb->head=0x%p " + "skb->data=0x%p skb->tail=0x%p skb->end=0x%p skb->len=%d\n", + (void *) skb->head, (void *) skb->data, (void *) skb->tail, + (void *) skb->end, skb->len); + + + xpnet_device->last_rx = jiffies; + priv->stats.rx_packets++; + priv->stats.rx_bytes += skb->len + ETH_HLEN; + + netif_rx_ni(skb); + xpc_received(partid, channel, (void *) msg); +} + + +/* + * This is the handler which XPC calls during any sort of change in + * state or message reception on a connection. + */ +static void +xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel, + void *data, void *key) +{ + long bp; + + + DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + DBUG_ON(channel != XPC_NET_CHANNEL); + + switch(reason) { + case xpcMsgReceived: /* message received */ + DBUG_ON(data == NULL); + + xpnet_receive(partid, channel, (struct xpnet_message *) data); + break; + + case xpcConnected: /* connection completed to a partition */ + spin_lock_bh(&xpnet_broadcast_lock); + xpnet_broadcast_partitions |= 1UL << (partid -1 ); + bp = xpnet_broadcast_partitions; + spin_unlock_bh(&xpnet_broadcast_lock); + + netif_carrier_on(xpnet_device); + + dev_dbg(xpnet, "%s connection created to partition %d; " + "xpnet_broadcast_partitions=0x%lx\n", + xpnet_device->name, partid, bp); + break; + + default: + spin_lock_bh(&xpnet_broadcast_lock); + xpnet_broadcast_partitions &= ~(1UL << (partid -1 )); + bp = xpnet_broadcast_partitions; + spin_unlock_bh(&xpnet_broadcast_lock); + + if (bp == 0) { + netif_carrier_off(xpnet_device); + } + + dev_dbg(xpnet, "%s disconnected from partition %d; " + "xpnet_broadcast_partitions=0x%lx\n", + xpnet_device->name, partid, bp); + break; + + } +} + + +static int +xpnet_dev_open(struct net_device *dev) +{ + enum xpc_retval ret; + + + dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %d, " + "%d)\n", XPC_NET_CHANNEL, xpnet_connection_activity, + XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, XPNET_MAX_KTHREADS, + XPNET_MAX_IDLE_KTHREADS); + + ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL, + XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, + XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS); + if (ret != xpcSuccess) { + dev_err(xpnet, "ifconfig up of %s failed on XPC connect, " + "ret=%d\n", dev->name, ret); + + return -ENOMEM; + } + + dev_dbg(xpnet, "ifconfig up of %s; XPC connected\n", dev->name); + + return 0; +} + + +static int +xpnet_dev_stop(struct net_device *dev) +{ + xpc_disconnect(XPC_NET_CHANNEL); + + dev_dbg(xpnet, "ifconfig down of %s; XPC disconnected\n", dev->name); + + return 0; +} + + +static int +xpnet_dev_change_mtu(struct net_device *dev, int new_mtu) +{ + /* 68 comes from min TCP+IP+MAC header */ + if ((new_mtu < 68) || (new_mtu > XPNET_MAX_MTU)) { + dev_err(xpnet, "ifconfig %s mtu %d failed; value must be " + "between 68 and %ld\n", dev->name, new_mtu, + XPNET_MAX_MTU); + return -EINVAL; + } + + dev->mtu = new_mtu; + 
dev_dbg(xpnet, "ifconfig %s mtu set to %d\n", dev->name, new_mtu); + return 0; +} + + +/* + * Required for the net_device structure. + */ +static int +xpnet_dev_set_config(struct net_device *dev, struct ifmap *new_map) +{ + return 0; +} + + +/* + * Return statistics to the caller. + */ +static struct net_device_stats * +xpnet_dev_get_stats(struct net_device *dev) +{ + struct xpnet_dev_private *priv; + + + priv = (struct xpnet_dev_private *) dev->priv; + + return &priv->stats; +} + + +/* + * Notification that the other end has received the message and + * DMA'd the skb information. At this point, they are done with + * our side. When all recipients are done processing, we + * release the skb and then release our pending message structure. + */ +static void +xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel, + void *__qm) +{ + struct xpnet_pending_msg *queued_msg = + (struct xpnet_pending_msg *) __qm; + + + DBUG_ON(queued_msg == NULL); + + dev_dbg(xpnet, "message to %d notified with reason %d\n", + partid, reason); + + if (atomic_dec_return(&queued_msg->use_count) == 0) { + dev_dbg(xpnet, "all acks for skb->head=-x%p\n", + (void *) queued_msg->skb->head); + + dev_kfree_skb_any(queued_msg->skb); + kfree(queued_msg); + } +} + + +/* + * Network layer has formatted a packet (skb) and is ready to place it + * "on the wire". Prepare and send an xpnet_message to all partitions + * which have connected with us and are targets of this packet. + * + * MAC-NOTE: For the XPNET driver, the MAC address contains the + * destination partition_id. If the destination partition id word + * is 0xff, this packet is to broadcast to all partitions. + */ +static int +xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct xpnet_pending_msg *queued_msg; + enum xpc_retval ret; + struct xpnet_message *msg; + u64 start_addr, end_addr; + long dp; + u8 second_mac_octet; + partid_t dest_partid; + struct xpnet_dev_private *priv; + u16 embedded_bytes; + + + priv = (struct xpnet_dev_private *) dev->priv; + + + dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p " + "skb->end=0x%p skb->len=%d\n", (void *) skb->head, + (void *) skb->data, (void *) skb->tail, (void *) skb->end, + skb->len); + + + /* + * The xpnet_pending_msg tracks how many outstanding + * xpc_send_notifies are relying on this skb. When none + * remain, release the skb. + */ + queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC); + if (queued_msg == NULL) { + dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping " + "packet\n", sizeof(struct xpnet_pending_msg)); + + priv->stats.tx_errors++; + + return -ENOMEM; + } + + + /* get the beginning of the first cacheline and end of last */ + start_addr = ((u64) skb->data & ~(L1_CACHE_BYTES - 1)); + end_addr = L1_CACHE_ALIGN((u64) skb->tail); + + /* calculate how many bytes to embed in the XPC message */ + embedded_bytes = 0; + if (unlikely(skb->len <= XPNET_MSG_DATA_MAX)) { + /* skb->data does fit so embed */ + embedded_bytes = skb->len; + } + + + /* + * Since the send occurs asynchronously, we set the count to one + * and begin sending. Any sends that happen to complete before + * we are done sending will not free the skb. We will be left + * with that task during exit. This also handles the case of + * a packet destined for a partition which is no longer up. 
+ */ + atomic_set(&queued_msg->use_count, 1); + queued_msg->skb = skb; + + + second_mac_octet = skb->data[XPNET_PARTID_OCTET]; + if (second_mac_octet == 0xff) { + /* we are being asked to broadcast to all partitions */ + dp = xpnet_broadcast_partitions; + } else if (second_mac_octet != 0) { + dp = xpnet_broadcast_partitions & + (1UL << (second_mac_octet - 1)); + } else { + /* 0 is an invalid partid. Ignore */ + dp = 0; + } + dev_dbg(xpnet, "destination Partitions mask (dp) = 0x%lx\n", dp); + + /* + * If we wanted to allow promiscous mode to work like an + * unswitched network, this would be a good point to OR in a + * mask of partitions which should be receiving all packets. + */ + + /* + * Main send loop. + */ + for (dest_partid = 1; dp && dest_partid < XP_MAX_PARTITIONS; + dest_partid++) { + + + if (!(dp & (1UL << (dest_partid - 1)))) { + /* not destined for this partition */ + continue; + } + + /* remove this partition from the destinations mask */ + dp &= ~(1UL << (dest_partid - 1)); + + + /* found a partition to send to */ + + ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL, + XPC_NOWAIT, (void **)&msg); + if (unlikely(ret != xpcSuccess)) { + continue; + } + + msg->embedded_bytes = embedded_bytes; + if (unlikely(embedded_bytes != 0)) { + msg->version = XPNET_VERSION_EMBED; + dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n", + &msg->data, skb->data, (size_t) embedded_bytes); + memcpy(&msg->data, skb->data, (size_t) embedded_bytes); + } else { + msg->version = XPNET_VERSION; + } + msg->magic = XPNET_MAGIC; + msg->size = end_addr - start_addr; + msg->leadin_ignore = (u64) skb->data - start_addr; + msg->tailout_ignore = end_addr - (u64) skb->tail; + msg->buf_pa = __pa(start_addr); + + dev_dbg(xpnet, "sending XPC message to %d:%d\nmsg->buf_pa=" + "0x%lx, msg->size=%u, msg->leadin_ignore=%u, " + "msg->tailout_ignore=%u\n", dest_partid, + XPC_NET_CHANNEL, msg->buf_pa, msg->size, + msg->leadin_ignore, msg->tailout_ignore); + + + atomic_inc(&queued_msg->use_count); + + ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg, + xpnet_send_completed, queued_msg); + if (unlikely(ret != xpcSuccess)) { + atomic_dec(&queued_msg->use_count); + continue; + } + + } + + if (atomic_dec_return(&queued_msg->use_count) == 0) { + dev_dbg(xpnet, "no partitions to receive packet destined for " + "%d\n", dest_partid); + + + dev_kfree_skb(skb); + kfree(queued_msg); + } + + priv->stats.tx_packets++; + priv->stats.tx_bytes += skb->len; + + return 0; +} + + +/* + * Deal with transmit timeouts coming from the network layer. + */ +static void +xpnet_dev_tx_timeout (struct net_device *dev) +{ + struct xpnet_dev_private *priv; + + + priv = (struct xpnet_dev_private *) dev->priv; + + priv->stats.tx_errors++; + return; +} + + +static int __init +xpnet_init(void) +{ + int i; + u32 license_num; + int result = -ENOMEM; + + + dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME); + + /* + * use ether_setup() to init the majority of our device + * structure and then override the necessary pieces. 
+ */ + xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private), + XPNET_DEVICE_NAME, ether_setup); + if (xpnet_device == NULL) { + return -ENOMEM; + } + + netif_carrier_off(xpnet_device); + + xpnet_device->mtu = XPNET_DEF_MTU; + xpnet_device->change_mtu = xpnet_dev_change_mtu; + xpnet_device->open = xpnet_dev_open; + xpnet_device->get_stats = xpnet_dev_get_stats; + xpnet_device->stop = xpnet_dev_stop; + xpnet_device->hard_start_xmit = xpnet_dev_hard_start_xmit; + xpnet_device->tx_timeout = xpnet_dev_tx_timeout; + xpnet_device->set_config = xpnet_dev_set_config; + + /* + * Multicast assumes the LSB of the first octet is set for multicast + * MAC addresses. We chose the first octet of the MAC to be unlikely + * to collide with any vendor's officially issued MAC. + */ + xpnet_device->dev_addr[0] = 0xfe; + xpnet_device->dev_addr[XPNET_PARTID_OCTET] = sn_partition_id; + license_num = sn_partition_serial_number_val(); + for (i = 3; i >= 0; i--) { + xpnet_device->dev_addr[XPNET_LICENSE_OCTET + i] = + license_num & 0xff; + license_num = license_num >> 8; + } + + /* + * ether_setup() sets this to a multicast device. We are + * really not supporting multicast at this time. + */ + xpnet_device->flags &= ~IFF_MULTICAST; + + /* + * No need to checksum as it is a DMA transfer. The BTE will + * report an error if the data is not retrievable and the + * packet will be dropped. + */ + xpnet_device->features = NETIF_F_NO_CSUM; + + result = register_netdev(xpnet_device); + if (result != 0) { + free_netdev(xpnet_device); + } + + return result; +} +module_init(xpnet_init); + + +static void __exit +xpnet_exit(void) +{ + dev_info(xpnet, "unregistering network device %s\n", + xpnet_device[0].name); + + unregister_netdev(xpnet_device); + + free_netdev(xpnet_device); +} +module_exit(xpnet_exit); + + +MODULE_AUTHOR("Silicon Graphics, Inc."); +MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)"); +MODULE_LICENSE("GPL"); + -- cgit v1.1 From 3a7d555bfc4d4631d9118fb4d0ed7ab62cc2ca1c Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Mon, 4 Apr 2005 13:14:00 -0700 Subject: [IA64-SGI] convert AMO address found in XPC's reserved page This patch detects the existence of an uncached physical AMO address setup by EFI's XPBOOT (SGI) and converts it to an uncached virtual AMO address. Depends on a patch submitted on 23 March 2005 with the subject of: [PATCH 2/3] SGI Altix cross partition functionality (2nd revision) Signed-off-by: Dean Nelson Signed-off-by: Tony Luck --- arch/ia64/sn/kernel/xpc_partition.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c index b31d9988..2c3c4a8 100644 --- a/arch/ia64/sn/kernel/xpc_partition.c +++ b/arch/ia64/sn/kernel/xpc_partition.c @@ -204,6 +204,19 @@ xpc_rsvd_page_init(void) return NULL; } } + } else if (!IS_AMO_ADDRESS((u64) amos_page)) { + /* + * EFI's XPBOOT can also set amos_page in the reserved page, + * but it happens to leave it as an uncached physical address + * and we need it to be an uncached virtual, so we'll have to + * convert it. + */ + if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) { + dev_err(xpc_part, "previously used amos_page address " + "is bad = 0x%p\n", (void *) amos_page); + return NULL; + } + amos_page = (AMO_t *) TO_AMO((u64) amos_page); } memset(xpc_vars, 0, sizeof(struct xpc_vars)); @@ -944,7 +957,7 @@ xpc_discovery(void) /* * Given a partid, get the nasids owned by that partition from the - * remote partitions reserved page. 
+ * remote partition's reserved page. */ enum xpc_retval xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask) -- cgit v1.1 From c0b12422e5e1d041026dd27074de17d2d7e32c4e Mon Sep 17 00:00:00 2001 From: Colin Ngam Date: Fri, 18 Mar 2005 16:38:00 -0700 Subject: [IA64-SGI] Altix only: Register Error Interrupt The following patch ensures that the correct error interrupt handling routine is initialized. This patch is based on the 2.6.12 ia64 release tree. Signed-off-by: Colin Ngam Signed-off-by: Tony Luck --- arch/ia64/sn/kernel/io_init.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c index 18160a0..9e07f54 100644 --- a/arch/ia64/sn/kernel/io_init.c +++ b/arch/ia64/sn/kernel/io_init.c @@ -174,6 +174,12 @@ static void sn_fixup_ionodes(void) if (status) continue; + /* Attach the error interrupt handlers */ + if (nasid & 1) + ice_error_init(hubdev); + else + hub_error_init(hubdev); + for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev; @@ -211,10 +217,6 @@ static void sn_fixup_ionodes(void) sn_flush_device_list; } - if (!(i & 1)) - hub_error_init(hubdev); - else - ice_error_init(hubdev); } } -- cgit v1.1 From bb0fc085457cf6a865b8232b0cefab3a7819df44 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Thu, 24 Mar 2005 22:58:00 -0700 Subject: [IA64] use common pxm function This patch simplifies a couple places where we search for _PXM values in ACPI namespace. Thanks, Signed-off-by: Alex Williamson Signed-off-by: Tony Luck --- arch/ia64/hp/common/sba_iommu.c | 34 ++++------------------------------ arch/ia64/kernel/acpi.c | 23 +++++------------------ 2 files changed, 9 insertions(+), 48 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index 6a8fcba..b8db6e3 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -1944,43 +1944,17 @@ sba_connect_bus(struct pci_bus *bus) static void __init sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle) { - struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; - union acpi_object *obj; - acpi_handle phandle; unsigned int node; + int pxm; ioc->node = MAX_NUMNODES; - /* - * Check for a _PXM on this node first. We don't typically see - * one here, so we'll end up getting it from the parent. 
- */ - if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer))) { - if (ACPI_FAILURE(acpi_get_parent(handle, &phandle))) - return; - - /* Reset the acpi buffer */ - buffer.length = ACPI_ALLOCATE_BUFFER; - buffer.pointer = NULL; - - if (ACPI_FAILURE(acpi_evaluate_object(phandle, "_PXM", NULL, - &buffer))) - return; - } + pxm = acpi_get_pxm(handle); - if (!buffer.length || !buffer.pointer) + if (pxm < 0) return; - obj = buffer.pointer; - - if (obj->type != ACPI_TYPE_INTEGER || - obj->integer.value >= MAX_PXM_DOMAINS) { - acpi_os_free(buffer.pointer); - return; - } - - node = pxm_to_nid_map[obj->integer.value]; - acpi_os_free(buffer.pointer); + node = pxm_to_nid_map[pxm]; if (node >= MAX_NUMNODES || !node_online(node)) return; diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index a8e99c5..72dfd9e 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -779,7 +779,7 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret) union acpi_object *obj; struct acpi_table_iosapic *iosapic; unsigned int gsi_base; - int node; + int pxm, node; /* Only care about objects w/ a method that returns the MADT */ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) @@ -805,29 +805,16 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret) gsi_base = iosapic->global_irq_base; acpi_os_free(buffer.pointer); - buffer.length = ACPI_ALLOCATE_BUFFER; - buffer.pointer = NULL; /* - * OK, it's an IOSAPIC MADT entry, look for a _PXM method to tell + * OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell * us which node to associate this with. */ - if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer))) - return AE_OK; - - if (!buffer.length || !buffer.pointer) - return AE_OK; - - obj = buffer.pointer; - - if (obj->type != ACPI_TYPE_INTEGER || - obj->integer.value >= MAX_PXM_DOMAINS) { - acpi_os_free(buffer.pointer); + pxm = acpi_get_pxm(handle); + if (pxm < 0) return AE_OK; - } - node = pxm_to_nid_map[obj->integer.value]; - acpi_os_free(buffer.pointer); + node = pxm_to_nid_map[pxm]; if (node >= MAX_NUMNODES || !node_online(node) || cpus_empty(node_to_cpumask(node))) -- cgit v1.1 From de7548d0e202263bb6bfd7574a7889e85a691937 Mon Sep 17 00:00:00 2001 From: Mike Habeck Date: Fri, 25 Mar 2005 19:34:00 -0700 Subject: [IA64-SGI] Altix only: Fix for sn_dma_flush The following patch fixes a bug in the SGI Altix sn_dma_flush code. sn_dma_flush is broken in 2.6. The code isn't waiting for the DMA data to be flushed out of the PIC ASIC. This patch is based off the linux-ia64-test-2.6.12 tree Signed-off-by: Mike Habeck Signed-off-by: Tony Luck --- arch/ia64/sn/pci/pcibr/pcibr_dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c index c906859..64af2b2 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c @@ -301,7 +301,7 @@ void sn_dma_flush(uint64_t addr) spin_lock_irqsave(&((struct sn_flush_device_list *)p)-> sfdl_flush_lock, flags); - p->sfdl_flush_value = 0; + *p->sfdl_flush_addr = 0; /* force an interrupt. 
*/ *(volatile uint32_t *)(p->sfdl_force_int_addr) = 1; -- cgit v1.1 From 7d5f9c0f10255000ca007fb03773c6b825c2b9ce Mon Sep 17 00:00:00 2001 From: Zwane Mwaikambo Date: Wed, 30 Mar 2005 21:40:00 -0700 Subject: [IA64] reduce cacheline bouncing in cpu_idle_wait Andi noted that during normal runtime cpu_idle_map is bounced around a lot, and occassionally at a higher frequency than the timer interrupt wakeup which we normally exit pm_idle from. So switch to a percpu variable. I didn't move things to the slow path because it would involve adding scheduler code to wakeup the idle thread on the cpus we're waiting for. Signed-off-by: Zwane Mwaikambo Signed-off-by: Andrew Morton Signed-off-by: Tony Luck --- arch/ia64/kernel/process.c | 41 ++++++++++++++++++++++++++--------------- 1 file changed, 26 insertions(+), 15 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 7c43aea..c0140f4 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -50,7 +50,7 @@ #include "sigframe.h" void (*ia64_mark_idle)(int); -static cpumask_t cpu_idle_map; +static DEFINE_PER_CPU(unsigned int, cpu_idle_state); unsigned long boot_option_idle_override = 0; EXPORT_SYMBOL(boot_option_idle_override); @@ -223,20 +223,31 @@ static inline void play_dead(void) } #endif /* CONFIG_HOTPLUG_CPU */ - void cpu_idle_wait(void) { - int cpu; - cpumask_t map; + unsigned int cpu, this_cpu = get_cpu(); + cpumask_t map; + + set_cpus_allowed(current, cpumask_of_cpu(this_cpu)); + put_cpu(); - for_each_online_cpu(cpu) - cpu_set(cpu, cpu_idle_map); + cpus_clear(map); + for_each_online_cpu(cpu) { + per_cpu(cpu_idle_state, cpu) = 1; + cpu_set(cpu, map); + } - wmb(); - do { - ssleep(1); - cpus_and(map, cpu_idle_map, cpu_online_map); - } while (!cpus_empty(map)); + __get_cpu_var(cpu_idle_state) = 0; + + wmb(); + do { + ssleep(1); + for_each_online_cpu(cpu) { + if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu)) + cpu_clear(cpu, map); + } + cpus_and(map, map, cpu_online_map); + } while (!cpus_empty(map)); } EXPORT_SYMBOL_GPL(cpu_idle_wait); @@ -244,7 +255,6 @@ void __attribute__((noreturn)) cpu_idle (void) { void (*mark_idle)(int) = ia64_mark_idle; - int cpu = smp_processor_id(); /* endless idle loop with no priority at all */ while (1) { @@ -255,12 +265,13 @@ cpu_idle (void) while (!need_resched()) { void (*idle)(void); + if (__get_cpu_var(cpu_idle_state)) + __get_cpu_var(cpu_idle_state) = 0; + + rmb(); if (mark_idle) (*mark_idle)(1); - if (cpu_isset(cpu, cpu_idle_map)) - cpu_clear(cpu, cpu_idle_map); - rmb(); idle = pm_idle; if (!idle) idle = default_idle; -- cgit v1.1 From 446b8831f5acf2076fa58a66286789eb84f3df2c Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Tue, 5 Apr 2005 17:47:00 -0700 Subject: [IA64] fix ia64 syscall auditing Attached is a patch against David's audit.17 kernel that adds checks for the TIF_SYSCALL_AUDIT thread flag to the ia64 system call and signal handling code paths. The patch enables auditing of system calls set up via fsys_bubble_down, as well as ensuring that audit_syscall_exit() is called on return from sigreturn. Neglecting to check for TIF_SYSCALL_AUDIT at these points results in incorrect information in audit_context, causing frequent system panics when system call auditing is enabled on an ia64 system. I have tested this patch and have seen no problems with it. 
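A rough C-level sketch of the check the fsys.S hunk below implements (illustrative only, not part of the patch; it assumes _TIF_SYSCALL_TRACEAUDIT is the OR of the trace and audit thread flags, as in the ia64 thread_info.h of this era, and the branch targets are named loosely):

	/* flags already holds current->thread_info->flags in fsys_bubble_down */
	if (flags & _TIF_SYSCALL_TRACEAUDIT)
		goto via_ia64_trace_syscall;	/* slow path: ptrace and/or audit */
	/* otherwise return through ia64_ret_from_syscall as before */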
[Original patch from Amy Griffis ported to current kernel by David Woodhouse] From: Amy Griffis From: David Woodhouse Signed-off-by: Chris Wright Signed-off-by: Greg Kroah-Hartman Signed-off-by: Tony Luck --- arch/ia64/kernel/fsys.S | 4 +++- arch/ia64/kernel/signal.c | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S index 0d8650f..4f3cdef 100644 --- a/arch/ia64/kernel/fsys.S +++ b/arch/ia64/kernel/fsys.S @@ -611,8 +611,10 @@ GLOBAL_ENTRY(fsys_bubble_down) movl r2=ia64_ret_from_syscall ;; mov rp=r2 // set the real return addr - tbit.z p8,p0=r3,TIF_SYSCALL_TRACE + and r3=_TIF_SYSCALL_TRACEAUDIT,r3 ;; + cmp.eq p8,p0=r3,r0 + (p10) br.cond.spnt.many ia64_ret_from_syscall // p10==true means out registers are more than 8 (p8) br.call.sptk.many b6=b6 // ignore this return addr br.cond.sptk ia64_trace_syscall diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index 6891d86..499b7e5 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -224,7 +224,8 @@ ia64_rt_sigreturn (struct sigscratch *scr) * could be corrupted. */ retval = (long) &ia64_leave_kernel; - if (test_thread_flag(TIF_SYSCALL_TRACE)) + if (test_thread_flag(TIF_SYSCALL_TRACE) + || test_thread_flag(TIF_SYSCALL_AUDIT)) /* * strace expects to be notified after sigreturn returns even though the * context to which we return may not be in the middle of a syscall. -- cgit v1.1 From b1b901c2029aa0fd8aa4ac5f04f31648ae2358b4 Mon Sep 17 00:00:00 2001 From: Russ Anderson Date: Wed, 6 Apr 2005 00:07:00 -0700 Subject: [IA64] MCA recovery improvements Jack Steiner uncovered some opportunities for improvement in the MCA recovery code. 1) Set bsp to save registers on the kernel stack. 2) Disable interrupts while in the MCA recovery code. 3) Change the way the user process is killed, to avoid a panic in schedule. Testing shows that these changes make the recovery code much more reliable with the 2.6.12 kernel. Signed-off-by: Russ Anderson Signed-off-by: Tony Luck --- arch/ia64/kernel/mca_drv.c | 4 ++-- arch/ia64/kernel/mca_drv_asm.S | 18 +++++++++++++++--- 2 files changed, 17 insertions(+), 5 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index ab47817..abc0113 100644 --- a/arch/ia64/kernel/mca_drv.c +++ b/arch/ia64/kernel/mca_drv.c @@ -132,8 +132,7 @@ mca_handler_bh(unsigned long paddr) spin_unlock(&mca_bh_lock); /* This process is about to be killed itself */ - force_sig(SIGKILL, current); - schedule(); + do_exit(SIGKILL); } /** @@ -439,6 +438,7 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr; psr2->cpl = 0; psr2->ri = 0; + psr2->i = 0; return 1; } diff --git a/arch/ia64/kernel/mca_drv_asm.S b/arch/ia64/kernel/mca_drv_asm.S index bcfa05a..2d7e021 100644 --- a/arch/ia64/kernel/mca_drv_asm.S +++ b/arch/ia64/kernel/mca_drv_asm.S @@ -10,6 +10,7 @@ #include #include +#include GLOBAL_ENTRY(mca_handler_bhhook) invala // clear RSE ? 
@@ -20,12 +21,21 @@ GLOBAL_ENTRY(mca_handler_bhhook) ;; alloc r16=ar.pfs,0,2,1,0 // make a new frame ;; + mov ar.rsc=0 + ;; mov r13=IA64_KR(CURRENT) // current task pointer ;; - adds r12=IA64_TASK_THREAD_KSP_OFFSET,r13 + mov r2=r13 + ;; + addl r22=IA64_RBS_OFFSET,r2 + ;; + mov ar.bspstore=r22 ;; - ld8 r12=[r12] // stack pointer + addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2 ;; + adds r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13 + ;; + st1 [r2]=r0 // clear current->thread.on_ustack flag mov loc0=r16 movl loc1=mca_handler_bh // recovery C function ;; @@ -34,7 +44,9 @@ GLOBAL_ENTRY(mca_handler_bhhook) ;; mov loc1=rp ;; - br.call.sptk.many rp=b6 // not return ... + ssm psr.i + ;; + br.call.sptk.many rp=b6 // does not return ... ;; mov ar.pfs=loc0 mov rp=loc1 -- cgit v1.1 From 32709d8ae67356559839a9a9e4b26f79134e45a6 Mon Sep 17 00:00:00 2001 From: Keith Owens Date: Fri, 8 Apr 2005 14:23:00 -0700 Subject: [IA64] SAL to OS callbacks cannot call sleeping When SAL calls back into the OS, the OS code is running with preempt disabled so it cannot call sleeping functions. Signed-off-by: Keith Owens Signed-off-by: Tony Luck --- arch/ia64/sn/kernel/mca.c | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/sn/kernel/mca.c b/arch/ia64/sn/kernel/mca.c index 857774b..6546db6 100644 --- a/arch/ia64/sn/kernel/mca.c +++ b/arch/ia64/sn/kernel/mca.c @@ -37,6 +37,11 @@ static u64 *sn_oemdata_size, sn_oemdata_bufsize; * This function is the callback routine that SAL calls to log error * info for platform errors. buf is appended to sn_oemdata, resizing as * required. + * Note: this is a SAL to OS callback, running under the same rules as the SAL + * code. SAL calls are run with preempt disabled so this routine must not + * sleep. vmalloc can sleep so print_hook cannot resize the output buffer + * itself, instead it must set the required size and return to let the caller + * resize the buffer then redrive the SAL call. */ static int print_hook(const char *fmt, ...) { @@ -47,18 +52,8 @@ static int print_hook(const char *fmt, ...) 
vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); len = strlen(buf); - while (*sn_oemdata_size + len + 1 > sn_oemdata_bufsize) { - u8 *newbuf = vmalloc(sn_oemdata_bufsize += 1000); - if (!newbuf) { - printk(KERN_ERR "%s: unable to extend sn_oemdata\n", - __FUNCTION__); - return 0; - } - memcpy(newbuf, *sn_oemdata, *sn_oemdata_size); - vfree(*sn_oemdata); - *sn_oemdata = newbuf; - } - memcpy(*sn_oemdata + *sn_oemdata_size, buf, len + 1); + if (*sn_oemdata_size + len <= sn_oemdata_bufsize) + memcpy(*sn_oemdata + *sn_oemdata_size, buf, len); *sn_oemdata_size += len; return 0; } @@ -98,7 +93,20 @@ sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata, sn_oemdata = oemdata; sn_oemdata_size = oemdata_size; sn_oemdata_bufsize = 0; - ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header); + *sn_oemdata_size = PAGE_SIZE; /* first guess at how much data will be generated */ + while (*sn_oemdata_size > sn_oemdata_bufsize) { + u8 *newbuf = vmalloc(*sn_oemdata_size); + if (!newbuf) { + printk(KERN_ERR "%s: unable to extend sn_oemdata\n", + __FUNCTION__); + return 1; + } + vfree(*sn_oemdata); + *sn_oemdata = newbuf; + sn_oemdata_bufsize = *sn_oemdata_size; + *sn_oemdata_size = 0; + ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header); + } up(&sn_oemdata_mutex); return 0; } -- cgit v1.1 From 3ea8b477b4b9d3e75b5e9b8aea41259f45031823 Mon Sep 17 00:00:00 2001 From: Mark Maule Date: Mon, 11 Apr 2005 21:20:00 -0700 Subject: [IA64] altix: fix TIOCA dmamap list_add Correct a bug where tioca_dma_mapped() is putting tioca dma map structs on the wrong list. Signed-off-by: Mark Maule Signed-off-by: Tony Luck --- arch/ia64/sn/pci/tioca_provider.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c index 54a0dd4..8dae9eb 100644 --- a/arch/ia64/sn/pci/tioca_provider.c +++ b/arch/ia64/sn/pci/tioca_provider.c @@ -431,7 +431,7 @@ tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size) ca_dmamap->cad_dma_addr = bus_addr; ca_dmamap->cad_gart_size = entries; ca_dmamap->cad_gart_entry = entry; - list_add(&ca_dmamap->cad_list, &tioca_kern->ca_list); + list_add(&ca_dmamap->cad_list, &tioca_kern->ca_dmamaps); if (xio_addr % ps) { tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr); -- cgit v1.1 From 8df5a500a3e97f7811cdce0f553ca1917ccd4220 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Mon, 11 Apr 2005 13:45:00 -0700 Subject: [IA64] perfmon & PAL_HALT again The pmu_active test is based on the values of PSR.up. THIS IS THE PROBLEM as it does not take into account the lazy restore logic which is as follow (simplified): context switch out: save PMDs clear psr.up release ownership context switch in: if (ctx->last_cpu == smp_processor_id() && ctx->cpu_activation == cpu_activation) { set psr.up return } restore PMD restore PMC ctx->last_cpu = smp_processor_id(); ctx->activation = ++cpu_activation; set psr.up The key here is that on context switch out, we clear psr.up and on context switch in we check if nobody else used the PMU on that processor since last time we came. In that case, we assume the PMD/PMC are ours and we simply reactivate. The Caliper problem is that between the moment we context switch out and the moment we come back, nobody effectively used the PMU BUT the processor went idle. Normally this would have no incidence but PAL_HALT does alter the PMU registers. 
In default_idle(), the test on psr.up is not strong enough to cover this case and we go into PAL which trashed the PMU resgisters. When we come back we falsely assume that this is our state yet it is corrupted. Very nasty indeed. To avoid the problem it is necessary to forbid going to PAL_HALT as soon as perfmon installs some valid state in the PMU registers. This happens with an application attaches a context to a thread or CPU. It is not enough to check the psr/dcr bits. Hence I propose the attached patch. It adds a callback in process.c to modify the condition to enter PAL on idle. Basically, now it is conditional to pal_halt=1 AND perfmon saying it is okay. Signed-off-by: Tony Luck --- arch/ia64/kernel/perfmon.c | 13 +++++++++++++ arch/ia64/kernel/process.c | 14 +++++++++++--- 2 files changed, 24 insertions(+), 3 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 376fcbc..fd4f3be 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -1265,6 +1265,8 @@ out: } EXPORT_SYMBOL(pfm_unregister_buffer_fmt); +extern void update_pal_halt_status(int); + static int pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) { @@ -1311,6 +1313,11 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) is_syswide, cpu)); + /* + * disable default_idle() to go to PAL_HALT + */ + update_pal_halt_status(0); + UNLOCK_PFS(flags); return 0; @@ -1366,6 +1373,12 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu) is_syswide, cpu)); + /* + * if possible, enable default_idle() to go into PAL_HALT + */ + if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0) + update_pal_halt_status(1); + UNLOCK_PFS(flags); return 0; diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index c0140f4..474d75f 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -173,7 +173,9 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall ia64_do_signal(oldset, scr, in_syscall); } -static int pal_halt = 1; +static int pal_halt = 1; +static int can_do_pal_halt = 1; + static int __init nohalt_setup(char * str) { pal_halt = 0; @@ -181,16 +183,22 @@ static int __init nohalt_setup(char * str) } __setup("nohalt", nohalt_setup); +int +update_pal_halt_status(int status) +{ + can_do_pal_halt = pal_halt && status; +} + /* * We use this if we don't have any better idle routine.. */ void default_idle (void) { - unsigned long pmu_active = ia64_getreg(_IA64_REG_PSR) & (IA64_PSR_PP | IA64_PSR_UP); + int can_do_pal; while (!need_resched()) - if (pal_halt && !pmu_active) + if (can_do_pal_halt) safe_halt(); else cpu_relax(); -- cgit v1.1 From a5a70b75d93b26e14c0c5e759099d602a480b9e2 Mon Sep 17 00:00:00 2001 From: stephane eranian Date: Mon, 18 Apr 2005 11:42:00 -0700 Subject: [IA64] another perfmon fix (take2) - pfm_context_load(): change return value from EINVAL to EBUSY when context is already loaded. - pfm_check_task_state(): pass test if context state is MASKED. It is safe to give access on PFM_CTX_MASKED because the PMU state (PMD) is stable and saved in software state. This helps multiplexing programs such as the example given in libpfm-3.1. 
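For illustration only (a hedged user-level sketch, not part of the patch): with this change a multiplexing monitor can operate on a context that is in the MASKED state, for example to read the software-saved counters after an overflow, without the monitored thread having to be stopped first. The sketch assumes the ia64 perfmonctl() interface and pfarg_reg_t from <asm/perfmon.h>; context setup and error handling are omitted, and 'ctx_fd' stands for the perfmon context descriptor:

	pfarg_reg_t pd[1];

	memset(pd, 0, sizeof(pd));
	pd[0].reg_num = 4;	/* a counting PMD, e.g. PMD4 on Itanium 2 */

	/* succeeds even while the context is PFM_CTX_MASKED after this patch */
	if (perfmonctl(ctx_fd, PFM_READ_PMDS, pd, 1) == -1)
		perror("PFM_READ_PMDS");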
Signed-off-by: stephane eranian Signed-off-by: Tony Luck --- arch/ia64/kernel/perfmon.c | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index fd4f3be..71c1016 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -4215,7 +4215,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) DPRINT(("cannot load to [%d], invalid ctx_state=%d\n", req->load_pid, ctx->ctx_state)); - return -EINVAL; + return -EBUSY; } DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg)); @@ -4717,16 +4717,26 @@ recheck: if (task == current || ctx->ctx_fl_system) return 0; /* - * if context is UNLOADED we are safe to go - */ - if (state == PFM_CTX_UNLOADED) return 0; - - /* - * no command can operate on a zombie context + * we are monitoring another thread */ - if (state == PFM_CTX_ZOMBIE) { - DPRINT(("cmd %d state zombie cannot operate on context\n", cmd)); - return -EINVAL; + switch(state) { + case PFM_CTX_UNLOADED: + /* + * if context is UNLOADED we are safe to go + */ + return 0; + case PFM_CTX_ZOMBIE: + /* + * no command can operate on a zombie context + */ + DPRINT(("cmd %d state zombie cannot operate on context\n", cmd)); + return -EINVAL; + case PFM_CTX_MASKED: + /* + * PMU state has been saved to software even though + * the thread may still be running. + */ + if (cmd != PFM_UNLOAD_CONTEXT) return 0; } /* -- cgit v1.1 From a71f62edc935fbdc346c5b16bea19f1480d29635 Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Tue, 3 May 2005 16:21:45 -0700 Subject: [IA64] Fix two warnings introduced by perfmon patches. Signed-off-by: Tony Luck --- arch/ia64/kernel/process.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 474d75f..ebb71f3 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -183,7 +183,7 @@ static int __init nohalt_setup(char * str) } __setup("nohalt", nohalt_setup); -int +void update_pal_halt_status(int status) { can_do_pal_halt = pal_halt && status; @@ -195,8 +195,6 @@ update_pal_halt_status(int status) void default_idle (void) { - int can_do_pal; - while (!need_resched()) if (can_do_pal_halt) safe_halt(); -- cgit v1.1 From c4b07b7b3693228c7b848c00a9d4bbb49506a45f Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Tue, 3 May 2005 16:27:44 -0700 Subject: [IA64] Update arch/ia64/configs/tiger_defconfig Kristen did most of the checking, bring this up to -rc2. 
Signed-off-by: Tony Luck --- arch/ia64/configs/tiger_defconfig | 96 +++++++++++++++++++++++---------------- 1 file changed, 57 insertions(+), 39 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig index 99830e8..9086b78 100644 --- a/arch/ia64/configs/tiger_defconfig +++ b/arch/ia64/configs/tiger_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.11-rc2 -# Sat Jan 22 11:17:02 2005 +# Linux kernel version: 2.6.12-rc3 +# Tue May 3 15:55:04 2005 # # @@ -10,6 +10,7 @@ CONFIG_EXPERIMENTAL=y CONFIG_CLEAN_COMPILE=y CONFIG_LOCK_KERNEL=y +CONFIG_INIT_ENV_ARG_LIMIT=32 # # General setup @@ -21,24 +22,27 @@ CONFIG_POSIX_MQUEUE=y # CONFIG_BSD_PROCESS_ACCT is not set CONFIG_SYSCTL=y # CONFIG_AUDIT is not set -CONFIG_LOG_BUF_SHIFT=20 CONFIG_HOTPLUG=y CONFIG_KOBJECT_UEVENT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y +# CONFIG_CPUSETS is not set # CONFIG_EMBEDDED is not set CONFIG_KALLSYMS=y CONFIG_KALLSYMS_ALL=y # CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_BASE_FULL=y CONFIG_FUTEX=y CONFIG_EPOLL=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SHMEM=y CONFIG_CC_ALIGN_FUNCTIONS=0 CONFIG_CC_ALIGN_LABELS=0 CONFIG_CC_ALIGN_LOOPS=0 CONFIG_CC_ALIGN_JUMPS=0 # CONFIG_TINY_SHMEM is not set +CONFIG_BASE_SMALL=0 # # Loadable module support @@ -85,6 +89,7 @@ CONFIG_FORCE_MAX_ZONEORDER=18 CONFIG_SMP=y CONFIG_NR_CPUS=4 CONFIG_HOTPLUG_CPU=y +# CONFIG_SCHED_SMT is not set # CONFIG_PREEMPT is not set CONFIG_HAVE_DEC_LOCK=y CONFIG_IA32_SUPPORT=y @@ -135,6 +140,7 @@ CONFIG_PCI_DOMAINS=y # CONFIG_PCI_MSI is not set CONFIG_PCI_LEGACY_PROC=y CONFIG_PCI_NAMES=y +# CONFIG_PCI_DEBUG is not set # # PCI Hotplug Support @@ -152,10 +158,6 @@ CONFIG_HOTPLUG_PCI_ACPI=m # CONFIG_PCCARD is not set # -# PC-card bridges -# - -# # Device Drivers # @@ -195,9 +197,10 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_NBD=m # CONFIG_BLK_DEV_SX8 is not set # CONFIG_BLK_DEV_UB is not set -CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_SIZE=4096 +CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" # CONFIG_CDROM_PKTCDVD is not set @@ -313,7 +316,6 @@ CONFIG_SCSI_FC_ATTRS=y # CONFIG_SCSI_BUSLOGIC is not set # CONFIG_SCSI_DMX3191D is not set # CONFIG_SCSI_EATA is not set -# CONFIG_SCSI_EATA_PIO is not set # CONFIG_SCSI_FUTURE_DOMAIN is not set # CONFIG_SCSI_GDTH is not set # CONFIG_SCSI_IPS is not set @@ -325,7 +327,6 @@ CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 # CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set # CONFIG_SCSI_IPR is not set -# CONFIG_SCSI_QLOGIC_ISP is not set CONFIG_SCSI_QLOGIC_FC=y # CONFIG_SCSI_QLOGIC_FC_FIRMWARE is not set CONFIG_SCSI_QLOGIC_1280=y @@ -336,6 +337,7 @@ CONFIG_SCSI_QLA22XX=m CONFIG_SCSI_QLA2300=m CONFIG_SCSI_QLA2322=m # CONFIG_SCSI_QLA6312 is not set +# CONFIG_SCSI_LPFC is not set # CONFIG_SCSI_DC395x is not set # CONFIG_SCSI_DC390T is not set # CONFIG_SCSI_DEBUG is not set @@ -358,6 +360,7 @@ CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_MIRROR=m CONFIG_DM_ZERO=m +# CONFIG_DM_MULTIPATH is not set # # Fusion MPT device support @@ -386,7 +389,6 @@ CONFIG_NET=y # CONFIG_PACKET=y # CONFIG_PACKET_MMAP is not set -CONFIG_NETLINK_DEV=y CONFIG_UNIX=y # CONFIG_NET_KEY is not set CONFIG_INET=y @@ -446,7 +448,6 @@ CONFIG_DUMMY=m # CONFIG_BONDING is not set # CONFIG_EQUALIZER is not set # CONFIG_TUN is not set -# CONFIG_ETHERTAP is not set # # ARCnet devices @@ -484,7 +485,6 @@ CONFIG_NET_PCI=y # 
CONFIG_DGRS is not set CONFIG_EEPRO100=m CONFIG_E100=m -# CONFIG_E100_NAPI is not set # CONFIG_FEALNX is not set # CONFIG_NATSEMI is not set # CONFIG_NE2K_PCI is not set @@ -566,25 +566,6 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 # CONFIG_INPUT_EVBUG is not set # -# Input I/O drivers -# -CONFIG_GAMEPORT=m -CONFIG_SOUND_GAMEPORT=m -# CONFIG_GAMEPORT_NS558 is not set -# CONFIG_GAMEPORT_L4 is not set -# CONFIG_GAMEPORT_EMU10K1 is not set -# CONFIG_GAMEPORT_VORTEX is not set -# CONFIG_GAMEPORT_FM801 is not set -# CONFIG_GAMEPORT_CS461X is not set -CONFIG_SERIO=y -CONFIG_SERIO_I8042=y -# CONFIG_SERIO_SERPORT is not set -# CONFIG_SERIO_CT82C710 is not set -# CONFIG_SERIO_PCIPS2 is not set -CONFIG_SERIO_LIBPS2=y -# CONFIG_SERIO_RAW is not set - -# # Input Device Drivers # CONFIG_INPUT_KEYBOARD=y @@ -602,6 +583,24 @@ CONFIG_MOUSE_PS2=y # CONFIG_INPUT_MISC is not set # +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_SERIO_I8042=y +# CONFIG_SERIO_SERPORT is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +# CONFIG_SERIO_RAW is not set +CONFIG_GAMEPORT=m +# CONFIG_GAMEPORT_NS558 is not set +# CONFIG_GAMEPORT_L4 is not set +# CONFIG_GAMEPORT_EMU10K1 is not set +# CONFIG_GAMEPORT_VORTEX is not set +# CONFIG_GAMEPORT_FM801 is not set +# CONFIG_GAMEPORT_CS461X is not set +CONFIG_SOUND_GAMEPORT=m + +# # Character devices # CONFIG_VT=y @@ -615,6 +614,8 @@ CONFIG_SERIAL_NONSTANDARD=y # CONFIG_SYNCLINK is not set # CONFIG_SYNCLINKMP is not set # CONFIG_N_HDLC is not set +# CONFIG_SPECIALIX is not set +# CONFIG_SX is not set # CONFIG_STALDRV is not set # @@ -635,6 +636,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y # CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_JSM is not set CONFIG_UNIX98_PTYS=y CONFIG_LEGACY_PTYS=y CONFIG_LEGACY_PTY_COUNT=256 @@ -670,6 +672,12 @@ CONFIG_HPET=y # CONFIG_HPET_RTC_IRQ is not set CONFIG_HPET_MMAP=y CONFIG_MAX_RAW_DEVS=256 +# CONFIG_HANGCHECK_TIMER is not set + +# +# TPM devices +# +# CONFIG_TCG_TPM is not set # # I2C support @@ -705,7 +713,6 @@ CONFIG_MAX_RAW_DEVS=256 # CONFIG_VGA_CONSOLE=y CONFIG_DUMMY_CONSOLE=y -# CONFIG_BACKLIGHT_LCD_SUPPORT is not set # # Sound @@ -715,6 +722,8 @@ CONFIG_DUMMY_CONSOLE=y # # USB support # +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB_ARCH_HAS_OHCI=y CONFIG_USB=y # CONFIG_USB_DEBUG is not set @@ -726,8 +735,6 @@ CONFIG_USB_DEVICEFS=y # CONFIG_USB_DYNAMIC_MINORS is not set # CONFIG_USB_SUSPEND is not set # CONFIG_USB_OTG is not set -CONFIG_USB_ARCH_HAS_HCD=y -CONFIG_USB_ARCH_HAS_OHCI=y # # USB Host Controller Drivers @@ -736,6 +743,8 @@ CONFIG_USB_EHCI_HCD=m # CONFIG_USB_EHCI_SPLIT_ISO is not set # CONFIG_USB_EHCI_ROOT_HUB_TT is not set CONFIG_USB_OHCI_HCD=m +# CONFIG_USB_OHCI_BIG_ENDIAN is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y CONFIG_USB_UHCI_HCD=y # CONFIG_USB_SL811_HCD is not set @@ -751,12 +760,11 @@ CONFIG_USB_UHCI_HCD=y # CONFIG_USB_STORAGE=m # CONFIG_USB_STORAGE_DEBUG is not set -# CONFIG_USB_STORAGE_RW_DETECT is not set # CONFIG_USB_STORAGE_DATAFAB is not set # CONFIG_USB_STORAGE_FREECOM is not set # CONFIG_USB_STORAGE_ISD200 is not set # CONFIG_USB_STORAGE_DPCM is not set -# CONFIG_USB_STORAGE_HP8200e is not set +# CONFIG_USB_STORAGE_USBAT is not set # CONFIG_USB_STORAGE_SDDR09 is not set # CONFIG_USB_STORAGE_SDDR55 is not set # CONFIG_USB_STORAGE_JUMPSHOT is not set @@ -800,6 +808,7 @@ CONFIG_USB_HIDINPUT=y # CONFIG_USB_PEGASUS is not set # CONFIG_USB_RTL8150 is not set # CONFIG_USB_USBNET is not set +# CONFIG_USB_MON is not set # # USB port drivers @@ -824,6 +833,7 @@ CONFIG_USB_HIDINPUT=y # 
CONFIG_USB_PHIDGETKIT is not set # CONFIG_USB_PHIDGETSERVO is not set # CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_SISUSBVGA is not set # CONFIG_USB_TEST is not set # @@ -867,7 +877,12 @@ CONFIG_REISERFS_FS_POSIX_ACL=y CONFIG_REISERFS_FS_SECURITY=y # CONFIG_JFS_FS is not set CONFIG_FS_POSIX_ACL=y + +# +# XFS support +# CONFIG_XFS_FS=y +CONFIG_XFS_EXPORT=y # CONFIG_XFS_RT is not set # CONFIG_XFS_QUOTA is not set # CONFIG_XFS_SECURITY is not set @@ -945,7 +960,7 @@ CONFIG_NFSD_V4=y CONFIG_NFSD_TCP=y CONFIG_LOCKD=m CONFIG_LOCKD_V4=y -CONFIG_EXPORTFS=m +CONFIG_EXPORTFS=y CONFIG_SUNRPC=m CONFIG_SUNRPC_GSS=m CONFIG_RPCSEC_GSS_KRB5=m @@ -1042,8 +1057,10 @@ CONFIG_GENERIC_IRQ_PROBE=y # # Kernel hacking # +# CONFIG_PRINTK_TIME is not set CONFIG_DEBUG_KERNEL=y CONFIG_MAGIC_SYSRQ=y +CONFIG_LOG_BUF_SHIFT=20 # CONFIG_SCHEDSTATS is not set # CONFIG_DEBUG_SLAB is not set # CONFIG_DEBUG_SPINLOCK is not set @@ -1077,6 +1094,7 @@ CONFIG_CRYPTO_MD5=m # CONFIG_CRYPTO_SHA256 is not set # CONFIG_CRYPTO_SHA512 is not set # CONFIG_CRYPTO_WP512 is not set +# CONFIG_CRYPTO_TGR192 is not set CONFIG_CRYPTO_DES=m # CONFIG_CRYPTO_BLOWFISH is not set # CONFIG_CRYPTO_TWOFISH is not set -- cgit v1.1 From 9b48b46678989b67cd00658ea88964163eaab616 Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Tue, 22 Mar 2005 16:00:00 -0700 Subject: [IA64-SGI] move nodepda pointer out of pda Remove the p_nodepda and p_subnodepda pointers from the pda_s structure. And then define a new per-cpu pointer to the nodepda and export it so that it can be accessed by kernel modules. Signed-off-by: Dean Nelson Signed-off-by: Tony Luck --- arch/ia64/sn/kernel/setup.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index fea71ee..4fb4498 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c @@ -76,6 +76,9 @@ EXPORT_PER_CPU_SYMBOL(__sn_hub_info); DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]); EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid); +DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda); +EXPORT_PER_CPU_SYMBOL(__sn_nodepda); + partid_t sn_partid = -1; EXPORT_SYMBOL(sn_partid); char sn_system_serial_number_string[128]; @@ -480,7 +483,8 @@ void __init sn_cpu_init(void) cnode = nasid_to_cnodeid(nasid); - pda->p_nodepda = nodepdaindr[cnode]; + sn_nodepda = nodepdaindr[cnode]; + pda->led_address = (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT)); pda->led_state = LED_ALWAYS_SET; @@ -626,7 +630,8 @@ nasid_slice_to_cpuid(int nasid, int slice) long cpu; for (cpu=0; cpu < NR_CPUS; cpu++) - if (nodepda->phys_cpuid[cpu].nasid == nasid && nodepda->phys_cpuid[cpu].slice == slice) + if (cpuid_to_nasid(cpu) == nasid && + cpuid_to_slice(cpu) == slice) return cpu; return -1; -- cgit v1.1
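A hedged sketch (not part of the patch) of how a module can now reach the nodepda through the exported per-CPU pointer. The sn_nodepda accessor used in sn_cpu_init() above is assumed to expand to __get_cpu_var(__sn_nodepda), and the header path is assumed:

	#include <asm/sn/nodepda.h>	/* declares __sn_nodepda / struct nodepda_s */

	static struct nodepda_s *nodepda_of_cpu(int cpu)
	{
		/* per-CPU pointer initialized in sn_cpu_init() */
		return per_cpu(__sn_nodepda, cpu);
	}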