692 files changed, 57942 insertions, 5980 deletions
diff --git a/Documentation/ABI/testing/sysfs-pps b/Documentation/ABI/testing/sysfs-pps
new file mode 100644
index 0000000..25028c7
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-pps
@@ -0,0 +1,73 @@
+What: /sys/class/pps/
+Date: February 2008
+Contact: Rodolfo Giometti <giometti@linux.it>
+Description:
+ The /sys/class/pps/ directory will contain files and
+ directories that will provide a unified interface to
+ the PPS sources.
+
+What: /sys/class/pps/ppsX/
+Date: February 2008
+Contact: Rodolfo Giometti <giometti@linux.it>
+Description:
+ The /sys/class/pps/ppsX/ directory is related to the X-th
+ PPS source in the system. Each directory will
+ contain files to manage and control its PPS source.
+
+What: /sys/class/pps/ppsX/assert
+Date: February 2008
+Contact: Rodolfo Giometti <giometti@linux.it>
+Description:
+ The /sys/class/pps/ppsX/assert file reports the assert events
+ and the assert sequence number of the X-th source in the form:
+
+ <secs>.<nsec>#<sequence>
+
+ If the source has no assert events the content of this file
+ is empty.
+
+What: /sys/class/pps/ppsX/clear
+Date: February 2008
+Contact: Rodolfo Giometti <giometti@linux.it>
+Description:
+ The /sys/class/pps/ppsX/clear file reports the clear events
+ and the clear sequence number of the X-th source in the form:
+
+ <secs>.<nsec>#<sequence>
+
+ If the source has no clear events the content of this file
+ is empty.
+
+What: /sys/class/pps/ppsX/mode
+Date: February 2008
+Contact: Rodolfo Giometti <giometti@linux.it>
+Description:
+ The /sys/class/pps/ppsX/mode file reports the functioning
+ mode of the X-th source in hexadecimal encoding.
+
+ Please refer to linux/include/linux/pps.h for further
+ info.
+
+What: /sys/class/pps/ppsX/echo
+Date: February 2008
+Contact: Rodolfo Giometti <giometti@linux.it>
+Description:
+ The /sys/class/pps/ppsX/echo file reports if the X-th source
+ does or does not support an "echo" function.
+
+What: /sys/class/pps/ppsX/name
+Date: February 2008
+Contact: Rodolfo Giometti <giometti@linux.it>
+Description:
+ The /sys/class/pps/ppsX/name file reports the name of the
+ X-th source.
+
+What: /sys/class/pps/ppsX/path
+Date: February 2008
+Contact: Rodolfo Giometti <giometti@linux.it>
+Description:
+ The /sys/class/pps/ppsX/path file reports the path name of
+ the device connected with the X-th source.
+
+ If the source is not connected with any device the content
+ of this file is empty.
diff --git a/Documentation/Changes b/Documentation/Changes
index 6643924..6d0f1ef 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -72,6 +72,13 @@ assembling the 16-bit boot code, removing the need for as86 to compile
your kernel. This change does, however, mean that you need a recent
release of binutils.
+Perl
+----
+
+You will need perl 5 and the following modules: Getopt::Long, Getopt::Std,
+File::Basename, and File::Find to build the kernel.
+
+
System utilities
================
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 1a60887..23d1262 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -152,14 +152,19 @@ When swap is accounted, following files are added.
usage of mem+swap is limited by memsw.limit_in_bytes.
-Note: why 'mem+swap' rather than swap.
+* why 'mem+swap' rather than swap.
The global LRU(kswapd) can swap out arbitrary pages. Swap-out means
to move account from memory to swap...there is no change in usage of
-mem+swap.
+mem+swap.
+In other words, when we want to limit the usage of swap without
+affecting global LRU, mem+swap limit is better than just limiting swap from
+the OS point of view.
-In other words, when we want to limit the usage of swap without affecting
-global LRU, mem+swap limit is better than just limiting swap from OS point
-of view.
+* What happens when a cgroup hits memory.memsw.limit_in_bytes
+When a cgroup hits memory.memsw.limit_in_bytes, it's useless to do swap-out
+in this cgroup. Then, swap-out will not be done by cgroup routine and file
+caches are dropped. But as mentioned above, global LRU can swap out memory
+from it for sanity of the system's memory management state. You can't forbid
+it by cgroup.
2.5 Reclaim
@@ -204,6 +209,7 @@ We can alter the memory limit:
NOTE: We can use a suffix (k, K, m, M, g or G) to indicate values in kilo,
mega or gigabytes.
+NOTE: We can write "-1" to reset the *.limit_in_bytes (unlimited).
# cat /cgroups/0/memory.limit_in_bytes
4194304
diff --git a/Documentation/connector/cn_test.c b/Documentation/connector/cn_test.c
index 6977c17..f688eba 100644
--- a/Documentation/connector/cn_test.c
+++ b/Documentation/connector/cn_test.c
@@ -41,6 +41,12 @@ void cn_test_callback(void *data)
 msg->seq, msg->ack, msg->len, (char *)msg->data);
}
+/*
+ * Do not remove this function even if no one is using it as
+ * this is an example of how to get notifications about new
+ * connector user registration
+ */
+#if 0
static int cn_test_want_notify(void)
{
 struct cn_ctl_msg *ctl;
@@ -117,6 +123,7 @@ nlmsg_failure:
 kfree_skb(skb);
 return -EINVAL;
}
+#endif
static u32 cn_test_timer_counter;
static void cn_test_timer_func(unsigned long __data)
diff --git a/Documentation/cpu-freq/cpu-drivers.txt b/Documentation/cpu-freq/cpu-drivers.txt
index 43c7439..75a58d1 100644
--- a/Documentation/cpu-freq/cpu-drivers.txt
+++ b/Documentation/cpu-freq/cpu-drivers.txt
@@ -155,7 +155,7 @@ actual frequency must be determined using the following rules:
- if relation==CPUFREQ_REL_H, try to select a new_freq lower than or equal
 target_freq. ("H for highest, but no higher than")
-Here again the frequency table helper might assist you - see section 3
+Here again the frequency table helper might assist you - see section 2
for details.
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index ce73f3eb..aed082f 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -119,10 +119,6 @@ want the kernel to look at the CPU usage and to make decisions on
what to do about the frequency. Typically this is set to values of
around '10000' or more. It's default value is (cmp. with users-guide.txt):
transition_latency * 1000
-The lowest value you can set is:
-transition_latency * 100 or it may get restricted to a value where it
-makes not sense for the kernel anymore to poll that often which depends
-on your HZ config variable (HZ=1000: max=20000us, HZ=250: max=5000).
Be aware that transition latency is in ns and sampling_rate is in us, so you
get the same sysfs value by default.
Sampling rate should always get adjusted considering the transition latency
@@ -131,14 +127,20 @@ in the bash (as said, 1000 is default), do:
echo `$(($(cat cpuinfo_transition_latency) * 750 / 1000)) \
 >ondemand/sampling_rate
-show_sampling_rate_(min|max): THIS INTERFACE IS DEPRECATED, DON'T USE IT.
-You can use wider ranges now and the general
-cpuinfo_transition_latency variable (cmp.
with user-guide.txt) can be
-used to obtain exactly the same info:
-show_sampling_rate_min = transtition_latency * 500 / 1000
-show_sampling_rate_max = transtition_latency * 500000 / 1000
-(divided by 1000 is to illustrate that sampling rate is in us and
-transition latency is exported ns).
+show_sampling_rate_min:
+The sampling rate is limited by the HW transition latency:
+transition_latency * 100
+Or by kernel restrictions:
+If CONFIG_NO_HZ is set, the limit is 10ms fixed.
+If CONFIG_NO_HZ is not set or the nohz=off boot parameter is used, the
+limits depend on the CONFIG_HZ option:
+HZ=1000: min=20000us (20ms)
+HZ=250: min=80000us (80ms)
+HZ=100: min=200000us (200ms)
+The highest value of kernel and HW latency restrictions is shown and
+used as the minimum sampling rate.
+
+show_sampling_rate_max: THIS INTERFACE IS DEPRECATED, DON'T USE IT.
up_threshold: defines what the average CPU usage between the samplings
of 'sampling_rate' needs to be for the kernel to make a decision on
diff --git a/Documentation/cpu-freq/user-guide.txt b/Documentation/cpu-freq/user-guide.txt
index 75f4119..5d5f5fa 100644
--- a/Documentation/cpu-freq/user-guide.txt
+++ b/Documentation/cpu-freq/user-guide.txt
@@ -31,7 +31,6 @@ Contents:
3. How to change the CPU cpufreq policy and/or speed
3.1 Preferred interface: sysfs
-3.2 Deprecated interfaces
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 7129846a..8d07ed3 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -6,6 +6,20 @@ be removed from this file.
---------------------------
+What: IRQF_SAMPLE_RANDOM
+Check: IRQF_SAMPLE_RANDOM
+When: July 2009
+
+Why: Many of IRQF_SAMPLE_RANDOM users are technically bogus as entropy
+ sources in the kernel's current entropy model. To resolve this, every
+ input point to the kernel's entropy pool needs to better document the
+ type of entropy source it actually is. This will be replaced with
+ additional add_*_randomness functions in drivers/char/random.c
+
+Who: Robin Getz <rgetz@blackfin.uclinux.org> & Matt Mackall <mpm@selenic.com>
+
+---------------------------
+
What: The ieee80211_regdom module parameter
When: March 2010 / desktop catchup
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 3120f8d..229d7b7 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -187,7 +187,7 @@ readpages: no
write_begin: no locks the page yes
write_end: no yes, unlocks yes
perform_write: no n/a yes
-bmap: yes
+bmap: no
invalidatepage: no yes
releasepage: no yes
direct_IO: no
diff --git a/Documentation/filesystems/ext2.txt b/Documentation/filesystems/ext2.txt
index e055acb..67639f9 100644
--- a/Documentation/filesystems/ext2.txt
+++ b/Documentation/filesystems/ext2.txt
@@ -322,7 +322,7 @@ an upper limit on the block size imposed by the page size of the kernel,
so 8kB blocks are only allowed on Alpha systems (and other architectures
which support larger pages).
-There is an upper limit of 32768 subdirectories in a single directory.
+There is an upper limit of 32000 subdirectories in a single directory.
There is a "soft" upper limit of about 10-15k files in a single directory
with the current linear linked-list directory implementation.
This limit
diff --git a/Documentation/filesystems/isofs.txt b/Documentation/filesystems/isofs.txt
index 6973b98..3c367c3 100644
--- a/Documentation/filesystems/isofs.txt
+++ b/Documentation/filesystems/isofs.txt
@@ -23,8 +23,13 @@ Mount options unique to the isofs filesystem.
 map=off Do not map non-Rock Ridge filenames to lower case
 map=normal Map non-Rock Ridge filenames to lower case
 map=acorn As map=normal but also apply Acorn extensions if present
- mode=xxx Sets the permissions on files to xxx
- dmode=xxx Sets the permissions on directories to xxx
+ mode=xxx Sets the permissions on files to xxx unless Rock Ridge
+ extensions set the permissions otherwise
+ dmode=xxx Sets the permissions on directories to xxx unless Rock Ridge
+ extensions set the permissions otherwise
+ overriderockperm Set permissions on files and directories according to
+ 'mode' and 'dmode' even though Rock Ridge extensions are
+ present.
 nojoliet Ignore Joliet extensions if they are present.
 norock Ignore Rock Ridge extensions if they are present.
 hide Completely strip hidden files from the file system.
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index ebff3c1..fad18f9 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -5,11 +5,12 @@ Bodo Bauer <bb@ricochet.net>
2.4.x update Jorge Nerin <comandante@zaralinux.com> November 14 2000
-move /proc/sys Shen Feng <shen@cn.fujitsu.com> April 1 2009
+move /proc/sys Shen Feng <shen@cn.fujitsu.com> April 1 2009
------------------------------------------------------------------------------
Version 1.3 Kernel version 2.2.12
 Kernel version 2.4.0-test11-pre4
------------------------------------------------------------------------------
+fixes/update part 1.1 Stefani Seibold <stefani@seibold.net> June 9 2009
Table of Contents
-----------------
@@ -116,7 +117,7 @@ The link self points to the process reading the file system.
Each process subdirectory has the entries listed in Table 1-1.
-Table 1-1: Process specific entries in /proc
+Table 1-1: Process specific entries in /proc
..............................................................................
 File Content
 clear_refs Clears page referenced bits shown in smaps output
@@ -134,46 +135,103 @@ Table 1-1: Process specific entries in /proc
 status Process status in human readable form
 wchan If CONFIG_KALLSYMS is set, a pre-decoded wchan
 stack Report full stack trace, enable via CONFIG_STACKTRACE
- smaps Extension based on maps, the rss size for each mapped file
+ smaps an extension based on maps, showing the memory consumption of
+ each mapping
..............................................................................
For example, to get the status information of a process, all you have to do is
read the file /proc/PID/status:
- >cat /proc/self/status
- Name: cat
- State: R (running)
- Pid: 5452
- PPid: 743
+ >cat /proc/self/status
+ Name: cat
+ State: R (running)
+ Tgid: 5452
+ Pid: 5452
+ PPid: 743
 TracerPid: 0 (2.4)
- Uid: 501 501 501 501
- Gid: 100 100 100 100
- Groups: 100 14 16
- VmSize: 1112 kB
- VmLck: 0 kB
- VmRSS: 348 kB
- VmData: 24 kB
- VmStk: 12 kB
- VmExe: 8 kB
- VmLib: 1044 kB
- SigPnd: 0000000000000000
- SigBlk: 0000000000000000
- SigIgn: 0000000000000000
- SigCgt: 0000000000000000
- CapInh: 00000000fffffeff
- CapPrm: 0000000000000000
- CapEff: 0000000000000000
-
+ Uid: 501 501 501 501
+ Gid: 100 100 100 100
+ FDSize: 256
+ Groups: 100 14 16
+ VmPeak: 5004 kB
+ VmSize: 5004 kB
+ VmLck: 0 kB
+ VmHWM: 476 kB
+ VmRSS: 476 kB
+ VmData: 156 kB
+ VmStk: 88 kB
+ VmExe: 68 kB
+ VmLib: 1412 kB
+ VmPTE: 20 kb
+ Threads: 1
+ SigQ: 0/28578
+ SigPnd: 0000000000000000
+ ShdPnd: 0000000000000000
+ SigBlk: 0000000000000000
+ SigIgn: 0000000000000000
+ SigCgt: 0000000000000000
+ CapInh: 00000000fffffeff
+ CapPrm: 0000000000000000
+ CapEff: 0000000000000000
+ CapBnd: ffffffffffffffff
+ voluntary_ctxt_switches: 0
+ nonvoluntary_ctxt_switches: 1
This shows you nearly the same information you would get if you viewed it with
the ps command. In fact, ps uses the proc file system to obtain its
-information. The statm file contains more detailed information about the
-process memory usage. Its seven fields are explained in Table 1-2. The stat
-file contains details information about the process itself. Its fields are
-explained in Table 1-3.
+information. But you get a more detailed view of the process by reading the
+file /proc/PID/status. Its fields are described in Table 1-2.
+
+The statm file contains more detailed information about the process
+memory usage. Its seven fields are explained in Table 1-3. The stat file
+contains detailed information about the process itself. Its fields are
+explained in Table 1-4.
+Table 1-2: Contents of the status files (as of 2.6.30-rc7)
+..............................................................................
+ Field Content
+ Name filename of the executable
+ State state (R is running, S is sleeping, D is sleeping
+ in an uninterruptible wait, Z is zombie,
+ T is traced or stopped)
+ Tgid thread group ID
+ Pid process id
+ PPid process id of the parent process
+ TracerPid PID of process tracing this process (0 if not)
+ Uid Real, effective, saved set, and file system UIDs
+ Gid Real, effective, saved set, and file system GIDs
+ FDSize number of file descriptor slots currently allocated
+ Groups supplementary group list
+ VmPeak peak virtual memory size
+ VmSize total program size
+ VmLck locked memory size
+ VmHWM peak resident set size ("high water mark")
+ VmRSS size of memory portions
+ VmData size of the data segment
+ VmStk size of the stack segment
+ VmExe size of text segment
+ VmLib size of shared library code
+ VmPTE size of page table entries
+ Threads number of threads
+ SigQ number of signals queued/max. number for queue
+ SigPnd bitmap of pending signals for the thread
+ ShdPnd bitmap of shared pending signals for the process
+ SigBlk bitmap of blocked signals
+ SigIgn bitmap of ignored signals
+ SigCgt bitmap of caught signals
+ CapInh bitmap of inheritable capabilities
+ CapPrm bitmap of permitted capabilities
+ CapEff bitmap of effective capabilities
+ CapBnd bitmap of capabilities bounding set
+ Cpus_allowed mask of CPUs on which this process may run
+ Cpus_allowed_list Same as previous, but in "list format"
+ Mems_allowed mask of memory nodes allowed to this process
+ Mems_allowed_list Same as previous, but in "list format"
+ voluntary_ctxt_switches number of voluntary context switches
+ nonvoluntary_ctxt_switches number of nonvoluntary context switches
+..............................................................................
-Table 1-2: Contents of the statm files (as of 2.6.8-rc3)
+Table 1-3: Contents of the statm files (as of 2.6.8-rc3)
..............................................................................
 Field Content
 size total program size (pages) (same as VmSize in status)
@@ -188,7 +246,7 @@ Table 1-2: Contents of the statm files (as of 2.6.8-rc3)
..............................................................................
-Table 1-3: Contents of the stat files (as of 2.6.22-rc3)
+Table 1-4: Contents of the stat files (as of 2.6.30-rc7)
..............................................................................
 Field Content
 pid process id
@@ -222,10 +280,10 @@ Table 1-3: Contents of the stat files (as of 2.6.22-rc3)
 start_stack address of the start of the stack
 esp current value of ESP
 eip current value of EIP
- pending bitmap of pending signals (obsolete)
- blocked bitmap of blocked signals (obsolete)
- sigign bitmap of ignored signals (obsolete)
- sigcatch bitmap of catched signals (obsolete)
+ pending bitmap of pending signals
+ blocked bitmap of blocked signals
+ sigign bitmap of ignored signals
+ sigcatch bitmap of caught signals
 wchan address where process went to sleep
 0 (place holder)
 0 (place holder)
@@ -234,19 +292,99 @@ Table 1-3: Contents of the stat files (as of 2.6.22-rc3)
 rt_priority realtime priority
 policy scheduling policy (man sched_setscheduler)
 blkio_ticks time spent waiting for block IO
+ gtime guest time of the task in jiffies
+ cgtime guest time of the task children in jiffies
..............................................................................
+The /proc/PID/maps file contains the currently mapped memory regions and
+their access permissions.
+
+The format is:
+
+address perms offset dev inode pathname
+
+08048000-08049000 r-xp 00000000 03:00 8312 /opt/test
+08049000-0804a000 rw-p 00001000 03:00 8312 /opt/test
+0804a000-0806b000 rw-p 00000000 00:00 0 [heap]
+a7cb1000-a7cb2000 ---p 00000000 00:00 0
+a7cb2000-a7eb2000 rw-p 00000000 00:00 0
+a7eb2000-a7eb3000 ---p 00000000 00:00 0
+a7eb3000-a7ed5000 rw-p 00000000 00:00 0
+a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6
+a8008000-a800a000 r--p 00133000 03:00 4222 /lib/libc.so.6
+a800a000-a800b000 rw-p 00135000 03:00 4222 /lib/libc.so.6
+a800b000-a800e000 rw-p 00000000 00:00 0
+a800e000-a8022000 r-xp 00000000 03:00 14462 /lib/libpthread.so.0
+a8022000-a8023000 r--p 00013000 03:00 14462 /lib/libpthread.so.0
+a8023000-a8024000 rw-p 00014000 03:00 14462 /lib/libpthread.so.0
+a8024000-a8027000 rw-p 00000000 00:00 0
+a8027000-a8043000 r-xp 00000000 03:00 8317 /lib/ld-linux.so.2
+a8043000-a8044000 r--p 0001b000 03:00 8317 /lib/ld-linux.so.2
+a8044000-a8045000 rw-p 0001c000 03:00 8317 /lib/ld-linux.so.2
+aff35000-aff4a000 rw-p 00000000 00:00 0 [stack]
+ffffe000-fffff000 r-xp 00000000 00:00 0 [vdso]
+
+where "address" is the address space in the process that it occupies, "perms"
+is a set of permissions:
+
+ r = read
+ w = write
+ x = execute
+ s = shared
+ p = private (copy on write)
+
+"offset" is the offset into the mapping, "dev" is the device (major:minor), and
+"inode" is the inode on that device. 0 indicates that no inode is associated
+with the memory region, as the case would be with BSS (uninitialized data).
+The "pathname" shows the file associated with this mapping. If the mapping
+is not associated with a file:
+
+ [heap] = the heap of the program
+ [stack] = the stack of the main process
+ [vdso] = the "virtual dynamic shared object",
+ the kernel system call handler
+
+ or if empty, the mapping is anonymous.
+
+
+The /proc/PID/smaps file is an extension based on maps, showing the memory
+consumption for each of the process's mappings. For each of the mappings
+there is a series of lines such as the following:
+
+08048000-080bc000 r-xp 00000000 03:02 13130 /bin/bash
+Size: 1084 kB
+Rss: 892 kB
+Pss: 374 kB
+Shared_Clean: 892 kB
+Shared_Dirty: 0 kB
+Private_Clean: 0 kB
+Private_Dirty: 0 kB
+Referenced: 892 kB
+Swap: 0 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
+
+The first of these lines shows the same information as is displayed for the
+mapping in /proc/PID/maps. The remaining lines show the size of the mapping,
+the amount of the mapping that is currently resident in RAM, the "proportional
+set size" (divide each shared page by the number of processes sharing it), the
+number of clean and dirty shared pages in the mapping, and the number of clean
+and dirty private pages in the mapping. The "Referenced" line indicates the
+amount of memory currently marked as referenced or accessed.
+
+This file is only present if the CONFIG_MMU kernel configuration option is
+enabled.
1.2 Kernel data
---------------
Similar to the process entries, the kernel data files give information about
the running kernel. The files used to obtain this information are contained in
-/proc and are listed in Table 1-4. Not all of these will be present in your
+/proc and are listed in Table 1-5. Not all of these will be present in your
system. It depends on the kernel configuration and the loaded modules, which
files are there, and which are missing.
-Table 1-4: Kernel info in /proc
+Table 1-5: Kernel info in /proc
..............................................................................
 File Content
 apm Advanced power management info
@@ -283,6 +421,7 @@ Table 1-4: Kernel info in /proc
 rtc Real time clock
 scsi SCSI info (see text)
 slabinfo Slab pool info
+ softirqs softirq usage
 stat Overall statistics
 swaps Swap space utilization
 sys See chapter 2
@@ -597,6 +736,25 @@ on the kind of area :
0xffffffffa0017000-0xffffffffa0022000 45056 sys_init_module+0xc27/0x1d00
... pages=10 vmalloc N0=10
+..............................................................................
+
+softirqs:
+
+Provides counts of softirq handlers serviced since boot time, for each cpu.
+
+> cat /proc/softirqs
+ CPU0 CPU1 CPU2 CPU3
+ HI: 0 0 0 0
+ TIMER: 27166 27120 27097 27034
+ NET_TX: 0 0 0 17
+ NET_RX: 42 0 0 39
+ BLOCK: 0 0 107 1121
+ TASKLET: 0 0 0 290
+ SCHED: 27035 26983 26971 26746
+ HRTIMER: 0 0 0 0
+ RCU: 1678 1769 2178 2250
+
+
1.3 IDE devices in /proc/ide
----------------------------
@@ -614,10 +772,10 @@ IDE devices:
More detailed information can be found in the controller specific
subdirectories. These are named ide0, ide1 and so on. Each of these
-directories contains the files shown in table 1-5.
+directories contains the files shown in table 1-6.
-Table 1-5: IDE controller info in /proc/ide/ide?
+Table 1-6: IDE controller info in /proc/ide/ide?
..............................................................................
 File Content
 channel IDE channel (0 or 1)
@@ -627,11 +785,11 @@ Table 1-5: IDE controller info in /proc/ide/ide?
..............................................................................
Each device connected to a controller has a separate subdirectory in the
-controllers directory. The files listed in table 1-6 are contained in these
+controllers directory. The files listed in table 1-7 are contained in these
directories.
-Table 1-6: IDE device information
+Table 1-7: IDE device information
..............................................................................
 File Content
 cache The cache
@@ -673,12 +831,12 @@ the drive parameters:
1.4 Networking info in /proc/net
--------------------------------
-The subdirectory /proc/net follows the usual pattern. Table 1-6 shows the
+The subdirectory /proc/net follows the usual pattern. Table 1-8 shows the
additional values you get for IP version 6 if you configure the kernel to
-support this. Table 1-7 lists the files and their meaning.
+support this. Table 1-9 lists the files and their meaning.
-Table 1-6: IPv6 info in /proc/net
+Table 1-8: IPv6 info in /proc/net
..............................................................................
 File Content
 udp6 UDP sockets (IPv6)
@@ -693,7 +851,7 @@ Table 1-6: IPv6 info in /proc/net
..............................................................................
-Table 1-7: Network info in /proc/net
+Table 1-9: Network info in /proc/net
..............................................................................
 File Content
 arp Kernel ARP table
@@ -817,10 +975,10 @@ The directory /proc/parport contains information about the parallel ports of
your system. It has one subdirectory for each port, named after the port
number (0,1,2,...).
-These directories contain the four files shown in Table 1-8.
+These directories contain the four files shown in Table 1-10.
-Table 1-8: Files in /proc/parport
+Table 1-10: Files in /proc/parport
..............................................................................
 File Content
 autoprobe Any IEEE-1284 device ID information that has been acquired.
@@ -838,10 +996,10 @@ Table 1-8: Files in /proc/parport
Information about the available and actually used tty's can be found in the
directory /proc/tty.You'll find entries for drivers and line disciplines in
-this directory, as shown in Table 1-9.
+this directory, as shown in Table 1-11.
-Table 1-9: Files in /proc/tty
+Table 1-11: Files in /proc/tty
..............................................................................
 File Content
 drivers list of drivers and their usage
@@ -883,6 +1041,7 @@ since the system first booted. For a quick look, simply cat the file:
 processes 2915
 procs_running 1
 procs_blocked 0
+ softirq 183433 0 21755 12 39 1137 231 21459 2263
The very first "cpu" line aggregates the numbers in all of the other "cpuN"
lines. These numbers identify the amount of time the CPU has spent performing
@@ -918,6 +1077,11 @@ CPUs.
The "procs_blocked" line gives the number of processes currently blocked,
waiting for I/O to complete.
+The "softirq" line gives counts of softirqs serviced since boot time, for each
+of the possible system softirqs. The first column is the total of all
+softirqs serviced; each subsequent column is the total for that particular
+softirq.
+
1.9 Ext4 file system parameters
------------------------------
@@ -926,9 +1090,9 @@ Information about mounted ext4 file systems can be found in
/proc/fs/ext4. Each mounted filesystem will have a directory in
/proc/fs/ext4 based on its device name (i.e., /proc/fs/ext4/hdc or
/proc/fs/ext4/dm-0). The files in each per-device directory are shown
-in Table 1-10, below.
+in Table 1-12, below.
-Table 1-10: Files in /proc/fs/ext4/<devname>
+Table 1-12: Files in /proc/fs/ext4/<devname>
..............................................................................
 File Content
 mb_groups details of multiblock allocator buddy cache of free blocks
diff --git a/Documentation/gcov.txt b/Documentation/gcov.txt
new file mode 100644
index 0000000..e716aad
--- /dev/null
+++ b/Documentation/gcov.txt
@@ -0,0 +1,246 @@
+Using gcov with the Linux kernel
+================================
+
+1. Introduction
+2. Preparation
+3. Customization
+4. Files
+5. Modules
+6. Separated build and test machines
+7. Troubleshooting
+Appendix A: sample script: gather_on_build.sh
+Appendix B: sample script: gather_on_test.sh
+
+
+1. Introduction
+===============
+
+gcov profiling kernel support enables the use of GCC's coverage testing
+tool gcov [1] with the Linux kernel. Coverage data of a running kernel
+is exported in gcov-compatible format via the "gcov" debugfs directory.
+To get coverage data for a specific file, change to the kernel build
+directory and use gcov with the -o option as follows (requires root):
+
+# cd /tmp/linux-out
+# gcov -o /sys/kernel/debug/gcov/tmp/linux-out/kernel spinlock.c
+
+This will create source code files annotated with execution counts
+in the current directory. In addition, graphical gcov front-ends such
+as lcov [2] can be used to automate the process of collecting data
+for the entire kernel and provide coverage overviews in HTML format.
+
+Possible uses:
+
+* debugging (has this line been reached at all?)
+* test improvement (how do I change my test to cover these lines?)
+* minimizing kernel configurations (do I need this option if the
+ associated code is never run?)
+
+--
+
+[1] http://gcc.gnu.org/onlinedocs/gcc/Gcov.html
+[2] http://ltp.sourceforge.net/coverage/lcov.php
+
+
+2. Preparation
+==============
+
+Configure the kernel with:
+
+ CONFIG_DEBUGFS=y
+ CONFIG_GCOV_KERNEL=y
+
+and to get coverage data for the entire kernel:
+
+ CONFIG_GCOV_PROFILE_ALL=y
+
+Note that kernels compiled with profiling flags will be significantly
+larger and run slower. Also CONFIG_GCOV_PROFILE_ALL may not be supported
+on all architectures.
+
+Profiling data will only become accessible once debugfs has been
+mounted:
+
+ mount -t debugfs none /sys/kernel/debug
+
+
+3. Customization
+================
+
+To enable profiling for specific files or directories, add a line
+similar to the following to the respective kernel Makefile:
+
+ For a single file (e.g. main.o):
+ GCOV_PROFILE_main.o := y
+
+ For all files in one directory:
+ GCOV_PROFILE := y
+
+To exclude files from being profiled even when CONFIG_GCOV_PROFILE_ALL
+is specified, use:
+
+ GCOV_PROFILE_main.o := n
+ and:
+ GCOV_PROFILE := n
+
+Only files which are linked to the main kernel image or are compiled as
+kernel modules are supported by this mechanism.
+
+
+4. Files
+========
+
+The gcov kernel support creates the following files in debugfs:
+
+ /sys/kernel/debug/gcov
+ Parent directory for all gcov-related files.
+
+ /sys/kernel/debug/gcov/reset
+ Global reset file: resets all coverage data to zero when
+ written to.
+
+ /sys/kernel/debug/gcov/path/to/compile/dir/file.gcda
+ The actual gcov data file as understood by the gcov
+ tool. Resets file coverage data to zero when written to.
+
+ /sys/kernel/debug/gcov/path/to/compile/dir/file.gcno
+ Symbolic link to a static data file required by the gcov
+ tool. This file is generated by gcc when compiling with
+ option -ftest-coverage.
+
+
+5. Modules
+==========
+
+Kernel modules may contain cleanup code which is only run during
+module unload time. The gcov mechanism provides a means to collect
+coverage data for such code by keeping a copy of the data associated
+with the unloaded module. This data remains available through debugfs.
+Once the module is loaded again, the associated coverage counters are
+initialized with the data from its previous instantiation.
+
+This behavior can be deactivated by specifying the gcov_persist kernel
+parameter:
+
+ gcov_persist=0
+
+At run-time, a user can also choose to discard data for an unloaded
+module by writing to its data file or the global reset file.
+
+
+6. Separated build and test machines
+====================================
+
+The gcov kernel profiling infrastructure is designed to work out of the
+box for setups where kernels are built and run on the same machine. In
+cases where the kernel runs on a separate machine, special preparations
+must be made, depending on where the gcov tool is used:
+
+a) gcov is run on the TEST machine
+
+The gcov tool version on the test machine must be compatible with the
+gcc version used for the kernel build. Also the following files need to
+be copied from build to test machine:
+
+from the source tree:
+ - all C source files + headers
+
+from the build tree:
+ - all C source files + headers
+ - all .gcda and .gcno files
+ - all links to directories
+
+It is important to note that these files need to be placed into the
+exact same file system location on the test machine as on the build
+machine. If any of the path components is a symbolic link, the actual
+directory needs to be used instead (due to make's CURDIR handling).
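+
+As a quick sanity check before fetching coverage data (a hypothetical
+helper, not part of the gcov kernel support itself), the tool versions
+can be compared on the test machine; gcc-version.txt is an assumed file
+in which the build machine's "gcc --version" output was recorded:
+
+#!/bin/bash
+# Warn if the test machine's gcov does not match the gcc version
+# that was recorded when the kernel was built.
+BUILD_GCC=$(head -n1 gcc-version.txt | awk '{print $NF}')
+TEST_GCOV=$(gcov --version | head -n1 | awk '{print $NF}')
+
+if [ "$BUILD_GCC" != "$TEST_GCOV" ]; then
+	echo "warning: gcov $TEST_GCOV may not match build gcc $BUILD_GCC" >&2
+fi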
+
+b) gcov is run on the BUILD machine
+
+The following files need to be copied after each test case from test
+to build machine:
+
+from the gcov directory in sysfs:
+ - all .gcda files
+ - all links to .gcno files
+
+These files can be copied to any location on the build machine. gcov
+must then be called with the -o option pointing to that directory.
+
+Example directory setup on the build machine:
+
+ /tmp/linux: kernel source tree
+ /tmp/out: kernel build directory as specified by make O=
+ /tmp/coverage: location of the files copied from the test machine
+
+ [user@build] cd /tmp/out
+ [user@build] gcov -o /tmp/coverage/tmp/out/init main.c
+
+
+7. Troubleshooting
+==================
+
+Problem: Compilation aborts during linker step.
+Cause: Profiling flags are specified for source files which are not
+ linked to the main kernel or which are linked by a custom
+ linker procedure.
+Solution: Exclude affected source files from profiling by specifying
+ GCOV_PROFILE := n or GCOV_PROFILE_basename.o := n in the
+ corresponding Makefile.
+
+
+Appendix A: gather_on_build.sh
+==============================
+
+Sample script to gather coverage meta files on the build machine
+(see 6a):
+
+#!/bin/bash
+
+KSRC=$1
+KOBJ=$2
+DEST=$3
+
+if [ -z "$KSRC" ] || [ -z "$KOBJ" ] || [ -z "$DEST" ]; then
+ echo "Usage: $0 <ksrc directory> <kobj directory> <output.tar.gz>" >&2
+ exit 1
+fi
+
+KSRC=$(cd $KSRC; printf "all:\n\t@echo \${CURDIR}\n" | make -f -)
+KOBJ=$(cd $KOBJ; printf "all:\n\t@echo \${CURDIR}\n" | make -f -)
+
+find $KSRC $KOBJ \( -name '*.gcno' -o -name '*.[ch]' -o -type l \) -a \
+ -perm /u+r,g+r | tar cfz $DEST -P -T -
+
+if [ $? -eq 0 ] ; then
+ echo "$DEST successfully created, copy to test system and unpack with:"
+ echo " tar xfz $DEST -P"
+else
+ echo "Could not create file $DEST"
+fi
+
+
+Appendix B: gather_on_test.sh
+=============================
+
+Sample script to gather coverage data files on the test machine
+(see 6b):
+
+#!/bin/bash
+
+DEST=$1
+GCDA=/sys/kernel/debug/gcov
+
+if [ -z "$DEST" ] ; then
+ echo "Usage: $0 <output.tar.gz>" >&2
+ exit 1
+fi
+
+find $GCDA -name '*.gcno' -o -name '*.gcda' | tar cfz $DEST -T -
+
+if [ $? -eq 0 ] ; then
+ echo "$DEST successfully created, copy to build system and unpack with:"
+ echo " tar xfz $DEST"
+else
+ echo "Could not create file $DEST"
+fi
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 1f779a2..7bb0d93 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -149,6 +149,8 @@ Code Seq# Include File Comments
'p' 40-7F linux/nvram.h
'p' 80-9F user-space parport
 <mailto:tim@cyberelk.net>
+'p' a1-a4 linux/pps.h LinuxPPS
+ <mailto:giometti@linux.it>
'q' 00-1F linux/serio.h
'q' 80-FF Internet PhoneJACK, Internet LineJACK
 <http://www.quicknet.net>
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 5578248..08def8d 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -48,6 +48,7 @@ parameter is applicable:
 EFI EFI Partitioning (GPT) is enabled
 EIDE EIDE/ATAPI support is enabled.
 FB The frame buffer device is enabled.
+ GCOV GCOV profiling is enabled.
 HW Appropriate hardware is enabled.
 IA-64 IA-64 architecture is enabled.
 IMA Integrity measurement architecture is enabled.
@@ -796,6 +797,12 @@ and is between 256 and 4096 characters. It is defined in the file
 Format: off | on
 default: on
+ gcov_persist= [GCOV] When non-zero (default), profiling data for
+ kernel modules is saved and remains accessible via
+ debugfs, even when the module is unloaded/reloaded.
+ When zero, profiling data is discarded and associated
+ debugfs files are removed at module unload time.
+
 gdth= [HW,SCSI] See header of drivers/scsi/gdth.c.
diff --git a/Documentation/pps/pps.txt b/Documentation/pps/pps.txt
new file mode 100644
index 0000000..125f4ab
--- /dev/null
+++ b/Documentation/pps/pps.txt
@@ -0,0 +1,172 @@
+
+ PPS - Pulse Per Second
+ ----------------------
+
+(C) Copyright 2007 Rodolfo Giometti <giometti@enneenne.com>
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+
+
+Overview
+--------
+
+LinuxPPS provides a programming interface (API) to define in the
+system several PPS sources.
+
+PPS means "pulse per second" and a PPS source is just a device which
+provides a high precision signal each second so that an application
+can use it to adjust system clock time.
+
+A PPS source can be connected to a serial port (usually to the Data
+Carrier Detect pin) or to a parallel port (ACK-pin) or to a special
+CPU's GPIOs (this is the common case in embedded systems), but in each
+case, when a new pulse arrives, the system must apply a timestamp to it
+and record it for userland.
+
+A common use is the combination of NTPD as the userland program with a
+GPS receiver as the PPS source, to obtain wallclock time with
+sub-millisecond synchronisation to UTC.
+
+
+RFC considerations
+------------------
+
+While implementing a PPS API as RFC 2783 defines and using an embedded
+CPU GPIO-Pin as physical link to the signal, I encountered a deeper
+problem:
+
+ At startup it needs a file descriptor as argument for the function
+ time_pps_create().
+
+This implies that the source has a /dev/... entry. This assumption is
+ok for the serial and parallel ports, where you can do something
+useful besides(!) the gathering of timestamps, which is the central
+task for a PPS API. But this assumption does not work for a single-
+purpose GPIO line. In this case even basic file-related functionality
+(like read() and write()) makes no sense at all and should not be a
+precondition for the use of a PPS API.
+
+The problem can be simply solved if you consider that a PPS source is
+not always connected with a GPS data source.
+
+So your programs should check if the GPS data source (the serial port
+for instance) is a PPS source too, and if not they should provide the
+possibility to open another device as PPS source.
+
+In LinuxPPS the PPS sources are simply char devices, usually mapped
+to the files /dev/pps0, /dev/pps1, etc.
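+
+For completeness, here is a minimal sketch of an RFC 2783 userland
+client (it assumes the timepps.h header from Documentation/pps/ is
+installed, and uses /dev/pps0 purely as an example device):
+
+	#include <stdio.h>
+	#include <fcntl.h>
+	#include <timepps.h>
+
+	int main(void)
+	{
+		pps_handle_t handle;
+		pps_info_t info;
+		struct timespec timeout = { 3, 0 };	/* wait at most 3s */
+		int fd = open("/dev/pps0", O_RDONLY);
+
+		if (fd < 0 || time_pps_create(fd, &handle) < 0)
+			return 1;
+
+		/* Block until the next assert event and print it. */
+		if (time_pps_fetch(handle, PPS_TSFMT_TSPEC, &info, &timeout) == 0)
+			printf("assert %ld.%09ld #%lu\n",
+			       (long)info.assert_timestamp.tv_sec,
+			       info.assert_timestamp.tv_nsec,
+			       (unsigned long)info.assert_sequence);
+
+		time_pps_destroy(handle);
+		return 0;
+	}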
+
+
+Coding example
+--------------
+
+To register a PPS source into the kernel you should define a struct
+pps_source_info as follows:
+
+ static struct pps_source_info pps_ktimer_info = {
+ .name = "ktimer",
+ .path = "",
+ .mode = PPS_CAPTUREASSERT | PPS_OFFSETASSERT | \
+ PPS_ECHOASSERT | \
+ PPS_CANWAIT | PPS_TSFMT_TSPEC,
+ .echo = pps_ktimer_echo,
+ .owner = THIS_MODULE,
+ };
+
+and then call the function pps_register_source() in your
+initialization routine as follows:
+
+ source = pps_register_source(&pps_ktimer_info,
+ PPS_CAPTUREASSERT | PPS_OFFSETASSERT);
+
+The pps_register_source() prototype is:
+
+ int pps_register_source(struct pps_source_info *info, int default_params)
+
+where "info" is a pointer to a structure that describes a particular
+PPS source, "default_params" tells the system what the initial default
+parameters for the device should be (it is obvious that these parameters
+must be a subset of the ones defined in the struct
+pps_source_info, which describes the capabilities of the driver).
+
+Once you have registered a new PPS source into the system you can
+signal an assert event (for example in the interrupt handler routine)
+just using:
+
+ pps_event(source, &ts, PPS_CAPTUREASSERT, ptr)
+
+where "ts" is the event's timestamp.
+
+The same function may also run the defined echo function
+(pps_ktimer_echo(), passing to it the "ptr" pointer) if the user
+asked for that... etc..
+
+Please see the file drivers/pps/clients/ktimer.c for example code.
+
+
+SYSFS support
+-------------
+
+If the SYSFS filesystem is enabled in the kernel it provides a new class:
+
+ $ ls /sys/class/pps/
+ pps0/ pps1/ pps2/
+
+Every directory is the ID of a PPS source defined in the system and
+inside it you find several files:
+
+ $ ls /sys/class/pps/pps0/
+ assert clear echo mode name path subsystem@ uevent
+
+Inside each "assert" and "clear" file you can find the timestamp and a
+sequence number:
+
+ $ cat /sys/class/pps/pps0/assert
+ 1170026870.983207967#8
+
+Where before the "#" is the timestamp in seconds; after it is the
+sequence number. Other files are:
+
+* echo: reports if the PPS source has an echo function or not;
+
+* mode: reports available PPS functioning modes;
+
+* name: reports the PPS source's name;
+
+* path: reports the PPS source's device path, that is the device the
+ PPS source is connected to (if it exists).
+
+
+Testing the PPS support
+-----------------------
+
+In order to test the PPS support even without specific hardware you can use
+the ktimer driver (see the client subsection in the PPS configuration menu)
+and the userland tools provided in the Documentation/pps/ directory.
+
+Once you have enabled the compilation of ktimer just modprobe it (if
+not statically compiled):
+
+ # modprobe ktimer
+
+and then run ppstest as follows:
+
+ $ ./ppstest /dev/pps0
+ trying PPS source "/dev/pps0"
+ found PPS source "/dev/pps0"
+ ok, found 1 source(s), now start fetching data...
+ source 0 - assert 1186592699.388832443, sequence: 364 - clear 0.000000000, sequence: 0
+ source 0 - assert 1186592700.388931295, sequence: 365 - clear 0.000000000, sequence: 0
+ source 0 - assert 1186592701.389032765, sequence: 366 - clear 0.000000000, sequence: 0
+
+Please note that to compile userland programs you need the file timepps.h
+(see Documentation/pps/).
diff --git a/Documentation/rfkill.txt b/Documentation/rfkill.txt
index 1b74b5f..c8acd86 100644
--- a/Documentation/rfkill.txt
+++ b/Documentation/rfkill.txt
@@ -3,9 +3,8 @@ rfkill - RF kill switch support
1. Introduction
2. Implementation details
-3. Kernel driver guidelines
-4. Kernel API
-5. Userspace support
+3. Kernel API
+4. Userspace support
1. Introduction
@@ -19,82 +18,62 @@ disable all transmitters of a certain type (or all). This is intended
for situations where transmitters need to be turned off, for example on
aircraft.
+The rfkill subsystem has a concept of "hard" and "soft" block, which do
+not differ in their meaning (block == transmitters off) but rather in
+whether they can be changed or not:
+ - hard block: read-only radio block that cannot be overridden by software
+ - soft block: writable radio block (need not be readable) that is set by
+ the system software.
2. Implementation details
-The rfkill subsystem is composed of various components: the rfkill class, the
-rfkill-input module (an input layer handler), and some specific input layer
-events.
-
-The rfkill class is provided for kernel drivers to register their radio
-transmitter with the kernel, provide methods for turning it on and off and,
-optionally, letting the system know about hardware-disabled states that may
-be implemented on the device. This code is enabled with the CONFIG_RFKILL
-Kconfig option, which drivers can "select".
-
-The rfkill class code also notifies userspace of state changes, this is
-achieved via uevents. It also provides some sysfs files for userspace to
-check the status of radio transmitters. See the "Userspace support" section
-below.
+The rfkill subsystem is composed of three main components:
+ * the rfkill core,
+ * the deprecated rfkill-input module (an input layer handler, being
+ replaced by userspace policy code) and
+ * the rfkill drivers.
+The rfkill core provides an API for kernel drivers to register their radio
+transmitter with the kernel, methods for turning it on and off, and a way
+of letting the system know about hardware-disabled states that may be
+implemented on the device.
-The rfkill-input code implements a basic response to rfkill buttons -- it
-implements turning on/off all devices of a certain class (or all).
+The rfkill core code also notifies userspace of state changes, and provides
+ways for userspace to query the current states. See the "Userspace support"
+section below.
When the device is hard-blocked (either by a call to rfkill_set_hw_state()
-or from query_hw_block) set_block() will be invoked but drivers can well
-ignore the method call since they can use the return value of the function
-rfkill_set_hw_state() to sync the software state instead of keeping track
-of calls to set_block().
-
-
-The entire functionality is spread over more than one subsystem:
-
- * The kernel input layer generates KEY_WWAN, KEY_WLAN etc. and
- SW_RFKILL_ALL -- when the user presses a button. Drivers for radio
- transmitters generally do not register to the input layer, unless the
- device really provides an input device (i.e. a button that has no
- effect other than generating a button press event)
-
- * The rfkill-input code hooks up to these events and switches the soft-block
- of the various radio transmitters, depending on the button type.
-
- * The rfkill drivers turn off/on their transmitters as requested.
-
- * The rfkill class will generate userspace notifications (uevents) to tell
- userspace what the current state is.
+or from query_hw_block) set_block() will be invoked for additional software
+block, but drivers can ignore the method call since they can use the return
+value of the function rfkill_set_hw_state() to sync the software state
+instead of keeping track of calls to set_block().
+In fact, drivers should use the return value of rfkill_set_hw_state()
+unless the hardware actually keeps track of soft and hard block separately.
+3. Kernel API
-3. Kernel driver guidelines
-
-Drivers for radio transmitters normally implement only the rfkill class.
-These drivers may not unblock the transmitter based on own decisions, they
-should act on information provided by the rfkill class only.
+Drivers for radio transmitters normally implement an rfkill driver.
Platform drivers might implement input devices if the rfkill button is
just that, a button. If that button influences the hardware then you need to
-implement an rfkill class instead. This also applies if the platform provides
+implement an rfkill driver instead. This also applies if the platform provides
a way to turn on/off the transmitter(s).
-During suspend/hibernation, transmitters should only be left enabled when
-wake-on wlan or similar functionality requires it and the device wasn't
-blocked before suspend/hibernate. Note that it may be necessary to update
-the rfkill subsystem's idea of what the current state is at resume time if
-the state may have changed over suspend.
-
+For some platforms, it is possible that the hardware state changes during
+suspend/hibernation, in which case it will be necessary to update the rfkill
+core with the current state at resume time.
+To create an rfkill driver, the driver's Kconfig needs to have
-4. Kernel API
+ depends on RFKILL || !RFKILL
-To build a driver with rfkill subsystem support, the driver should depend on
-(or select) the Kconfig symbol RFKILL.
-
-The hardware the driver talks to may be write-only (where the current state
-of the hardware is unknown), or read-write (where the hardware can be queried
-about its current state).
+to ensure the driver cannot be built-in when rfkill is modular. The !RFKILL
+case allows the driver to be built when rfkill is not configured, in which
+case all rfkill API can still be used but will be provided by static inlines
+which compile to almost nothing.
Calling rfkill_set_hw_state() when a state change happens is required from
rfkill drivers that control devices that can be hard-blocked unless they also
@@ -105,10 +84,33 @@ device). Don't do this unless you cannot get the event in any other way.
5. Userspace support
-The following sysfs entries exist for every rfkill device:
+The recommended userspace interface to use is /dev/rfkill, which is a misc
+character device that allows userspace to obtain and set the state of rfkill
+devices and sets of devices. It also notifies userspace about device addition
+and removal. The API is a simple read/write API that is defined in
+linux/rfkill.h, with one ioctl that allows turning off the deprecated input
+handler in the kernel for the transition period.
+
+Except for the one ioctl, communication with the kernel is done via read()
+and write() of instances of 'struct rfkill_event'. In this structure, the
+soft and hard block are properly separated (unlike sysfs, see below) and
+userspace is able to get a consistent snapshot of all rfkill devices in the
+system. Also, it is possible to switch all rfkill drivers (or all drivers of
+a specified type) into a state which also updates the default state for
+hotplugged devices.
+
+After an application opens /dev/rfkill, it can read the current state of
+all devices, and afterwards can poll the descriptor for hotplug or state
+change events.
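+
+As an illustration, a minimal sketch of such a reader (not a complete
+tool; it just dumps the raw event fields) could look like this:
+
+	#include <stdio.h>
+	#include <fcntl.h>
+	#include <unistd.h>
+	#include <linux/rfkill.h>
+
+	int main(void)
+	{
+		struct rfkill_event ev;
+		int fd = open("/dev/rfkill", O_RDONLY);
+
+		if (fd < 0)
+			return 1;
+
+		/* The first reads describe the devices already present
+		 * (op == RFKILL_OP_ADD); later reads block until a
+		 * hotplug or state change event arrives. */
+		while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
+			printf("idx %u type %u op %u soft %u hard %u\n",
+			       ev.idx, ev.type, ev.op, ev.soft, ev.hard);
+
+		close(fd);
+		return 0;
+	}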
+
+Applications must ignore operations (the "op" field) they do not handle;
+this allows the API to be extended in the future.
+
+Additionally, each rfkill device is registered in sysfs and has the
+following attributes:
 name: Name assigned by driver to this key (interface or driver name).
- type: Name of the key type ("wlan", "bluetooth", etc).
+ type: Driver type string ("wlan", "bluetooth", etc).
 state: Current state of the transmitter
 0: RFKILL_STATE_SOFT_BLOCKED
 transmitter is turned off by software
@@ -117,7 +119,12 @@ The following sysfs entries exist for every rfkill device:
 2: RFKILL_STATE_HARD_BLOCKED
 transmitter is forced off by something outside of
 the driver's control.
- claim: 0: Kernel handles events (currently always reads that value)
+ This file is deprecated because it can only properly show
+ three of the four possible states; soft-and-hard-blocked is
+ missing.
+ claim: 0: Kernel handles events
+ This file is deprecated because there is no longer a way to
+ claim control over just a single rfkill instance.
rfkill devices also issue uevents (with an action of "change"), with the
following environment variables set:
@@ -128,9 +135,3 @@ RFKILL_TYPE
The contents of these variables corresponds to the "name", "state"
and "type" sysfs files explained above.
-
-An alternative userspace interface exists as a misc device /dev/rfkill,
-which allows userspace to obtain and set the state of rfkill devices and
-sets of devices. It also notifies userspace about device addition and
-removal. The API is a simple read/write API that is defined in
-linux/rfkill.h.
diff --git a/Documentation/robust-futex-ABI.txt b/Documentation/robust-futex-ABI.txt
index 535f69f..fd1cd8a 100644
--- a/Documentation/robust-futex-ABI.txt
+++ b/Documentation/robust-futex-ABI.txt
@@ -135,7 +135,7 @@ manipulating this list), the user code must observe the following protocol on
'lock entry' insertion and removal:
On insertion:
- 1) set the 'list_op_pending' word to the address of the 'lock word'
+ 1) set the 'list_op_pending' word to the address of the 'lock entry'
 to be inserted,
 2) acquire the futex lock,
 3) add the lock entry, with its thread id (TID) in the bottom 29 bits
@@ -143,7 +143,7 @@ On insertion:
 4) clear the 'list_op_pending' word.
On removal:
- 1) set the 'list_op_pending' word to the address of the 'lock word'
+ 1) set the 'list_op_pending' word to the address of the 'lock entry'
 to be removed,
 2) remove the lock entry for this lock from the 'head' list,
 2) release the futex lock, and
diff --git a/Documentation/scsi/scsi_fc_transport.txt b/Documentation/scsi/scsi_fc_transport.txt
index e5b071d..d7f1817 100644
--- a/Documentation/scsi/scsi_fc_transport.txt
+++ b/Documentation/scsi/scsi_fc_transport.txt
@@ -1,10 +1,11 @@
 SCSI FC Tansport
 =============================================
-Date: 4/12/2007
+Date: 11/18/2008
Kernel Revisions for features:
 rports : <<TBS>>
- vports : 2.6.22 (? TBD)
+ vports : 2.6.22
+ bsg support : 2.6.30 (?TBD?)
Introduction
@@ -15,6 +16,7 @@ The FC transport can be found at:
 drivers/scsi/scsi_transport_fc.c
 include/scsi/scsi_transport_fc.h
 include/scsi/scsi_netlink_fc.h
+ include/scsi/scsi_bsg_fc.h
This file is found at Documentation/scsi/scsi_fc_transport.txt
@@ -472,6 +474,14 @@ int fc_vport_terminate(struct fc_vport *vport)
+FC BSG support (CT & ELS passthru, and more)
+========================================================================
+<< To Be Supplied >>
+
+
+
+
Credits
=======
The following people have contributed to this document:
diff --git a/Documentation/scsi/scsi_mid_low_api.txt b/Documentation/scsi/scsi_mid_low_api.txt
index a6d5354..de67229 100644
--- a/Documentation/scsi/scsi_mid_low_api.txt
+++ b/Documentation/scsi/scsi_mid_low_api.txt
@@ -1271,6 +1271,11 @@ of interest:
 hostdata[0] - area reserved for LLD at end of struct Scsi_Host. Size
 is set by the second argument (named 'xtr_bytes') to
 scsi_host_alloc() or scsi_register().
+ vendor_id - a unique value that identifies the vendor supplying
+ the LLD for the Scsi_Host. Used most often in validating
+ vendor-specific message requests. Value consists of an
+ identifier type and a vendor-specific value.
+ See scsi_netlink.h for a description of valid formats.
The scsi_host structure is defined in include/scsi/scsi_host.h
diff --git a/Documentation/vm/Makefile b/Documentation/vm/Makefile
index 27479d4..5bd269b 100644
--- a/Documentation/vm/Makefile
+++ b/Documentation/vm/Makefile
@@ -2,7 +2,7 @@ obj- := dummy.o
# List of programs to build
-hostprogs-y := slabinfo slqbinfo page-types
+hostprogs-y := slabinfo page-types
# Tell kbuild to always build the programs
always := $(hostprogs-y)
diff --git a/Documentation/watchdog/hpwdt.txt b/Documentation/watchdog/hpwdt.txt
new file mode 100644
index 0000000..127839e
--- /dev/null
+++ b/Documentation/watchdog/hpwdt.txt
@@ -0,0 +1,84 @@
+Last reviewed: 06/02/2009
+
+ HP iLO2 NMI Watchdog Driver
+ NMI sourcing for iLO2 based ProLiant Servers
+ Documentation and Driver by
+ Thomas Mingarelli <thomas.mingarelli@hp.com>
+
+ The HP iLO2 NMI Watchdog driver is a kernel module that provides basic
+ watchdog functionality and the added benefit of NMI sourcing. Both the
+ watchdog functionality and the NMI sourcing capability need to be enabled
+ by the user. Remember that the two modes are not dependent on one another.
+ A user can have the NMI sourcing without the watchdog timer and vice-versa.
+
+ Watchdog functionality is enabled like any other common watchdog driver. That
+ is, an application needs to be started that kicks off the watchdog timer. A
+ basic application exists in the Documentation/watchdog/src directory called
+ watchdog-test.c. Simply compile the C file and kick it off. If the system
+ gets into a bad state and hangs, the HP ProLiant iLO 2 timer register will
+ not be updated in a timely fashion and a hardware system reset (also known as
+ an Automatic Server Recovery (ASR)) event will occur.
+
+ The hpwdt driver also has three (3) module parameters. They are the following:
+
+ soft_margin - allows the user to set the watchdog timer value
+ allow_kdump - allows the user to save off a kernel dump image after an NMI
+ nowayout - basic watchdog parameter that does not allow the timer to
+ be restarted or an impending ASR to be escaped.
+
+ NOTE: More information about watchdog drivers in general, including the ioctl
+ interface to /dev/watchdog can be found in
+ Documentation/watchdog/watchdog-api.txt and Documentation/IPMI.txt.
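+
+ In the spirit of watchdog-test.c, a minimal keep-alive application
+ (assuming the standard /dev/watchdog node and ioctl interface) looks
+ like this:
+
+	#include <fcntl.h>
+	#include <unistd.h>
+	#include <sys/ioctl.h>
+	#include <linux/watchdog.h>
+
+	int main(void)
+	{
+		int fd = open("/dev/watchdog", O_WRONLY);
+
+		if (fd < 0)
+			return 1;
+
+		/* Kick the timer forever; if this process dies and nothing
+		 * else kicks the timer, the ASR reset will eventually fire. */
+		for (;;) {
+			ioctl(fd, WDIOC_KEEPALIVE, 0);
+			sleep(10);
+		}
+	}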
+
+ The NMI sourcing capability is disabled when the driver discovers that the
+ nmi_watchdog is turned on (nmi_watchdog = 1). This is due to the inability to
+ distinguish between "NMI Watchdog Ticks" and "HW generated NMI events" in the
+ Linux kernel. What this means is that the hpwdt nmi handler code is called
+ each time the NMI signal fires off. This could amount to several thousand
+ NMIs in a matter of seconds. If a user sees the Linux kernel's "dazed and
+ confused" message in the logs or if the system gets into a hung state, then
+ the user should reboot with nmi_watchdog=0.
+
+ 1. If the kernel has not been booted with nmi_watchdog turned off then
+ edit /boot/grub/menu.lst and place nmi_watchdog=0 at the end of the
+ currently booting kernel line.
+ 2. Reboot the server.
+
+ Now, the hpwdt driver can successfully receive and source the NMI and provide
+ a log message that details the reason for the NMI (as determined by the HP
+ BIOS).
+
+ Below is a list of NMIs the HP BIOS understands along with the associated
+ code (reason):
+
+ No source found 00h
+
+ Uncorrectable Memory Error 01h
+
+ ASR NMI 1Bh
+
+ PCI Parity Error 20h
+
+ NMI Button Press 27h
+
+ SB_BUS_NMI 28h
+
+ ILO Doorbell NMI 29h
+
+ ILO IOP NMI 2Ah
+
+ ILO Watchdog NMI 2Bh
+
+ Proc Throt NMI 2Ch
+
+ Front Side Bus NMI 2Dh
+
+ PCI Express Error 2Fh
+
+ DMA controller NMI 30h
+
+ Hypertransport/CSI Error 31h
+
+
+
+ -- Tom Mingarelli
+ (thomas.mingarelli@hp.com)
diff --git a/MAINTAINERS b/MAINTAINERS
index fb94add..035df9d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1544,6 +1544,7 @@ L: containers@lists.linux-foundation.org
S: Maintained
F: include/linux/cgroup*
F: kernel/cgroup*
+F: mm/*cgroup*
CORETEMP HARDWARE MONITORING DRIVER
P: Rudolf Marek
@@ -2274,11 +2275,9 @@ F: drivers/net/wan/dlci.c
F: drivers/net/wan/sdla.c
FRAMEBUFFER LAYER
-P: Antonino Daplas
-M: adaplas@gmail.com
L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
W: http://linux-fbdev.sourceforge.net/
-S: Maintained
+S: Orphan
F: Documentation/fb/
F: drivers/video/fb*
F: include/linux/fb.h
@@ -4578,6 +4577,13 @@ S: Maintained
F: drivers/net/pppol2tp.c
F: include/linux/if_pppol2tp.h
+PPS SUPPORT
+P: Rodolfo Giometti
+M: giometti@enneenne.com
+W: http://wiki.enneenne.com/index.php/LinuxPPS_support
+L: linuxpps@ml.enneenne.com (subscribers-only)
+S: Maintained
+
PREEMPTIBLE KERNEL
P: Robert Love
M: rml@tech9.net
@@ -330,6 +330,7 @@ AFLAGS_MODULE = $(MODFLAGS)
LDFLAGS_MODULE =
CFLAGS_KERNEL =
AFLAGS_KERNEL =
+CFLAGS_GCOV = -fprofile-arcs -ftest-coverage
# Use LINUXINCLUDE when you must reference the include/ directory.
@@ -356,7 +357,7 @@ export CPP AR NM STRIP OBJCOPY OBJDUMP MAKE AWK GENKSYMS PERL UTS_MACHINE export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS -export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE +export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE # When compiling out-of-tree modules, put MODVERDIR in the module @@ -1216,8 +1217,8 @@ clean: archclean $(clean-dirs) \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \ -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \ -o -name '*.symtypes' -o -name 'modules.order' \ - -o -name 'Module.markers' -o -name '.tmp_*.o.*' \) \ - -type f -print | xargs rm -f + -o -name 'Module.markers' -o -name '.tmp_*.o.*' \ + -o -name '*.gcno' \) -type f -print | xargs rm -f # mrproper - Delete all generated files, including .config # @@ -1421,8 +1422,8 @@ clean: $(clean-dirs) $(call cmd,rmfiles) @find $(KBUILD_EXTMOD) $(RCS_FIND_IGNORE) \ \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \ - -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \) \ - -type f -print | xargs rm -f + -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \ + -o -name '*.gcno' \) -type f -print | xargs rm -f help: @echo ' Building external modules.' diff --git a/arch/Kconfig b/arch/Kconfig index 78a35e9..99193b1 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -112,3 +112,5 @@ config HAVE_DMA_API_DEBUG config HAVE_DEFAULT_NO_SPIN_MUTEXES bool + +source "kernel/gcov/Kconfig" diff --git a/arch/alpha/include/asm/pci.h b/arch/alpha/include/asm/pci.h index cb04eaa..d22ace9 100644 --- a/arch/alpha/include/asm/pci.h +++ b/arch/alpha/include/asm/pci.h @@ -237,19 +237,6 @@ extern void pcibios_resource_to_bus(struct pci_dev *, struct pci_bus_region *, extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, struct pci_bus_region *region); -static inline struct resource * -pcibios_select_root(struct pci_dev *pdev, struct resource *res) -{ - struct resource *root = NULL; - - if (res->flags & IORESOURCE_IO) - root = &ioport_resource; - if (res->flags & IORESOURCE_MEM) - root = &iomem_resource; - - return root; -} - #define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index static inline int pci_proc_domain(struct pci_bus *bus) diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h index 918d0cb..0abf386 100644 --- a/arch/arm/include/asm/pci.h +++ b/arch/arm/include/asm/pci.h @@ -65,19 +65,6 @@ extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, struct pci_bus_region *region); -static inline struct resource * -pcibios_select_root(struct pci_dev *pdev, struct resource *res) -{ - struct resource *root = NULL; - - if (res->flags & IORESOURCE_IO) - root = &ioport_resource; - if (res->flags & IORESOURCE_MEM) - root = &iomem_resource; - - return root; -} - /* * Dummy implementation; always return 0. 
*/ diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c index 1bbe1da..93c0342 100644 --- a/arch/avr32/kernel/process.c +++ b/arch/avr32/kernel/process.c @@ -394,8 +394,6 @@ asmlinkage int sys_execve(char __user *ufilename, char __user *__user *uargv, goto out; error = do_execve(filename, uargv, uenvp, regs); - if (error == 0) - current->ptrace &= ~PT_DTRACE; putname(filename); out: diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c index 2042552..fd961e0 100644 --- a/arch/h8300/kernel/asm-offsets.c +++ b/arch/h8300/kernel/asm-offsets.c @@ -55,7 +55,6 @@ int main(void) DEFINE(LRET, offsetof(struct pt_regs, pc) - sizeof(long)); DEFINE(PT_PTRACED, PT_PTRACED); - DEFINE(PT_DTRACE, PT_DTRACE); return 0; } diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 294a3b1..170042b 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -28,6 +28,7 @@ config IA64 select HAVE_DMA_ATTRS select HAVE_KVM select HAVE_ARCH_TRACEHOOK + select HAVE_DMA_API_DEBUG default y help The Itanium Processor Family is Intel's 64-bit successor to diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index fe63b2dc9..8cfb001 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -1787,7 +1787,7 @@ static struct ioc_iommu ioc_iommu_info[] __initdata = { }; static struct ioc * __init -ioc_init(u64 hpa, void *handle) +ioc_init(unsigned long hpa, void *handle) { struct ioc *ioc; struct ioc_iommu *info; diff --git a/arch/ia64/hp/sim/hpsim_irq.c b/arch/ia64/hp/sim/hpsim_irq.c index acb5047..b272261 100644 --- a/arch/ia64/hp/sim/hpsim_irq.c +++ b/arch/ia64/hp/sim/hpsim_irq.c @@ -27,7 +27,7 @@ hpsim_set_affinity_noop(unsigned int a, const struct cpumask *b) return 0; } -static struct hw_interrupt_type irq_type_hp_sim = { +static struct irq_chip irq_type_hp_sim = { .name = "hpsim", .startup = hpsim_irq_startup, .shutdown = hpsim_irq_noop, @@ -41,12 +41,12 @@ static struct hw_interrupt_type irq_type_hp_sim = { void __init hpsim_irq_init (void) { - irq_desc_t *idesc; + struct irq_desc *idesc; int i; for (i = 0; i < NR_IRQS; ++i) { idesc = irq_desc + i; - if (idesc->chip == &no_irq_type) + if (idesc->chip == &no_irq_chip) idesc->chip = &irq_type_hp_sim; } } diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild index ccbe8ae..c7d0a71 100644 --- a/arch/ia64/include/asm/Kbuild +++ b/arch/ia64/include/asm/Kbuild @@ -2,7 +2,6 @@ include include/asm-generic/Kbuild.asm header-y += break.h header-y += fpu.h -header-y += fpswa.h header-y += ia64regs.h header-y += intel_intrin.h header-y += perfmon_default_smpl.h diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index 36c0009..5a61b5c 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h @@ -8,6 +8,7 @@ #include <asm/machvec.h> #include <linux/scatterlist.h> #include <asm/swiotlb.h> +#include <linux/dma-debug.h> #define ARCH_HAS_DMA_GET_REQUIRED_MASK @@ -24,95 +25,28 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *daddr, gfp_t gfp) { struct dma_map_ops *ops = platform_dma_get_ops(dev); - return ops->alloc_coherent(dev, size, daddr, gfp); + void *caddr; + + caddr = ops->alloc_coherent(dev, size, daddr, gfp); + debug_dma_alloc_coherent(dev, size, *daddr, caddr); + return caddr; } static inline void dma_free_coherent(struct device *dev, size_t size, void *caddr, dma_addr_t daddr) { struct dma_map_ops *ops = platform_dma_get_ops(dev); + 
debug_dma_free_coherent(dev, size, caddr, daddr); ops->free_coherent(dev, size, caddr, daddr); } #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -static inline dma_addr_t dma_map_single_attrs(struct device *dev, - void *caddr, size_t size, - enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = platform_dma_get_ops(dev); - return ops->map_page(dev, virt_to_page(caddr), - (unsigned long)caddr & ~PAGE_MASK, size, - dir, attrs); -} - -static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr, - size_t size, - enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = platform_dma_get_ops(dev); - ops->unmap_page(dev, daddr, size, dir, attrs); -} - -#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL) -#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL) - -static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = platform_dma_get_ops(dev); - return ops->map_sg(dev, sgl, nents, dir, attrs); -} - -static inline void dma_unmap_sg_attrs(struct device *dev, - struct scatterlist *sgl, int nents, - enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = platform_dma_get_ops(dev); - ops->unmap_sg(dev, sgl, nents, dir, attrs); -} - -#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL) -#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL) - -static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr, - size_t size, - enum dma_data_direction dir) -{ - struct dma_map_ops *ops = platform_dma_get_ops(dev); - ops->sync_single_for_cpu(dev, daddr, size, dir); -} - -static inline void dma_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sgl, - int nents, enum dma_data_direction dir) -{ - struct dma_map_ops *ops = platform_dma_get_ops(dev); - ops->sync_sg_for_cpu(dev, sgl, nents, dir); -} +#define get_dma_ops(dev) platform_dma_get_ops(dev) +#define flush_write_buffers() -static inline void dma_sync_single_for_device(struct device *dev, - dma_addr_t daddr, - size_t size, - enum dma_data_direction dir) -{ - struct dma_map_ops *ops = platform_dma_get_ops(dev); - ops->sync_single_for_device(dev, daddr, size, dir); -} - -static inline void dma_sync_sg_for_device(struct device *dev, - struct scatterlist *sgl, - int nents, - enum dma_data_direction dir) -{ - struct dma_map_ops *ops = platform_dma_get_ops(dev); - ops->sync_sg_for_device(dev, sgl, nents, dir); -} +#include <asm-generic/dma-mapping-common.h> static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr) { @@ -120,30 +54,6 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr) return ops->mapping_error(dev, daddr); } -static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, - size_t offset, size_t size, - enum dma_data_direction dir) -{ - struct dma_map_ops *ops = platform_dma_get_ops(dev); - return ops->map_page(dev, page, offset, size, dir, NULL); -} - -static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir) -{ - dma_unmap_single(dev, addr, size, dir); -} - -/* - * Rest of this file is part of the "Advanced DMA API". Use at your own risk. - * See Documentation/DMA-API.txt for details. 
- */ - -#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir) \ - dma_sync_single_for_cpu(dev, dma_handle, size, dir) -#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \ - dma_sync_single_for_device(dev, dma_handle, size, dir) - static inline int dma_supported(struct device *dev, u64 mask) { struct dma_map_ops *ops = platform_dma_get_ops(dev); diff --git a/arch/ia64/include/asm/gcc_intrin.h b/arch/ia64/include/asm/gcc_intrin.h index c2c5fd8..21ddee5 100644 --- a/arch/ia64/include/asm/gcc_intrin.h +++ b/arch/ia64/include/asm/gcc_intrin.h @@ -388,7 +388,7 @@ register unsigned long ia64_r13 asm ("r13") __used; #define ia64_native_thash(addr) \ ({ \ - __u64 ia64_intri_res; \ + unsigned long ia64_intri_res; \ asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \ ia64_intri_res; \ }) @@ -419,7 +419,7 @@ register unsigned long ia64_r13 asm ("r13") __used; #define ia64_tpa(addr) \ ({ \ - __u64 ia64_pa; \ + unsigned long ia64_pa; \ asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \ ia64_pa; \ }) @@ -444,35 +444,35 @@ register unsigned long ia64_r13 asm ("r13") __used; #define ia64_native_get_cpuid(index) \ ({ \ - __u64 ia64_intri_res; \ + unsigned long ia64_intri_res; \ asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \ ia64_intri_res; \ }) #define __ia64_get_dbr(index) \ ({ \ - __u64 ia64_intri_res; \ + unsigned long ia64_intri_res; \ asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ ia64_intri_res; \ }) #define ia64_get_ibr(index) \ ({ \ - __u64 ia64_intri_res; \ + unsigned long ia64_intri_res; \ asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ ia64_intri_res; \ }) #define ia64_get_pkr(index) \ ({ \ - __u64 ia64_intri_res; \ + unsigned long ia64_intri_res; \ asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ ia64_intri_res; \ }) #define ia64_get_pmc(index) \ ({ \ - __u64 ia64_intri_res; \ + unsigned long ia64_intri_res; \ asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ ia64_intri_res; \ }) @@ -480,14 +480,14 @@ register unsigned long ia64_r13 asm ("r13") __used; #define ia64_native_get_pmd(index) \ ({ \ - __u64 ia64_intri_res; \ + unsigned long ia64_intri_res; \ asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ ia64_intri_res; \ }) #define ia64_native_get_rr(index) \ ({ \ - __u64 ia64_intri_res; \ + unsigned long ia64_intri_res; \ asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \ ia64_intri_res; \ }) diff --git a/arch/ia64/include/asm/hw_irq.h b/arch/ia64/include/asm/hw_irq.h index 5c99cbc..91619b3 100644 --- a/arch/ia64/include/asm/hw_irq.h +++ b/arch/ia64/include/asm/hw_irq.h @@ -106,7 +106,7 @@ extern struct irq_cfg irq_cfg[NR_IRQS]; #define irq_to_domain(x) irq_cfg[(x)].domain DECLARE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq); -extern struct hw_interrupt_type irq_type_ia64_lsapic; /* CPU-internal interrupt controller */ +extern struct irq_chip irq_type_ia64_lsapic; /* CPU-internal interrupt controller */ #ifdef CONFIG_PARAVIRT_GUEST #include <asm/paravirt.h> @@ -146,7 +146,7 @@ static inline void ia64_native_resend_irq(unsigned int vector) * Default implementations for the irq-descriptor API: */ -extern irq_desc_t irq_desc[NR_IRQS]; +extern struct irq_desc irq_desc[NR_IRQS]; #ifndef CONFIG_IA64_GENERIC static inline ia64_vector __ia64_irq_to_vector(int irq) diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h index 18a4321..44a0b53 100644 --- 
a/arch/ia64/include/asm/mca.h +++ b/arch/ia64/include/asm/mca.h @@ -72,39 +72,39 @@ typedef struct ia64_mc_info_s { struct ia64_sal_os_state { /* SAL to OS */ - u64 os_gp; /* GP of the os registered with the SAL, physical */ - u64 pal_proc; /* PAL_PROC entry point, physical */ - u64 sal_proc; /* SAL_PROC entry point, physical */ - u64 rv_rc; /* MCA - Rendezvous state, INIT - reason code */ - u64 proc_state_param; /* from R18 */ - u64 monarch; /* 1 for a monarch event, 0 for a slave */ + unsigned long os_gp; /* GP of the os registered with the SAL, physical */ + unsigned long pal_proc; /* PAL_PROC entry point, physical */ + unsigned long sal_proc; /* SAL_PROC entry point, physical */ + unsigned long rv_rc; /* MCA - Rendezvous state, INIT - reason code */ + unsigned long proc_state_param; /* from R18 */ + unsigned long monarch; /* 1 for a monarch event, 0 for a slave */ /* common */ - u64 sal_ra; /* Return address in SAL, physical */ - u64 sal_gp; /* GP of the SAL - physical */ + unsigned long sal_ra; /* Return address in SAL, physical */ + unsigned long sal_gp; /* GP of the SAL - physical */ pal_min_state_area_t *pal_min_state; /* from R17. physical in asm, virtual in C */ /* Previous values of IA64_KR(CURRENT) and IA64_KR(CURRENT_STACK). * Note: if the MCA/INIT recovery code wants to resume to a new context * then it must change these values to reflect the new kernel stack. */ - u64 prev_IA64_KR_CURRENT; /* previous value of IA64_KR(CURRENT) */ - u64 prev_IA64_KR_CURRENT_STACK; + unsigned long prev_IA64_KR_CURRENT; /* previous value of IA64_KR(CURRENT) */ + unsigned long prev_IA64_KR_CURRENT_STACK; struct task_struct *prev_task; /* previous task, NULL if it is not useful */ /* Some interrupt registers are not saved in minstate, pt_regs or * switch_stack. Because MCA/INIT can occur when interrupts are * disabled, we need to save the additional interrupt registers over * MCA/INIT and resume. */ - u64 isr; - u64 ifa; - u64 itir; - u64 iipa; - u64 iim; - u64 iha; + unsigned long isr; + unsigned long ifa; + unsigned long itir; + unsigned long iipa; + unsigned long iim; + unsigned long iha; /* OS to SAL */ - u64 os_status; /* OS status to SAL, enum below */ - u64 context; /* 0 if return to same context + unsigned long os_status; /* OS status to SAL, enum below */ + unsigned long context; /* 0 if return to same context 1 if return to new context */ }; @@ -150,7 +150,7 @@ extern void ia64_slave_init_handler(void); extern void ia64_mca_cmc_vector_setup(void); extern int ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *)); extern void ia64_unreg_MCA_extension(void); -extern u64 ia64_get_rnat(u64 *); +extern unsigned long ia64_get_rnat(unsigned long *); extern void ia64_mca_printk(const char * fmt, ...) 
__attribute__ ((format (printf, 1, 2))); diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h index c0cea37..688a812 100644 --- a/arch/ia64/include/asm/meminit.h +++ b/arch/ia64/include/asm/meminit.h @@ -25,8 +25,8 @@ #define IA64_MAX_RSVD_REGIONS 9 struct rsvd_region { - unsigned long start; /* virtual address of beginning of element */ - unsigned long end; /* virtual address of end of element + 1 */ + u64 start; /* virtual address of beginning of element */ + u64 end; /* virtual address of end of element + 1 */ }; extern struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1]; @@ -35,13 +35,13 @@ extern int num_rsvd_regions; extern void find_memory (void); extern void reserve_memory (void); extern void find_initrd (void); -extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg); -extern int filter_memory (unsigned long start, unsigned long end, void *arg); -extern unsigned long efi_memmap_init(unsigned long *s, unsigned long *e); -extern int find_max_min_low_pfn (unsigned long , unsigned long, void *); +extern int filter_rsvd_memory (u64 start, u64 end, void *arg); +extern int filter_memory (u64 start, u64 end, void *arg); +extern unsigned long efi_memmap_init(u64 *s, u64 *e); +extern int find_max_min_low_pfn (u64, u64, void *); extern unsigned long vmcore_find_descriptor_size(unsigned long address); -extern int reserve_elfcorehdr(unsigned long *start, unsigned long *end); +extern int reserve_elfcorehdr(u64 *start, u64 *end); /* * For rounding an address to the next IA64_GRANULE_SIZE or order @@ -63,8 +63,8 @@ extern int register_active_ranges(u64 start, u64 len, int nid); # define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */ extern unsigned long vmalloc_end; extern struct page *vmem_map; - extern int find_largest_hole (u64 start, u64 end, void *arg); - extern int create_mem_map_page_table (u64 start, u64 end, void *arg); + extern int find_largest_hole(u64 start, u64 end, void *arg); + extern int create_mem_map_page_table(u64 start, u64 end, void *arg); extern int vmemmap_find_next_valid_pfn(int, int); #else static inline int vmemmap_find_next_valid_pfn(int node, int i) diff --git a/arch/ia64/include/asm/pal.h b/arch/ia64/include/asm/pal.h index 67b0290..6a29250 100644 --- a/arch/ia64/include/asm/pal.h +++ b/arch/ia64/include/asm/pal.h @@ -989,8 +989,8 @@ ia64_pal_cache_read (pal_cache_line_id_u_t line_id, u64 physical_addr) } /* Return summary information about the hierarchy of caches controlled by the processor */ -static inline s64 -ia64_pal_cache_summary (u64 *cache_levels, u64 *unique_caches) +static inline long ia64_pal_cache_summary(unsigned long *cache_levels, + unsigned long *unique_caches) { struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_CACHE_SUMMARY, 0, 0, 0); @@ -1038,8 +1038,8 @@ ia64_pal_copy_pal (u64 target_addr, u64 alloc_size, u64 processor, u64 *pal_proc } /* Return the number of instruction and data debug register pairs */ -static inline s64 -ia64_pal_debug_info (u64 *inst_regs, u64 *data_regs) +static inline long ia64_pal_debug_info(unsigned long *inst_regs, + unsigned long *data_regs) { struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_DEBUG_INFO, 0, 0, 0); @@ -1074,8 +1074,7 @@ ia64_pal_fixed_addr (u64 *global_unique_addr) } /* Get base frequency of the platform if generated by the processor */ -static inline s64 -ia64_pal_freq_base (u64 *platform_base_freq) +static inline long ia64_pal_freq_base(unsigned long *platform_base_freq) { struct ia64_pal_retval iprv; PAL_CALL(iprv, 
PAL_FREQ_BASE, 0, 0, 0); @@ -1437,7 +1436,7 @@ ia64_pal_proc_set_features (u64 feature_select) * possible. */ typedef struct ia64_ptce_info_s { - u64 base; + unsigned long base; u32 count[2]; u32 stride[2]; } ia64_ptce_info_t; @@ -1478,9 +1477,9 @@ ia64_pal_register_info (u64 info_request, u64 *reg_info_1, u64 *reg_info_2) } typedef union pal_hints_u { - u64 ph_data; + unsigned long ph_data; struct { - u64 si : 1, + unsigned long si : 1, li : 1, reserved : 62; } pal_hints_s; @@ -1489,8 +1488,8 @@ typedef union pal_hints_u { /* Return information about the register stack and RSE for this processor * implementation. */ -static inline s64 -ia64_pal_rse_info (u64 *num_phys_stacked, pal_hints_u_t *hints) +static inline long ia64_pal_rse_info(unsigned long *num_phys_stacked, + pal_hints_u_t *hints) { struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_RSE_INFO, 0, 0, 0); @@ -1608,8 +1607,7 @@ ia64_pal_vm_info (u64 tc_level, u64 tc_type, pal_tc_info_u_t *tc_info, u64 *tc_ /* Get page size information about the virtual memory characteristics of the processor * implementation. */ -static inline s64 -ia64_pal_vm_page_size (u64 *tr_pages, u64 *vw_pages) +static inline s64 ia64_pal_vm_page_size(u64 *tr_pages, u64 *vw_pages) { struct ia64_pal_retval iprv; PAL_CALL(iprv, PAL_VM_PAGE_SIZE, 0, 0, 0); diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h index 1d660d8..fcfca56 100644 --- a/arch/ia64/include/asm/pci.h +++ b/arch/ia64/include/asm/pci.h @@ -135,19 +135,6 @@ extern void pcibios_resource_to_bus(struct pci_dev *dev, extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, struct pci_bus_region *region); -static inline struct resource * -pcibios_select_root(struct pci_dev *pdev, struct resource *res) -{ - struct resource *root = NULL; - - if (res->flags & IORESOURCE_IO) - root = &ioport_resource; - if (res->flags & IORESOURCE_MEM) - root = &iomem_resource; - - return root; -} - #define pcibios_scan_all_fns(a, b) 0 #define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h index f88fa05..3eaeedf1 100644 --- a/arch/ia64/include/asm/processor.h +++ b/arch/ia64/include/asm/processor.h @@ -187,40 +187,40 @@ union ia64_rr { * state comes earlier: */ struct cpuinfo_ia64 { - __u32 softirq_pending; - __u64 itm_delta; /* # of clock cycles between clock ticks */ - __u64 itm_next; /* interval timer mask value to use for next clock tick */ - __u64 nsec_per_cyc; /* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */ - __u64 unimpl_va_mask; /* mask of unimplemented virtual address bits (from PAL) */ - __u64 unimpl_pa_mask; /* mask of unimplemented physical address bits (from PAL) */ - __u64 itc_freq; /* frequency of ITC counter */ - __u64 proc_freq; /* frequency of processor */ - __u64 cyc_per_usec; /* itc_freq/1000000 */ - __u64 ptce_base; - __u32 ptce_count[2]; - __u32 ptce_stride[2]; + unsigned int softirq_pending; + unsigned long itm_delta; /* # of clock cycles between clock ticks */ + unsigned long itm_next; /* interval timer mask value to use for next clock tick */ + unsigned long nsec_per_cyc; /* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */ + unsigned long unimpl_va_mask; /* mask of unimplemented virtual address bits (from PAL) */ + unsigned long unimpl_pa_mask; /* mask of unimplemented physical address bits (from PAL) */ + unsigned long itc_freq; /* frequency of ITC counter */ + unsigned long proc_freq; /* frequency of processor */ + unsigned long cyc_per_usec; /* itc_freq/1000000 */ + unsigned long 
ptce_base; + unsigned int ptce_count[2]; + unsigned int ptce_stride[2]; struct task_struct *ksoftirqd; /* kernel softirq daemon for this CPU */ #ifdef CONFIG_SMP - __u64 loops_per_jiffy; + unsigned long loops_per_jiffy; int cpu; - __u32 socket_id; /* physical processor socket id */ - __u16 core_id; /* core id */ - __u16 thread_id; /* thread id */ - __u16 num_log; /* Total number of logical processors on + unsigned int socket_id; /* physical processor socket id */ + unsigned short core_id; /* core id */ + unsigned short thread_id; /* thread id */ + unsigned short num_log; /* Total number of logical processors on * this socket that were successfully booted */ - __u8 cores_per_socket; /* Cores per processor socket */ - __u8 threads_per_core; /* Threads per core */ + unsigned char cores_per_socket; /* Cores per processor socket */ + unsigned char threads_per_core; /* Threads per core */ #endif /* CPUID-derived information: */ - __u64 ppn; - __u64 features; - __u8 number; - __u8 revision; - __u8 model; - __u8 family; - __u8 archrev; + unsigned long ppn; + unsigned long features; + unsigned char number; + unsigned char revision; + unsigned char model; + unsigned char family; + unsigned char archrev; char vendor[16]; char *model_name; @@ -329,8 +329,8 @@ struct thread_struct { #else # define INIT_THREAD_PM #endif - __u64 dbr[IA64_NUM_DBG_REGS]; - __u64 ibr[IA64_NUM_DBG_REGS]; + unsigned long dbr[IA64_NUM_DBG_REGS]; + unsigned long ibr[IA64_NUM_DBG_REGS]; struct ia64_fpreg fph[96]; /* saved/loaded on demand */ }; diff --git a/arch/ia64/include/asm/sal.h b/arch/ia64/include/asm/sal.h index 966797a..d19ddba 100644 --- a/arch/ia64/include/asm/sal.h +++ b/arch/ia64/include/asm/sal.h @@ -106,10 +106,10 @@ struct ia64_sal_retval { * informational value should be printed (e.g., "reboot for * change to take effect"). */ - s64 status; - u64 v0; - u64 v1; - u64 v2; + long status; + unsigned long v0; + unsigned long v1; + unsigned long v2; }; typedef struct ia64_sal_retval (*ia64_sal_handler) (u64, ...); diff --git a/arch/ia64/include/asm/sn/sn_sal.h b/arch/ia64/include/asm/sn/sn_sal.h index e310fc0..1f5ff47 100644 --- a/arch/ia64/include/asm/sn/sn_sal.h +++ b/arch/ia64/include/asm/sn/sn_sal.h @@ -929,7 +929,7 @@ ia64_sn_sysctl_tio_clock_reset(nasid_t nasid) /* * Get the associated ioboard type for a given nasid. */ -static inline s64 +static inline long ia64_sn_sysctl_ioboard_get(nasid_t nasid, u16 *ioboard) { struct ia64_sal_retval isrv; diff --git a/arch/ia64/include/asm/types.h b/arch/ia64/include/asm/types.h index fbf1ed3..bcd260e 100644 --- a/arch/ia64/include/asm/types.h +++ b/arch/ia64/include/asm/types.h @@ -2,10 +2,11 @@ #define _ASM_IA64_TYPES_H /* - * This file is never included by application software unless explicitly requested (e.g., - * via linux/types.h) in which case the application is Linux specific so (user-) name - * space pollution is not a major issue. However, for interoperability, libraries still - * need to be careful to avoid a name clashes. + * This file is never included by application software unless explicitly + * requested (e.g., via linux/types.h) in which case the application is + * Linux specific so (user-) name space pollution is not a major issue. + * However, for interoperability, libraries still need to be careful to + * avoid naming clashes. * * Based on <asm-alpha/types.h>. 
* @@ -13,7 +14,11 @@ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co */ +#ifdef __KERNEL__ +#include <asm-generic/int-ll64.h> +#else #include <asm-generic/int-l64.h> +#endif #ifdef __ASSEMBLY__ # define __IA64_UL(x) (x) diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index 10a9eb0..5a5347f 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h @@ -310,11 +310,12 @@ #define __NR_inotify_init1 1318 #define __NR_preadv 1319 #define __NR_pwritev 1320 +#define __NR_rt_tgsigqueueinfo 1321 #ifdef __KERNEL__ -#define NR_syscalls 297 /* length of syscall table */ +#define NR_syscalls 298 /* length of syscall table */ /* * The following defines stop scripts/checksyscalls.sh from complaining about @@ -328,6 +329,7 @@ #define __IGNORE_utime /* utimes() */ #define __IGNORE_getpgrp /* getpgid() */ #define __IGNORE_vfork /* clone() */ +#define __IGNORE_umount2 /* umount() */ #define __ARCH_WANT_SYS_RT_SIGACTION #define __ARCH_WANT_SYS_RT_SIGSUSPEND diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index 7ef80e8..c745d0a 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c @@ -46,7 +46,7 @@ extern efi_status_t efi_call_phys (void *, ...); struct efi efi; EXPORT_SYMBOL(efi); static efi_runtime_services_t *runtime; -static unsigned long mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL; +static u64 mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL; #define efi_call_virt(f, args...) (*(f))(args) @@ -356,7 +356,7 @@ efi_get_pal_addr (void) if (++pal_code_count > 1) { printk(KERN_ERR "Too many EFI Pal Code memory ranges, " - "dropped @ %lx\n", md->phys_addr); + "dropped @ %llx\n", md->phys_addr); continue; } /* @@ -490,10 +490,10 @@ efi_init (void) } } if (min_addr != 0UL) - printk(KERN_INFO "Ignoring memory below %luMB\n", + printk(KERN_INFO "Ignoring memory below %lluMB\n", min_addr >> 20); if (max_addr != ~0UL) - printk(KERN_INFO "Ignoring memory above %luMB\n", + printk(KERN_INFO "Ignoring memory above %lluMB\n", max_addr >> 20); efi.systab = __va(ia64_boot_param->efi_systab); @@ -1066,7 +1066,7 @@ find_memmap_space (void) * parts exist, and are WB. 
*/ unsigned long -efi_memmap_init(unsigned long *s, unsigned long *e) +efi_memmap_init(u64 *s, u64 *e) { struct kern_memdesc *k, *prev = NULL; u64 contig_low=0, contig_high=0; diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index 7bebac0..d0e7d37 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -1805,6 +1805,7 @@ sys_call_table: data8 sys_inotify_init1 data8 sys_preadv data8 sys_pwritev // 1320 + data8 sys_rt_tgsigqueueinfo .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index f92cef4..c48b03f 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c @@ -451,7 +451,7 @@ iosapic_startup_edge_irq (unsigned int irq) static void iosapic_ack_edge_irq (unsigned int irq) { - irq_desc_t *idesc = irq_desc + irq; + struct irq_desc *idesc = irq_desc + irq; irq_complete_move(irq); move_native_irq(irq); @@ -600,8 +600,8 @@ static int register_intr (unsigned int gsi, int irq, unsigned char delivery, unsigned long polarity, unsigned long trigger) { - irq_desc_t *idesc; - struct hw_interrupt_type *irq_type; + struct irq_desc *idesc; + struct irq_chip *irq_type; int index; struct iosapic_rte_info *rte; @@ -650,7 +650,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery, idesc = irq_desc + irq; if (irq_type != NULL && idesc->chip != irq_type) { - if (idesc->chip != &no_irq_type) + if (idesc->chip != &no_irq_chip) printk(KERN_WARNING "%s: changing vector %d from %s to %s\n", __func__, irq_to_vector(irq), @@ -828,7 +828,7 @@ iosapic_unregister_intr (unsigned int gsi) { unsigned long flags; int irq, index; - irq_desc_t *idesc; + struct irq_desc *idesc; u32 low32; unsigned long trigger, polarity; unsigned int dest; diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index 7429752..7d89512 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c @@ -130,7 +130,7 @@ unsigned int vectors_in_migration[NR_IRQS]; */ static void migrate_irqs(void) { - irq_desc_t *desc; + struct irq_desc *desc; int irq, new_cpu; for (irq=0; irq < NR_IRQS; irq++) { diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index b448197..dd9d7b5 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -630,7 +630,7 @@ static struct irqaction tlb_irqaction = { void ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action) { - irq_desc_t *desc; + struct irq_desc *desc; unsigned int irq; irq = vec; diff --git a/arch/ia64/kernel/irq_lsapic.c b/arch/ia64/kernel/irq_lsapic.c index e56a7a3..fc1549d 100644 --- a/arch/ia64/kernel/irq_lsapic.c +++ b/arch/ia64/kernel/irq_lsapic.c @@ -33,7 +33,7 @@ static int lsapic_retrigger(unsigned int irq) return 1; } -struct hw_interrupt_type irq_type_ia64_lsapic = { +struct irq_chip irq_type_ia64_lsapic = { .name = "LSAPIC", .startup = lsapic_noop_startup, .shutdown = lsapic_noop, diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 5b17bd4..7b30d21 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -850,7 +850,7 @@ EXPORT_SYMBOL(ia64_unreg_MCA_extension); static inline void -copy_reg(const u64 *fr, u64 fnat, u64 *tr, u64 *tnat) +copy_reg(const u64 *fr, u64 fnat, unsigned long *tr, unsigned long *tnat) { u64 fslot, tslot, nat; *tr = *fr; @@ -914,9 +914,9 @@ ia64_mca_modify_original_stack(struct pt_regs *regs, struct switch_stack *old_sw; unsigned size = sizeof(struct pt_regs) + 
sizeof(struct switch_stack) + 16; - u64 *old_bspstore, *old_bsp; - u64 *new_bspstore, *new_bsp; - u64 old_unat, old_rnat, new_rnat, nat; + unsigned long *old_bspstore, *old_bsp; + unsigned long *new_bspstore, *new_bsp; + unsigned long old_unat, old_rnat, new_rnat, nat; u64 slots, loadrs = regs->loadrs; u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1]; u64 ar_bspstore = regs->ar_bspstore; @@ -968,10 +968,10 @@ ia64_mca_modify_original_stack(struct pt_regs *regs, * loadrs for the new stack and save it in the new pt_regs, where * ia64_old_stack() can get it. */ - old_bspstore = (u64 *)ar_bspstore; - old_bsp = (u64 *)ar_bsp; + old_bspstore = (unsigned long *)ar_bspstore; + old_bsp = (unsigned long *)ar_bsp; slots = ia64_rse_num_regs(old_bspstore, old_bsp); - new_bspstore = (u64 *)((u64)current + IA64_RBS_OFFSET); + new_bspstore = (unsigned long *)((u64)current + IA64_RBS_OFFSET); new_bsp = ia64_rse_skip_regs(new_bspstore, slots); regs->loadrs = (new_bsp - new_bspstore) * 8 << 16; @@ -1917,9 +1917,9 @@ ia64_mca_init(void) ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave; ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch; int i; - s64 rc; + long rc; struct ia64_sal_retval isrv; - u64 timeout = IA64_MCA_RENDEZ_TIMEOUT; /* platform specific */ + unsigned long timeout = IA64_MCA_RENDEZ_TIMEOUT; /* platform specific */ static struct notifier_block default_init_monarch_nb = { .notifier_call = default_monarch_init_process, .priority = 0/* we need to notified last */ @@ -2092,7 +2092,7 @@ ia64_mca_late_init(void) cpe_poll_timer.function = ia64_mca_cpe_poll; { - irq_desc_t *desc; + struct irq_desc *desc; unsigned int irq; if (cpe_vector >= 0) { diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c index da3b0cf..1481b0a 100644 --- a/arch/ia64/kernel/module.c +++ b/arch/ia64/kernel/module.c @@ -171,7 +171,8 @@ apply_imm60 (struct module *mod, struct insn *insn, uint64_t val) return 0; } if (val + ((uint64_t) 1 << 59) >= (1UL << 60)) { - printk(KERN_ERR "%s: value %ld out of IMM60 range\n", mod->name, (int64_t) val); + printk(KERN_ERR "%s: value %ld out of IMM60 range\n", + mod->name, (long) val); return 0; } ia64_patch_imm60((u64) insn, val); @@ -182,7 +183,8 @@ static int apply_imm22 (struct module *mod, struct insn *insn, uint64_t val) { if (val + (1 << 21) >= (1 << 22)) { - printk(KERN_ERR "%s: value %li out of IMM22 range\n", mod->name, (int64_t)val); + printk(KERN_ERR "%s: value %li out of IMM22 range\n", + mod->name, (long)val); return 0; } ia64_patch((u64) insn, 0x01fffcfe000UL, ( ((val & 0x200000UL) << 15) /* bit 21 -> 36 */ @@ -196,7 +198,8 @@ static int apply_imm21b (struct module *mod, struct insn *insn, uint64_t val) { if (val + (1 << 20) >= (1 << 21)) { - printk(KERN_ERR "%s: value %li out of IMM21b range\n", mod->name, (int64_t)val); + printk(KERN_ERR "%s: value %li out of IMM21b range\n", + mod->name, (long)val); return 0; } ia64_patch((u64) insn, 0x11ffffe000UL, ( ((val & 0x100000UL) << 16) /* bit 20 -> 36 */ @@ -701,8 +704,9 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend, case RV_PCREL2: if (r_type == R_IA64_PCREL21BI) { if (!is_internal(mod, val)) { - printk(KERN_ERR "%s: %s reloc against non-local symbol (%lx)\n", - __func__, reloc_name[r_type], val); + printk(KERN_ERR "%s: %s reloc against " + "non-local symbol (%lx)\n", __func__, + reloc_name[r_type], (unsigned long)val); return -ENOEXEC; } format = RF_INSN21B; diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c index 
0f8ade9..6c89228 100644 --- a/arch/ia64/kernel/msi_ia64.c +++ b/arch/ia64/kernel/msi_ia64.c @@ -158,7 +158,7 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) } #endif /* CONFIG_SMP */ -struct irq_chip dmar_msi_type = { +static struct irq_chip dmar_msi_type = { .name = "DMAR_MSI", .unmask = dmar_msi_unmask, .mask = dmar_msi_mask, diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c index a4f19c7..fdf6f9d 100644 --- a/arch/ia64/kernel/palinfo.c +++ b/arch/ia64/kernel/palinfo.c @@ -218,10 +218,10 @@ static int cache_info(char *page) { char *p = page; - u64 i, levels, unique_caches; + unsigned long i, levels, unique_caches; pal_cache_config_info_t cci; int j, k; - s64 status; + long status; if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) { printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status); @@ -303,7 +303,7 @@ vm_info(char *page) ia64_ptce_info_t ptce; const char *sep; int i, j; - s64 status; + long status; if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) { printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status); @@ -431,9 +431,9 @@ register_info(char *page) char *p = page; u64 reg_info[2]; u64 info; - u64 phys_stacked; + unsigned long phys_stacked; pal_hints_u_t hints; - u64 iregs, dregs; + unsigned long iregs, dregs; char *info_type[]={ "Implemented AR(s)", "AR(s) with read side-effects", @@ -530,8 +530,8 @@ static char **proc_features[]={ NULL, NULL, NULL, NULL, }; -static char * -feature_set_info(char *page, u64 avail, u64 status, u64 control, u64 set) +static char * feature_set_info(char *page, u64 avail, u64 status, u64 control, + unsigned long set) { char *p = page; char **vf, **v; @@ -714,7 +714,7 @@ frequency_info(char *page) { char *p = page; struct pal_freq_ratio proc, itc, bus; - u64 base; + unsigned long base; if (ia64_pal_freq_base(&base) == -1) p += sprintf(p, "Output clock : not implemented\n"); @@ -736,43 +736,43 @@ static int tr_info(char *page) { char *p = page; - s64 status; + long status; pal_tr_valid_u_t tr_valid; u64 tr_buffer[4]; pal_vm_info_1_u_t vm_info_1; pal_vm_info_2_u_t vm_info_2; - u64 i, j; - u64 max[3], pgm; + unsigned long i, j; + unsigned long max[3], pgm; struct ifa_reg { - u64 valid:1; - u64 ig:11; - u64 vpn:52; + unsigned long valid:1; + unsigned long ig:11; + unsigned long vpn:52; } *ifa_reg; struct itir_reg { - u64 rv1:2; - u64 ps:6; - u64 key:24; - u64 rv2:32; + unsigned long rv1:2; + unsigned long ps:6; + unsigned long key:24; + unsigned long rv2:32; } *itir_reg; struct gr_reg { - u64 p:1; - u64 rv1:1; - u64 ma:3; - u64 a:1; - u64 d:1; - u64 pl:2; - u64 ar:3; - u64 ppn:38; - u64 rv2:2; - u64 ed:1; - u64 ig:11; + unsigned long p:1; + unsigned long rv1:1; + unsigned long ma:3; + unsigned long a:1; + unsigned long d:1; + unsigned long pl:2; + unsigned long ar:3; + unsigned long ppn:38; + unsigned long rv2:2; + unsigned long ed:1; + unsigned long ig:11; } *gr_reg; struct rid_reg { - u64 ig1:1; - u64 rv1:1; - u64 ig2:6; - u64 rid:24; - u64 rv2:32; + unsigned long ig1:1; + unsigned long rv1:1; + unsigned long ig2:6; + unsigned long rid:24; + unsigned long rv2:32; } *rid_reg; if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) { diff --git a/arch/ia64/kernel/paravirt_patchlist.c b/arch/ia64/kernel/paravirt_patchlist.c index b28082a..0a70720 100644 --- a/arch/ia64/kernel/paravirt_patchlist.c +++ b/arch/ia64/kernel/paravirt_patchlist.c @@ -19,6 +19,8 @@ */ #include <linux/bug.h> +#include <linux/init.h> +#include <linux/kernel.h> #include <asm/paravirt.h> #define 
DECLARE(name) \ diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c index eb98738..1376da4 100644 --- a/arch/ia64/kernel/pci-dma.c +++ b/arch/ia64/kernel/pci-dma.c @@ -91,7 +91,7 @@ int iommu_dma_supported(struct device *dev, u64 mask) type. Normally this doesn't make any difference, but gives more gentle handling of IOMMU overflow. */ if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) { - dev_info(dev, "Force SAC with mask %lx\n", mask); + dev_info(dev, "Force SAC with mask %llx\n", mask); return 0; } diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index bdc176c..abce246 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -312,7 +312,7 @@ typedef struct pfm_context { unsigned long th_pmcs[PFM_NUM_PMC_REGS]; /* PMC thread save state */ unsigned long th_pmds[PFM_NUM_PMD_REGS]; /* PMD thread save state */ - u64 ctx_saved_psr_up; /* only contains psr.up value */ + unsigned long ctx_saved_psr_up; /* only contains psr.up value */ unsigned long ctx_last_activation; /* context last activation number for last_cpu */ unsigned int ctx_last_cpu; /* CPU id of current or last CPU used (SMP only) */ @@ -5213,8 +5213,8 @@ pfm_end_notify_user(pfm_context_t *ctx) * main overflow processing routine. * it can be called from the interrupt path or explicitly during the context switch code */ -static void -pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs) +static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, + unsigned long pmc0, struct pt_regs *regs) { pfm_ovfl_arg_t *ovfl_arg; unsigned long mask; diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 714066a..1b23ec12 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -151,9 +151,9 @@ int num_rsvd_regions __initdata; * This routine does not assume the incoming segments are sorted. */ int __init -filter_rsvd_memory (unsigned long start, unsigned long end, void *arg) +filter_rsvd_memory (u64 start, u64 end, void *arg) { - unsigned long range_start, range_end, prev_start; + u64 range_start, range_end, prev_start; void (*func)(unsigned long, unsigned long, int); int i; @@ -191,7 +191,7 @@ filter_rsvd_memory (unsigned long start, unsigned long end, void *arg) * are not filtered out. */ int __init -filter_memory(unsigned long start, unsigned long end, void *arg) +filter_memory(u64 start, u64 end, void *arg) { void (*func)(unsigned long, unsigned long, int); @@ -397,7 +397,7 @@ find_initrd (void) initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start); initrd_end = initrd_start+ia64_boot_param->initrd_size; - printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n", + printk(KERN_INFO "Initial ramdisk at: 0x%lx (%llu bytes)\n", initrd_start, ia64_boot_param->initrd_size); } #endif @@ -505,9 +505,9 @@ static int __init parse_elfcorehdr(char *arg) } early_param("elfcorehdr", parse_elfcorehdr); -int __init reserve_elfcorehdr(unsigned long *start, unsigned long *end) +int __init reserve_elfcorehdr(u64 *start, u64 *end) { - unsigned long length; + u64 length; /* We get the address using the kernel command line, * but the size is extracted from the EFI tables. 
@@ -588,7 +588,7 @@ setup_arch (char **cmdline_p) ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist); #else { - u64 num_phys_stacked; + unsigned long num_phys_stacked; if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96) ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist); @@ -872,9 +872,9 @@ static void __cpuinit get_cache_info(void) { unsigned long line_size, max = 1; - u64 l, levels, unique_caches; - pal_cache_config_info_t cci; - s64 status; + unsigned long l, levels, unique_caches; + pal_cache_config_info_t cci; + long status; status = ia64_pal_cache_summary(&levels, &unique_caches); if (status != 0) { @@ -892,9 +892,9 @@ get_cache_info(void) /* cache_type (data_or_unified)=2 */ status = ia64_pal_cache_config_info(l, 2, &cci); if (status != 0) { - printk(KERN_ERR - "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n", - __func__, l, status); + printk(KERN_ERR "%s: ia64_pal_cache_config_info" + "(l=%lu, 2) failed (status=%ld)\n", + __func__, l, status); max = SMP_CACHE_BYTES; /* The safest setup for "flush_icache_range()" */ cci.pcci_stride = I_CACHE_STRIDE_SHIFT; @@ -914,10 +914,10 @@ get_cache_info(void) /* cache_type (instruction)=1*/ status = ia64_pal_cache_config_info(l, 1, &cci); if (status != 0) { - printk(KERN_ERR - "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n", + printk(KERN_ERR "%s: ia64_pal_cache_config_info" + "(l=%lu, 1) failed (status=%ld)\n", __func__, l, status); - /* The safest setup for "flush_icache_range()" */ + /* The safest setup for flush_icache_range() */ cci.pcci_stride = I_CACHE_STRIDE_SHIFT; } } diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index 5230eaa..f0c521b 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c @@ -66,7 +66,7 @@ static DEFINE_PER_CPU(unsigned short, shadow_flush_counts[NR_CPUS]) ____cachelin #define IPI_KDUMP_CPU_STOP 3 /* This needs to be cacheline aligned because it is written to by *other* CPUs. 
*/ -static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation); +static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, ipi_operation); extern void cpu_halt (void); diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 7700e23..de100aa 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -678,7 +678,7 @@ extern void fixup_irqs(void); int migrate_platform_irqs(unsigned int cpu) { int new_cpei_cpu; - irq_desc_t *desc = NULL; + struct irq_desc *desc = NULL; const struct cpumask *mask; int retval = 0; @@ -865,7 +865,7 @@ init_smp_config(void) void __devinit identify_siblings(struct cpuinfo_ia64 *c) { - s64 status; + long status; u16 pltid; pal_logical_to_physical_t info; diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 604c1a3..4990495 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -385,7 +385,7 @@ ia64_init_itm (void) static cycle_t itc_get_cycles(struct clocksource *cs) { - u64 lcycle, now, ret; + unsigned long lcycle, now, ret; if (!itc_jitter_data.itc_jitter) return get_cycles(); diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index a8d61a3..bc80dff 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c @@ -306,10 +306,10 @@ static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu) static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu) { - u64 i, levels, unique_caches; + unsigned long i, levels, unique_caches; pal_cache_config_info_t cci; int j; - s64 status; + long status; struct cache_info *this_cache; int num_cache_leaves = 0; diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c index 6ba72ab..a595823 100644 --- a/arch/ia64/kernel/uncached.c +++ b/arch/ia64/kernel/uncached.c @@ -250,8 +250,7 @@ EXPORT_SYMBOL(uncached_free_page); * Called at boot time to build a map of pages that can be used for * memory special operations. */ -static int __init uncached_build_memmap(unsigned long uc_start, - unsigned long uc_end, void *arg) +static int __init uncached_build_memmap(u64 uc_start, u64 uc_end, void *arg) { int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET); struct gen_pool *pool = uncached_pools[nid].pool; diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index 0ee085e..2f724d2 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c @@ -107,10 +107,10 @@ unsigned long bootmap_start; * bootmap_start. This address must be page-aligned. 
*/ static int __init -find_bootmap_location (unsigned long start, unsigned long end, void *arg) +find_bootmap_location (u64 start, u64 end, void *arg) { - unsigned long needed = *(unsigned long *)arg; - unsigned long range_start, range_end, free_start; + u64 needed = *(unsigned long *)arg; + u64 range_start, range_end, free_start; int i; #if IGNORE_PFN0 @@ -229,8 +229,7 @@ find_memory (void) alloc_per_cpu_data(); } -static int -count_pages (u64 start, u64 end, void *arg) +static int count_pages(u64 start, u64 end, void *arg) { unsigned long *count = arg; diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index c0f3bee..b115b3b 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -422,8 +422,7 @@ retry_pte: return hole_next_pfn - pgdat->node_start_pfn; } -int __init -create_mem_map_page_table (u64 start, u64 end, void *arg) +int __init create_mem_map_page_table(u64 start, u64 end, void *arg) { unsigned long address, start_page, end_page; struct page *map_start, *map_end; @@ -469,7 +468,7 @@ struct memmap_init_callback_data { }; static int __meminit -virtual_memmap_init (u64 start, u64 end, void *arg) +virtual_memmap_init(u64 start, u64 end, void *arg) { struct memmap_init_callback_data *args; struct page *map_start, *map_end; @@ -531,8 +530,7 @@ ia64_pfn_valid (unsigned long pfn) } EXPORT_SYMBOL(ia64_pfn_valid); -int __init -find_largest_hole (u64 start, u64 end, void *arg) +int __init find_largest_hole(u64 start, u64 end, void *arg) { u64 *max_gap = arg; @@ -548,8 +546,7 @@ find_largest_hole (u64 start, u64 end, void *arg) #endif /* CONFIG_VIRTUAL_MEM_MAP */ -int __init -register_active_ranges(u64 start, u64 len, int nid) +int __init register_active_ranges(u64 start, u64 len, int nid) { u64 end = start + len; @@ -567,7 +564,7 @@ register_active_ranges(u64 start, u64 len, int nid) } static int __init -count_reserved_pages (u64 start, u64 end, void *arg) +count_reserved_pages(u64 start, u64 end, void *arg) { unsigned long num_reserved = 0; unsigned long *count = arg; @@ -580,7 +577,7 @@ count_reserved_pages (u64 start, u64 end, void *arg) } int -find_max_min_low_pfn (unsigned long start, unsigned long end, void *arg) +find_max_min_low_pfn (u64 start, u64 end, void *arg) { unsigned long pfn_start, pfn_end; #ifdef CONFIG_FLATMEM diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c index b9f3d7b..f426dc7 100644 --- a/arch/ia64/mm/tlb.c +++ b/arch/ia64/mm/tlb.c @@ -34,7 +34,7 @@ #include <asm/tlb.h> static struct { - unsigned long mask; /* mask of supported purge page-sizes */ + u64 mask; /* mask of supported purge page-sizes */ unsigned long max_bits; /* log2 of largest supported purge page-size */ } purge; @@ -328,7 +328,7 @@ void __devinit ia64_tlb_init (void) { ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */ - unsigned long tr_pgbits; + u64 tr_pgbits; long status; pal_vm_info_1_u_t vm_info_1; pal_vm_info_2_u_t vm_info_2; diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index 61f1af5..729298f 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -163,7 +163,7 @@ add_io_space (struct pci_root_info *info, struct acpi_resource_address64 *addr) { struct resource *resource; char *name; - u64 base, min, max, base_port; + unsigned long base, min, max, base_port; unsigned int sparse = 0, space_nr, len; resource = kzalloc(sizeof(*resource), GFP_KERNEL); @@ -292,7 +292,7 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data) window->offset = offset; if (insert_resource(root, &window->resource)) { - printk(KERN_ERR "alloc 
0x%lx-0x%lx from %s for %s failed\n", + printk(KERN_ERR "alloc 0x%llx-0x%llx from %s for %s failed\n", window->resource.start, window->resource.end, root->name, info->name); } @@ -314,8 +314,8 @@ pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl) (res->end - res->start < 16)) continue; if (j >= PCI_BUS_NUM_RESOURCES) { - printk("Ignoring range [%lx-%lx] (%lx)\n", res->start, - res->end, res->flags); + printk("Ignoring range [%#llx-%#llx] (%lx)\n", + res->start, res->end, res->flags); continue; } bus->resource[j++] = res; @@ -371,8 +371,6 @@ pci_acpi_scan_root(struct acpi_device *device, int domain, int bus) * such quirk. So we just ignore the case now. */ pbus = pci_scan_bus_parented(NULL, bus, &pci_root_ops, controller); - if (pbus) - pcibios_setup_root_windows(pbus, controller); return pbus; @@ -490,6 +488,8 @@ pcibios_fixup_bus (struct pci_bus *b) if (b->self) { pci_read_bridge_bases(b); pcibios_fixup_bridge_resources(b->self); + } else { + pcibios_setup_root_windows(b, b->sysdata); } list_for_each_entry(dev, &b->devices, bus_list) pcibios_fixup_device_resources(dev); @@ -728,8 +728,8 @@ extern u8 pci_cache_line_size; */ static void __init set_pci_cacheline_size(void) { - u64 levels, unique_caches; - s64 status; + unsigned long levels, unique_caches; + long status; pal_cache_config_info_t cci; status = ia64_pal_cache_summary(&levels, &unique_caches); diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c index d0223ab..fd50ff9 100644 --- a/arch/ia64/sn/kernel/io_acpi_init.c +++ b/arch/ia64/sn/kernel/io_acpi_init.c @@ -40,7 +40,7 @@ struct sn_pcidev_match { /* * Perform the early IO init in PROM. */ -static s64 +static long sal_ioif_init(u64 *result) { struct ia64_sal_retval isrv = {0,0,0,0}; @@ -492,7 +492,7 @@ void __init sn_io_acpi_init(void) { u64 result; - s64 status; + long status; /* SN Altix does not follow the IOSAPIC IRQ routing model */ acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM; diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c index 57f280d..76645cf 100644 --- a/arch/ia64/sn/kernel/io_common.c +++ b/arch/ia64/sn/kernel/io_common.c @@ -342,7 +342,7 @@ sn_common_bus_fixup(struct pci_bus *bus, struct pcibus_bussoft *b = SN_PCIBUS_BUSSOFT(bus); printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%u " - "L_IO=%lx L_MEM=%lx BASE=%lx\n", + "L_IO=%llx L_MEM=%llx BASE=%llx\n", b->bs_asic_type, b->bs_xid, b->bs_persist_busnum, b->bs_legacy_io, b->bs_legacy_mem, b->bs_base); printk(KERN_WARNING "on node %d but only %d nodes online." 
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c index 764f26a..40d6eed 100644 --- a/arch/ia64/sn/kernel/irq.c +++ b/arch/ia64/sn/kernel/irq.c @@ -295,13 +295,13 @@ unsigned int sn_local_vector_to_irq(u8 vector) void sn_irq_init(void) { int i; - irq_desc_t *base_desc = irq_desc; + struct irq_desc *base_desc = irq_desc; ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR; ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR; for (i = 0; i < NR_IRQS; i++) { - if (base_desc[i].chip == &no_irq_type) { + if (base_desc[i].chip == &no_irq_chip) { base_desc[i].chip = &irq_type_sn; } } @@ -377,7 +377,7 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info) int cpu = nasid_slice_to_cpuid(nasid, slice); #ifdef CONFIG_SMP int cpuphys; - irq_desc_t *desc; + struct irq_desc *desc; #endif pci_dev_get(pci_dev); diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c index 9e6491c..4c7e747 100644 --- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c +++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c @@ -414,7 +414,7 @@ static int sn_topology_show(struct seq_file *s, void *d) } seq_printf(s, "partition %u %s local " "shubtype %s, " - "nasid_mask 0x%016lx, " + "nasid_mask 0x%016llx, " "nasid_bits %d:%d, " "system_size %d, " "sharing_size %d, " @@ -683,7 +683,7 @@ static int sn_hwperf_map_err(int hwperf_err) * ioctl for "sn_hwperf" misc device */ static int -sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg) +sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, unsigned long arg) { struct sn_hwperf_ioctl_args a; struct cpuinfo_ia64 *cdata; diff --git a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c index 2526e5c..c76d8dc 100644 --- a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c +++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c @@ -36,7 +36,7 @@ static int system_serial_number_open(struct inode *inode, struct file *file) static int licenseID_show(struct seq_file *s, void *p) { - seq_printf(s, "0x%lx\n", sn_partition_serial_number_val()); + seq_printf(s, "0x%llx\n", sn_partition_serial_number_val()); return 0; } diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c index 3f86423..c1bd1cf 100644 --- a/arch/ia64/sn/kernel/tiocx.c +++ b/arch/ia64/sn/kernel/tiocx.c @@ -368,7 +368,7 @@ static void tio_corelet_reset(nasid_t nasid, int corelet) static int is_fpga_tio(int nasid, int *bt) { u16 uninitialized_var(ioboard_type); /* GCC be quiet */ - s64 rc; + long rc; rc = ia64_sn_sysctl_ioboard_get(nasid, &ioboard_type); if (rc) { diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c index 2c676cc..d13e5a2 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c @@ -79,7 +79,7 @@ static int sal_pcibr_error_interrupt(struct pcibus_info *soft) u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus) { - s64 rc; + long rc; u16 uninitialized_var(ioboard); /* GCC be quiet */ nasid_t nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base); diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c index 79165122..35b2a27 100644 --- a/arch/ia64/sn/pci/tioca_provider.c +++ b/arch/ia64/sn/pci/tioca_provider.c @@ -123,7 +123,7 @@ tioca_gart_init(struct tioca_kernel *tioca_kern) if (!tmp) { printk(KERN_ERR "%s: Could not allocate " - "%lu bytes (order %d) for GART\n", + "%llu bytes (order %d) for GART\n", __func__, tioca_kern->ca_gart_size, get_order(tioca_kern->ca_gart_size)); @@ -348,7 +348,7 @@ tioca_dma_d48(struct 
pci_dev *pdev, u64 paddr) agp_dma_extn = __sn_readq_relaxed(&ca_base->ca_agp_dma_addr_extn); if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) { printk(KERN_ERR "%s: coretalk upper node (%u) " - "mismatch with ca_agp_dma_addr_extn (%lu)\n", + "mismatch with ca_agp_dma_addr_extn (%llu)\n", __func__, node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)); return 0; @@ -367,7 +367,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr) * dma_addr_t is guaranteed to be contiguous in CA bus space. */ static dma_addr_t -tioca_dma_mapped(struct pci_dev *pdev, u64 paddr, size_t req_size) +tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size) { int i, ps, ps_shift, entry, entries, mapsize, last_entry; u64 xio_addr, end_xio_addr; diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c index 94e5845..012f3b8 100644 --- a/arch/ia64/sn/pci/tioce_provider.c +++ b/arch/ia64/sn/pci/tioce_provider.c @@ -493,7 +493,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) if (&map->ce_dmamap_list == &ce_kern->ce_dmamap_list) { printk(KERN_WARNING - "%s: %s - no map found for bus_addr 0x%lx\n", + "%s: %s - no map found for bus_addr 0x%llx\n", __func__, pci_name(pdev), bus_addr); } else if (--map->refcnt == 0) { for (i = 0; i < map->ate_count; i++) { @@ -642,7 +642,7 @@ dma_map_done: * in the address. */ static u64 -tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags) +tioce_dma(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags) { return tioce_do_dma_map(pdev, paddr, byte_count, 0, dma_flags); } @@ -657,7 +657,7 @@ tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags) * in the address. */ static u64 -tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags) +tioce_dma_consistent(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags) { return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags); } diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c index af93aad..f042e19 100644 --- a/arch/ia64/xen/irq_xen.c +++ b/arch/ia64/xen/irq_xen.c @@ -138,7 +138,7 @@ static void __xen_register_percpu_irq(unsigned int cpu, unsigned int vec, struct irqaction *action, int save) { - irq_desc_t *desc; + struct irq_desc *desc; int irq = 0; if (xen_slab_ready) { diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c index 3e876f0..67a01e1 100644 --- a/arch/m32r/kernel/process.c +++ b/arch/m32r/kernel/process.c @@ -302,11 +302,6 @@ asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv, goto out; error = do_execve(filename, uargv, uenvp, ®s); - if (error == 0) { - task_lock(current); - current->ptrace &= ~PT_DTRACE; - task_unlock(current); - } putname(filename); out: return error; diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c index 9aa615d..bf0abe9 100644 --- a/arch/m32r/kernel/ptrace.c +++ b/arch/m32r/kernel/ptrace.c @@ -676,10 +676,6 @@ arch_ptrace(struct task_struct *child, long request, long addr, long data) if (!valid_signal(data)) break; clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - if ((child->ptrace & PT_DTRACE) == 0) { - /* Spurious delayed TF traps may occur */ - child->ptrace |= PT_DTRACE; - } /* Compute next pc. 
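On m32r the kernel emulates single-stepping in software, so it needs the address of the following instruction in order to plant a debug trap there.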
*/ pc = get_stack_long(child, PT_BPC); diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index 184acc9..aacd6d1 100644 --- a/arch/m68k/kernel/traps.c +++ b/arch/m68k/kernel/traps.c @@ -1057,7 +1057,6 @@ asmlinkage void trap_c(struct frame *fp) if (fp->ptregs.sr & PS_S) { if ((fp->ptregs.vector >> 2) == VEC_TRACE) { /* traced a trapping instruction */ - current->ptrace |= PT_DTRACE; } else bad_super_trap(fp); return; diff --git a/arch/m68knommu/kernel/asm-offsets.c b/arch/m68knommu/kernel/asm-offsets.c index f500dd6..594ee0e 100644 --- a/arch/m68knommu/kernel/asm-offsets.c +++ b/arch/m68knommu/kernel/asm-offsets.c @@ -73,7 +73,6 @@ int main(void) DEFINE(TRAP_TRACE, TRAP_TRACE); DEFINE(PT_PTRACED, PT_PTRACED); - DEFINE(PT_DTRACE, PT_DTRACE); DEFINE(THREAD_SIZE, THREAD_SIZE); diff --git a/arch/m68knommu/kernel/traps.c b/arch/m68knommu/kernel/traps.c index 5d5d56b..51d3253 100644 --- a/arch/m68knommu/kernel/traps.c +++ b/arch/m68knommu/kernel/traps.c @@ -200,7 +200,6 @@ asmlinkage void trap_c(struct frame *fp) if (fp->ptregs.sr & PS_S) { if ((fp->ptregs.vector >> 2) == VEC_TRACE) { /* traced a trapping instruction */ - current->ptrace |= PT_DTRACE; } else bad_super_trap(fp); return; diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 25f3b0a..b29f028 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -618,6 +618,8 @@ config CAVIUM_OCTEON_REFERENCE_BOARD select SYS_HAS_EARLY_PRINTK select SYS_HAS_CPU_CAVIUM_OCTEON select SWAP_IO_SPACE + select HW_HAS_PCI + select ARCH_SUPPORTS_MSI help This option supports all of the Octeon reference boards from Cavium Networks. It builds a kernel that dynamically determines the Octeon @@ -851,6 +853,11 @@ config SYS_SUPPORTS_BIG_ENDIAN config SYS_SUPPORTS_LITTLE_ENDIAN bool +config SYS_SUPPORTS_HUGETLBFS + bool + depends on CPU_SUPPORTS_HUGEPAGES && 64BIT + default y + config IRQ_CPU bool @@ -1055,6 +1062,7 @@ config CPU_MIPS64_R1 select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HIGHMEM + select CPU_SUPPORTS_HUGEPAGES help Choose this option to build a kernel for release 1 or later of the MIPS64 architecture. Many modern embedded systems with a 64-bit @@ -1074,6 +1082,7 @@ config CPU_MIPS64_R2 select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HIGHMEM + select CPU_SUPPORTS_HUGEPAGES help Choose this option to build a kernel for release 2 or later of the MIPS64 architecture. Many modern embedded systems with a 64-bit @@ -1160,6 +1169,7 @@ config CPU_R5500 select CPU_HAS_LLSC select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL + select CPU_SUPPORTS_HUGEPAGES help NEC VR5500 and VR5500A series processors implement 64-bit MIPS IV instruction set. @@ -1245,6 +1255,7 @@ config CPU_CAVIUM_OCTEON select WEAK_ORDERING select WEAK_REORDERING_BEYOND_LLSC select CPU_SUPPORTS_HIGHMEM + select CPU_SUPPORTS_HUGEPAGES help The Cavium Octeon processor is a highly integrated chip containing many ethernet hardware widgets for networking tasks. The processor @@ -1364,6 +1375,8 @@ config CPU_SUPPORTS_32BIT_KERNEL bool config CPU_SUPPORTS_64BIT_KERNEL bool +config CPU_SUPPORTS_HUGEPAGES + bool # # Set to y for ptrace access to watch registers. 
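A note on what the new CPU_SUPPORTS_HUGEPAGES/SYS_SUPPORTS_HUGETLBFS options enable: with hugetlbfs mounted on such a kernel, userspace gets huge-page-backed memory simply by mmap()ing a file on that mount. An illustrative userspace sketch; the /mnt/huge mount point and the 2 MB huge page size are assumptions for the example, not anything this patch establishes:

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #define HPAGE_SIZE (2UL * 1024 * 1024)    /* assumed huge page size */

    int main(void)
    {
            char *p;
            int fd = open("/mnt/huge/example", O_CREAT | O_RDWR, 0600);

            if (fd < 0)
                    return 1;
            /* hugetlbfs mappings must be a multiple of the huge page size */
            p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
                     MAP_SHARED, fd, 0);
            if (p == MAP_FAILED)
                    return 1;
            p[0] = 1;                         /* fault in one huge page */
            munmap(p, HPAGE_SIZE);
            close(fd);
            unlink("/mnt/huge/example");
            return 0;
    }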
@@ -2121,6 +2134,10 @@ endmenu menu "Power management options" +config ARCH_HIBERNATION_POSSIBLE + def_bool y + depends on !SMP + config ARCH_SUSPEND_POSSIBLE def_bool y depends on !SMP diff --git a/arch/mips/Makefile b/arch/mips/Makefile index c4cae9e..807572a 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -167,7 +167,6 @@ libs-$(CONFIG_ARC) += arch/mips/fw/arc/ libs-$(CONFIG_CFE) += arch/mips/fw/cfe/ libs-$(CONFIG_SNIPROM) += arch/mips/fw/sni/ libs-y += arch/mips/fw/lib/ -libs-$(CONFIG_SIBYTE_CFE) += arch/mips/sibyte/cfe/ # # Board-dependent options and extra files @@ -184,7 +183,6 @@ load-$(CONFIG_MACH_JAZZ) += 0xffffffff80080000 # Common Alchemy Au1x00 stuff # core-$(CONFIG_SOC_AU1X00) += arch/mips/alchemy/common/ -cflags-$(CONFIG_SOC_AU1X00) += -I$(srctree)/arch/mips/include/asm/mach-au1x00 # # AMD Alchemy Pb1000 eval board @@ -282,6 +280,10 @@ load-$(CONFIG_MIPS_MTX1) += 0xffffffff80100000 libs-$(CONFIG_MIPS_XXS1500) += arch/mips/alchemy/xxs1500/ load-$(CONFIG_MIPS_XXS1500) += 0xffffffff80100000 +# must be last for Alchemy systems for GPIO to work properly +cflags-$(CONFIG_SOC_AU1X00) += -I$(srctree)/arch/mips/include/asm/mach-au1x00 + + # # Cobalt Server # @@ -675,6 +677,9 @@ core-y += arch/mips/kernel/ arch/mips/mm/ arch/mips/math-emu/ drivers-$(CONFIG_OPROFILE) += arch/mips/oprofile/ +# suspend and hibernation support +drivers-$(CONFIG_PM) += arch/mips/power/ + ifdef CONFIG_LASAT rom.bin rom.sw: vmlinux $(Q)$(MAKE) $(build)=arch/mips/lasat/image $@ diff --git a/arch/mips/alchemy/Kconfig b/arch/mips/alchemy/Kconfig index 8128aeb..00b498e 100644 --- a/arch/mips/alchemy/Kconfig +++ b/arch/mips/alchemy/Kconfig @@ -1,3 +1,14 @@ +# au1000-style gpio +config ALCHEMY_GPIO_AU1000 + bool + +# select this in your board config if you don't want to use the gpio +# namespace as documented in the manuals. In this case however you need +# to create the necessary gpio_* functions in your board code/headers! +# see arch/mips/include/asm/mach-au1x00/gpio.h for more information. 
+config ALCHEMY_GPIO_INDIRECT + def_bool n + choice prompt "Machine type" depends on MACH_ALCHEMY @@ -108,22 +119,27 @@ endchoice config SOC_AU1000 bool select SOC_AU1X00 + select ALCHEMY_GPIO_AU1000 config SOC_AU1100 bool select SOC_AU1X00 + select ALCHEMY_GPIO_AU1000 config SOC_AU1500 bool select SOC_AU1X00 + select ALCHEMY_GPIO_AU1000 config SOC_AU1550 bool select SOC_AU1X00 + select ALCHEMY_GPIO_AU1000 config SOC_AU1200 bool select SOC_AU1X00 + select ALCHEMY_GPIO_AU1000 config SOC_AU1X00 bool @@ -134,4 +150,5 @@ config SOC_AU1X00 select SYS_HAS_CPU_MIPS32_R1 select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_APM_EMULATION - select ARCH_REQUIRE_GPIOLIB + select GENERIC_GPIO + select ARCH_WANT_OPTIONAL_GPIOLIB diff --git a/arch/mips/alchemy/common/Makefile b/arch/mips/alchemy/common/Makefile index d50d476..b67fb51 100644 --- a/arch/mips/alchemy/common/Makefile +++ b/arch/mips/alchemy/common/Makefile @@ -7,7 +7,14 @@ obj-y += prom.o irq.o puts.o time.o reset.o \ clocks.o platform.o power.o setup.o \ - sleeper.o dma.o dbdma.o gpio.o + sleeper.o dma.o dbdma.o + +# optional gpiolib support +ifeq ($(CONFIG_ALCHEMY_GPIO_INDIRECT),) + ifeq ($(CONFIG_GPIOLIB),y) + obj-$(CONFIG_ALCHEMY_GPIO_AU1000) += gpiolib-au1000.o + endif +endif obj-$(CONFIG_PCI) += pci.o diff --git a/arch/mips/alchemy/common/gpio.c b/arch/mips/alchemy/common/gpio.c deleted file mode 100644 index 91a9c44..0000000 --- a/arch/mips/alchemy/common/gpio.c +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Copyright (C) 2007-2009, OpenWrt.org, Florian Fainelli <florian@openwrt.org> - * Architecture specific GPIO support - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN - * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 675 Mass Ave, Cambridge, MA 02139, USA. 
- * - * Notes : - * au1000 SoC have only one GPIO line : GPIO1 - * others have a second one : GPIO2 - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/types.h> -#include <linux/platform_device.h> -#include <linux/gpio.h> - -#include <asm/mach-au1x00/au1000.h> -#include <asm/gpio.h> - -struct au1000_gpio_chip { - struct gpio_chip chip; - void __iomem *regbase; -}; - -#if !defined(CONFIG_SOC_AU1000) -static int au1000_gpio2_get(struct gpio_chip *chip, unsigned offset) -{ - u32 mask = 1 << offset; - struct au1000_gpio_chip *gpch; - - gpch = container_of(chip, struct au1000_gpio_chip, chip); - return readl(gpch->regbase + AU1000_GPIO2_ST) & mask; -} - -static void au1000_gpio2_set(struct gpio_chip *chip, - unsigned offset, int value) -{ - u32 mask = ((GPIO2_OUT_EN_MASK << offset) | (!!value << offset)); - struct au1000_gpio_chip *gpch; - unsigned long flags; - - gpch = container_of(chip, struct au1000_gpio_chip, chip); - - local_irq_save(flags); - writel(mask, gpch->regbase + AU1000_GPIO2_OUT); - local_irq_restore(flags); -} - -static int au1000_gpio2_direction_input(struct gpio_chip *chip, unsigned offset) -{ - u32 mask = 1 << offset; - u32 tmp; - struct au1000_gpio_chip *gpch; - unsigned long flags; - - gpch = container_of(chip, struct au1000_gpio_chip, chip); - - local_irq_save(flags); - tmp = readl(gpch->regbase + AU1000_GPIO2_DIR); - tmp &= ~mask; - writel(tmp, gpch->regbase + AU1000_GPIO2_DIR); - local_irq_restore(flags); - - return 0; -} - -static int au1000_gpio2_direction_output(struct gpio_chip *chip, - unsigned offset, int value) -{ - u32 mask = 1 << offset; - u32 out_mask = ((GPIO2_OUT_EN_MASK << offset) | (!!value << offset)); - u32 tmp; - struct au1000_gpio_chip *gpch; - unsigned long flags; - - gpch = container_of(chip, struct au1000_gpio_chip, chip); - - local_irq_save(flags); - tmp = readl(gpch->regbase + AU1000_GPIO2_DIR); - tmp |= mask; - writel(tmp, gpch->regbase + AU1000_GPIO2_DIR); - writel(out_mask, gpch->regbase + AU1000_GPIO2_OUT); - local_irq_restore(flags); - - return 0; -} -#endif /* !defined(CONFIG_SOC_AU1000) */ - -static int au1000_gpio1_get(struct gpio_chip *chip, unsigned offset) -{ - u32 mask = 1 << offset; - struct au1000_gpio_chip *gpch; - - gpch = container_of(chip, struct au1000_gpio_chip, chip); - return readl(gpch->regbase + AU1000_GPIO1_ST) & mask; -} - -static void au1000_gpio1_set(struct gpio_chip *chip, - unsigned offset, int value) -{ - u32 mask = 1 << offset; - u32 reg_offset; - struct au1000_gpio_chip *gpch; - unsigned long flags; - - gpch = container_of(chip, struct au1000_gpio_chip, chip); - - if (value) - reg_offset = AU1000_GPIO1_OUT; - else - reg_offset = AU1000_GPIO1_CLR; - - local_irq_save(flags); - writel(mask, gpch->regbase + reg_offset); - local_irq_restore(flags); -} - -static int au1000_gpio1_direction_input(struct gpio_chip *chip, unsigned offset) -{ - u32 mask = 1 << offset; - struct au1000_gpio_chip *gpch; - - gpch = container_of(chip, struct au1000_gpio_chip, chip); - writel(mask, gpch->regbase + AU1000_GPIO1_ST); - - return 0; -} - -static int au1000_gpio1_direction_output(struct gpio_chip *chip, - unsigned offset, int value) -{ - u32 mask = 1 << offset; - struct au1000_gpio_chip *gpch; - - gpch = container_of(chip, struct au1000_gpio_chip, chip); - - writel(mask, gpch->regbase + AU1000_GPIO1_TRI_OUT); - au1000_gpio1_set(chip, offset, value); - - return 0; -} - -struct au1000_gpio_chip au1000_gpio_chip[] = { - [0] = { - .regbase = (void __iomem *)SYS_BASE, - .chip = { - .label = "au1000-gpio1", - 
.direction_input = au1000_gpio1_direction_input, - .direction_output = au1000_gpio1_direction_output, - .get = au1000_gpio1_get, - .set = au1000_gpio1_set, - .base = 0, - .ngpio = 32, - }, - }, -#if !defined(CONFIG_SOC_AU1000) - [1] = { - .regbase = (void __iomem *)GPIO2_BASE, - .chip = { - .label = "au1000-gpio2", - .direction_input = au1000_gpio2_direction_input, - .direction_output = au1000_gpio2_direction_output, - .get = au1000_gpio2_get, - .set = au1000_gpio2_set, - .base = AU1XXX_GPIO_BASE, - .ngpio = 32, - }, - }, -#endif -}; - -static int __init au1000_gpio_init(void) -{ - gpiochip_add(&au1000_gpio_chip[0].chip); -#if !defined(CONFIG_SOC_AU1000) - gpiochip_add(&au1000_gpio_chip[1].chip); -#endif - - return 0; -} -arch_initcall(au1000_gpio_init); - diff --git a/arch/mips/alchemy/common/gpiolib-au1000.c b/arch/mips/alchemy/common/gpiolib-au1000.c new file mode 100644 index 0000000..1bfa91f --- /dev/null +++ b/arch/mips/alchemy/common/gpiolib-au1000.c @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2007-2009, OpenWrt.org, Florian Fainelli <florian@openwrt.org> + * GPIOLIB support for Au1000, Au1500, Au1100, Au1550 and Au12x0. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * Notes : + * au1000 SoC have only one GPIO block : GPIO1 + * Au1100, Au15x0, Au12x0 have a second one : GPIO2 + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/platform_device.h> +#include <linux/gpio.h> + +#include <asm/mach-au1x00/au1000.h> +#include <asm/mach-au1x00/gpio.h> + +#if !defined(CONFIG_SOC_AU1000) +static int gpio2_get(struct gpio_chip *chip, unsigned offset) +{ + return alchemy_gpio2_get_value(offset + ALCHEMY_GPIO2_BASE); +} + +static void gpio2_set(struct gpio_chip *chip, unsigned offset, int value) +{ + alchemy_gpio2_set_value(offset + ALCHEMY_GPIO2_BASE, value); +} + +static int gpio2_direction_input(struct gpio_chip *chip, unsigned offset) +{ + return alchemy_gpio2_direction_input(offset + ALCHEMY_GPIO2_BASE); +} + +static int gpio2_direction_output(struct gpio_chip *chip, unsigned offset, + int value) +{ + return alchemy_gpio2_direction_output(offset + ALCHEMY_GPIO2_BASE, + value); +} + +static int gpio2_to_irq(struct gpio_chip *chip, unsigned offset) +{ + return alchemy_gpio2_to_irq(offset + ALCHEMY_GPIO2_BASE); +} +#endif /* !defined(CONFIG_SOC_AU1000) */ + +static int gpio1_get(struct gpio_chip *chip, unsigned offset) +{ + return alchemy_gpio1_get_value(offset + ALCHEMY_GPIO1_BASE); +} + +static void gpio1_set(struct gpio_chip *chip, + unsigned offset, int value) +{ + alchemy_gpio1_set_value(offset + ALCHEMY_GPIO1_BASE, value); +} + +static int gpio1_direction_input(struct gpio_chip *chip, unsigned offset) +{ + return alchemy_gpio1_direction_input(offset + ALCHEMY_GPIO1_BASE); +} + +static int gpio1_direction_output(struct gpio_chip *chip, + unsigned offset, int value) +{ + return alchemy_gpio1_direction_output(offset + ALCHEMY_GPIO1_BASE, + value); +} + +static int gpio1_to_irq(struct gpio_chip *chip, unsigned offset) +{ + return alchemy_gpio1_to_irq(offset + ALCHEMY_GPIO1_BASE); +} + +struct gpio_chip alchemy_gpio_chip[] = { + [0] = { + .label = "alchemy-gpio1", + .direction_input = gpio1_direction_input, + .direction_output = gpio1_direction_output, + .get = gpio1_get, + .set = gpio1_set, + .to_irq = gpio1_to_irq, + .base = ALCHEMY_GPIO1_BASE, + .ngpio = ALCHEMY_GPIO1_NUM, + }, +#if !defined(CONFIG_SOC_AU1000) + [1] = { + .label = "alchemy-gpio2", + .direction_input = gpio2_direction_input, + .direction_output = gpio2_direction_output, + .get = gpio2_get, + .set = gpio2_set, + .to_irq = gpio2_to_irq, + .base = ALCHEMY_GPIO2_BASE, + .ngpio = ALCHEMY_GPIO2_NUM, + }, +#endif +}; + +static int __init alchemy_gpiolib_init(void) +{ + gpiochip_add(&alchemy_gpio_chip[0]); +#if !defined(CONFIG_SOC_AU1000) + gpiochip_add(&alchemy_gpio_chip[1]); +#endif + + return 0; +} +arch_initcall(alchemy_gpiolib_init); diff --git a/arch/mips/alchemy/common/reset.c b/arch/mips/alchemy/common/reset.c index 0191c93..4791011 100644 --- a/arch/mips/alchemy/common/reset.c +++ b/arch/mips/alchemy/common/reset.c @@ -27,8 +27,9 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ -#include <asm/cacheflush.h> +#include <linux/gpio.h> +#include <asm/cacheflush.h> #include <asm/mach-au1x00/au1000.h> void au1000_restart(char *command) @@ -161,7 +162,7 @@ void au1000_halt(void) #else printk(KERN_NOTICE "\n** You can safely turn off the power\n"); #ifdef CONFIG_MIPS_MIRAGE - au_writel((1 << 26) | (1 << 10), GPIO2_OUTPUT); + gpio_direction_output(210, 1); #endif #ifdef CONFIG_MIPS_DB1200 au_writew(au_readw(0xB980001C) | (1 << 14), 0xB980001C); diff --git a/arch/mips/alchemy/devboards/db1x00/board_setup.c b/arch/mips/alchemy/devboards/db1x00/board_setup.c index a75ffbf..de30d8e 100644 --- a/arch/mips/alchemy/devboards/db1x00/board_setup.c +++ b/arch/mips/alchemy/devboards/db1x00/board_setup.c @@ -27,6 +27,7 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. */ +#include <linux/gpio.h> #include <linux/init.h> #include <asm/mach-au1x00/au1000.h> @@ -94,12 +95,12 @@ void __init board_setup(void) #endif bcsr->pcmcia = 0x0000; /* turn off PCMCIA power */ -#ifdef CONFIG_MIPS_MIRAGE /* Enable GPIO[31:0] inputs */ - au_writel(0, SYS_PININPUTEN); + alchemy_gpio1_input_enable(); - /* GPIO[20] is output, tristate the other input primary GPIOs */ - au_writel(~(1 << 20), SYS_TRIOUTCLR); +#ifdef CONFIG_MIPS_MIRAGE + /* GPIO[20] is output */ + alchemy_gpio_direction_output(20, 0); /* Set GPIO[210:208] instead of SSI_0 */ pin_func = au_readl(SYS_PINFUNC) | SYS_PF_S0; @@ -118,8 +119,7 @@ void __init board_setup(void) * Enable speaker amplifier. This should * be part of the audio driver. */ - au_writel(au_readl(GPIO2_DIR) | 0x200, GPIO2_DIR); - au_writel(0x02000200, GPIO2_OUTPUT); + alchemy_gpio_direction_output(209, 1); #endif au_sync(); diff --git a/arch/mips/alchemy/devboards/pb1000/board_setup.c b/arch/mips/alchemy/devboards/pb1000/board_setup.c index aed2fde..cd273545 100644 --- a/arch/mips/alchemy/devboards/pb1000/board_setup.c +++ b/arch/mips/alchemy/devboards/pb1000/board_setup.c @@ -24,6 +24,7 @@ */ #include <linux/delay.h> +#include <linux/gpio.h> #include <linux/init.h> #include <linux/interrupt.h> #include <asm/mach-au1x00/au1000.h> @@ -130,8 +131,11 @@ void __init board_setup(void) pin_func |= SYS_PF_USB; au_writel(pin_func, SYS_PINFUNC); - au_writel(0x2800, SYS_TRIOUTCLR); - au_writel(0x0030, SYS_OUTPUTCLR); + + alchemy_gpio_direction_input(11); + alchemy_gpio_direction_input(13); + alchemy_gpio_direction_output(4, 0); + alchemy_gpio_direction_output(5, 0); #endif /* defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */ /* Make GPIO 15 an input (for interrupt line) */ @@ -140,7 +144,7 @@ void __init board_setup(void) pin_func |= SYS_PF_I2S; au_writel(pin_func, SYS_PINFUNC); - au_writel(0x8000, SYS_TRIOUTCLR); + alchemy_gpio_direction_input(15); static_cfg0 = au_readl(MEM_STCFG0) & ~0xc00; au_writel(static_cfg0, MEM_STCFG0); diff --git a/arch/mips/alchemy/devboards/pb1100/board_setup.c b/arch/mips/alchemy/devboards/pb1100/board_setup.c index 4df57fa..6126308 100644 --- a/arch/mips/alchemy/devboards/pb1100/board_setup.c +++ b/arch/mips/alchemy/devboards/pb1100/board_setup.c @@ -23,6 +23,7 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ +#include <linux/gpio.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/interrupt.h> @@ -88,7 +89,7 @@ void __init board_setup(void) /* Set AUX clock to 12 MHz * 8 = 96 MHz */ au_writel(8, SYS_AUXPLL); - au_writel(0, SYS_PININPUTEN); + alchemy_gpio1_input_enable(); udelay(100); #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) diff --git a/arch/mips/alchemy/devboards/pb1500/board_setup.c b/arch/mips/alchemy/devboards/pb1500/board_setup.c index fed3b09..d7a5656 100644 --- a/arch/mips/alchemy/devboards/pb1500/board_setup.c +++ b/arch/mips/alchemy/devboards/pb1500/board_setup.c @@ -23,8 +23,9 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. */ -#include <linux/init.h> #include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/init.h> #include <linux/interrupt.h> #include <asm/mach-au1x00/au1000.h> @@ -90,11 +91,12 @@ void __init board_setup(void) au_writel(0, SYS_PINSTATERD); udelay(100); -#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) - /* GPIO201 is input for PCMCIA card detect */ /* GPIO203 is input for PCMCIA interrupt request */ - au_writel(au_readl(GPIO2_DIR) & ~((1 << 1) | (1 << 3)), GPIO2_DIR); + alchemy_gpio_direction_input(201); + alchemy_gpio_direction_input(203); + +#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) /* Zero and disable FREQ2 */ sys_freqctrl = au_readl(SYS_FREQCTRL0); diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c index d5eb9c3..632f986 100644 --- a/arch/mips/alchemy/devboards/pm.c +++ b/arch/mips/alchemy/devboards/pm.c @@ -9,6 +9,7 @@ #include <linux/suspend.h> #include <linux/sysfs.h> #include <asm/mach-au1x00/au1000.h> +#include <asm/mach-au1x00/gpio.h> /* * Generic suspend userspace interface for Alchemy development boards. @@ -26,7 +27,7 @@ static unsigned long db1x_pm_last_wakesrc; static int db1x_pm_enter(suspend_state_t state) { /* enable GPIO based wakeup */ - au_writel(1, SYS_PININPUTEN); + alchemy_gpio1_input_enable(); /* clear and setup wake cause and source */ au_writel(0, SYS_WAKEMSK); diff --git a/arch/mips/alchemy/mtx-1/board_setup.c b/arch/mips/alchemy/mtx-1/board_setup.c index 8ed1ae1..cc32c69 100644 --- a/arch/mips/alchemy/mtx-1/board_setup.c +++ b/arch/mips/alchemy/mtx-1/board_setup.c @@ -28,6 +28,7 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ +#include <linux/gpio.h> #include <linux/init.h> #include <asm/mach-au1x00/au1000.h> @@ -55,10 +56,11 @@ void __init board_setup(void) } #endif + alchemy_gpio2_enable(); + #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) /* Enable USB power switch */ - au_writel(au_readl(GPIO2_DIR) | 0x10, GPIO2_DIR); - au_writel(0x100000, GPIO2_OUTPUT); + alchemy_gpio_direction_output(204, 0); #endif /* defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */ #ifdef CONFIG_PCI @@ -74,14 +76,14 @@ void __init board_setup(void) /* Initialize GPIO */ au_writel(0xFFFFFFFF, SYS_TRIOUTCLR); - au_writel(0x00000001, SYS_OUTPUTCLR); /* set M66EN (PCI 66MHz) to OFF */ - au_writel(0x00000008, SYS_OUTPUTSET); /* set PCI CLKRUN# to OFF */ - au_writel(0x00000002, SYS_OUTPUTSET); /* set EXT_IO3 ON */ - au_writel(0x00000020, SYS_OUTPUTCLR); /* set eth PHY TX_ER to OFF */ + alchemy_gpio_direction_output(0, 0); /* Disable M66EN (PCI 66MHz) */ + alchemy_gpio_direction_output(3, 1); /* Disable PCI CLKRUN# */ + alchemy_gpio_direction_output(1, 1); /* Enable EXT_IO3 */ + alchemy_gpio_direction_output(5, 0); /* Disable eth PHY TX_ER */ /* Enable LED and set it to green */ - au_writel(au_readl(GPIO2_DIR) | 0x1800, GPIO2_DIR); - au_writel(0x18000800, GPIO2_OUTPUT); + alchemy_gpio_direction_output(211, 1); /* green on */ + alchemy_gpio_direction_output(212, 0); /* red off */ board_pci_idsel = mtx1_pci_idsel; @@ -101,10 +103,10 @@ mtx1_pci_idsel(unsigned int devsel, int assert) if (assert && devsel != 0) /* Suppress signal to Cardbus */ - au_writel(0x00000002, SYS_OUTPUTCLR); /* set EXT_IO3 OFF */ + gpio_set_value(1, 0); /* set EXT_IO3 OFF */ else - au_writel(0x00000002, SYS_OUTPUTSET); /* set EXT_IO3 ON */ + gpio_set_value(1, 1); /* set EXT_IO3 ON */ + au_sync_udelay(1); return 1; } - diff --git a/arch/mips/alchemy/xxs1500/board_setup.c b/arch/mips/alchemy/xxs1500/board_setup.c index a2634fa..4de2d48 100644 --- a/arch/mips/alchemy/xxs1500/board_setup.c +++ b/arch/mips/alchemy/xxs1500/board_setup.c @@ -23,6 +23,7 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. */ +#include <linux/gpio.h> #include <linux/init.h> #include <linux/delay.h> @@ -50,6 +51,9 @@ void __init board_setup(void) } #endif + alchemy_gpio1_input_enable(); + alchemy_gpio2_enable(); + /* Set multiple use pins (UART3/GPIO) to UART (it's used as UART too) */ pin_func = au_readl(SYS_PINFUNC) & ~SYS_PF_UR3; pin_func |= SYS_PF_UR3; @@ -65,20 +69,19 @@ void __init board_setup(void) au_writel(0x01, UART3_ADDR + UART_MCR); /* UART_MCR_DTR is 0x01??? 
*/ #ifdef CONFIG_PCMCIA_XXS1500 - /* Setup PCMCIA signals */ - au_writel(0, SYS_PININPUTEN); - /* GPIO 0, 1, and 4 are inputs */ - au_writel(1 | (1 << 1) | (1 << 4), SYS_TRIOUTCLR); + alchemy_gpio_direction_input(0); + alchemy_gpio_direction_input(1); + alchemy_gpio_direction_input(4); - /* Enable GPIO2 if not already enabled */ - au_writel(1, GPIO2_ENABLE); /* GPIO2 208/9/10/11 are inputs */ - au_writel((1 << 8) | (1 << 9) | (1 << 10) | (1 << 11), GPIO2_DIR); + alchemy_gpio_direction_input(208); + alchemy_gpio_direction_input(209); + alchemy_gpio_direction_input(210); + alchemy_gpio_direction_input(211); /* Turn off power */ - au_writel((au_readl(GPIO2_PINSTATE) & ~(1 << 14)) | (1 << 30), - GPIO2_OUTPUT); + alchemy_gpio_direction_output(214, 0); #endif #ifdef CONFIG_PCI diff --git a/arch/mips/cavium-octeon/Makefile b/arch/mips/cavium-octeon/Makefile index d6903c3..7c0528b 100644 --- a/arch/mips/cavium-octeon/Makefile +++ b/arch/mips/cavium-octeon/Makefile @@ -14,5 +14,9 @@ obj-y += dma-octeon.o flash_setup.o obj-y += octeon-memcpy.o obj-$(CONFIG_SMP) += smp.o +obj-$(CONFIG_PCI) += pci-common.o +obj-$(CONFIG_PCI) += pci.o +obj-$(CONFIG_PCI) += pcie.o +obj-$(CONFIG_PCI_MSI) += msi.o EXTRA_CFLAGS += -Werror diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c index 01b1ef9..627c162 100644 --- a/arch/mips/cavium-octeon/dma-octeon.c +++ b/arch/mips/cavium-octeon/dma-octeon.c @@ -13,20 +13,327 @@ */ #include <linux/types.h> #include <linux/mm.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/dma-mapping.h> +#include <linux/platform_device.h> +#include <linux/scatterlist.h> + +#include <linux/cache.h> +#include <linux/io.h> + +#include <asm/octeon/octeon.h> +#include <asm/octeon/cvmx-npi-defs.h> +#include <asm/octeon/cvmx-pci-defs.h> #include <dma-coherence.h> +#ifdef CONFIG_PCI +#include "pci-common.h" +#endif + +#define BAR2_PCI_ADDRESS 0x8000000000ul + +struct bar1_index_state { + int16_t ref_count; /* Number of PCI mappings using this index */ + uint16_t address_bits; /* Upper bits of physical address. This is + shifted 22 bits */ +}; + +#ifdef CONFIG_PCI +static DEFINE_SPINLOCK(bar1_lock); +static struct bar1_index_state bar1_state[32]; +#endif + dma_addr_t octeon_map_dma_mem(struct device *dev, void *ptr, size_t size) { +#ifndef CONFIG_PCI /* Without PCI/PCIe this function can be called for Octeon internal devices such as USB. These devices all support 64bit addressing */ mb(); return virt_to_phys(ptr); +#else + unsigned long flags; + uint64_t dma_mask; + int64_t start_index; + dma_addr_t result = -1; + uint64_t physical = virt_to_phys(ptr); + int64_t index; + + mb(); + /* + * Use the DMA masks to determine the allowed memory + * region. For us it doesn't limit the actual memory, just the + * address visible over PCI. Devices with limits need to use + * lower indexed Bar1 entries. + */ + if (dev) { + dma_mask = dev->coherent_dma_mask; + if (dev->dma_mask) + dma_mask = *dev->dma_mask; + } else { + dma_mask = 0xfffffffful; + } + + /* + * Platform devices, such as the internal USB, skip all + * translation and use Octeon physical addresses directly. + */ + if (!dev || dev->bus == &platform_bus_type) + return physical; + + switch (octeon_dma_bar_type) { + case OCTEON_DMA_BAR_TYPE_PCIE: + if (unlikely(physical < (16ul << 10))) + panic("dma_map_single: Not allowed to map first 16KB." 
+ " It interferes with BAR0 special area\n"); + else if ((physical + size >= (256ul << 20)) && + (physical < (512ul << 20))) + panic("dma_map_single: Not allowed to map bootbus\n"); + else if ((physical + size >= 0x400000000ull) && + physical < 0x410000000ull) + panic("dma_map_single: " + "Attempt to map illegal memory address 0x%llx\n", + physical); + else if (physical >= 0x420000000ull) + panic("dma_map_single: " + "Attempt to map illegal memory address 0x%llx\n", + physical); + else if ((physical + size >= + (4ull<<30) - (OCTEON_PCI_BAR1_HOLE_SIZE<<20)) + && physical < (4ull<<30)) + pr_warning("dma_map_single: Warning: " + "Mapping memory address that might " + "conflict with devices 0x%llx-0x%llx\n", + physical, physical+size-1); + /* The 2nd 256MB is mapped at 256<<20 instead of 0x410000000 */ + if ((physical >= 0x410000000ull) && physical < 0x420000000ull) + result = physical - 0x400000000ull; + else + result = physical; + if (((result+size-1) & dma_mask) != result+size-1) + panic("dma_map_single: Attempt to map address " + "0x%llx-0x%llx, which can't be accessed " + "according to the dma mask 0x%llx\n", + physical, physical+size-1, dma_mask); + goto done; + + case OCTEON_DMA_BAR_TYPE_BIG: +#ifdef CONFIG_64BIT + /* If the device supports 64bit addressing, then use BAR2 */ + if (dma_mask > BAR2_PCI_ADDRESS) { + result = physical + BAR2_PCI_ADDRESS; + goto done; + } +#endif + if (unlikely(physical < (4ul << 10))) { + panic("dma_map_single: Not allowed to map first 4KB. " + "It interferes with BAR0 special area\n"); + } else if (physical < (256ul << 20)) { + if (unlikely(physical + size > (256ul << 20))) + panic("dma_map_single: Requested memory spans " + "Bar0 0:256MB and bootbus\n"); + result = physical; + goto done; + } else if (unlikely(physical < (512ul << 20))) { + panic("dma_map_single: Not allowed to map bootbus\n"); + } else if (physical < (2ul << 30)) { + if (unlikely(physical + size > (2ul << 30))) + panic("dma_map_single: Requested memory spans " + "Bar0 512MB:2GB and BAR1\n"); + result = physical; + goto done; + } else if (physical < (2ul << 30) + (128 << 20)) { + /* Fall through */ + } else if (physical < + (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20)) { + if (unlikely + (physical + size > + (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20))) + panic("dma_map_single: Requested memory " + "extends past Bar1 (4GB-%luMB)\n", + OCTEON_PCI_BAR1_HOLE_SIZE); + result = physical; + goto done; + } else if ((physical >= 0x410000000ull) && + (physical < 0x420000000ull)) { + if (unlikely(physical + size > 0x420000000ull)) + panic("dma_map_single: Requested memory spans " + "non existant memory\n"); + /* BAR0 fixed mapping 256MB:512MB -> + * 16GB+256MB:16GB+512MB */ + result = physical - 0x400000000ull; + goto done; + } else { + /* Continued below switch statement */ + } + break; + + case OCTEON_DMA_BAR_TYPE_SMALL: +#ifdef CONFIG_64BIT + /* If the device supports 64bit addressing, then use BAR2 */ + if (dma_mask > BAR2_PCI_ADDRESS) { + result = physical + BAR2_PCI_ADDRESS; + goto done; + } +#endif + /* Continued below switch statement */ + break; + + default: + panic("dma_map_single: Invalid octeon_dma_bar_type\n"); + } + + /* Don't allow mapping to span multiple Bar entries. 
The hardware guys + won't guarantee that DMA across boards works */ + if (unlikely((physical >> 22) != ((physical + size - 1) >> 22))) + panic("dma_map_single: " + "Requested memory spans more than one Bar1 entry\n"); + + if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) + start_index = 31; + else if (unlikely(dma_mask < (1ul << 27))) + start_index = (dma_mask >> 22); + else + start_index = 31; + + /* Only one processor can access the Bar register at once */ + spin_lock_irqsave(&bar1_lock, flags); + + /* Look through Bar1 for existing mapping that will work */ + for (index = start_index; index >= 0; index--) { + if ((bar1_state[index].address_bits == physical >> 22) && + (bar1_state[index].ref_count)) { + /* An existing mapping will work, use it */ + bar1_state[index].ref_count++; + if (unlikely(bar1_state[index].ref_count < 0)) + panic("dma_map_single: " + "Bar1[%d] reference count overflowed\n", + (int) index); + result = (index << 22) | (physical & ((1 << 22) - 1)); + /* Large BAR1 is offset at 2GB */ + if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) + result += 2ul << 30; + goto done_unlock; + } + } + + /* No existing mappings, look for a free entry */ + for (index = start_index; index >= 0; index--) { + if (unlikely(bar1_state[index].ref_count == 0)) { + union cvmx_pci_bar1_indexx bar1_index; + /* We have a free entry, use it */ + bar1_state[index].ref_count = 1; + bar1_state[index].address_bits = physical >> 22; + bar1_index.u32 = 0; + /* Address bits[35:22] sent to L2C */ + bar1_index.s.addr_idx = physical >> 22; + /* Don't put PCI accesses in L2. */ + bar1_index.s.ca = 1; + /* Endian Swap Mode */ + bar1_index.s.end_swp = 1; + /* Set '1' when the selected address range is valid. */ + bar1_index.s.addr_v = 1; + octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), + bar1_index.u32); + /* The new mapping is now programmed, use it */ + result = (index << 22) | (physical & ((1 << 22) - 1)); + /* Large BAR1 is offset at 2GB */ + if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) + result += 2ul << 30; + goto done_unlock; + } + } + + pr_err("dma_map_single: " + "Can't find empty BAR1 index for physical mapping 0x%llx\n", + (unsigned long long) physical); + +done_unlock: + spin_unlock_irqrestore(&bar1_lock, flags); +done: + pr_debug("dma_map_single 0x%llx->0x%llx\n", physical, result); + return result; +#endif } void octeon_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr) { - /* Without PCI/PCIe this function can be called for Octeon internal - * devices such as USB. These devices all support 64bit addressing */ +#ifndef CONFIG_PCI + /* + * Without PCI/PCIe this function can be called for Octeon internal + * devices such as USB. These devices all support 64bit addressing. + */ + return; +#else + unsigned long flags; + uint64_t index; + + /* + * Platform devices, such as the internal USB, skip all + * translation and use Octeon physical addresses directly.
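+ * No Bar1 entry was allocated for them at map time, so there is + * nothing to release here.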
+ */ + if (dev->bus == &platform_bus_type) + return; + + switch (octeon_dma_bar_type) { + case OCTEON_DMA_BAR_TYPE_PCIE: + /* Nothing to do, all mappings are static */ + goto done; + + case OCTEON_DMA_BAR_TYPE_BIG: +#ifdef CONFIG_64BIT + /* Nothing to do for addresses using BAR2 */ + if (dma_addr >= BAR2_PCI_ADDRESS) + goto done; +#endif + if (unlikely(dma_addr < (4ul << 10))) + panic("dma_unmap_single: Unexpected DMA address 0x%llx\n", + dma_addr); + else if (dma_addr < (2ul << 30)) + /* Nothing to do for addresses using BAR0 */ + goto done; + else if (dma_addr < (2ul << 30) + (128ul << 20)) + /* Need to unmap, fall through */ + index = (dma_addr - (2ul << 30)) >> 22; + else if (dma_addr < + (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20)) + goto done; /* Nothing to do for the rest of BAR1 */ + else + panic("dma_unmap_single: Unexpected DMA address 0x%llx\n", + dma_addr); + /* Continued below switch statement */ + break; + + case OCTEON_DMA_BAR_TYPE_SMALL: +#ifdef CONFIG_64BIT + /* Nothing to do for addresses using BAR2 */ + if (dma_addr >= BAR2_PCI_ADDRESS) + goto done; +#endif + index = dma_addr >> 22; + /* Continued below switch statement */ + break; + + default: + panic("dma_unmap_single: Invalid octeon_dma_bar_type\n"); + } + + if (unlikely(index > 31)) + panic("dma_unmap_single: " + "Attempt to unmap an invalid address (0x%llx)\n", + dma_addr); + + spin_lock_irqsave(&bar1_lock, flags); + bar1_state[index].ref_count--; + if (bar1_state[index].ref_count == 0) + octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), 0); + else if (unlikely(bar1_state[index].ref_count < 0)) + panic("dma_unmap_single: Bar1[%u] reference count < 0\n", + (int) index); + spin_unlock_irqrestore(&bar1_lock, flags); +done: + pr_debug("dma_unmap_single 0x%llx\n", dma_addr); return; +#endif } diff --git a/arch/mips/cavium-octeon/executive/Makefile b/arch/mips/cavium-octeon/executive/Makefile index 80d6cb2..2fd66db 100644 --- a/arch/mips/cavium-octeon/executive/Makefile +++ b/arch/mips/cavium-octeon/executive/Makefile @@ -11,3 +11,4 @@ obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o +obj-$(CONFIG_PCI) += cvmx-helper-errata.o cvmx-helper-jtag.o diff --git a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c index 4f5a08b..25666da 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c +++ b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c @@ -31,6 +31,7 @@ */ #include <linux/kernel.h> +#include <linux/module.h> #include <asm/octeon/cvmx.h> #include <asm/octeon/cvmx-spinlock.h> @@ -97,6 +98,33 @@ void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment) return cvmx_bootmem_alloc_range(size, alignment, 0, 0); } +void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, + uint64_t max_addr, uint64_t align, + char *name) +{ + int64_t addr; + + addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr, + align, name, 0); + if (addr >= 0) + return cvmx_phys_to_ptr(addr); + else + return NULL; +} + +void *cvmx_bootmem_alloc_named_address(uint64_t size, uint64_t address, + char *name) +{ + return cvmx_bootmem_alloc_named_range(size, address, address + size, + 0, name); +} + +void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment, char *name) +{ + return cvmx_bootmem_alloc_named_range(size, 0, 0, alignment, name); +} +EXPORT_SYMBOL(cvmx_bootmem_alloc_named); + int cvmx_bootmem_free_named(char *name) { return cvmx_bootmem_phy_named_block_free(name, 0); @@ -106,6 +134,7 @@ struct cvmx_bootmem_named_block_desc
*cvmx_bootmem_find_named_block(char *name) { return cvmx_bootmem_phy_named_block_find(name, 0); } +EXPORT_SYMBOL(cvmx_bootmem_find_named_block); void cvmx_bootmem_lock(void) { @@ -584,3 +613,78 @@ int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags) cvmx_bootmem_unlock(); return named_block_ptr != NULL; /* 0 on failure, 1 on success */ } + +int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr, + uint64_t max_addr, + uint64_t alignment, + char *name, + uint32_t flags) +{ + int64_t addr_allocated; + struct cvmx_bootmem_named_block_desc *named_block_desc_ptr; + +#ifdef DEBUG + cvmx_dprintf("cvmx_bootmem_phy_named_block_alloc: size: 0x%llx, min: " + "0x%llx, max: 0x%llx, align: 0x%llx, name: %s\n", + (unsigned long long)size, + (unsigned long long)min_addr, + (unsigned long long)max_addr, + (unsigned long long)alignment, + name); +#endif + if (cvmx_bootmem_desc->major_version != 3) { + cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: " + "%d.%d at addr: %p\n", + (int)cvmx_bootmem_desc->major_version, + (int)cvmx_bootmem_desc->minor_version, + cvmx_bootmem_desc); + return -1; + } + + /* + * Take lock here, as name lookup/block alloc/name add need to + * be atomic. + */ + if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING)) + cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock)); + + /* Get pointer to first available named block descriptor */ + named_block_desc_ptr = + cvmx_bootmem_phy_named_block_find(NULL, + flags | CVMX_BOOTMEM_FLAG_NO_LOCKING); + + /* + * Check to see if name already in use, return error if name + * not available or no more room for blocks. + */ + if (cvmx_bootmem_phy_named_block_find(name, + flags | CVMX_BOOTMEM_FLAG_NO_LOCKING) || !named_block_desc_ptr) { + if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING)) + cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock)); + return -1; + } + + + /* + * Round size up to a multiple of the minimum alignment bytes. We need + * the actual size allocated to allow for blocks to be + * coalesced when they are freed. The alloc routine does the + * same rounding up on all allocations. + */ + size = __ALIGN_MASK(size, (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)); + + addr_allocated = cvmx_bootmem_phy_alloc(size, min_addr, max_addr, + alignment, + flags | CVMX_BOOTMEM_FLAG_NO_LOCKING); + if (addr_allocated >= 0) { + named_block_desc_ptr->base_addr = addr_allocated; + named_block_desc_ptr->size = size; + strncpy(named_block_desc_ptr->name, name, + cvmx_bootmem_desc->named_block_name_len); + named_block_desc_ptr->name[cvmx_bootmem_desc->named_block_name_len - 1] = 0; + } + + if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING)) + cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock)); + return addr_allocated; +} diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-errata.c b/arch/mips/cavium-octeon/executive/cvmx-helper-errata.c new file mode 100644 index 0000000..868659e --- /dev/null +++ b/arch/mips/cavium-octeon/executive/cvmx-helper-errata.c @@ -0,0 +1,73 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation.
+ * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/** + * + * Fixes and workaround for Octeon chip errata. This file + * contains functions called by cvmx-helper to workaround known + * chip errata. For the most part, code doesn't need to call + * these functions directly. + * + */ +#include <linux/module.h> + +#include <asm/octeon/octeon.h> + +#include <asm/octeon/cvmx-helper-jtag.h> + +/** + * Due to errata G-720, the 2nd order CDR circuit on CN52XX pass + * 1 doesn't work properly. The following code disables 2nd order + * CDR for the specified QLM. + * + * @qlm: QLM to disable 2nd order CDR for. + */ +void __cvmx_helper_errata_qlm_disable_2nd_order_cdr(int qlm) +{ + int lane; + cvmx_helper_qlm_jtag_init(); + /* We need to load all four lanes of the QLM, a total of 1072 bits */ + for (lane = 0; lane < 4; lane++) { + /* + * Each lane has 268 bits. We need to set + * cfg_cdr_incx<67:64> = 3 and cfg_cdr_secord<77> = + * 1. All other bits are zero. Bits go in LSB first, + * so start off with the zeros for bits <63:0>. + */ + cvmx_helper_qlm_jtag_shift_zeros(qlm, 63 - 0 + 1); + /* cfg_cdr_incx<67:64>=3 */ + cvmx_helper_qlm_jtag_shift(qlm, 67 - 64 + 1, 3); + /* Zeros for bits <76:68> */ + cvmx_helper_qlm_jtag_shift_zeros(qlm, 76 - 68 + 1); + /* cfg_cdr_secord<77>=1 */ + cvmx_helper_qlm_jtag_shift(qlm, 77 - 77 + 1, 1); + /* Zeros for bits <267:78> */ + cvmx_helper_qlm_jtag_shift_zeros(qlm, 267 - 78 + 1); + } + cvmx_helper_qlm_jtag_update(qlm); +} +EXPORT_SYMBOL(__cvmx_helper_errata_qlm_disable_2nd_order_cdr); diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c b/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c new file mode 100644 index 0000000..c1c5489 --- /dev/null +++ b/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c @@ -0,0 +1,144 @@ + +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. 
+ * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/** + * + * Helper utilities for qlm_jtag. + * + */ + +#include <asm/octeon/octeon.h> +#include <asm/octeon/cvmx-helper-jtag.h> + + +/** + * Initialize the internal QLM JTAG logic to allow programming + * of the JTAG chain by the cvmx_helper_qlm_jtag_*() functions. + * These functions should only be used at the direction of Cavium + * Networks. Programming incorrect values into the JTAG chain + * can cause chip damage. + */ +void cvmx_helper_qlm_jtag_init(void) +{ + union cvmx_ciu_qlm_jtgc jtgc; + uint32_t clock_div = 0; + uint32_t divisor = cvmx_sysinfo_get()->cpu_clock_hz / (25 * 1000000); + divisor = (divisor - 1) >> 2; + /* Convert the divisor into a power of 2 shift */ + while (divisor) { + clock_div++; + divisor = divisor >> 1; + } + + /* + * Clock divider for QLM JTAG operations. eclk is divided by + * 2^(CLK_DIV + 2) + */ + jtgc.u64 = 0; + jtgc.s.clk_div = clock_div; + jtgc.s.mux_sel = 0; + if (OCTEON_IS_MODEL(OCTEON_CN52XX)) + jtgc.s.bypass = 0x3; + else + jtgc.s.bypass = 0xf; + cvmx_write_csr(CVMX_CIU_QLM_JTGC, jtgc.u64); + cvmx_read_csr(CVMX_CIU_QLM_JTGC); +} + +/** + * Write up to 32bits into the QLM jtag chain. Bits are shifted + * into the MSB and out the LSB, so you should shift in the low + * order bits followed by the high order bits. The JTAG chain is + * 4 * 268 bits long, or 1072. + * + * @qlm: QLM to shift value into + * @bits: Number of bits to shift in (1-32). + * @data: Data to shift in. Bit 0 enters the chain first, followed by + * bit 1, etc. + * + * Returns The low order bits of the JTAG chain that shifted out of the + * circle. + */ +uint32_t cvmx_helper_qlm_jtag_shift(int qlm, int bits, uint32_t data) +{ + union cvmx_ciu_qlm_jtgd jtgd; + jtgd.u64 = 0; + jtgd.s.shift = 1; + jtgd.s.shft_cnt = bits - 1; + jtgd.s.shft_reg = data; + if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) + jtgd.s.select = 1 << qlm; + cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64); + do { + jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD); + } while (jtgd.s.shift); + return jtgd.s.shft_reg >> (32 - bits); +} + +/** + * Shift long sequences of zeros into the QLM JTAG chain. It is + * common to need to shift more than 32 bits of zeros into the + * chain. This function is a convenience wrapper around + * cvmx_helper_qlm_jtag_shift() to shift more than 32 bits of + * zeros at a time. + * + * @qlm: QLM to shift zeros into + * @bits: Number of zero bits to shift in + */ +void cvmx_helper_qlm_jtag_shift_zeros(int qlm, int bits) +{ + while (bits > 0) { + int n = bits; + if (n > 32) + n = 32; + cvmx_helper_qlm_jtag_shift(qlm, n, 0); + bits -= n; + } +} + +/** + * Program the QLM JTAG chain into all lanes of the QLM. You must + * have already shifted in 268*4, or 1072 bits into the JTAG + * chain. Updating invalid values can possibly cause chip damage.
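+ * The update written below is polled until the hardware clears the + * update bit again.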
+ * + * @qlm: QLM to program */ +void cvmx_helper_qlm_jtag_update(int qlm) +{ + union cvmx_ciu_qlm_jtgd jtgd; + + /* Update the new data */ + jtgd.u64 = 0; + jtgd.s.update = 1; + if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) + jtgd.s.select = 1 << qlm; + cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64); + do { + jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD); + } while (jtgd.s.update); +} diff --git a/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c index 48123707..e583889 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c +++ b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c @@ -29,6 +29,7 @@ * This module provides system/board/application information obtained * by the bootloader. */ +#include <linux/module.h> #include <asm/octeon/cvmx.h> #include <asm/octeon/cvmx-spinlock.h> @@ -69,6 +70,7 @@ struct cvmx_sysinfo *cvmx_sysinfo_get(void) { return &(state.sysinfo); } +EXPORT_SYMBOL(cvmx_sysinfo_get); /** * This function is used in non-simple executive environments (such as diff --git a/arch/mips/cavium-octeon/msi.c b/arch/mips/cavium-octeon/msi.c new file mode 100644 index 0000000..964b03b --- /dev/null +++ b/arch/mips/cavium-octeon/msi.c @@ -0,0 +1,288 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2005-2007 Cavium Networks + */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/msi.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> + +#include <asm/octeon/octeon.h> +#include <asm/octeon/cvmx-npi-defs.h> +#include <asm/octeon/cvmx-pci-defs.h> +#include <asm/octeon/cvmx-npei-defs.h> +#include <asm/octeon/cvmx-pexp-defs.h> + +#include "pci-common.h" + +/* + * Each bit in msi_free_irq_bitmask represents an MSI interrupt that is + * in use. + */ +static uint64_t msi_free_irq_bitmask; + +/* + * Each bit in msi_multiple_irq_bitmask indicates that the device using + * this bit in msi_free_irq_bitmask is also using the next bit. This + * is used so we can disable all of the MSI interrupts when a device + * uses multiple. + */ +static uint64_t msi_multiple_irq_bitmask; + +/* + * This lock controls updates to msi_free_irq_bitmask and + * msi_multiple_irq_bitmask. + */ +static DEFINE_SPINLOCK(msi_free_irq_bitmask_lock); + + +/** + * Called when a driver requests MSI interrupts instead of the + * legacy INT A-D. This routine will allocate multiple interrupts + * for MSI devices that support them. A device can override this by + * programming the MSI control bits [6:4] before calling + * pci_enable_msi(). + * + * @param dev Device requesting MSI interrupts + * @param desc MSI descriptor + * + * Returns 0 on success. + */ +int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) +{ + struct msi_msg msg; + uint16_t control; + int configured_private_bits; + int request_private_bits; + int irq; + int irq_step; + uint64_t search_mask; + + /* + * Read the MSI config to figure out how many IRQs this device + * wants. Most devices only want 1, which will give + * configured_private_bits and request_private_bits equal 0. + */ + pci_read_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, + &control); + + /* + * If the number of private bits has been configured then use + * that value instead of the requested number. This gives the + * driver the chance to override the number of interrupts + * before calling pci_enable_msi().
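+ * Whatever width is finally granted is written back into + * PCI_MSI_FLAGS_QSIZE at the end of this function.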
+ */ + configured_private_bits = (control & PCI_MSI_FLAGS_QSIZE) >> 4; + if (configured_private_bits == 0) { + /* Nothing is configured, so use the hardware requested size */ + request_private_bits = (control & PCI_MSI_FLAGS_QMASK) >> 1; + } else { + /* + * Use the number of configured bits, assuming the + * driver wanted to override the hardware request + * value. + */ + request_private_bits = configured_private_bits; + } + + /* + * The PCI 2.3 spec mandates that there are at most 32 + * interrupts. If this device asks for more, only give it one. + */ + if (request_private_bits > 5) + request_private_bits = 0; + +try_only_one: + /* + * The IRQs have to be aligned on a power of two based on the + * number being requested. + */ + irq_step = 1 << request_private_bits; + + /* Mask with one bit for each IRQ */ + search_mask = (1 << irq_step) - 1; + + /* + * We're going to search msi_free_irq_bitmask for zero + * bits. This represents an MSI interrupt number that isn't in + * use. + */ + spin_lock(&msi_free_irq_bitmask_lock); + for (irq = 0; irq < 64; irq += irq_step) { + if ((msi_free_irq_bitmask & (search_mask << irq)) == 0) { + msi_free_irq_bitmask |= search_mask << irq; + msi_multiple_irq_bitmask |= (search_mask >> 1) << irq; + break; + } + } + spin_unlock(&msi_free_irq_bitmask_lock); + + /* Make sure the search for available interrupts didn't fail */ + if (irq >= 64) { + if (request_private_bits) { + pr_err("arch_setup_msi_irq: Unable to find %d free " + "interrupts, trying just one", + 1 << request_private_bits); + request_private_bits = 0; + goto try_only_one; + } else + panic("arch_setup_msi_irq: Unable to find a free MSI " + "interrupt"); + } + + /* MSI interrupts start at logical IRQ OCTEON_IRQ_MSI_BIT0 */ + irq += OCTEON_IRQ_MSI_BIT0; + + switch (octeon_dma_bar_type) { + case OCTEON_DMA_BAR_TYPE_SMALL: + /* When not using big bar, Bar 0 is based at 128MB */ + msg.address_lo = + ((128ul << 20) + CVMX_PCI_MSI_RCV) & 0xffffffff; + msg.address_hi = ((128ul << 20) + CVMX_PCI_MSI_RCV) >> 32; + break; + case OCTEON_DMA_BAR_TYPE_BIG: + /* When using big bar, Bar 0 is based at 0 */ + msg.address_lo = (0 + CVMX_PCI_MSI_RCV) & 0xffffffff; + msg.address_hi = (0 + CVMX_PCI_MSI_RCV) >> 32; + break; + case OCTEON_DMA_BAR_TYPE_PCIE: + /* When using PCIe, Bar 0 is based at 0 */ + /* FIXME CVMX_NPEI_MSI_RCV* other than 0? */ + msg.address_lo = (0 + CVMX_NPEI_PCIE_MSI_RCV) & 0xffffffff; + msg.address_hi = (0 + CVMX_NPEI_PCIE_MSI_RCV) >> 32; + break; + default: + panic("arch_setup_msi_irq: Invalid octeon_dma_bar_type\n"); + } + msg.data = irq - OCTEON_IRQ_MSI_BIT0; + + /* Update the number of IRQs the device has available to it */ + control &= ~PCI_MSI_FLAGS_QSIZE; + control |= request_private_bits << 4; + pci_write_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, + control); + + set_irq_msi(irq, desc); + write_msi_msg(irq, &msg); + return 0; +} + + +/** + * Called when a device no longer needs its MSI interrupts. All + * MSI interrupts for the device are freed. + * + * @irq: The device's first irq number. There may be multiple in sequence. + */ +void arch_teardown_msi_irq(unsigned int irq) +{ + int number_irqs; + uint64_t bitmask; + + if ((irq < OCTEON_IRQ_MSI_BIT0) || (irq > OCTEON_IRQ_MSI_BIT63)) + panic("arch_teardown_msi_irq: Attempted to teardown illegal " + "MSI interrupt (%d)", irq); + irq -= OCTEON_IRQ_MSI_BIT0; + + /* + * Count the number of IRQs we need to free by looking at the + * msi_multiple_irq_bitmask. Each bit set means that the next + * IRQ is also owned by this device.
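+ * The device's own interrupt carries no such bit, hence the extra + * increment after the loop below.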
+ */ + number_irqs = 0; + while ((irq+number_irqs < 64) && + (msi_multiple_irq_bitmask & (1ull << (irq + number_irqs)))) + number_irqs++; + number_irqs++; + /* Mask with one bit for each IRQ */ + bitmask = (1 << number_irqs) - 1; + /* Shift the mask to the correct bit location */ + bitmask <<= irq; + if ((msi_free_irq_bitmask & bitmask) != bitmask) + panic("arch_teardown_msi_irq: Attempted to teardown MSI " + "interrupt (%d) not in use", irq); + + /* Checks are done, update the in use bitmask */ + spin_lock(&msi_free_irq_bitmask_lock); + msi_free_irq_bitmask &= ~bitmask; + msi_multiple_irq_bitmask &= ~bitmask; + spin_unlock(&msi_free_irq_bitmask_lock); +} + + +/** + * Called by the interrupt handling code when an MSI interrupt + * occurs. + * + * @param cpl + * @param dev_id + * + * @return + */ +static irqreturn_t octeon_msi_interrupt(int cpl, void *dev_id) +{ + uint64_t msi_bits; + int irq; + + if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) + msi_bits = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_RCV0); + else + msi_bits = cvmx_read_csr(CVMX_NPI_NPI_MSI_RCV); + irq = fls64(msi_bits); + if (irq) { + irq += OCTEON_IRQ_MSI_BIT0 - 1; + if (irq_desc[irq].action) { + do_IRQ(irq); + return IRQ_HANDLED; + } else { + pr_err("Spurious MSI interrupt %d\n", irq); + if (octeon_has_feature(OCTEON_FEATURE_PCIE)) { + /* These chips have PCIe */ + cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0, + 1ull << (irq - + OCTEON_IRQ_MSI_BIT0)); + } else { + /* These chips have PCI */ + cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV, + 1ull << (irq - + OCTEON_IRQ_MSI_BIT0)); + } + } + } + return IRQ_NONE; +} + + +/** + * Initializes the MSI interrupt handling code + * + * @return + */ +int octeon_msi_initialize(void) +{ + int r; + if (octeon_has_feature(OCTEON_FEATURE_PCIE)) { + r = request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt, + IRQF_SHARED, + "MSI[0:63]", octeon_msi_interrupt); + } else if (octeon_is_pci_host()) { + r = request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt, + IRQF_SHARED, + "MSI[0:15]", octeon_msi_interrupt); + r += request_irq(OCTEON_IRQ_PCI_MSI1, octeon_msi_interrupt, + IRQF_SHARED, + "MSI[16:31]", octeon_msi_interrupt); + r += request_irq(OCTEON_IRQ_PCI_MSI2, octeon_msi_interrupt, + IRQF_SHARED, + "MSI[32:47]", octeon_msi_interrupt); + r += request_irq(OCTEON_IRQ_PCI_MSI3, octeon_msi_interrupt, + IRQF_SHARED, + "MSI[48:63]", octeon_msi_interrupt); + } + return 0; +} + +subsys_initcall(octeon_msi_initialize); diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index d3a0c81..8dfa009 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -10,6 +10,8 @@ #include <linux/hardirq.h> #include <asm/octeon/octeon.h> +#include <asm/octeon/cvmx-pexp-defs.h> +#include <asm/octeon/cvmx-npi-defs.h> DEFINE_RWLOCK(octeon_irq_ciu0_rwlock); DEFINE_RWLOCK(octeon_irq_ciu1_rwlock); diff --git a/arch/mips/cavium-octeon/pci-common.c b/arch/mips/cavium-octeon/pci-common.c new file mode 100644 index 0000000..cd029f8 --- /dev/null +++ b/arch/mips/cavium-octeon/pci-common.c @@ -0,0 +1,137 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 2005-2007 Cavium Networks + */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/time.h> +#include <linux/delay.h> +#include "pci-common.h" + +typeof(pcibios_map_irq) *octeon_pcibios_map_irq; +enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID; + +/** + * Map a PCI device to the appropriate interrupt line + * + * @param dev The Linux PCI device structure for the device to map + * @param slot The slot number for this device on __BUS 0__. Linux + * enumerates through all the bridges and figures out the + * slot on Bus 0 where this device eventually hooks to. + * @param pin The PCI interrupt pin read from the device, then swizzled + * as it goes through each bridge. + * @return Interrupt number for the device + */ +int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + if (octeon_pcibios_map_irq) + return octeon_pcibios_map_irq(dev, slot, pin); + else + panic("octeon_pcibios_map_irq doesn't point to a " + "pcibios_map_irq() function"); +} + + +/** + * Called to perform platform specific PCI setup + * + * @param dev + * @return + */ +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + uint16_t config; + uint32_t dconfig; + int pos; + /* + * Force the Cache line setting to 64 bytes. The standard + * Linux bus scan doesn't seem to set it. Octeon really has + * 128 byte lines, but Intel bridges get really upset if you + * try and set values above 64 bytes. Value is specified in + * 32bit words. + */ + pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 64 / 4); + /* Set latency timers for all devices */ + pci_write_config_byte(dev, PCI_LATENCY_TIMER, 48); + + /* Enable reporting System errors and parity errors on all devices */ + /* Enable parity checking and error reporting */ + pci_read_config_word(dev, PCI_COMMAND, &config); + config |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; + pci_write_config_word(dev, PCI_COMMAND, config); + + if (dev->subordinate) { + /* Set latency timers on sub bridges */ + pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 48); + /* More bridge error detection */ + pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &config); + config |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR; + pci_write_config_word(dev, PCI_BRIDGE_CONTROL, config); + } + + /* Enable the PCIe normal error reporting */ + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (pos) { + /* Update Device Control */ + pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &config); + /* Correctable Error Reporting */ + config |= PCI_EXP_DEVCTL_CERE; + /* Non-Fatal Error Reporting */ + config |= PCI_EXP_DEVCTL_NFERE; + /* Fatal Error Reporting */ + config |= PCI_EXP_DEVCTL_FERE; + /* Unsupported Request */ + config |= PCI_EXP_DEVCTL_URRE; + pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, config); + } + + /* Find the Advanced Error Reporting capability */ + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); + if (pos) { + /* Clear Uncorrectable Error Status */ + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, + &dconfig); + pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, + dconfig); + /* Enable reporting of all uncorrectable errors */ + /* Uncorrectable Error Mask - turned on bits disable errors */ + pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, 0); + /* + * Leave severity at HW default. This only controls if + * errors are reported as uncorrectable or + * correctable, not if the error is reported. 
+		 */
+		/* PCI_ERR_UNCOR_SEVER - Uncorrectable Error Severity */
+		/* Clear Correctable Error Status */
+		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &dconfig);
+		pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, dconfig);
+		/* Enable reporting of all correctable errors */
+		/* Correctable Error Mask - turned on bits disable errors */
+		pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, 0);
+		/* Advanced Error Capabilities */
+		pci_read_config_dword(dev, pos + PCI_ERR_CAP, &dconfig);
+		/* ECRC Generation Enable */
+		if (dconfig & PCI_ERR_CAP_ECRC_GENC)
+			dconfig |= PCI_ERR_CAP_ECRC_GENE;
+		/* ECRC Check Enable */
+		if (dconfig & PCI_ERR_CAP_ECRC_CHKC)
+			dconfig |= PCI_ERR_CAP_ECRC_CHKE;
+		pci_write_config_dword(dev, pos + PCI_ERR_CAP, dconfig);
+		/* PCI_ERR_HEADER_LOG - Header Log Register (16 bytes) */
+		/* Report all errors to the root complex */
+		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND,
+				       PCI_ERR_ROOT_CMD_COR_EN |
+				       PCI_ERR_ROOT_CMD_NONFATAL_EN |
+				       PCI_ERR_ROOT_CMD_FATAL_EN);
+		/* Clear the Root status register */
+		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &dconfig);
+		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
+	}
+
+	return 0;
+}
diff --git a/arch/mips/cavium-octeon/pci-common.h b/arch/mips/cavium-octeon/pci-common.h
new file mode 100644
index 0000000..74ae799
--- /dev/null
+++ b/arch/mips/cavium-octeon/pci-common.h
@@ -0,0 +1,39 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005-2007 Cavium Networks
+ */
+#ifndef __OCTEON_PCI_COMMON_H__
+#define __OCTEON_PCI_COMMON_H__
+
+#include <linux/pci.h>
+
+/* Some PCI cards require delays when accessing config space. */
+#define PCI_CONFIG_SPACE_DELAY 10000
+
+/* pcibios_map_irq() is defined inside pci-common.c. All it does is call the
+   Octeon specific version pointed to by this variable. This function needs to
+   change for PCI or PCIe based hosts */
+extern typeof(pcibios_map_irq) *octeon_pcibios_map_irq;
+
+/* The following defines are only used when octeon_dma_bar_type =
+   OCTEON_DMA_BAR_TYPE_BIG */
+#define OCTEON_PCI_BAR1_HOLE_BITS 5
+#define OCTEON_PCI_BAR1_HOLE_SIZE (1ul<<(OCTEON_PCI_BAR1_HOLE_BITS+3))
+
+enum octeon_dma_bar_type {
+	OCTEON_DMA_BAR_TYPE_INVALID,
+	OCTEON_DMA_BAR_TYPE_SMALL,
+	OCTEON_DMA_BAR_TYPE_BIG,
+	OCTEON_DMA_BAR_TYPE_PCIE
+};
+
+/**
+ * This is a variable to tell the DMA mapping system in dma-octeon.c
+ * how to map PCI DMA addresses.
+ */
+extern enum octeon_dma_bar_type octeon_dma_bar_type;
+
+#endif
diff --git a/arch/mips/cavium-octeon/pci.c b/arch/mips/cavium-octeon/pci.c
new file mode 100644
index 0000000..67c0ff5
--- /dev/null
+++ b/arch/mips/cavium-octeon/pci.c
@@ -0,0 +1,568 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005-2007 Cavium Networks
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+
+#include <asm/time.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-npi-defs.h>
+#include <asm/octeon/cvmx-pci-defs.h>
+
+#include "pci-common.h"
+
+#define USE_OCTEON_INTERNAL_ARBITER
+
+/*
+ * Octeon's PCI controller uses did=3, subdid=2 for PCI IO
+ * addresses.
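+ * (Packed into the union octeon_pci_address layout below, those
+ * field values work out to the 0x80011a0400000000ull base defined
+ * as OCTEON_PCI_IOSPACE_BASE.)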
Use PCI endian swapping 1 so no address swapping is + * necessary. The Linux io routines will endian swap the data. + */ +#define OCTEON_PCI_IOSPACE_BASE 0x80011a0400000000ull +#define OCTEON_PCI_IOSPACE_SIZE (1ull<<32) + +/* Octeon't PCI controller uses did=3, subdid=3 for PCI memory. */ +#define OCTEON_PCI_MEMSPACE_OFFSET (0x00011b0000000000ull) + +/** + * This is the bit decoding used for the Octeon PCI controller addresses + */ +union octeon_pci_address { + uint64_t u64; + struct { + uint64_t upper:2; + uint64_t reserved:13; + uint64_t io:1; + uint64_t did:5; + uint64_t subdid:3; + uint64_t reserved2:4; + uint64_t endian_swap:2; + uint64_t reserved3:10; + uint64_t bus:8; + uint64_t dev:5; + uint64_t func:3; + uint64_t reg:8; + } s; +}; + +/** + * Return the mapping of PCI device number to IRQ line. Each + * character in the return string represents the interrupt + * line for the device at that position. Device 1 maps to the + * first character, etc. The characters A-D are used for PCI + * interrupts. + * + * Returns PCI interrupt mapping + */ +const char *octeon_get_pci_interrupts(void) +{ + /* + * Returning an empty string causes the interrupts to be + * routed based on the PCI specification. From the PCI spec: + * + * INTA# of Device Number 0 is connected to IRQW on the system + * board. (Device Number has no significance regarding being + * located on the system board or in a connector.) INTA# of + * Device Number 1 is connected to IRQX on the system + * board. INTA# of Device Number 2 is connected to IRQY on the + * system board. INTA# of Device Number 3 is connected to IRQZ + * on the system board. The table below describes how each + * agent's INTx# lines are connected to the system board + * interrupt lines. The following equation can be used to + * determine to which INTx# signal on the system board a given + * device's INTx# line(s) is connected. + * + * MB = (D + I) MOD 4 MB = System board Interrupt (IRQW = 0, + * IRQX = 1, IRQY = 2, and IRQZ = 3) D = Device Number I = + * Interrupt Number (INTA# = 0, INTB# = 1, INTC# = 2, and + * INTD# = 3) + */ + switch (octeon_bootinfo->board_type) { + case CVMX_BOARD_TYPE_NAO38: + /* This is really the NAC38 */ + return "AAAAADABAAAAAAAAAAAAAAAAAAAAAAAA"; + case CVMX_BOARD_TYPE_THUNDER: + return ""; + case CVMX_BOARD_TYPE_EBH3000: + return ""; + case CVMX_BOARD_TYPE_EBH3100: + case CVMX_BOARD_TYPE_CN3010_EVB_HS5: + case CVMX_BOARD_TYPE_CN3005_EVB_HS5: + return "AAABAAAAAAAAAAAAAAAAAAAAAAAAAAAA"; + case CVMX_BOARD_TYPE_BBGW_REF: + return "AABCD"; + default: + return ""; + } +} + +/** + * Map a PCI device to the appropriate interrupt line + * + * @dev: The Linux PCI device structure for the device to map + * @slot: The slot number for this device on __BUS 0__. Linux + * enumerates through all the bridges and figures out the + * slot on Bus 0 where this device eventually hooks to. + * @pin: The PCI interrupt pin read from the device, then swizzled + * as it goes through each bridge. 
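 *
 *       For example, with the default (empty) mapping, INTA (pin 1)
 *       of the device in slot 2 resolves to
 *       ((2 + 1 - 3) & 3) + OCTEON_IRQ_PCI_INT0 == OCTEON_IRQ_PCI_INT0.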
+ * Returns Interrupt number for the device + */ +int __init octeon_pci_pcibios_map_irq(const struct pci_dev *dev, + u8 slot, u8 pin) +{ + int irq_num; + const char *interrupts; + int dev_num; + + /* Get the board specific interrupt mapping */ + interrupts = octeon_get_pci_interrupts(); + + dev_num = dev->devfn >> 3; + if (dev_num < strlen(interrupts)) + irq_num = ((interrupts[dev_num] - 'A' + pin - 1) & 3) + + OCTEON_IRQ_PCI_INT0; + else + irq_num = ((slot + pin - 3) & 3) + OCTEON_IRQ_PCI_INT0; + return irq_num; +} + + +/** + * Read a value from configuration space + * + */ +static int octeon_read_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 *val) +{ + union octeon_pci_address pci_addr; + + pci_addr.u64 = 0; + pci_addr.s.upper = 2; + pci_addr.s.io = 1; + pci_addr.s.did = 3; + pci_addr.s.subdid = 1; + pci_addr.s.endian_swap = 1; + pci_addr.s.bus = bus->number; + pci_addr.s.dev = devfn >> 3; + pci_addr.s.func = devfn & 0x7; + pci_addr.s.reg = reg; + +#if PCI_CONFIG_SPACE_DELAY + udelay(PCI_CONFIG_SPACE_DELAY); +#endif + switch (size) { + case 4: + *val = le32_to_cpu(cvmx_read64_uint32(pci_addr.u64)); + return PCIBIOS_SUCCESSFUL; + case 2: + *val = le16_to_cpu(cvmx_read64_uint16(pci_addr.u64)); + return PCIBIOS_SUCCESSFUL; + case 1: + *val = cvmx_read64_uint8(pci_addr.u64); + return PCIBIOS_SUCCESSFUL; + } + return PCIBIOS_FUNC_NOT_SUPPORTED; +} + + +/** + * Write a value to PCI configuration space + * + * @bus: + * @devfn: + * @reg: + * @size: + * @val: + * Returns + */ +static int octeon_write_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 val) +{ + union octeon_pci_address pci_addr; + + pci_addr.u64 = 0; + pci_addr.s.upper = 2; + pci_addr.s.io = 1; + pci_addr.s.did = 3; + pci_addr.s.subdid = 1; + pci_addr.s.endian_swap = 1; + pci_addr.s.bus = bus->number; + pci_addr.s.dev = devfn >> 3; + pci_addr.s.func = devfn & 0x7; + pci_addr.s.reg = reg; + +#if PCI_CONFIG_SPACE_DELAY + udelay(PCI_CONFIG_SPACE_DELAY); +#endif + switch (size) { + case 4: + cvmx_write64_uint32(pci_addr.u64, cpu_to_le32(val)); + return PCIBIOS_SUCCESSFUL; + case 2: + cvmx_write64_uint16(pci_addr.u64, cpu_to_le16(val)); + return PCIBIOS_SUCCESSFUL; + case 1: + cvmx_write64_uint8(pci_addr.u64, val); + return PCIBIOS_SUCCESSFUL; + } + return PCIBIOS_FUNC_NOT_SUPPORTED; +} + + +static struct pci_ops octeon_pci_ops = { + octeon_read_config, + octeon_write_config, +}; + +static struct resource octeon_pci_mem_resource = { + .start = 0, + .end = 0, + .name = "Octeon PCI MEM", + .flags = IORESOURCE_MEM, +}; + +/* + * PCI ports must be above 16KB so the ISA bus filtering in the PCI-X to PCI + * bridge + */ +static struct resource octeon_pci_io_resource = { + .start = 0x4000, + .end = OCTEON_PCI_IOSPACE_SIZE - 1, + .name = "Octeon PCI IO", + .flags = IORESOURCE_IO, +}; + +static struct pci_controller octeon_pci_controller = { + .pci_ops = &octeon_pci_ops, + .mem_resource = &octeon_pci_mem_resource, + .mem_offset = OCTEON_PCI_MEMSPACE_OFFSET, + .io_resource = &octeon_pci_io_resource, + .io_offset = 0, + .io_map_base = OCTEON_PCI_IOSPACE_BASE, +}; + + +/** + * Low level initialize the Octeon PCI controller + * + * Returns + */ +static void octeon_pci_initialize(void) +{ + union cvmx_pci_cfg01 cfg01; + union cvmx_npi_ctl_status ctl_status; + union cvmx_pci_ctl_status_2 ctl_status_2; + union cvmx_pci_cfg19 cfg19; + union cvmx_pci_cfg16 cfg16; + union cvmx_pci_cfg22 cfg22; + union cvmx_pci_cfg56 cfg56; + + /* Reset the PCI Bus */ + cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x1); + 
cvmx_read_csr(CVMX_CIU_SOFT_PRST); + + udelay(2000); /* Hold PCI reset for 2 ms */ + + ctl_status.u64 = 0; /* cvmx_read_csr(CVMX_NPI_CTL_STATUS); */ + ctl_status.s.max_word = 1; + ctl_status.s.timer = 1; + cvmx_write_csr(CVMX_NPI_CTL_STATUS, ctl_status.u64); + + /* Deassert PCI reset and advertize PCX Host Mode Device Capability + (64b) */ + cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x4); + cvmx_read_csr(CVMX_CIU_SOFT_PRST); + + udelay(2000); /* Wait 2 ms after deasserting PCI reset */ + + ctl_status_2.u32 = 0; + ctl_status_2.s.tsr_hwm = 1; /* Initializes to 0. Must be set + before any PCI reads. */ + ctl_status_2.s.bar2pres = 1; /* Enable BAR2 */ + ctl_status_2.s.bar2_enb = 1; + ctl_status_2.s.bar2_cax = 1; /* Don't use L2 */ + ctl_status_2.s.bar2_esx = 1; + ctl_status_2.s.pmo_amod = 1; /* Round robin priority */ + if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) { + /* BAR1 hole */ + ctl_status_2.s.bb1_hole = OCTEON_PCI_BAR1_HOLE_BITS; + ctl_status_2.s.bb1_siz = 1; /* BAR1 is 2GB */ + ctl_status_2.s.bb_ca = 1; /* Don't use L2 with big bars */ + ctl_status_2.s.bb_es = 1; /* Big bar in byte swap mode */ + ctl_status_2.s.bb1 = 1; /* BAR1 is big */ + ctl_status_2.s.bb0 = 1; /* BAR0 is big */ + } + + octeon_npi_write32(CVMX_NPI_PCI_CTL_STATUS_2, ctl_status_2.u32); + udelay(2000); /* Wait 2 ms before doing PCI reads */ + + ctl_status_2.u32 = octeon_npi_read32(CVMX_NPI_PCI_CTL_STATUS_2); + pr_notice("PCI Status: %s %s-bit\n", + ctl_status_2.s.ap_pcix ? "PCI-X" : "PCI", + ctl_status_2.s.ap_64ad ? "64" : "32"); + + if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) { + union cvmx_pci_cnt_reg cnt_reg_start; + union cvmx_pci_cnt_reg cnt_reg_end; + unsigned long cycles, pci_clock; + + cnt_reg_start.u64 = cvmx_read_csr(CVMX_NPI_PCI_CNT_REG); + cycles = read_c0_cvmcount(); + udelay(1000); + cnt_reg_end.u64 = cvmx_read_csr(CVMX_NPI_PCI_CNT_REG); + cycles = read_c0_cvmcount() - cycles; + pci_clock = (cnt_reg_end.s.pcicnt - cnt_reg_start.s.pcicnt) / + (cycles / (mips_hpt_frequency / 1000000)); + pr_notice("PCI Clock: %lu MHz\n", pci_clock); + } + + /* + * TDOMC must be set to one in PCI mode. TDOMC should be set to 4 + * in PCI-X mode to allow four oustanding splits. Otherwise, + * should not change from its reset value. Don't write PCI_CFG19 + * in PCI mode (0x82000001 reset value), write it to 0x82000004 + * after PCI-X mode is known. MRBCI,MDWE,MDRE -> must be zero. + * MRBCM -> must be one. + */ + if (ctl_status_2.s.ap_pcix) { + cfg19.u32 = 0; + /* + * Target Delayed/Split request outstanding maximum + * count. [1..31] and 0=32. NOTE: If the user + * programs these bits beyond the Designed Maximum + * outstanding count, then the designed maximum table + * depth will be used instead. No additional + * Deferred/Split transactions will be accepted if + * this outstanding maximum count is + * reached. Furthermore, no additional deferred/split + * transactions will be accepted if the I/O delay/ I/O + * Split Request outstanding maximum is reached. + */ + cfg19.s.tdomc = 4; + /* + * Master Deferred Read Request Outstanding Max Count + * (PCI only). CR4C[26:24] Max SAC cycles MAX DAC + * cycles 000 8 4 001 1 0 010 2 1 011 3 1 100 4 2 101 + * 5 2 110 6 3 111 7 3 For example, if these bits are + * programmed to 100, the core can support 2 DAC + * cycles, 4 SAC cycles or a combination of 1 DAC and + * 2 SAC cycles. NOTE: For the PCI-X maximum + * outstanding split transactions, refer to + * CRE0[22:20]. 
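		 * Per that table, the value 2 (010b) written below
		 * allows up to 2 outstanding SAC cycles or 1 DAC
		 * cycle.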
+ */ + cfg19.s.mdrrmc = 2; + /* + * Master Request (Memory Read) Byte Count/Byte Enable + * select. 0 = Byte Enables valid. In PCI mode, a + * burst transaction cannot be performed using Memory + * Read command=4?h6. 1 = DWORD Byte Count valid + * (default). In PCI Mode, the memory read byte + * enables are automatically generated by the + * core. Note: N3 Master Request transaction sizes are + * always determined through the + * am_attr[<35:32>|<7:0>] field. + */ + cfg19.s.mrbcm = 1; + octeon_npi_write32(CVMX_NPI_PCI_CFG19, cfg19.u32); + } + + + cfg01.u32 = 0; + cfg01.s.msae = 1; /* Memory Space Access Enable */ + cfg01.s.me = 1; /* Master Enable */ + cfg01.s.pee = 1; /* PERR# Enable */ + cfg01.s.see = 1; /* System Error Enable */ + cfg01.s.fbbe = 1; /* Fast Back to Back Transaction Enable */ + + octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32); + +#ifdef USE_OCTEON_INTERNAL_ARBITER + /* + * When OCTEON is a PCI host, most systems will use OCTEON's + * internal arbiter, so must enable it before any PCI/PCI-X + * traffic can occur. + */ + { + union cvmx_npi_pci_int_arb_cfg pci_int_arb_cfg; + + pci_int_arb_cfg.u64 = 0; + pci_int_arb_cfg.s.en = 1; /* Internal arbiter enable */ + cvmx_write_csr(CVMX_NPI_PCI_INT_ARB_CFG, pci_int_arb_cfg.u64); + } +#endif /* USE_OCTEON_INTERNAL_ARBITER */ + + /* + * Preferrably written to 1 to set MLTD. [RDSATI,TRTAE, + * TWTAE,TMAE,DPPMR -> must be zero. TILT -> must not be set to + * 1..7. + */ + cfg16.u32 = 0; + cfg16.s.mltd = 1; /* Master Latency Timer Disable */ + octeon_npi_write32(CVMX_NPI_PCI_CFG16, cfg16.u32); + + /* + * Should be written to 0x4ff00. MTTV -> must be zero. + * FLUSH -> must be 1. MRV -> should be 0xFF. + */ + cfg22.u32 = 0; + /* Master Retry Value [1..255] and 0=infinite */ + cfg22.s.mrv = 0xff; + /* + * AM_DO_FLUSH_I control NOTE: This bit MUST BE ONE for proper + * N3K operation. + */ + cfg22.s.flush = 1; + octeon_npi_write32(CVMX_NPI_PCI_CFG22, cfg22.u32); + + /* + * MOST Indicates the maximum number of outstanding splits (in -1 + * notation) when OCTEON is in PCI-X mode. PCI-X performance is + * affected by the MOST selection. Should generally be written + * with one of 0x3be807, 0x2be807, 0x1be807, or 0x0be807, + * depending on the desired MOST of 3, 2, 1, or 0, respectively. + */ + cfg56.u32 = 0; + cfg56.s.pxcid = 7; /* RO - PCI-X Capability ID */ + cfg56.s.ncp = 0xe8; /* RO - Next Capability Pointer */ + cfg56.s.dpere = 1; /* Data Parity Error Recovery Enable */ + cfg56.s.roe = 1; /* Relaxed Ordering Enable */ + cfg56.s.mmbc = 1; /* Maximum Memory Byte Count + [0=512B,1=1024B,2=2048B,3=4096B] */ + cfg56.s.most = 3; /* Maximum outstanding Split transactions [0=1 + .. 7=32] */ + + octeon_npi_write32(CVMX_NPI_PCI_CFG56, cfg56.u32); + + /* + * Affects PCI performance when OCTEON services reads to its + * BAR1/BAR2. Refer to Section 10.6.1. The recommended values are + * 0x22, 0x33, and 0x33 for PCI_READ_CMD_6, PCI_READ_CMD_C, and + * PCI_READ_CMD_E, respectively. Unfortunately due to errata DDR-700, + * these values need to be changed so they won't possibly prefetch off + * of the end of memory if PCI is DMAing a buffer at the end of + * memory. Note that these values differ from their reset values. 
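	 * That is why 0x21/0x31/0x31 are written below rather than
	 * the recommended 0x22/0x33/0x33.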
+ */ + octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_6, 0x21); + octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_C, 0x31); + octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_E, 0x31); +} + + +/** + * Initialize the Octeon PCI controller + * + * Returns + */ +static int __init octeon_pci_setup(void) +{ + union cvmx_npi_mem_access_subidx mem_access; + int index; + + /* Only these chips have PCI */ + if (octeon_has_feature(OCTEON_FEATURE_PCIE)) + return 0; + + /* Point pcibios_map_irq() to the PCI version of it */ + octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq; + + /* Only use the big bars on chips that support it */ + if (OCTEON_IS_MODEL(OCTEON_CN31XX) || + OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) || + OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1)) + octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_SMALL; + else + octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG; + + /* PCI I/O and PCI MEM values */ + set_io_port_base(OCTEON_PCI_IOSPACE_BASE); + ioport_resource.start = 0; + ioport_resource.end = OCTEON_PCI_IOSPACE_SIZE - 1; + if (!octeon_is_pci_host()) { + pr_notice("Not in host mode, PCI Controller not initialized\n"); + return 0; + } + + pr_notice("%s Octeon big bar support\n", + (octeon_dma_bar_type == + OCTEON_DMA_BAR_TYPE_BIG) ? "Enabling" : "Disabling"); + + octeon_pci_initialize(); + + mem_access.u64 = 0; + mem_access.s.esr = 1; /* Endian-Swap on read. */ + mem_access.s.esw = 1; /* Endian-Swap on write. */ + mem_access.s.nsr = 0; /* No-Snoop on read. */ + mem_access.s.nsw = 0; /* No-Snoop on write. */ + mem_access.s.ror = 0; /* Relax Read on read. */ + mem_access.s.row = 0; /* Relax Order on write. */ + mem_access.s.ba = 0; /* PCI Address bits [63:36]. */ + cvmx_write_csr(CVMX_NPI_MEM_ACCESS_SUBID3, mem_access.u64); + + /* + * Remap the Octeon BAR 2 above all 32 bit devices + * (0x8000000000ul). This is done here so it is remapped + * before the readl()'s below. We don't want BAR2 overlapping + * with BAR0/BAR1 during these reads. + */ + octeon_npi_write32(CVMX_NPI_PCI_CFG08, 0); + octeon_npi_write32(CVMX_NPI_PCI_CFG09, 0x80); + + /* Disable the BAR1 movable mappings */ + for (index = 0; index < 32; index++) + octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), 0); + + if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) { + /* Remap the Octeon BAR 0 to 0-2GB */ + octeon_npi_write32(CVMX_NPI_PCI_CFG04, 0); + octeon_npi_write32(CVMX_NPI_PCI_CFG05, 0); + + /* + * Remap the Octeon BAR 1 to map 2GB-4GB (minus the + * BAR 1 hole). + */ + octeon_npi_write32(CVMX_NPI_PCI_CFG06, 2ul << 30); + octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0); + + /* Devices go after BAR1 */ + octeon_pci_mem_resource.start = + OCTEON_PCI_MEMSPACE_OFFSET + (4ul << 30) - + (OCTEON_PCI_BAR1_HOLE_SIZE << 20); + octeon_pci_mem_resource.end = + octeon_pci_mem_resource.start + (1ul << 30); + } else { + /* Remap the Octeon BAR 0 to map 128MB-(128MB+4KB) */ + octeon_npi_write32(CVMX_NPI_PCI_CFG04, 128ul << 20); + octeon_npi_write32(CVMX_NPI_PCI_CFG05, 0); + + /* Remap the Octeon BAR 1 to map 0-128MB */ + octeon_npi_write32(CVMX_NPI_PCI_CFG06, 0); + octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0); + + /* Devices go after BAR0 */ + octeon_pci_mem_resource.start = + OCTEON_PCI_MEMSPACE_OFFSET + (128ul << 20) + + (4ul << 10); + octeon_pci_mem_resource.end = + octeon_pci_mem_resource.start + (1ul << 30); + } + + register_pci_controller(&octeon_pci_controller); + + /* + * Clear any errors that might be pending from before the bus + * was setup properly. 
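	 * Writing all ones acknowledges every latched source at once
	 * (the summary bits are write-one-to-clear).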
+ */ + cvmx_write_csr(CVMX_NPI_PCI_INT_SUM2, -1); + return 0; +} + +arch_initcall(octeon_pci_setup); diff --git a/arch/mips/cavium-octeon/pcie.c b/arch/mips/cavium-octeon/pcie.c new file mode 100644 index 0000000..49d1408 --- /dev/null +++ b/arch/mips/cavium-octeon/pcie.c @@ -0,0 +1,1370 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2007, 2008 Cavium Networks + */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/time.h> +#include <linux/delay.h> + +#include <asm/octeon/octeon.h> +#include <asm/octeon/cvmx-npei-defs.h> +#include <asm/octeon/cvmx-pciercx-defs.h> +#include <asm/octeon/cvmx-pescx-defs.h> +#include <asm/octeon/cvmx-pexp-defs.h> +#include <asm/octeon/cvmx-helper-errata.h> + +#include "pci-common.h" + +union cvmx_pcie_address { + uint64_t u64; + struct { + uint64_t upper:2; /* Normally 2 for XKPHYS */ + uint64_t reserved_49_61:13; /* Must be zero */ + uint64_t io:1; /* 1 for IO space access */ + uint64_t did:5; /* PCIe DID = 3 */ + uint64_t subdid:3; /* PCIe SubDID = 1 */ + uint64_t reserved_36_39:4; /* Must be zero */ + uint64_t es:2; /* Endian swap = 1 */ + uint64_t port:2; /* PCIe port 0,1 */ + uint64_t reserved_29_31:3; /* Must be zero */ + /* + * Selects the type of the configuration request (0 = type 0, + * 1 = type 1). + */ + uint64_t ty:1; + /* Target bus number sent in the ID in the request. */ + uint64_t bus:8; + /* + * Target device number sent in the ID in the + * request. Note that Dev must be zero for type 0 + * configuration requests. + */ + uint64_t dev:5; + /* Target function number sent in the ID in the request. */ + uint64_t func:3; + /* + * Selects a register in the configuration space of + * the target. + */ + uint64_t reg:12; + } config; + struct { + uint64_t upper:2; /* Normally 2 for XKPHYS */ + uint64_t reserved_49_61:13; /* Must be zero */ + uint64_t io:1; /* 1 for IO space access */ + uint64_t did:5; /* PCIe DID = 3 */ + uint64_t subdid:3; /* PCIe SubDID = 2 */ + uint64_t reserved_36_39:4; /* Must be zero */ + uint64_t es:2; /* Endian swap = 1 */ + uint64_t port:2; /* PCIe port 0,1 */ + uint64_t address:32; /* PCIe IO address */ + } io; + struct { + uint64_t upper:2; /* Normally 2 for XKPHYS */ + uint64_t reserved_49_61:13; /* Must be zero */ + uint64_t io:1; /* 1 for IO space access */ + uint64_t did:5; /* PCIe DID = 3 */ + uint64_t subdid:3; /* PCIe SubDID = 3-6 */ + uint64_t reserved_36_39:4; /* Must be zero */ + uint64_t address:36; /* PCIe Mem address */ + } mem; +}; + +/** + * Return the Core virtual base address for PCIe IO access. IOs are + * read/written as an offset from this address. 
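 * For example, a one-byte read of IO port 0x20 behind PCIe port 0
 * becomes a load from cvmx_pcie_get_io_base_address(0) + 0x20.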
+ * + * @pcie_port: PCIe port the IO is for + * + * Returns 64bit Octeon IO base address for read/write + */ +static inline uint64_t cvmx_pcie_get_io_base_address(int pcie_port) +{ + union cvmx_pcie_address pcie_addr; + pcie_addr.u64 = 0; + pcie_addr.io.upper = 0; + pcie_addr.io.io = 1; + pcie_addr.io.did = 3; + pcie_addr.io.subdid = 2; + pcie_addr.io.es = 1; + pcie_addr.io.port = pcie_port; + return pcie_addr.u64; +} + +/** + * Size of the IO address region returned at address + * cvmx_pcie_get_io_base_address() + * + * @pcie_port: PCIe port the IO is for + * + * Returns Size of the IO window + */ +static inline uint64_t cvmx_pcie_get_io_size(int pcie_port) +{ + return 1ull << 32; +} + +/** + * Return the Core virtual base address for PCIe MEM access. Memory is + * read/written as an offset from this address. + * + * @pcie_port: PCIe port the IO is for + * + * Returns 64bit Octeon IO base address for read/write + */ +static inline uint64_t cvmx_pcie_get_mem_base_address(int pcie_port) +{ + union cvmx_pcie_address pcie_addr; + pcie_addr.u64 = 0; + pcie_addr.mem.upper = 0; + pcie_addr.mem.io = 1; + pcie_addr.mem.did = 3; + pcie_addr.mem.subdid = 3 + pcie_port; + return pcie_addr.u64; +} + +/** + * Size of the Mem address region returned at address + * cvmx_pcie_get_mem_base_address() + * + * @pcie_port: PCIe port the IO is for + * + * Returns Size of the Mem window + */ +static inline uint64_t cvmx_pcie_get_mem_size(int pcie_port) +{ + return 1ull << 36; +} + +/** + * Read a PCIe config space register indirectly. This is used for + * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???. + * + * @pcie_port: PCIe port to read from + * @cfg_offset: Address to read + * + * Returns Value read + */ +static uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset) +{ + union cvmx_pescx_cfg_rd pescx_cfg_rd; + pescx_cfg_rd.u64 = 0; + pescx_cfg_rd.s.addr = cfg_offset; + cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), pescx_cfg_rd.u64); + pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port)); + return pescx_cfg_rd.s.data; +} + +/** + * Write a PCIe config space register indirectly. This is used for + * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???. 
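 * Like the read path above, the access goes through the indirect
 * PESCX_CFG_WR CSR: the register offset is placed in the addr field
 * and the new value in the data field of a single 64-bit write.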
+ * + * @pcie_port: PCIe port to write to + * @cfg_offset: Address to write + * @val: Value to write + */ +static void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset, + uint32_t val) +{ + union cvmx_pescx_cfg_wr pescx_cfg_wr; + pescx_cfg_wr.u64 = 0; + pescx_cfg_wr.s.addr = cfg_offset; + pescx_cfg_wr.s.data = val; + cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), pescx_cfg_wr.u64); +} + +/** + * Build a PCIe config space request address for a device + * + * @pcie_port: PCIe port to access + * @bus: Sub bus + * @dev: Device ID + * @fn: Device sub function + * @reg: Register to access + * + * Returns 64bit Octeon IO address + */ +static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus, + int dev, int fn, int reg) +{ + union cvmx_pcie_address pcie_addr; + union cvmx_pciercx_cfg006 pciercx_cfg006; + + pciercx_cfg006.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port)); + if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0)) + return 0; + + pcie_addr.u64 = 0; + pcie_addr.config.upper = 2; + pcie_addr.config.io = 1; + pcie_addr.config.did = 3; + pcie_addr.config.subdid = 1; + pcie_addr.config.es = 1; + pcie_addr.config.port = pcie_port; + pcie_addr.config.ty = (bus > pciercx_cfg006.s.pbnum); + pcie_addr.config.bus = bus; + pcie_addr.config.dev = dev; + pcie_addr.config.func = fn; + pcie_addr.config.reg = reg; + return pcie_addr.u64; +} + +/** + * Read 8bits from a Device's config space + * + * @pcie_port: PCIe port the device is on + * @bus: Sub bus + * @dev: Device ID + * @fn: Device sub function + * @reg: Register to access + * + * Returns Result of the read + */ +static uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev, + int fn, int reg) +{ + uint64_t address = + __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg); + if (address) + return cvmx_read64_uint8(address); + else + return 0xff; +} + +/** + * Read 16bits from a Device's config space + * + * @pcie_port: PCIe port the device is on + * @bus: Sub bus + * @dev: Device ID + * @fn: Device sub function + * @reg: Register to access + * + * Returns Result of the read + */ +static uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev, + int fn, int reg) +{ + uint64_t address = + __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg); + if (address) + return le16_to_cpu(cvmx_read64_uint16(address)); + else + return 0xffff; +} + +/** + * Read 32bits from a Device's config space + * + * @pcie_port: PCIe port the device is on + * @bus: Sub bus + * @dev: Device ID + * @fn: Device sub function + * @reg: Register to access + * + * Returns Result of the read + */ +static uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev, + int fn, int reg) +{ + uint64_t address = + __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg); + if (address) + return le32_to_cpu(cvmx_read64_uint32(address)); + else + return 0xffffffff; +} + +/** + * Write 8bits to a Device's config space + * + * @pcie_port: PCIe port the device is on + * @bus: Sub bus + * @dev: Device ID + * @fn: Device sub function + * @reg: Register to access + * @val: Value to write + */ +static void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn, + int reg, uint8_t val) +{ + uint64_t address = + __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg); + if (address) + cvmx_write64_uint8(address, val); +} + +/** + * Write 16bits to a Device's config space + * + * @pcie_port: PCIe port the device is on + * @bus: Sub bus + * @dev: Device ID + * @fn: Device sub function + * @reg: Register to 
access + * @val: Value to write + */ +static void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn, + int reg, uint16_t val) +{ + uint64_t address = + __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg); + if (address) + cvmx_write64_uint16(address, cpu_to_le16(val)); +} + +/** + * Write 32bits to a Device's config space + * + * @pcie_port: PCIe port the device is on + * @bus: Sub bus + * @dev: Device ID + * @fn: Device sub function + * @reg: Register to access + * @val: Value to write + */ +static void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn, + int reg, uint32_t val) +{ + uint64_t address = + __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg); + if (address) + cvmx_write64_uint32(address, cpu_to_le32(val)); +} + +/** + * Initialize the RC config space CSRs + * + * @pcie_port: PCIe port to initialize + */ +static void __cvmx_pcie_rc_initialize_config_space(int pcie_port) +{ + union cvmx_pciercx_cfg030 pciercx_cfg030; + union cvmx_npei_ctl_status2 npei_ctl_status2; + union cvmx_pciercx_cfg070 pciercx_cfg070; + union cvmx_pciercx_cfg001 pciercx_cfg001; + union cvmx_pciercx_cfg032 pciercx_cfg032; + union cvmx_pciercx_cfg006 pciercx_cfg006; + union cvmx_pciercx_cfg008 pciercx_cfg008; + union cvmx_pciercx_cfg009 pciercx_cfg009; + union cvmx_pciercx_cfg010 pciercx_cfg010; + union cvmx_pciercx_cfg011 pciercx_cfg011; + union cvmx_pciercx_cfg035 pciercx_cfg035; + union cvmx_pciercx_cfg075 pciercx_cfg075; + union cvmx_pciercx_cfg034 pciercx_cfg034; + + /* Max Payload Size (PCIE*_CFG030[MPS]) */ + /* Max Read Request Size (PCIE*_CFG030[MRRS]) */ + /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */ + /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */ + pciercx_cfg030.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port)); + /* + * Max payload size = 128 bytes for best Octeon DMA + * performance. + */ + pciercx_cfg030.s.mps = 0; + /* + * Max read request size = 128 bytes for best Octeon DMA + * performance. + */ + pciercx_cfg030.s.mrrs = 0; + /* Enable relaxed ordering. */ + pciercx_cfg030.s.ro_en = 1; + /* Enable no snoop. */ + pciercx_cfg030.s.ns_en = 1; + /* Correctable error reporting enable. */ + pciercx_cfg030.s.ce_en = 1; + /* Non-fatal error reporting enable. */ + pciercx_cfg030.s.nfe_en = 1; + /* Fatal error reporting enable. */ + pciercx_cfg030.s.fe_en = 1; + /* Unsupported request reporting enable. */ + pciercx_cfg030.s.ur_en = 1; + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), + pciercx_cfg030.u32); + + /* + * Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match + * PCIE*_CFG030[MPS] + * + * Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not + * exceed PCIE*_CFG030[MRRS]. + */ + npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2); + /* Max payload size = 128 bytes for best Octeon DMA performance */ + npei_ctl_status2.s.mps = 0; + /* Max read request size = 128 bytes for best Octeon DMA performance */ + npei_ctl_status2.s.mrrs = 0; + cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64); + + /* ECRC Generation (PCIE*_CFG070[GE,CE]) */ + pciercx_cfg070.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG070(pcie_port)); + pciercx_cfg070.s.ge = 1; /* ECRC generation enable. */ + pciercx_cfg070.s.ce = 1; /* ECRC check enable. */ + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG070(pcie_port), + pciercx_cfg070.u32); + + /* + * Access Enables (PCIE*_CFG001[MSAE,ME]) ME and MSAE should + * always be set. 
+ * + * Interrupt Disable (PCIE*_CFG001[I_DIS]) System Error + * Message Enable (PCIE*_CFG001[SEE]) + */ + pciercx_cfg001.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG001(pcie_port)); + pciercx_cfg001.s.msae = 1; /* Memory space enable. */ + pciercx_cfg001.s.me = 1; /* Bus master enable. */ + pciercx_cfg001.s.i_dis = 1; /* INTx assertion disable. */ + pciercx_cfg001.s.see = 1; /* SERR# enable */ + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG001(pcie_port), + pciercx_cfg001.u32); + + /* Advanced Error Recovery Message Enables */ + /* (PCIE*_CFG066,PCIE*_CFG067,PCIE*_CFG069) */ + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG066(pcie_port), 0); + /* Use CVMX_PCIERCX_CFG067 hardware default */ + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG069(pcie_port), 0); + + /* Active State Power Management (PCIE*_CFG032[ASLPC]) */ + pciercx_cfg032.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port)); + pciercx_cfg032.s.aslpc = 0; /* Active state Link PM control. */ + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG032(pcie_port), + pciercx_cfg032.u32); + + /* Entrance Latencies (PCIE*_CFG451[L0EL,L1EL]) */ + + /* + * Link Width Mode (PCIERCn_CFG452[LME]) - Set during + * cvmx_pcie_rc_initialize_link() + * + * Primary Bus Number (PCIERCn_CFG006[PBNUM]) + * + * We set the primary bus number to 1 so IDT bridges are + * happy. They don't like zero. + */ + pciercx_cfg006.u32 = 0; + pciercx_cfg006.s.pbnum = 1; + pciercx_cfg006.s.sbnum = 1; + pciercx_cfg006.s.subbnum = 1; + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG006(pcie_port), + pciercx_cfg006.u32); + + /* + * Memory-mapped I/O BAR (PCIERCn_CFG008) + * Most applications should disable the memory-mapped I/O BAR by + * setting PCIERCn_CFG008[ML_ADDR] < PCIERCn_CFG008[MB_ADDR] + */ + pciercx_cfg008.u32 = 0; + pciercx_cfg008.s.mb_addr = 0x100; + pciercx_cfg008.s.ml_addr = 0; + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG008(pcie_port), + pciercx_cfg008.u32); + + /* + * Prefetchable BAR (PCIERCn_CFG009,PCIERCn_CFG010,PCIERCn_CFG011) + * Most applications should disable the prefetchable BAR by setting + * PCIERCn_CFG011[UMEM_LIMIT],PCIERCn_CFG009[LMEM_LIMIT] < + * PCIERCn_CFG010[UMEM_BASE],PCIERCn_CFG009[LMEM_BASE] + */ + pciercx_cfg009.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG009(pcie_port)); + pciercx_cfg010.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG010(pcie_port)); + pciercx_cfg011.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG011(pcie_port)); + pciercx_cfg009.s.lmem_base = 0x100; + pciercx_cfg009.s.lmem_limit = 0; + pciercx_cfg010.s.umem_base = 0x100; + pciercx_cfg011.s.umem_limit = 0; + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG009(pcie_port), + pciercx_cfg009.u32); + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG010(pcie_port), + pciercx_cfg010.u32); + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG011(pcie_port), + pciercx_cfg011.u32); + + /* + * System Error Interrupt Enables (PCIERCn_CFG035[SECEE,SEFEE,SENFEE]) + * PME Interrupt Enables (PCIERCn_CFG035[PMEIE]) + */ + pciercx_cfg035.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG035(pcie_port)); + /* System error on correctable error enable. */ + pciercx_cfg035.s.secee = 1; + /* System error on fatal error enable. */ + pciercx_cfg035.s.sefee = 1; + /* System error on non-fatal error enable. */ + pciercx_cfg035.s.senfee = 1; + /* PME interrupt enable. 
*/ + pciercx_cfg035.s.pmeie = 1; + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG035(pcie_port), + pciercx_cfg035.u32); + + /* + * Advanced Error Recovery Interrupt Enables + * (PCIERCn_CFG075[CERE,NFERE,FERE]) + */ + pciercx_cfg075.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG075(pcie_port)); + /* Correctable error reporting enable. */ + pciercx_cfg075.s.cere = 1; + /* Non-fatal error reporting enable. */ + pciercx_cfg075.s.nfere = 1; + /* Fatal error reporting enable. */ + pciercx_cfg075.s.fere = 1; + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG075(pcie_port), + pciercx_cfg075.u32); + + /* HP Interrupt Enables (PCIERCn_CFG034[HPINT_EN], + * PCIERCn_CFG034[DLLS_EN,CCINT_EN]) + */ + pciercx_cfg034.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG034(pcie_port)); + /* Hot-plug interrupt enable. */ + pciercx_cfg034.s.hpint_en = 1; + /* Data Link Layer state changed enable */ + pciercx_cfg034.s.dlls_en = 1; + /* Command completed interrupt enable. */ + pciercx_cfg034.s.ccint_en = 1; + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG034(pcie_port), + pciercx_cfg034.u32); +} + +/** + * Initialize a host mode PCIe link. This function takes a PCIe + * port from reset to a link up state. Software can then begin + * configuring the rest of the link. + * + * @pcie_port: PCIe port to initialize + * + * Returns Zero on success + */ +static int __cvmx_pcie_rc_initialize_link(int pcie_port) +{ + uint64_t start_cycle; + union cvmx_pescx_ctl_status pescx_ctl_status; + union cvmx_pciercx_cfg452 pciercx_cfg452; + union cvmx_pciercx_cfg032 pciercx_cfg032; + union cvmx_pciercx_cfg448 pciercx_cfg448; + + /* Set the lane width */ + pciercx_cfg452.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port)); + pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port)); + if (pescx_ctl_status.s.qlm_cfg == 0) { + /* We're in 8 lane (56XX) or 4 lane (54XX) mode */ + pciercx_cfg452.s.lme = 0xf; + } else { + /* We're in 4 lane (56XX) or 2 lane (52XX) mode */ + pciercx_cfg452.s.lme = 0x7; + } + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port), + pciercx_cfg452.u32); + + /* + * CN52XX pass 1.x has an errata where length mismatches on UR + * responses can cause bus errors on 64bit memory + * reads. Turning off length error checking fixes this. + */ + if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) { + union cvmx_pciercx_cfg455 pciercx_cfg455; + pciercx_cfg455.u32 = + cvmx_pcie_cfgx_read(pcie_port, + CVMX_PCIERCX_CFG455(pcie_port)); + pciercx_cfg455.s.m_cpl_len_err = 1; + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port), + pciercx_cfg455.u32); + } + + /* Lane swap needs to be manually enabled for CN52XX */ + if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1)) { + pescx_ctl_status.s.lane_swp = 1; + cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), + pescx_ctl_status.u64); + } + + /* Bring up the link */ + pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port)); + pescx_ctl_status.s.lnk_enb = 1; + cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64); + + /* + * CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to + * be disabled. 
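	 * (CDR here is the clock/data recovery loop in the SerDes
	 * lanes; only its second-order stage is turned off.)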
+ */ + if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0)) + __cvmx_helper_errata_qlm_disable_2nd_order_cdr(0); + + /* Wait for the link to come up */ + cvmx_dprintf("PCIe: Waiting for port %d link\n", pcie_port); + start_cycle = cvmx_get_cycle(); + do { + if (cvmx_get_cycle() - start_cycle > + 2 * cvmx_sysinfo_get()->cpu_clock_hz) { + cvmx_dprintf("PCIe: Port %d link timeout\n", + pcie_port); + return -1; + } + cvmx_wait(10000); + pciercx_cfg032.u32 = + cvmx_pcie_cfgx_read(pcie_port, + CVMX_PCIERCX_CFG032(pcie_port)); + } while (pciercx_cfg032.s.dlla == 0); + + /* Display the link status */ + cvmx_dprintf("PCIe: Port %d link active, %d lanes\n", pcie_port, + pciercx_cfg032.s.nlw); + + /* + * Update the Replay Time Limit. Empirically, some PCIe + * devices take a little longer to respond than expected under + * load. As a workaround for this we configure the Replay Time + * Limit to the value expected for a 512 byte MPS instead of + * our actual 256 byte MPS. The numbers below are directly + * from the PCIe spec table 3-4. + */ + pciercx_cfg448.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port)); + switch (pciercx_cfg032.s.nlw) { + case 1: /* 1 lane */ + pciercx_cfg448.s.rtl = 1677; + break; + case 2: /* 2 lanes */ + pciercx_cfg448.s.rtl = 867; + break; + case 4: /* 4 lanes */ + pciercx_cfg448.s.rtl = 462; + break; + case 8: /* 8 lanes */ + pciercx_cfg448.s.rtl = 258; + break; + } + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), + pciercx_cfg448.u32); + + return 0; +} + +/** + * Initialize a PCIe port for use in host(RC) mode. It doesn't + * enumerate the bus. + * + * @pcie_port: PCIe port to initialize + * + * Returns Zero on success + */ +static int cvmx_pcie_rc_initialize(int pcie_port) +{ + int i; + union cvmx_ciu_soft_prst ciu_soft_prst; + union cvmx_pescx_bist_status pescx_bist_status; + union cvmx_pescx_bist_status2 pescx_bist_status2; + union cvmx_npei_ctl_status npei_ctl_status; + union cvmx_npei_mem_access_ctl npei_mem_access_ctl; + union cvmx_npei_mem_access_subidx mem_access_subid; + union cvmx_npei_dbg_data npei_dbg_data; + union cvmx_pescx_ctl_status2 pescx_ctl_status2; + + /* + * Make sure we aren't trying to setup a target mode interface + * in host mode. + */ + npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS); + if ((pcie_port == 0) && !npei_ctl_status.s.host_mode) { + cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called " + "on port0, but port0 is not in host mode\n"); + return -1; + } + + /* + * Make sure a CN52XX isn't trying to bring up port 1 when it + * is disabled. + */ + if (OCTEON_IS_MODEL(OCTEON_CN52XX)) { + npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA); + if ((pcie_port == 1) && npei_dbg_data.cn52xx.qlm0_link_width) { + cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() " + "called on port1, but port1 is " + "disabled\n"); + return -1; + } + } + + /* + * PCIe switch arbitration mode. '0' == fixed priority NPEI, + * PCIe0, then PCIe1. '1' == round robin. + */ + npei_ctl_status.s.arb = 1; + /* Allow up to 0x20 config retries */ + npei_ctl_status.s.cfg_rtry = 0x20; + /* + * CN52XX pass1.x has an errata where P0_NTAGS and P1_NTAGS + * don't reset. 
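	 * Writing the intended power-on value of 0x20 tags per port
	 * below works around that.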
+ */ + if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) { + npei_ctl_status.s.p0_ntags = 0x20; + npei_ctl_status.s.p1_ntags = 0x20; + } + cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS, npei_ctl_status.u64); + + /* Bring the PCIe out of reset */ + if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) { + /* + * The EBH5200 board swapped the PCIe reset lines on + * the board. As a workaround for this bug, we bring + * both PCIe ports out of reset at the same time + * instead of on separate calls. So for port 0, we + * bring both out of reset and do nothing on port 1. + */ + if (pcie_port == 0) { + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST); + /* + * After a chip reset the PCIe will also be in + * reset. If it isn't, most likely someone is + * trying to init it again without a proper + * PCIe reset. + */ + if (ciu_soft_prst.s.soft_prst == 0) { + /* Reset the ports */ + ciu_soft_prst.s.soft_prst = 1; + cvmx_write_csr(CVMX_CIU_SOFT_PRST, + ciu_soft_prst.u64); + ciu_soft_prst.u64 = + cvmx_read_csr(CVMX_CIU_SOFT_PRST1); + ciu_soft_prst.s.soft_prst = 1; + cvmx_write_csr(CVMX_CIU_SOFT_PRST1, + ciu_soft_prst.u64); + /* Wait until pcie resets the ports. */ + udelay(2000); + } + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1); + ciu_soft_prst.s.soft_prst = 0; + cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64); + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST); + ciu_soft_prst.s.soft_prst = 0; + cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64); + } + } else { + /* + * The normal case: The PCIe ports are completely + * separate and can be brought out of reset + * independently. + */ + if (pcie_port) + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1); + else + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST); + /* + * After a chip reset the PCIe will also be in + * reset. If it isn't, most likely someone is trying + * to init it again without a proper PCIe reset. + */ + if (ciu_soft_prst.s.soft_prst == 0) { + /* Reset the port */ + ciu_soft_prst.s.soft_prst = 1; + if (pcie_port) + cvmx_write_csr(CVMX_CIU_SOFT_PRST1, + ciu_soft_prst.u64); + else + cvmx_write_csr(CVMX_CIU_SOFT_PRST, + ciu_soft_prst.u64); + /* Wait until pcie resets the ports. */ + udelay(2000); + } + if (pcie_port) { + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1); + ciu_soft_prst.s.soft_prst = 0; + cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64); + } else { + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST); + ciu_soft_prst.s.soft_prst = 0; + cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64); + } + } + + /* + * Wait for PCIe reset to complete. Due to errata PCIE-700, we + * don't poll PESCX_CTL_STATUS2[PCIERST], but simply wait a + * fixed number of cycles. + */ + cvmx_wait(400000); + + /* PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of CN56XX and + CN52XX, so we only probe it on newer chips */ + if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) + && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) { + /* Clear PCLK_RUN so we can check if the clock is running */ + pescx_ctl_status2.u64 = + cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port)); + pescx_ctl_status2.s.pclk_run = 1; + cvmx_write_csr(CVMX_PESCX_CTL_STATUS2(pcie_port), + pescx_ctl_status2.u64); + /* + * Now that we cleared PCLK_RUN, wait for it to be set + * again telling us the clock is running. 
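		 * (The bit is write-one-to-clear and is set again by
		 * hardware on PCIe clock activity, so its return to 1
		 * proves the reference clock is alive.)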
+ */ + if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CTL_STATUS2(pcie_port), + union cvmx_pescx_ctl_status2, + pclk_run, ==, 1, 10000)) { + cvmx_dprintf("PCIe: Port %d isn't clocked, skipping.\n", + pcie_port); + return -1; + } + } + + /* + * Check and make sure PCIe came out of reset. If it doesn't + * the board probably hasn't wired the clocks up and the + * interface should be skipped. + */ + pescx_ctl_status2.u64 = + cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port)); + if (pescx_ctl_status2.s.pcierst) { + cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", + pcie_port); + return -1; + } + + /* + * Check BIST2 status. If any bits are set skip this interface. This + * is an attempt to catch PCIE-813 on pass 1 parts. + */ + pescx_bist_status2.u64 = + cvmx_read_csr(CVMX_PESCX_BIST_STATUS2(pcie_port)); + if (pescx_bist_status2.u64) { + cvmx_dprintf("PCIe: Port %d BIST2 failed. Most likely this " + "port isn't hooked up, skipping.\n", + pcie_port); + return -1; + } + + /* Check BIST status */ + pescx_bist_status.u64 = + cvmx_read_csr(CVMX_PESCX_BIST_STATUS(pcie_port)); + if (pescx_bist_status.u64) + cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", + pcie_port, CAST64(pescx_bist_status.u64)); + + /* Initialize the config space CSRs */ + __cvmx_pcie_rc_initialize_config_space(pcie_port); + + /* Bring the link up */ + if (__cvmx_pcie_rc_initialize_link(pcie_port)) { + cvmx_dprintf + ("PCIe: ERROR: cvmx_pcie_rc_initialize_link() failed\n"); + return -1; + } + + /* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */ + npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL); + /* Allow 16 words to combine */ + npei_mem_access_ctl.s.max_word = 0; + /* Wait up to 127 cycles for more data */ + npei_mem_access_ctl.s.timer = 127; + cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64); + + /* Setup Mem access SubDIDs */ + mem_access_subid.u64 = 0; + /* Port the request is sent to. */ + mem_access_subid.s.port = pcie_port; + /* Due to an errata on pass 1 chips, no merging is allowed. */ + mem_access_subid.s.nmerge = 1; + /* Endian-swap for Reads. */ + mem_access_subid.s.esr = 1; + /* Endian-swap for Writes. */ + mem_access_subid.s.esw = 1; + /* No Snoop for Reads. */ + mem_access_subid.s.nsr = 1; + /* No Snoop for Writes. */ + mem_access_subid.s.nsw = 1; + /* Disable Relaxed Ordering for Reads. */ + mem_access_subid.s.ror = 0; + /* Disable Relaxed Ordering for Writes. */ + mem_access_subid.s.row = 0; + /* PCIe Adddress Bits <63:34>. */ + mem_access_subid.s.ba = 0; + + /* + * Setup mem access 12-15 for port 0, 16-19 for port 1, + * supplying 36 bits of address space. + */ + for (i = 12 + pcie_port * 4; i < 16 + pcie_port * 4; i++) { + cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(i), + mem_access_subid.u64); + /* Set each SUBID to extend the addressable range */ + mem_access_subid.s.ba += 1; + } + + /* + * Disable the peer to peer forwarding register. This must be + * setup by the OS after it enumerates the bus and assigns + * addresses to the PCIe busses. + */ + for (i = 0; i < 4; i++) { + cvmx_write_csr(CVMX_PESCX_P2P_BARX_START(i, pcie_port), -1); + cvmx_write_csr(CVMX_PESCX_P2P_BARX_END(i, pcie_port), -1); + } + + /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */ + cvmx_write_csr(CVMX_PESCX_P2N_BAR0_START(pcie_port), 0); + + /* + * Disable Octeon's BAR1. It isn't needed in RC mode since + * BAR2 maps all of memory. BAR2 also maps 256MB-512MB into + * the 2nd 256MB of memory. 
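	 * Writing all ones to the BAR1 start address below pushes it
	 * out of the decoded range, effectively disabling it.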
+ */ + cvmx_write_csr(CVMX_PESCX_P2N_BAR1_START(pcie_port), -1); + + /* + * Set Octeon's BAR2 to decode 0-2^39. Bar0 and Bar1 take + * precedence where they overlap. It also overlaps with the + * device addresses, so make sure the peer to peer forwarding + * is set right. + */ + cvmx_write_csr(CVMX_PESCX_P2N_BAR2_START(pcie_port), 0); + + /* + * Setup BAR2 attributes + * + * Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) + * - PTLP_RO,CTLP_RO should normally be set (except for debug). + * - WAIT_COM=0 will likely work for all applications. + * + * Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]). + */ + if (pcie_port) { + union cvmx_npei_ctl_port1 npei_ctl_port; + npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT1); + npei_ctl_port.s.bar2_enb = 1; + npei_ctl_port.s.bar2_esx = 1; + npei_ctl_port.s.bar2_cax = 0; + npei_ctl_port.s.ptlp_ro = 1; + npei_ctl_port.s.ctlp_ro = 1; + npei_ctl_port.s.wait_com = 0; + npei_ctl_port.s.waitl_com = 0; + cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT1, npei_ctl_port.u64); + } else { + union cvmx_npei_ctl_port0 npei_ctl_port; + npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT0); + npei_ctl_port.s.bar2_enb = 1; + npei_ctl_port.s.bar2_esx = 1; + npei_ctl_port.s.bar2_cax = 0; + npei_ctl_port.s.ptlp_ro = 1; + npei_ctl_port.s.ctlp_ro = 1; + npei_ctl_port.s.wait_com = 0; + npei_ctl_port.s.waitl_com = 0; + cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT0, npei_ctl_port.u64); + } + return 0; +} + + +/* Above was cvmx-pcie.c, below original pcie.c */ + + +/** + * Map a PCI device to the appropriate interrupt line + * + * @param dev The Linux PCI device structure for the device to map + * @param slot The slot number for this device on __BUS 0__. Linux + * enumerates through all the bridges and figures out the + * slot on Bus 0 where this device eventually hooks to. + * @param pin The PCI interrupt pin read from the device, then swizzled + * as it goes through each bridge. + * @return Interrupt number for the device + */ +int __init octeon_pcie_pcibios_map_irq(const struct pci_dev *dev, + u8 slot, u8 pin) +{ + /* + * The EBH5600 board with the PCI to PCIe bridge mistakenly + * wires the first slot for both device id 2 and interrupt + * A. According to the PCI spec, device id 2 should be C. The + * following kludge attempts to fix this. + */ + if (strstr(octeon_board_type_string(), "EBH5600") && + dev->bus && dev->bus->parent) { + /* + * Iterate all the way up the device chain and find + * the root bus. + */ + while (dev->bus && dev->bus->parent) + dev = to_pci_dev(dev->bus->bridge); + /* If the root bus is number 0 and the PEX 8114 is the + * root, assume we are behind the miswired bus. We + * need to correct the swizzle level by two. Yuck. + */ + if ((dev->bus->number == 0) && + (dev->vendor == 0x10b5) && (dev->device == 0x8114)) { + /* + * The pin field is one based, not zero. We + * need to swizzle it by minus two. + */ + pin = ((pin - 3) & 3) + 1; + } + } + /* + * The -1 is because pin starts with one, not zero. It might + * be that this equation needs to include the slot number, but + * I don't have hardware to check that against. 
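	 * As written, INTA (pin 1) maps to OCTEON_IRQ_PCI_INT0 and
	 * INTD (pin 4) to OCTEON_IRQ_PCI_INT3.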
+ */ + return pin - 1 + OCTEON_IRQ_PCI_INT0; +} + +/** + * Read a value from configuration space + * + * @param bus + * @param devfn + * @param reg + * @param size + * @param val + * @return + */ +static inline int octeon_pcie_read_config(int pcie_port, struct pci_bus *bus, + unsigned int devfn, int reg, int size, + u32 *val) +{ + union octeon_cvmemctl cvmmemctl; + union octeon_cvmemctl cvmmemctl_save; + int bus_number = bus->number; + + /* + * We need to force the bus number to be zero on the root + * bus. Linux numbers the 2nd root bus to start after all + * buses on root 0. + */ + if (bus->parent == NULL) + bus_number = 0; + + /* + * PCIe only has a single device connected to Octeon. It is + * always device ID 0. Don't bother doing reads for other + * device IDs on the first segment. + */ + if ((bus_number == 0) && (devfn >> 3 != 0)) + return PCIBIOS_FUNC_NOT_SUPPORTED; + + /* + * The following is a workaround for the CN57XX, CN56XX, + * CN55XX, and CN54XX errata with PCIe config reads from + * non-existent devices. These chips will hang the PCIe link if a + * config read is performed that causes a UR response. + */ + if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1) || + OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_1)) { + /* + * For our EBH5600 board, port 0 has a bridge with two + * PCI-X slots. We need special checks to make + * sure we only probe valid stuff. The PCIe->PCI-X + * bridge only responds to device ID 0, functions + * 0-1. + */ + if ((bus_number == 0) && (devfn >= 2)) + return PCIBIOS_FUNC_NOT_SUPPORTED; + /* + * The PCI-X slots are device ID 2,3. Choose one of + * the below "if" blocks based on what is plugged into + * the board. + */ +#if 1 + /* Use this option if you aren't using either slot */ + if (bus_number == 1) + return PCIBIOS_FUNC_NOT_SUPPORTED; +#elif 0 + /* + * Use this option if you are using the first slot but + * not the second. + */ + if ((bus_number == 1) && (devfn >> 3 != 2)) + return PCIBIOS_FUNC_NOT_SUPPORTED; +#elif 0 + /* + * Use this option if you are using the second slot + * but not the first. + */ + if ((bus_number == 1) && (devfn >> 3 != 3)) + return PCIBIOS_FUNC_NOT_SUPPORTED; +#elif 0 + /* Use this option if you are using both slots */ + if ((bus_number == 1) && + !((devfn == (2 << 3)) || (devfn == (3 << 3)))) + return PCIBIOS_FUNC_NOT_SUPPORTED; +#endif + + /* + * Shorten the DID timeout so bus errors for PCIe + * config reads from non-existent devices happen + * faster. This allows us to continue booting even if + * the above "if" checks are wrong. Once one of these + * errors happens, the PCIe port is dead.
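For reference, devfn packs the PCI device number in bits 7:3 and the function number in bits 2:0, which is why the guard above can use a bare devfn >= 2 on bus 0: only device 0, functions 0 and 1, get through. A standalone illustration, not part of the patch:

	#include <stdio.h>

	int main(void)
	{
		unsigned int devfn;

		for (devfn = 0; devfn < 4; devfn++)
			printf("devfn %u -> device %u, function %u%s\n",
			       devfn, devfn >> 3, devfn & 0x7,
			       devfn >= 2 ? " (rejected on bus 0)" : "");
		return 0;
	}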
+ */ + cvmmemctl_save.u64 = __read_64bit_c0_register($11, 7); + cvmmemctl.u64 = cvmmemctl_save.u64; + cvmmemctl.s.didtto = 2; + __write_64bit_c0_register($11, 7, cvmmemctl.u64); + } + + switch (size) { + case 4: + *val = cvmx_pcie_config_read32(pcie_port, bus_number, + devfn >> 3, devfn & 0x7, reg); + break; + case 2: + *val = cvmx_pcie_config_read16(pcie_port, bus_number, + devfn >> 3, devfn & 0x7, reg); + break; + case 1: + *val = cvmx_pcie_config_read8(pcie_port, bus_number, devfn >> 3, + devfn & 0x7, reg); + break; + default: + return PCIBIOS_FUNC_NOT_SUPPORTED; + } + + if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1) || + OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_1)) + __write_64bit_c0_register($11, 7, cvmmemctl_save.u64); + return PCIBIOS_SUCCESSFUL; +} + +static int octeon_pcie0_read_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 *val) +{ + return octeon_pcie_read_config(0, bus, devfn, reg, size, val); +} + +static int octeon_pcie1_read_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 *val) +{ + return octeon_pcie_read_config(1, bus, devfn, reg, size, val); +} + + + +/** + * Write a value to PCI configuration space + * + * @param bus + * @param devfn + * @param reg + * @param size + * @param val + * @return + */ +static inline int octeon_pcie_write_config(int pcie_port, struct pci_bus *bus, + unsigned int devfn, int reg, + int size, u32 val) +{ + int bus_number = bus->number; + /* + * We need to force the bus number to be zero on the root + * bus. Linux numbers the 2nd root bus to start after all + * busses on root 0. + */ + if (bus->parent == NULL) + bus_number = 0; + + switch (size) { + case 4: + cvmx_pcie_config_write32(pcie_port, bus_number, devfn >> 3, + devfn & 0x7, reg, val); + return PCIBIOS_SUCCESSFUL; + case 2: + cvmx_pcie_config_write16(pcie_port, bus_number, devfn >> 3, + devfn & 0x7, reg, val); + return PCIBIOS_SUCCESSFUL; + case 1: + cvmx_pcie_config_write8(pcie_port, bus_number, devfn >> 3, + devfn & 0x7, reg, val); + return PCIBIOS_SUCCESSFUL; + } +#if PCI_CONFIG_SPACE_DELAY + udelay(PCI_CONFIG_SPACE_DELAY); +#endif + return PCIBIOS_FUNC_NOT_SUPPORTED; +} + +static int octeon_pcie0_write_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 val) +{ + return octeon_pcie_write_config(0, bus, devfn, reg, size, val); +} + +static int octeon_pcie1_write_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 val) +{ + return octeon_pcie_write_config(1, bus, devfn, reg, size, val); +} + +static struct pci_ops octeon_pcie0_ops = { + octeon_pcie0_read_config, + octeon_pcie0_write_config, +}; + +static struct resource octeon_pcie0_mem_resource = { + .name = "Octeon PCIe0 MEM", + .flags = IORESOURCE_MEM, +}; + +static struct resource octeon_pcie0_io_resource = { + .name = "Octeon PCIe0 IO", + .flags = IORESOURCE_IO, +}; + +static struct pci_controller octeon_pcie0_controller = { + .pci_ops = &octeon_pcie0_ops, + .mem_resource = &octeon_pcie0_mem_resource, + .io_resource = &octeon_pcie0_io_resource, +}; + +static struct pci_ops octeon_pcie1_ops = { + octeon_pcie1_read_config, + octeon_pcie1_write_config, +}; + +static struct resource octeon_pcie1_mem_resource = { + .name = "Octeon PCIe1 MEM", + .flags = IORESOURCE_MEM, +}; + +static struct resource octeon_pcie1_io_resource = { + .name = "Octeon PCIe1 IO", + .flags = IORESOURCE_IO, +}; + +static struct pci_controller octeon_pcie1_controller = { + .pci_ops = &octeon_pcie1_ops, + .mem_resource = &octeon_pcie1_mem_resource, + .io_resource = 
&octeon_pcie1_io_resource, +}; + + +/** + * Initialize the Octeon PCIe controllers + * + * @return + */ +static int __init octeon_pcie_setup(void) +{ + union cvmx_npei_ctl_status npei_ctl_status; + int result; + + /* These chips don't have PCIe */ + if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) + return 0; + + /* Point pcibios_map_irq() to the PCIe version of it */ + octeon_pcibios_map_irq = octeon_pcie_pcibios_map_irq; + + /* Use the PCIe based DMA mappings */ + octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_PCIE; + + /* + * PCIe I/O range. It is based on port 0 but includes up until + * port 1's end. + */ + set_io_port_base(CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address(0))); + ioport_resource.start = 0; + ioport_resource.end = + cvmx_pcie_get_io_base_address(1) - + cvmx_pcie_get_io_base_address(0) + cvmx_pcie_get_io_size(1) - 1; + + npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS); + if (npei_ctl_status.s.host_mode) { + pr_notice("PCIe: Initializing port 0\n"); + result = cvmx_pcie_rc_initialize(0); + if (result == 0) { + /* Memory offsets are physical addresses */ + octeon_pcie0_controller.mem_offset = + cvmx_pcie_get_mem_base_address(0); + /* IO offsets are Mips virtual addresses */ + octeon_pcie0_controller.io_map_base = + CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address + (0)); + octeon_pcie0_controller.io_offset = 0; + /* + * To keep things similar to PCI, we start + * device addresses at the same place as PCI + * using big bar support. This normally + * translates to 4GB-256MB, which is the same + * as most x86 PCs. + */ + octeon_pcie0_controller.mem_resource->start = + cvmx_pcie_get_mem_base_address(0) + + (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20); + octeon_pcie0_controller.mem_resource->end = + cvmx_pcie_get_mem_base_address(0) + + cvmx_pcie_get_mem_size(0) - 1; + /* + * Ports must be above 16KB for the ISA bus + * filtering in the PCI-X to PCI bridge. + */ + octeon_pcie0_controller.io_resource->start = 4 << 10; + octeon_pcie0_controller.io_resource->end = + cvmx_pcie_get_io_size(0) - 1; + register_pci_controller(&octeon_pcie0_controller); + } + } else { + pr_notice("PCIe: Port 0 in endpoint mode, skipping.\n"); + } + + /* Skip the 2nd port on CN52XX if port 0 is in 4 lane mode */ + if (OCTEON_IS_MODEL(OCTEON_CN52XX)) { + union cvmx_npei_dbg_data npei_dbg_data; + npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA); + if (npei_dbg_data.cn52xx.qlm0_link_width) + return 0; + } + + pr_notice("PCIe: Initializing port 1\n"); + result = cvmx_pcie_rc_initialize(1); + if (result == 0) { + /* Memory offsets are physical addresses */ + octeon_pcie1_controller.mem_offset = + cvmx_pcie_get_mem_base_address(1); + /* IO offsets are Mips virtual addresses */ + octeon_pcie1_controller.io_map_base = + CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address(1)); + octeon_pcie1_controller.io_offset = + cvmx_pcie_get_io_base_address(1) - + cvmx_pcie_get_io_base_address(0); + /* + * To keep things similar to PCI, we start device + * addresses at the same place as PCI using big bar + * support. This normally translates to 4GB-256MB, + * which is the same as most x86 PCs. + */ + octeon_pcie1_controller.mem_resource->start = + cvmx_pcie_get_mem_base_address(1) + (4ul << 30) - + (OCTEON_PCI_BAR1_HOLE_SIZE << 20); + octeon_pcie1_controller.mem_resource->end = + cvmx_pcie_get_mem_base_address(1) + + cvmx_pcie_get_mem_size(1) - 1; + /* + * Ports must be above 16KB for the ISA bus filtering + * in the PCI-X to PCI bridge.
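A standalone sketch of the device-window arithmetic used for mem_resource->start in both port blocks above. The value of OCTEON_PCI_BAR1_HOLE_SIZE is assumed here to be 256 (megabytes), since that is what yields the "4GB-256MB" the comments mention; the kernel takes the real value from its Octeon PCI headers.

	#include <stdio.h>

	#define OCTEON_PCI_BAR1_HOLE_SIZE 256ULL	/* assumed value, in MB */

	int main(void)
	{
		unsigned long long offset =
			(4ULL << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20);

		/* prints 0xf0000000: the 4GB-256MB window most x86 PCs use */
		printf("device addresses start at base + 0x%llx\n", offset);
		return 0;
	}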
+ */ + octeon_pcie1_controller.io_resource->start = + cvmx_pcie_get_io_base_address(1) - + cvmx_pcie_get_io_base_address(0); + octeon_pcie1_controller.io_resource->end = + octeon_pcie1_controller.io_resource->start + + cvmx_pcie_get_io_size(1) - 1; + register_pci_controller(&octeon_pcie1_controller); + } + return 0; +} + +arch_initcall(octeon_pcie_setup); diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index c0047f8..8ab1d12 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -147,6 +147,10 @@ #define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \ cpu_has_mips64r1 | cpu_has_mips64r2) +#ifndef cpu_has_mips_r2_exec_hazard +#define cpu_has_mips_r2_exec_hazard cpu_has_mips_r2 +#endif + /* * MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other * pre-MIPS32/MIPS53 processors have CLO, CLZ. For 64-bit kernels @@ -230,4 +234,8 @@ #define cpu_scache_line_size() cpu_data[0].scache.linesz #endif +#ifndef cpu_hwrena_impl_bits +#define cpu_hwrena_impl_bits 0 +#endif + #endif /* __ASM_CPU_FEATURES_H */ diff --git a/arch/mips/include/asm/delay.h b/arch/mips/include/asm/delay.h index a07e51b..d2d8949 100644 --- a/arch/mips/include/asm/delay.h +++ b/arch/mips/include/asm/delay.h @@ -15,7 +15,7 @@ extern void __delay(unsigned int loops); extern void __ndelay(unsigned int ns); extern void __udelay(unsigned int us); -#define ndelay(ns) __udelay(ns) +#define ndelay(ns) __ndelay(ns) #define udelay(us) __udelay(us) /* make sure "usecs *= ..." in udelay do not overflow. */ diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h new file mode 100644 index 0000000..f5e85601 --- /dev/null +++ b/arch/mips/include/asm/hugetlb.h @@ -0,0 +1,114 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2008, 2009 Cavium Networks, Inc. 
+ */ + +#ifndef __ASM_HUGETLB_H +#define __ASM_HUGETLB_H + +#include <asm/page.h> + + +static inline int is_hugepage_only_range(struct mm_struct *mm, + unsigned long addr, + unsigned long len) +{ + return 0; +} + +static inline int prepare_hugepage_range(struct file *file, + unsigned long addr, + unsigned long len) +{ + unsigned long task_size = STACK_TOP; + struct hstate *h = hstate_file(file); + + if (len & ~huge_page_mask(h)) + return -EINVAL; + if (addr & ~huge_page_mask(h)) + return -EINVAL; + if (len > task_size) + return -ENOMEM; + if (task_size - len < addr) + return -EINVAL; + return 0; +} + +static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) +{ +} + +static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, + unsigned long addr, + unsigned long end, + unsigned long floor, + unsigned long ceiling) +{ + free_pgd_range(tlb, addr, end, floor, ceiling); +} + +static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + set_pte_at(mm, addr, ptep, pte); +} + +static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + pte_t clear; + pte_t pte = *ptep; + + pte_val(clear) = (unsigned long)invalid_pte_table; + set_pte_at(mm, addr, ptep, clear); + return pte; +} + +static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ +} + +static inline int huge_pte_none(pte_t pte) +{ + unsigned long val = pte_val(pte) & ~_PAGE_GLOBAL; + return !val || (val == (unsigned long)invalid_pte_table); +} + +static inline pte_t huge_pte_wrprotect(pte_t pte) +{ + return pte_wrprotect(pte); +} + +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + ptep_set_wrprotect(mm, addr, ptep); +} + +static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, + pte_t *ptep, pte_t pte, + int dirty) +{ + return ptep_set_access_flags(vma, addr, ptep, pte, dirty); +} + +static inline pte_t huge_ptep_get(pte_t *ptep) +{ + return *ptep; +} + +static inline int arch_prepare_hugepage(struct page *page) +{ + return 0; +} + +static inline void arch_release_hugepage(struct page *page) +{ +} + +#endif /* __ASM_HUGETLB_H */ diff --git a/arch/mips/include/asm/ioctl.h b/arch/mips/include/asm/ioctl.h index 9161634..c515a1a 100644 --- a/arch/mips/include/asm/ioctl.h +++ b/arch/mips/include/asm/ioctl.h @@ -3,40 +3,16 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 1995, 96, 99, 2001 Ralf Baechle + * Copyright (C) 1995, 96, 99, 2001 Ralf Baechle <ralf@linux-mips.org> + * Copyright (C) 2009 Wind River Systems + * Written by Ralf Baechle <ralf@linux-mips.org> */ -#ifndef _ASM_IOCTL_H -#define _ASM_IOCTL_H +#ifndef __ASM_IOCTL_H +#define __ASM_IOCTL_H -/* - * The original linux ioctl numbering scheme was just a general - * "anything goes" setup, where more or less random numbers were - * assigned. Sorry, I was clueless when I started out on this. - * - * On the alpha, we'll try to clean it up a bit, using a more sane - * ioctl numbering, and also trying to be compatible with OSF/1 in - * the process. I'd like to clean it up for the i386 as well, but - * it's so painful recognizing both the new and the old numbers.. - * - * The same applies for for the MIPS ABI; in fact even the macros - * from Linux/Alpha fit almost perfectly. 
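The net effect of this rewrite is that MIPS keeps only its two non-default field widths (13 size bits and 3 direction bits, with _IOC_READ = 2) and inherits the rest of the nr/type/size/dir layout from asm-generic/ioctl.h. A standalone sketch of the resulting encoding, assuming the field order documented by the macros being deleted below:

	#include <stdio.h>

	#define NRBITS	 8
	#define TYPEBITS 8
	#define SIZEBITS 13	/* MIPS override */
	#define IOC_READ 2U	/* MIPS value, kept below */

	int main(void)
	{
		/* equivalent of _IOR('A', 0, int) under this layout */
		unsigned int cmd =
			(IOC_READ << (NRBITS + TYPEBITS + SIZEBITS)) |
			((unsigned int)sizeof(int) << (NRBITS + TYPEBITS)) |
			('A' << NRBITS) | 0;

		printf("_IOR('A', 0, int) == 0x%08x\n", cmd);	/* 0x40044100 */
		return 0;
	}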
- */ - -#define _IOC_NRBITS 8 -#define _IOC_TYPEBITS 8 #define _IOC_SIZEBITS 13 #define _IOC_DIRBITS 3 -#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1) -#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1) -#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1) -#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1) - -#define _IOC_NRSHIFT 0 -#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS) -#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS) -#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS) - /* * Direction bits _IOC_NONE could be 0, but OSF/1 gives it a bit. * And this turns out useful to catch old ioctl numbers in header @@ -46,53 +22,6 @@ #define _IOC_READ 2U #define _IOC_WRITE 4U -/* - * The following are included for compatibility - */ -#define _IOC_VOID 0x20000000 -#define _IOC_OUT 0x40000000 -#define _IOC_IN 0x80000000 -#define _IOC_INOUT (IOC_IN|IOC_OUT) - -#define _IOC(dir, type, nr, size) \ - (((dir) << _IOC_DIRSHIFT) | \ - ((type) << _IOC_TYPESHIFT) | \ - ((nr) << _IOC_NRSHIFT) | \ - ((size) << _IOC_SIZESHIFT)) - -#ifdef __KERNEL__ -/* provoke compile error for invalid uses of size argument */ -extern unsigned int __invalid_size_argument_for_IOC; -#define _IOC_TYPECHECK(t) \ - ((sizeof(t) == sizeof(t[1]) && \ - sizeof(t) < (1 << _IOC_SIZEBITS)) ? \ - sizeof(t) : __invalid_size_argument_for_IOC) -#else -#define _IOC_TYPECHECK(t) (sizeof(t)) -#endif - -/* used to create numbers */ -#define _IO(type, nr) _IOC(_IOC_NONE, (type), (nr), 0) -#define _IOR(type, nr, size) _IOC(_IOC_READ, (type), (nr), (_IOC_TYPECHECK(size))) -#define _IOW(type, nr, size) _IOC(_IOC_WRITE, (type), (nr), (_IOC_TYPECHECK(size))) -#define _IOWR(type, nr, size) _IOC(_IOC_READ|_IOC_WRITE, (type), (nr), (_IOC_TYPECHECK(size))) -#define _IOR_BAD(type, nr, size) _IOC(_IOC_READ, (type), (nr), sizeof(size)) -#define _IOW_BAD(type, nr, size) _IOC(_IOC_WRITE, (type), (nr), sizeof(size)) -#define _IOWR_BAD(type, nr, size) _IOC(_IOC_READ|_IOC_WRITE, (type), (nr), sizeof(size)) - - -/* used to decode them.. */ -#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK) -#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK) -#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK) -#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK) - -/* ...and for the drivers/sound files... */ - -#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT) -#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT) -#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT) -#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT) -#define IOCSIZE_SHIFT (_IOC_SIZESHIFT) +#include <asm-generic/ioctl.h> -#endif /* _ASM_IOCTL_H */ +#endif /* __ASM_IOCTL_H */ diff --git a/arch/mips/include/asm/mach-au1x00/au1000_gpio.h b/arch/mips/include/asm/mach-au1x00/au1000_gpio.h deleted file mode 100644 index d8c96fd..0000000 --- a/arch/mips/include/asm/mach-au1x00/au1000_gpio.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * FILE NAME au1000_gpio.h - * - * BRIEF MODULE DESCRIPTION - * API to Alchemy Au1xx0 GPIO device. - * - * Author: MontaVista Software, Inc. <source@mvista.com> - * Steve Longerbeam - * - * Copyright 2001, 2008 MontaVista Software Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN - * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#ifndef __AU1000_GPIO_H -#define __AU1000_GPIO_H - -#include <linux/ioctl.h> - -#define AU1000GPIO_IOC_MAGIC 'A' - -#define AU1000GPIO_IN _IOR(AU1000GPIO_IOC_MAGIC, 0, int) -#define AU1000GPIO_SET _IOW(AU1000GPIO_IOC_MAGIC, 1, int) -#define AU1000GPIO_CLEAR _IOW(AU1000GPIO_IOC_MAGIC, 2, int) -#define AU1000GPIO_OUT _IOW(AU1000GPIO_IOC_MAGIC, 3, int) -#define AU1000GPIO_TRISTATE _IOW(AU1000GPIO_IOC_MAGIC, 4, int) -#define AU1000GPIO_AVAIL_MASK _IOR(AU1000GPIO_IOC_MAGIC, 5, int) - -#ifdef __KERNEL__ -extern u32 get_au1000_avail_gpio_mask(void); -extern int au1000gpio_tristate(u32 data); -extern int au1000gpio_in(u32 *data); -extern int au1000gpio_set(u32 data); -extern int au1000gpio_clear(u32 data); -extern int au1000gpio_out(u32 data); -#endif - -#endif diff --git a/arch/mips/include/asm/mach-au1x00/gpio-au1000.h b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h new file mode 100644 index 0000000..127d4ed --- /dev/null +++ b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h @@ -0,0 +1,604 @@ +/* + * GPIO functions for Au1000, Au1500, Au1100, Au1550, Au1200 + * + * Copyright (c) 2009 Manuel Lauss. + * + * Licensed under the terms outlined in the file COPYING. + */ + +#ifndef _ALCHEMY_GPIO_AU1000_H_ +#define _ALCHEMY_GPIO_AU1000_H_ + +#include <asm/mach-au1x00/au1000.h> + +/* The default GPIO numberspace as documented in the Alchemy manuals. + * GPIO0-31 from GPIO1 block, GPIO200-215 from GPIO2 block. + */ +#define ALCHEMY_GPIO1_BASE 0 +#define ALCHEMY_GPIO2_BASE 200 + +#define ALCHEMY_GPIO1_NUM 32 +#define ALCHEMY_GPIO2_NUM 16 +#define ALCHEMY_GPIO1_MAX (ALCHEMY_GPIO1_BASE + ALCHEMY_GPIO1_NUM - 1) +#define ALCHEMY_GPIO2_MAX (ALCHEMY_GPIO2_BASE + ALCHEMY_GPIO2_NUM - 1) + +#define MAKE_IRQ(intc, off) (AU1000_INTC##intc##_INT_BASE + (off)) + + +static inline int au1000_gpio1_to_irq(int gpio) +{ + return MAKE_IRQ(1, gpio - ALCHEMY_GPIO1_BASE); +} + +static inline int au1000_gpio2_to_irq(int gpio) +{ + return -ENXIO; +} + +#ifdef CONFIG_SOC_AU1000 +static inline int au1000_irq_to_gpio(int irq) +{ + if ((irq >= AU1000_GPIO_0) && (irq <= AU1000_GPIO_31)) + return ALCHEMY_GPIO1_BASE + (irq - AU1000_GPIO_0) + 0; + + return -ENXIO; +} +#endif + +static inline int au1500_gpio1_to_irq(int gpio) +{ + gpio -= ALCHEMY_GPIO1_BASE; + + switch (gpio) { + case 0 ... 15: + case 20: + case 23 ... 28: return MAKE_IRQ(1, gpio); + } + + return -ENXIO; +} + +static inline int au1500_gpio2_to_irq(int gpio) +{ + gpio -= ALCHEMY_GPIO2_BASE; + + switch (gpio) { + case 0 ... 3: return MAKE_IRQ(1, 16 + gpio - 0); + case 4 ... 5: return MAKE_IRQ(1, 21 + gpio - 4); + case 6 ... 
7: return MAKE_IRQ(1, 29 + gpio - 6); + } + + return -ENXIO; +} + +#ifdef CONFIG_SOC_AU1500 +static inline int au1500_irq_to_gpio(int irq) +{ + switch (irq) { + case AU1000_GPIO_0 ... AU1000_GPIO_15: + case AU1500_GPIO_20: + case AU1500_GPIO_23 ... AU1500_GPIO_28: + return ALCHEMY_GPIO1_BASE + (irq - AU1000_GPIO_0) + 0; + case AU1500_GPIO_200 ... AU1500_GPIO_203: + return ALCHEMY_GPIO2_BASE + (irq - AU1500_GPIO_200) + 0; + case AU1500_GPIO_204 ... AU1500_GPIO_205: + return ALCHEMY_GPIO2_BASE + (irq - AU1500_GPIO_204) + 4; + case AU1500_GPIO_206 ... AU1500_GPIO_207: + return ALCHEMY_GPIO2_BASE + (irq - AU1500_GPIO_206) + 6; + case AU1500_GPIO_208_215: + return ALCHEMY_GPIO2_BASE + 8; + } + + return -ENXIO; +} +#endif + +static inline int au1100_gpio1_to_irq(int gpio) +{ + return MAKE_IRQ(1, gpio - ALCHEMY_GPIO1_BASE); +} + +static inline int au1100_gpio2_to_irq(int gpio) +{ + gpio -= ALCHEMY_GPIO2_BASE; + + if ((gpio >= 8) && (gpio <= 15)) + return MAKE_IRQ(0, 29); /* shared GPIO208_215 */ + + return -ENXIO; +} + +#ifdef CONFIG_SOC_AU1100 +static inline int au1100_irq_to_gpio(int irq) +{ + switch (irq) { + case AU1000_GPIO_0 ... AU1000_GPIO_31: + return ALCHEMY_GPIO1_BASE + (irq - AU1000_GPIO_0) + 0; + case AU1100_GPIO_208_215: + return ALCHEMY_GPIO2_BASE + 8; + } + + return -ENXIO; +} +#endif + +static inline int au1550_gpio1_to_irq(int gpio) +{ + gpio -= ALCHEMY_GPIO1_BASE; + + switch (gpio) { + case 0 ... 15: + case 20 ... 28: return MAKE_IRQ(1, gpio); + case 16 ... 17: return MAKE_IRQ(1, 18 + gpio - 16); + } + + return -ENXIO; +} + +static inline int au1550_gpio2_to_irq(int gpio) +{ + gpio -= ALCHEMY_GPIO2_BASE; + + switch (gpio) { + case 0: return MAKE_IRQ(1, 16); + case 1 ... 5: return MAKE_IRQ(1, 17); /* shared GPIO201_205 */ + case 6 ... 7: return MAKE_IRQ(1, 29 + gpio - 6); + case 8 ... 15: return MAKE_IRQ(1, 31); /* shared GPIO208_215 */ + } + + return -ENXIO; +} + +#ifdef CONFIG_SOC_AU1550 +static inline int au1550_irq_to_gpio(int irq) +{ + switch (irq) { + case AU1000_GPIO_0 ... AU1000_GPIO_15: + return ALCHEMY_GPIO1_BASE + (irq - AU1000_GPIO_0) + 0; + case AU1550_GPIO_200: + case AU1500_GPIO_201_205: + return ALCHEMY_GPIO2_BASE + (irq - AU1550_GPIO_200) + 0; + case AU1500_GPIO_16 ... AU1500_GPIO_28: + return ALCHEMY_GPIO1_BASE + (irq - AU1500_GPIO_16) + 16; + case AU1500_GPIO_206 ... AU1500_GPIO_208_218: + return ALCHEMY_GPIO2_BASE + (irq - AU1500_GPIO_206) + 6; + } + + return -ENXIO; +} +#endif + +static inline int au1200_gpio1_to_irq(int gpio) +{ + return MAKE_IRQ(1, gpio - ALCHEMY_GPIO1_BASE); +} + +static inline int au1200_gpio2_to_irq(int gpio) +{ + gpio -= ALCHEMY_GPIO2_BASE; + + switch (gpio) { + case 0 ... 2: return MAKE_IRQ(0, 5 + gpio - 0); + case 3: return MAKE_IRQ(0, 22); + case 4 ... 7: return MAKE_IRQ(0, 24 + gpio - 4); + case 8 ... 15: return MAKE_IRQ(0, 28); /* shared GPIO208_215 */ + } + + return -ENXIO; +} + +#ifdef CONFIG_SOC_AU1200 +static inline int au1200_irq_to_gpio(int irq) +{ + switch (irq) { + case AU1000_GPIO_0 ... AU1000_GPIO_31: + return ALCHEMY_GPIO1_BASE + (irq - AU1000_GPIO_0) + 0; + case AU1200_GPIO_200 ... AU1200_GPIO_202: + return ALCHEMY_GPIO2_BASE + (irq - AU1200_GPIO_200) + 0; + case AU1200_GPIO_203: + return ALCHEMY_GPIO2_BASE + 3; + case AU1200_GPIO_204 ... AU1200_GPIO_208_215: + return ALCHEMY_GPIO2_BASE + (irq - AU1200_GPIO_204) + 4; + } + + return -ENXIO; +} +#endif + +/* + * GPIO1 block macros for common linux gpio functions.
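One design note on the GPIO1 helpers that follow: the hardware has separate set and clear registers (SYS_OUTPUTSET / SYS_OUTPUTCLR), so driving a pin is a single masked store with no read-modify-write and hence no locking. A trivial sketch of the mask computation, not part of the patch:

	#include <stdio.h>

	int main(void)
	{
		int gpio = 5;	/* hypothetical pin in the GPIO1 block (base 0) */
		unsigned long mask = 1UL << gpio;

		printf("write 0x%08lx to SYS_OUTPUTSET to drive high\n", mask);
		printf("write 0x%08lx to SYS_OUTPUTCLR to drive low\n", mask);
		return 0;
	}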
+ */ +static inline void alchemy_gpio1_set_value(int gpio, int v) +{ + unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); + unsigned long r = v ? SYS_OUTPUTSET : SYS_OUTPUTCLR; + au_writel(mask, r); + au_sync(); +} + +static inline int alchemy_gpio1_get_value(int gpio) +{ + unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); + return au_readl(SYS_PINSTATERD) & mask; +} + +static inline int alchemy_gpio1_direction_input(int gpio) +{ + unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); + au_writel(mask, SYS_TRIOUTCLR); + au_sync(); + return 0; +} + +static inline int alchemy_gpio1_direction_output(int gpio, int v) +{ + /* hardware switches to "output" mode when one of the two + * "set_value" registers is accessed. + */ + alchemy_gpio1_set_value(gpio, v); + return 0; +} + +static inline int alchemy_gpio1_is_valid(int gpio) +{ + return ((gpio >= ALCHEMY_GPIO1_BASE) && (gpio <= ALCHEMY_GPIO1_MAX)); +} + +static inline int alchemy_gpio1_to_irq(int gpio) +{ +#if defined(CONFIG_SOC_AU1000) + return au1000_gpio1_to_irq(gpio); +#elif defined(CONFIG_SOC_AU1100) + return au1100_gpio1_to_irq(gpio); +#elif defined(CONFIG_SOC_AU1500) + return au1500_gpio1_to_irq(gpio); +#elif defined(CONFIG_SOC_AU1550) + return au1550_gpio1_to_irq(gpio); +#elif defined(CONFIG_SOC_AU1200) + return au1200_gpio1_to_irq(gpio); +#else + return -ENXIO; +#endif +} + +/* + * GPIO2 block macros for common linux GPIO functions. The 'gpio' + * parameter must be in range of ALCHEMY_GPIO2_BASE..ALCHEMY_GPIO2_MAX. + */ +static inline void __alchemy_gpio2_mod_dir(int gpio, int to_out) +{ + unsigned long mask = 1 << (gpio - ALCHEMY_GPIO2_BASE); + unsigned long d = au_readl(GPIO2_DIR); + if (to_out) + d |= mask; + else + d &= ~mask; + au_writel(d, GPIO2_DIR); + au_sync(); +} + +static inline void alchemy_gpio2_set_value(int gpio, int v) +{ + unsigned long mask; + mask = ((v) ? 0x00010001 : 0x00010000) << (gpio - ALCHEMY_GPIO2_BASE); + au_writel(mask, GPIO2_OUTPUT); + au_sync(); +} + +static inline int alchemy_gpio2_get_value(int gpio) +{ + return au_readl(GPIO2_PINSTATE) & (1 << (gpio - ALCHEMY_GPIO2_BASE)); +} + +static inline int alchemy_gpio2_direction_input(int gpio) +{ + unsigned long flags; + local_irq_save(flags); + __alchemy_gpio2_mod_dir(gpio, 0); + local_irq_restore(flags); + return 0; +} + +static inline int alchemy_gpio2_direction_output(int gpio, int v) +{ + unsigned long flags; + alchemy_gpio2_set_value(gpio, v); + local_irq_save(flags); + __alchemy_gpio2_mod_dir(gpio, 1); + local_irq_restore(flags); + return 0; +} + +static inline int alchemy_gpio2_is_valid(int gpio) +{ + return ((gpio >= ALCHEMY_GPIO2_BASE) && (gpio <= ALCHEMY_GPIO2_MAX)); +} + +static inline int alchemy_gpio2_to_irq(int gpio) +{ +#if defined(CONFIG_SOC_AU1000) + return au1000_gpio2_to_irq(gpio); +#elif defined(CONFIG_SOC_AU1100) + return au1100_gpio2_to_irq(gpio); +#elif defined(CONFIG_SOC_AU1500) + return au1500_gpio2_to_irq(gpio); +#elif defined(CONFIG_SOC_AU1550) + return au1550_gpio2_to_irq(gpio); +#elif defined(CONFIG_SOC_AU1200) + return au1200_gpio2_to_irq(gpio); +#else + return -ENXIO; +#endif +} + +/**********************************************************************/ + +/* On Au1000, Au1500 and Au1100 GPIOs won't work as inputs before + * SYS_PININPUTEN is written to at least once. On Au1550/Au1200 this + * register enables use of GPIOs as wake source. 
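A detail of alchemy_gpio2_set_value() above that deserves spelling out: GPIO2_OUTPUT carries per-pin write-enable bits in its upper 16 bits and pin values in its lower 16, so a single store updates exactly one pin, again avoiding a read-modify-write. A standalone check of the mask arithmetic:

	#include <stdio.h>

	int main(void)
	{
		int off = 3;	/* e.g. GPIO 203 - ALCHEMY_GPIO2_BASE */

		printf("set:   0x%08lx\n", 0x00010001UL << off);	/* 0x00080008 */
		printf("clear: 0x%08lx\n", 0x00010000UL << off);	/* 0x00080000 */
		return 0;
	}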
+ */ +static inline void alchemy_gpio1_input_enable(void) +{ + au_writel(0, SYS_PININPUTEN); /* the write op is key */ + au_sync(); +} + +/* GPIO2 shared interrupts and control */ + +static inline void __alchemy_gpio2_mod_int(int gpio2, int en) +{ + unsigned long r = au_readl(GPIO2_INTENABLE); + if (en) + r |= 1 << gpio2; + else + r &= ~(1 << gpio2); + au_writel(r, GPIO2_INTENABLE); + au_sync(); +} + +/** + * alchemy_gpio2_enable_int - Enable a GPIO2 pin's shared irq contribution. + * @gpio2: The GPIO2 pin to activate (200...215). + * + * GPIO208-215 have one shared interrupt line to the INTC. They are + * and'ed with a per-pin enable bit and finally or'ed together to form + * a single irq request (useful for active-high sources). + * With this function, a pin's individual contribution to the int request + * can be enabled. As with all other GPIO-based interrupts, the INTC + * must be programmed to accept the GPIO208_215 interrupt as well. + * + * NOTE: Calling this function is only necessary for GPIO208-215; all other + * GPIO2-based interrupts have their own request to the INTC. Please + * consult your Alchemy databook for more information! + * + * NOTE: On the Au1550, GPIOs 201-205 also have a shared interrupt request + * line to the INTC, GPIO201_205. This function can be used for those + * as well. + * + * NOTE: The 'gpio2' parameter must be in range of the GPIO2 numberspace + * (200-215 by default). No sanity checks are made. + */ +static inline void alchemy_gpio2_enable_int(int gpio2) +{ + unsigned long flags; + + gpio2 -= ALCHEMY_GPIO2_BASE; + +#if defined(CONFIG_SOC_AU1100) || defined(CONFIG_SOC_AU1500) + /* Au1100/Au1500 have GPIO208-215 enable bits at 0..7 */ + gpio2 -= 8; +#endif + local_irq_save(flags); + __alchemy_gpio2_mod_int(gpio2, 1); + local_irq_restore(flags); +} + +/** + * alchemy_gpio2_disable_int - Disable a GPIO2 pin's shared irq contribution. + * @gpio2: The GPIO2 pin to deactivate (200...215). + * + * see function alchemy_gpio2_enable_int() for more information. + */ +static inline void alchemy_gpio2_disable_int(int gpio2) +{ + unsigned long flags; + + gpio2 -= ALCHEMY_GPIO2_BASE; + +#if defined(CONFIG_SOC_AU1100) || defined(CONFIG_SOC_AU1500) + /* Au1100/Au1500 have GPIO208-215 enable bits at 0..7 */ + gpio2 -= 8; +#endif + local_irq_save(flags); + __alchemy_gpio2_mod_int(gpio2, 0); + local_irq_restore(flags); +} + +/** + * alchemy_gpio2_enable - Activate GPIO2 block. + * + * The GPIO2 block must be enabled explicitly to work. On systems + * where this isn't done by the bootloader, this function can be used. + */ +static inline void alchemy_gpio2_enable(void) +{ + au_writel(3, GPIO2_ENABLE); /* reset, clock enabled */ + au_sync(); + au_writel(1, GPIO2_ENABLE); /* clock enabled */ + au_sync(); +} + +/** + * alchemy_gpio2_disable - disable GPIO2 block. + * + * Disable and put GPIO2 block in low-power mode. + */ +static inline void alchemy_gpio2_disable(void) +{ + au_writel(2, GPIO2_ENABLE); /* reset, clock disabled */ + au_sync(); +} + +/**********************************************************************/ + +/* wrappers for on-chip gpios; can be used before gpio chips have been + * registered with gpiolib. + */ +static inline int alchemy_gpio_direction_input(int gpio) +{ + return (gpio >= ALCHEMY_GPIO2_BASE) ? + alchemy_gpio2_direction_input(gpio) : + alchemy_gpio1_direction_input(gpio); +} + +static inline int alchemy_gpio_direction_output(int gpio, int v) +{ + return (gpio >= ALCHEMY_GPIO2_BASE) ? 
+ alchemy_gpio2_direction_output(gpio, v) : + alchemy_gpio1_direction_output(gpio, v); +} + +static inline int alchemy_gpio_get_value(int gpio) +{ + return (gpio >= ALCHEMY_GPIO2_BASE) ? + alchemy_gpio2_get_value(gpio) : + alchemy_gpio1_get_value(gpio); +} + +static inline void alchemy_gpio_set_value(int gpio, int v) +{ + if (gpio >= ALCHEMY_GPIO2_BASE) + alchemy_gpio2_set_value(gpio, v); + else + alchemy_gpio1_set_value(gpio, v); +} + +static inline int alchemy_gpio_is_valid(int gpio) +{ + return (gpio >= ALCHEMY_GPIO2_BASE) ? + alchemy_gpio2_is_valid(gpio) : + alchemy_gpio1_is_valid(gpio); +} + +static inline int alchemy_gpio_cansleep(int gpio) +{ + return 0; /* Alchemy never gets tired */ +} + +static inline int alchemy_gpio_to_irq(int gpio) +{ + return (gpio >= ALCHEMY_GPIO2_BASE) ? + alchemy_gpio2_to_irq(gpio) : + alchemy_gpio1_to_irq(gpio); +} + +static inline int alchemy_irq_to_gpio(int irq) +{ +#if defined(CONFIG_SOC_AU1000) + return au1000_irq_to_gpio(irq); +#elif defined(CONFIG_SOC_AU1100) + return au1100_irq_to_gpio(irq); +#elif defined(CONFIG_SOC_AU1500) + return au1500_irq_to_gpio(irq); +#elif defined(CONFIG_SOC_AU1550) + return au1550_irq_to_gpio(irq); +#elif defined(CONFIG_SOC_AU1200) + return au1200_irq_to_gpio(irq); +#else + return -ENXIO; +#endif +} + +/**********************************************************************/ + +/* Linux gpio framework integration. + * + * 4 use cases of Au1000-Au1200 GPIOS: + * (1) GPIOLIB=y, ALCHEMY_GPIO_INDIRECT=y: + * Board must register gpiochips. + * (2) GPIOLIB=y, ALCHEMY_GPIO_INDIRECT=n: + * 2 (1 for Au1000) gpio_chips are registered. + * + * (3) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=y: + * the boards' gpio.h must provide the linux gpio wrapper functions. + * + * (4) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=n: + * inlinable gpio functions are provided which enable access to the + * Au1000 gpios only by using the numbers straight out of the data- + * sheets. + * + * Cases 1 and 3 are intended for boards which want to provide their own + * GPIO namespace and -operations (e.g. you have 8 GPIOs + * which are in part provided by spare Au1000 GPIO pins and in part by + * an external FPGA but you still want them to be accessible in linux + * as gpio0-7. The board can of course use the alchemy_gpioX_* functions + * as required).
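To make case (4) concrete: with both GPIOLIB and ALCHEMY_GPIO_INDIRECT disabled, board code calls the generic gpio_* names with datasheet pin numbers and gets the inline Alchemy implementations defined below. A hypothetical board snippet (pin number invented for illustration):

	/* hypothetical board code, case (4): datasheet numbering, no gpiolib */
	static void board_led_on(void)
	{
		if (gpio_is_valid(5))			/* pin 5, GPIO1 block */
			gpio_direction_output(5, 1);	/* switches to output, drives high */
	}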
+ */ + +#ifndef CONFIG_GPIOLIB + + +#ifndef CONFIG_ALCHEMY_GPIO_INDIRECT /* case (4) */ + +static inline int gpio_direction_input(int gpio) +{ + return alchemy_gpio_direction_input(gpio); +} + +static inline int gpio_direction_output(int gpio, int v) +{ + return alchemy_gpio_direction_output(gpio, v); +} + +static inline int gpio_get_value(int gpio) +{ + return alchemy_gpio_get_value(gpio); +} + +static inline void gpio_set_value(int gpio, int v) +{ + alchemy_gpio_set_value(gpio, v); +} + +static inline int gpio_is_valid(int gpio) +{ + return alchemy_gpio_is_valid(gpio); +} + +static inline int gpio_cansleep(int gpio) +{ + return alchemy_gpio_cansleep(gpio); +} + +static inline int gpio_to_irq(int gpio) +{ + return alchemy_gpio_to_irq(gpio); +} + +static inline int irq_to_gpio(int irq) +{ + return alchemy_irq_to_gpio(irq); +} + +#endif /* !CONFIG_ALCHEMY_GPIO_INDIRECT */ + + +#else /* CONFIG_GPIOLIB */ + + + /* using gpiolib to provide up to 2 gpio_chips for on-chip gpios */ +#ifndef CONFIG_ALCHEMY_GPIO_INDIRECT /* case (2) */ + +/* get everything through gpiolib */ +#define gpio_to_irq __gpio_to_irq +#define gpio_get_value __gpio_get_value +#define gpio_set_value __gpio_set_value +#define gpio_cansleep __gpio_cansleep +#define irq_to_gpio alchemy_irq_to_gpio + +#include <asm-generic/gpio.h> + +#endif /* !CONFIG_ALCHEMY_GPIO_INDIRECT */ + + +#endif /* !CONFIG_GPIOLIB */ + +#endif /* _ALCHEMY_GPIO_AU1000_H_ */ diff --git a/arch/mips/include/asm/mach-au1x00/gpio.h b/arch/mips/include/asm/mach-au1x00/gpio.h index 34d9b72..f9b7d41 100644 --- a/arch/mips/include/asm/mach-au1x00/gpio.h +++ b/arch/mips/include/asm/mach-au1x00/gpio.h @@ -1,33 +1,10 @@ -#ifndef _AU1XXX_GPIO_H_ -#define _AU1XXX_GPIO_H_ +#ifndef _ALCHEMY_GPIO_H_ +#define _ALCHEMY_GPIO_H_ -#include <linux/types.h> +#if defined(CONFIG_ALCHEMY_GPIO_AU1000) -#define AU1XXX_GPIO_BASE 200 +#include <asm/mach-au1x00/gpio-au1000.h> -/* GPIO bank 1 offsets */ -#define AU1000_GPIO1_TRI_OUT 0x0100 -#define AU1000_GPIO1_OUT 0x0108 -#define AU1000_GPIO1_ST 0x0110 -#define AU1000_GPIO1_CLR 0x010C +#endif -/* GPIO bank 2 offsets */ -#define AU1000_GPIO2_DIR 0x00 -#define AU1000_GPIO2_RSVD 0x04 -#define AU1000_GPIO2_OUT 0x08 -#define AU1000_GPIO2_ST 0x0C -#define AU1000_GPIO2_INT 0x10 -#define AU1000_GPIO2_EN 0x14 - -#define GPIO2_OUT_EN_MASK 0x00010000 - -#define gpio_to_irq(gpio) NULL - -#define gpio_get_value __gpio_get_value -#define gpio_set_value __gpio_set_value - -#define gpio_cansleep __gpio_cansleep - -#include <asm-generic/gpio.h> - -#endif /* _AU1XXX_GPIO_H_ */ +#endif /* _ALCHEMY_GPIO_H_ */ diff --git a/arch/mips/include/asm/mach-bcm47xx/gpio.h b/arch/mips/include/asm/mach-bcm47xx/gpio.h index 1784fde..9850414 100644 --- a/arch/mips/include/asm/mach-bcm47xx/gpio.h +++ b/arch/mips/include/asm/mach-bcm47xx/gpio.h @@ -37,6 +37,9 @@ static inline int gpio_direction_input(unsigned gpio) static inline int gpio_direction_output(unsigned gpio, int value) { + /* first set the gpio out value */ + ssb_gpio_out(&ssb_bcm47xx, 1 << gpio, value ? 
1 << gpio : 0); + /* then set the gpio mode */ ssb_gpio_outen(&ssb_bcm47xx, 1 << gpio, 1 << gpio); return 0; } diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h index 04ce6e6..3d830756 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h @@ -47,11 +47,13 @@ #define cpu_has_mips32r2 0 #define cpu_has_mips64r1 0 #define cpu_has_mips64r2 1 +#define cpu_has_mips_r2_exec_hazard 0 #define cpu_has_dsp 0 #define cpu_has_mipsmt 0 #define cpu_has_userlocal 0 #define cpu_has_vint 0 #define cpu_has_veic 0 +#define cpu_hwrena_impl_bits 0xc0000000 #define ARCH_HAS_READ_CURRENT_TIMER 1 #define ARCH_HAS_IRQ_PER_CPU 1 #define ARCH_HAS_SPINLOCK_PREFETCH 1 diff --git a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h index f30fce9..17d5794 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h +++ b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h @@ -30,12 +30,14 @@ static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, return octeon_map_dma_mem(dev, page_address(page), PAGE_SIZE); } -static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr) +static inline unsigned long plat_dma_addr_to_phys(struct device *dev, + dma_addr_t dma_addr) { return dma_addr; } -static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr) +static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction direction) { octeon_unmap_dma_mem(dev, dma_addr); } diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h index 36c611b..8da9807 100644 --- a/arch/mips/include/asm/mach-generic/dma-coherence.h +++ b/arch/mips/include/asm/mach-generic/dma-coherence.h @@ -23,12 +23,14 @@ static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, return page_to_phys(page); } -static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr) +static inline unsigned long plat_dma_addr_to_phys(struct device *dev, + dma_addr_t dma_addr) { return dma_addr; } -static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr) +static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction direction) { } diff --git a/arch/mips/include/asm/mach-ip27/dma-coherence.h b/arch/mips/include/asm/mach-ip27/dma-coherence.h index 4c21bfc..d3d0401 100644 --- a/arch/mips/include/asm/mach-ip27/dma-coherence.h +++ b/arch/mips/include/asm/mach-ip27/dma-coherence.h @@ -33,12 +33,14 @@ static dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page) return pa; } -static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr) +static unsigned long plat_dma_addr_to_phys(struct device *dev, + dma_addr_t dma_addr) { return dma_addr & ~(0xffUL << 56); } -static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr) +static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction direction) { } diff --git a/arch/mips/include/asm/mach-ip32/dma-coherence.h b/arch/mips/include/asm/mach-ip32/dma-coherence.h index 7ae40f4..3785595 100644 --- a/arch/mips/include/asm/mach-ip32/dma-coherence.h +++ b/arch/mips/include/asm/mach-ip32/dma-coherence.h @@ -50,7 +50,8 @@ static dma_addr_t 
plat_map_dma_mem_page(struct device *dev, struct page *page) } /* This is almost certainly wrong but it's what dma-ip32.c used to use */ -static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr) +static unsigned long plat_dma_addr_to_phys(struct device *dev, + dma_addr_t dma_addr) { unsigned long addr = dma_addr & RAM_OFFSET_MASK; @@ -60,7 +61,8 @@ static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr) return addr; } -static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr) +static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction direction) { } diff --git a/arch/mips/include/asm/mach-jazz/dma-coherence.h b/arch/mips/include/asm/mach-jazz/dma-coherence.h index 1c7cd27..f93aee5 100644 --- a/arch/mips/include/asm/mach-jazz/dma-coherence.h +++ b/arch/mips/include/asm/mach-jazz/dma-coherence.h @@ -22,12 +22,14 @@ static dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page) return vdma_alloc(page_to_phys(page), PAGE_SIZE); } -static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr) +static unsigned long plat_dma_addr_to_phys(struct device *dev, + dma_addr_t dma_addr) { return vdma_log2phys(dma_addr); } -static void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr) +static void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction direction) { vdma_free(dma_addr); } diff --git a/arch/mips/include/asm/mach-lemote/dma-coherence.h b/arch/mips/include/asm/mach-lemote/dma-coherence.h index 38fad7d..c8de5e7 100644 --- a/arch/mips/include/asm/mach-lemote/dma-coherence.h +++ b/arch/mips/include/asm/mach-lemote/dma-coherence.h @@ -25,12 +25,14 @@ static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, return page_to_phys(page) | 0x80000000; } -static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr) +static inline unsigned long plat_dma_addr_to_phys(struct device *dev, + dma_addr_t dma_addr) { return dma_addr & 0x7fffffff; } -static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr) +static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction direction) { } diff --git a/arch/mips/include/asm/mach-rc32434/cpu-feature-overrides.h b/arch/mips/include/asm/mach-rc32434/cpu-feature-overrides.h index f3bc7ef..c3e4d3a 100644 --- a/arch/mips/include/asm/mach-rc32434/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-rc32434/cpu-feature-overrides.h @@ -53,11 +53,6 @@ #define cpu_has_smartmips 0 #define cpu_has_vtag_icache 0 -/* #define cpu_has_dc_aliases ? */ -/* #define cpu_has_ic_fills_f_dc ? */ -/* #define cpu_has_pindexed_dcache ? */ - -/* #define cpu_icache_snoops_remote_store ? */ #define cpu_has_mips32r1 1 #define cpu_has_mips32r2 0 diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 32ef8be..a581d60 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -220,6 +220,22 @@ #error Bad page size configuration! 
#endif +/* + * Default huge tlb size for a given kernel configuration + */ +#ifdef CONFIG_PAGE_SIZE_4KB +#define PM_HUGE_MASK PM_1M +#elif defined(CONFIG_PAGE_SIZE_8KB) +#define PM_HUGE_MASK PM_4M +#elif defined(CONFIG_PAGE_SIZE_16KB) +#define PM_HUGE_MASK PM_16M +#elif defined(CONFIG_PAGE_SIZE_32KB) +#define PM_HUGE_MASK PM_64M +#elif defined(CONFIG_PAGE_SIZE_64KB) +#define PM_HUGE_MASK PM_256M +#elif defined(CONFIG_HUGETLB_PAGE) +#error Bad page size configuration for hugetlbfs! +#endif /* * Values used for computation of new tlb entries diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h index 692989a..f3c23a4 100644 --- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h +++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h @@ -157,6 +157,13 @@ enum cvmx_board_types_enum { CVMX_BOARD_TYPE_NIC_XLE_4G = 21, CVMX_BOARD_TYPE_EBT5600 = 22, CVMX_BOARD_TYPE_EBH5201 = 23, + CVMX_BOARD_TYPE_EBT5200 = 24, + CVMX_BOARD_TYPE_CB5600 = 25, + CVMX_BOARD_TYPE_CB5601 = 26, + CVMX_BOARD_TYPE_CB5200 = 27, + /* Special 'generic' board type, supports many boards */ + CVMX_BOARD_TYPE_GENERIC = 28, + CVMX_BOARD_TYPE_EBH5610 = 29, CVMX_BOARD_TYPE_MAX, /* @@ -228,6 +235,12 @@ static inline const char *cvmx_board_type_to_string(enum ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC_XLE_4G) ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5600) ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5201) + ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5200) + ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5600) + ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5601) + ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5200) + ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_GENERIC) + ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5610) ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MAX) /* Customer boards listed here */ diff --git a/arch/mips/include/asm/octeon/cvmx-bootmem.h b/arch/mips/include/asm/octeon/cvmx-bootmem.h index 1cbe4b5..8e708bd 100644 --- a/arch/mips/include/asm/octeon/cvmx-bootmem.h +++ b/arch/mips/include/asm/octeon/cvmx-bootmem.h @@ -183,6 +183,64 @@ extern void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment, * Returns 0 on failure, * !0 on success */ + + +/** + * Allocate a block of memory from the free list that was passed + * to the application by the bootloader, and assign it a name in the + * global named block table. (part of the cvmx_bootmem_descriptor_t structure) + * Named blocks can later be freed. + * + * @size: Size in bytes of block to allocate + * @alignment: Alignment required - must be power of 2 + * @name: name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes + * + * Returns a pointer to block of memory, NULL on error + */ +extern void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment, + char *name); + + + +/** + * Allocate a block of memory from the free list that was passed + * to the application by the bootloader, and assign it a name in the + * global named block table. (part of the cvmx_bootmem_descriptor_t structure) + * Named blocks can later be freed. + * + * @size: Size in bytes of block to allocate + * @address: Physical address to allocate memory at. If this + * memory is not available, the allocation fails. 
+ * @name: name of block - must be less than CVMX_BOOTMEM_NAME_LEN + * bytes + * + * Returns a pointer to block of memory, NULL on error + */ +extern void *cvmx_bootmem_alloc_named_address(uint64_t size, uint64_t address, + char *name); + + + +/** + * Allocate a block of memory from a specific range of the free list + * that was passed to the application by the bootloader, and assign it + * a name in the global named block table. (part of the + * cvmx_bootmem_descriptor_t structure) Named blocks can later be + * freed. If the request cannot be satisfied within the address range + * specified, NULL is returned. + * + * @size: Size in bytes of block to allocate + * @min_addr: minimum address of range + * @max_addr: maximum address of range + * @align: Alignment of memory to be allocated. (must be a power of 2) + * @name: name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes + * + * Returns a pointer to block of memory, NULL on error + */ +extern void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, + uint64_t max_addr, uint64_t align, + char *name); + extern int cvmx_bootmem_free_named(char *name); /** @@ -224,6 +282,33 @@ int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min, uint32_t flags); /** + * Allocates a named block of physical memory from the free list, at + * (optional) requested address and alignment. + * + * @param size size of region to allocate. All requests are rounded + * up to be a multiple of CVMX_BOOTMEM_ALIGNMENT_SIZE + * bytes in size + * @param min_addr Minimum address that block can occupy. + * @param max_addr Specifies the maximum address (inclusive) that + * the allocation can use. + * @param alignment Requested alignment of the block. If this + * alignment cannot be met, the allocation fails. + * This must be a power of 2. (Note: Alignment of + * CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and + * internally enforced. Requested alignments of less + * than CVMX_BOOTMEM_ALIGNMENT_SIZE are set to + * CVMX_BOOTMEM_ALIGNMENT_SIZE.) + * @param name name to assign to named block + * @param flags Flags to control options for the allocation. + * + * @return physical address of block allocated, or -1 on failure + */ +int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr, + uint64_t max_addr, + uint64_t alignment, + char *name, uint32_t flags); + +/** + * Finds a named memory block by name. + * Also used for finding an unused entry in the named block table. + * diff --git a/arch/mips/include/asm/octeon/cvmx-helper-errata.h b/arch/mips/include/asm/octeon/cvmx-helper-errata.h new file mode 100644 index 0000000..5fc9918 --- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-helper-errata.h @@ -0,0 +1,33 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_HELPER_ERRATA_H__ +#define __CVMX_HELPER_ERRATA_H__ + +extern void __cvmx_helper_errata_qlm_disable_2nd_order_cdr(int qlm); + +#endif diff --git a/arch/mips/include/asm/octeon/cvmx-helper-jtag.h b/arch/mips/include/asm/octeon/cvmx-helper-jtag.h new file mode 100644 index 0000000..29f016d --- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-helper-jtag.h @@ -0,0 +1,43 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/** + * @file + * + * Helper utilities for qlm_jtag. + * + */ + +#ifndef __CVMX_HELPER_JTAG_H__ +#define __CVMX_HELPER_JTAG_H__ + +extern void cvmx_helper_qlm_jtag_init(void); +extern uint32_t cvmx_helper_qlm_jtag_shift(int qlm, int bits, uint32_t data); +extern void cvmx_helper_qlm_jtag_shift_zeros(int qlm, int bits); +extern void cvmx_helper_qlm_jtag_update(int qlm); + +#endif /* __CVMX_HELPER_JTAG_H__ */ diff --git a/arch/mips/include/asm/octeon/cvmx-npei-defs.h b/arch/mips/include/asm/octeon/cvmx-npei-defs.h new file mode 100644 index 0000000..4b347bb --- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-npei-defs.h @@ -0,0 +1,2560 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_NPEI_DEFS_H__ +#define __CVMX_NPEI_DEFS_H__ + +#define CVMX_NPEI_BAR1_INDEXX(offset) \ + (0x0000000000000000ull + (((offset) & 31) * 16)) +#define CVMX_NPEI_BIST_STATUS \ + (0x0000000000000580ull) +#define CVMX_NPEI_BIST_STATUS2 \ + (0x0000000000000680ull) +#define CVMX_NPEI_CTL_PORT0 \ + (0x0000000000000250ull) +#define CVMX_NPEI_CTL_PORT1 \ + (0x0000000000000260ull) +#define CVMX_NPEI_CTL_STATUS \ + (0x0000000000000570ull) +#define CVMX_NPEI_CTL_STATUS2 \ + (0x0000000000003C00ull) +#define CVMX_NPEI_DATA_OUT_CNT \ + (0x00000000000005F0ull) +#define CVMX_NPEI_DBG_DATA \ + (0x0000000000000510ull) +#define CVMX_NPEI_DBG_SELECT \ + (0x0000000000000500ull) +#define CVMX_NPEI_DMA0_INT_LEVEL \ + (0x00000000000005C0ull) +#define CVMX_NPEI_DMA1_INT_LEVEL \ + (0x00000000000005D0ull) +#define CVMX_NPEI_DMAX_COUNTS(offset) \ + (0x0000000000000450ull + (((offset) & 7) * 16)) +#define CVMX_NPEI_DMAX_DBELL(offset) \ + (0x00000000000003B0ull + (((offset) & 7) * 16)) +#define CVMX_NPEI_DMAX_IBUFF_SADDR(offset) \ + (0x0000000000000400ull + (((offset) & 7) * 16)) +#define CVMX_NPEI_DMAX_NADDR(offset) \ + (0x00000000000004A0ull + (((offset) & 7) * 16)) +#define CVMX_NPEI_DMA_CNTS \ + (0x00000000000005E0ull) +#define CVMX_NPEI_DMA_CONTROL \ + (0x00000000000003A0ull) +#define CVMX_NPEI_INT_A_ENB \ + (0x0000000000000560ull) +#define CVMX_NPEI_INT_A_ENB2 \ + (0x0000000000003CE0ull) +#define CVMX_NPEI_INT_A_SUM \ + (0x0000000000000550ull) +#define CVMX_NPEI_INT_ENB \ + (0x0000000000000540ull) +#define CVMX_NPEI_INT_ENB2 \ + (0x0000000000003CD0ull) +#define CVMX_NPEI_INT_INFO \ + (0x0000000000000590ull) +#define CVMX_NPEI_INT_SUM \ + (0x0000000000000530ull) +#define CVMX_NPEI_INT_SUM2 \ + (0x0000000000003CC0ull) +#define CVMX_NPEI_LAST_WIN_RDATA0 \ + (0x0000000000000600ull) +#define CVMX_NPEI_LAST_WIN_RDATA1 \ + (0x0000000000000610ull) +#define CVMX_NPEI_MEM_ACCESS_CTL \ + (0x00000000000004F0ull) +#define CVMX_NPEI_MEM_ACCESS_SUBIDX(offset) \ + (0x0000000000000340ull + (((offset) & 31) * 16) - 16 * 12) +#define CVMX_NPEI_MSI_ENB0 \ + (0x0000000000003C50ull) +#define CVMX_NPEI_MSI_ENB1 \ + (0x0000000000003C60ull) +#define CVMX_NPEI_MSI_ENB2 \ + (0x0000000000003C70ull) +#define CVMX_NPEI_MSI_ENB3 \ + (0x0000000000003C80ull) +#define CVMX_NPEI_MSI_RCV0 \ + (0x0000000000003C10ull) +#define CVMX_NPEI_MSI_RCV1 \ + (0x0000000000003C20ull) +#define CVMX_NPEI_MSI_RCV2 \ + (0x0000000000003C30ull) +#define CVMX_NPEI_MSI_RCV3 \ + (0x0000000000003C40ull) +#define CVMX_NPEI_MSI_RD_MAP \ + (0x0000000000003CA0ull) +#define CVMX_NPEI_MSI_W1C_ENB0 \ + (0x0000000000003CF0ull) +#define CVMX_NPEI_MSI_W1C_ENB1 \ + (0x0000000000003D00ull) +#define CVMX_NPEI_MSI_W1C_ENB2 \ + (0x0000000000003D10ull) +#define CVMX_NPEI_MSI_W1C_ENB3 \ + (0x0000000000003D20ull) +#define CVMX_NPEI_MSI_W1S_ENB0 \ + (0x0000000000003D30ull) +#define CVMX_NPEI_MSI_W1S_ENB1 \ + (0x0000000000003D40ull) +#define CVMX_NPEI_MSI_W1S_ENB2 \ + (0x0000000000003D50ull) +#define CVMX_NPEI_MSI_W1S_ENB3 \ + (0x0000000000003D60ull) +#define CVMX_NPEI_MSI_WR_MAP \ + (0x0000000000003C90ull) +#define 
CVMX_NPEI_PCIE_CREDIT_CNT \ + (0x0000000000003D70ull) +#define CVMX_NPEI_PCIE_MSI_RCV \ + (0x0000000000003CB0ull) +#define CVMX_NPEI_PCIE_MSI_RCV_B1 \ + (0x0000000000000650ull) +#define CVMX_NPEI_PCIE_MSI_RCV_B2 \ + (0x0000000000000660ull) +#define CVMX_NPEI_PCIE_MSI_RCV_B3 \ + (0x0000000000000670ull) +#define CVMX_NPEI_PKTX_CNTS(offset) \ + (0x0000000000002400ull + (((offset) & 31) * 16)) +#define CVMX_NPEI_PKTX_INSTR_BADDR(offset) \ + (0x0000000000002800ull + (((offset) & 31) * 16)) +#define CVMX_NPEI_PKTX_INSTR_BAOFF_DBELL(offset) \ + (0x0000000000002C00ull + (((offset) & 31) * 16)) +#define CVMX_NPEI_PKTX_INSTR_FIFO_RSIZE(offset) \ + (0x0000000000003000ull + (((offset) & 31) * 16)) +#define CVMX_NPEI_PKTX_INSTR_HEADER(offset) \ + (0x0000000000003400ull + (((offset) & 31) * 16)) +#define CVMX_NPEI_PKTX_IN_BP(offset) \ + (0x0000000000003800ull + (((offset) & 31) * 16)) +#define CVMX_NPEI_PKTX_SLIST_BADDR(offset) \ + (0x0000000000001400ull + (((offset) & 31) * 16)) +#define CVMX_NPEI_PKTX_SLIST_BAOFF_DBELL(offset) \ + (0x0000000000001800ull + (((offset) & 31) * 16)) +#define CVMX_NPEI_PKTX_SLIST_FIFO_RSIZE(offset) \ + (0x0000000000001C00ull + (((offset) & 31) * 16)) +#define CVMX_NPEI_PKT_CNT_INT \ + (0x0000000000001110ull) +#define CVMX_NPEI_PKT_CNT_INT_ENB \ + (0x0000000000001130ull) +#define CVMX_NPEI_PKT_DATA_OUT_ES \ + (0x00000000000010B0ull) +#define CVMX_NPEI_PKT_DATA_OUT_NS \ + (0x00000000000010A0ull) +#define CVMX_NPEI_PKT_DATA_OUT_ROR \ + (0x0000000000001090ull) +#define CVMX_NPEI_PKT_DPADDR \ + (0x0000000000001080ull) +#define CVMX_NPEI_PKT_INPUT_CONTROL \ + (0x0000000000001150ull) +#define CVMX_NPEI_PKT_INSTR_ENB \ + (0x0000000000001000ull) +#define CVMX_NPEI_PKT_INSTR_RD_SIZE \ + (0x0000000000001190ull) +#define CVMX_NPEI_PKT_INSTR_SIZE \ + (0x0000000000001020ull) +#define CVMX_NPEI_PKT_INT_LEVELS \ + (0x0000000000001100ull) +#define CVMX_NPEI_PKT_IN_BP \ + (0x00000000000006B0ull) +#define CVMX_NPEI_PKT_IN_DONEX_CNTS(offset) \ + (0x0000000000002000ull + (((offset) & 31) * 16)) +#define CVMX_NPEI_PKT_IN_INSTR_COUNTS \ + (0x00000000000006A0ull) +#define CVMX_NPEI_PKT_IN_PCIE_PORT \ + (0x00000000000011A0ull) +#define CVMX_NPEI_PKT_IPTR \ + (0x0000000000001070ull) +#define CVMX_NPEI_PKT_OUTPUT_WMARK \ + (0x0000000000001160ull) +#define CVMX_NPEI_PKT_OUT_BMODE \ + (0x00000000000010D0ull) +#define CVMX_NPEI_PKT_OUT_ENB \ + (0x0000000000001010ull) +#define CVMX_NPEI_PKT_PCIE_PORT \ + (0x00000000000010E0ull) +#define CVMX_NPEI_PKT_PORT_IN_RST \ + (0x0000000000000690ull) +#define CVMX_NPEI_PKT_SLIST_ES \ + (0x0000000000001050ull) +#define CVMX_NPEI_PKT_SLIST_ID_SIZE \ + (0x0000000000001180ull) +#define CVMX_NPEI_PKT_SLIST_NS \ + (0x0000000000001040ull) +#define CVMX_NPEI_PKT_SLIST_ROR \ + (0x0000000000001030ull) +#define CVMX_NPEI_PKT_TIME_INT \ + (0x0000000000001120ull) +#define CVMX_NPEI_PKT_TIME_INT_ENB \ + (0x0000000000001140ull) +#define CVMX_NPEI_RSL_INT_BLOCKS \ + (0x0000000000000520ull) +#define CVMX_NPEI_SCRATCH_1 \ + (0x0000000000000270ull) +#define CVMX_NPEI_STATE1 \ + (0x0000000000000620ull) +#define CVMX_NPEI_STATE2 \ + (0x0000000000000630ull) +#define CVMX_NPEI_STATE3 \ + (0x0000000000000640ull) +#define CVMX_NPEI_WINDOW_CTL \ + (0x0000000000000380ull) +#define CVMX_NPEI_WIN_RD_ADDR \ + (0x0000000000000210ull) +#define CVMX_NPEI_WIN_RD_DATA \ + (0x0000000000000240ull) +#define CVMX_NPEI_WIN_WR_ADDR \ + (0x0000000000000200ull) +#define CVMX_NPEI_WIN_WR_DATA \ + (0x0000000000000220ull) +#define CVMX_NPEI_WIN_WR_MASK \ + (0x0000000000000230ull) + +union 
cvmx_npei_bar1_indexx { + uint32_t u32; + struct cvmx_npei_bar1_indexx_s { + uint32_t reserved_18_31:14; + uint32_t addr_idx:14; + uint32_t ca:1; + uint32_t end_swp:2; + uint32_t addr_v:1; + } s; + struct cvmx_npei_bar1_indexx_s cn52xx; + struct cvmx_npei_bar1_indexx_s cn52xxp1; + struct cvmx_npei_bar1_indexx_s cn56xx; + struct cvmx_npei_bar1_indexx_s cn56xxp1; +}; + +union cvmx_npei_bist_status { + uint64_t u64; + struct cvmx_npei_bist_status_s { + uint64_t pkt_rdf:1; + uint64_t pkt_pmem:1; + uint64_t pkt_p1:1; + uint64_t reserved_60_60:1; + uint64_t pcr_gim:1; + uint64_t pkt_pif:1; + uint64_t pcsr_int:1; + uint64_t pcsr_im:1; + uint64_t pcsr_cnt:1; + uint64_t pcsr_id:1; + uint64_t pcsr_sl:1; + uint64_t reserved_50_52:3; + uint64_t pkt_ind:1; + uint64_t pkt_slm:1; + uint64_t reserved_36_47:12; + uint64_t d0_pst:1; + uint64_t d1_pst:1; + uint64_t d2_pst:1; + uint64_t d3_pst:1; + uint64_t reserved_31_31:1; + uint64_t n2p0_c:1; + uint64_t n2p0_o:1; + uint64_t n2p1_c:1; + uint64_t n2p1_o:1; + uint64_t cpl_p0:1; + uint64_t cpl_p1:1; + uint64_t p2n1_po:1; + uint64_t p2n1_no:1; + uint64_t p2n1_co:1; + uint64_t p2n0_po:1; + uint64_t p2n0_no:1; + uint64_t p2n0_co:1; + uint64_t p2n0_c0:1; + uint64_t p2n0_c1:1; + uint64_t p2n0_n:1; + uint64_t p2n0_p0:1; + uint64_t p2n0_p1:1; + uint64_t p2n1_c0:1; + uint64_t p2n1_c1:1; + uint64_t p2n1_n:1; + uint64_t p2n1_p0:1; + uint64_t p2n1_p1:1; + uint64_t csm0:1; + uint64_t csm1:1; + uint64_t dif0:1; + uint64_t dif1:1; + uint64_t dif2:1; + uint64_t dif3:1; + uint64_t reserved_2_2:1; + uint64_t msi:1; + uint64_t ncb_cmd:1; + } s; + struct cvmx_npei_bist_status_cn52xx { + uint64_t pkt_rdf:1; + uint64_t pkt_pmem:1; + uint64_t pkt_p1:1; + uint64_t reserved_60_60:1; + uint64_t pcr_gim:1; + uint64_t pkt_pif:1; + uint64_t pcsr_int:1; + uint64_t pcsr_im:1; + uint64_t pcsr_cnt:1; + uint64_t pcsr_id:1; + uint64_t pcsr_sl:1; + uint64_t pkt_imem:1; + uint64_t pkt_pfm:1; + uint64_t pkt_pof:1; + uint64_t reserved_48_49:2; + uint64_t pkt_pop0:1; + uint64_t pkt_pop1:1; + uint64_t d0_mem:1; + uint64_t d1_mem:1; + uint64_t d2_mem:1; + uint64_t d3_mem:1; + uint64_t d4_mem:1; + uint64_t ds_mem:1; + uint64_t reserved_36_39:4; + uint64_t d0_pst:1; + uint64_t d1_pst:1; + uint64_t d2_pst:1; + uint64_t d3_pst:1; + uint64_t d4_pst:1; + uint64_t n2p0_c:1; + uint64_t n2p0_o:1; + uint64_t n2p1_c:1; + uint64_t n2p1_o:1; + uint64_t cpl_p0:1; + uint64_t cpl_p1:1; + uint64_t p2n1_po:1; + uint64_t p2n1_no:1; + uint64_t p2n1_co:1; + uint64_t p2n0_po:1; + uint64_t p2n0_no:1; + uint64_t p2n0_co:1; + uint64_t p2n0_c0:1; + uint64_t p2n0_c1:1; + uint64_t p2n0_n:1; + uint64_t p2n0_p0:1; + uint64_t p2n0_p1:1; + uint64_t p2n1_c0:1; + uint64_t p2n1_c1:1; + uint64_t p2n1_n:1; + uint64_t p2n1_p0:1; + uint64_t p2n1_p1:1; + uint64_t csm0:1; + uint64_t csm1:1; + uint64_t dif0:1; + uint64_t dif1:1; + uint64_t dif2:1; + uint64_t dif3:1; + uint64_t dif4:1; + uint64_t msi:1; + uint64_t ncb_cmd:1; + } cn52xx; + struct cvmx_npei_bist_status_cn52xxp1 { + uint64_t reserved_46_63:18; + uint64_t d0_mem0:1; + uint64_t d1_mem1:1; + uint64_t d2_mem2:1; + uint64_t d3_mem3:1; + uint64_t dr0_mem:1; + uint64_t d0_mem:1; + uint64_t d1_mem:1; + uint64_t d2_mem:1; + uint64_t d3_mem:1; + uint64_t dr1_mem:1; + uint64_t d0_pst:1; + uint64_t d1_pst:1; + uint64_t d2_pst:1; + uint64_t d3_pst:1; + uint64_t dr2_mem:1; + uint64_t n2p0_c:1; + uint64_t n2p0_o:1; + uint64_t n2p1_c:1; + uint64_t n2p1_o:1; + uint64_t cpl_p0:1; + uint64_t cpl_p1:1; + uint64_t p2n1_po:1; + uint64_t p2n1_no:1; + uint64_t p2n1_co:1; + uint64_t p2n0_po:1; + 
uint64_t p2n0_no:1; + uint64_t p2n0_co:1; + uint64_t p2n0_c0:1; + uint64_t p2n0_c1:1; + uint64_t p2n0_n:1; + uint64_t p2n0_p0:1; + uint64_t p2n0_p1:1; + uint64_t p2n1_c0:1; + uint64_t p2n1_c1:1; + uint64_t p2n1_n:1; + uint64_t p2n1_p0:1; + uint64_t p2n1_p1:1; + uint64_t csm0:1; + uint64_t csm1:1; + uint64_t dif0:1; + uint64_t dif1:1; + uint64_t dif2:1; + uint64_t dif3:1; + uint64_t dr3_mem:1; + uint64_t msi:1; + uint64_t ncb_cmd:1; + } cn52xxp1; + struct cvmx_npei_bist_status_cn56xx { + uint64_t pkt_rdf:1; + uint64_t reserved_60_62:3; + uint64_t pcr_gim:1; + uint64_t pkt_pif:1; + uint64_t pcsr_int:1; + uint64_t pcsr_im:1; + uint64_t pcsr_cnt:1; + uint64_t pcsr_id:1; + uint64_t pcsr_sl:1; + uint64_t pkt_imem:1; + uint64_t pkt_pfm:1; + uint64_t pkt_pof:1; + uint64_t reserved_48_49:2; + uint64_t pkt_pop0:1; + uint64_t pkt_pop1:1; + uint64_t d0_mem:1; + uint64_t d1_mem:1; + uint64_t d2_mem:1; + uint64_t d3_mem:1; + uint64_t d4_mem:1; + uint64_t ds_mem:1; + uint64_t reserved_36_39:4; + uint64_t d0_pst:1; + uint64_t d1_pst:1; + uint64_t d2_pst:1; + uint64_t d3_pst:1; + uint64_t d4_pst:1; + uint64_t n2p0_c:1; + uint64_t n2p0_o:1; + uint64_t n2p1_c:1; + uint64_t n2p1_o:1; + uint64_t cpl_p0:1; + uint64_t cpl_p1:1; + uint64_t p2n1_po:1; + uint64_t p2n1_no:1; + uint64_t p2n1_co:1; + uint64_t p2n0_po:1; + uint64_t p2n0_no:1; + uint64_t p2n0_co:1; + uint64_t p2n0_c0:1; + uint64_t p2n0_c1:1; + uint64_t p2n0_n:1; + uint64_t p2n0_p0:1; + uint64_t p2n0_p1:1; + uint64_t p2n1_c0:1; + uint64_t p2n1_c1:1; + uint64_t p2n1_n:1; + uint64_t p2n1_p0:1; + uint64_t p2n1_p1:1; + uint64_t csm0:1; + uint64_t csm1:1; + uint64_t dif0:1; + uint64_t dif1:1; + uint64_t dif2:1; + uint64_t dif3:1; + uint64_t dif4:1; + uint64_t msi:1; + uint64_t ncb_cmd:1; + } cn56xx; + struct cvmx_npei_bist_status_cn56xxp1 { + uint64_t reserved_58_63:6; + uint64_t pcsr_int:1; + uint64_t pcsr_im:1; + uint64_t pcsr_cnt:1; + uint64_t pcsr_id:1; + uint64_t pcsr_sl:1; + uint64_t pkt_pout:1; + uint64_t pkt_imem:1; + uint64_t pkt_cntm:1; + uint64_t pkt_ind:1; + uint64_t pkt_slm:1; + uint64_t pkt_odf:1; + uint64_t pkt_oif:1; + uint64_t pkt_out:1; + uint64_t pkt_i0:1; + uint64_t pkt_i1:1; + uint64_t pkt_s0:1; + uint64_t pkt_s1:1; + uint64_t d0_mem:1; + uint64_t d1_mem:1; + uint64_t d2_mem:1; + uint64_t d3_mem:1; + uint64_t d4_mem:1; + uint64_t d0_pst:1; + uint64_t d1_pst:1; + uint64_t d2_pst:1; + uint64_t d3_pst:1; + uint64_t d4_pst:1; + uint64_t n2p0_c:1; + uint64_t n2p0_o:1; + uint64_t n2p1_c:1; + uint64_t n2p1_o:1; + uint64_t cpl_p0:1; + uint64_t cpl_p1:1; + uint64_t p2n1_po:1; + uint64_t p2n1_no:1; + uint64_t p2n1_co:1; + uint64_t p2n0_po:1; + uint64_t p2n0_no:1; + uint64_t p2n0_co:1; + uint64_t p2n0_c0:1; + uint64_t p2n0_c1:1; + uint64_t p2n0_n:1; + uint64_t p2n0_p0:1; + uint64_t p2n0_p1:1; + uint64_t p2n1_c0:1; + uint64_t p2n1_c1:1; + uint64_t p2n1_n:1; + uint64_t p2n1_p0:1; + uint64_t p2n1_p1:1; + uint64_t csm0:1; + uint64_t csm1:1; + uint64_t dif0:1; + uint64_t dif1:1; + uint64_t dif2:1; + uint64_t dif3:1; + uint64_t dif4:1; + uint64_t msi:1; + uint64_t ncb_cmd:1; + } cn56xxp1; +}; + +union cvmx_npei_bist_status2 { + uint64_t u64; + struct cvmx_npei_bist_status2_s { + uint64_t reserved_5_63:59; + uint64_t psc_p0:1; + uint64_t psc_p1:1; + uint64_t pkt_gd:1; + uint64_t pkt_gl:1; + uint64_t pkt_blk:1; + } s; + struct cvmx_npei_bist_status2_s cn52xx; + struct cvmx_npei_bist_status2_s cn56xx; +}; + +union cvmx_npei_ctl_port0 { + uint64_t u64; + struct cvmx_npei_ctl_port0_s { + uint64_t reserved_21_63:43; + uint64_t waitl_com:1; + uint64_t intd:1; + 
uint64_t intc:1; + uint64_t intb:1; + uint64_t inta:1; + uint64_t intd_map:2; + uint64_t intc_map:2; + uint64_t intb_map:2; + uint64_t inta_map:2; + uint64_t ctlp_ro:1; + uint64_t reserved_6_6:1; + uint64_t ptlp_ro:1; + uint64_t bar2_enb:1; + uint64_t bar2_esx:2; + uint64_t bar2_cax:1; + uint64_t wait_com:1; + } s; + struct cvmx_npei_ctl_port0_s cn52xx; + struct cvmx_npei_ctl_port0_s cn52xxp1; + struct cvmx_npei_ctl_port0_s cn56xx; + struct cvmx_npei_ctl_port0_s cn56xxp1; +}; + +union cvmx_npei_ctl_port1 { + uint64_t u64; + struct cvmx_npei_ctl_port1_s { + uint64_t reserved_21_63:43; + uint64_t waitl_com:1; + uint64_t intd:1; + uint64_t intc:1; + uint64_t intb:1; + uint64_t inta:1; + uint64_t intd_map:2; + uint64_t intc_map:2; + uint64_t intb_map:2; + uint64_t inta_map:2; + uint64_t ctlp_ro:1; + uint64_t reserved_6_6:1; + uint64_t ptlp_ro:1; + uint64_t bar2_enb:1; + uint64_t bar2_esx:2; + uint64_t bar2_cax:1; + uint64_t wait_com:1; + } s; + struct cvmx_npei_ctl_port1_s cn52xx; + struct cvmx_npei_ctl_port1_s cn52xxp1; + struct cvmx_npei_ctl_port1_s cn56xx; + struct cvmx_npei_ctl_port1_s cn56xxp1; +}; + +union cvmx_npei_ctl_status { + uint64_t u64; + struct cvmx_npei_ctl_status_s { + uint64_t reserved_44_63:20; + uint64_t p1_ntags:6; + uint64_t p0_ntags:6; + uint64_t cfg_rtry:16; + uint64_t ring_en:1; + uint64_t lnk_rst:1; + uint64_t arb:1; + uint64_t pkt_bp:4; + uint64_t host_mode:1; + uint64_t chip_rev:8; + } s; + struct cvmx_npei_ctl_status_s cn52xx; + struct cvmx_npei_ctl_status_cn52xxp1 { + uint64_t reserved_44_63:20; + uint64_t p1_ntags:6; + uint64_t p0_ntags:6; + uint64_t cfg_rtry:16; + uint64_t reserved_15_15:1; + uint64_t lnk_rst:1; + uint64_t arb:1; + uint64_t reserved_9_12:4; + uint64_t host_mode:1; + uint64_t chip_rev:8; + } cn52xxp1; + struct cvmx_npei_ctl_status_s cn56xx; + struct cvmx_npei_ctl_status_cn56xxp1 { + uint64_t reserved_16_63:48; + uint64_t ring_en:1; + uint64_t lnk_rst:1; + uint64_t arb:1; + uint64_t pkt_bp:4; + uint64_t host_mode:1; + uint64_t chip_rev:8; + } cn56xxp1; +}; + +union cvmx_npei_ctl_status2 { + uint64_t u64; + struct cvmx_npei_ctl_status2_s { + uint64_t reserved_16_63:48; + uint64_t mps:1; + uint64_t mrrs:3; + uint64_t c1_w_flt:1; + uint64_t c0_w_flt:1; + uint64_t c1_b1_s:3; + uint64_t c0_b1_s:3; + uint64_t c1_wi_d:1; + uint64_t c1_b0_d:1; + uint64_t c0_wi_d:1; + uint64_t c0_b0_d:1; + } s; + struct cvmx_npei_ctl_status2_s cn52xx; + struct cvmx_npei_ctl_status2_s cn52xxp1; + struct cvmx_npei_ctl_status2_s cn56xx; + struct cvmx_npei_ctl_status2_s cn56xxp1; +}; + +union cvmx_npei_data_out_cnt { + uint64_t u64; + struct cvmx_npei_data_out_cnt_s { + uint64_t reserved_44_63:20; + uint64_t p1_ucnt:16; + uint64_t p1_fcnt:6; + uint64_t p0_ucnt:16; + uint64_t p0_fcnt:6; + } s; + struct cvmx_npei_data_out_cnt_s cn52xx; + struct cvmx_npei_data_out_cnt_s cn52xxp1; + struct cvmx_npei_data_out_cnt_s cn56xx; + struct cvmx_npei_data_out_cnt_s cn56xxp1; +}; + +union cvmx_npei_dbg_data { + uint64_t u64; + struct cvmx_npei_dbg_data_s { + uint64_t reserved_28_63:36; + uint64_t qlm0_rev_lanes:1; + uint64_t reserved_25_26:2; + uint64_t qlm1_spd:2; + uint64_t c_mul:5; + uint64_t dsel_ext:1; + uint64_t data:17; + } s; + struct cvmx_npei_dbg_data_cn52xx { + uint64_t reserved_29_63:35; + uint64_t qlm0_link_width:1; + uint64_t qlm0_rev_lanes:1; + uint64_t qlm1_mode:2; + uint64_t qlm1_spd:2; + uint64_t c_mul:5; + uint64_t dsel_ext:1; + uint64_t data:17; + } cn52xx; + struct cvmx_npei_dbg_data_cn52xx cn52xxp1; + struct cvmx_npei_dbg_data_cn56xx { + uint64_t reserved_29_63:35; + 
uint64_t qlm2_rev_lanes:1; + uint64_t qlm0_rev_lanes:1; + uint64_t qlm3_spd:2; + uint64_t qlm1_spd:2; + uint64_t c_mul:5; + uint64_t dsel_ext:1; + uint64_t data:17; + } cn56xx; + struct cvmx_npei_dbg_data_cn56xx cn56xxp1; +}; + +union cvmx_npei_dbg_select { + uint64_t u64; + struct cvmx_npei_dbg_select_s { + uint64_t reserved_16_63:48; + uint64_t dbg_sel:16; + } s; + struct cvmx_npei_dbg_select_s cn52xx; + struct cvmx_npei_dbg_select_s cn52xxp1; + struct cvmx_npei_dbg_select_s cn56xx; + struct cvmx_npei_dbg_select_s cn56xxp1; +}; + +union cvmx_npei_dmax_counts { + uint64_t u64; + struct cvmx_npei_dmax_counts_s { + uint64_t reserved_39_63:25; + uint64_t fcnt:7; + uint64_t dbell:32; + } s; + struct cvmx_npei_dmax_counts_s cn52xx; + struct cvmx_npei_dmax_counts_s cn52xxp1; + struct cvmx_npei_dmax_counts_s cn56xx; + struct cvmx_npei_dmax_counts_s cn56xxp1; +}; + +union cvmx_npei_dmax_dbell { + uint32_t u32; + struct cvmx_npei_dmax_dbell_s { + uint32_t reserved_16_31:16; + uint32_t dbell:16; + } s; + struct cvmx_npei_dmax_dbell_s cn52xx; + struct cvmx_npei_dmax_dbell_s cn52xxp1; + struct cvmx_npei_dmax_dbell_s cn56xx; + struct cvmx_npei_dmax_dbell_s cn56xxp1; +}; + +union cvmx_npei_dmax_ibuff_saddr { + uint64_t u64; + struct cvmx_npei_dmax_ibuff_saddr_s { + uint64_t reserved_37_63:27; + uint64_t idle:1; + uint64_t saddr:29; + uint64_t reserved_0_6:7; + } s; + struct cvmx_npei_dmax_ibuff_saddr_cn52xx { + uint64_t reserved_36_63:28; + uint64_t saddr:29; + uint64_t reserved_0_6:7; + } cn52xx; + struct cvmx_npei_dmax_ibuff_saddr_cn52xx cn52xxp1; + struct cvmx_npei_dmax_ibuff_saddr_s cn56xx; + struct cvmx_npei_dmax_ibuff_saddr_cn52xx cn56xxp1; +}; + +union cvmx_npei_dmax_naddr { + uint64_t u64; + struct cvmx_npei_dmax_naddr_s { + uint64_t reserved_36_63:28; + uint64_t addr:36; + } s; + struct cvmx_npei_dmax_naddr_s cn52xx; + struct cvmx_npei_dmax_naddr_s cn52xxp1; + struct cvmx_npei_dmax_naddr_s cn56xx; + struct cvmx_npei_dmax_naddr_s cn56xxp1; +}; + +union cvmx_npei_dma0_int_level { + uint64_t u64; + struct cvmx_npei_dma0_int_level_s { + uint64_t time:32; + uint64_t cnt:32; + } s; + struct cvmx_npei_dma0_int_level_s cn52xx; + struct cvmx_npei_dma0_int_level_s cn52xxp1; + struct cvmx_npei_dma0_int_level_s cn56xx; + struct cvmx_npei_dma0_int_level_s cn56xxp1; +}; + +union cvmx_npei_dma1_int_level { + uint64_t u64; + struct cvmx_npei_dma1_int_level_s { + uint64_t time:32; + uint64_t cnt:32; + } s; + struct cvmx_npei_dma1_int_level_s cn52xx; + struct cvmx_npei_dma1_int_level_s cn52xxp1; + struct cvmx_npei_dma1_int_level_s cn56xx; + struct cvmx_npei_dma1_int_level_s cn56xxp1; +}; + +union cvmx_npei_dma_cnts { + uint64_t u64; + struct cvmx_npei_dma_cnts_s { + uint64_t dma1:32; + uint64_t dma0:32; + } s; + struct cvmx_npei_dma_cnts_s cn52xx; + struct cvmx_npei_dma_cnts_s cn52xxp1; + struct cvmx_npei_dma_cnts_s cn56xx; + struct cvmx_npei_dma_cnts_s cn56xxp1; +}; + +union cvmx_npei_dma_control { + uint64_t u64; + struct cvmx_npei_dma_control_s { + uint64_t reserved_39_63:25; + uint64_t dma4_enb:1; + uint64_t dma3_enb:1; + uint64_t dma2_enb:1; + uint64_t dma1_enb:1; + uint64_t dma0_enb:1; + uint64_t b0_lend:1; + uint64_t dwb_denb:1; + uint64_t dwb_ichk:9; + uint64_t fpa_que:3; + uint64_t o_add1:1; + uint64_t o_ro:1; + uint64_t o_ns:1; + uint64_t o_es:2; + uint64_t o_mode:1; + uint64_t csize:14; + } s; + struct cvmx_npei_dma_control_s cn52xx; + struct cvmx_npei_dma_control_cn52xxp1 { + uint64_t reserved_38_63:26; + uint64_t dma3_enb:1; + uint64_t dma2_enb:1; + uint64_t dma1_enb:1; + uint64_t dma0_enb:1; + 
uint64_t b0_lend:1; + uint64_t dwb_denb:1; + uint64_t dwb_ichk:9; + uint64_t fpa_que:3; + uint64_t o_add1:1; + uint64_t o_ro:1; + uint64_t o_ns:1; + uint64_t o_es:2; + uint64_t o_mode:1; + uint64_t csize:14; + } cn52xxp1; + struct cvmx_npei_dma_control_s cn56xx; + struct cvmx_npei_dma_control_s cn56xxp1; +}; + +union cvmx_npei_int_a_enb { + uint64_t u64; + struct cvmx_npei_int_a_enb_s { + uint64_t reserved_10_63:54; + uint64_t pout_err:1; + uint64_t pin_bp:1; + uint64_t p1_rdlk:1; + uint64_t p0_rdlk:1; + uint64_t pgl_err:1; + uint64_t pdi_err:1; + uint64_t pop_err:1; + uint64_t pins_err:1; + uint64_t dma1_cpl:1; + uint64_t dma0_cpl:1; + } s; + struct cvmx_npei_int_a_enb_cn52xx { + uint64_t reserved_8_63:56; + uint64_t p1_rdlk:1; + uint64_t p0_rdlk:1; + uint64_t pgl_err:1; + uint64_t pdi_err:1; + uint64_t pop_err:1; + uint64_t pins_err:1; + uint64_t dma1_cpl:1; + uint64_t dma0_cpl:1; + } cn52xx; + struct cvmx_npei_int_a_enb_cn52xxp1 { + uint64_t reserved_2_63:62; + uint64_t dma1_cpl:1; + uint64_t dma0_cpl:1; + } cn52xxp1; + struct cvmx_npei_int_a_enb_s cn56xx; +}; + +union cvmx_npei_int_a_enb2 { + uint64_t u64; + struct cvmx_npei_int_a_enb2_s { + uint64_t reserved_10_63:54; + uint64_t pout_err:1; + uint64_t pin_bp:1; + uint64_t p1_rdlk:1; + uint64_t p0_rdlk:1; + uint64_t pgl_err:1; + uint64_t pdi_err:1; + uint64_t pop_err:1; + uint64_t pins_err:1; + uint64_t dma1_cpl:1; + uint64_t dma0_cpl:1; + } s; + struct cvmx_npei_int_a_enb2_cn52xx { + uint64_t reserved_8_63:56; + uint64_t p1_rdlk:1; + uint64_t p0_rdlk:1; + uint64_t pgl_err:1; + uint64_t pdi_err:1; + uint64_t pop_err:1; + uint64_t pins_err:1; + uint64_t reserved_0_1:2; + } cn52xx; + struct cvmx_npei_int_a_enb2_cn52xxp1 { + uint64_t reserved_2_63:62; + uint64_t dma1_cpl:1; + uint64_t dma0_cpl:1; + } cn52xxp1; + struct cvmx_npei_int_a_enb2_s cn56xx; +}; + +union cvmx_npei_int_a_sum { + uint64_t u64; + struct cvmx_npei_int_a_sum_s { + uint64_t reserved_10_63:54; + uint64_t pout_err:1; + uint64_t pin_bp:1; + uint64_t p1_rdlk:1; + uint64_t p0_rdlk:1; + uint64_t pgl_err:1; + uint64_t pdi_err:1; + uint64_t pop_err:1; + uint64_t pins_err:1; + uint64_t dma1_cpl:1; + uint64_t dma0_cpl:1; + } s; + struct cvmx_npei_int_a_sum_cn52xx { + uint64_t reserved_8_63:56; + uint64_t p1_rdlk:1; + uint64_t p0_rdlk:1; + uint64_t pgl_err:1; + uint64_t pdi_err:1; + uint64_t pop_err:1; + uint64_t pins_err:1; + uint64_t dma1_cpl:1; + uint64_t dma0_cpl:1; + } cn52xx; + struct cvmx_npei_int_a_sum_cn52xxp1 { + uint64_t reserved_2_63:62; + uint64_t dma1_cpl:1; + uint64_t dma0_cpl:1; + } cn52xxp1; + struct cvmx_npei_int_a_sum_s cn56xx; +}; + +union cvmx_npei_int_enb { + uint64_t u64; + struct cvmx_npei_int_enb_s { + uint64_t mio_inta:1; + uint64_t reserved_62_62:1; + uint64_t int_a:1; + uint64_t c1_ldwn:1; + uint64_t c0_ldwn:1; + uint64_t c1_exc:1; + uint64_t c0_exc:1; + uint64_t c1_up_wf:1; + uint64_t c0_up_wf:1; + uint64_t c1_un_wf:1; + uint64_t c0_un_wf:1; + uint64_t c1_un_bx:1; + uint64_t c1_un_wi:1; + uint64_t c1_un_b2:1; + uint64_t c1_un_b1:1; + uint64_t c1_un_b0:1; + uint64_t c1_up_bx:1; + uint64_t c1_up_wi:1; + uint64_t c1_up_b2:1; + uint64_t c1_up_b1:1; + uint64_t c1_up_b0:1; + uint64_t c0_un_bx:1; + uint64_t c0_un_wi:1; + uint64_t c0_un_b2:1; + uint64_t c0_un_b1:1; + uint64_t c0_un_b0:1; + uint64_t c0_up_bx:1; + uint64_t c0_up_wi:1; + uint64_t c0_up_b2:1; + uint64_t c0_up_b1:1; + uint64_t c0_up_b0:1; + uint64_t c1_hpint:1; + uint64_t c1_pmei:1; + uint64_t c1_wake:1; + uint64_t crs1_dr:1; + uint64_t c1_se:1; + uint64_t crs1_er:1; + uint64_t c1_aeri:1; + 
uint64_t c0_hpint:1; + uint64_t c0_pmei:1; + uint64_t c0_wake:1; + uint64_t crs0_dr:1; + uint64_t c0_se:1; + uint64_t crs0_er:1; + uint64_t c0_aeri:1; + uint64_t ptime:1; + uint64_t pcnt:1; + uint64_t pidbof:1; + uint64_t psldbof:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t dma1fi:1; + uint64_t dma0fi:1; + uint64_t dma4dbo:1; + uint64_t dma3dbo:1; + uint64_t dma2dbo:1; + uint64_t dma1dbo:1; + uint64_t dma0dbo:1; + uint64_t iob2big:1; + uint64_t bar0_to:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } s; + struct cvmx_npei_int_enb_s cn52xx; + struct cvmx_npei_int_enb_cn52xxp1 { + uint64_t mio_inta:1; + uint64_t reserved_62_62:1; + uint64_t int_a:1; + uint64_t c1_ldwn:1; + uint64_t c0_ldwn:1; + uint64_t c1_exc:1; + uint64_t c0_exc:1; + uint64_t c1_up_wf:1; + uint64_t c0_up_wf:1; + uint64_t c1_un_wf:1; + uint64_t c0_un_wf:1; + uint64_t c1_un_bx:1; + uint64_t c1_un_wi:1; + uint64_t c1_un_b2:1; + uint64_t c1_un_b1:1; + uint64_t c1_un_b0:1; + uint64_t c1_up_bx:1; + uint64_t c1_up_wi:1; + uint64_t c1_up_b2:1; + uint64_t c1_up_b1:1; + uint64_t c1_up_b0:1; + uint64_t c0_un_bx:1; + uint64_t c0_un_wi:1; + uint64_t c0_un_b2:1; + uint64_t c0_un_b1:1; + uint64_t c0_un_b0:1; + uint64_t c0_up_bx:1; + uint64_t c0_up_wi:1; + uint64_t c0_up_b2:1; + uint64_t c0_up_b1:1; + uint64_t c0_up_b0:1; + uint64_t c1_hpint:1; + uint64_t c1_pmei:1; + uint64_t c1_wake:1; + uint64_t crs1_dr:1; + uint64_t c1_se:1; + uint64_t crs1_er:1; + uint64_t c1_aeri:1; + uint64_t c0_hpint:1; + uint64_t c0_pmei:1; + uint64_t c0_wake:1; + uint64_t crs0_dr:1; + uint64_t c0_se:1; + uint64_t crs0_er:1; + uint64_t c0_aeri:1; + uint64_t ptime:1; + uint64_t pcnt:1; + uint64_t pidbof:1; + uint64_t psldbof:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t dma1fi:1; + uint64_t dma0fi:1; + uint64_t reserved_8_8:1; + uint64_t dma3dbo:1; + uint64_t dma2dbo:1; + uint64_t dma1dbo:1; + uint64_t dma0dbo:1; + uint64_t iob2big:1; + uint64_t bar0_to:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } cn52xxp1; + struct cvmx_npei_int_enb_s cn56xx; + struct cvmx_npei_int_enb_cn56xxp1 { + uint64_t mio_inta:1; + uint64_t reserved_61_62:2; + uint64_t c1_ldwn:1; + uint64_t c0_ldwn:1; + uint64_t c1_exc:1; + uint64_t c0_exc:1; + uint64_t c1_up_wf:1; + uint64_t c0_up_wf:1; + uint64_t c1_un_wf:1; + uint64_t c0_un_wf:1; + uint64_t c1_un_bx:1; + uint64_t c1_un_wi:1; + uint64_t c1_un_b2:1; + uint64_t c1_un_b1:1; + uint64_t c1_un_b0:1; + uint64_t c1_up_bx:1; + uint64_t c1_up_wi:1; + uint64_t c1_up_b2:1; + uint64_t c1_up_b1:1; + uint64_t c1_up_b0:1; + uint64_t c0_un_bx:1; + uint64_t c0_un_wi:1; + uint64_t c0_un_b2:1; + uint64_t c0_un_b1:1; + uint64_t c0_un_b0:1; + uint64_t c0_up_bx:1; + uint64_t c0_up_wi:1; + uint64_t c0_up_b2:1; + uint64_t c0_up_b1:1; + uint64_t c0_up_b0:1; + uint64_t c1_hpint:1; + uint64_t c1_pmei:1; + uint64_t c1_wake:1; + uint64_t reserved_29_29:1; + uint64_t c1_se:1; + uint64_t reserved_27_27:1; + uint64_t c1_aeri:1; + uint64_t c0_hpint:1; + uint64_t c0_pmei:1; + uint64_t c0_wake:1; + uint64_t reserved_22_22:1; + uint64_t c0_se:1; + uint64_t reserved_20_20:1; + uint64_t c0_aeri:1; + uint64_t ptime:1; + uint64_t pcnt:1; + uint64_t pidbof:1; + uint64_t psldbof:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t dma1fi:1; + uint64_t dma0fi:1; + uint64_t dma4dbo:1; + uint64_t dma3dbo:1; + uint64_t dma2dbo:1; + uint64_t dma1dbo:1; + uint64_t dma0dbo:1; + uint64_t iob2big:1; + uint64_t bar0_to:1; + uint64_t 
rml_wto:1; + uint64_t rml_rto:1; + } cn56xxp1; +}; + +union cvmx_npei_int_enb2 { + uint64_t u64; + struct cvmx_npei_int_enb2_s { + uint64_t reserved_62_63:2; + uint64_t int_a:1; + uint64_t c1_ldwn:1; + uint64_t c0_ldwn:1; + uint64_t c1_exc:1; + uint64_t c0_exc:1; + uint64_t c1_up_wf:1; + uint64_t c0_up_wf:1; + uint64_t c1_un_wf:1; + uint64_t c0_un_wf:1; + uint64_t c1_un_bx:1; + uint64_t c1_un_wi:1; + uint64_t c1_un_b2:1; + uint64_t c1_un_b1:1; + uint64_t c1_un_b0:1; + uint64_t c1_up_bx:1; + uint64_t c1_up_wi:1; + uint64_t c1_up_b2:1; + uint64_t c1_up_b1:1; + uint64_t c1_up_b0:1; + uint64_t c0_un_bx:1; + uint64_t c0_un_wi:1; + uint64_t c0_un_b2:1; + uint64_t c0_un_b1:1; + uint64_t c0_un_b0:1; + uint64_t c0_up_bx:1; + uint64_t c0_up_wi:1; + uint64_t c0_up_b2:1; + uint64_t c0_up_b1:1; + uint64_t c0_up_b0:1; + uint64_t c1_hpint:1; + uint64_t c1_pmei:1; + uint64_t c1_wake:1; + uint64_t crs1_dr:1; + uint64_t c1_se:1; + uint64_t crs1_er:1; + uint64_t c1_aeri:1; + uint64_t c0_hpint:1; + uint64_t c0_pmei:1; + uint64_t c0_wake:1; + uint64_t crs0_dr:1; + uint64_t c0_se:1; + uint64_t crs0_er:1; + uint64_t c0_aeri:1; + uint64_t ptime:1; + uint64_t pcnt:1; + uint64_t pidbof:1; + uint64_t psldbof:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t dma1fi:1; + uint64_t dma0fi:1; + uint64_t dma4dbo:1; + uint64_t dma3dbo:1; + uint64_t dma2dbo:1; + uint64_t dma1dbo:1; + uint64_t dma0dbo:1; + uint64_t iob2big:1; + uint64_t bar0_to:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } s; + struct cvmx_npei_int_enb2_s cn52xx; + struct cvmx_npei_int_enb2_cn52xxp1 { + uint64_t reserved_62_63:2; + uint64_t int_a:1; + uint64_t c1_ldwn:1; + uint64_t c0_ldwn:1; + uint64_t c1_exc:1; + uint64_t c0_exc:1; + uint64_t c1_up_wf:1; + uint64_t c0_up_wf:1; + uint64_t c1_un_wf:1; + uint64_t c0_un_wf:1; + uint64_t c1_un_bx:1; + uint64_t c1_un_wi:1; + uint64_t c1_un_b2:1; + uint64_t c1_un_b1:1; + uint64_t c1_un_b0:1; + uint64_t c1_up_bx:1; + uint64_t c1_up_wi:1; + uint64_t c1_up_b2:1; + uint64_t c1_up_b1:1; + uint64_t c1_up_b0:1; + uint64_t c0_un_bx:1; + uint64_t c0_un_wi:1; + uint64_t c0_un_b2:1; + uint64_t c0_un_b1:1; + uint64_t c0_un_b0:1; + uint64_t c0_up_bx:1; + uint64_t c0_up_wi:1; + uint64_t c0_up_b2:1; + uint64_t c0_up_b1:1; + uint64_t c0_up_b0:1; + uint64_t c1_hpint:1; + uint64_t c1_pmei:1; + uint64_t c1_wake:1; + uint64_t crs1_dr:1; + uint64_t c1_se:1; + uint64_t crs1_er:1; + uint64_t c1_aeri:1; + uint64_t c0_hpint:1; + uint64_t c0_pmei:1; + uint64_t c0_wake:1; + uint64_t crs0_dr:1; + uint64_t c0_se:1; + uint64_t crs0_er:1; + uint64_t c0_aeri:1; + uint64_t ptime:1; + uint64_t pcnt:1; + uint64_t pidbof:1; + uint64_t psldbof:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t dma1fi:1; + uint64_t dma0fi:1; + uint64_t reserved_8_8:1; + uint64_t dma3dbo:1; + uint64_t dma2dbo:1; + uint64_t dma1dbo:1; + uint64_t dma0dbo:1; + uint64_t iob2big:1; + uint64_t bar0_to:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } cn52xxp1; + struct cvmx_npei_int_enb2_s cn56xx; + struct cvmx_npei_int_enb2_cn56xxp1 { + uint64_t reserved_61_63:3; + uint64_t c1_ldwn:1; + uint64_t c0_ldwn:1; + uint64_t c1_exc:1; + uint64_t c0_exc:1; + uint64_t c1_up_wf:1; + uint64_t c0_up_wf:1; + uint64_t c1_un_wf:1; + uint64_t c0_un_wf:1; + uint64_t c1_un_bx:1; + uint64_t c1_un_wi:1; + uint64_t c1_un_b2:1; + uint64_t c1_un_b1:1; + uint64_t c1_un_b0:1; + uint64_t c1_up_bx:1; + uint64_t c1_up_wi:1; + uint64_t c1_up_b2:1; + uint64_t c1_up_b1:1; + uint64_t c1_up_b0:1; + uint64_t 
c0_un_bx:1; + uint64_t c0_un_wi:1; + uint64_t c0_un_b2:1; + uint64_t c0_un_b1:1; + uint64_t c0_un_b0:1; + uint64_t c0_up_bx:1; + uint64_t c0_up_wi:1; + uint64_t c0_up_b2:1; + uint64_t c0_up_b1:1; + uint64_t c0_up_b0:1; + uint64_t c1_hpint:1; + uint64_t c1_pmei:1; + uint64_t c1_wake:1; + uint64_t reserved_29_29:1; + uint64_t c1_se:1; + uint64_t reserved_27_27:1; + uint64_t c1_aeri:1; + uint64_t c0_hpint:1; + uint64_t c0_pmei:1; + uint64_t c0_wake:1; + uint64_t reserved_22_22:1; + uint64_t c0_se:1; + uint64_t reserved_20_20:1; + uint64_t c0_aeri:1; + uint64_t ptime:1; + uint64_t pcnt:1; + uint64_t pidbof:1; + uint64_t psldbof:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t dma1fi:1; + uint64_t dma0fi:1; + uint64_t dma4dbo:1; + uint64_t dma3dbo:1; + uint64_t dma2dbo:1; + uint64_t dma1dbo:1; + uint64_t dma0dbo:1; + uint64_t iob2big:1; + uint64_t bar0_to:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } cn56xxp1; +}; + +union cvmx_npei_int_info { + uint64_t u64; + struct cvmx_npei_int_info_s { + uint64_t reserved_12_63:52; + uint64_t pidbof:6; + uint64_t psldbof:6; + } s; + struct cvmx_npei_int_info_s cn52xx; + struct cvmx_npei_int_info_s cn56xx; + struct cvmx_npei_int_info_s cn56xxp1; +}; + +union cvmx_npei_int_sum { + uint64_t u64; + struct cvmx_npei_int_sum_s { + uint64_t mio_inta:1; + uint64_t reserved_62_62:1; + uint64_t int_a:1; + uint64_t c1_ldwn:1; + uint64_t c0_ldwn:1; + uint64_t c1_exc:1; + uint64_t c0_exc:1; + uint64_t c1_up_wf:1; + uint64_t c0_up_wf:1; + uint64_t c1_un_wf:1; + uint64_t c0_un_wf:1; + uint64_t c1_un_bx:1; + uint64_t c1_un_wi:1; + uint64_t c1_un_b2:1; + uint64_t c1_un_b1:1; + uint64_t c1_un_b0:1; + uint64_t c1_up_bx:1; + uint64_t c1_up_wi:1; + uint64_t c1_up_b2:1; + uint64_t c1_up_b1:1; + uint64_t c1_up_b0:1; + uint64_t c0_un_bx:1; + uint64_t c0_un_wi:1; + uint64_t c0_un_b2:1; + uint64_t c0_un_b1:1; + uint64_t c0_un_b0:1; + uint64_t c0_up_bx:1; + uint64_t c0_up_wi:1; + uint64_t c0_up_b2:1; + uint64_t c0_up_b1:1; + uint64_t c0_up_b0:1; + uint64_t c1_hpint:1; + uint64_t c1_pmei:1; + uint64_t c1_wake:1; + uint64_t crs1_dr:1; + uint64_t c1_se:1; + uint64_t crs1_er:1; + uint64_t c1_aeri:1; + uint64_t c0_hpint:1; + uint64_t c0_pmei:1; + uint64_t c0_wake:1; + uint64_t crs0_dr:1; + uint64_t c0_se:1; + uint64_t crs0_er:1; + uint64_t c0_aeri:1; + uint64_t ptime:1; + uint64_t pcnt:1; + uint64_t pidbof:1; + uint64_t psldbof:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t dma1fi:1; + uint64_t dma0fi:1; + uint64_t dma4dbo:1; + uint64_t dma3dbo:1; + uint64_t dma2dbo:1; + uint64_t dma1dbo:1; + uint64_t dma0dbo:1; + uint64_t iob2big:1; + uint64_t bar0_to:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } s; + struct cvmx_npei_int_sum_s cn52xx; + struct cvmx_npei_int_sum_cn52xxp1 { + uint64_t mio_inta:1; + uint64_t reserved_62_62:1; + uint64_t int_a:1; + uint64_t c1_ldwn:1; + uint64_t c0_ldwn:1; + uint64_t c1_exc:1; + uint64_t c0_exc:1; + uint64_t c1_up_wf:1; + uint64_t c0_up_wf:1; + uint64_t c1_un_wf:1; + uint64_t c0_un_wf:1; + uint64_t c1_un_bx:1; + uint64_t c1_un_wi:1; + uint64_t c1_un_b2:1; + uint64_t c1_un_b1:1; + uint64_t c1_un_b0:1; + uint64_t c1_up_bx:1; + uint64_t c1_up_wi:1; + uint64_t c1_up_b2:1; + uint64_t c1_up_b1:1; + uint64_t c1_up_b0:1; + uint64_t c0_un_bx:1; + uint64_t c0_un_wi:1; + uint64_t c0_un_b2:1; + uint64_t c0_un_b1:1; + uint64_t c0_un_b0:1; + uint64_t c0_up_bx:1; + uint64_t c0_up_wi:1; + uint64_t c0_up_b2:1; + uint64_t c0_up_b1:1; + uint64_t c0_up_b0:1; + uint64_t 
c1_hpint:1; + uint64_t c1_pmei:1; + uint64_t c1_wake:1; + uint64_t crs1_dr:1; + uint64_t c1_se:1; + uint64_t crs1_er:1; + uint64_t c1_aeri:1; + uint64_t c0_hpint:1; + uint64_t c0_pmei:1; + uint64_t c0_wake:1; + uint64_t crs0_dr:1; + uint64_t c0_se:1; + uint64_t crs0_er:1; + uint64_t c0_aeri:1; + uint64_t reserved_15_18:4; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t dma1fi:1; + uint64_t dma0fi:1; + uint64_t reserved_8_8:1; + uint64_t dma3dbo:1; + uint64_t dma2dbo:1; + uint64_t dma1dbo:1; + uint64_t dma0dbo:1; + uint64_t iob2big:1; + uint64_t bar0_to:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } cn52xxp1; + struct cvmx_npei_int_sum_s cn56xx; + struct cvmx_npei_int_sum_cn56xxp1 { + uint64_t mio_inta:1; + uint64_t reserved_61_62:2; + uint64_t c1_ldwn:1; + uint64_t c0_ldwn:1; + uint64_t c1_exc:1; + uint64_t c0_exc:1; + uint64_t c1_up_wf:1; + uint64_t c0_up_wf:1; + uint64_t c1_un_wf:1; + uint64_t c0_un_wf:1; + uint64_t c1_un_bx:1; + uint64_t c1_un_wi:1; + uint64_t c1_un_b2:1; + uint64_t c1_un_b1:1; + uint64_t c1_un_b0:1; + uint64_t c1_up_bx:1; + uint64_t c1_up_wi:1; + uint64_t c1_up_b2:1; + uint64_t c1_up_b1:1; + uint64_t c1_up_b0:1; + uint64_t c0_un_bx:1; + uint64_t c0_un_wi:1; + uint64_t c0_un_b2:1; + uint64_t c0_un_b1:1; + uint64_t c0_un_b0:1; + uint64_t c0_up_bx:1; + uint64_t c0_up_wi:1; + uint64_t c0_up_b2:1; + uint64_t c0_up_b1:1; + uint64_t c0_up_b0:1; + uint64_t c1_hpint:1; + uint64_t c1_pmei:1; + uint64_t c1_wake:1; + uint64_t reserved_29_29:1; + uint64_t c1_se:1; + uint64_t reserved_27_27:1; + uint64_t c1_aeri:1; + uint64_t c0_hpint:1; + uint64_t c0_pmei:1; + uint64_t c0_wake:1; + uint64_t reserved_22_22:1; + uint64_t c0_se:1; + uint64_t reserved_20_20:1; + uint64_t c0_aeri:1; + uint64_t ptime:1; + uint64_t pcnt:1; + uint64_t pidbof:1; + uint64_t psldbof:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t dma1fi:1; + uint64_t dma0fi:1; + uint64_t dma4dbo:1; + uint64_t dma3dbo:1; + uint64_t dma2dbo:1; + uint64_t dma1dbo:1; + uint64_t dma0dbo:1; + uint64_t iob2big:1; + uint64_t bar0_to:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } cn56xxp1; +}; + +union cvmx_npei_int_sum2 { + uint64_t u64; + struct cvmx_npei_int_sum2_s { + uint64_t mio_inta:1; + uint64_t reserved_62_62:1; + uint64_t int_a:1; + uint64_t c1_ldwn:1; + uint64_t c0_ldwn:1; + uint64_t c1_exc:1; + uint64_t c0_exc:1; + uint64_t c1_up_wf:1; + uint64_t c0_up_wf:1; + uint64_t c1_un_wf:1; + uint64_t c0_un_wf:1; + uint64_t c1_un_bx:1; + uint64_t c1_un_wi:1; + uint64_t c1_un_b2:1; + uint64_t c1_un_b1:1; + uint64_t c1_un_b0:1; + uint64_t c1_up_bx:1; + uint64_t c1_up_wi:1; + uint64_t c1_up_b2:1; + uint64_t c1_up_b1:1; + uint64_t c1_up_b0:1; + uint64_t c0_un_bx:1; + uint64_t c0_un_wi:1; + uint64_t c0_un_b2:1; + uint64_t c0_un_b1:1; + uint64_t c0_un_b0:1; + uint64_t c0_up_bx:1; + uint64_t c0_up_wi:1; + uint64_t c0_up_b2:1; + uint64_t c0_up_b1:1; + uint64_t c0_up_b0:1; + uint64_t c1_hpint:1; + uint64_t c1_pmei:1; + uint64_t c1_wake:1; + uint64_t crs1_dr:1; + uint64_t c1_se:1; + uint64_t crs1_er:1; + uint64_t c1_aeri:1; + uint64_t c0_hpint:1; + uint64_t c0_pmei:1; + uint64_t c0_wake:1; + uint64_t crs0_dr:1; + uint64_t c0_se:1; + uint64_t crs0_er:1; + uint64_t c0_aeri:1; + uint64_t reserved_15_18:4; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t dma1fi:1; + uint64_t dma0fi:1; + uint64_t reserved_8_8:1; + uint64_t dma3dbo:1; + uint64_t dma2dbo:1; + uint64_t dma1dbo:1; + uint64_t dma0dbo:1; + 
uint64_t iob2big:1; + uint64_t bar0_to:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } s; + struct cvmx_npei_int_sum2_s cn52xx; + struct cvmx_npei_int_sum2_s cn52xxp1; + struct cvmx_npei_int_sum2_s cn56xx; +}; + +union cvmx_npei_last_win_rdata0 { + uint64_t u64; + struct cvmx_npei_last_win_rdata0_s { + uint64_t data:64; + } s; + struct cvmx_npei_last_win_rdata0_s cn52xx; + struct cvmx_npei_last_win_rdata0_s cn52xxp1; + struct cvmx_npei_last_win_rdata0_s cn56xx; + struct cvmx_npei_last_win_rdata0_s cn56xxp1; +}; + +union cvmx_npei_last_win_rdata1 { + uint64_t u64; + struct cvmx_npei_last_win_rdata1_s { + uint64_t data:64; + } s; + struct cvmx_npei_last_win_rdata1_s cn52xx; + struct cvmx_npei_last_win_rdata1_s cn52xxp1; + struct cvmx_npei_last_win_rdata1_s cn56xx; + struct cvmx_npei_last_win_rdata1_s cn56xxp1; +}; + +union cvmx_npei_mem_access_ctl { + uint64_t u64; + struct cvmx_npei_mem_access_ctl_s { + uint64_t reserved_14_63:50; + uint64_t max_word:4; + uint64_t timer:10; + } s; + struct cvmx_npei_mem_access_ctl_s cn52xx; + struct cvmx_npei_mem_access_ctl_s cn52xxp1; + struct cvmx_npei_mem_access_ctl_s cn56xx; + struct cvmx_npei_mem_access_ctl_s cn56xxp1; +}; + +union cvmx_npei_mem_access_subidx { + uint64_t u64; + struct cvmx_npei_mem_access_subidx_s { + uint64_t reserved_42_63:22; + uint64_t zero:1; + uint64_t port:2; + uint64_t nmerge:1; + uint64_t esr:2; + uint64_t esw:2; + uint64_t nsr:1; + uint64_t nsw:1; + uint64_t ror:1; + uint64_t row:1; + uint64_t ba:30; + } s; + struct cvmx_npei_mem_access_subidx_s cn52xx; + struct cvmx_npei_mem_access_subidx_s cn52xxp1; + struct cvmx_npei_mem_access_subidx_s cn56xx; + struct cvmx_npei_mem_access_subidx_s cn56xxp1; +}; + +union cvmx_npei_msi_enb0 { + uint64_t u64; + struct cvmx_npei_msi_enb0_s { + uint64_t enb:64; + } s; + struct cvmx_npei_msi_enb0_s cn52xx; + struct cvmx_npei_msi_enb0_s cn52xxp1; + struct cvmx_npei_msi_enb0_s cn56xx; + struct cvmx_npei_msi_enb0_s cn56xxp1; +}; + +union cvmx_npei_msi_enb1 { + uint64_t u64; + struct cvmx_npei_msi_enb1_s { + uint64_t enb:64; + } s; + struct cvmx_npei_msi_enb1_s cn52xx; + struct cvmx_npei_msi_enb1_s cn52xxp1; + struct cvmx_npei_msi_enb1_s cn56xx; + struct cvmx_npei_msi_enb1_s cn56xxp1; +}; + +union cvmx_npei_msi_enb2 { + uint64_t u64; + struct cvmx_npei_msi_enb2_s { + uint64_t enb:64; + } s; + struct cvmx_npei_msi_enb2_s cn52xx; + struct cvmx_npei_msi_enb2_s cn52xxp1; + struct cvmx_npei_msi_enb2_s cn56xx; + struct cvmx_npei_msi_enb2_s cn56xxp1; +}; + +union cvmx_npei_msi_enb3 { + uint64_t u64; + struct cvmx_npei_msi_enb3_s { + uint64_t enb:64; + } s; + struct cvmx_npei_msi_enb3_s cn52xx; + struct cvmx_npei_msi_enb3_s cn52xxp1; + struct cvmx_npei_msi_enb3_s cn56xx; + struct cvmx_npei_msi_enb3_s cn56xxp1; +}; + +union cvmx_npei_msi_rcv0 { + uint64_t u64; + struct cvmx_npei_msi_rcv0_s { + uint64_t intr:64; + } s; + struct cvmx_npei_msi_rcv0_s cn52xx; + struct cvmx_npei_msi_rcv0_s cn52xxp1; + struct cvmx_npei_msi_rcv0_s cn56xx; + struct cvmx_npei_msi_rcv0_s cn56xxp1; +}; + +union cvmx_npei_msi_rcv1 { + uint64_t u64; + struct cvmx_npei_msi_rcv1_s { + uint64_t intr:64; + } s; + struct cvmx_npei_msi_rcv1_s cn52xx; + struct cvmx_npei_msi_rcv1_s cn52xxp1; + struct cvmx_npei_msi_rcv1_s cn56xx; + struct cvmx_npei_msi_rcv1_s cn56xxp1; +}; + +union cvmx_npei_msi_rcv2 { + uint64_t u64; + struct cvmx_npei_msi_rcv2_s { + uint64_t intr:64; + } s; + struct cvmx_npei_msi_rcv2_s cn52xx; + struct cvmx_npei_msi_rcv2_s cn52xxp1; + struct cvmx_npei_msi_rcv2_s cn56xx; + struct cvmx_npei_msi_rcv2_s cn56xxp1; +}; + 
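(Aside for readers of this patch: every register in these generated headers follows one pattern -- a union of the raw 64-bit or 32-bit value and one bitfield view per chip revision, with "s" as the common layout. The sketch below is illustrative only and not part of the patch; the helper name is hypothetical, and how the raw CSR value is fetched is outside the scope of these headers.)

/* Hypothetical decode helper, for illustration only -- not in this patch. */
static inline int npei_link_is_reset(uint64_t raw_ctl_status)
{
	union cvmx_npei_ctl_status ctl;

	ctl.u64 = raw_ctl_status;	/* overlay the bitfield view on the raw value  */
	return ctl.s.lnk_rst;		/* common view; per-chip views (cn52xx, ...)
					   exist for revision-specific layouts        */
}
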
+union cvmx_npei_msi_rcv3 { + uint64_t u64; + struct cvmx_npei_msi_rcv3_s { + uint64_t intr:64; + } s; + struct cvmx_npei_msi_rcv3_s cn52xx; + struct cvmx_npei_msi_rcv3_s cn52xxp1; + struct cvmx_npei_msi_rcv3_s cn56xx; + struct cvmx_npei_msi_rcv3_s cn56xxp1; +}; + +union cvmx_npei_msi_rd_map { + uint64_t u64; + struct cvmx_npei_msi_rd_map_s { + uint64_t reserved_16_63:48; + uint64_t rd_int:8; + uint64_t msi_int:8; + } s; + struct cvmx_npei_msi_rd_map_s cn52xx; + struct cvmx_npei_msi_rd_map_s cn52xxp1; + struct cvmx_npei_msi_rd_map_s cn56xx; + struct cvmx_npei_msi_rd_map_s cn56xxp1; +}; + +union cvmx_npei_msi_w1c_enb0 { + uint64_t u64; + struct cvmx_npei_msi_w1c_enb0_s { + uint64_t clr:64; + } s; + struct cvmx_npei_msi_w1c_enb0_s cn52xx; + struct cvmx_npei_msi_w1c_enb0_s cn56xx; +}; + +union cvmx_npei_msi_w1c_enb1 { + uint64_t u64; + struct cvmx_npei_msi_w1c_enb1_s { + uint64_t clr:64; + } s; + struct cvmx_npei_msi_w1c_enb1_s cn52xx; + struct cvmx_npei_msi_w1c_enb1_s cn56xx; +}; + +union cvmx_npei_msi_w1c_enb2 { + uint64_t u64; + struct cvmx_npei_msi_w1c_enb2_s { + uint64_t clr:64; + } s; + struct cvmx_npei_msi_w1c_enb2_s cn52xx; + struct cvmx_npei_msi_w1c_enb2_s cn56xx; +}; + +union cvmx_npei_msi_w1c_enb3 { + uint64_t u64; + struct cvmx_npei_msi_w1c_enb3_s { + uint64_t clr:64; + } s; + struct cvmx_npei_msi_w1c_enb3_s cn52xx; + struct cvmx_npei_msi_w1c_enb3_s cn56xx; +}; + +union cvmx_npei_msi_w1s_enb0 { + uint64_t u64; + struct cvmx_npei_msi_w1s_enb0_s { + uint64_t set:64; + } s; + struct cvmx_npei_msi_w1s_enb0_s cn52xx; + struct cvmx_npei_msi_w1s_enb0_s cn56xx; +}; + +union cvmx_npei_msi_w1s_enb1 { + uint64_t u64; + struct cvmx_npei_msi_w1s_enb1_s { + uint64_t set:64; + } s; + struct cvmx_npei_msi_w1s_enb1_s cn52xx; + struct cvmx_npei_msi_w1s_enb1_s cn56xx; +}; + +union cvmx_npei_msi_w1s_enb2 { + uint64_t u64; + struct cvmx_npei_msi_w1s_enb2_s { + uint64_t set:64; + } s; + struct cvmx_npei_msi_w1s_enb2_s cn52xx; + struct cvmx_npei_msi_w1s_enb2_s cn56xx; +}; + +union cvmx_npei_msi_w1s_enb3 { + uint64_t u64; + struct cvmx_npei_msi_w1s_enb3_s { + uint64_t set:64; + } s; + struct cvmx_npei_msi_w1s_enb3_s cn52xx; + struct cvmx_npei_msi_w1s_enb3_s cn56xx; +}; + +union cvmx_npei_msi_wr_map { + uint64_t u64; + struct cvmx_npei_msi_wr_map_s { + uint64_t reserved_16_63:48; + uint64_t ciu_int:8; + uint64_t msi_int:8; + } s; + struct cvmx_npei_msi_wr_map_s cn52xx; + struct cvmx_npei_msi_wr_map_s cn52xxp1; + struct cvmx_npei_msi_wr_map_s cn56xx; + struct cvmx_npei_msi_wr_map_s cn56xxp1; +}; + +union cvmx_npei_pcie_credit_cnt { + uint64_t u64; + struct cvmx_npei_pcie_credit_cnt_s { + uint64_t reserved_48_63:16; + uint64_t p1_ccnt:8; + uint64_t p1_ncnt:8; + uint64_t p1_pcnt:8; + uint64_t p0_ccnt:8; + uint64_t p0_ncnt:8; + uint64_t p0_pcnt:8; + } s; + struct cvmx_npei_pcie_credit_cnt_s cn52xx; + struct cvmx_npei_pcie_credit_cnt_s cn56xx; +}; + +union cvmx_npei_pcie_msi_rcv { + uint64_t u64; + struct cvmx_npei_pcie_msi_rcv_s { + uint64_t reserved_8_63:56; + uint64_t intr:8; + } s; + struct cvmx_npei_pcie_msi_rcv_s cn52xx; + struct cvmx_npei_pcie_msi_rcv_s cn52xxp1; + struct cvmx_npei_pcie_msi_rcv_s cn56xx; + struct cvmx_npei_pcie_msi_rcv_s cn56xxp1; +}; + +union cvmx_npei_pcie_msi_rcv_b1 { + uint64_t u64; + struct cvmx_npei_pcie_msi_rcv_b1_s { + uint64_t reserved_16_63:48; + uint64_t intr:8; + uint64_t reserved_0_7:8; + } s; + struct cvmx_npei_pcie_msi_rcv_b1_s cn52xx; + struct cvmx_npei_pcie_msi_rcv_b1_s cn52xxp1; + struct cvmx_npei_pcie_msi_rcv_b1_s cn56xx; + struct cvmx_npei_pcie_msi_rcv_b1_s cn56xxp1; 
+}; + +union cvmx_npei_pcie_msi_rcv_b2 { + uint64_t u64; + struct cvmx_npei_pcie_msi_rcv_b2_s { + uint64_t reserved_24_63:40; + uint64_t intr:8; + uint64_t reserved_0_15:16; + } s; + struct cvmx_npei_pcie_msi_rcv_b2_s cn52xx; + struct cvmx_npei_pcie_msi_rcv_b2_s cn52xxp1; + struct cvmx_npei_pcie_msi_rcv_b2_s cn56xx; + struct cvmx_npei_pcie_msi_rcv_b2_s cn56xxp1; +}; + +union cvmx_npei_pcie_msi_rcv_b3 { + uint64_t u64; + struct cvmx_npei_pcie_msi_rcv_b3_s { + uint64_t reserved_32_63:32; + uint64_t intr:8; + uint64_t reserved_0_23:24; + } s; + struct cvmx_npei_pcie_msi_rcv_b3_s cn52xx; + struct cvmx_npei_pcie_msi_rcv_b3_s cn52xxp1; + struct cvmx_npei_pcie_msi_rcv_b3_s cn56xx; + struct cvmx_npei_pcie_msi_rcv_b3_s cn56xxp1; +}; + +union cvmx_npei_pktx_cnts { + uint64_t u64; + struct cvmx_npei_pktx_cnts_s { + uint64_t reserved_54_63:10; + uint64_t timer:22; + uint64_t cnt:32; + } s; + struct cvmx_npei_pktx_cnts_s cn52xx; + struct cvmx_npei_pktx_cnts_s cn56xx; + struct cvmx_npei_pktx_cnts_s cn56xxp1; +}; + +union cvmx_npei_pktx_in_bp { + uint64_t u64; + struct cvmx_npei_pktx_in_bp_s { + uint64_t wmark:32; + uint64_t cnt:32; + } s; + struct cvmx_npei_pktx_in_bp_s cn52xx; + struct cvmx_npei_pktx_in_bp_s cn56xx; + struct cvmx_npei_pktx_in_bp_s cn56xxp1; +}; + +union cvmx_npei_pktx_instr_baddr { + uint64_t u64; + struct cvmx_npei_pktx_instr_baddr_s { + uint64_t addr:61; + uint64_t reserved_0_2:3; + } s; + struct cvmx_npei_pktx_instr_baddr_s cn52xx; + struct cvmx_npei_pktx_instr_baddr_s cn56xx; + struct cvmx_npei_pktx_instr_baddr_s cn56xxp1; +}; + +union cvmx_npei_pktx_instr_baoff_dbell { + uint64_t u64; + struct cvmx_npei_pktx_instr_baoff_dbell_s { + uint64_t aoff:32; + uint64_t dbell:32; + } s; + struct cvmx_npei_pktx_instr_baoff_dbell_s cn52xx; + struct cvmx_npei_pktx_instr_baoff_dbell_s cn56xx; + struct cvmx_npei_pktx_instr_baoff_dbell_s cn56xxp1; +}; + +union cvmx_npei_pktx_instr_fifo_rsize { + uint64_t u64; + struct cvmx_npei_pktx_instr_fifo_rsize_s { + uint64_t max:9; + uint64_t rrp:9; + uint64_t wrp:9; + uint64_t fcnt:5; + uint64_t rsize:32; + } s; + struct cvmx_npei_pktx_instr_fifo_rsize_s cn52xx; + struct cvmx_npei_pktx_instr_fifo_rsize_s cn56xx; + struct cvmx_npei_pktx_instr_fifo_rsize_s cn56xxp1; +}; + +union cvmx_npei_pktx_instr_header { + uint64_t u64; + struct cvmx_npei_pktx_instr_header_s { + uint64_t reserved_44_63:20; + uint64_t pbp:1; + uint64_t rsv_f:5; + uint64_t rparmode:2; + uint64_t rsv_e:1; + uint64_t rskp_len:7; + uint64_t rsv_d:6; + uint64_t use_ihdr:1; + uint64_t rsv_c:5; + uint64_t par_mode:2; + uint64_t rsv_b:1; + uint64_t skp_len:7; + uint64_t rsv_a:6; + } s; + struct cvmx_npei_pktx_instr_header_s cn52xx; + struct cvmx_npei_pktx_instr_header_s cn56xx; + struct cvmx_npei_pktx_instr_header_s cn56xxp1; +}; + +union cvmx_npei_pktx_slist_baddr { + uint64_t u64; + struct cvmx_npei_pktx_slist_baddr_s { + uint64_t addr:60; + uint64_t reserved_0_3:4; + } s; + struct cvmx_npei_pktx_slist_baddr_s cn52xx; + struct cvmx_npei_pktx_slist_baddr_s cn56xx; + struct cvmx_npei_pktx_slist_baddr_s cn56xxp1; +}; + +union cvmx_npei_pktx_slist_baoff_dbell { + uint64_t u64; + struct cvmx_npei_pktx_slist_baoff_dbell_s { + uint64_t aoff:32; + uint64_t dbell:32; + } s; + struct cvmx_npei_pktx_slist_baoff_dbell_s cn52xx; + struct cvmx_npei_pktx_slist_baoff_dbell_s cn56xx; + struct cvmx_npei_pktx_slist_baoff_dbell_s cn56xxp1; +}; + +union cvmx_npei_pktx_slist_fifo_rsize { + uint64_t u64; + struct cvmx_npei_pktx_slist_fifo_rsize_s { + uint64_t reserved_32_63:32; + uint64_t rsize:32; + } s; + struct 
cvmx_npei_pktx_slist_fifo_rsize_s cn52xx; + struct cvmx_npei_pktx_slist_fifo_rsize_s cn56xx; + struct cvmx_npei_pktx_slist_fifo_rsize_s cn56xxp1; +}; + +union cvmx_npei_pkt_cnt_int { + uint64_t u64; + struct cvmx_npei_pkt_cnt_int_s { + uint64_t reserved_32_63:32; + uint64_t port:32; + } s; + struct cvmx_npei_pkt_cnt_int_s cn52xx; + struct cvmx_npei_pkt_cnt_int_s cn56xx; + struct cvmx_npei_pkt_cnt_int_s cn56xxp1; +}; + +union cvmx_npei_pkt_cnt_int_enb { + uint64_t u64; + struct cvmx_npei_pkt_cnt_int_enb_s { + uint64_t reserved_32_63:32; + uint64_t port:32; + } s; + struct cvmx_npei_pkt_cnt_int_enb_s cn52xx; + struct cvmx_npei_pkt_cnt_int_enb_s cn56xx; + struct cvmx_npei_pkt_cnt_int_enb_s cn56xxp1; +}; + +union cvmx_npei_pkt_data_out_es { + uint64_t u64; + struct cvmx_npei_pkt_data_out_es_s { + uint64_t es:64; + } s; + struct cvmx_npei_pkt_data_out_es_s cn52xx; + struct cvmx_npei_pkt_data_out_es_s cn56xx; + struct cvmx_npei_pkt_data_out_es_s cn56xxp1; +}; + +union cvmx_npei_pkt_data_out_ns { + uint64_t u64; + struct cvmx_npei_pkt_data_out_ns_s { + uint64_t reserved_32_63:32; + uint64_t nsr:32; + } s; + struct cvmx_npei_pkt_data_out_ns_s cn52xx; + struct cvmx_npei_pkt_data_out_ns_s cn56xx; + struct cvmx_npei_pkt_data_out_ns_s cn56xxp1; +}; + +union cvmx_npei_pkt_data_out_ror { + uint64_t u64; + struct cvmx_npei_pkt_data_out_ror_s { + uint64_t reserved_32_63:32; + uint64_t ror:32; + } s; + struct cvmx_npei_pkt_data_out_ror_s cn52xx; + struct cvmx_npei_pkt_data_out_ror_s cn56xx; + struct cvmx_npei_pkt_data_out_ror_s cn56xxp1; +}; + +union cvmx_npei_pkt_dpaddr { + uint64_t u64; + struct cvmx_npei_pkt_dpaddr_s { + uint64_t reserved_32_63:32; + uint64_t dptr:32; + } s; + struct cvmx_npei_pkt_dpaddr_s cn52xx; + struct cvmx_npei_pkt_dpaddr_s cn56xx; + struct cvmx_npei_pkt_dpaddr_s cn56xxp1; +}; + +union cvmx_npei_pkt_in_bp { + uint64_t u64; + struct cvmx_npei_pkt_in_bp_s { + uint64_t reserved_32_63:32; + uint64_t bp:32; + } s; + struct cvmx_npei_pkt_in_bp_s cn56xx; +}; + +union cvmx_npei_pkt_in_donex_cnts { + uint64_t u64; + struct cvmx_npei_pkt_in_donex_cnts_s { + uint64_t reserved_32_63:32; + uint64_t cnt:32; + } s; + struct cvmx_npei_pkt_in_donex_cnts_s cn52xx; + struct cvmx_npei_pkt_in_donex_cnts_s cn56xx; + struct cvmx_npei_pkt_in_donex_cnts_s cn56xxp1; +}; + +union cvmx_npei_pkt_in_instr_counts { + uint64_t u64; + struct cvmx_npei_pkt_in_instr_counts_s { + uint64_t wr_cnt:32; + uint64_t rd_cnt:32; + } s; + struct cvmx_npei_pkt_in_instr_counts_s cn52xx; + struct cvmx_npei_pkt_in_instr_counts_s cn56xx; +}; + +union cvmx_npei_pkt_in_pcie_port { + uint64_t u64; + struct cvmx_npei_pkt_in_pcie_port_s { + uint64_t pp:64; + } s; + struct cvmx_npei_pkt_in_pcie_port_s cn52xx; + struct cvmx_npei_pkt_in_pcie_port_s cn56xx; +}; + +union cvmx_npei_pkt_input_control { + uint64_t u64; + struct cvmx_npei_pkt_input_control_s { + uint64_t reserved_23_63:41; + uint64_t pkt_rr:1; + uint64_t pbp_dhi:13; + uint64_t d_nsr:1; + uint64_t d_esr:2; + uint64_t d_ror:1; + uint64_t use_csr:1; + uint64_t nsr:1; + uint64_t esr:2; + uint64_t ror:1; + } s; + struct cvmx_npei_pkt_input_control_s cn52xx; + struct cvmx_npei_pkt_input_control_s cn56xx; + struct cvmx_npei_pkt_input_control_s cn56xxp1; +}; + +union cvmx_npei_pkt_instr_enb { + uint64_t u64; + struct cvmx_npei_pkt_instr_enb_s { + uint64_t reserved_32_63:32; + uint64_t enb:32; + } s; + struct cvmx_npei_pkt_instr_enb_s cn52xx; + struct cvmx_npei_pkt_instr_enb_s cn56xx; + struct cvmx_npei_pkt_instr_enb_s cn56xxp1; +}; + +union cvmx_npei_pkt_instr_rd_size { + uint64_t u64; 
+ struct cvmx_npei_pkt_instr_rd_size_s { + uint64_t rdsize:64; + } s; + struct cvmx_npei_pkt_instr_rd_size_s cn52xx; + struct cvmx_npei_pkt_instr_rd_size_s cn56xx; +}; + +union cvmx_npei_pkt_instr_size { + uint64_t u64; + struct cvmx_npei_pkt_instr_size_s { + uint64_t reserved_32_63:32; + uint64_t is_64b:32; + } s; + struct cvmx_npei_pkt_instr_size_s cn52xx; + struct cvmx_npei_pkt_instr_size_s cn56xx; + struct cvmx_npei_pkt_instr_size_s cn56xxp1; +}; + +union cvmx_npei_pkt_int_levels { + uint64_t u64; + struct cvmx_npei_pkt_int_levels_s { + uint64_t reserved_54_63:10; + uint64_t time:22; + uint64_t cnt:32; + } s; + struct cvmx_npei_pkt_int_levels_s cn52xx; + struct cvmx_npei_pkt_int_levels_s cn56xx; + struct cvmx_npei_pkt_int_levels_s cn56xxp1; +}; + +union cvmx_npei_pkt_iptr { + uint64_t u64; + struct cvmx_npei_pkt_iptr_s { + uint64_t reserved_32_63:32; + uint64_t iptr:32; + } s; + struct cvmx_npei_pkt_iptr_s cn52xx; + struct cvmx_npei_pkt_iptr_s cn56xx; + struct cvmx_npei_pkt_iptr_s cn56xxp1; +}; + +union cvmx_npei_pkt_out_bmode { + uint64_t u64; + struct cvmx_npei_pkt_out_bmode_s { + uint64_t reserved_32_63:32; + uint64_t bmode:32; + } s; + struct cvmx_npei_pkt_out_bmode_s cn52xx; + struct cvmx_npei_pkt_out_bmode_s cn56xx; + struct cvmx_npei_pkt_out_bmode_s cn56xxp1; +}; + +union cvmx_npei_pkt_out_enb { + uint64_t u64; + struct cvmx_npei_pkt_out_enb_s { + uint64_t reserved_32_63:32; + uint64_t enb:32; + } s; + struct cvmx_npei_pkt_out_enb_s cn52xx; + struct cvmx_npei_pkt_out_enb_s cn56xx; + struct cvmx_npei_pkt_out_enb_s cn56xxp1; +}; + +union cvmx_npei_pkt_output_wmark { + uint64_t u64; + struct cvmx_npei_pkt_output_wmark_s { + uint64_t reserved_32_63:32; + uint64_t wmark:32; + } s; + struct cvmx_npei_pkt_output_wmark_s cn52xx; + struct cvmx_npei_pkt_output_wmark_s cn56xx; +}; + +union cvmx_npei_pkt_pcie_port { + uint64_t u64; + struct cvmx_npei_pkt_pcie_port_s { + uint64_t pp:64; + } s; + struct cvmx_npei_pkt_pcie_port_s cn52xx; + struct cvmx_npei_pkt_pcie_port_s cn56xx; + struct cvmx_npei_pkt_pcie_port_s cn56xxp1; +}; + +union cvmx_npei_pkt_port_in_rst { + uint64_t u64; + struct cvmx_npei_pkt_port_in_rst_s { + uint64_t in_rst:32; + uint64_t out_rst:32; + } s; + struct cvmx_npei_pkt_port_in_rst_s cn52xx; + struct cvmx_npei_pkt_port_in_rst_s cn56xx; +}; + +union cvmx_npei_pkt_slist_es { + uint64_t u64; + struct cvmx_npei_pkt_slist_es_s { + uint64_t es:64; + } s; + struct cvmx_npei_pkt_slist_es_s cn52xx; + struct cvmx_npei_pkt_slist_es_s cn56xx; + struct cvmx_npei_pkt_slist_es_s cn56xxp1; +}; + +union cvmx_npei_pkt_slist_id_size { + uint64_t u64; + struct cvmx_npei_pkt_slist_id_size_s { + uint64_t reserved_23_63:41; + uint64_t isize:7; + uint64_t bsize:16; + } s; + struct cvmx_npei_pkt_slist_id_size_s cn52xx; + struct cvmx_npei_pkt_slist_id_size_s cn56xx; + struct cvmx_npei_pkt_slist_id_size_s cn56xxp1; +}; + +union cvmx_npei_pkt_slist_ns { + uint64_t u64; + struct cvmx_npei_pkt_slist_ns_s { + uint64_t reserved_32_63:32; + uint64_t nsr:32; + } s; + struct cvmx_npei_pkt_slist_ns_s cn52xx; + struct cvmx_npei_pkt_slist_ns_s cn56xx; + struct cvmx_npei_pkt_slist_ns_s cn56xxp1; +}; + +union cvmx_npei_pkt_slist_ror { + uint64_t u64; + struct cvmx_npei_pkt_slist_ror_s { + uint64_t reserved_32_63:32; + uint64_t ror:32; + } s; + struct cvmx_npei_pkt_slist_ror_s cn52xx; + struct cvmx_npei_pkt_slist_ror_s cn56xx; + struct cvmx_npei_pkt_slist_ror_s cn56xxp1; +}; + +union cvmx_npei_pkt_time_int { + uint64_t u64; + struct cvmx_npei_pkt_time_int_s { + uint64_t reserved_32_63:32; + uint64_t port:32; + } 
s; + struct cvmx_npei_pkt_time_int_s cn52xx; + struct cvmx_npei_pkt_time_int_s cn56xx; + struct cvmx_npei_pkt_time_int_s cn56xxp1; +}; + +union cvmx_npei_pkt_time_int_enb { + uint64_t u64; + struct cvmx_npei_pkt_time_int_enb_s { + uint64_t reserved_32_63:32; + uint64_t port:32; + } s; + struct cvmx_npei_pkt_time_int_enb_s cn52xx; + struct cvmx_npei_pkt_time_int_enb_s cn56xx; + struct cvmx_npei_pkt_time_int_enb_s cn56xxp1; +}; + +union cvmx_npei_rsl_int_blocks { + uint64_t u64; + struct cvmx_npei_rsl_int_blocks_s { + uint64_t reserved_31_63:33; + uint64_t iob:1; + uint64_t lmc1:1; + uint64_t agl:1; + uint64_t reserved_24_27:4; + uint64_t asxpcs1:1; + uint64_t asxpcs0:1; + uint64_t reserved_21_21:1; + uint64_t pip:1; + uint64_t reserved_18_19:2; + uint64_t lmc0:1; + uint64_t l2c:1; + uint64_t usb1:1; + uint64_t rad:1; + uint64_t usb:1; + uint64_t pow:1; + uint64_t tim:1; + uint64_t pko:1; + uint64_t ipd:1; + uint64_t reserved_8_8:1; + uint64_t zip:1; + uint64_t reserved_6_6:1; + uint64_t fpa:1; + uint64_t key:1; + uint64_t npei:1; + uint64_t gmx1:1; + uint64_t gmx0:1; + uint64_t mio:1; + } s; + struct cvmx_npei_rsl_int_blocks_s cn52xx; + struct cvmx_npei_rsl_int_blocks_s cn52xxp1; + struct cvmx_npei_rsl_int_blocks_cn56xx { + uint64_t reserved_31_63:33; + uint64_t iob:1; + uint64_t lmc1:1; + uint64_t agl:1; + uint64_t reserved_24_27:4; + uint64_t asxpcs1:1; + uint64_t asxpcs0:1; + uint64_t reserved_21_21:1; + uint64_t pip:1; + uint64_t reserved_18_19:2; + uint64_t lmc0:1; + uint64_t l2c:1; + uint64_t reserved_15_15:1; + uint64_t rad:1; + uint64_t usb:1; + uint64_t pow:1; + uint64_t tim:1; + uint64_t pko:1; + uint64_t ipd:1; + uint64_t reserved_8_8:1; + uint64_t zip:1; + uint64_t reserved_6_6:1; + uint64_t fpa:1; + uint64_t key:1; + uint64_t npei:1; + uint64_t gmx1:1; + uint64_t gmx0:1; + uint64_t mio:1; + } cn56xx; + struct cvmx_npei_rsl_int_blocks_cn56xx cn56xxp1; +}; + +union cvmx_npei_scratch_1 { + uint64_t u64; + struct cvmx_npei_scratch_1_s { + uint64_t data:64; + } s; + struct cvmx_npei_scratch_1_s cn52xx; + struct cvmx_npei_scratch_1_s cn52xxp1; + struct cvmx_npei_scratch_1_s cn56xx; + struct cvmx_npei_scratch_1_s cn56xxp1; +}; + +union cvmx_npei_state1 { + uint64_t u64; + struct cvmx_npei_state1_s { + uint64_t cpl1:12; + uint64_t cpl0:12; + uint64_t arb:1; + uint64_t csr:39; + } s; + struct cvmx_npei_state1_s cn52xx; + struct cvmx_npei_state1_s cn52xxp1; + struct cvmx_npei_state1_s cn56xx; + struct cvmx_npei_state1_s cn56xxp1; +}; + +union cvmx_npei_state2 { + uint64_t u64; + struct cvmx_npei_state2_s { + uint64_t reserved_48_63:16; + uint64_t npei:1; + uint64_t rac:1; + uint64_t csm1:15; + uint64_t csm0:15; + uint64_t nnp0:8; + uint64_t nnd:8; + } s; + struct cvmx_npei_state2_s cn52xx; + struct cvmx_npei_state2_s cn52xxp1; + struct cvmx_npei_state2_s cn56xx; + struct cvmx_npei_state2_s cn56xxp1; +}; + +union cvmx_npei_state3 { + uint64_t u64; + struct cvmx_npei_state3_s { + uint64_t reserved_56_63:8; + uint64_t psm1:15; + uint64_t psm0:15; + uint64_t nsm1:13; + uint64_t nsm0:13; + } s; + struct cvmx_npei_state3_s cn52xx; + struct cvmx_npei_state3_s cn52xxp1; + struct cvmx_npei_state3_s cn56xx; + struct cvmx_npei_state3_s cn56xxp1; +}; + +union cvmx_npei_win_rd_addr { + uint64_t u64; + struct cvmx_npei_win_rd_addr_s { + uint64_t reserved_51_63:13; + uint64_t ld_cmd:2; + uint64_t iobit:1; + uint64_t rd_addr:48; + } s; + struct cvmx_npei_win_rd_addr_s cn52xx; + struct cvmx_npei_win_rd_addr_s cn52xxp1; + struct cvmx_npei_win_rd_addr_s cn56xx; + struct cvmx_npei_win_rd_addr_s cn56xxp1; 
+}; + +union cvmx_npei_win_rd_data { + uint64_t u64; + struct cvmx_npei_win_rd_data_s { + uint64_t rd_data:64; + } s; + struct cvmx_npei_win_rd_data_s cn52xx; + struct cvmx_npei_win_rd_data_s cn52xxp1; + struct cvmx_npei_win_rd_data_s cn56xx; + struct cvmx_npei_win_rd_data_s cn56xxp1; +}; + +union cvmx_npei_win_wr_addr { + uint64_t u64; + struct cvmx_npei_win_wr_addr_s { + uint64_t reserved_49_63:15; + uint64_t iobit:1; + uint64_t wr_addr:46; + uint64_t reserved_0_1:2; + } s; + struct cvmx_npei_win_wr_addr_s cn52xx; + struct cvmx_npei_win_wr_addr_s cn52xxp1; + struct cvmx_npei_win_wr_addr_s cn56xx; + struct cvmx_npei_win_wr_addr_s cn56xxp1; +}; + +union cvmx_npei_win_wr_data { + uint64_t u64; + struct cvmx_npei_win_wr_data_s { + uint64_t wr_data:64; + } s; + struct cvmx_npei_win_wr_data_s cn52xx; + struct cvmx_npei_win_wr_data_s cn52xxp1; + struct cvmx_npei_win_wr_data_s cn56xx; + struct cvmx_npei_win_wr_data_s cn56xxp1; +}; + +union cvmx_npei_win_wr_mask { + uint64_t u64; + struct cvmx_npei_win_wr_mask_s { + uint64_t reserved_8_63:56; + uint64_t wr_mask:8; + } s; + struct cvmx_npei_win_wr_mask_s cn52xx; + struct cvmx_npei_win_wr_mask_s cn52xxp1; + struct cvmx_npei_win_wr_mask_s cn56xx; + struct cvmx_npei_win_wr_mask_s cn56xxp1; +}; + +union cvmx_npei_window_ctl { + uint64_t u64; + struct cvmx_npei_window_ctl_s { + uint64_t reserved_32_63:32; + uint64_t time:32; + } s; + struct cvmx_npei_window_ctl_s cn52xx; + struct cvmx_npei_window_ctl_s cn52xxp1; + struct cvmx_npei_window_ctl_s cn56xx; + struct cvmx_npei_window_ctl_s cn56xxp1; +}; + +#endif diff --git a/arch/mips/include/asm/octeon/cvmx-npi-defs.h b/arch/mips/include/asm/octeon/cvmx-npi-defs.h new file mode 100644 index 0000000..4e03cd8 --- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-npi-defs.h @@ -0,0 +1,1735 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. 
+ * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_NPI_DEFS_H__ +#define __CVMX_NPI_DEFS_H__ + +#define CVMX_NPI_BASE_ADDR_INPUT0 \ + CVMX_ADD_IO_SEG(0x00011F0000000070ull) +#define CVMX_NPI_BASE_ADDR_INPUT1 \ + CVMX_ADD_IO_SEG(0x00011F0000000080ull) +#define CVMX_NPI_BASE_ADDR_INPUT2 \ + CVMX_ADD_IO_SEG(0x00011F0000000090ull) +#define CVMX_NPI_BASE_ADDR_INPUT3 \ + CVMX_ADD_IO_SEG(0x00011F00000000A0ull) +#define CVMX_NPI_BASE_ADDR_INPUTX(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000000070ull + (((offset) & 3) * 16)) +#define CVMX_NPI_BASE_ADDR_OUTPUT0 \ + CVMX_ADD_IO_SEG(0x00011F00000000B8ull) +#define CVMX_NPI_BASE_ADDR_OUTPUT1 \ + CVMX_ADD_IO_SEG(0x00011F00000000C0ull) +#define CVMX_NPI_BASE_ADDR_OUTPUT2 \ + CVMX_ADD_IO_SEG(0x00011F00000000C8ull) +#define CVMX_NPI_BASE_ADDR_OUTPUT3 \ + CVMX_ADD_IO_SEG(0x00011F00000000D0ull) +#define CVMX_NPI_BASE_ADDR_OUTPUTX(offset) \ + CVMX_ADD_IO_SEG(0x00011F00000000B8ull + (((offset) & 3) * 8)) +#define CVMX_NPI_BIST_STATUS \ + CVMX_ADD_IO_SEG(0x00011F00000003F8ull) +#define CVMX_NPI_BUFF_SIZE_OUTPUT0 \ + CVMX_ADD_IO_SEG(0x00011F00000000E0ull) +#define CVMX_NPI_BUFF_SIZE_OUTPUT1 \ + CVMX_ADD_IO_SEG(0x00011F00000000E8ull) +#define CVMX_NPI_BUFF_SIZE_OUTPUT2 \ + CVMX_ADD_IO_SEG(0x00011F00000000F0ull) +#define CVMX_NPI_BUFF_SIZE_OUTPUT3 \ + CVMX_ADD_IO_SEG(0x00011F00000000F8ull) +#define CVMX_NPI_BUFF_SIZE_OUTPUTX(offset) \ + CVMX_ADD_IO_SEG(0x00011F00000000E0ull + (((offset) & 3) * 8)) +#define CVMX_NPI_COMP_CTL \ + CVMX_ADD_IO_SEG(0x00011F0000000218ull) +#define CVMX_NPI_CTL_STATUS \ + CVMX_ADD_IO_SEG(0x00011F0000000010ull) +#define CVMX_NPI_DBG_SELECT \ + CVMX_ADD_IO_SEG(0x00011F0000000008ull) +#define CVMX_NPI_DMA_CONTROL \ + CVMX_ADD_IO_SEG(0x00011F0000000128ull) +#define CVMX_NPI_DMA_HIGHP_COUNTS \ + CVMX_ADD_IO_SEG(0x00011F0000000148ull) +#define CVMX_NPI_DMA_HIGHP_NADDR \ + CVMX_ADD_IO_SEG(0x00011F0000000158ull) +#define CVMX_NPI_DMA_LOWP_COUNTS \ + CVMX_ADD_IO_SEG(0x00011F0000000140ull) +#define CVMX_NPI_DMA_LOWP_NADDR \ + CVMX_ADD_IO_SEG(0x00011F0000000150ull) +#define CVMX_NPI_HIGHP_DBELL \ + CVMX_ADD_IO_SEG(0x00011F0000000120ull) +#define CVMX_NPI_HIGHP_IBUFF_SADDR \ + CVMX_ADD_IO_SEG(0x00011F0000000110ull) +#define CVMX_NPI_INPUT_CONTROL \ + CVMX_ADD_IO_SEG(0x00011F0000000138ull) +#define CVMX_NPI_INT_ENB \ + CVMX_ADD_IO_SEG(0x00011F0000000020ull) +#define CVMX_NPI_INT_SUM \ + CVMX_ADD_IO_SEG(0x00011F0000000018ull) +#define CVMX_NPI_LOWP_DBELL \ + CVMX_ADD_IO_SEG(0x00011F0000000118ull) +#define CVMX_NPI_LOWP_IBUFF_SADDR \ + CVMX_ADD_IO_SEG(0x00011F0000000108ull) +#define CVMX_NPI_MEM_ACCESS_SUBID3 \ + CVMX_ADD_IO_SEG(0x00011F0000000028ull) +#define CVMX_NPI_MEM_ACCESS_SUBID4 \ + CVMX_ADD_IO_SEG(0x00011F0000000030ull) +#define CVMX_NPI_MEM_ACCESS_SUBID5 \ + CVMX_ADD_IO_SEG(0x00011F0000000038ull) +#define CVMX_NPI_MEM_ACCESS_SUBID6 \ + CVMX_ADD_IO_SEG(0x00011F0000000040ull) +#define CVMX_NPI_MEM_ACCESS_SUBIDX(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000000028ull + (((offset) & 7) * 8) - 8 * 3) +#define CVMX_NPI_MSI_RCV \ + (0x0000000000000190ull) +#define CVMX_NPI_NPI_MSI_RCV \ + CVMX_ADD_IO_SEG(0x00011F0000001190ull) +#define CVMX_NPI_NUM_DESC_OUTPUT0 \ + CVMX_ADD_IO_SEG(0x00011F0000000050ull) +#define CVMX_NPI_NUM_DESC_OUTPUT1 \ + CVMX_ADD_IO_SEG(0x00011F0000000058ull) +#define CVMX_NPI_NUM_DESC_OUTPUT2 \ + CVMX_ADD_IO_SEG(0x00011F0000000060ull) +#define CVMX_NPI_NUM_DESC_OUTPUT3 \ + CVMX_ADD_IO_SEG(0x00011F0000000068ull) +#define 
CVMX_NPI_NUM_DESC_OUTPUTX(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000000050ull + (((offset) & 3) * 8)) +#define CVMX_NPI_OUTPUT_CONTROL \ + CVMX_ADD_IO_SEG(0x00011F0000000100ull) +#define CVMX_NPI_P0_DBPAIR_ADDR \ + CVMX_ADD_IO_SEG(0x00011F0000000180ull) +#define CVMX_NPI_P0_INSTR_ADDR \ + CVMX_ADD_IO_SEG(0x00011F00000001C0ull) +#define CVMX_NPI_P0_INSTR_CNTS \ + CVMX_ADD_IO_SEG(0x00011F00000001A0ull) +#define CVMX_NPI_P0_PAIR_CNTS \ + CVMX_ADD_IO_SEG(0x00011F0000000160ull) +#define CVMX_NPI_P1_DBPAIR_ADDR \ + CVMX_ADD_IO_SEG(0x00011F0000000188ull) +#define CVMX_NPI_P1_INSTR_ADDR \ + CVMX_ADD_IO_SEG(0x00011F00000001C8ull) +#define CVMX_NPI_P1_INSTR_CNTS \ + CVMX_ADD_IO_SEG(0x00011F00000001A8ull) +#define CVMX_NPI_P1_PAIR_CNTS \ + CVMX_ADD_IO_SEG(0x00011F0000000168ull) +#define CVMX_NPI_P2_DBPAIR_ADDR \ + CVMX_ADD_IO_SEG(0x00011F0000000190ull) +#define CVMX_NPI_P2_INSTR_ADDR \ + CVMX_ADD_IO_SEG(0x00011F00000001D0ull) +#define CVMX_NPI_P2_INSTR_CNTS \ + CVMX_ADD_IO_SEG(0x00011F00000001B0ull) +#define CVMX_NPI_P2_PAIR_CNTS \ + CVMX_ADD_IO_SEG(0x00011F0000000170ull) +#define CVMX_NPI_P3_DBPAIR_ADDR \ + CVMX_ADD_IO_SEG(0x00011F0000000198ull) +#define CVMX_NPI_P3_INSTR_ADDR \ + CVMX_ADD_IO_SEG(0x00011F00000001D8ull) +#define CVMX_NPI_P3_INSTR_CNTS \ + CVMX_ADD_IO_SEG(0x00011F00000001B8ull) +#define CVMX_NPI_P3_PAIR_CNTS \ + CVMX_ADD_IO_SEG(0x00011F0000000178ull) +#define CVMX_NPI_PCI_BAR1_INDEXX(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000001100ull + (((offset) & 31) * 4)) +#define CVMX_NPI_PCI_BIST_REG \ + CVMX_ADD_IO_SEG(0x00011F00000011C0ull) +#define CVMX_NPI_PCI_BURST_SIZE \ + CVMX_ADD_IO_SEG(0x00011F00000000D8ull) +#define CVMX_NPI_PCI_CFG00 \ + CVMX_ADD_IO_SEG(0x00011F0000001800ull) +#define CVMX_NPI_PCI_CFG01 \ + CVMX_ADD_IO_SEG(0x00011F0000001804ull) +#define CVMX_NPI_PCI_CFG02 \ + CVMX_ADD_IO_SEG(0x00011F0000001808ull) +#define CVMX_NPI_PCI_CFG03 \ + CVMX_ADD_IO_SEG(0x00011F000000180Cull) +#define CVMX_NPI_PCI_CFG04 \ + CVMX_ADD_IO_SEG(0x00011F0000001810ull) +#define CVMX_NPI_PCI_CFG05 \ + CVMX_ADD_IO_SEG(0x00011F0000001814ull) +#define CVMX_NPI_PCI_CFG06 \ + CVMX_ADD_IO_SEG(0x00011F0000001818ull) +#define CVMX_NPI_PCI_CFG07 \ + CVMX_ADD_IO_SEG(0x00011F000000181Cull) +#define CVMX_NPI_PCI_CFG08 \ + CVMX_ADD_IO_SEG(0x00011F0000001820ull) +#define CVMX_NPI_PCI_CFG09 \ + CVMX_ADD_IO_SEG(0x00011F0000001824ull) +#define CVMX_NPI_PCI_CFG10 \ + CVMX_ADD_IO_SEG(0x00011F0000001828ull) +#define CVMX_NPI_PCI_CFG11 \ + CVMX_ADD_IO_SEG(0x00011F000000182Cull) +#define CVMX_NPI_PCI_CFG12 \ + CVMX_ADD_IO_SEG(0x00011F0000001830ull) +#define CVMX_NPI_PCI_CFG13 \ + CVMX_ADD_IO_SEG(0x00011F0000001834ull) +#define CVMX_NPI_PCI_CFG15 \ + CVMX_ADD_IO_SEG(0x00011F000000183Cull) +#define CVMX_NPI_PCI_CFG16 \ + CVMX_ADD_IO_SEG(0x00011F0000001840ull) +#define CVMX_NPI_PCI_CFG17 \ + CVMX_ADD_IO_SEG(0x00011F0000001844ull) +#define CVMX_NPI_PCI_CFG18 \ + CVMX_ADD_IO_SEG(0x00011F0000001848ull) +#define CVMX_NPI_PCI_CFG19 \ + CVMX_ADD_IO_SEG(0x00011F000000184Cull) +#define CVMX_NPI_PCI_CFG20 \ + CVMX_ADD_IO_SEG(0x00011F0000001850ull) +#define CVMX_NPI_PCI_CFG21 \ + CVMX_ADD_IO_SEG(0x00011F0000001854ull) +#define CVMX_NPI_PCI_CFG22 \ + CVMX_ADD_IO_SEG(0x00011F0000001858ull) +#define CVMX_NPI_PCI_CFG56 \ + CVMX_ADD_IO_SEG(0x00011F00000018E0ull) +#define CVMX_NPI_PCI_CFG57 \ + CVMX_ADD_IO_SEG(0x00011F00000018E4ull) +#define CVMX_NPI_PCI_CFG58 \ + CVMX_ADD_IO_SEG(0x00011F00000018E8ull) +#define CVMX_NPI_PCI_CFG59 \ + CVMX_ADD_IO_SEG(0x00011F00000018ECull) +#define CVMX_NPI_PCI_CFG60 \ + CVMX_ADD_IO_SEG(0x00011F00000018F0ull) 
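/*
 * Editorial aside, not part of the original patch: these address macros
 * pair with the bitfield unions defined later in this header.  The usual
 * OCTEON access pattern is to read a CSR into the union's u64 member and
 * then decode it through the named fields.  A minimal sketch, assuming the
 * cvmx_read_csr()/cvmx_write_csr() accessors from cvmx.h:
 *
 *	union cvmx_npi_ctl_status ctl;
 *
 *	ctl.u64 = cvmx_read_csr(CVMX_NPI_CTL_STATUS);
 *	if (!ctl.s.ins0_enb) {
 *		ctl.s.ins0_enb = 1;	(enable instruction input port 0)
 *		cvmx_write_csr(CVMX_NPI_CTL_STATUS, ctl.u64);
 *	}
 *
 * The per-model members (cn30xx, cn31xx, ...) overlay the same 64 bits;
 * when field positions differ between chip models, the variant matching
 * the running part must be used instead of the generic "s" view.
 */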
+#define CVMX_NPI_PCI_CFG61 \ + CVMX_ADD_IO_SEG(0x00011F00000018F4ull) +#define CVMX_NPI_PCI_CFG62 \ + CVMX_ADD_IO_SEG(0x00011F00000018F8ull) +#define CVMX_NPI_PCI_CFG63 \ + CVMX_ADD_IO_SEG(0x00011F00000018FCull) +#define CVMX_NPI_PCI_CNT_REG \ + CVMX_ADD_IO_SEG(0x00011F00000011B8ull) +#define CVMX_NPI_PCI_CTL_STATUS_2 \ + CVMX_ADD_IO_SEG(0x00011F000000118Cull) +#define CVMX_NPI_PCI_INT_ARB_CFG \ + CVMX_ADD_IO_SEG(0x00011F0000000130ull) +#define CVMX_NPI_PCI_INT_ENB2 \ + CVMX_ADD_IO_SEG(0x00011F00000011A0ull) +#define CVMX_NPI_PCI_INT_SUM2 \ + CVMX_ADD_IO_SEG(0x00011F0000001198ull) +#define CVMX_NPI_PCI_READ_CMD \ + CVMX_ADD_IO_SEG(0x00011F0000000048ull) +#define CVMX_NPI_PCI_READ_CMD_6 \ + CVMX_ADD_IO_SEG(0x00011F0000001180ull) +#define CVMX_NPI_PCI_READ_CMD_C \ + CVMX_ADD_IO_SEG(0x00011F0000001184ull) +#define CVMX_NPI_PCI_READ_CMD_E \ + CVMX_ADD_IO_SEG(0x00011F0000001188ull) +#define CVMX_NPI_PCI_SCM_REG \ + CVMX_ADD_IO_SEG(0x00011F00000011A8ull) +#define CVMX_NPI_PCI_TSR_REG \ + CVMX_ADD_IO_SEG(0x00011F00000011B0ull) +#define CVMX_NPI_PORT32_INSTR_HDR \ + CVMX_ADD_IO_SEG(0x00011F00000001F8ull) +#define CVMX_NPI_PORT33_INSTR_HDR \ + CVMX_ADD_IO_SEG(0x00011F0000000200ull) +#define CVMX_NPI_PORT34_INSTR_HDR \ + CVMX_ADD_IO_SEG(0x00011F0000000208ull) +#define CVMX_NPI_PORT35_INSTR_HDR \ + CVMX_ADD_IO_SEG(0x00011F0000000210ull) +#define CVMX_NPI_PORT_BP_CONTROL \ + CVMX_ADD_IO_SEG(0x00011F00000001F0ull) +#define CVMX_NPI_PX_DBPAIR_ADDR(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000000180ull + (((offset) & 3) * 8)) +#define CVMX_NPI_PX_INSTR_ADDR(offset) \ + CVMX_ADD_IO_SEG(0x00011F00000001C0ull + (((offset) & 3) * 8)) +#define CVMX_NPI_PX_INSTR_CNTS(offset) \ + CVMX_ADD_IO_SEG(0x00011F00000001A0ull + (((offset) & 3) * 8)) +#define CVMX_NPI_PX_PAIR_CNTS(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000000160ull + (((offset) & 3) * 8)) +#define CVMX_NPI_RSL_INT_BLOCKS \ + CVMX_ADD_IO_SEG(0x00011F0000000000ull) +#define CVMX_NPI_SIZE_INPUT0 \ + CVMX_ADD_IO_SEG(0x00011F0000000078ull) +#define CVMX_NPI_SIZE_INPUT1 \ + CVMX_ADD_IO_SEG(0x00011F0000000088ull) +#define CVMX_NPI_SIZE_INPUT2 \ + CVMX_ADD_IO_SEG(0x00011F0000000098ull) +#define CVMX_NPI_SIZE_INPUT3 \ + CVMX_ADD_IO_SEG(0x00011F00000000A8ull) +#define CVMX_NPI_SIZE_INPUTX(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000000078ull + (((offset) & 3) * 16)) +#define CVMX_NPI_WIN_READ_TO \ + CVMX_ADD_IO_SEG(0x00011F00000001E0ull) + +union cvmx_npi_base_addr_inputx { + uint64_t u64; + struct cvmx_npi_base_addr_inputx_s { + uint64_t baddr:61; + uint64_t reserved_0_2:3; + } s; + struct cvmx_npi_base_addr_inputx_s cn30xx; + struct cvmx_npi_base_addr_inputx_s cn31xx; + struct cvmx_npi_base_addr_inputx_s cn38xx; + struct cvmx_npi_base_addr_inputx_s cn38xxp2; + struct cvmx_npi_base_addr_inputx_s cn50xx; + struct cvmx_npi_base_addr_inputx_s cn58xx; + struct cvmx_npi_base_addr_inputx_s cn58xxp1; +}; + +union cvmx_npi_base_addr_outputx { + uint64_t u64; + struct cvmx_npi_base_addr_outputx_s { + uint64_t baddr:61; + uint64_t reserved_0_2:3; + } s; + struct cvmx_npi_base_addr_outputx_s cn30xx; + struct cvmx_npi_base_addr_outputx_s cn31xx; + struct cvmx_npi_base_addr_outputx_s cn38xx; + struct cvmx_npi_base_addr_outputx_s cn38xxp2; + struct cvmx_npi_base_addr_outputx_s cn50xx; + struct cvmx_npi_base_addr_outputx_s cn58xx; + struct cvmx_npi_base_addr_outputx_s cn58xxp1; +}; + +union cvmx_npi_bist_status { + uint64_t u64; + struct cvmx_npi_bist_status_s { + uint64_t reserved_20_63:44; + uint64_t csr_bs:1; + uint64_t dif_bs:1; + uint64_t rdp_bs:1; + uint64_t pcnc_bs:1; + uint64_t 
pcn_bs:1; + uint64_t rdn_bs:1; + uint64_t pcac_bs:1; + uint64_t pcad_bs:1; + uint64_t rdnl_bs:1; + uint64_t pgf_bs:1; + uint64_t pig_bs:1; + uint64_t pof0_bs:1; + uint64_t pof1_bs:1; + uint64_t pof2_bs:1; + uint64_t pof3_bs:1; + uint64_t pos_bs:1; + uint64_t nus_bs:1; + uint64_t dob_bs:1; + uint64_t pdf_bs:1; + uint64_t dpi_bs:1; + } s; + struct cvmx_npi_bist_status_cn30xx { + uint64_t reserved_20_63:44; + uint64_t csr_bs:1; + uint64_t dif_bs:1; + uint64_t rdp_bs:1; + uint64_t pcnc_bs:1; + uint64_t pcn_bs:1; + uint64_t rdn_bs:1; + uint64_t pcac_bs:1; + uint64_t pcad_bs:1; + uint64_t rdnl_bs:1; + uint64_t pgf_bs:1; + uint64_t pig_bs:1; + uint64_t pof0_bs:1; + uint64_t reserved_5_7:3; + uint64_t pos_bs:1; + uint64_t nus_bs:1; + uint64_t dob_bs:1; + uint64_t pdf_bs:1; + uint64_t dpi_bs:1; + } cn30xx; + struct cvmx_npi_bist_status_s cn31xx; + struct cvmx_npi_bist_status_s cn38xx; + struct cvmx_npi_bist_status_s cn38xxp2; + struct cvmx_npi_bist_status_cn50xx { + uint64_t reserved_20_63:44; + uint64_t csr_bs:1; + uint64_t dif_bs:1; + uint64_t rdp_bs:1; + uint64_t pcnc_bs:1; + uint64_t pcn_bs:1; + uint64_t rdn_bs:1; + uint64_t pcac_bs:1; + uint64_t pcad_bs:1; + uint64_t rdnl_bs:1; + uint64_t pgf_bs:1; + uint64_t pig_bs:1; + uint64_t pof0_bs:1; + uint64_t pof1_bs:1; + uint64_t reserved_5_6:2; + uint64_t pos_bs:1; + uint64_t nus_bs:1; + uint64_t dob_bs:1; + uint64_t pdf_bs:1; + uint64_t dpi_bs:1; + } cn50xx; + struct cvmx_npi_bist_status_s cn58xx; + struct cvmx_npi_bist_status_s cn58xxp1; +}; + +union cvmx_npi_buff_size_outputx { + uint64_t u64; + struct cvmx_npi_buff_size_outputx_s { + uint64_t reserved_23_63:41; + uint64_t isize:7; + uint64_t bsize:16; + } s; + struct cvmx_npi_buff_size_outputx_s cn30xx; + struct cvmx_npi_buff_size_outputx_s cn31xx; + struct cvmx_npi_buff_size_outputx_s cn38xx; + struct cvmx_npi_buff_size_outputx_s cn38xxp2; + struct cvmx_npi_buff_size_outputx_s cn50xx; + struct cvmx_npi_buff_size_outputx_s cn58xx; + struct cvmx_npi_buff_size_outputx_s cn58xxp1; +}; + +union cvmx_npi_comp_ctl { + uint64_t u64; + struct cvmx_npi_comp_ctl_s { + uint64_t reserved_10_63:54; + uint64_t pctl:5; + uint64_t nctl:5; + } s; + struct cvmx_npi_comp_ctl_s cn50xx; + struct cvmx_npi_comp_ctl_s cn58xx; + struct cvmx_npi_comp_ctl_s cn58xxp1; +}; + +union cvmx_npi_ctl_status { + uint64_t u64; + struct cvmx_npi_ctl_status_s { + uint64_t reserved_63_63:1; + uint64_t chip_rev:8; + uint64_t dis_pniw:1; + uint64_t out3_enb:1; + uint64_t out2_enb:1; + uint64_t out1_enb:1; + uint64_t out0_enb:1; + uint64_t ins3_enb:1; + uint64_t ins2_enb:1; + uint64_t ins1_enb:1; + uint64_t ins0_enb:1; + uint64_t ins3_64b:1; + uint64_t ins2_64b:1; + uint64_t ins1_64b:1; + uint64_t ins0_64b:1; + uint64_t pci_wdis:1; + uint64_t wait_com:1; + uint64_t reserved_37_39:3; + uint64_t max_word:5; + uint64_t reserved_10_31:22; + uint64_t timer:10; + } s; + struct cvmx_npi_ctl_status_cn30xx { + uint64_t reserved_63_63:1; + uint64_t chip_rev:8; + uint64_t dis_pniw:1; + uint64_t reserved_51_53:3; + uint64_t out0_enb:1; + uint64_t reserved_47_49:3; + uint64_t ins0_enb:1; + uint64_t reserved_43_45:3; + uint64_t ins0_64b:1; + uint64_t pci_wdis:1; + uint64_t wait_com:1; + uint64_t reserved_37_39:3; + uint64_t max_word:5; + uint64_t reserved_10_31:22; + uint64_t timer:10; + } cn30xx; + struct cvmx_npi_ctl_status_cn31xx { + uint64_t reserved_63_63:1; + uint64_t chip_rev:8; + uint64_t dis_pniw:1; + uint64_t reserved_52_53:2; + uint64_t out1_enb:1; + uint64_t out0_enb:1; + uint64_t reserved_48_49:2; + uint64_t ins1_enb:1; + uint64_t 
ins0_enb:1; + uint64_t reserved_44_45:2; + uint64_t ins1_64b:1; + uint64_t ins0_64b:1; + uint64_t pci_wdis:1; + uint64_t wait_com:1; + uint64_t reserved_37_39:3; + uint64_t max_word:5; + uint64_t reserved_10_31:22; + uint64_t timer:10; + } cn31xx; + struct cvmx_npi_ctl_status_s cn38xx; + struct cvmx_npi_ctl_status_s cn38xxp2; + struct cvmx_npi_ctl_status_cn31xx cn50xx; + struct cvmx_npi_ctl_status_s cn58xx; + struct cvmx_npi_ctl_status_s cn58xxp1; +}; + +union cvmx_npi_dbg_select { + uint64_t u64; + struct cvmx_npi_dbg_select_s { + uint64_t reserved_16_63:48; + uint64_t dbg_sel:16; + } s; + struct cvmx_npi_dbg_select_s cn30xx; + struct cvmx_npi_dbg_select_s cn31xx; + struct cvmx_npi_dbg_select_s cn38xx; + struct cvmx_npi_dbg_select_s cn38xxp2; + struct cvmx_npi_dbg_select_s cn50xx; + struct cvmx_npi_dbg_select_s cn58xx; + struct cvmx_npi_dbg_select_s cn58xxp1; +}; + +union cvmx_npi_dma_control { + uint64_t u64; + struct cvmx_npi_dma_control_s { + uint64_t reserved_36_63:28; + uint64_t b0_lend:1; + uint64_t dwb_denb:1; + uint64_t dwb_ichk:9; + uint64_t fpa_que:3; + uint64_t o_add1:1; + uint64_t o_ro:1; + uint64_t o_ns:1; + uint64_t o_es:2; + uint64_t o_mode:1; + uint64_t hp_enb:1; + uint64_t lp_enb:1; + uint64_t csize:14; + } s; + struct cvmx_npi_dma_control_s cn30xx; + struct cvmx_npi_dma_control_s cn31xx; + struct cvmx_npi_dma_control_s cn38xx; + struct cvmx_npi_dma_control_s cn38xxp2; + struct cvmx_npi_dma_control_s cn50xx; + struct cvmx_npi_dma_control_s cn58xx; + struct cvmx_npi_dma_control_s cn58xxp1; +}; + +union cvmx_npi_dma_highp_counts { + uint64_t u64; + struct cvmx_npi_dma_highp_counts_s { + uint64_t reserved_39_63:25; + uint64_t fcnt:7; + uint64_t dbell:32; + } s; + struct cvmx_npi_dma_highp_counts_s cn30xx; + struct cvmx_npi_dma_highp_counts_s cn31xx; + struct cvmx_npi_dma_highp_counts_s cn38xx; + struct cvmx_npi_dma_highp_counts_s cn38xxp2; + struct cvmx_npi_dma_highp_counts_s cn50xx; + struct cvmx_npi_dma_highp_counts_s cn58xx; + struct cvmx_npi_dma_highp_counts_s cn58xxp1; +}; + +union cvmx_npi_dma_highp_naddr { + uint64_t u64; + struct cvmx_npi_dma_highp_naddr_s { + uint64_t reserved_40_63:24; + uint64_t state:4; + uint64_t addr:36; + } s; + struct cvmx_npi_dma_highp_naddr_s cn30xx; + struct cvmx_npi_dma_highp_naddr_s cn31xx; + struct cvmx_npi_dma_highp_naddr_s cn38xx; + struct cvmx_npi_dma_highp_naddr_s cn38xxp2; + struct cvmx_npi_dma_highp_naddr_s cn50xx; + struct cvmx_npi_dma_highp_naddr_s cn58xx; + struct cvmx_npi_dma_highp_naddr_s cn58xxp1; +}; + +union cvmx_npi_dma_lowp_counts { + uint64_t u64; + struct cvmx_npi_dma_lowp_counts_s { + uint64_t reserved_39_63:25; + uint64_t fcnt:7; + uint64_t dbell:32; + } s; + struct cvmx_npi_dma_lowp_counts_s cn30xx; + struct cvmx_npi_dma_lowp_counts_s cn31xx; + struct cvmx_npi_dma_lowp_counts_s cn38xx; + struct cvmx_npi_dma_lowp_counts_s cn38xxp2; + struct cvmx_npi_dma_lowp_counts_s cn50xx; + struct cvmx_npi_dma_lowp_counts_s cn58xx; + struct cvmx_npi_dma_lowp_counts_s cn58xxp1; +}; + +union cvmx_npi_dma_lowp_naddr { + uint64_t u64; + struct cvmx_npi_dma_lowp_naddr_s { + uint64_t reserved_40_63:24; + uint64_t state:4; + uint64_t addr:36; + } s; + struct cvmx_npi_dma_lowp_naddr_s cn30xx; + struct cvmx_npi_dma_lowp_naddr_s cn31xx; + struct cvmx_npi_dma_lowp_naddr_s cn38xx; + struct cvmx_npi_dma_lowp_naddr_s cn38xxp2; + struct cvmx_npi_dma_lowp_naddr_s cn50xx; + struct cvmx_npi_dma_lowp_naddr_s cn58xx; + struct cvmx_npi_dma_lowp_naddr_s cn58xxp1; +}; + +union cvmx_npi_highp_dbell { + uint64_t u64; + struct cvmx_npi_highp_dbell_s { + 
uint64_t reserved_16_63:48; + uint64_t dbell:16; + } s; + struct cvmx_npi_highp_dbell_s cn30xx; + struct cvmx_npi_highp_dbell_s cn31xx; + struct cvmx_npi_highp_dbell_s cn38xx; + struct cvmx_npi_highp_dbell_s cn38xxp2; + struct cvmx_npi_highp_dbell_s cn50xx; + struct cvmx_npi_highp_dbell_s cn58xx; + struct cvmx_npi_highp_dbell_s cn58xxp1; +}; + +union cvmx_npi_highp_ibuff_saddr { + uint64_t u64; + struct cvmx_npi_highp_ibuff_saddr_s { + uint64_t reserved_36_63:28; + uint64_t saddr:36; + } s; + struct cvmx_npi_highp_ibuff_saddr_s cn30xx; + struct cvmx_npi_highp_ibuff_saddr_s cn31xx; + struct cvmx_npi_highp_ibuff_saddr_s cn38xx; + struct cvmx_npi_highp_ibuff_saddr_s cn38xxp2; + struct cvmx_npi_highp_ibuff_saddr_s cn50xx; + struct cvmx_npi_highp_ibuff_saddr_s cn58xx; + struct cvmx_npi_highp_ibuff_saddr_s cn58xxp1; +}; + +union cvmx_npi_input_control { + uint64_t u64; + struct cvmx_npi_input_control_s { + uint64_t reserved_23_63:41; + uint64_t pkt_rr:1; + uint64_t pbp_dhi:13; + uint64_t d_nsr:1; + uint64_t d_esr:2; + uint64_t d_ror:1; + uint64_t use_csr:1; + uint64_t nsr:1; + uint64_t esr:2; + uint64_t ror:1; + } s; + struct cvmx_npi_input_control_cn30xx { + uint64_t reserved_22_63:42; + uint64_t pbp_dhi:13; + uint64_t d_nsr:1; + uint64_t d_esr:2; + uint64_t d_ror:1; + uint64_t use_csr:1; + uint64_t nsr:1; + uint64_t esr:2; + uint64_t ror:1; + } cn30xx; + struct cvmx_npi_input_control_cn30xx cn31xx; + struct cvmx_npi_input_control_s cn38xx; + struct cvmx_npi_input_control_cn30xx cn38xxp2; + struct cvmx_npi_input_control_s cn50xx; + struct cvmx_npi_input_control_s cn58xx; + struct cvmx_npi_input_control_s cn58xxp1; +}; + +union cvmx_npi_int_enb { + uint64_t u64; + struct cvmx_npi_int_enb_s { + uint64_t reserved_62_63:2; + uint64_t q1_a_f:1; + uint64_t q1_s_e:1; + uint64_t pdf_p_f:1; + uint64_t pdf_p_e:1; + uint64_t pcf_p_f:1; + uint64_t pcf_p_e:1; + uint64_t rdx_s_e:1; + uint64_t rwx_s_e:1; + uint64_t pnc_a_f:1; + uint64_t pnc_s_e:1; + uint64_t com_a_f:1; + uint64_t com_s_e:1; + uint64_t q3_a_f:1; + uint64_t q3_s_e:1; + uint64_t q2_a_f:1; + uint64_t q2_s_e:1; + uint64_t pcr_a_f:1; + uint64_t pcr_s_e:1; + uint64_t fcr_a_f:1; + uint64_t fcr_s_e:1; + uint64_t iobdma:1; + uint64_t p_dperr:1; + uint64_t win_rto:1; + uint64_t i3_pperr:1; + uint64_t i2_pperr:1; + uint64_t i1_pperr:1; + uint64_t i0_pperr:1; + uint64_t p3_ptout:1; + uint64_t p2_ptout:1; + uint64_t p1_ptout:1; + uint64_t p0_ptout:1; + uint64_t p3_pperr:1; + uint64_t p2_pperr:1; + uint64_t p1_pperr:1; + uint64_t p0_pperr:1; + uint64_t g3_rtout:1; + uint64_t g2_rtout:1; + uint64_t g1_rtout:1; + uint64_t g0_rtout:1; + uint64_t p3_perr:1; + uint64_t p2_perr:1; + uint64_t p1_perr:1; + uint64_t p0_perr:1; + uint64_t p3_rtout:1; + uint64_t p2_rtout:1; + uint64_t p1_rtout:1; + uint64_t p0_rtout:1; + uint64_t i3_overf:1; + uint64_t i2_overf:1; + uint64_t i1_overf:1; + uint64_t i0_overf:1; + uint64_t i3_rtout:1; + uint64_t i2_rtout:1; + uint64_t i1_rtout:1; + uint64_t i0_rtout:1; + uint64_t po3_2sml:1; + uint64_t po2_2sml:1; + uint64_t po1_2sml:1; + uint64_t po0_2sml:1; + uint64_t pci_rsl:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } s; + struct cvmx_npi_int_enb_cn30xx { + uint64_t reserved_62_63:2; + uint64_t q1_a_f:1; + uint64_t q1_s_e:1; + uint64_t pdf_p_f:1; + uint64_t pdf_p_e:1; + uint64_t pcf_p_f:1; + uint64_t pcf_p_e:1; + uint64_t rdx_s_e:1; + uint64_t rwx_s_e:1; + uint64_t pnc_a_f:1; + uint64_t pnc_s_e:1; + uint64_t com_a_f:1; + uint64_t com_s_e:1; + uint64_t q3_a_f:1; + uint64_t q3_s_e:1; + uint64_t q2_a_f:1; + uint64_t q2_s_e:1; + 
uint64_t pcr_a_f:1; + uint64_t pcr_s_e:1; + uint64_t fcr_a_f:1; + uint64_t fcr_s_e:1; + uint64_t iobdma:1; + uint64_t p_dperr:1; + uint64_t win_rto:1; + uint64_t reserved_36_38:3; + uint64_t i0_pperr:1; + uint64_t reserved_32_34:3; + uint64_t p0_ptout:1; + uint64_t reserved_28_30:3; + uint64_t p0_pperr:1; + uint64_t reserved_24_26:3; + uint64_t g0_rtout:1; + uint64_t reserved_20_22:3; + uint64_t p0_perr:1; + uint64_t reserved_16_18:3; + uint64_t p0_rtout:1; + uint64_t reserved_12_14:3; + uint64_t i0_overf:1; + uint64_t reserved_8_10:3; + uint64_t i0_rtout:1; + uint64_t reserved_4_6:3; + uint64_t po0_2sml:1; + uint64_t pci_rsl:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } cn30xx; + struct cvmx_npi_int_enb_cn31xx { + uint64_t reserved_62_63:2; + uint64_t q1_a_f:1; + uint64_t q1_s_e:1; + uint64_t pdf_p_f:1; + uint64_t pdf_p_e:1; + uint64_t pcf_p_f:1; + uint64_t pcf_p_e:1; + uint64_t rdx_s_e:1; + uint64_t rwx_s_e:1; + uint64_t pnc_a_f:1; + uint64_t pnc_s_e:1; + uint64_t com_a_f:1; + uint64_t com_s_e:1; + uint64_t q3_a_f:1; + uint64_t q3_s_e:1; + uint64_t q2_a_f:1; + uint64_t q2_s_e:1; + uint64_t pcr_a_f:1; + uint64_t pcr_s_e:1; + uint64_t fcr_a_f:1; + uint64_t fcr_s_e:1; + uint64_t iobdma:1; + uint64_t p_dperr:1; + uint64_t win_rto:1; + uint64_t reserved_37_38:2; + uint64_t i1_pperr:1; + uint64_t i0_pperr:1; + uint64_t reserved_33_34:2; + uint64_t p1_ptout:1; + uint64_t p0_ptout:1; + uint64_t reserved_29_30:2; + uint64_t p1_pperr:1; + uint64_t p0_pperr:1; + uint64_t reserved_25_26:2; + uint64_t g1_rtout:1; + uint64_t g0_rtout:1; + uint64_t reserved_21_22:2; + uint64_t p1_perr:1; + uint64_t p0_perr:1; + uint64_t reserved_17_18:2; + uint64_t p1_rtout:1; + uint64_t p0_rtout:1; + uint64_t reserved_13_14:2; + uint64_t i1_overf:1; + uint64_t i0_overf:1; + uint64_t reserved_9_10:2; + uint64_t i1_rtout:1; + uint64_t i0_rtout:1; + uint64_t reserved_5_6:2; + uint64_t po1_2sml:1; + uint64_t po0_2sml:1; + uint64_t pci_rsl:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } cn31xx; + struct cvmx_npi_int_enb_s cn38xx; + struct cvmx_npi_int_enb_cn38xxp2 { + uint64_t reserved_42_63:22; + uint64_t iobdma:1; + uint64_t p_dperr:1; + uint64_t win_rto:1; + uint64_t i3_pperr:1; + uint64_t i2_pperr:1; + uint64_t i1_pperr:1; + uint64_t i0_pperr:1; + uint64_t p3_ptout:1; + uint64_t p2_ptout:1; + uint64_t p1_ptout:1; + uint64_t p0_ptout:1; + uint64_t p3_pperr:1; + uint64_t p2_pperr:1; + uint64_t p1_pperr:1; + uint64_t p0_pperr:1; + uint64_t g3_rtout:1; + uint64_t g2_rtout:1; + uint64_t g1_rtout:1; + uint64_t g0_rtout:1; + uint64_t p3_perr:1; + uint64_t p2_perr:1; + uint64_t p1_perr:1; + uint64_t p0_perr:1; + uint64_t p3_rtout:1; + uint64_t p2_rtout:1; + uint64_t p1_rtout:1; + uint64_t p0_rtout:1; + uint64_t i3_overf:1; + uint64_t i2_overf:1; + uint64_t i1_overf:1; + uint64_t i0_overf:1; + uint64_t i3_rtout:1; + uint64_t i2_rtout:1; + uint64_t i1_rtout:1; + uint64_t i0_rtout:1; + uint64_t po3_2sml:1; + uint64_t po2_2sml:1; + uint64_t po1_2sml:1; + uint64_t po0_2sml:1; + uint64_t pci_rsl:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } cn38xxp2; + struct cvmx_npi_int_enb_cn31xx cn50xx; + struct cvmx_npi_int_enb_s cn58xx; + struct cvmx_npi_int_enb_s cn58xxp1; +}; + +union cvmx_npi_int_sum { + uint64_t u64; + struct cvmx_npi_int_sum_s { + uint64_t reserved_62_63:2; + uint64_t q1_a_f:1; + uint64_t q1_s_e:1; + uint64_t pdf_p_f:1; + uint64_t pdf_p_e:1; + uint64_t pcf_p_f:1; + uint64_t pcf_p_e:1; + uint64_t rdx_s_e:1; + uint64_t rwx_s_e:1; + uint64_t pnc_a_f:1; + uint64_t pnc_s_e:1; + uint64_t com_a_f:1; + uint64_t 
com_s_e:1; + uint64_t q3_a_f:1; + uint64_t q3_s_e:1; + uint64_t q2_a_f:1; + uint64_t q2_s_e:1; + uint64_t pcr_a_f:1; + uint64_t pcr_s_e:1; + uint64_t fcr_a_f:1; + uint64_t fcr_s_e:1; + uint64_t iobdma:1; + uint64_t p_dperr:1; + uint64_t win_rto:1; + uint64_t i3_pperr:1; + uint64_t i2_pperr:1; + uint64_t i1_pperr:1; + uint64_t i0_pperr:1; + uint64_t p3_ptout:1; + uint64_t p2_ptout:1; + uint64_t p1_ptout:1; + uint64_t p0_ptout:1; + uint64_t p3_pperr:1; + uint64_t p2_pperr:1; + uint64_t p1_pperr:1; + uint64_t p0_pperr:1; + uint64_t g3_rtout:1; + uint64_t g2_rtout:1; + uint64_t g1_rtout:1; + uint64_t g0_rtout:1; + uint64_t p3_perr:1; + uint64_t p2_perr:1; + uint64_t p1_perr:1; + uint64_t p0_perr:1; + uint64_t p3_rtout:1; + uint64_t p2_rtout:1; + uint64_t p1_rtout:1; + uint64_t p0_rtout:1; + uint64_t i3_overf:1; + uint64_t i2_overf:1; + uint64_t i1_overf:1; + uint64_t i0_overf:1; + uint64_t i3_rtout:1; + uint64_t i2_rtout:1; + uint64_t i1_rtout:1; + uint64_t i0_rtout:1; + uint64_t po3_2sml:1; + uint64_t po2_2sml:1; + uint64_t po1_2sml:1; + uint64_t po0_2sml:1; + uint64_t pci_rsl:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } s; + struct cvmx_npi_int_sum_cn30xx { + uint64_t reserved_62_63:2; + uint64_t q1_a_f:1; + uint64_t q1_s_e:1; + uint64_t pdf_p_f:1; + uint64_t pdf_p_e:1; + uint64_t pcf_p_f:1; + uint64_t pcf_p_e:1; + uint64_t rdx_s_e:1; + uint64_t rwx_s_e:1; + uint64_t pnc_a_f:1; + uint64_t pnc_s_e:1; + uint64_t com_a_f:1; + uint64_t com_s_e:1; + uint64_t q3_a_f:1; + uint64_t q3_s_e:1; + uint64_t q2_a_f:1; + uint64_t q2_s_e:1; + uint64_t pcr_a_f:1; + uint64_t pcr_s_e:1; + uint64_t fcr_a_f:1; + uint64_t fcr_s_e:1; + uint64_t iobdma:1; + uint64_t p_dperr:1; + uint64_t win_rto:1; + uint64_t reserved_36_38:3; + uint64_t i0_pperr:1; + uint64_t reserved_32_34:3; + uint64_t p0_ptout:1; + uint64_t reserved_28_30:3; + uint64_t p0_pperr:1; + uint64_t reserved_24_26:3; + uint64_t g0_rtout:1; + uint64_t reserved_20_22:3; + uint64_t p0_perr:1; + uint64_t reserved_16_18:3; + uint64_t p0_rtout:1; + uint64_t reserved_12_14:3; + uint64_t i0_overf:1; + uint64_t reserved_8_10:3; + uint64_t i0_rtout:1; + uint64_t reserved_4_6:3; + uint64_t po0_2sml:1; + uint64_t pci_rsl:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } cn30xx; + struct cvmx_npi_int_sum_cn31xx { + uint64_t reserved_62_63:2; + uint64_t q1_a_f:1; + uint64_t q1_s_e:1; + uint64_t pdf_p_f:1; + uint64_t pdf_p_e:1; + uint64_t pcf_p_f:1; + uint64_t pcf_p_e:1; + uint64_t rdx_s_e:1; + uint64_t rwx_s_e:1; + uint64_t pnc_a_f:1; + uint64_t pnc_s_e:1; + uint64_t com_a_f:1; + uint64_t com_s_e:1; + uint64_t q3_a_f:1; + uint64_t q3_s_e:1; + uint64_t q2_a_f:1; + uint64_t q2_s_e:1; + uint64_t pcr_a_f:1; + uint64_t pcr_s_e:1; + uint64_t fcr_a_f:1; + uint64_t fcr_s_e:1; + uint64_t iobdma:1; + uint64_t p_dperr:1; + uint64_t win_rto:1; + uint64_t reserved_37_38:2; + uint64_t i1_pperr:1; + uint64_t i0_pperr:1; + uint64_t reserved_33_34:2; + uint64_t p1_ptout:1; + uint64_t p0_ptout:1; + uint64_t reserved_29_30:2; + uint64_t p1_pperr:1; + uint64_t p0_pperr:1; + uint64_t reserved_25_26:2; + uint64_t g1_rtout:1; + uint64_t g0_rtout:1; + uint64_t reserved_21_22:2; + uint64_t p1_perr:1; + uint64_t p0_perr:1; + uint64_t reserved_17_18:2; + uint64_t p1_rtout:1; + uint64_t p0_rtout:1; + uint64_t reserved_13_14:2; + uint64_t i1_overf:1; + uint64_t i0_overf:1; + uint64_t reserved_9_10:2; + uint64_t i1_rtout:1; + uint64_t i0_rtout:1; + uint64_t reserved_5_6:2; + uint64_t po1_2sml:1; + uint64_t po0_2sml:1; + uint64_t pci_rsl:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } 
cn31xx; + struct cvmx_npi_int_sum_s cn38xx; + struct cvmx_npi_int_sum_cn38xxp2 { + uint64_t reserved_42_63:22; + uint64_t iobdma:1; + uint64_t p_dperr:1; + uint64_t win_rto:1; + uint64_t i3_pperr:1; + uint64_t i2_pperr:1; + uint64_t i1_pperr:1; + uint64_t i0_pperr:1; + uint64_t p3_ptout:1; + uint64_t p2_ptout:1; + uint64_t p1_ptout:1; + uint64_t p0_ptout:1; + uint64_t p3_pperr:1; + uint64_t p2_pperr:1; + uint64_t p1_pperr:1; + uint64_t p0_pperr:1; + uint64_t g3_rtout:1; + uint64_t g2_rtout:1; + uint64_t g1_rtout:1; + uint64_t g0_rtout:1; + uint64_t p3_perr:1; + uint64_t p2_perr:1; + uint64_t p1_perr:1; + uint64_t p0_perr:1; + uint64_t p3_rtout:1; + uint64_t p2_rtout:1; + uint64_t p1_rtout:1; + uint64_t p0_rtout:1; + uint64_t i3_overf:1; + uint64_t i2_overf:1; + uint64_t i1_overf:1; + uint64_t i0_overf:1; + uint64_t i3_rtout:1; + uint64_t i2_rtout:1; + uint64_t i1_rtout:1; + uint64_t i0_rtout:1; + uint64_t po3_2sml:1; + uint64_t po2_2sml:1; + uint64_t po1_2sml:1; + uint64_t po0_2sml:1; + uint64_t pci_rsl:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } cn38xxp2; + struct cvmx_npi_int_sum_cn31xx cn50xx; + struct cvmx_npi_int_sum_s cn58xx; + struct cvmx_npi_int_sum_s cn58xxp1; +}; + +union cvmx_npi_lowp_dbell { + uint64_t u64; + struct cvmx_npi_lowp_dbell_s { + uint64_t reserved_16_63:48; + uint64_t dbell:16; + } s; + struct cvmx_npi_lowp_dbell_s cn30xx; + struct cvmx_npi_lowp_dbell_s cn31xx; + struct cvmx_npi_lowp_dbell_s cn38xx; + struct cvmx_npi_lowp_dbell_s cn38xxp2; + struct cvmx_npi_lowp_dbell_s cn50xx; + struct cvmx_npi_lowp_dbell_s cn58xx; + struct cvmx_npi_lowp_dbell_s cn58xxp1; +}; + +union cvmx_npi_lowp_ibuff_saddr { + uint64_t u64; + struct cvmx_npi_lowp_ibuff_saddr_s { + uint64_t reserved_36_63:28; + uint64_t saddr:36; + } s; + struct cvmx_npi_lowp_ibuff_saddr_s cn30xx; + struct cvmx_npi_lowp_ibuff_saddr_s cn31xx; + struct cvmx_npi_lowp_ibuff_saddr_s cn38xx; + struct cvmx_npi_lowp_ibuff_saddr_s cn38xxp2; + struct cvmx_npi_lowp_ibuff_saddr_s cn50xx; + struct cvmx_npi_lowp_ibuff_saddr_s cn58xx; + struct cvmx_npi_lowp_ibuff_saddr_s cn58xxp1; +}; + +union cvmx_npi_mem_access_subidx { + uint64_t u64; + struct cvmx_npi_mem_access_subidx_s { + uint64_t reserved_38_63:26; + uint64_t shortl:1; + uint64_t nmerge:1; + uint64_t esr:2; + uint64_t esw:2; + uint64_t nsr:1; + uint64_t nsw:1; + uint64_t ror:1; + uint64_t row:1; + uint64_t ba:28; + } s; + struct cvmx_npi_mem_access_subidx_s cn30xx; + struct cvmx_npi_mem_access_subidx_cn31xx { + uint64_t reserved_36_63:28; + uint64_t esr:2; + uint64_t esw:2; + uint64_t nsr:1; + uint64_t nsw:1; + uint64_t ror:1; + uint64_t row:1; + uint64_t ba:28; + } cn31xx; + struct cvmx_npi_mem_access_subidx_s cn38xx; + struct cvmx_npi_mem_access_subidx_cn31xx cn38xxp2; + struct cvmx_npi_mem_access_subidx_s cn50xx; + struct cvmx_npi_mem_access_subidx_s cn58xx; + struct cvmx_npi_mem_access_subidx_s cn58xxp1; +}; + +union cvmx_npi_msi_rcv { + uint64_t u64; + struct cvmx_npi_msi_rcv_s { + uint64_t int_vec:64; + } s; + struct cvmx_npi_msi_rcv_s cn30xx; + struct cvmx_npi_msi_rcv_s cn31xx; + struct cvmx_npi_msi_rcv_s cn38xx; + struct cvmx_npi_msi_rcv_s cn38xxp2; + struct cvmx_npi_msi_rcv_s cn50xx; + struct cvmx_npi_msi_rcv_s cn58xx; + struct cvmx_npi_msi_rcv_s cn58xxp1; +}; + +union cvmx_npi_num_desc_outputx { + uint64_t u64; + struct cvmx_npi_num_desc_outputx_s { + uint64_t reserved_32_63:32; + uint64_t size:32; + } s; + struct cvmx_npi_num_desc_outputx_s cn30xx; + struct cvmx_npi_num_desc_outputx_s cn31xx; + struct cvmx_npi_num_desc_outputx_s cn38xx; + struct 
cvmx_npi_num_desc_outputx_s cn38xxp2; + struct cvmx_npi_num_desc_outputx_s cn50xx; + struct cvmx_npi_num_desc_outputx_s cn58xx; + struct cvmx_npi_num_desc_outputx_s cn58xxp1; +}; + +union cvmx_npi_output_control { + uint64_t u64; + struct cvmx_npi_output_control_s { + uint64_t reserved_49_63:15; + uint64_t pkt_rr:1; + uint64_t p3_bmode:1; + uint64_t p2_bmode:1; + uint64_t p1_bmode:1; + uint64_t p0_bmode:1; + uint64_t o3_es:2; + uint64_t o3_ns:1; + uint64_t o3_ro:1; + uint64_t o2_es:2; + uint64_t o2_ns:1; + uint64_t o2_ro:1; + uint64_t o1_es:2; + uint64_t o1_ns:1; + uint64_t o1_ro:1; + uint64_t o0_es:2; + uint64_t o0_ns:1; + uint64_t o0_ro:1; + uint64_t o3_csrm:1; + uint64_t o2_csrm:1; + uint64_t o1_csrm:1; + uint64_t o0_csrm:1; + uint64_t reserved_20_23:4; + uint64_t iptr_o3:1; + uint64_t iptr_o2:1; + uint64_t iptr_o1:1; + uint64_t iptr_o0:1; + uint64_t esr_sl3:2; + uint64_t nsr_sl3:1; + uint64_t ror_sl3:1; + uint64_t esr_sl2:2; + uint64_t nsr_sl2:1; + uint64_t ror_sl2:1; + uint64_t esr_sl1:2; + uint64_t nsr_sl1:1; + uint64_t ror_sl1:1; + uint64_t esr_sl0:2; + uint64_t nsr_sl0:1; + uint64_t ror_sl0:1; + } s; + struct cvmx_npi_output_control_cn30xx { + uint64_t reserved_45_63:19; + uint64_t p0_bmode:1; + uint64_t reserved_32_43:12; + uint64_t o0_es:2; + uint64_t o0_ns:1; + uint64_t o0_ro:1; + uint64_t reserved_25_27:3; + uint64_t o0_csrm:1; + uint64_t reserved_17_23:7; + uint64_t iptr_o0:1; + uint64_t reserved_4_15:12; + uint64_t esr_sl0:2; + uint64_t nsr_sl0:1; + uint64_t ror_sl0:1; + } cn30xx; + struct cvmx_npi_output_control_cn31xx { + uint64_t reserved_46_63:18; + uint64_t p1_bmode:1; + uint64_t p0_bmode:1; + uint64_t reserved_36_43:8; + uint64_t o1_es:2; + uint64_t o1_ns:1; + uint64_t o1_ro:1; + uint64_t o0_es:2; + uint64_t o0_ns:1; + uint64_t o0_ro:1; + uint64_t reserved_26_27:2; + uint64_t o1_csrm:1; + uint64_t o0_csrm:1; + uint64_t reserved_18_23:6; + uint64_t iptr_o1:1; + uint64_t iptr_o0:1; + uint64_t reserved_8_15:8; + uint64_t esr_sl1:2; + uint64_t nsr_sl1:1; + uint64_t ror_sl1:1; + uint64_t esr_sl0:2; + uint64_t nsr_sl0:1; + uint64_t ror_sl0:1; + } cn31xx; + struct cvmx_npi_output_control_s cn38xx; + struct cvmx_npi_output_control_cn38xxp2 { + uint64_t reserved_48_63:16; + uint64_t p3_bmode:1; + uint64_t p2_bmode:1; + uint64_t p1_bmode:1; + uint64_t p0_bmode:1; + uint64_t o3_es:2; + uint64_t o3_ns:1; + uint64_t o3_ro:1; + uint64_t o2_es:2; + uint64_t o2_ns:1; + uint64_t o2_ro:1; + uint64_t o1_es:2; + uint64_t o1_ns:1; + uint64_t o1_ro:1; + uint64_t o0_es:2; + uint64_t o0_ns:1; + uint64_t o0_ro:1; + uint64_t o3_csrm:1; + uint64_t o2_csrm:1; + uint64_t o1_csrm:1; + uint64_t o0_csrm:1; + uint64_t reserved_20_23:4; + uint64_t iptr_o3:1; + uint64_t iptr_o2:1; + uint64_t iptr_o1:1; + uint64_t iptr_o0:1; + uint64_t esr_sl3:2; + uint64_t nsr_sl3:1; + uint64_t ror_sl3:1; + uint64_t esr_sl2:2; + uint64_t nsr_sl2:1; + uint64_t ror_sl2:1; + uint64_t esr_sl1:2; + uint64_t nsr_sl1:1; + uint64_t ror_sl1:1; + uint64_t esr_sl0:2; + uint64_t nsr_sl0:1; + uint64_t ror_sl0:1; + } cn38xxp2; + struct cvmx_npi_output_control_cn50xx { + uint64_t reserved_49_63:15; + uint64_t pkt_rr:1; + uint64_t reserved_46_47:2; + uint64_t p1_bmode:1; + uint64_t p0_bmode:1; + uint64_t reserved_36_43:8; + uint64_t o1_es:2; + uint64_t o1_ns:1; + uint64_t o1_ro:1; + uint64_t o0_es:2; + uint64_t o0_ns:1; + uint64_t o0_ro:1; + uint64_t reserved_26_27:2; + uint64_t o1_csrm:1; + uint64_t o0_csrm:1; + uint64_t reserved_18_23:6; + uint64_t iptr_o1:1; + uint64_t iptr_o0:1; + uint64_t reserved_8_15:8; + uint64_t esr_sl1:2; + 
uint64_t nsr_sl1:1; + uint64_t ror_sl1:1; + uint64_t esr_sl0:2; + uint64_t nsr_sl0:1; + uint64_t ror_sl0:1; + } cn50xx; + struct cvmx_npi_output_control_s cn58xx; + struct cvmx_npi_output_control_s cn58xxp1; +}; + +union cvmx_npi_px_dbpair_addr { + uint64_t u64; + struct cvmx_npi_px_dbpair_addr_s { + uint64_t reserved_63_63:1; + uint64_t state:2; + uint64_t naddr:61; + } s; + struct cvmx_npi_px_dbpair_addr_s cn30xx; + struct cvmx_npi_px_dbpair_addr_s cn31xx; + struct cvmx_npi_px_dbpair_addr_s cn38xx; + struct cvmx_npi_px_dbpair_addr_s cn38xxp2; + struct cvmx_npi_px_dbpair_addr_s cn50xx; + struct cvmx_npi_px_dbpair_addr_s cn58xx; + struct cvmx_npi_px_dbpair_addr_s cn58xxp1; +}; + +union cvmx_npi_px_instr_addr { + uint64_t u64; + struct cvmx_npi_px_instr_addr_s { + uint64_t state:3; + uint64_t naddr:61; + } s; + struct cvmx_npi_px_instr_addr_s cn30xx; + struct cvmx_npi_px_instr_addr_s cn31xx; + struct cvmx_npi_px_instr_addr_s cn38xx; + struct cvmx_npi_px_instr_addr_s cn38xxp2; + struct cvmx_npi_px_instr_addr_s cn50xx; + struct cvmx_npi_px_instr_addr_s cn58xx; + struct cvmx_npi_px_instr_addr_s cn58xxp1; +}; + +union cvmx_npi_px_instr_cnts { + uint64_t u64; + struct cvmx_npi_px_instr_cnts_s { + uint64_t reserved_38_63:26; + uint64_t fcnt:6; + uint64_t avail:32; + } s; + struct cvmx_npi_px_instr_cnts_s cn30xx; + struct cvmx_npi_px_instr_cnts_s cn31xx; + struct cvmx_npi_px_instr_cnts_s cn38xx; + struct cvmx_npi_px_instr_cnts_s cn38xxp2; + struct cvmx_npi_px_instr_cnts_s cn50xx; + struct cvmx_npi_px_instr_cnts_s cn58xx; + struct cvmx_npi_px_instr_cnts_s cn58xxp1; +}; + +union cvmx_npi_px_pair_cnts { + uint64_t u64; + struct cvmx_npi_px_pair_cnts_s { + uint64_t reserved_37_63:27; + uint64_t fcnt:5; + uint64_t avail:32; + } s; + struct cvmx_npi_px_pair_cnts_s cn30xx; + struct cvmx_npi_px_pair_cnts_s cn31xx; + struct cvmx_npi_px_pair_cnts_s cn38xx; + struct cvmx_npi_px_pair_cnts_s cn38xxp2; + struct cvmx_npi_px_pair_cnts_s cn50xx; + struct cvmx_npi_px_pair_cnts_s cn58xx; + struct cvmx_npi_px_pair_cnts_s cn58xxp1; +}; + +union cvmx_npi_pci_burst_size { + uint64_t u64; + struct cvmx_npi_pci_burst_size_s { + uint64_t reserved_14_63:50; + uint64_t wr_brst:7; + uint64_t rd_brst:7; + } s; + struct cvmx_npi_pci_burst_size_s cn30xx; + struct cvmx_npi_pci_burst_size_s cn31xx; + struct cvmx_npi_pci_burst_size_s cn38xx; + struct cvmx_npi_pci_burst_size_s cn38xxp2; + struct cvmx_npi_pci_burst_size_s cn50xx; + struct cvmx_npi_pci_burst_size_s cn58xx; + struct cvmx_npi_pci_burst_size_s cn58xxp1; +}; + +union cvmx_npi_pci_int_arb_cfg { + uint64_t u64; + struct cvmx_npi_pci_int_arb_cfg_s { + uint64_t reserved_13_63:51; + uint64_t hostmode:1; + uint64_t pci_ovr:4; + uint64_t reserved_5_7:3; + uint64_t en:1; + uint64_t park_mod:1; + uint64_t park_dev:3; + } s; + struct cvmx_npi_pci_int_arb_cfg_cn30xx { + uint64_t reserved_5_63:59; + uint64_t en:1; + uint64_t park_mod:1; + uint64_t park_dev:3; + } cn30xx; + struct cvmx_npi_pci_int_arb_cfg_cn30xx cn31xx; + struct cvmx_npi_pci_int_arb_cfg_cn30xx cn38xx; + struct cvmx_npi_pci_int_arb_cfg_cn30xx cn38xxp2; + struct cvmx_npi_pci_int_arb_cfg_s cn50xx; + struct cvmx_npi_pci_int_arb_cfg_s cn58xx; + struct cvmx_npi_pci_int_arb_cfg_s cn58xxp1; +}; + +union cvmx_npi_pci_read_cmd { + uint64_t u64; + struct cvmx_npi_pci_read_cmd_s { + uint64_t reserved_11_63:53; + uint64_t cmd_size:11; + } s; + struct cvmx_npi_pci_read_cmd_s cn30xx; + struct cvmx_npi_pci_read_cmd_s cn31xx; + struct cvmx_npi_pci_read_cmd_s cn38xx; + struct cvmx_npi_pci_read_cmd_s cn38xxp2; + struct 
cvmx_npi_pci_read_cmd_s cn50xx; + struct cvmx_npi_pci_read_cmd_s cn58xx; + struct cvmx_npi_pci_read_cmd_s cn58xxp1; +}; + +union cvmx_npi_port32_instr_hdr { + uint64_t u64; + struct cvmx_npi_port32_instr_hdr_s { + uint64_t reserved_44_63:20; + uint64_t pbp:1; + uint64_t rsv_f:5; + uint64_t rparmode:2; + uint64_t rsv_e:1; + uint64_t rskp_len:7; + uint64_t rsv_d:6; + uint64_t use_ihdr:1; + uint64_t rsv_c:5; + uint64_t par_mode:2; + uint64_t rsv_b:1; + uint64_t skp_len:7; + uint64_t rsv_a:6; + } s; + struct cvmx_npi_port32_instr_hdr_s cn30xx; + struct cvmx_npi_port32_instr_hdr_s cn31xx; + struct cvmx_npi_port32_instr_hdr_s cn38xx; + struct cvmx_npi_port32_instr_hdr_s cn38xxp2; + struct cvmx_npi_port32_instr_hdr_s cn50xx; + struct cvmx_npi_port32_instr_hdr_s cn58xx; + struct cvmx_npi_port32_instr_hdr_s cn58xxp1; +}; + +union cvmx_npi_port33_instr_hdr { + uint64_t u64; + struct cvmx_npi_port33_instr_hdr_s { + uint64_t reserved_44_63:20; + uint64_t pbp:1; + uint64_t rsv_f:5; + uint64_t rparmode:2; + uint64_t rsv_e:1; + uint64_t rskp_len:7; + uint64_t rsv_d:6; + uint64_t use_ihdr:1; + uint64_t rsv_c:5; + uint64_t par_mode:2; + uint64_t rsv_b:1; + uint64_t skp_len:7; + uint64_t rsv_a:6; + } s; + struct cvmx_npi_port33_instr_hdr_s cn31xx; + struct cvmx_npi_port33_instr_hdr_s cn38xx; + struct cvmx_npi_port33_instr_hdr_s cn38xxp2; + struct cvmx_npi_port33_instr_hdr_s cn50xx; + struct cvmx_npi_port33_instr_hdr_s cn58xx; + struct cvmx_npi_port33_instr_hdr_s cn58xxp1; +}; + +union cvmx_npi_port34_instr_hdr { + uint64_t u64; + struct cvmx_npi_port34_instr_hdr_s { + uint64_t reserved_44_63:20; + uint64_t pbp:1; + uint64_t rsv_f:5; + uint64_t rparmode:2; + uint64_t rsv_e:1; + uint64_t rskp_len:7; + uint64_t rsv_d:6; + uint64_t use_ihdr:1; + uint64_t rsv_c:5; + uint64_t par_mode:2; + uint64_t rsv_b:1; + uint64_t skp_len:7; + uint64_t rsv_a:6; + } s; + struct cvmx_npi_port34_instr_hdr_s cn38xx; + struct cvmx_npi_port34_instr_hdr_s cn38xxp2; + struct cvmx_npi_port34_instr_hdr_s cn58xx; + struct cvmx_npi_port34_instr_hdr_s cn58xxp1; +}; + +union cvmx_npi_port35_instr_hdr { + uint64_t u64; + struct cvmx_npi_port35_instr_hdr_s { + uint64_t reserved_44_63:20; + uint64_t pbp:1; + uint64_t rsv_f:5; + uint64_t rparmode:2; + uint64_t rsv_e:1; + uint64_t rskp_len:7; + uint64_t rsv_d:6; + uint64_t use_ihdr:1; + uint64_t rsv_c:5; + uint64_t par_mode:2; + uint64_t rsv_b:1; + uint64_t skp_len:7; + uint64_t rsv_a:6; + } s; + struct cvmx_npi_port35_instr_hdr_s cn38xx; + struct cvmx_npi_port35_instr_hdr_s cn38xxp2; + struct cvmx_npi_port35_instr_hdr_s cn58xx; + struct cvmx_npi_port35_instr_hdr_s cn58xxp1; +}; + +union cvmx_npi_port_bp_control { + uint64_t u64; + struct cvmx_npi_port_bp_control_s { + uint64_t reserved_8_63:56; + uint64_t bp_on:4; + uint64_t enb:4; + } s; + struct cvmx_npi_port_bp_control_s cn30xx; + struct cvmx_npi_port_bp_control_s cn31xx; + struct cvmx_npi_port_bp_control_s cn38xx; + struct cvmx_npi_port_bp_control_s cn38xxp2; + struct cvmx_npi_port_bp_control_s cn50xx; + struct cvmx_npi_port_bp_control_s cn58xx; + struct cvmx_npi_port_bp_control_s cn58xxp1; +}; + +union cvmx_npi_rsl_int_blocks { + uint64_t u64; + struct cvmx_npi_rsl_int_blocks_s { + uint64_t reserved_32_63:32; + uint64_t rint_31:1; + uint64_t iob:1; + uint64_t reserved_28_29:2; + uint64_t rint_27:1; + uint64_t rint_26:1; + uint64_t rint_25:1; + uint64_t rint_24:1; + uint64_t asx1:1; + uint64_t asx0:1; + uint64_t rint_21:1; + uint64_t pip:1; + uint64_t spx1:1; + uint64_t spx0:1; + uint64_t lmc:1; + uint64_t l2c:1; + uint64_t rint_15:1; + 
uint64_t reserved_13_14:2; + uint64_t pow:1; + uint64_t tim:1; + uint64_t pko:1; + uint64_t ipd:1; + uint64_t rint_8:1; + uint64_t zip:1; + uint64_t dfa:1; + uint64_t fpa:1; + uint64_t key:1; + uint64_t npi:1; + uint64_t gmx1:1; + uint64_t gmx0:1; + uint64_t mio:1; + } s; + struct cvmx_npi_rsl_int_blocks_cn30xx { + uint64_t reserved_32_63:32; + uint64_t rint_31:1; + uint64_t iob:1; + uint64_t rint_29:1; + uint64_t rint_28:1; + uint64_t rint_27:1; + uint64_t rint_26:1; + uint64_t rint_25:1; + uint64_t rint_24:1; + uint64_t asx1:1; + uint64_t asx0:1; + uint64_t rint_21:1; + uint64_t pip:1; + uint64_t spx1:1; + uint64_t spx0:1; + uint64_t lmc:1; + uint64_t l2c:1; + uint64_t rint_15:1; + uint64_t rint_14:1; + uint64_t usb:1; + uint64_t pow:1; + uint64_t tim:1; + uint64_t pko:1; + uint64_t ipd:1; + uint64_t rint_8:1; + uint64_t zip:1; + uint64_t dfa:1; + uint64_t fpa:1; + uint64_t key:1; + uint64_t npi:1; + uint64_t gmx1:1; + uint64_t gmx0:1; + uint64_t mio:1; + } cn30xx; + struct cvmx_npi_rsl_int_blocks_cn30xx cn31xx; + struct cvmx_npi_rsl_int_blocks_cn38xx { + uint64_t reserved_32_63:32; + uint64_t rint_31:1; + uint64_t iob:1; + uint64_t rint_29:1; + uint64_t rint_28:1; + uint64_t rint_27:1; + uint64_t rint_26:1; + uint64_t rint_25:1; + uint64_t rint_24:1; + uint64_t asx1:1; + uint64_t asx0:1; + uint64_t rint_21:1; + uint64_t pip:1; + uint64_t spx1:1; + uint64_t spx0:1; + uint64_t lmc:1; + uint64_t l2c:1; + uint64_t rint_15:1; + uint64_t rint_14:1; + uint64_t rint_13:1; + uint64_t pow:1; + uint64_t tim:1; + uint64_t pko:1; + uint64_t ipd:1; + uint64_t rint_8:1; + uint64_t zip:1; + uint64_t dfa:1; + uint64_t fpa:1; + uint64_t key:1; + uint64_t npi:1; + uint64_t gmx1:1; + uint64_t gmx0:1; + uint64_t mio:1; + } cn38xx; + struct cvmx_npi_rsl_int_blocks_cn38xx cn38xxp2; + struct cvmx_npi_rsl_int_blocks_cn50xx { + uint64_t reserved_31_63:33; + uint64_t iob:1; + uint64_t lmc1:1; + uint64_t agl:1; + uint64_t reserved_24_27:4; + uint64_t asx1:1; + uint64_t asx0:1; + uint64_t reserved_21_21:1; + uint64_t pip:1; + uint64_t spx1:1; + uint64_t spx0:1; + uint64_t lmc:1; + uint64_t l2c:1; + uint64_t reserved_15_15:1; + uint64_t rad:1; + uint64_t usb:1; + uint64_t pow:1; + uint64_t tim:1; + uint64_t pko:1; + uint64_t ipd:1; + uint64_t reserved_8_8:1; + uint64_t zip:1; + uint64_t dfa:1; + uint64_t fpa:1; + uint64_t key:1; + uint64_t npi:1; + uint64_t gmx1:1; + uint64_t gmx0:1; + uint64_t mio:1; + } cn50xx; + struct cvmx_npi_rsl_int_blocks_cn38xx cn58xx; + struct cvmx_npi_rsl_int_blocks_cn38xx cn58xxp1; +}; + +union cvmx_npi_size_inputx { + uint64_t u64; + struct cvmx_npi_size_inputx_s { + uint64_t reserved_32_63:32; + uint64_t size:32; + } s; + struct cvmx_npi_size_inputx_s cn30xx; + struct cvmx_npi_size_inputx_s cn31xx; + struct cvmx_npi_size_inputx_s cn38xx; + struct cvmx_npi_size_inputx_s cn38xxp2; + struct cvmx_npi_size_inputx_s cn50xx; + struct cvmx_npi_size_inputx_s cn58xx; + struct cvmx_npi_size_inputx_s cn58xxp1; +}; + +union cvmx_npi_win_read_to { + uint64_t u64; + struct cvmx_npi_win_read_to_s { + uint64_t reserved_32_63:32; + uint64_t time:32; + } s; + struct cvmx_npi_win_read_to_s cn30xx; + struct cvmx_npi_win_read_to_s cn31xx; + struct cvmx_npi_win_read_to_s cn38xx; + struct cvmx_npi_win_read_to_s cn38xxp2; + struct cvmx_npi_win_read_to_s cn50xx; + struct cvmx_npi_win_read_to_s cn58xx; + struct cvmx_npi_win_read_to_s cn58xxp1; +}; + +#endif diff --git a/arch/mips/include/asm/octeon/cvmx-pci-defs.h b/arch/mips/include/asm/octeon/cvmx-pci-defs.h new file mode 100644 index 0000000..90f8d65 --- 
/dev/null +++ b/arch/mips/include/asm/octeon/cvmx-pci-defs.h @@ -0,0 +1,1645 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_PCI_DEFS_H__ +#define __CVMX_PCI_DEFS_H__ + +#define CVMX_PCI_BAR1_INDEXX(offset) \ + (0x0000000000000100ull + (((offset) & 31) * 4)) +#define CVMX_PCI_BIST_REG \ + (0x00000000000001C0ull) +#define CVMX_PCI_CFG00 \ + (0x0000000000000000ull) +#define CVMX_PCI_CFG01 \ + (0x0000000000000004ull) +#define CVMX_PCI_CFG02 \ + (0x0000000000000008ull) +#define CVMX_PCI_CFG03 \ + (0x000000000000000Cull) +#define CVMX_PCI_CFG04 \ + (0x0000000000000010ull) +#define CVMX_PCI_CFG05 \ + (0x0000000000000014ull) +#define CVMX_PCI_CFG06 \ + (0x0000000000000018ull) +#define CVMX_PCI_CFG07 \ + (0x000000000000001Cull) +#define CVMX_PCI_CFG08 \ + (0x0000000000000020ull) +#define CVMX_PCI_CFG09 \ + (0x0000000000000024ull) +#define CVMX_PCI_CFG10 \ + (0x0000000000000028ull) +#define CVMX_PCI_CFG11 \ + (0x000000000000002Cull) +#define CVMX_PCI_CFG12 \ + (0x0000000000000030ull) +#define CVMX_PCI_CFG13 \ + (0x0000000000000034ull) +#define CVMX_PCI_CFG15 \ + (0x000000000000003Cull) +#define CVMX_PCI_CFG16 \ + (0x0000000000000040ull) +#define CVMX_PCI_CFG17 \ + (0x0000000000000044ull) +#define CVMX_PCI_CFG18 \ + (0x0000000000000048ull) +#define CVMX_PCI_CFG19 \ + (0x000000000000004Cull) +#define CVMX_PCI_CFG20 \ + (0x0000000000000050ull) +#define CVMX_PCI_CFG21 \ + (0x0000000000000054ull) +#define CVMX_PCI_CFG22 \ + (0x0000000000000058ull) +#define CVMX_PCI_CFG56 \ + (0x00000000000000E0ull) +#define CVMX_PCI_CFG57 \ + (0x00000000000000E4ull) +#define CVMX_PCI_CFG58 \ + (0x00000000000000E8ull) +#define CVMX_PCI_CFG59 \ + (0x00000000000000ECull) +#define CVMX_PCI_CFG60 \ + (0x00000000000000F0ull) +#define CVMX_PCI_CFG61 \ + (0x00000000000000F4ull) +#define CVMX_PCI_CFG62 \ + (0x00000000000000F8ull) +#define CVMX_PCI_CFG63 \ + (0x00000000000000FCull) +#define CVMX_PCI_CNT_REG \ + (0x00000000000001B8ull) +#define CVMX_PCI_CTL_STATUS_2 \ + (0x000000000000018Cull) +#define CVMX_PCI_DBELL_0 \ + (0x0000000000000080ull) +#define CVMX_PCI_DBELL_1 \ + (0x0000000000000088ull) +#define CVMX_PCI_DBELL_2 \ + (0x0000000000000090ull) +#define CVMX_PCI_DBELL_3 \ + (0x0000000000000098ull) +#define CVMX_PCI_DBELL_X(offset) \ + (0x0000000000000080ull + (((offset) & 3) * 8)) +#define CVMX_PCI_DMA_CNT0 \ + (0x00000000000000A0ull) +#define CVMX_PCI_DMA_CNT1 \ + (0x00000000000000A8ull) +#define 
CVMX_PCI_DMA_CNTX(offset) \ + (0x00000000000000A0ull + (((offset) & 1) * 8)) +#define CVMX_PCI_DMA_INT_LEV0 \ + (0x00000000000000A4ull) +#define CVMX_PCI_DMA_INT_LEV1 \ + (0x00000000000000ACull) +#define CVMX_PCI_DMA_INT_LEVX(offset) \ + (0x00000000000000A4ull + (((offset) & 1) * 8)) +#define CVMX_PCI_DMA_TIME0 \ + (0x00000000000000B0ull) +#define CVMX_PCI_DMA_TIME1 \ + (0x00000000000000B4ull) +#define CVMX_PCI_DMA_TIMEX(offset) \ + (0x00000000000000B0ull + (((offset) & 1) * 4)) +#define CVMX_PCI_INSTR_COUNT0 \ + (0x0000000000000084ull) +#define CVMX_PCI_INSTR_COUNT1 \ + (0x000000000000008Cull) +#define CVMX_PCI_INSTR_COUNT2 \ + (0x0000000000000094ull) +#define CVMX_PCI_INSTR_COUNT3 \ + (0x000000000000009Cull) +#define CVMX_PCI_INSTR_COUNTX(offset) \ + (0x0000000000000084ull + (((offset) & 3) * 8)) +#define CVMX_PCI_INT_ENB \ + (0x0000000000000038ull) +#define CVMX_PCI_INT_ENB2 \ + (0x00000000000001A0ull) +#define CVMX_PCI_INT_SUM \ + (0x0000000000000030ull) +#define CVMX_PCI_INT_SUM2 \ + (0x0000000000000198ull) +#define CVMX_PCI_MSI_RCV \ + (0x00000000000000F0ull) +#define CVMX_PCI_PKTS_SENT0 \ + (0x0000000000000040ull) +#define CVMX_PCI_PKTS_SENT1 \ + (0x0000000000000050ull) +#define CVMX_PCI_PKTS_SENT2 \ + (0x0000000000000060ull) +#define CVMX_PCI_PKTS_SENT3 \ + (0x0000000000000070ull) +#define CVMX_PCI_PKTS_SENTX(offset) \ + (0x0000000000000040ull + (((offset) & 3) * 16)) +#define CVMX_PCI_PKTS_SENT_INT_LEV0 \ + (0x0000000000000048ull) +#define CVMX_PCI_PKTS_SENT_INT_LEV1 \ + (0x0000000000000058ull) +#define CVMX_PCI_PKTS_SENT_INT_LEV2 \ + (0x0000000000000068ull) +#define CVMX_PCI_PKTS_SENT_INT_LEV3 \ + (0x0000000000000078ull) +#define CVMX_PCI_PKTS_SENT_INT_LEVX(offset) \ + (0x0000000000000048ull + (((offset) & 3) * 16)) +#define CVMX_PCI_PKTS_SENT_TIME0 \ + (0x000000000000004Cull) +#define CVMX_PCI_PKTS_SENT_TIME1 \ + (0x000000000000005Cull) +#define CVMX_PCI_PKTS_SENT_TIME2 \ + (0x000000000000006Cull) +#define CVMX_PCI_PKTS_SENT_TIME3 \ + (0x000000000000007Cull) +#define CVMX_PCI_PKTS_SENT_TIMEX(offset) \ + (0x000000000000004Cull + (((offset) & 3) * 16)) +#define CVMX_PCI_PKT_CREDITS0 \ + (0x0000000000000044ull) +#define CVMX_PCI_PKT_CREDITS1 \ + (0x0000000000000054ull) +#define CVMX_PCI_PKT_CREDITS2 \ + (0x0000000000000064ull) +#define CVMX_PCI_PKT_CREDITS3 \ + (0x0000000000000074ull) +#define CVMX_PCI_PKT_CREDITSX(offset) \ + (0x0000000000000044ull + (((offset) & 3) * 16)) +#define CVMX_PCI_READ_CMD_6 \ + (0x0000000000000180ull) +#define CVMX_PCI_READ_CMD_C \ + (0x0000000000000184ull) +#define CVMX_PCI_READ_CMD_E \ + (0x0000000000000188ull) +#define CVMX_PCI_READ_TIMEOUT \ + CVMX_ADD_IO_SEG(0x00011F00000000B0ull) +#define CVMX_PCI_SCM_REG \ + (0x00000000000001A8ull) +#define CVMX_PCI_TSR_REG \ + (0x00000000000001B0ull) +#define CVMX_PCI_WIN_RD_ADDR \ + (0x0000000000000008ull) +#define CVMX_PCI_WIN_RD_DATA \ + (0x0000000000000020ull) +#define CVMX_PCI_WIN_WR_ADDR \ + (0x0000000000000000ull) +#define CVMX_PCI_WIN_WR_DATA \ + (0x0000000000000010ull) +#define CVMX_PCI_WIN_WR_MASK \ + (0x0000000000000018ull) + +union cvmx_pci_bar1_indexx { + uint32_t u32; + struct cvmx_pci_bar1_indexx_s { + uint32_t reserved_18_31:14; + uint32_t addr_idx:14; + uint32_t ca:1; + uint32_t end_swp:2; + uint32_t addr_v:1; + } s; + struct cvmx_pci_bar1_indexx_s cn30xx; + struct cvmx_pci_bar1_indexx_s cn31xx; + struct cvmx_pci_bar1_indexx_s cn38xx; + struct cvmx_pci_bar1_indexx_s cn38xxp2; + struct cvmx_pci_bar1_indexx_s cn50xx; + struct cvmx_pci_bar1_indexx_s cn58xx; + struct cvmx_pci_bar1_indexx_s cn58xxp1; 
+}; + +union cvmx_pci_bist_reg { + uint64_t u64; + struct cvmx_pci_bist_reg_s { + uint64_t reserved_10_63:54; + uint64_t rsp_bs:1; + uint64_t dma0_bs:1; + uint64_t cmd0_bs:1; + uint64_t cmd_bs:1; + uint64_t csr2p_bs:1; + uint64_t csrr_bs:1; + uint64_t rsp2p_bs:1; + uint64_t csr2n_bs:1; + uint64_t dat2n_bs:1; + uint64_t dbg2n_bs:1; + } s; + struct cvmx_pci_bist_reg_s cn50xx; +}; + +union cvmx_pci_cfg00 { + uint32_t u32; + struct cvmx_pci_cfg00_s { + uint32_t devid:16; + uint32_t vendid:16; + } s; + struct cvmx_pci_cfg00_s cn30xx; + struct cvmx_pci_cfg00_s cn31xx; + struct cvmx_pci_cfg00_s cn38xx; + struct cvmx_pci_cfg00_s cn38xxp2; + struct cvmx_pci_cfg00_s cn50xx; + struct cvmx_pci_cfg00_s cn58xx; + struct cvmx_pci_cfg00_s cn58xxp1; +}; + +union cvmx_pci_cfg01 { + uint32_t u32; + struct cvmx_pci_cfg01_s { + uint32_t dpe:1; + uint32_t sse:1; + uint32_t rma:1; + uint32_t rta:1; + uint32_t sta:1; + uint32_t devt:2; + uint32_t mdpe:1; + uint32_t fbb:1; + uint32_t reserved_22_22:1; + uint32_t m66:1; + uint32_t cle:1; + uint32_t i_stat:1; + uint32_t reserved_11_18:8; + uint32_t i_dis:1; + uint32_t fbbe:1; + uint32_t see:1; + uint32_t ads:1; + uint32_t pee:1; + uint32_t vps:1; + uint32_t mwice:1; + uint32_t scse:1; + uint32_t me:1; + uint32_t msae:1; + uint32_t isae:1; + } s; + struct cvmx_pci_cfg01_s cn30xx; + struct cvmx_pci_cfg01_s cn31xx; + struct cvmx_pci_cfg01_s cn38xx; + struct cvmx_pci_cfg01_s cn38xxp2; + struct cvmx_pci_cfg01_s cn50xx; + struct cvmx_pci_cfg01_s cn58xx; + struct cvmx_pci_cfg01_s cn58xxp1; +}; + +union cvmx_pci_cfg02 { + uint32_t u32; + struct cvmx_pci_cfg02_s { + uint32_t cc:24; + uint32_t rid:8; + } s; + struct cvmx_pci_cfg02_s cn30xx; + struct cvmx_pci_cfg02_s cn31xx; + struct cvmx_pci_cfg02_s cn38xx; + struct cvmx_pci_cfg02_s cn38xxp2; + struct cvmx_pci_cfg02_s cn50xx; + struct cvmx_pci_cfg02_s cn58xx; + struct cvmx_pci_cfg02_s cn58xxp1; +}; + +union cvmx_pci_cfg03 { + uint32_t u32; + struct cvmx_pci_cfg03_s { + uint32_t bcap:1; + uint32_t brb:1; + uint32_t reserved_28_29:2; + uint32_t bcod:4; + uint32_t ht:8; + uint32_t lt:8; + uint32_t cls:8; + } s; + struct cvmx_pci_cfg03_s cn30xx; + struct cvmx_pci_cfg03_s cn31xx; + struct cvmx_pci_cfg03_s cn38xx; + struct cvmx_pci_cfg03_s cn38xxp2; + struct cvmx_pci_cfg03_s cn50xx; + struct cvmx_pci_cfg03_s cn58xx; + struct cvmx_pci_cfg03_s cn58xxp1; +}; + +union cvmx_pci_cfg04 { + uint32_t u32; + struct cvmx_pci_cfg04_s { + uint32_t lbase:20; + uint32_t lbasez:8; + uint32_t pf:1; + uint32_t typ:2; + uint32_t mspc:1; + } s; + struct cvmx_pci_cfg04_s cn30xx; + struct cvmx_pci_cfg04_s cn31xx; + struct cvmx_pci_cfg04_s cn38xx; + struct cvmx_pci_cfg04_s cn38xxp2; + struct cvmx_pci_cfg04_s cn50xx; + struct cvmx_pci_cfg04_s cn58xx; + struct cvmx_pci_cfg04_s cn58xxp1; +}; + +union cvmx_pci_cfg05 { + uint32_t u32; + struct cvmx_pci_cfg05_s { + uint32_t hbase:32; + } s; + struct cvmx_pci_cfg05_s cn30xx; + struct cvmx_pci_cfg05_s cn31xx; + struct cvmx_pci_cfg05_s cn38xx; + struct cvmx_pci_cfg05_s cn38xxp2; + struct cvmx_pci_cfg05_s cn50xx; + struct cvmx_pci_cfg05_s cn58xx; + struct cvmx_pci_cfg05_s cn58xxp1; +}; + +union cvmx_pci_cfg06 { + uint32_t u32; + struct cvmx_pci_cfg06_s { + uint32_t lbase:5; + uint32_t lbasez:23; + uint32_t pf:1; + uint32_t typ:2; + uint32_t mspc:1; + } s; + struct cvmx_pci_cfg06_s cn30xx; + struct cvmx_pci_cfg06_s cn31xx; + struct cvmx_pci_cfg06_s cn38xx; + struct cvmx_pci_cfg06_s cn38xxp2; + struct cvmx_pci_cfg06_s cn50xx; + struct cvmx_pci_cfg06_s cn58xx; + struct cvmx_pci_cfg06_s cn58xxp1; +}; + +union 
cvmx_pci_cfg07 { + uint32_t u32; + struct cvmx_pci_cfg07_s { + uint32_t hbase:32; + } s; + struct cvmx_pci_cfg07_s cn30xx; + struct cvmx_pci_cfg07_s cn31xx; + struct cvmx_pci_cfg07_s cn38xx; + struct cvmx_pci_cfg07_s cn38xxp2; + struct cvmx_pci_cfg07_s cn50xx; + struct cvmx_pci_cfg07_s cn58xx; + struct cvmx_pci_cfg07_s cn58xxp1; +}; + +union cvmx_pci_cfg08 { + uint32_t u32; + struct cvmx_pci_cfg08_s { + uint32_t lbasez:28; + uint32_t pf:1; + uint32_t typ:2; + uint32_t mspc:1; + } s; + struct cvmx_pci_cfg08_s cn30xx; + struct cvmx_pci_cfg08_s cn31xx; + struct cvmx_pci_cfg08_s cn38xx; + struct cvmx_pci_cfg08_s cn38xxp2; + struct cvmx_pci_cfg08_s cn50xx; + struct cvmx_pci_cfg08_s cn58xx; + struct cvmx_pci_cfg08_s cn58xxp1; +}; + +union cvmx_pci_cfg09 { + uint32_t u32; + struct cvmx_pci_cfg09_s { + uint32_t hbase:25; + uint32_t hbasez:7; + } s; + struct cvmx_pci_cfg09_s cn30xx; + struct cvmx_pci_cfg09_s cn31xx; + struct cvmx_pci_cfg09_s cn38xx; + struct cvmx_pci_cfg09_s cn38xxp2; + struct cvmx_pci_cfg09_s cn50xx; + struct cvmx_pci_cfg09_s cn58xx; + struct cvmx_pci_cfg09_s cn58xxp1; +}; + +union cvmx_pci_cfg10 { + uint32_t u32; + struct cvmx_pci_cfg10_s { + uint32_t cisp:32; + } s; + struct cvmx_pci_cfg10_s cn30xx; + struct cvmx_pci_cfg10_s cn31xx; + struct cvmx_pci_cfg10_s cn38xx; + struct cvmx_pci_cfg10_s cn38xxp2; + struct cvmx_pci_cfg10_s cn50xx; + struct cvmx_pci_cfg10_s cn58xx; + struct cvmx_pci_cfg10_s cn58xxp1; +}; + +union cvmx_pci_cfg11 { + uint32_t u32; + struct cvmx_pci_cfg11_s { + uint32_t ssid:16; + uint32_t ssvid:16; + } s; + struct cvmx_pci_cfg11_s cn30xx; + struct cvmx_pci_cfg11_s cn31xx; + struct cvmx_pci_cfg11_s cn38xx; + struct cvmx_pci_cfg11_s cn38xxp2; + struct cvmx_pci_cfg11_s cn50xx; + struct cvmx_pci_cfg11_s cn58xx; + struct cvmx_pci_cfg11_s cn58xxp1; +}; + +union cvmx_pci_cfg12 { + uint32_t u32; + struct cvmx_pci_cfg12_s { + uint32_t erbar:16; + uint32_t erbarz:5; + uint32_t reserved_1_10:10; + uint32_t erbar_en:1; + } s; + struct cvmx_pci_cfg12_s cn30xx; + struct cvmx_pci_cfg12_s cn31xx; + struct cvmx_pci_cfg12_s cn38xx; + struct cvmx_pci_cfg12_s cn38xxp2; + struct cvmx_pci_cfg12_s cn50xx; + struct cvmx_pci_cfg12_s cn58xx; + struct cvmx_pci_cfg12_s cn58xxp1; +}; + +union cvmx_pci_cfg13 { + uint32_t u32; + struct cvmx_pci_cfg13_s { + uint32_t reserved_8_31:24; + uint32_t cp:8; + } s; + struct cvmx_pci_cfg13_s cn30xx; + struct cvmx_pci_cfg13_s cn31xx; + struct cvmx_pci_cfg13_s cn38xx; + struct cvmx_pci_cfg13_s cn38xxp2; + struct cvmx_pci_cfg13_s cn50xx; + struct cvmx_pci_cfg13_s cn58xx; + struct cvmx_pci_cfg13_s cn58xxp1; +}; + +union cvmx_pci_cfg15 { + uint32_t u32; + struct cvmx_pci_cfg15_s { + uint32_t ml:8; + uint32_t mg:8; + uint32_t inta:8; + uint32_t il:8; + } s; + struct cvmx_pci_cfg15_s cn30xx; + struct cvmx_pci_cfg15_s cn31xx; + struct cvmx_pci_cfg15_s cn38xx; + struct cvmx_pci_cfg15_s cn38xxp2; + struct cvmx_pci_cfg15_s cn50xx; + struct cvmx_pci_cfg15_s cn58xx; + struct cvmx_pci_cfg15_s cn58xxp1; +}; + +union cvmx_pci_cfg16 { + uint32_t u32; + struct cvmx_pci_cfg16_s { + uint32_t trdnpr:1; + uint32_t trdard:1; + uint32_t rdsati:1; + uint32_t trdrs:1; + uint32_t trtae:1; + uint32_t twsei:1; + uint32_t twsen:1; + uint32_t twtae:1; + uint32_t tmae:1; + uint32_t tslte:3; + uint32_t tilt:4; + uint32_t pbe:12; + uint32_t dppmr:1; + uint32_t reserved_2_2:1; + uint32_t tswc:1; + uint32_t mltd:1; + } s; + struct cvmx_pci_cfg16_s cn30xx; + struct cvmx_pci_cfg16_s cn31xx; + struct cvmx_pci_cfg16_s cn38xx; + struct cvmx_pci_cfg16_s cn38xxp2; + struct cvmx_pci_cfg16_s 
cn50xx; + struct cvmx_pci_cfg16_s cn58xx; + struct cvmx_pci_cfg16_s cn58xxp1; +}; + +union cvmx_pci_cfg17 { + uint32_t u32; + struct cvmx_pci_cfg17_s { + uint32_t tscme:32; + } s; + struct cvmx_pci_cfg17_s cn30xx; + struct cvmx_pci_cfg17_s cn31xx; + struct cvmx_pci_cfg17_s cn38xx; + struct cvmx_pci_cfg17_s cn38xxp2; + struct cvmx_pci_cfg17_s cn50xx; + struct cvmx_pci_cfg17_s cn58xx; + struct cvmx_pci_cfg17_s cn58xxp1; +}; + +union cvmx_pci_cfg18 { + uint32_t u32; + struct cvmx_pci_cfg18_s { + uint32_t tdsrps:32; + } s; + struct cvmx_pci_cfg18_s cn30xx; + struct cvmx_pci_cfg18_s cn31xx; + struct cvmx_pci_cfg18_s cn38xx; + struct cvmx_pci_cfg18_s cn38xxp2; + struct cvmx_pci_cfg18_s cn50xx; + struct cvmx_pci_cfg18_s cn58xx; + struct cvmx_pci_cfg18_s cn58xxp1; +}; + +union cvmx_pci_cfg19 { + uint32_t u32; + struct cvmx_pci_cfg19_s { + uint32_t mrbcm:1; + uint32_t mrbci:1; + uint32_t mdwe:1; + uint32_t mdre:1; + uint32_t mdrimc:1; + uint32_t mdrrmc:3; + uint32_t tmes:8; + uint32_t teci:1; + uint32_t tmei:1; + uint32_t tmse:1; + uint32_t tmdpes:1; + uint32_t tmapes:1; + uint32_t reserved_9_10:2; + uint32_t tibcd:1; + uint32_t tibde:1; + uint32_t reserved_6_6:1; + uint32_t tidomc:1; + uint32_t tdomc:5; + } s; + struct cvmx_pci_cfg19_s cn30xx; + struct cvmx_pci_cfg19_s cn31xx; + struct cvmx_pci_cfg19_s cn38xx; + struct cvmx_pci_cfg19_s cn38xxp2; + struct cvmx_pci_cfg19_s cn50xx; + struct cvmx_pci_cfg19_s cn58xx; + struct cvmx_pci_cfg19_s cn58xxp1; +}; + +union cvmx_pci_cfg20 { + uint32_t u32; + struct cvmx_pci_cfg20_s { + uint32_t mdsp:32; + } s; + struct cvmx_pci_cfg20_s cn30xx; + struct cvmx_pci_cfg20_s cn31xx; + struct cvmx_pci_cfg20_s cn38xx; + struct cvmx_pci_cfg20_s cn38xxp2; + struct cvmx_pci_cfg20_s cn50xx; + struct cvmx_pci_cfg20_s cn58xx; + struct cvmx_pci_cfg20_s cn58xxp1; +}; + +union cvmx_pci_cfg21 { + uint32_t u32; + struct cvmx_pci_cfg21_s { + uint32_t scmre:32; + } s; + struct cvmx_pci_cfg21_s cn30xx; + struct cvmx_pci_cfg21_s cn31xx; + struct cvmx_pci_cfg21_s cn38xx; + struct cvmx_pci_cfg21_s cn38xxp2; + struct cvmx_pci_cfg21_s cn50xx; + struct cvmx_pci_cfg21_s cn58xx; + struct cvmx_pci_cfg21_s cn58xxp1; +}; + +union cvmx_pci_cfg22 { + uint32_t u32; + struct cvmx_pci_cfg22_s { + uint32_t mac:7; + uint32_t reserved_19_24:6; + uint32_t flush:1; + uint32_t mra:1; + uint32_t mtta:1; + uint32_t mrv:8; + uint32_t mttv:8; + } s; + struct cvmx_pci_cfg22_s cn30xx; + struct cvmx_pci_cfg22_s cn31xx; + struct cvmx_pci_cfg22_s cn38xx; + struct cvmx_pci_cfg22_s cn38xxp2; + struct cvmx_pci_cfg22_s cn50xx; + struct cvmx_pci_cfg22_s cn58xx; + struct cvmx_pci_cfg22_s cn58xxp1; +}; + +union cvmx_pci_cfg56 { + uint32_t u32; + struct cvmx_pci_cfg56_s { + uint32_t reserved_23_31:9; + uint32_t most:3; + uint32_t mmbc:2; + uint32_t roe:1; + uint32_t dpere:1; + uint32_t ncp:8; + uint32_t pxcid:8; + } s; + struct cvmx_pci_cfg56_s cn30xx; + struct cvmx_pci_cfg56_s cn31xx; + struct cvmx_pci_cfg56_s cn38xx; + struct cvmx_pci_cfg56_s cn38xxp2; + struct cvmx_pci_cfg56_s cn50xx; + struct cvmx_pci_cfg56_s cn58xx; + struct cvmx_pci_cfg56_s cn58xxp1; +}; + +union cvmx_pci_cfg57 { + uint32_t u32; + struct cvmx_pci_cfg57_s { + uint32_t reserved_30_31:2; + uint32_t scemr:1; + uint32_t mcrsd:3; + uint32_t mostd:3; + uint32_t mmrbcd:2; + uint32_t dc:1; + uint32_t usc:1; + uint32_t scd:1; + uint32_t m133:1; + uint32_t w64:1; + uint32_t bn:8; + uint32_t dn:5; + uint32_t fn:3; + } s; + struct cvmx_pci_cfg57_s cn30xx; + struct cvmx_pci_cfg57_s cn31xx; + struct cvmx_pci_cfg57_s cn38xx; + struct cvmx_pci_cfg57_s cn38xxp2; + 
struct cvmx_pci_cfg57_s cn50xx; + struct cvmx_pci_cfg57_s cn58xx; + struct cvmx_pci_cfg57_s cn58xxp1; +}; + +union cvmx_pci_cfg58 { + uint32_t u32; + struct cvmx_pci_cfg58_s { + uint32_t pmes:5; + uint32_t d2s:1; + uint32_t d1s:1; + uint32_t auxc:3; + uint32_t dsi:1; + uint32_t reserved_20_20:1; + uint32_t pmec:1; + uint32_t pcimiv:3; + uint32_t ncp:8; + uint32_t pmcid:8; + } s; + struct cvmx_pci_cfg58_s cn30xx; + struct cvmx_pci_cfg58_s cn31xx; + struct cvmx_pci_cfg58_s cn38xx; + struct cvmx_pci_cfg58_s cn38xxp2; + struct cvmx_pci_cfg58_s cn50xx; + struct cvmx_pci_cfg58_s cn58xx; + struct cvmx_pci_cfg58_s cn58xxp1; +}; + +union cvmx_pci_cfg59 { + uint32_t u32; + struct cvmx_pci_cfg59_s { + uint32_t pmdia:8; + uint32_t bpccen:1; + uint32_t bd3h:1; + uint32_t reserved_16_21:6; + uint32_t pmess:1; + uint32_t pmedsia:2; + uint32_t pmds:4; + uint32_t pmeens:1; + uint32_t reserved_2_7:6; + uint32_t ps:2; + } s; + struct cvmx_pci_cfg59_s cn30xx; + struct cvmx_pci_cfg59_s cn31xx; + struct cvmx_pci_cfg59_s cn38xx; + struct cvmx_pci_cfg59_s cn38xxp2; + struct cvmx_pci_cfg59_s cn50xx; + struct cvmx_pci_cfg59_s cn58xx; + struct cvmx_pci_cfg59_s cn58xxp1; +}; + +union cvmx_pci_cfg60 { + uint32_t u32; + struct cvmx_pci_cfg60_s { + uint32_t reserved_24_31:8; + uint32_t m64:1; + uint32_t mme:3; + uint32_t mmc:3; + uint32_t msien:1; + uint32_t ncp:8; + uint32_t msicid:8; + } s; + struct cvmx_pci_cfg60_s cn30xx; + struct cvmx_pci_cfg60_s cn31xx; + struct cvmx_pci_cfg60_s cn38xx; + struct cvmx_pci_cfg60_s cn38xxp2; + struct cvmx_pci_cfg60_s cn50xx; + struct cvmx_pci_cfg60_s cn58xx; + struct cvmx_pci_cfg60_s cn58xxp1; +}; + +union cvmx_pci_cfg61 { + uint32_t u32; + struct cvmx_pci_cfg61_s { + uint32_t msi31t2:30; + uint32_t reserved_0_1:2; + } s; + struct cvmx_pci_cfg61_s cn30xx; + struct cvmx_pci_cfg61_s cn31xx; + struct cvmx_pci_cfg61_s cn38xx; + struct cvmx_pci_cfg61_s cn38xxp2; + struct cvmx_pci_cfg61_s cn50xx; + struct cvmx_pci_cfg61_s cn58xx; + struct cvmx_pci_cfg61_s cn58xxp1; +}; + +union cvmx_pci_cfg62 { + uint32_t u32; + struct cvmx_pci_cfg62_s { + uint32_t msi:32; + } s; + struct cvmx_pci_cfg62_s cn30xx; + struct cvmx_pci_cfg62_s cn31xx; + struct cvmx_pci_cfg62_s cn38xx; + struct cvmx_pci_cfg62_s cn38xxp2; + struct cvmx_pci_cfg62_s cn50xx; + struct cvmx_pci_cfg62_s cn58xx; + struct cvmx_pci_cfg62_s cn58xxp1; +}; + +union cvmx_pci_cfg63 { + uint32_t u32; + struct cvmx_pci_cfg63_s { + uint32_t reserved_16_31:16; + uint32_t msimd:16; + } s; + struct cvmx_pci_cfg63_s cn30xx; + struct cvmx_pci_cfg63_s cn31xx; + struct cvmx_pci_cfg63_s cn38xx; + struct cvmx_pci_cfg63_s cn38xxp2; + struct cvmx_pci_cfg63_s cn50xx; + struct cvmx_pci_cfg63_s cn58xx; + struct cvmx_pci_cfg63_s cn58xxp1; +}; + +union cvmx_pci_cnt_reg { + uint64_t u64; + struct cvmx_pci_cnt_reg_s { + uint64_t reserved_38_63:26; + uint64_t hm_pcix:1; + uint64_t hm_speed:2; + uint64_t ap_pcix:1; + uint64_t ap_speed:2; + uint64_t pcicnt:32; + } s; + struct cvmx_pci_cnt_reg_s cn50xx; + struct cvmx_pci_cnt_reg_s cn58xx; + struct cvmx_pci_cnt_reg_s cn58xxp1; +}; + +union cvmx_pci_ctl_status_2 { + uint32_t u32; + struct cvmx_pci_ctl_status_2_s { + uint32_t reserved_29_31:3; + uint32_t bb1_hole:3; + uint32_t bb1_siz:1; + uint32_t bb_ca:1; + uint32_t bb_es:2; + uint32_t bb1:1; + uint32_t bb0:1; + uint32_t erst_n:1; + uint32_t bar2pres:1; + uint32_t scmtyp:1; + uint32_t scm:1; + uint32_t en_wfilt:1; + uint32_t reserved_14_14:1; + uint32_t ap_pcix:1; + uint32_t ap_64ad:1; + uint32_t b12_bist:1; + uint32_t pmo_amod:1; + uint32_t pmo_fpc:3; + uint32_t 
tsr_hwm:3; + uint32_t bar2_enb:1; + uint32_t bar2_esx:2; + uint32_t bar2_cax:1; + } s; + struct cvmx_pci_ctl_status_2_s cn30xx; + struct cvmx_pci_ctl_status_2_cn31xx { + uint32_t reserved_20_31:12; + uint32_t erst_n:1; + uint32_t bar2pres:1; + uint32_t scmtyp:1; + uint32_t scm:1; + uint32_t en_wfilt:1; + uint32_t reserved_14_14:1; + uint32_t ap_pcix:1; + uint32_t ap_64ad:1; + uint32_t b12_bist:1; + uint32_t pmo_amod:1; + uint32_t pmo_fpc:3; + uint32_t tsr_hwm:3; + uint32_t bar2_enb:1; + uint32_t bar2_esx:2; + uint32_t bar2_cax:1; + } cn31xx; + struct cvmx_pci_ctl_status_2_s cn38xx; + struct cvmx_pci_ctl_status_2_cn31xx cn38xxp2; + struct cvmx_pci_ctl_status_2_s cn50xx; + struct cvmx_pci_ctl_status_2_s cn58xx; + struct cvmx_pci_ctl_status_2_s cn58xxp1; +}; + +union cvmx_pci_dbellx { + uint32_t u32; + struct cvmx_pci_dbellx_s { + uint32_t reserved_16_31:16; + uint32_t inc_val:16; + } s; + struct cvmx_pci_dbellx_s cn30xx; + struct cvmx_pci_dbellx_s cn31xx; + struct cvmx_pci_dbellx_s cn38xx; + struct cvmx_pci_dbellx_s cn38xxp2; + struct cvmx_pci_dbellx_s cn50xx; + struct cvmx_pci_dbellx_s cn58xx; + struct cvmx_pci_dbellx_s cn58xxp1; +}; + +union cvmx_pci_dma_cntx { + uint32_t u32; + struct cvmx_pci_dma_cntx_s { + uint32_t dma_cnt:32; + } s; + struct cvmx_pci_dma_cntx_s cn30xx; + struct cvmx_pci_dma_cntx_s cn31xx; + struct cvmx_pci_dma_cntx_s cn38xx; + struct cvmx_pci_dma_cntx_s cn38xxp2; + struct cvmx_pci_dma_cntx_s cn50xx; + struct cvmx_pci_dma_cntx_s cn58xx; + struct cvmx_pci_dma_cntx_s cn58xxp1; +}; + +union cvmx_pci_dma_int_levx { + uint32_t u32; + struct cvmx_pci_dma_int_levx_s { + uint32_t pkt_cnt:32; + } s; + struct cvmx_pci_dma_int_levx_s cn30xx; + struct cvmx_pci_dma_int_levx_s cn31xx; + struct cvmx_pci_dma_int_levx_s cn38xx; + struct cvmx_pci_dma_int_levx_s cn38xxp2; + struct cvmx_pci_dma_int_levx_s cn50xx; + struct cvmx_pci_dma_int_levx_s cn58xx; + struct cvmx_pci_dma_int_levx_s cn58xxp1; +}; + +union cvmx_pci_dma_timex { + uint32_t u32; + struct cvmx_pci_dma_timex_s { + uint32_t dma_time:32; + } s; + struct cvmx_pci_dma_timex_s cn30xx; + struct cvmx_pci_dma_timex_s cn31xx; + struct cvmx_pci_dma_timex_s cn38xx; + struct cvmx_pci_dma_timex_s cn38xxp2; + struct cvmx_pci_dma_timex_s cn50xx; + struct cvmx_pci_dma_timex_s cn58xx; + struct cvmx_pci_dma_timex_s cn58xxp1; +}; + +union cvmx_pci_instr_countx { + uint32_t u32; + struct cvmx_pci_instr_countx_s { + uint32_t icnt:32; + } s; + struct cvmx_pci_instr_countx_s cn30xx; + struct cvmx_pci_instr_countx_s cn31xx; + struct cvmx_pci_instr_countx_s cn38xx; + struct cvmx_pci_instr_countx_s cn38xxp2; + struct cvmx_pci_instr_countx_s cn50xx; + struct cvmx_pci_instr_countx_s cn58xx; + struct cvmx_pci_instr_countx_s cn58xxp1; +}; + +union cvmx_pci_int_enb { + uint64_t u64; + struct cvmx_pci_int_enb_s { + uint64_t reserved_34_63:30; + uint64_t ill_rd:1; + uint64_t ill_wr:1; + uint64_t win_wr:1; + uint64_t dma1_fi:1; + uint64_t dma0_fi:1; + uint64_t idtime1:1; + uint64_t idtime0:1; + uint64_t idcnt1:1; + uint64_t idcnt0:1; + uint64_t iptime3:1; + uint64_t iptime2:1; + uint64_t iptime1:1; + uint64_t iptime0:1; + uint64_t ipcnt3:1; + uint64_t ipcnt2:1; + uint64_t ipcnt1:1; + uint64_t ipcnt0:1; + uint64_t irsl_int:1; + uint64_t ill_rrd:1; + uint64_t ill_rwr:1; + uint64_t idperr:1; + uint64_t iaperr:1; + uint64_t iserr:1; + uint64_t itsr_abt:1; + uint64_t imsc_msg:1; + uint64_t imsi_mabt:1; + uint64_t imsi_tabt:1; + uint64_t imsi_per:1; + uint64_t imr_tto:1; + uint64_t imr_abt:1; + uint64_t itr_abt:1; + uint64_t imr_wtto:1; + uint64_t imr_wabt:1; + 
uint64_t itr_wabt:1; + } s; + struct cvmx_pci_int_enb_cn30xx { + uint64_t reserved_34_63:30; + uint64_t ill_rd:1; + uint64_t ill_wr:1; + uint64_t win_wr:1; + uint64_t dma1_fi:1; + uint64_t dma0_fi:1; + uint64_t idtime1:1; + uint64_t idtime0:1; + uint64_t idcnt1:1; + uint64_t idcnt0:1; + uint64_t reserved_22_24:3; + uint64_t iptime0:1; + uint64_t reserved_18_20:3; + uint64_t ipcnt0:1; + uint64_t irsl_int:1; + uint64_t ill_rrd:1; + uint64_t ill_rwr:1; + uint64_t idperr:1; + uint64_t iaperr:1; + uint64_t iserr:1; + uint64_t itsr_abt:1; + uint64_t imsc_msg:1; + uint64_t imsi_mabt:1; + uint64_t imsi_tabt:1; + uint64_t imsi_per:1; + uint64_t imr_tto:1; + uint64_t imr_abt:1; + uint64_t itr_abt:1; + uint64_t imr_wtto:1; + uint64_t imr_wabt:1; + uint64_t itr_wabt:1; + } cn30xx; + struct cvmx_pci_int_enb_cn31xx { + uint64_t reserved_34_63:30; + uint64_t ill_rd:1; + uint64_t ill_wr:1; + uint64_t win_wr:1; + uint64_t dma1_fi:1; + uint64_t dma0_fi:1; + uint64_t idtime1:1; + uint64_t idtime0:1; + uint64_t idcnt1:1; + uint64_t idcnt0:1; + uint64_t reserved_23_24:2; + uint64_t iptime1:1; + uint64_t iptime0:1; + uint64_t reserved_19_20:2; + uint64_t ipcnt1:1; + uint64_t ipcnt0:1; + uint64_t irsl_int:1; + uint64_t ill_rrd:1; + uint64_t ill_rwr:1; + uint64_t idperr:1; + uint64_t iaperr:1; + uint64_t iserr:1; + uint64_t itsr_abt:1; + uint64_t imsc_msg:1; + uint64_t imsi_mabt:1; + uint64_t imsi_tabt:1; + uint64_t imsi_per:1; + uint64_t imr_tto:1; + uint64_t imr_abt:1; + uint64_t itr_abt:1; + uint64_t imr_wtto:1; + uint64_t imr_wabt:1; + uint64_t itr_wabt:1; + } cn31xx; + struct cvmx_pci_int_enb_s cn38xx; + struct cvmx_pci_int_enb_s cn38xxp2; + struct cvmx_pci_int_enb_cn31xx cn50xx; + struct cvmx_pci_int_enb_s cn58xx; + struct cvmx_pci_int_enb_s cn58xxp1; +}; + +union cvmx_pci_int_enb2 { + uint64_t u64; + struct cvmx_pci_int_enb2_s { + uint64_t reserved_34_63:30; + uint64_t ill_rd:1; + uint64_t ill_wr:1; + uint64_t win_wr:1; + uint64_t dma1_fi:1; + uint64_t dma0_fi:1; + uint64_t rdtime1:1; + uint64_t rdtime0:1; + uint64_t rdcnt1:1; + uint64_t rdcnt0:1; + uint64_t rptime3:1; + uint64_t rptime2:1; + uint64_t rptime1:1; + uint64_t rptime0:1; + uint64_t rpcnt3:1; + uint64_t rpcnt2:1; + uint64_t rpcnt1:1; + uint64_t rpcnt0:1; + uint64_t rrsl_int:1; + uint64_t ill_rrd:1; + uint64_t ill_rwr:1; + uint64_t rdperr:1; + uint64_t raperr:1; + uint64_t rserr:1; + uint64_t rtsr_abt:1; + uint64_t rmsc_msg:1; + uint64_t rmsi_mabt:1; + uint64_t rmsi_tabt:1; + uint64_t rmsi_per:1; + uint64_t rmr_tto:1; + uint64_t rmr_abt:1; + uint64_t rtr_abt:1; + uint64_t rmr_wtto:1; + uint64_t rmr_wabt:1; + uint64_t rtr_wabt:1; + } s; + struct cvmx_pci_int_enb2_cn30xx { + uint64_t reserved_34_63:30; + uint64_t ill_rd:1; + uint64_t ill_wr:1; + uint64_t win_wr:1; + uint64_t dma1_fi:1; + uint64_t dma0_fi:1; + uint64_t rdtime1:1; + uint64_t rdtime0:1; + uint64_t rdcnt1:1; + uint64_t rdcnt0:1; + uint64_t reserved_22_24:3; + uint64_t rptime0:1; + uint64_t reserved_18_20:3; + uint64_t rpcnt0:1; + uint64_t rrsl_int:1; + uint64_t ill_rrd:1; + uint64_t ill_rwr:1; + uint64_t rdperr:1; + uint64_t raperr:1; + uint64_t rserr:1; + uint64_t rtsr_abt:1; + uint64_t rmsc_msg:1; + uint64_t rmsi_mabt:1; + uint64_t rmsi_tabt:1; + uint64_t rmsi_per:1; + uint64_t rmr_tto:1; + uint64_t rmr_abt:1; + uint64_t rtr_abt:1; + uint64_t rmr_wtto:1; + uint64_t rmr_wabt:1; + uint64_t rtr_wabt:1; + } cn30xx; + struct cvmx_pci_int_enb2_cn31xx { + uint64_t reserved_34_63:30; + uint64_t ill_rd:1; + uint64_t ill_wr:1; + uint64_t win_wr:1; + uint64_t dma1_fi:1; + uint64_t dma0_fi:1; + 
uint64_t rdtime1:1; + uint64_t rdtime0:1; + uint64_t rdcnt1:1; + uint64_t rdcnt0:1; + uint64_t reserved_23_24:2; + uint64_t rptime1:1; + uint64_t rptime0:1; + uint64_t reserved_19_20:2; + uint64_t rpcnt1:1; + uint64_t rpcnt0:1; + uint64_t rrsl_int:1; + uint64_t ill_rrd:1; + uint64_t ill_rwr:1; + uint64_t rdperr:1; + uint64_t raperr:1; + uint64_t rserr:1; + uint64_t rtsr_abt:1; + uint64_t rmsc_msg:1; + uint64_t rmsi_mabt:1; + uint64_t rmsi_tabt:1; + uint64_t rmsi_per:1; + uint64_t rmr_tto:1; + uint64_t rmr_abt:1; + uint64_t rtr_abt:1; + uint64_t rmr_wtto:1; + uint64_t rmr_wabt:1; + uint64_t rtr_wabt:1; + } cn31xx; + struct cvmx_pci_int_enb2_s cn38xx; + struct cvmx_pci_int_enb2_s cn38xxp2; + struct cvmx_pci_int_enb2_cn31xx cn50xx; + struct cvmx_pci_int_enb2_s cn58xx; + struct cvmx_pci_int_enb2_s cn58xxp1; +}; + +union cvmx_pci_int_sum { + uint64_t u64; + struct cvmx_pci_int_sum_s { + uint64_t reserved_34_63:30; + uint64_t ill_rd:1; + uint64_t ill_wr:1; + uint64_t win_wr:1; + uint64_t dma1_fi:1; + uint64_t dma0_fi:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t ptime3:1; + uint64_t ptime2:1; + uint64_t ptime1:1; + uint64_t ptime0:1; + uint64_t pcnt3:1; + uint64_t pcnt2:1; + uint64_t pcnt1:1; + uint64_t pcnt0:1; + uint64_t rsl_int:1; + uint64_t ill_rrd:1; + uint64_t ill_rwr:1; + uint64_t dperr:1; + uint64_t aperr:1; + uint64_t serr:1; + uint64_t tsr_abt:1; + uint64_t msc_msg:1; + uint64_t msi_mabt:1; + uint64_t msi_tabt:1; + uint64_t msi_per:1; + uint64_t mr_tto:1; + uint64_t mr_abt:1; + uint64_t tr_abt:1; + uint64_t mr_wtto:1; + uint64_t mr_wabt:1; + uint64_t tr_wabt:1; + } s; + struct cvmx_pci_int_sum_cn30xx { + uint64_t reserved_34_63:30; + uint64_t ill_rd:1; + uint64_t ill_wr:1; + uint64_t win_wr:1; + uint64_t dma1_fi:1; + uint64_t dma0_fi:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t reserved_22_24:3; + uint64_t ptime0:1; + uint64_t reserved_18_20:3; + uint64_t pcnt0:1; + uint64_t rsl_int:1; + uint64_t ill_rrd:1; + uint64_t ill_rwr:1; + uint64_t dperr:1; + uint64_t aperr:1; + uint64_t serr:1; + uint64_t tsr_abt:1; + uint64_t msc_msg:1; + uint64_t msi_mabt:1; + uint64_t msi_tabt:1; + uint64_t msi_per:1; + uint64_t mr_tto:1; + uint64_t mr_abt:1; + uint64_t tr_abt:1; + uint64_t mr_wtto:1; + uint64_t mr_wabt:1; + uint64_t tr_wabt:1; + } cn30xx; + struct cvmx_pci_int_sum_cn31xx { + uint64_t reserved_34_63:30; + uint64_t ill_rd:1; + uint64_t ill_wr:1; + uint64_t win_wr:1; + uint64_t dma1_fi:1; + uint64_t dma0_fi:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t reserved_23_24:2; + uint64_t ptime1:1; + uint64_t ptime0:1; + uint64_t reserved_19_20:2; + uint64_t pcnt1:1; + uint64_t pcnt0:1; + uint64_t rsl_int:1; + uint64_t ill_rrd:1; + uint64_t ill_rwr:1; + uint64_t dperr:1; + uint64_t aperr:1; + uint64_t serr:1; + uint64_t tsr_abt:1; + uint64_t msc_msg:1; + uint64_t msi_mabt:1; + uint64_t msi_tabt:1; + uint64_t msi_per:1; + uint64_t mr_tto:1; + uint64_t mr_abt:1; + uint64_t tr_abt:1; + uint64_t mr_wtto:1; + uint64_t mr_wabt:1; + uint64_t tr_wabt:1; + } cn31xx; + struct cvmx_pci_int_sum_s cn38xx; + struct cvmx_pci_int_sum_s cn38xxp2; + struct cvmx_pci_int_sum_cn31xx cn50xx; + struct cvmx_pci_int_sum_s cn58xx; + struct cvmx_pci_int_sum_s cn58xxp1; +}; + +union cvmx_pci_int_sum2 { + uint64_t u64; + struct cvmx_pci_int_sum2_s { + uint64_t reserved_34_63:30; + uint64_t ill_rd:1; + uint64_t ill_wr:1; + uint64_t win_wr:1; + uint64_t dma1_fi:1; + uint64_t 
dma0_fi:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t ptime3:1; + uint64_t ptime2:1; + uint64_t ptime1:1; + uint64_t ptime0:1; + uint64_t pcnt3:1; + uint64_t pcnt2:1; + uint64_t pcnt1:1; + uint64_t pcnt0:1; + uint64_t rsl_int:1; + uint64_t ill_rrd:1; + uint64_t ill_rwr:1; + uint64_t dperr:1; + uint64_t aperr:1; + uint64_t serr:1; + uint64_t tsr_abt:1; + uint64_t msc_msg:1; + uint64_t msi_mabt:1; + uint64_t msi_tabt:1; + uint64_t msi_per:1; + uint64_t mr_tto:1; + uint64_t mr_abt:1; + uint64_t tr_abt:1; + uint64_t mr_wtto:1; + uint64_t mr_wabt:1; + uint64_t tr_wabt:1; + } s; + struct cvmx_pci_int_sum2_cn30xx { + uint64_t reserved_34_63:30; + uint64_t ill_rd:1; + uint64_t ill_wr:1; + uint64_t win_wr:1; + uint64_t dma1_fi:1; + uint64_t dma0_fi:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t reserved_22_24:3; + uint64_t ptime0:1; + uint64_t reserved_18_20:3; + uint64_t pcnt0:1; + uint64_t rsl_int:1; + uint64_t ill_rrd:1; + uint64_t ill_rwr:1; + uint64_t dperr:1; + uint64_t aperr:1; + uint64_t serr:1; + uint64_t tsr_abt:1; + uint64_t msc_msg:1; + uint64_t msi_mabt:1; + uint64_t msi_tabt:1; + uint64_t msi_per:1; + uint64_t mr_tto:1; + uint64_t mr_abt:1; + uint64_t tr_abt:1; + uint64_t mr_wtto:1; + uint64_t mr_wabt:1; + uint64_t tr_wabt:1; + } cn30xx; + struct cvmx_pci_int_sum2_cn31xx { + uint64_t reserved_34_63:30; + uint64_t ill_rd:1; + uint64_t ill_wr:1; + uint64_t win_wr:1; + uint64_t dma1_fi:1; + uint64_t dma0_fi:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t reserved_23_24:2; + uint64_t ptime1:1; + uint64_t ptime0:1; + uint64_t reserved_19_20:2; + uint64_t pcnt1:1; + uint64_t pcnt0:1; + uint64_t rsl_int:1; + uint64_t ill_rrd:1; + uint64_t ill_rwr:1; + uint64_t dperr:1; + uint64_t aperr:1; + uint64_t serr:1; + uint64_t tsr_abt:1; + uint64_t msc_msg:1; + uint64_t msi_mabt:1; + uint64_t msi_tabt:1; + uint64_t msi_per:1; + uint64_t mr_tto:1; + uint64_t mr_abt:1; + uint64_t tr_abt:1; + uint64_t mr_wtto:1; + uint64_t mr_wabt:1; + uint64_t tr_wabt:1; + } cn31xx; + struct cvmx_pci_int_sum2_s cn38xx; + struct cvmx_pci_int_sum2_s cn38xxp2; + struct cvmx_pci_int_sum2_cn31xx cn50xx; + struct cvmx_pci_int_sum2_s cn58xx; + struct cvmx_pci_int_sum2_s cn58xxp1; +}; + +union cvmx_pci_msi_rcv { + uint32_t u32; + struct cvmx_pci_msi_rcv_s { + uint32_t reserved_6_31:26; + uint32_t intr:6; + } s; + struct cvmx_pci_msi_rcv_s cn30xx; + struct cvmx_pci_msi_rcv_s cn31xx; + struct cvmx_pci_msi_rcv_s cn38xx; + struct cvmx_pci_msi_rcv_s cn38xxp2; + struct cvmx_pci_msi_rcv_s cn50xx; + struct cvmx_pci_msi_rcv_s cn58xx; + struct cvmx_pci_msi_rcv_s cn58xxp1; +}; + +union cvmx_pci_pkt_creditsx { + uint32_t u32; + struct cvmx_pci_pkt_creditsx_s { + uint32_t pkt_cnt:16; + uint32_t ptr_cnt:16; + } s; + struct cvmx_pci_pkt_creditsx_s cn30xx; + struct cvmx_pci_pkt_creditsx_s cn31xx; + struct cvmx_pci_pkt_creditsx_s cn38xx; + struct cvmx_pci_pkt_creditsx_s cn38xxp2; + struct cvmx_pci_pkt_creditsx_s cn50xx; + struct cvmx_pci_pkt_creditsx_s cn58xx; + struct cvmx_pci_pkt_creditsx_s cn58xxp1; +}; + +union cvmx_pci_pkts_sentx { + uint32_t u32; + struct cvmx_pci_pkts_sentx_s { + uint32_t pkt_cnt:32; + } s; + struct cvmx_pci_pkts_sentx_s cn30xx; + struct cvmx_pci_pkts_sentx_s cn31xx; + struct cvmx_pci_pkts_sentx_s cn38xx; + struct cvmx_pci_pkts_sentx_s cn38xxp2; + struct cvmx_pci_pkts_sentx_s cn50xx; + struct cvmx_pci_pkts_sentx_s cn58xx; + struct cvmx_pci_pkts_sentx_s cn58xxp1; +}; + +union 
cvmx_pci_pkts_sent_int_levx { + uint32_t u32; + struct cvmx_pci_pkts_sent_int_levx_s { + uint32_t pkt_cnt:32; + } s; + struct cvmx_pci_pkts_sent_int_levx_s cn30xx; + struct cvmx_pci_pkts_sent_int_levx_s cn31xx; + struct cvmx_pci_pkts_sent_int_levx_s cn38xx; + struct cvmx_pci_pkts_sent_int_levx_s cn38xxp2; + struct cvmx_pci_pkts_sent_int_levx_s cn50xx; + struct cvmx_pci_pkts_sent_int_levx_s cn58xx; + struct cvmx_pci_pkts_sent_int_levx_s cn58xxp1; +}; + +union cvmx_pci_pkts_sent_timex { + uint32_t u32; + struct cvmx_pci_pkts_sent_timex_s { + uint32_t pkt_time:32; + } s; + struct cvmx_pci_pkts_sent_timex_s cn30xx; + struct cvmx_pci_pkts_sent_timex_s cn31xx; + struct cvmx_pci_pkts_sent_timex_s cn38xx; + struct cvmx_pci_pkts_sent_timex_s cn38xxp2; + struct cvmx_pci_pkts_sent_timex_s cn50xx; + struct cvmx_pci_pkts_sent_timex_s cn58xx; + struct cvmx_pci_pkts_sent_timex_s cn58xxp1; +}; + +union cvmx_pci_read_cmd_6 { + uint32_t u32; + struct cvmx_pci_read_cmd_6_s { + uint32_t reserved_9_31:23; + uint32_t min_data:6; + uint32_t prefetch:3; + } s; + struct cvmx_pci_read_cmd_6_s cn30xx; + struct cvmx_pci_read_cmd_6_s cn31xx; + struct cvmx_pci_read_cmd_6_s cn38xx; + struct cvmx_pci_read_cmd_6_s cn38xxp2; + struct cvmx_pci_read_cmd_6_s cn50xx; + struct cvmx_pci_read_cmd_6_s cn58xx; + struct cvmx_pci_read_cmd_6_s cn58xxp1; +}; + +union cvmx_pci_read_cmd_c { + uint32_t u32; + struct cvmx_pci_read_cmd_c_s { + uint32_t reserved_9_31:23; + uint32_t min_data:6; + uint32_t prefetch:3; + } s; + struct cvmx_pci_read_cmd_c_s cn30xx; + struct cvmx_pci_read_cmd_c_s cn31xx; + struct cvmx_pci_read_cmd_c_s cn38xx; + struct cvmx_pci_read_cmd_c_s cn38xxp2; + struct cvmx_pci_read_cmd_c_s cn50xx; + struct cvmx_pci_read_cmd_c_s cn58xx; + struct cvmx_pci_read_cmd_c_s cn58xxp1; +}; + +union cvmx_pci_read_cmd_e { + uint32_t u32; + struct cvmx_pci_read_cmd_e_s { + uint32_t reserved_9_31:23; + uint32_t min_data:6; + uint32_t prefetch:3; + } s; + struct cvmx_pci_read_cmd_e_s cn30xx; + struct cvmx_pci_read_cmd_e_s cn31xx; + struct cvmx_pci_read_cmd_e_s cn38xx; + struct cvmx_pci_read_cmd_e_s cn38xxp2; + struct cvmx_pci_read_cmd_e_s cn50xx; + struct cvmx_pci_read_cmd_e_s cn58xx; + struct cvmx_pci_read_cmd_e_s cn58xxp1; +}; + +union cvmx_pci_read_timeout { + uint64_t u64; + struct cvmx_pci_read_timeout_s { + uint64_t reserved_32_63:32; + uint64_t enb:1; + uint64_t cnt:31; + } s; + struct cvmx_pci_read_timeout_s cn30xx; + struct cvmx_pci_read_timeout_s cn31xx; + struct cvmx_pci_read_timeout_s cn38xx; + struct cvmx_pci_read_timeout_s cn38xxp2; + struct cvmx_pci_read_timeout_s cn50xx; + struct cvmx_pci_read_timeout_s cn58xx; + struct cvmx_pci_read_timeout_s cn58xxp1; +}; + +union cvmx_pci_scm_reg { + uint64_t u64; + struct cvmx_pci_scm_reg_s { + uint64_t reserved_32_63:32; + uint64_t scm:32; + } s; + struct cvmx_pci_scm_reg_s cn30xx; + struct cvmx_pci_scm_reg_s cn31xx; + struct cvmx_pci_scm_reg_s cn38xx; + struct cvmx_pci_scm_reg_s cn38xxp2; + struct cvmx_pci_scm_reg_s cn50xx; + struct cvmx_pci_scm_reg_s cn58xx; + struct cvmx_pci_scm_reg_s cn58xxp1; +}; + +union cvmx_pci_tsr_reg { + uint64_t u64; + struct cvmx_pci_tsr_reg_s { + uint64_t reserved_36_63:28; + uint64_t tsr:36; + } s; + struct cvmx_pci_tsr_reg_s cn30xx; + struct cvmx_pci_tsr_reg_s cn31xx; + struct cvmx_pci_tsr_reg_s cn38xx; + struct cvmx_pci_tsr_reg_s cn38xxp2; + struct cvmx_pci_tsr_reg_s cn50xx; + struct cvmx_pci_tsr_reg_s cn58xx; + struct cvmx_pci_tsr_reg_s cn58xxp1; +}; + +union cvmx_pci_win_rd_addr { + uint64_t u64; + struct cvmx_pci_win_rd_addr_s { + uint64_t 
reserved_49_63:15; + uint64_t iobit:1; + uint64_t reserved_0_47:48; + } s; + struct cvmx_pci_win_rd_addr_cn30xx { + uint64_t reserved_49_63:15; + uint64_t iobit:1; + uint64_t rd_addr:46; + uint64_t reserved_0_1:2; + } cn30xx; + struct cvmx_pci_win_rd_addr_cn30xx cn31xx; + struct cvmx_pci_win_rd_addr_cn38xx { + uint64_t reserved_49_63:15; + uint64_t iobit:1; + uint64_t rd_addr:45; + uint64_t reserved_0_2:3; + } cn38xx; + struct cvmx_pci_win_rd_addr_cn38xx cn38xxp2; + struct cvmx_pci_win_rd_addr_cn30xx cn50xx; + struct cvmx_pci_win_rd_addr_cn38xx cn58xx; + struct cvmx_pci_win_rd_addr_cn38xx cn58xxp1; +}; + +union cvmx_pci_win_rd_data { + uint64_t u64; + struct cvmx_pci_win_rd_data_s { + uint64_t rd_data:64; + } s; + struct cvmx_pci_win_rd_data_s cn30xx; + struct cvmx_pci_win_rd_data_s cn31xx; + struct cvmx_pci_win_rd_data_s cn38xx; + struct cvmx_pci_win_rd_data_s cn38xxp2; + struct cvmx_pci_win_rd_data_s cn50xx; + struct cvmx_pci_win_rd_data_s cn58xx; + struct cvmx_pci_win_rd_data_s cn58xxp1; +}; + +union cvmx_pci_win_wr_addr { + uint64_t u64; + struct cvmx_pci_win_wr_addr_s { + uint64_t reserved_49_63:15; + uint64_t iobit:1; + uint64_t wr_addr:45; + uint64_t reserved_0_2:3; + } s; + struct cvmx_pci_win_wr_addr_s cn30xx; + struct cvmx_pci_win_wr_addr_s cn31xx; + struct cvmx_pci_win_wr_addr_s cn38xx; + struct cvmx_pci_win_wr_addr_s cn38xxp2; + struct cvmx_pci_win_wr_addr_s cn50xx; + struct cvmx_pci_win_wr_addr_s cn58xx; + struct cvmx_pci_win_wr_addr_s cn58xxp1; +}; + +union cvmx_pci_win_wr_data { + uint64_t u64; + struct cvmx_pci_win_wr_data_s { + uint64_t wr_data:64; + } s; + struct cvmx_pci_win_wr_data_s cn30xx; + struct cvmx_pci_win_wr_data_s cn31xx; + struct cvmx_pci_win_wr_data_s cn38xx; + struct cvmx_pci_win_wr_data_s cn38xxp2; + struct cvmx_pci_win_wr_data_s cn50xx; + struct cvmx_pci_win_wr_data_s cn58xx; + struct cvmx_pci_win_wr_data_s cn58xxp1; +}; + +union cvmx_pci_win_wr_mask { + uint64_t u64; + struct cvmx_pci_win_wr_mask_s { + uint64_t reserved_8_63:56; + uint64_t wr_mask:8; + } s; + struct cvmx_pci_win_wr_mask_s cn30xx; + struct cvmx_pci_win_wr_mask_s cn31xx; + struct cvmx_pci_win_wr_mask_s cn38xx; + struct cvmx_pci_win_wr_mask_s cn38xxp2; + struct cvmx_pci_win_wr_mask_s cn50xx; + struct cvmx_pci_win_wr_mask_s cn58xx; + struct cvmx_pci_win_wr_mask_s cn58xxp1; +}; + +#endif diff --git a/arch/mips/include/asm/octeon/cvmx-pcieep-defs.h b/arch/mips/include/asm/octeon/cvmx-pcieep-defs.h new file mode 100644 index 0000000..d553f8e --- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-pcieep-defs.h @@ -0,0 +1,1365 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. 
+ * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_PCIEEP_DEFS_H__ +#define __CVMX_PCIEEP_DEFS_H__ + +#define CVMX_PCIEEP_CFG000 \ + (0x0000000000000000ull) +#define CVMX_PCIEEP_CFG001 \ + (0x0000000000000004ull) +#define CVMX_PCIEEP_CFG002 \ + (0x0000000000000008ull) +#define CVMX_PCIEEP_CFG003 \ + (0x000000000000000Cull) +#define CVMX_PCIEEP_CFG004 \ + (0x0000000000000010ull) +#define CVMX_PCIEEP_CFG004_MASK \ + (0x0000000080000010ull) +#define CVMX_PCIEEP_CFG005 \ + (0x0000000000000014ull) +#define CVMX_PCIEEP_CFG005_MASK \ + (0x0000000080000014ull) +#define CVMX_PCIEEP_CFG006 \ + (0x0000000000000018ull) +#define CVMX_PCIEEP_CFG006_MASK \ + (0x0000000080000018ull) +#define CVMX_PCIEEP_CFG007 \ + (0x000000000000001Cull) +#define CVMX_PCIEEP_CFG007_MASK \ + (0x000000008000001Cull) +#define CVMX_PCIEEP_CFG008 \ + (0x0000000000000020ull) +#define CVMX_PCIEEP_CFG008_MASK \ + (0x0000000080000020ull) +#define CVMX_PCIEEP_CFG009 \ + (0x0000000000000024ull) +#define CVMX_PCIEEP_CFG009_MASK \ + (0x0000000080000024ull) +#define CVMX_PCIEEP_CFG010 \ + (0x0000000000000028ull) +#define CVMX_PCIEEP_CFG011 \ + (0x000000000000002Cull) +#define CVMX_PCIEEP_CFG012 \ + (0x0000000000000030ull) +#define CVMX_PCIEEP_CFG012_MASK \ + (0x0000000080000030ull) +#define CVMX_PCIEEP_CFG013 \ + (0x0000000000000034ull) +#define CVMX_PCIEEP_CFG015 \ + (0x000000000000003Cull) +#define CVMX_PCIEEP_CFG016 \ + (0x0000000000000040ull) +#define CVMX_PCIEEP_CFG017 \ + (0x0000000000000044ull) +#define CVMX_PCIEEP_CFG020 \ + (0x0000000000000050ull) +#define CVMX_PCIEEP_CFG021 \ + (0x0000000000000054ull) +#define CVMX_PCIEEP_CFG022 \ + (0x0000000000000058ull) +#define CVMX_PCIEEP_CFG023 \ + (0x000000000000005Cull) +#define CVMX_PCIEEP_CFG028 \ + (0x0000000000000070ull) +#define CVMX_PCIEEP_CFG029 \ + (0x0000000000000074ull) +#define CVMX_PCIEEP_CFG030 \ + (0x0000000000000078ull) +#define CVMX_PCIEEP_CFG031 \ + (0x000000000000007Cull) +#define CVMX_PCIEEP_CFG032 \ + (0x0000000000000080ull) +#define CVMX_PCIEEP_CFG033 \ + (0x0000000000000084ull) +#define CVMX_PCIEEP_CFG034 \ + (0x0000000000000088ull) +#define CVMX_PCIEEP_CFG037 \ + (0x0000000000000094ull) +#define CVMX_PCIEEP_CFG038 \ + (0x0000000000000098ull) +#define CVMX_PCIEEP_CFG039 \ + (0x000000000000009Cull) +#define CVMX_PCIEEP_CFG040 \ + (0x00000000000000A0ull) +#define CVMX_PCIEEP_CFG041 \ + (0x00000000000000A4ull) +#define CVMX_PCIEEP_CFG042 \ + (0x00000000000000A8ull) +#define CVMX_PCIEEP_CFG064 \ + (0x0000000000000100ull) +#define CVMX_PCIEEP_CFG065 \ + (0x0000000000000104ull) +#define CVMX_PCIEEP_CFG066 \ + (0x0000000000000108ull) +#define CVMX_PCIEEP_CFG067 \ + (0x000000000000010Cull) +#define CVMX_PCIEEP_CFG068 \ + (0x0000000000000110ull) +#define CVMX_PCIEEP_CFG069 \ + (0x0000000000000114ull) +#define CVMX_PCIEEP_CFG070 \ + (0x0000000000000118ull) +#define CVMX_PCIEEP_CFG071 \ + (0x000000000000011Cull) +#define CVMX_PCIEEP_CFG072 \ + (0x0000000000000120ull) +#define CVMX_PCIEEP_CFG073 \ + (0x0000000000000124ull) +#define CVMX_PCIEEP_CFG074 \ + (0x0000000000000128ull) +#define CVMX_PCIEEP_CFG448 \ + (0x0000000000000700ull) +#define CVMX_PCIEEP_CFG449 \ + (0x0000000000000704ull) +#define CVMX_PCIEEP_CFG450 \ + (0x0000000000000708ull) +#define CVMX_PCIEEP_CFG451 \ + (0x000000000000070Cull) +#define CVMX_PCIEEP_CFG452 \ + (0x0000000000000710ull) +#define CVMX_PCIEEP_CFG453 \ + (0x0000000000000714ull) 
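+/*
+ * Usage sketch (illustrative only, not part of the generated register
+ * description): each CVMX_PCIEEP_CFG* constant above is a byte offset
+ * into the endpoint's PCIe configuration space, and each union below
+ * pairs a raw 32-bit view (u32) with bit-field views.  Assuming a raw
+ * config word has already been fetched into 'raw' by whatever config
+ * accessor the platform provides:
+ *
+ *	union cvmx_pcieep_cfg000 cfg000;
+ *	cfg000.u32 = raw;
+ *	pr_info("vendor 0x%04x device 0x%04x\n",
+ *		cfg000.s.vendid, cfg000.s.devid);
+ *
+ * The *_MASK constants (addresses with bit 31 set) pair with the mask
+ * unions below, e.g. cvmx_pcieep_cfg004_mask for CVMX_PCIEEP_CFG004.
+ */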
+#define CVMX_PCIEEP_CFG454 \ + (0x0000000000000718ull) +#define CVMX_PCIEEP_CFG455 \ + (0x000000000000071Cull) +#define CVMX_PCIEEP_CFG456 \ + (0x0000000000000720ull) +#define CVMX_PCIEEP_CFG458 \ + (0x0000000000000728ull) +#define CVMX_PCIEEP_CFG459 \ + (0x000000000000072Cull) +#define CVMX_PCIEEP_CFG460 \ + (0x0000000000000730ull) +#define CVMX_PCIEEP_CFG461 \ + (0x0000000000000734ull) +#define CVMX_PCIEEP_CFG462 \ + (0x0000000000000738ull) +#define CVMX_PCIEEP_CFG463 \ + (0x000000000000073Cull) +#define CVMX_PCIEEP_CFG464 \ + (0x0000000000000740ull) +#define CVMX_PCIEEP_CFG465 \ + (0x0000000000000744ull) +#define CVMX_PCIEEP_CFG466 \ + (0x0000000000000748ull) +#define CVMX_PCIEEP_CFG467 \ + (0x000000000000074Cull) +#define CVMX_PCIEEP_CFG468 \ + (0x0000000000000750ull) +#define CVMX_PCIEEP_CFG490 \ + (0x00000000000007A8ull) +#define CVMX_PCIEEP_CFG491 \ + (0x00000000000007ACull) +#define CVMX_PCIEEP_CFG492 \ + (0x00000000000007B0ull) +#define CVMX_PCIEEP_CFG516 \ + (0x0000000000000810ull) +#define CVMX_PCIEEP_CFG517 \ + (0x0000000000000814ull) + +union cvmx_pcieep_cfg000 { + uint32_t u32; + struct cvmx_pcieep_cfg000_s { + uint32_t devid:16; + uint32_t vendid:16; + } s; + struct cvmx_pcieep_cfg000_s cn52xx; + struct cvmx_pcieep_cfg000_s cn52xxp1; + struct cvmx_pcieep_cfg000_s cn56xx; + struct cvmx_pcieep_cfg000_s cn56xxp1; +}; + +union cvmx_pcieep_cfg001 { + uint32_t u32; + struct cvmx_pcieep_cfg001_s { + uint32_t dpe:1; + uint32_t sse:1; + uint32_t rma:1; + uint32_t rta:1; + uint32_t sta:1; + uint32_t devt:2; + uint32_t mdpe:1; + uint32_t fbb:1; + uint32_t reserved_22_22:1; + uint32_t m66:1; + uint32_t cl:1; + uint32_t i_stat:1; + uint32_t reserved_11_18:8; + uint32_t i_dis:1; + uint32_t fbbe:1; + uint32_t see:1; + uint32_t ids_wcc:1; + uint32_t per:1; + uint32_t vps:1; + uint32_t mwice:1; + uint32_t scse:1; + uint32_t me:1; + uint32_t msae:1; + uint32_t isae:1; + } s; + struct cvmx_pcieep_cfg001_s cn52xx; + struct cvmx_pcieep_cfg001_s cn52xxp1; + struct cvmx_pcieep_cfg001_s cn56xx; + struct cvmx_pcieep_cfg001_s cn56xxp1; +}; + +union cvmx_pcieep_cfg002 { + uint32_t u32; + struct cvmx_pcieep_cfg002_s { + uint32_t bcc:8; + uint32_t sc:8; + uint32_t pi:8; + uint32_t rid:8; + } s; + struct cvmx_pcieep_cfg002_s cn52xx; + struct cvmx_pcieep_cfg002_s cn52xxp1; + struct cvmx_pcieep_cfg002_s cn56xx; + struct cvmx_pcieep_cfg002_s cn56xxp1; +}; + +union cvmx_pcieep_cfg003 { + uint32_t u32; + struct cvmx_pcieep_cfg003_s { + uint32_t bist:8; + uint32_t mfd:1; + uint32_t chf:7; + uint32_t lt:8; + uint32_t cls:8; + } s; + struct cvmx_pcieep_cfg003_s cn52xx; + struct cvmx_pcieep_cfg003_s cn52xxp1; + struct cvmx_pcieep_cfg003_s cn56xx; + struct cvmx_pcieep_cfg003_s cn56xxp1; +}; + +union cvmx_pcieep_cfg004 { + uint32_t u32; + struct cvmx_pcieep_cfg004_s { + uint32_t lbab:18; + uint32_t reserved_4_13:10; + uint32_t pf:1; + uint32_t typ:2; + uint32_t mspc:1; + } s; + struct cvmx_pcieep_cfg004_s cn52xx; + struct cvmx_pcieep_cfg004_s cn52xxp1; + struct cvmx_pcieep_cfg004_s cn56xx; + struct cvmx_pcieep_cfg004_s cn56xxp1; +}; + +union cvmx_pcieep_cfg004_mask { + uint32_t u32; + struct cvmx_pcieep_cfg004_mask_s { + uint32_t lmask:31; + uint32_t enb:1; + } s; + struct cvmx_pcieep_cfg004_mask_s cn52xx; + struct cvmx_pcieep_cfg004_mask_s cn52xxp1; + struct cvmx_pcieep_cfg004_mask_s cn56xx; + struct cvmx_pcieep_cfg004_mask_s cn56xxp1; +}; + +union cvmx_pcieep_cfg005 { + uint32_t u32; + struct cvmx_pcieep_cfg005_s { + uint32_t ubab:32; + } s; + struct cvmx_pcieep_cfg005_s cn52xx; + struct cvmx_pcieep_cfg005_s 
cn52xxp1; + struct cvmx_pcieep_cfg005_s cn56xx; + struct cvmx_pcieep_cfg005_s cn56xxp1; +}; + +union cvmx_pcieep_cfg005_mask { + uint32_t u32; + struct cvmx_pcieep_cfg005_mask_s { + uint32_t umask:32; + } s; + struct cvmx_pcieep_cfg005_mask_s cn52xx; + struct cvmx_pcieep_cfg005_mask_s cn52xxp1; + struct cvmx_pcieep_cfg005_mask_s cn56xx; + struct cvmx_pcieep_cfg005_mask_s cn56xxp1; +}; + +union cvmx_pcieep_cfg006 { + uint32_t u32; + struct cvmx_pcieep_cfg006_s { + uint32_t lbab:6; + uint32_t reserved_4_25:22; + uint32_t pf:1; + uint32_t typ:2; + uint32_t mspc:1; + } s; + struct cvmx_pcieep_cfg006_s cn52xx; + struct cvmx_pcieep_cfg006_s cn52xxp1; + struct cvmx_pcieep_cfg006_s cn56xx; + struct cvmx_pcieep_cfg006_s cn56xxp1; +}; + +union cvmx_pcieep_cfg006_mask { + uint32_t u32; + struct cvmx_pcieep_cfg006_mask_s { + uint32_t lmask:31; + uint32_t enb:1; + } s; + struct cvmx_pcieep_cfg006_mask_s cn52xx; + struct cvmx_pcieep_cfg006_mask_s cn52xxp1; + struct cvmx_pcieep_cfg006_mask_s cn56xx; + struct cvmx_pcieep_cfg006_mask_s cn56xxp1; +}; + +union cvmx_pcieep_cfg007 { + uint32_t u32; + struct cvmx_pcieep_cfg007_s { + uint32_t ubab:32; + } s; + struct cvmx_pcieep_cfg007_s cn52xx; + struct cvmx_pcieep_cfg007_s cn52xxp1; + struct cvmx_pcieep_cfg007_s cn56xx; + struct cvmx_pcieep_cfg007_s cn56xxp1; +}; + +union cvmx_pcieep_cfg007_mask { + uint32_t u32; + struct cvmx_pcieep_cfg007_mask_s { + uint32_t umask:32; + } s; + struct cvmx_pcieep_cfg007_mask_s cn52xx; + struct cvmx_pcieep_cfg007_mask_s cn52xxp1; + struct cvmx_pcieep_cfg007_mask_s cn56xx; + struct cvmx_pcieep_cfg007_mask_s cn56xxp1; +}; + +union cvmx_pcieep_cfg008 { + uint32_t u32; + struct cvmx_pcieep_cfg008_s { + uint32_t reserved_4_31:28; + uint32_t pf:1; + uint32_t typ:2; + uint32_t mspc:1; + } s; + struct cvmx_pcieep_cfg008_s cn52xx; + struct cvmx_pcieep_cfg008_s cn52xxp1; + struct cvmx_pcieep_cfg008_s cn56xx; + struct cvmx_pcieep_cfg008_s cn56xxp1; +}; + +union cvmx_pcieep_cfg008_mask { + uint32_t u32; + struct cvmx_pcieep_cfg008_mask_s { + uint32_t lmask:31; + uint32_t enb:1; + } s; + struct cvmx_pcieep_cfg008_mask_s cn52xx; + struct cvmx_pcieep_cfg008_mask_s cn52xxp1; + struct cvmx_pcieep_cfg008_mask_s cn56xx; + struct cvmx_pcieep_cfg008_mask_s cn56xxp1; +}; + +union cvmx_pcieep_cfg009 { + uint32_t u32; + struct cvmx_pcieep_cfg009_s { + uint32_t ubab:25; + uint32_t reserved_0_6:7; + } s; + struct cvmx_pcieep_cfg009_s cn52xx; + struct cvmx_pcieep_cfg009_s cn52xxp1; + struct cvmx_pcieep_cfg009_s cn56xx; + struct cvmx_pcieep_cfg009_s cn56xxp1; +}; + +union cvmx_pcieep_cfg009_mask { + uint32_t u32; + struct cvmx_pcieep_cfg009_mask_s { + uint32_t umask:32; + } s; + struct cvmx_pcieep_cfg009_mask_s cn52xx; + struct cvmx_pcieep_cfg009_mask_s cn52xxp1; + struct cvmx_pcieep_cfg009_mask_s cn56xx; + struct cvmx_pcieep_cfg009_mask_s cn56xxp1; +}; + +union cvmx_pcieep_cfg010 { + uint32_t u32; + struct cvmx_pcieep_cfg010_s { + uint32_t cisp:32; + } s; + struct cvmx_pcieep_cfg010_s cn52xx; + struct cvmx_pcieep_cfg010_s cn52xxp1; + struct cvmx_pcieep_cfg010_s cn56xx; + struct cvmx_pcieep_cfg010_s cn56xxp1; +}; + +union cvmx_pcieep_cfg011 { + uint32_t u32; + struct cvmx_pcieep_cfg011_s { + uint32_t ssid:16; + uint32_t ssvid:16; + } s; + struct cvmx_pcieep_cfg011_s cn52xx; + struct cvmx_pcieep_cfg011_s cn52xxp1; + struct cvmx_pcieep_cfg011_s cn56xx; + struct cvmx_pcieep_cfg011_s cn56xxp1; +}; + +union cvmx_pcieep_cfg012 { + uint32_t u32; + struct cvmx_pcieep_cfg012_s { + uint32_t eraddr:16; + uint32_t reserved_1_15:15; + uint32_t er_en:1; + } s; + 
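+	/*
+	 * As throughout these headers, 's' is the common field layout
+	 * and each cnXXxx member names an OCTEON model sharing that
+	 * layout; models whose layout differs get a dedicated struct
+	 * instead (e.g. cvmx_pci_ctl_status_2_cn31xx in cvmx-pci-defs.h
+	 * above).
+	 */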
struct cvmx_pcieep_cfg012_s cn52xx; + struct cvmx_pcieep_cfg012_s cn52xxp1; + struct cvmx_pcieep_cfg012_s cn56xx; + struct cvmx_pcieep_cfg012_s cn56xxp1; +}; + +union cvmx_pcieep_cfg012_mask { + uint32_t u32; + struct cvmx_pcieep_cfg012_mask_s { + uint32_t mask:31; + uint32_t enb:1; + } s; + struct cvmx_pcieep_cfg012_mask_s cn52xx; + struct cvmx_pcieep_cfg012_mask_s cn52xxp1; + struct cvmx_pcieep_cfg012_mask_s cn56xx; + struct cvmx_pcieep_cfg012_mask_s cn56xxp1; +}; + +union cvmx_pcieep_cfg013 { + uint32_t u32; + struct cvmx_pcieep_cfg013_s { + uint32_t reserved_8_31:24; + uint32_t cp:8; + } s; + struct cvmx_pcieep_cfg013_s cn52xx; + struct cvmx_pcieep_cfg013_s cn52xxp1; + struct cvmx_pcieep_cfg013_s cn56xx; + struct cvmx_pcieep_cfg013_s cn56xxp1; +}; + +union cvmx_pcieep_cfg015 { + uint32_t u32; + struct cvmx_pcieep_cfg015_s { + uint32_t ml:8; + uint32_t mg:8; + uint32_t inta:8; + uint32_t il:8; + } s; + struct cvmx_pcieep_cfg015_s cn52xx; + struct cvmx_pcieep_cfg015_s cn52xxp1; + struct cvmx_pcieep_cfg015_s cn56xx; + struct cvmx_pcieep_cfg015_s cn56xxp1; +}; + +union cvmx_pcieep_cfg016 { + uint32_t u32; + struct cvmx_pcieep_cfg016_s { + uint32_t pmes:5; + uint32_t d2s:1; + uint32_t d1s:1; + uint32_t auxc:3; + uint32_t dsi:1; + uint32_t reserved_20_20:1; + uint32_t pme_clock:1; + uint32_t pmsv:3; + uint32_t ncp:8; + uint32_t pmcid:8; + } s; + struct cvmx_pcieep_cfg016_s cn52xx; + struct cvmx_pcieep_cfg016_s cn52xxp1; + struct cvmx_pcieep_cfg016_s cn56xx; + struct cvmx_pcieep_cfg016_s cn56xxp1; +}; + +union cvmx_pcieep_cfg017 { + uint32_t u32; + struct cvmx_pcieep_cfg017_s { + uint32_t pmdia:8; + uint32_t bpccee:1; + uint32_t bd3h:1; + uint32_t reserved_16_21:6; + uint32_t pmess:1; + uint32_t pmedsia:2; + uint32_t pmds:4; + uint32_t pmeens:1; + uint32_t reserved_4_7:4; + uint32_t nsr:1; + uint32_t reserved_2_2:1; + uint32_t ps:2; + } s; + struct cvmx_pcieep_cfg017_s cn52xx; + struct cvmx_pcieep_cfg017_s cn52xxp1; + struct cvmx_pcieep_cfg017_s cn56xx; + struct cvmx_pcieep_cfg017_s cn56xxp1; +}; + +union cvmx_pcieep_cfg020 { + uint32_t u32; + struct cvmx_pcieep_cfg020_s { + uint32_t reserved_24_31:8; + uint32_t m64:1; + uint32_t mme:3; + uint32_t mmc:3; + uint32_t msien:1; + uint32_t ncp:8; + uint32_t msicid:8; + } s; + struct cvmx_pcieep_cfg020_s cn52xx; + struct cvmx_pcieep_cfg020_s cn52xxp1; + struct cvmx_pcieep_cfg020_s cn56xx; + struct cvmx_pcieep_cfg020_s cn56xxp1; +}; + +union cvmx_pcieep_cfg021 { + uint32_t u32; + struct cvmx_pcieep_cfg021_s { + uint32_t lmsi:30; + uint32_t reserved_0_1:2; + } s; + struct cvmx_pcieep_cfg021_s cn52xx; + struct cvmx_pcieep_cfg021_s cn52xxp1; + struct cvmx_pcieep_cfg021_s cn56xx; + struct cvmx_pcieep_cfg021_s cn56xxp1; +}; + +union cvmx_pcieep_cfg022 { + uint32_t u32; + struct cvmx_pcieep_cfg022_s { + uint32_t umsi:32; + } s; + struct cvmx_pcieep_cfg022_s cn52xx; + struct cvmx_pcieep_cfg022_s cn52xxp1; + struct cvmx_pcieep_cfg022_s cn56xx; + struct cvmx_pcieep_cfg022_s cn56xxp1; +}; + +union cvmx_pcieep_cfg023 { + uint32_t u32; + struct cvmx_pcieep_cfg023_s { + uint32_t reserved_16_31:16; + uint32_t msimd:16; + } s; + struct cvmx_pcieep_cfg023_s cn52xx; + struct cvmx_pcieep_cfg023_s cn52xxp1; + struct cvmx_pcieep_cfg023_s cn56xx; + struct cvmx_pcieep_cfg023_s cn56xxp1; +}; + +union cvmx_pcieep_cfg028 { + uint32_t u32; + struct cvmx_pcieep_cfg028_s { + uint32_t reserved_30_31:2; + uint32_t imn:5; + uint32_t si:1; + uint32_t dpt:4; + uint32_t pciecv:4; + uint32_t ncp:8; + uint32_t pcieid:8; + } s; + struct cvmx_pcieep_cfg028_s cn52xx; + struct 
cvmx_pcieep_cfg028_s cn52xxp1; + struct cvmx_pcieep_cfg028_s cn56xx; + struct cvmx_pcieep_cfg028_s cn56xxp1; +}; + +union cvmx_pcieep_cfg029 { + uint32_t u32; + struct cvmx_pcieep_cfg029_s { + uint32_t reserved_28_31:4; + uint32_t cspls:2; + uint32_t csplv:8; + uint32_t reserved_16_17:2; + uint32_t rber:1; + uint32_t reserved_12_14:3; + uint32_t el1al:3; + uint32_t el0al:3; + uint32_t etfs:1; + uint32_t pfs:2; + uint32_t mpss:3; + } s; + struct cvmx_pcieep_cfg029_s cn52xx; + struct cvmx_pcieep_cfg029_s cn52xxp1; + struct cvmx_pcieep_cfg029_s cn56xx; + struct cvmx_pcieep_cfg029_s cn56xxp1; +}; + +union cvmx_pcieep_cfg030 { + uint32_t u32; + struct cvmx_pcieep_cfg030_s { + uint32_t reserved_22_31:10; + uint32_t tp:1; + uint32_t ap_d:1; + uint32_t ur_d:1; + uint32_t fe_d:1; + uint32_t nfe_d:1; + uint32_t ce_d:1; + uint32_t reserved_15_15:1; + uint32_t mrrs:3; + uint32_t ns_en:1; + uint32_t ap_en:1; + uint32_t pf_en:1; + uint32_t etf_en:1; + uint32_t mps:3; + uint32_t ro_en:1; + uint32_t ur_en:1; + uint32_t fe_en:1; + uint32_t nfe_en:1; + uint32_t ce_en:1; + } s; + struct cvmx_pcieep_cfg030_s cn52xx; + struct cvmx_pcieep_cfg030_s cn52xxp1; + struct cvmx_pcieep_cfg030_s cn56xx; + struct cvmx_pcieep_cfg030_s cn56xxp1; +}; + +union cvmx_pcieep_cfg031 { + uint32_t u32; + struct cvmx_pcieep_cfg031_s { + uint32_t pnum:8; + uint32_t reserved_22_23:2; + uint32_t lbnc:1; + uint32_t dllarc:1; + uint32_t sderc:1; + uint32_t cpm:1; + uint32_t l1el:3; + uint32_t l0el:3; + uint32_t aslpms:2; + uint32_t mlw:6; + uint32_t mls:4; + } s; + struct cvmx_pcieep_cfg031_s cn52xx; + struct cvmx_pcieep_cfg031_s cn52xxp1; + struct cvmx_pcieep_cfg031_s cn56xx; + struct cvmx_pcieep_cfg031_s cn56xxp1; +}; + +union cvmx_pcieep_cfg032 { + uint32_t u32; + struct cvmx_pcieep_cfg032_s { + uint32_t reserved_30_31:2; + uint32_t dlla:1; + uint32_t scc:1; + uint32_t lt:1; + uint32_t reserved_26_26:1; + uint32_t nlw:6; + uint32_t ls:4; + uint32_t reserved_10_15:6; + uint32_t hawd:1; + uint32_t ecpm:1; + uint32_t es:1; + uint32_t ccc:1; + uint32_t rl:1; + uint32_t ld:1; + uint32_t rcb:1; + uint32_t reserved_2_2:1; + uint32_t aslpc:2; + } s; + struct cvmx_pcieep_cfg032_s cn52xx; + struct cvmx_pcieep_cfg032_s cn52xxp1; + struct cvmx_pcieep_cfg032_s cn56xx; + struct cvmx_pcieep_cfg032_s cn56xxp1; +}; + +union cvmx_pcieep_cfg033 { + uint32_t u32; + struct cvmx_pcieep_cfg033_s { + uint32_t ps_num:13; + uint32_t nccs:1; + uint32_t emip:1; + uint32_t sp_ls:2; + uint32_t sp_lv:8; + uint32_t hp_c:1; + uint32_t hp_s:1; + uint32_t pip:1; + uint32_t aip:1; + uint32_t mrlsp:1; + uint32_t pcp:1; + uint32_t abp:1; + } s; + struct cvmx_pcieep_cfg033_s cn52xx; + struct cvmx_pcieep_cfg033_s cn52xxp1; + struct cvmx_pcieep_cfg033_s cn56xx; + struct cvmx_pcieep_cfg033_s cn56xxp1; +}; + +union cvmx_pcieep_cfg034 { + uint32_t u32; + struct cvmx_pcieep_cfg034_s { + uint32_t reserved_25_31:7; + uint32_t dlls_c:1; + uint32_t emis:1; + uint32_t pds:1; + uint32_t mrlss:1; + uint32_t ccint_d:1; + uint32_t pd_c:1; + uint32_t mrls_c:1; + uint32_t pf_d:1; + uint32_t abp_d:1; + uint32_t reserved_13_15:3; + uint32_t dlls_en:1; + uint32_t emic:1; + uint32_t pcc:1; + uint32_t pic:2; + uint32_t aic:2; + uint32_t hpint_en:1; + uint32_t ccint_en:1; + uint32_t pd_en:1; + uint32_t mrls_en:1; + uint32_t pf_en:1; + uint32_t abp_en:1; + } s; + struct cvmx_pcieep_cfg034_s cn52xx; + struct cvmx_pcieep_cfg034_s cn52xxp1; + struct cvmx_pcieep_cfg034_s cn56xx; + struct cvmx_pcieep_cfg034_s cn56xxp1; +}; + +union cvmx_pcieep_cfg037 { + uint32_t u32; + struct cvmx_pcieep_cfg037_s { 
+ uint32_t reserved_5_31:27; + uint32_t ctds:1; + uint32_t ctrs:4; + } s; + struct cvmx_pcieep_cfg037_s cn52xx; + struct cvmx_pcieep_cfg037_s cn52xxp1; + struct cvmx_pcieep_cfg037_s cn56xx; + struct cvmx_pcieep_cfg037_s cn56xxp1; +}; + +union cvmx_pcieep_cfg038 { + uint32_t u32; + struct cvmx_pcieep_cfg038_s { + uint32_t reserved_5_31:27; + uint32_t ctd:1; + uint32_t ctv:4; + } s; + struct cvmx_pcieep_cfg038_s cn52xx; + struct cvmx_pcieep_cfg038_s cn52xxp1; + struct cvmx_pcieep_cfg038_s cn56xx; + struct cvmx_pcieep_cfg038_s cn56xxp1; +}; + +union cvmx_pcieep_cfg039 { + uint32_t u32; + struct cvmx_pcieep_cfg039_s { + uint32_t reserved_0_31:32; + } s; + struct cvmx_pcieep_cfg039_s cn52xx; + struct cvmx_pcieep_cfg039_s cn52xxp1; + struct cvmx_pcieep_cfg039_s cn56xx; + struct cvmx_pcieep_cfg039_s cn56xxp1; +}; + +union cvmx_pcieep_cfg040 { + uint32_t u32; + struct cvmx_pcieep_cfg040_s { + uint32_t reserved_0_31:32; + } s; + struct cvmx_pcieep_cfg040_s cn52xx; + struct cvmx_pcieep_cfg040_s cn52xxp1; + struct cvmx_pcieep_cfg040_s cn56xx; + struct cvmx_pcieep_cfg040_s cn56xxp1; +}; + +union cvmx_pcieep_cfg041 { + uint32_t u32; + struct cvmx_pcieep_cfg041_s { + uint32_t reserved_0_31:32; + } s; + struct cvmx_pcieep_cfg041_s cn52xx; + struct cvmx_pcieep_cfg041_s cn52xxp1; + struct cvmx_pcieep_cfg041_s cn56xx; + struct cvmx_pcieep_cfg041_s cn56xxp1; +}; + +union cvmx_pcieep_cfg042 { + uint32_t u32; + struct cvmx_pcieep_cfg042_s { + uint32_t reserved_0_31:32; + } s; + struct cvmx_pcieep_cfg042_s cn52xx; + struct cvmx_pcieep_cfg042_s cn52xxp1; + struct cvmx_pcieep_cfg042_s cn56xx; + struct cvmx_pcieep_cfg042_s cn56xxp1; +}; + +union cvmx_pcieep_cfg064 { + uint32_t u32; + struct cvmx_pcieep_cfg064_s { + uint32_t nco:12; + uint32_t cv:4; + uint32_t pcieec:16; + } s; + struct cvmx_pcieep_cfg064_s cn52xx; + struct cvmx_pcieep_cfg064_s cn52xxp1; + struct cvmx_pcieep_cfg064_s cn56xx; + struct cvmx_pcieep_cfg064_s cn56xxp1; +}; + +union cvmx_pcieep_cfg065 { + uint32_t u32; + struct cvmx_pcieep_cfg065_s { + uint32_t reserved_21_31:11; + uint32_t ures:1; + uint32_t ecrces:1; + uint32_t mtlps:1; + uint32_t ros:1; + uint32_t ucs:1; + uint32_t cas:1; + uint32_t cts:1; + uint32_t fcpes:1; + uint32_t ptlps:1; + uint32_t reserved_6_11:6; + uint32_t sdes:1; + uint32_t dlpes:1; + uint32_t reserved_0_3:4; + } s; + struct cvmx_pcieep_cfg065_s cn52xx; + struct cvmx_pcieep_cfg065_s cn52xxp1; + struct cvmx_pcieep_cfg065_s cn56xx; + struct cvmx_pcieep_cfg065_s cn56xxp1; +}; + +union cvmx_pcieep_cfg066 { + uint32_t u32; + struct cvmx_pcieep_cfg066_s { + uint32_t reserved_21_31:11; + uint32_t urem:1; + uint32_t ecrcem:1; + uint32_t mtlpm:1; + uint32_t rom:1; + uint32_t ucm:1; + uint32_t cam:1; + uint32_t ctm:1; + uint32_t fcpem:1; + uint32_t ptlpm:1; + uint32_t reserved_6_11:6; + uint32_t sdem:1; + uint32_t dlpem:1; + uint32_t reserved_0_3:4; + } s; + struct cvmx_pcieep_cfg066_s cn52xx; + struct cvmx_pcieep_cfg066_s cn52xxp1; + struct cvmx_pcieep_cfg066_s cn56xx; + struct cvmx_pcieep_cfg066_s cn56xxp1; +}; + +union cvmx_pcieep_cfg067 { + uint32_t u32; + struct cvmx_pcieep_cfg067_s { + uint32_t reserved_21_31:11; + uint32_t ures:1; + uint32_t ecrces:1; + uint32_t mtlps:1; + uint32_t ros:1; + uint32_t ucs:1; + uint32_t cas:1; + uint32_t cts:1; + uint32_t fcpes:1; + uint32_t ptlps:1; + uint32_t reserved_6_11:6; + uint32_t sdes:1; + uint32_t dlpes:1; + uint32_t reserved_0_3:4; + } s; + struct cvmx_pcieep_cfg067_s cn52xx; + struct cvmx_pcieep_cfg067_s cn52xxp1; + struct cvmx_pcieep_cfg067_s cn56xx; + struct cvmx_pcieep_cfg067_s 
cn56xxp1; +}; + +union cvmx_pcieep_cfg068 { + uint32_t u32; + struct cvmx_pcieep_cfg068_s { + uint32_t reserved_14_31:18; + uint32_t anfes:1; + uint32_t rtts:1; + uint32_t reserved_9_11:3; + uint32_t rnrs:1; + uint32_t bdllps:1; + uint32_t btlps:1; + uint32_t reserved_1_5:5; + uint32_t res:1; + } s; + struct cvmx_pcieep_cfg068_s cn52xx; + struct cvmx_pcieep_cfg068_s cn52xxp1; + struct cvmx_pcieep_cfg068_s cn56xx; + struct cvmx_pcieep_cfg068_s cn56xxp1; +}; + +union cvmx_pcieep_cfg069 { + uint32_t u32; + struct cvmx_pcieep_cfg069_s { + uint32_t reserved_14_31:18; + uint32_t anfem:1; + uint32_t rttm:1; + uint32_t reserved_9_11:3; + uint32_t rnrm:1; + uint32_t bdllpm:1; + uint32_t btlpm:1; + uint32_t reserved_1_5:5; + uint32_t rem:1; + } s; + struct cvmx_pcieep_cfg069_s cn52xx; + struct cvmx_pcieep_cfg069_s cn52xxp1; + struct cvmx_pcieep_cfg069_s cn56xx; + struct cvmx_pcieep_cfg069_s cn56xxp1; +}; + +union cvmx_pcieep_cfg070 { + uint32_t u32; + struct cvmx_pcieep_cfg070_s { + uint32_t reserved_9_31:23; + uint32_t ce:1; + uint32_t cc:1; + uint32_t ge:1; + uint32_t gc:1; + uint32_t fep:5; + } s; + struct cvmx_pcieep_cfg070_s cn52xx; + struct cvmx_pcieep_cfg070_s cn52xxp1; + struct cvmx_pcieep_cfg070_s cn56xx; + struct cvmx_pcieep_cfg070_s cn56xxp1; +}; + +union cvmx_pcieep_cfg071 { + uint32_t u32; + struct cvmx_pcieep_cfg071_s { + uint32_t dword1:32; + } s; + struct cvmx_pcieep_cfg071_s cn52xx; + struct cvmx_pcieep_cfg071_s cn52xxp1; + struct cvmx_pcieep_cfg071_s cn56xx; + struct cvmx_pcieep_cfg071_s cn56xxp1; +}; + +union cvmx_pcieep_cfg072 { + uint32_t u32; + struct cvmx_pcieep_cfg072_s { + uint32_t dword2:32; + } s; + struct cvmx_pcieep_cfg072_s cn52xx; + struct cvmx_pcieep_cfg072_s cn52xxp1; + struct cvmx_pcieep_cfg072_s cn56xx; + struct cvmx_pcieep_cfg072_s cn56xxp1; +}; + +union cvmx_pcieep_cfg073 { + uint32_t u32; + struct cvmx_pcieep_cfg073_s { + uint32_t dword3:32; + } s; + struct cvmx_pcieep_cfg073_s cn52xx; + struct cvmx_pcieep_cfg073_s cn52xxp1; + struct cvmx_pcieep_cfg073_s cn56xx; + struct cvmx_pcieep_cfg073_s cn56xxp1; +}; + +union cvmx_pcieep_cfg074 { + uint32_t u32; + struct cvmx_pcieep_cfg074_s { + uint32_t dword4:32; + } s; + struct cvmx_pcieep_cfg074_s cn52xx; + struct cvmx_pcieep_cfg074_s cn52xxp1; + struct cvmx_pcieep_cfg074_s cn56xx; + struct cvmx_pcieep_cfg074_s cn56xxp1; +}; + +union cvmx_pcieep_cfg448 { + uint32_t u32; + struct cvmx_pcieep_cfg448_s { + uint32_t rtl:16; + uint32_t rtltl:16; + } s; + struct cvmx_pcieep_cfg448_s cn52xx; + struct cvmx_pcieep_cfg448_s cn52xxp1; + struct cvmx_pcieep_cfg448_s cn56xx; + struct cvmx_pcieep_cfg448_s cn56xxp1; +}; + +union cvmx_pcieep_cfg449 { + uint32_t u32; + struct cvmx_pcieep_cfg449_s { + uint32_t omr:32; + } s; + struct cvmx_pcieep_cfg449_s cn52xx; + struct cvmx_pcieep_cfg449_s cn52xxp1; + struct cvmx_pcieep_cfg449_s cn56xx; + struct cvmx_pcieep_cfg449_s cn56xxp1; +}; + +union cvmx_pcieep_cfg450 { + uint32_t u32; + struct cvmx_pcieep_cfg450_s { + uint32_t lpec:8; + uint32_t reserved_22_23:2; + uint32_t link_state:6; + uint32_t force_link:1; + uint32_t reserved_8_14:7; + uint32_t link_num:8; + } s; + struct cvmx_pcieep_cfg450_s cn52xx; + struct cvmx_pcieep_cfg450_s cn52xxp1; + struct cvmx_pcieep_cfg450_s cn56xx; + struct cvmx_pcieep_cfg450_s cn56xxp1; +}; + +union cvmx_pcieep_cfg451 { + uint32_t u32; + struct cvmx_pcieep_cfg451_s { + uint32_t reserved_30_31:2; + uint32_t l1el:3; + uint32_t l0el:3; + uint32_t n_fts_cc:8; + uint32_t n_fts:8; + uint32_t ack_freq:8; + } s; + struct cvmx_pcieep_cfg451_s cn52xx; + struct 
cvmx_pcieep_cfg451_s cn52xxp1; + struct cvmx_pcieep_cfg451_s cn56xx; + struct cvmx_pcieep_cfg451_s cn56xxp1; +}; + +union cvmx_pcieep_cfg452 { + uint32_t u32; + struct cvmx_pcieep_cfg452_s { + uint32_t reserved_26_31:6; + uint32_t eccrc:1; + uint32_t reserved_22_24:3; + uint32_t lme:6; + uint32_t reserved_8_15:8; + uint32_t flm:1; + uint32_t reserved_6_6:1; + uint32_t dllle:1; + uint32_t reserved_4_4:1; + uint32_t ra:1; + uint32_t le:1; + uint32_t sd:1; + uint32_t omr:1; + } s; + struct cvmx_pcieep_cfg452_s cn52xx; + struct cvmx_pcieep_cfg452_s cn52xxp1; + struct cvmx_pcieep_cfg452_s cn56xx; + struct cvmx_pcieep_cfg452_s cn56xxp1; +}; + +union cvmx_pcieep_cfg453 { + uint32_t u32; + struct cvmx_pcieep_cfg453_s { + uint32_t dlld:1; + uint32_t reserved_26_30:5; + uint32_t ack_nak:1; + uint32_t fcd:1; + uint32_t ilst:24; + } s; + struct cvmx_pcieep_cfg453_s cn52xx; + struct cvmx_pcieep_cfg453_s cn52xxp1; + struct cvmx_pcieep_cfg453_s cn56xx; + struct cvmx_pcieep_cfg453_s cn56xxp1; +}; + +union cvmx_pcieep_cfg454 { + uint32_t u32; + struct cvmx_pcieep_cfg454_s { + uint32_t reserved_29_31:3; + uint32_t tmfcwt:5; + uint32_t tmanlt:5; + uint32_t tmrt:5; + uint32_t reserved_11_13:3; + uint32_t nskps:3; + uint32_t reserved_4_7:4; + uint32_t ntss:4; + } s; + struct cvmx_pcieep_cfg454_s cn52xx; + struct cvmx_pcieep_cfg454_s cn52xxp1; + struct cvmx_pcieep_cfg454_s cn56xx; + struct cvmx_pcieep_cfg454_s cn56xxp1; +}; + +union cvmx_pcieep_cfg455 { + uint32_t u32; + struct cvmx_pcieep_cfg455_s { + uint32_t m_cfg0_filt:1; + uint32_t m_io_filt:1; + uint32_t msg_ctrl:1; + uint32_t m_cpl_ecrc_filt:1; + uint32_t m_ecrc_filt:1; + uint32_t m_cpl_len_err:1; + uint32_t m_cpl_attr_err:1; + uint32_t m_cpl_tc_err:1; + uint32_t m_cpl_fun_err:1; + uint32_t m_cpl_rid_err:1; + uint32_t m_cpl_tag_err:1; + uint32_t m_lk_filt:1; + uint32_t m_cfg1_filt:1; + uint32_t m_bar_match:1; + uint32_t m_pois_filt:1; + uint32_t m_fun:1; + uint32_t dfcwt:1; + uint32_t reserved_11_14:4; + uint32_t skpiv:11; + } s; + struct cvmx_pcieep_cfg455_s cn52xx; + struct cvmx_pcieep_cfg455_s cn52xxp1; + struct cvmx_pcieep_cfg455_s cn56xx; + struct cvmx_pcieep_cfg455_s cn56xxp1; +}; + +union cvmx_pcieep_cfg456 { + uint32_t u32; + struct cvmx_pcieep_cfg456_s { + uint32_t reserved_2_31:30; + uint32_t m_vend1_drp:1; + uint32_t m_vend0_drp:1; + } s; + struct cvmx_pcieep_cfg456_s cn52xx; + struct cvmx_pcieep_cfg456_s cn52xxp1; + struct cvmx_pcieep_cfg456_s cn56xx; + struct cvmx_pcieep_cfg456_s cn56xxp1; +}; + +union cvmx_pcieep_cfg458 { + uint32_t u32; + struct cvmx_pcieep_cfg458_s { + uint32_t dbg_info_l32:32; + } s; + struct cvmx_pcieep_cfg458_s cn52xx; + struct cvmx_pcieep_cfg458_s cn52xxp1; + struct cvmx_pcieep_cfg458_s cn56xx; + struct cvmx_pcieep_cfg458_s cn56xxp1; +}; + +union cvmx_pcieep_cfg459 { + uint32_t u32; + struct cvmx_pcieep_cfg459_s { + uint32_t dbg_info_u32:32; + } s; + struct cvmx_pcieep_cfg459_s cn52xx; + struct cvmx_pcieep_cfg459_s cn52xxp1; + struct cvmx_pcieep_cfg459_s cn56xx; + struct cvmx_pcieep_cfg459_s cn56xxp1; +}; + +union cvmx_pcieep_cfg460 { + uint32_t u32; + struct cvmx_pcieep_cfg460_s { + uint32_t reserved_20_31:12; + uint32_t tphfcc:8; + uint32_t tpdfcc:12; + } s; + struct cvmx_pcieep_cfg460_s cn52xx; + struct cvmx_pcieep_cfg460_s cn52xxp1; + struct cvmx_pcieep_cfg460_s cn56xx; + struct cvmx_pcieep_cfg460_s cn56xxp1; +}; + +union cvmx_pcieep_cfg461 { + uint32_t u32; + struct cvmx_pcieep_cfg461_s { + uint32_t reserved_20_31:12; + uint32_t tchfcc:8; + uint32_t tcdfcc:12; + } s; + struct cvmx_pcieep_cfg461_s cn52xx; + struct 
cvmx_pcieep_cfg461_s cn52xxp1; + struct cvmx_pcieep_cfg461_s cn56xx; + struct cvmx_pcieep_cfg461_s cn56xxp1; +}; + +union cvmx_pcieep_cfg462 { + uint32_t u32; + struct cvmx_pcieep_cfg462_s { + uint32_t reserved_20_31:12; + uint32_t tchfcc:8; + uint32_t tcdfcc:12; + } s; + struct cvmx_pcieep_cfg462_s cn52xx; + struct cvmx_pcieep_cfg462_s cn52xxp1; + struct cvmx_pcieep_cfg462_s cn56xx; + struct cvmx_pcieep_cfg462_s cn56xxp1; +}; + +union cvmx_pcieep_cfg463 { + uint32_t u32; + struct cvmx_pcieep_cfg463_s { + uint32_t reserved_3_31:29; + uint32_t rqne:1; + uint32_t trbne:1; + uint32_t rtlpfccnr:1; + } s; + struct cvmx_pcieep_cfg463_s cn52xx; + struct cvmx_pcieep_cfg463_s cn52xxp1; + struct cvmx_pcieep_cfg463_s cn56xx; + struct cvmx_pcieep_cfg463_s cn56xxp1; +}; + +union cvmx_pcieep_cfg464 { + uint32_t u32; + struct cvmx_pcieep_cfg464_s { + uint32_t wrr_vc3:8; + uint32_t wrr_vc2:8; + uint32_t wrr_vc1:8; + uint32_t wrr_vc0:8; + } s; + struct cvmx_pcieep_cfg464_s cn52xx; + struct cvmx_pcieep_cfg464_s cn52xxp1; + struct cvmx_pcieep_cfg464_s cn56xx; + struct cvmx_pcieep_cfg464_s cn56xxp1; +}; + +union cvmx_pcieep_cfg465 { + uint32_t u32; + struct cvmx_pcieep_cfg465_s { + uint32_t wrr_vc7:8; + uint32_t wrr_vc6:8; + uint32_t wrr_vc5:8; + uint32_t wrr_vc4:8; + } s; + struct cvmx_pcieep_cfg465_s cn52xx; + struct cvmx_pcieep_cfg465_s cn52xxp1; + struct cvmx_pcieep_cfg465_s cn56xx; + struct cvmx_pcieep_cfg465_s cn56xxp1; +}; + +union cvmx_pcieep_cfg466 { + uint32_t u32; + struct cvmx_pcieep_cfg466_s { + uint32_t rx_queue_order:1; + uint32_t type_ordering:1; + uint32_t reserved_24_29:6; + uint32_t queue_mode:3; + uint32_t reserved_20_20:1; + uint32_t header_credits:8; + uint32_t data_credits:12; + } s; + struct cvmx_pcieep_cfg466_s cn52xx; + struct cvmx_pcieep_cfg466_s cn52xxp1; + struct cvmx_pcieep_cfg466_s cn56xx; + struct cvmx_pcieep_cfg466_s cn56xxp1; +}; + +union cvmx_pcieep_cfg467 { + uint32_t u32; + struct cvmx_pcieep_cfg467_s { + uint32_t reserved_24_31:8; + uint32_t queue_mode:3; + uint32_t reserved_20_20:1; + uint32_t header_credits:8; + uint32_t data_credits:12; + } s; + struct cvmx_pcieep_cfg467_s cn52xx; + struct cvmx_pcieep_cfg467_s cn52xxp1; + struct cvmx_pcieep_cfg467_s cn56xx; + struct cvmx_pcieep_cfg467_s cn56xxp1; +}; + +union cvmx_pcieep_cfg468 { + uint32_t u32; + struct cvmx_pcieep_cfg468_s { + uint32_t reserved_24_31:8; + uint32_t queue_mode:3; + uint32_t reserved_20_20:1; + uint32_t header_credits:8; + uint32_t data_credits:12; + } s; + struct cvmx_pcieep_cfg468_s cn52xx; + struct cvmx_pcieep_cfg468_s cn52xxp1; + struct cvmx_pcieep_cfg468_s cn56xx; + struct cvmx_pcieep_cfg468_s cn56xxp1; +}; + +union cvmx_pcieep_cfg490 { + uint32_t u32; + struct cvmx_pcieep_cfg490_s { + uint32_t reserved_26_31:6; + uint32_t header_depth:10; + uint32_t reserved_14_15:2; + uint32_t data_depth:14; + } s; + struct cvmx_pcieep_cfg490_s cn52xx; + struct cvmx_pcieep_cfg490_s cn52xxp1; + struct cvmx_pcieep_cfg490_s cn56xx; + struct cvmx_pcieep_cfg490_s cn56xxp1; +}; + +union cvmx_pcieep_cfg491 { + uint32_t u32; + struct cvmx_pcieep_cfg491_s { + uint32_t reserved_26_31:6; + uint32_t header_depth:10; + uint32_t reserved_14_15:2; + uint32_t data_depth:14; + } s; + struct cvmx_pcieep_cfg491_s cn52xx; + struct cvmx_pcieep_cfg491_s cn52xxp1; + struct cvmx_pcieep_cfg491_s cn56xx; + struct cvmx_pcieep_cfg491_s cn56xxp1; +}; + +union cvmx_pcieep_cfg492 { + uint32_t u32; + struct cvmx_pcieep_cfg492_s { + uint32_t reserved_26_31:6; + uint32_t header_depth:10; + uint32_t reserved_14_15:2; + uint32_t data_depth:14; + } 
s; + struct cvmx_pcieep_cfg492_s cn52xx; + struct cvmx_pcieep_cfg492_s cn52xxp1; + struct cvmx_pcieep_cfg492_s cn56xx; + struct cvmx_pcieep_cfg492_s cn56xxp1; +}; + +union cvmx_pcieep_cfg516 { + uint32_t u32; + struct cvmx_pcieep_cfg516_s { + uint32_t phy_stat:32; + } s; + struct cvmx_pcieep_cfg516_s cn52xx; + struct cvmx_pcieep_cfg516_s cn52xxp1; + struct cvmx_pcieep_cfg516_s cn56xx; + struct cvmx_pcieep_cfg516_s cn56xxp1; +}; + +union cvmx_pcieep_cfg517 { + uint32_t u32; + struct cvmx_pcieep_cfg517_s { + uint32_t phy_ctrl:32; + } s; + struct cvmx_pcieep_cfg517_s cn52xx; + struct cvmx_pcieep_cfg517_s cn52xxp1; + struct cvmx_pcieep_cfg517_s cn56xx; + struct cvmx_pcieep_cfg517_s cn56xxp1; +}; + +#endif diff --git a/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h b/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h new file mode 100644 index 0000000..75574c9 --- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h @@ -0,0 +1,1397 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. 
+ * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_PCIERCX_DEFS_H__ +#define __CVMX_PCIERCX_DEFS_H__ + +#define CVMX_PCIERCX_CFG000(offset) \ + (0x0000000000000000ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG001(offset) \ + (0x0000000000000004ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG002(offset) \ + (0x0000000000000008ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG003(offset) \ + (0x000000000000000Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG004(offset) \ + (0x0000000000000010ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG005(offset) \ + (0x0000000000000014ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG006(offset) \ + (0x0000000000000018ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG007(offset) \ + (0x000000000000001Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG008(offset) \ + (0x0000000000000020ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG009(offset) \ + (0x0000000000000024ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG010(offset) \ + (0x0000000000000028ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG011(offset) \ + (0x000000000000002Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG012(offset) \ + (0x0000000000000030ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG013(offset) \ + (0x0000000000000034ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG014(offset) \ + (0x0000000000000038ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG015(offset) \ + (0x000000000000003Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG016(offset) \ + (0x0000000000000040ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG017(offset) \ + (0x0000000000000044ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG020(offset) \ + (0x0000000000000050ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG021(offset) \ + (0x0000000000000054ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG022(offset) \ + (0x0000000000000058ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG023(offset) \ + (0x000000000000005Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG028(offset) \ + (0x0000000000000070ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG029(offset) \ + (0x0000000000000074ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG030(offset) \ + (0x0000000000000078ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG031(offset) \ + (0x000000000000007Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG032(offset) \ + (0x0000000000000080ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG033(offset) \ + (0x0000000000000084ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG034(offset) \ + (0x0000000000000088ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG035(offset) \ + (0x000000000000008Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG036(offset) \ + (0x0000000000000090ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG037(offset) \ + (0x0000000000000094ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG038(offset) \ + (0x0000000000000098ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG039(offset) \ + (0x000000000000009Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG040(offset) \ + (0x00000000000000A0ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG041(offset) \ + (0x00000000000000A4ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG042(offset) \ + (0x00000000000000A8ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG064(offset) \ + (0x0000000000000100ull + (((offset) & 1) * 0)) +#define 
CVMX_PCIERCX_CFG065(offset) \ + (0x0000000000000104ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG066(offset) \ + (0x0000000000000108ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG067(offset) \ + (0x000000000000010Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG068(offset) \ + (0x0000000000000110ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG069(offset) \ + (0x0000000000000114ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG070(offset) \ + (0x0000000000000118ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG071(offset) \ + (0x000000000000011Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG072(offset) \ + (0x0000000000000120ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG073(offset) \ + (0x0000000000000124ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG074(offset) \ + (0x0000000000000128ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG075(offset) \ + (0x000000000000012Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG076(offset) \ + (0x0000000000000130ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG077(offset) \ + (0x0000000000000134ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG448(offset) \ + (0x0000000000000700ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG449(offset) \ + (0x0000000000000704ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG450(offset) \ + (0x0000000000000708ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG451(offset) \ + (0x000000000000070Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG452(offset) \ + (0x0000000000000710ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG453(offset) \ + (0x0000000000000714ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG454(offset) \ + (0x0000000000000718ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG455(offset) \ + (0x000000000000071Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG456(offset) \ + (0x0000000000000720ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG458(offset) \ + (0x0000000000000728ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG459(offset) \ + (0x000000000000072Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG460(offset) \ + (0x0000000000000730ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG461(offset) \ + (0x0000000000000734ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG462(offset) \ + (0x0000000000000738ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG463(offset) \ + (0x000000000000073Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG464(offset) \ + (0x0000000000000740ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG465(offset) \ + (0x0000000000000744ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG466(offset) \ + (0x0000000000000748ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG467(offset) \ + (0x000000000000074Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG468(offset) \ + (0x0000000000000750ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG490(offset) \ + (0x00000000000007A8ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG491(offset) \ + (0x00000000000007ACull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG492(offset) \ + (0x00000000000007B0ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG516(offset) \ + (0x0000000000000810ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG517(offset) \ + (0x0000000000000814ull + (((offset) & 1) * 0)) + +union cvmx_pciercx_cfg000 { + uint32_t u32; + struct cvmx_pciercx_cfg000_s { + uint32_t devid:16; + uint32_t vendid:16; + } s; + struct cvmx_pciercx_cfg000_s cn52xx; + struct cvmx_pciercx_cfg000_s cn52xxp1; + struct 
cvmx_pciercx_cfg000_s cn56xx; + struct cvmx_pciercx_cfg000_s cn56xxp1; +}; + +union cvmx_pciercx_cfg001 { + uint32_t u32; + struct cvmx_pciercx_cfg001_s { + uint32_t dpe:1; + uint32_t sse:1; + uint32_t rma:1; + uint32_t rta:1; + uint32_t sta:1; + uint32_t devt:2; + uint32_t mdpe:1; + uint32_t fbb:1; + uint32_t reserved_22_22:1; + uint32_t m66:1; + uint32_t cl:1; + uint32_t i_stat:1; + uint32_t reserved_11_18:8; + uint32_t i_dis:1; + uint32_t fbbe:1; + uint32_t see:1; + uint32_t ids_wcc:1; + uint32_t per:1; + uint32_t vps:1; + uint32_t mwice:1; + uint32_t scse:1; + uint32_t me:1; + uint32_t msae:1; + uint32_t isae:1; + } s; + struct cvmx_pciercx_cfg001_s cn52xx; + struct cvmx_pciercx_cfg001_s cn52xxp1; + struct cvmx_pciercx_cfg001_s cn56xx; + struct cvmx_pciercx_cfg001_s cn56xxp1; +}; + +union cvmx_pciercx_cfg002 { + uint32_t u32; + struct cvmx_pciercx_cfg002_s { + uint32_t bcc:8; + uint32_t sc:8; + uint32_t pi:8; + uint32_t rid:8; + } s; + struct cvmx_pciercx_cfg002_s cn52xx; + struct cvmx_pciercx_cfg002_s cn52xxp1; + struct cvmx_pciercx_cfg002_s cn56xx; + struct cvmx_pciercx_cfg002_s cn56xxp1; +}; + +union cvmx_pciercx_cfg003 { + uint32_t u32; + struct cvmx_pciercx_cfg003_s { + uint32_t bist:8; + uint32_t mfd:1; + uint32_t chf:7; + uint32_t lt:8; + uint32_t cls:8; + } s; + struct cvmx_pciercx_cfg003_s cn52xx; + struct cvmx_pciercx_cfg003_s cn52xxp1; + struct cvmx_pciercx_cfg003_s cn56xx; + struct cvmx_pciercx_cfg003_s cn56xxp1; +}; + +union cvmx_pciercx_cfg004 { + uint32_t u32; + struct cvmx_pciercx_cfg004_s { + uint32_t reserved_0_31:32; + } s; + struct cvmx_pciercx_cfg004_s cn52xx; + struct cvmx_pciercx_cfg004_s cn52xxp1; + struct cvmx_pciercx_cfg004_s cn56xx; + struct cvmx_pciercx_cfg004_s cn56xxp1; +}; + +union cvmx_pciercx_cfg005 { + uint32_t u32; + struct cvmx_pciercx_cfg005_s { + uint32_t reserved_0_31:32; + } s; + struct cvmx_pciercx_cfg005_s cn52xx; + struct cvmx_pciercx_cfg005_s cn52xxp1; + struct cvmx_pciercx_cfg005_s cn56xx; + struct cvmx_pciercx_cfg005_s cn56xxp1; +}; + +union cvmx_pciercx_cfg006 { + uint32_t u32; + struct cvmx_pciercx_cfg006_s { + uint32_t slt:8; + uint32_t subbnum:8; + uint32_t sbnum:8; + uint32_t pbnum:8; + } s; + struct cvmx_pciercx_cfg006_s cn52xx; + struct cvmx_pciercx_cfg006_s cn52xxp1; + struct cvmx_pciercx_cfg006_s cn56xx; + struct cvmx_pciercx_cfg006_s cn56xxp1; +}; + +union cvmx_pciercx_cfg007 { + uint32_t u32; + struct cvmx_pciercx_cfg007_s { + uint32_t dpe:1; + uint32_t sse:1; + uint32_t rma:1; + uint32_t rta:1; + uint32_t sta:1; + uint32_t devt:2; + uint32_t mdpe:1; + uint32_t fbb:1; + uint32_t reserved_22_22:1; + uint32_t m66:1; + uint32_t reserved_16_20:5; + uint32_t lio_limi:4; + uint32_t reserved_9_11:3; + uint32_t io32b:1; + uint32_t lio_base:4; + uint32_t reserved_1_3:3; + uint32_t io32a:1; + } s; + struct cvmx_pciercx_cfg007_s cn52xx; + struct cvmx_pciercx_cfg007_s cn52xxp1; + struct cvmx_pciercx_cfg007_s cn56xx; + struct cvmx_pciercx_cfg007_s cn56xxp1; +}; + +union cvmx_pciercx_cfg008 { + uint32_t u32; + struct cvmx_pciercx_cfg008_s { + uint32_t ml_addr:12; + uint32_t reserved_16_19:4; + uint32_t mb_addr:12; + uint32_t reserved_0_3:4; + } s; + struct cvmx_pciercx_cfg008_s cn52xx; + struct cvmx_pciercx_cfg008_s cn52xxp1; + struct cvmx_pciercx_cfg008_s cn56xx; + struct cvmx_pciercx_cfg008_s cn56xxp1; +}; + +union cvmx_pciercx_cfg009 { + uint32_t u32; + struct cvmx_pciercx_cfg009_s { + uint32_t lmem_limit:12; + uint32_t reserved_17_19:3; + uint32_t mem64b:1; + uint32_t lmem_base:12; + uint32_t reserved_1_3:3; + uint32_t mem64a:1; + } s; + 
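+	/* Per-model views of CFG009; all supported chips use the common layout. */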
struct cvmx_pciercx_cfg009_s cn52xx; + struct cvmx_pciercx_cfg009_s cn52xxp1; + struct cvmx_pciercx_cfg009_s cn56xx; + struct cvmx_pciercx_cfg009_s cn56xxp1; +}; + +union cvmx_pciercx_cfg010 { + uint32_t u32; + struct cvmx_pciercx_cfg010_s { + uint32_t umem_base:32; + } s; + struct cvmx_pciercx_cfg010_s cn52xx; + struct cvmx_pciercx_cfg010_s cn52xxp1; + struct cvmx_pciercx_cfg010_s cn56xx; + struct cvmx_pciercx_cfg010_s cn56xxp1; +}; + +union cvmx_pciercx_cfg011 { + uint32_t u32; + struct cvmx_pciercx_cfg011_s { + uint32_t umem_limit:32; + } s; + struct cvmx_pciercx_cfg011_s cn52xx; + struct cvmx_pciercx_cfg011_s cn52xxp1; + struct cvmx_pciercx_cfg011_s cn56xx; + struct cvmx_pciercx_cfg011_s cn56xxp1; +}; + +union cvmx_pciercx_cfg012 { + uint32_t u32; + struct cvmx_pciercx_cfg012_s { + uint32_t uio_limit:16; + uint32_t uio_base:16; + } s; + struct cvmx_pciercx_cfg012_s cn52xx; + struct cvmx_pciercx_cfg012_s cn52xxp1; + struct cvmx_pciercx_cfg012_s cn56xx; + struct cvmx_pciercx_cfg012_s cn56xxp1; +}; + +union cvmx_pciercx_cfg013 { + uint32_t u32; + struct cvmx_pciercx_cfg013_s { + uint32_t reserved_8_31:24; + uint32_t cp:8; + } s; + struct cvmx_pciercx_cfg013_s cn52xx; + struct cvmx_pciercx_cfg013_s cn52xxp1; + struct cvmx_pciercx_cfg013_s cn56xx; + struct cvmx_pciercx_cfg013_s cn56xxp1; +}; + +union cvmx_pciercx_cfg014 { + uint32_t u32; + struct cvmx_pciercx_cfg014_s { + uint32_t reserved_0_31:32; + } s; + struct cvmx_pciercx_cfg014_s cn52xx; + struct cvmx_pciercx_cfg014_s cn52xxp1; + struct cvmx_pciercx_cfg014_s cn56xx; + struct cvmx_pciercx_cfg014_s cn56xxp1; +}; + +union cvmx_pciercx_cfg015 { + uint32_t u32; + struct cvmx_pciercx_cfg015_s { + uint32_t reserved_28_31:4; + uint32_t dtsees:1; + uint32_t dts:1; + uint32_t sdt:1; + uint32_t pdt:1; + uint32_t fbbe:1; + uint32_t sbrst:1; + uint32_t mam:1; + uint32_t vga16d:1; + uint32_t vgae:1; + uint32_t isae:1; + uint32_t see:1; + uint32_t pere:1; + uint32_t inta:8; + uint32_t il:8; + } s; + struct cvmx_pciercx_cfg015_s cn52xx; + struct cvmx_pciercx_cfg015_s cn52xxp1; + struct cvmx_pciercx_cfg015_s cn56xx; + struct cvmx_pciercx_cfg015_s cn56xxp1; +}; + +union cvmx_pciercx_cfg016 { + uint32_t u32; + struct cvmx_pciercx_cfg016_s { + uint32_t pmes:5; + uint32_t d2s:1; + uint32_t d1s:1; + uint32_t auxc:3; + uint32_t dsi:1; + uint32_t reserved_20_20:1; + uint32_t pme_clock:1; + uint32_t pmsv:3; + uint32_t ncp:8; + uint32_t pmcid:8; + } s; + struct cvmx_pciercx_cfg016_s cn52xx; + struct cvmx_pciercx_cfg016_s cn52xxp1; + struct cvmx_pciercx_cfg016_s cn56xx; + struct cvmx_pciercx_cfg016_s cn56xxp1; +}; + +union cvmx_pciercx_cfg017 { + uint32_t u32; + struct cvmx_pciercx_cfg017_s { + uint32_t pmdia:8; + uint32_t bpccee:1; + uint32_t bd3h:1; + uint32_t reserved_16_21:6; + uint32_t pmess:1; + uint32_t pmedsia:2; + uint32_t pmds:4; + uint32_t pmeens:1; + uint32_t reserved_4_7:4; + uint32_t nsr:1; + uint32_t reserved_2_2:1; + uint32_t ps:2; + } s; + struct cvmx_pciercx_cfg017_s cn52xx; + struct cvmx_pciercx_cfg017_s cn52xxp1; + struct cvmx_pciercx_cfg017_s cn56xx; + struct cvmx_pciercx_cfg017_s cn56xxp1; +}; + +union cvmx_pciercx_cfg020 { + uint32_t u32; + struct cvmx_pciercx_cfg020_s { + uint32_t reserved_24_31:8; + uint32_t m64:1; + uint32_t mme:3; + uint32_t mmc:3; + uint32_t msien:1; + uint32_t ncp:8; + uint32_t msicid:8; + } s; + struct cvmx_pciercx_cfg020_s cn52xx; + struct cvmx_pciercx_cfg020_s cn52xxp1; + struct cvmx_pciercx_cfg020_s cn56xx; + struct cvmx_pciercx_cfg020_s cn56xxp1; +}; + +union cvmx_pciercx_cfg021 { + uint32_t u32; + struct 
cvmx_pciercx_cfg021_s { + uint32_t lmsi:30; + uint32_t reserved_0_1:2; + } s; + struct cvmx_pciercx_cfg021_s cn52xx; + struct cvmx_pciercx_cfg021_s cn52xxp1; + struct cvmx_pciercx_cfg021_s cn56xx; + struct cvmx_pciercx_cfg021_s cn56xxp1; +}; + +union cvmx_pciercx_cfg022 { + uint32_t u32; + struct cvmx_pciercx_cfg022_s { + uint32_t umsi:32; + } s; + struct cvmx_pciercx_cfg022_s cn52xx; + struct cvmx_pciercx_cfg022_s cn52xxp1; + struct cvmx_pciercx_cfg022_s cn56xx; + struct cvmx_pciercx_cfg022_s cn56xxp1; +}; + +union cvmx_pciercx_cfg023 { + uint32_t u32; + struct cvmx_pciercx_cfg023_s { + uint32_t reserved_16_31:16; + uint32_t msimd:16; + } s; + struct cvmx_pciercx_cfg023_s cn52xx; + struct cvmx_pciercx_cfg023_s cn52xxp1; + struct cvmx_pciercx_cfg023_s cn56xx; + struct cvmx_pciercx_cfg023_s cn56xxp1; +}; + +union cvmx_pciercx_cfg028 { + uint32_t u32; + struct cvmx_pciercx_cfg028_s { + uint32_t reserved_30_31:2; + uint32_t imn:5; + uint32_t si:1; + uint32_t dpt:4; + uint32_t pciecv:4; + uint32_t ncp:8; + uint32_t pcieid:8; + } s; + struct cvmx_pciercx_cfg028_s cn52xx; + struct cvmx_pciercx_cfg028_s cn52xxp1; + struct cvmx_pciercx_cfg028_s cn56xx; + struct cvmx_pciercx_cfg028_s cn56xxp1; +}; + +union cvmx_pciercx_cfg029 { + uint32_t u32; + struct cvmx_pciercx_cfg029_s { + uint32_t reserved_28_31:4; + uint32_t cspls:2; + uint32_t csplv:8; + uint32_t reserved_16_17:2; + uint32_t rber:1; + uint32_t reserved_12_14:3; + uint32_t el1al:3; + uint32_t el0al:3; + uint32_t etfs:1; + uint32_t pfs:2; + uint32_t mpss:3; + } s; + struct cvmx_pciercx_cfg029_s cn52xx; + struct cvmx_pciercx_cfg029_s cn52xxp1; + struct cvmx_pciercx_cfg029_s cn56xx; + struct cvmx_pciercx_cfg029_s cn56xxp1; +}; + +union cvmx_pciercx_cfg030 { + uint32_t u32; + struct cvmx_pciercx_cfg030_s { + uint32_t reserved_22_31:10; + uint32_t tp:1; + uint32_t ap_d:1; + uint32_t ur_d:1; + uint32_t fe_d:1; + uint32_t nfe_d:1; + uint32_t ce_d:1; + uint32_t reserved_15_15:1; + uint32_t mrrs:3; + uint32_t ns_en:1; + uint32_t ap_en:1; + uint32_t pf_en:1; + uint32_t etf_en:1; + uint32_t mps:3; + uint32_t ro_en:1; + uint32_t ur_en:1; + uint32_t fe_en:1; + uint32_t nfe_en:1; + uint32_t ce_en:1; + } s; + struct cvmx_pciercx_cfg030_s cn52xx; + struct cvmx_pciercx_cfg030_s cn52xxp1; + struct cvmx_pciercx_cfg030_s cn56xx; + struct cvmx_pciercx_cfg030_s cn56xxp1; +}; + +union cvmx_pciercx_cfg031 { + uint32_t u32; + struct cvmx_pciercx_cfg031_s { + uint32_t pnum:8; + uint32_t reserved_22_23:2; + uint32_t lbnc:1; + uint32_t dllarc:1; + uint32_t sderc:1; + uint32_t cpm:1; + uint32_t l1el:3; + uint32_t l0el:3; + uint32_t aslpms:2; + uint32_t mlw:6; + uint32_t mls:4; + } s; + struct cvmx_pciercx_cfg031_s cn52xx; + struct cvmx_pciercx_cfg031_s cn52xxp1; + struct cvmx_pciercx_cfg031_s cn56xx; + struct cvmx_pciercx_cfg031_s cn56xxp1; +}; + +union cvmx_pciercx_cfg032 { + uint32_t u32; + struct cvmx_pciercx_cfg032_s { + uint32_t lab:1; + uint32_t lbm:1; + uint32_t dlla:1; + uint32_t scc:1; + uint32_t lt:1; + uint32_t reserved_26_26:1; + uint32_t nlw:6; + uint32_t ls:4; + uint32_t reserved_12_15:4; + uint32_t lab_int_enb:1; + uint32_t lbm_int_enb:1; + uint32_t hawd:1; + uint32_t ecpm:1; + uint32_t es:1; + uint32_t ccc:1; + uint32_t rl:1; + uint32_t ld:1; + uint32_t rcb:1; + uint32_t reserved_2_2:1; + uint32_t aslpc:2; + } s; + struct cvmx_pciercx_cfg032_s cn52xx; + struct cvmx_pciercx_cfg032_s cn52xxp1; + struct cvmx_pciercx_cfg032_s cn56xx; + struct cvmx_pciercx_cfg032_s cn56xxp1; +}; + +union cvmx_pciercx_cfg033 { + uint32_t u32; + struct cvmx_pciercx_cfg033_s { + 
uint32_t ps_num:13; + uint32_t nccs:1; + uint32_t emip:1; + uint32_t sp_ls:2; + uint32_t sp_lv:8; + uint32_t hp_c:1; + uint32_t hp_s:1; + uint32_t pip:1; + uint32_t aip:1; + uint32_t mrlsp:1; + uint32_t pcp:1; + uint32_t abp:1; + } s; + struct cvmx_pciercx_cfg033_s cn52xx; + struct cvmx_pciercx_cfg033_s cn52xxp1; + struct cvmx_pciercx_cfg033_s cn56xx; + struct cvmx_pciercx_cfg033_s cn56xxp1; +}; + +union cvmx_pciercx_cfg034 { + uint32_t u32; + struct cvmx_pciercx_cfg034_s { + uint32_t reserved_25_31:7; + uint32_t dlls_c:1; + uint32_t emis:1; + uint32_t pds:1; + uint32_t mrlss:1; + uint32_t ccint_d:1; + uint32_t pd_c:1; + uint32_t mrls_c:1; + uint32_t pf_d:1; + uint32_t abp_d:1; + uint32_t reserved_13_15:3; + uint32_t dlls_en:1; + uint32_t emic:1; + uint32_t pcc:1; + uint32_t pic:2; + uint32_t aic:2; + uint32_t hpint_en:1; + uint32_t ccint_en:1; + uint32_t pd_en:1; + uint32_t mrls_en:1; + uint32_t pf_en:1; + uint32_t abp_en:1; + } s; + struct cvmx_pciercx_cfg034_s cn52xx; + struct cvmx_pciercx_cfg034_s cn52xxp1; + struct cvmx_pciercx_cfg034_s cn56xx; + struct cvmx_pciercx_cfg034_s cn56xxp1; +}; + +union cvmx_pciercx_cfg035 { + uint32_t u32; + struct cvmx_pciercx_cfg035_s { + uint32_t reserved_17_31:15; + uint32_t crssv:1; + uint32_t reserved_5_15:11; + uint32_t crssve:1; + uint32_t pmeie:1; + uint32_t sefee:1; + uint32_t senfee:1; + uint32_t secee:1; + } s; + struct cvmx_pciercx_cfg035_s cn52xx; + struct cvmx_pciercx_cfg035_s cn52xxp1; + struct cvmx_pciercx_cfg035_s cn56xx; + struct cvmx_pciercx_cfg035_s cn56xxp1; +}; + +union cvmx_pciercx_cfg036 { + uint32_t u32; + struct cvmx_pciercx_cfg036_s { + uint32_t reserved_18_31:14; + uint32_t pme_pend:1; + uint32_t pme_stat:1; + uint32_t pme_rid:16; + } s; + struct cvmx_pciercx_cfg036_s cn52xx; + struct cvmx_pciercx_cfg036_s cn52xxp1; + struct cvmx_pciercx_cfg036_s cn56xx; + struct cvmx_pciercx_cfg036_s cn56xxp1; +}; + +union cvmx_pciercx_cfg037 { + uint32_t u32; + struct cvmx_pciercx_cfg037_s { + uint32_t reserved_5_31:27; + uint32_t ctds:1; + uint32_t ctrs:4; + } s; + struct cvmx_pciercx_cfg037_s cn52xx; + struct cvmx_pciercx_cfg037_s cn52xxp1; + struct cvmx_pciercx_cfg037_s cn56xx; + struct cvmx_pciercx_cfg037_s cn56xxp1; +}; + +union cvmx_pciercx_cfg038 { + uint32_t u32; + struct cvmx_pciercx_cfg038_s { + uint32_t reserved_5_31:27; + uint32_t ctd:1; + uint32_t ctv:4; + } s; + struct cvmx_pciercx_cfg038_s cn52xx; + struct cvmx_pciercx_cfg038_s cn52xxp1; + struct cvmx_pciercx_cfg038_s cn56xx; + struct cvmx_pciercx_cfg038_s cn56xxp1; +}; + +union cvmx_pciercx_cfg039 { + uint32_t u32; + struct cvmx_pciercx_cfg039_s { + uint32_t reserved_0_31:32; + } s; + struct cvmx_pciercx_cfg039_s cn52xx; + struct cvmx_pciercx_cfg039_s cn52xxp1; + struct cvmx_pciercx_cfg039_s cn56xx; + struct cvmx_pciercx_cfg039_s cn56xxp1; +}; + +union cvmx_pciercx_cfg040 { + uint32_t u32; + struct cvmx_pciercx_cfg040_s { + uint32_t reserved_0_31:32; + } s; + struct cvmx_pciercx_cfg040_s cn52xx; + struct cvmx_pciercx_cfg040_s cn52xxp1; + struct cvmx_pciercx_cfg040_s cn56xx; + struct cvmx_pciercx_cfg040_s cn56xxp1; +}; + +union cvmx_pciercx_cfg041 { + uint32_t u32; + struct cvmx_pciercx_cfg041_s { + uint32_t reserved_0_31:32; + } s; + struct cvmx_pciercx_cfg041_s cn52xx; + struct cvmx_pciercx_cfg041_s cn52xxp1; + struct cvmx_pciercx_cfg041_s cn56xx; + struct cvmx_pciercx_cfg041_s cn56xxp1; +}; + +union cvmx_pciercx_cfg042 { + uint32_t u32; + struct cvmx_pciercx_cfg042_s { + uint32_t reserved_0_31:32; + } s; + struct cvmx_pciercx_cfg042_s cn52xx; + struct cvmx_pciercx_cfg042_s 
cn52xxp1; + struct cvmx_pciercx_cfg042_s cn56xx; + struct cvmx_pciercx_cfg042_s cn56xxp1; +}; + +union cvmx_pciercx_cfg064 { + uint32_t u32; + struct cvmx_pciercx_cfg064_s { + uint32_t nco:12; + uint32_t cv:4; + uint32_t pcieec:16; + } s; + struct cvmx_pciercx_cfg064_s cn52xx; + struct cvmx_pciercx_cfg064_s cn52xxp1; + struct cvmx_pciercx_cfg064_s cn56xx; + struct cvmx_pciercx_cfg064_s cn56xxp1; +}; + +union cvmx_pciercx_cfg065 { + uint32_t u32; + struct cvmx_pciercx_cfg065_s { + uint32_t reserved_21_31:11; + uint32_t ures:1; + uint32_t ecrces:1; + uint32_t mtlps:1; + uint32_t ros:1; + uint32_t ucs:1; + uint32_t cas:1; + uint32_t cts:1; + uint32_t fcpes:1; + uint32_t ptlps:1; + uint32_t reserved_6_11:6; + uint32_t sdes:1; + uint32_t dlpes:1; + uint32_t reserved_0_3:4; + } s; + struct cvmx_pciercx_cfg065_s cn52xx; + struct cvmx_pciercx_cfg065_s cn52xxp1; + struct cvmx_pciercx_cfg065_s cn56xx; + struct cvmx_pciercx_cfg065_s cn56xxp1; +}; + +union cvmx_pciercx_cfg066 { + uint32_t u32; + struct cvmx_pciercx_cfg066_s { + uint32_t reserved_21_31:11; + uint32_t urem:1; + uint32_t ecrcem:1; + uint32_t mtlpm:1; + uint32_t rom:1; + uint32_t ucm:1; + uint32_t cam:1; + uint32_t ctm:1; + uint32_t fcpem:1; + uint32_t ptlpm:1; + uint32_t reserved_6_11:6; + uint32_t sdem:1; + uint32_t dlpem:1; + uint32_t reserved_0_3:4; + } s; + struct cvmx_pciercx_cfg066_s cn52xx; + struct cvmx_pciercx_cfg066_s cn52xxp1; + struct cvmx_pciercx_cfg066_s cn56xx; + struct cvmx_pciercx_cfg066_s cn56xxp1; +}; + +union cvmx_pciercx_cfg067 { + uint32_t u32; + struct cvmx_pciercx_cfg067_s { + uint32_t reserved_21_31:11; + uint32_t ures:1; + uint32_t ecrces:1; + uint32_t mtlps:1; + uint32_t ros:1; + uint32_t ucs:1; + uint32_t cas:1; + uint32_t cts:1; + uint32_t fcpes:1; + uint32_t ptlps:1; + uint32_t reserved_6_11:6; + uint32_t sdes:1; + uint32_t dlpes:1; + uint32_t reserved_0_3:4; + } s; + struct cvmx_pciercx_cfg067_s cn52xx; + struct cvmx_pciercx_cfg067_s cn52xxp1; + struct cvmx_pciercx_cfg067_s cn56xx; + struct cvmx_pciercx_cfg067_s cn56xxp1; +}; + +union cvmx_pciercx_cfg068 { + uint32_t u32; + struct cvmx_pciercx_cfg068_s { + uint32_t reserved_14_31:18; + uint32_t anfes:1; + uint32_t rtts:1; + uint32_t reserved_9_11:3; + uint32_t rnrs:1; + uint32_t bdllps:1; + uint32_t btlps:1; + uint32_t reserved_1_5:5; + uint32_t res:1; + } s; + struct cvmx_pciercx_cfg068_s cn52xx; + struct cvmx_pciercx_cfg068_s cn52xxp1; + struct cvmx_pciercx_cfg068_s cn56xx; + struct cvmx_pciercx_cfg068_s cn56xxp1; +}; + +union cvmx_pciercx_cfg069 { + uint32_t u32; + struct cvmx_pciercx_cfg069_s { + uint32_t reserved_14_31:18; + uint32_t anfem:1; + uint32_t rttm:1; + uint32_t reserved_9_11:3; + uint32_t rnrm:1; + uint32_t bdllpm:1; + uint32_t btlpm:1; + uint32_t reserved_1_5:5; + uint32_t rem:1; + } s; + struct cvmx_pciercx_cfg069_s cn52xx; + struct cvmx_pciercx_cfg069_s cn52xxp1; + struct cvmx_pciercx_cfg069_s cn56xx; + struct cvmx_pciercx_cfg069_s cn56xxp1; +}; + +union cvmx_pciercx_cfg070 { + uint32_t u32; + struct cvmx_pciercx_cfg070_s { + uint32_t reserved_9_31:23; + uint32_t ce:1; + uint32_t cc:1; + uint32_t ge:1; + uint32_t gc:1; + uint32_t fep:5; + } s; + struct cvmx_pciercx_cfg070_s cn52xx; + struct cvmx_pciercx_cfg070_s cn52xxp1; + struct cvmx_pciercx_cfg070_s cn56xx; + struct cvmx_pciercx_cfg070_s cn56xxp1; +}; + +union cvmx_pciercx_cfg071 { + uint32_t u32; + struct cvmx_pciercx_cfg071_s { + uint32_t dword1:32; + } s; + struct cvmx_pciercx_cfg071_s cn52xx; + struct cvmx_pciercx_cfg071_s cn52xxp1; + struct cvmx_pciercx_cfg071_s cn56xx; + struct 
cvmx_pciercx_cfg071_s cn56xxp1; +}; + +union cvmx_pciercx_cfg072 { + uint32_t u32; + struct cvmx_pciercx_cfg072_s { + uint32_t dword2:32; + } s; + struct cvmx_pciercx_cfg072_s cn52xx; + struct cvmx_pciercx_cfg072_s cn52xxp1; + struct cvmx_pciercx_cfg072_s cn56xx; + struct cvmx_pciercx_cfg072_s cn56xxp1; +}; + +union cvmx_pciercx_cfg073 { + uint32_t u32; + struct cvmx_pciercx_cfg073_s { + uint32_t dword3:32; + } s; + struct cvmx_pciercx_cfg073_s cn52xx; + struct cvmx_pciercx_cfg073_s cn52xxp1; + struct cvmx_pciercx_cfg073_s cn56xx; + struct cvmx_pciercx_cfg073_s cn56xxp1; +}; + +union cvmx_pciercx_cfg074 { + uint32_t u32; + struct cvmx_pciercx_cfg074_s { + uint32_t dword4:32; + } s; + struct cvmx_pciercx_cfg074_s cn52xx; + struct cvmx_pciercx_cfg074_s cn52xxp1; + struct cvmx_pciercx_cfg074_s cn56xx; + struct cvmx_pciercx_cfg074_s cn56xxp1; +}; + +union cvmx_pciercx_cfg075 { + uint32_t u32; + struct cvmx_pciercx_cfg075_s { + uint32_t reserved_3_31:29; + uint32_t fere:1; + uint32_t nfere:1; + uint32_t cere:1; + } s; + struct cvmx_pciercx_cfg075_s cn52xx; + struct cvmx_pciercx_cfg075_s cn52xxp1; + struct cvmx_pciercx_cfg075_s cn56xx; + struct cvmx_pciercx_cfg075_s cn56xxp1; +}; + +union cvmx_pciercx_cfg076 { + uint32_t u32; + struct cvmx_pciercx_cfg076_s { + uint32_t aeimn:5; + uint32_t reserved_7_26:20; + uint32_t femr:1; + uint32_t nfemr:1; + uint32_t fuf:1; + uint32_t multi_efnfr:1; + uint32_t efnfr:1; + uint32_t multi_ecr:1; + uint32_t ecr:1; + } s; + struct cvmx_pciercx_cfg076_s cn52xx; + struct cvmx_pciercx_cfg076_s cn52xxp1; + struct cvmx_pciercx_cfg076_s cn56xx; + struct cvmx_pciercx_cfg076_s cn56xxp1; +}; + +union cvmx_pciercx_cfg077 { + uint32_t u32; + struct cvmx_pciercx_cfg077_s { + uint32_t efnfsi:16; + uint32_t ecsi:16; + } s; + struct cvmx_pciercx_cfg077_s cn52xx; + struct cvmx_pciercx_cfg077_s cn52xxp1; + struct cvmx_pciercx_cfg077_s cn56xx; + struct cvmx_pciercx_cfg077_s cn56xxp1; +}; + +union cvmx_pciercx_cfg448 { + uint32_t u32; + struct cvmx_pciercx_cfg448_s { + uint32_t rtl:16; + uint32_t rtltl:16; + } s; + struct cvmx_pciercx_cfg448_s cn52xx; + struct cvmx_pciercx_cfg448_s cn52xxp1; + struct cvmx_pciercx_cfg448_s cn56xx; + struct cvmx_pciercx_cfg448_s cn56xxp1; +}; + +union cvmx_pciercx_cfg449 { + uint32_t u32; + struct cvmx_pciercx_cfg449_s { + uint32_t omr:32; + } s; + struct cvmx_pciercx_cfg449_s cn52xx; + struct cvmx_pciercx_cfg449_s cn52xxp1; + struct cvmx_pciercx_cfg449_s cn56xx; + struct cvmx_pciercx_cfg449_s cn56xxp1; +}; + +union cvmx_pciercx_cfg450 { + uint32_t u32; + struct cvmx_pciercx_cfg450_s { + uint32_t lpec:8; + uint32_t reserved_22_23:2; + uint32_t link_state:6; + uint32_t force_link:1; + uint32_t reserved_8_14:7; + uint32_t link_num:8; + } s; + struct cvmx_pciercx_cfg450_s cn52xx; + struct cvmx_pciercx_cfg450_s cn52xxp1; + struct cvmx_pciercx_cfg450_s cn56xx; + struct cvmx_pciercx_cfg450_s cn56xxp1; +}; + +union cvmx_pciercx_cfg451 { + uint32_t u32; + struct cvmx_pciercx_cfg451_s { + uint32_t reserved_30_31:2; + uint32_t l1el:3; + uint32_t l0el:3; + uint32_t n_fts_cc:8; + uint32_t n_fts:8; + uint32_t ack_freq:8; + } s; + struct cvmx_pciercx_cfg451_s cn52xx; + struct cvmx_pciercx_cfg451_s cn52xxp1; + struct cvmx_pciercx_cfg451_s cn56xx; + struct cvmx_pciercx_cfg451_s cn56xxp1; +}; + +union cvmx_pciercx_cfg452 { + uint32_t u32; + struct cvmx_pciercx_cfg452_s { + uint32_t reserved_26_31:6; + uint32_t eccrc:1; + uint32_t reserved_22_24:3; + uint32_t lme:6; + uint32_t reserved_8_15:8; + uint32_t flm:1; + uint32_t reserved_6_6:1; + uint32_t dllle:1; + 
uint32_t reserved_4_4:1; + uint32_t ra:1; + uint32_t le:1; + uint32_t sd:1; + uint32_t omr:1; + } s; + struct cvmx_pciercx_cfg452_s cn52xx; + struct cvmx_pciercx_cfg452_s cn52xxp1; + struct cvmx_pciercx_cfg452_s cn56xx; + struct cvmx_pciercx_cfg452_s cn56xxp1; +}; + +union cvmx_pciercx_cfg453 { + uint32_t u32; + struct cvmx_pciercx_cfg453_s { + uint32_t dlld:1; + uint32_t reserved_26_30:5; + uint32_t ack_nak:1; + uint32_t fcd:1; + uint32_t ilst:24; + } s; + struct cvmx_pciercx_cfg453_s cn52xx; + struct cvmx_pciercx_cfg453_s cn52xxp1; + struct cvmx_pciercx_cfg453_s cn56xx; + struct cvmx_pciercx_cfg453_s cn56xxp1; +}; + +union cvmx_pciercx_cfg454 { + uint32_t u32; + struct cvmx_pciercx_cfg454_s { + uint32_t reserved_29_31:3; + uint32_t tmfcwt:5; + uint32_t tmanlt:5; + uint32_t tmrt:5; + uint32_t reserved_11_13:3; + uint32_t nskps:3; + uint32_t reserved_4_7:4; + uint32_t ntss:4; + } s; + struct cvmx_pciercx_cfg454_s cn52xx; + struct cvmx_pciercx_cfg454_s cn52xxp1; + struct cvmx_pciercx_cfg454_s cn56xx; + struct cvmx_pciercx_cfg454_s cn56xxp1; +}; + +union cvmx_pciercx_cfg455 { + uint32_t u32; + struct cvmx_pciercx_cfg455_s { + uint32_t m_cfg0_filt:1; + uint32_t m_io_filt:1; + uint32_t msg_ctrl:1; + uint32_t m_cpl_ecrc_filt:1; + uint32_t m_ecrc_filt:1; + uint32_t m_cpl_len_err:1; + uint32_t m_cpl_attr_err:1; + uint32_t m_cpl_tc_err:1; + uint32_t m_cpl_fun_err:1; + uint32_t m_cpl_rid_err:1; + uint32_t m_cpl_tag_err:1; + uint32_t m_lk_filt:1; + uint32_t m_cfg1_filt:1; + uint32_t m_bar_match:1; + uint32_t m_pois_filt:1; + uint32_t m_fun:1; + uint32_t dfcwt:1; + uint32_t reserved_11_14:4; + uint32_t skpiv:11; + } s; + struct cvmx_pciercx_cfg455_s cn52xx; + struct cvmx_pciercx_cfg455_s cn52xxp1; + struct cvmx_pciercx_cfg455_s cn56xx; + struct cvmx_pciercx_cfg455_s cn56xxp1; +}; + +union cvmx_pciercx_cfg456 { + uint32_t u32; + struct cvmx_pciercx_cfg456_s { + uint32_t reserved_2_31:30; + uint32_t m_vend1_drp:1; + uint32_t m_vend0_drp:1; + } s; + struct cvmx_pciercx_cfg456_s cn52xx; + struct cvmx_pciercx_cfg456_s cn52xxp1; + struct cvmx_pciercx_cfg456_s cn56xx; + struct cvmx_pciercx_cfg456_s cn56xxp1; +}; + +union cvmx_pciercx_cfg458 { + uint32_t u32; + struct cvmx_pciercx_cfg458_s { + uint32_t dbg_info_l32:32; + } s; + struct cvmx_pciercx_cfg458_s cn52xx; + struct cvmx_pciercx_cfg458_s cn52xxp1; + struct cvmx_pciercx_cfg458_s cn56xx; + struct cvmx_pciercx_cfg458_s cn56xxp1; +}; + +union cvmx_pciercx_cfg459 { + uint32_t u32; + struct cvmx_pciercx_cfg459_s { + uint32_t dbg_info_u32:32; + } s; + struct cvmx_pciercx_cfg459_s cn52xx; + struct cvmx_pciercx_cfg459_s cn52xxp1; + struct cvmx_pciercx_cfg459_s cn56xx; + struct cvmx_pciercx_cfg459_s cn56xxp1; +}; + +union cvmx_pciercx_cfg460 { + uint32_t u32; + struct cvmx_pciercx_cfg460_s { + uint32_t reserved_20_31:12; + uint32_t tphfcc:8; + uint32_t tpdfcc:12; + } s; + struct cvmx_pciercx_cfg460_s cn52xx; + struct cvmx_pciercx_cfg460_s cn52xxp1; + struct cvmx_pciercx_cfg460_s cn56xx; + struct cvmx_pciercx_cfg460_s cn56xxp1; +}; + +union cvmx_pciercx_cfg461 { + uint32_t u32; + struct cvmx_pciercx_cfg461_s { + uint32_t reserved_20_31:12; + uint32_t tchfcc:8; + uint32_t tcdfcc:12; + } s; + struct cvmx_pciercx_cfg461_s cn52xx; + struct cvmx_pciercx_cfg461_s cn52xxp1; + struct cvmx_pciercx_cfg461_s cn56xx; + struct cvmx_pciercx_cfg461_s cn56xxp1; +}; + +union cvmx_pciercx_cfg462 { + uint32_t u32; + struct cvmx_pciercx_cfg462_s { + uint32_t reserved_20_31:12; + uint32_t tchfcc:8; + uint32_t tcdfcc:12; + } s; + struct cvmx_pciercx_cfg462_s cn52xx; + struct 
cvmx_pciercx_cfg462_s cn52xxp1; + struct cvmx_pciercx_cfg462_s cn56xx; + struct cvmx_pciercx_cfg462_s cn56xxp1; +}; + +union cvmx_pciercx_cfg463 { + uint32_t u32; + struct cvmx_pciercx_cfg463_s { + uint32_t reserved_3_31:29; + uint32_t rqne:1; + uint32_t trbne:1; + uint32_t rtlpfccnr:1; + } s; + struct cvmx_pciercx_cfg463_s cn52xx; + struct cvmx_pciercx_cfg463_s cn52xxp1; + struct cvmx_pciercx_cfg463_s cn56xx; + struct cvmx_pciercx_cfg463_s cn56xxp1; +}; + +union cvmx_pciercx_cfg464 { + uint32_t u32; + struct cvmx_pciercx_cfg464_s { + uint32_t wrr_vc3:8; + uint32_t wrr_vc2:8; + uint32_t wrr_vc1:8; + uint32_t wrr_vc0:8; + } s; + struct cvmx_pciercx_cfg464_s cn52xx; + struct cvmx_pciercx_cfg464_s cn52xxp1; + struct cvmx_pciercx_cfg464_s cn56xx; + struct cvmx_pciercx_cfg464_s cn56xxp1; +}; + +union cvmx_pciercx_cfg465 { + uint32_t u32; + struct cvmx_pciercx_cfg465_s { + uint32_t wrr_vc7:8; + uint32_t wrr_vc6:8; + uint32_t wrr_vc5:8; + uint32_t wrr_vc4:8; + } s; + struct cvmx_pciercx_cfg465_s cn52xx; + struct cvmx_pciercx_cfg465_s cn52xxp1; + struct cvmx_pciercx_cfg465_s cn56xx; + struct cvmx_pciercx_cfg465_s cn56xxp1; +}; + +union cvmx_pciercx_cfg466 { + uint32_t u32; + struct cvmx_pciercx_cfg466_s { + uint32_t rx_queue_order:1; + uint32_t type_ordering:1; + uint32_t reserved_24_29:6; + uint32_t queue_mode:3; + uint32_t reserved_20_20:1; + uint32_t header_credits:8; + uint32_t data_credits:12; + } s; + struct cvmx_pciercx_cfg466_s cn52xx; + struct cvmx_pciercx_cfg466_s cn52xxp1; + struct cvmx_pciercx_cfg466_s cn56xx; + struct cvmx_pciercx_cfg466_s cn56xxp1; +}; + +union cvmx_pciercx_cfg467 { + uint32_t u32; + struct cvmx_pciercx_cfg467_s { + uint32_t reserved_24_31:8; + uint32_t queue_mode:3; + uint32_t reserved_20_20:1; + uint32_t header_credits:8; + uint32_t data_credits:12; + } s; + struct cvmx_pciercx_cfg467_s cn52xx; + struct cvmx_pciercx_cfg467_s cn52xxp1; + struct cvmx_pciercx_cfg467_s cn56xx; + struct cvmx_pciercx_cfg467_s cn56xxp1; +}; + +union cvmx_pciercx_cfg468 { + uint32_t u32; + struct cvmx_pciercx_cfg468_s { + uint32_t reserved_24_31:8; + uint32_t queue_mode:3; + uint32_t reserved_20_20:1; + uint32_t header_credits:8; + uint32_t data_credits:12; + } s; + struct cvmx_pciercx_cfg468_s cn52xx; + struct cvmx_pciercx_cfg468_s cn52xxp1; + struct cvmx_pciercx_cfg468_s cn56xx; + struct cvmx_pciercx_cfg468_s cn56xxp1; +}; + +union cvmx_pciercx_cfg490 { + uint32_t u32; + struct cvmx_pciercx_cfg490_s { + uint32_t reserved_26_31:6; + uint32_t header_depth:10; + uint32_t reserved_14_15:2; + uint32_t data_depth:14; + } s; + struct cvmx_pciercx_cfg490_s cn52xx; + struct cvmx_pciercx_cfg490_s cn52xxp1; + struct cvmx_pciercx_cfg490_s cn56xx; + struct cvmx_pciercx_cfg490_s cn56xxp1; +}; + +union cvmx_pciercx_cfg491 { + uint32_t u32; + struct cvmx_pciercx_cfg491_s { + uint32_t reserved_26_31:6; + uint32_t header_depth:10; + uint32_t reserved_14_15:2; + uint32_t data_depth:14; + } s; + struct cvmx_pciercx_cfg491_s cn52xx; + struct cvmx_pciercx_cfg491_s cn52xxp1; + struct cvmx_pciercx_cfg491_s cn56xx; + struct cvmx_pciercx_cfg491_s cn56xxp1; +}; + +union cvmx_pciercx_cfg492 { + uint32_t u32; + struct cvmx_pciercx_cfg492_s { + uint32_t reserved_26_31:6; + uint32_t header_depth:10; + uint32_t reserved_14_15:2; + uint32_t data_depth:14; + } s; + struct cvmx_pciercx_cfg492_s cn52xx; + struct cvmx_pciercx_cfg492_s cn52xxp1; + struct cvmx_pciercx_cfg492_s cn56xx; + struct cvmx_pciercx_cfg492_s cn56xxp1; +}; + +union cvmx_pciercx_cfg516 { + uint32_t u32; + struct cvmx_pciercx_cfg516_s { + uint32_t 
phy_stat:32; + } s; + struct cvmx_pciercx_cfg516_s cn52xx; + struct cvmx_pciercx_cfg516_s cn52xxp1; + struct cvmx_pciercx_cfg516_s cn56xx; + struct cvmx_pciercx_cfg516_s cn56xxp1; +}; + +union cvmx_pciercx_cfg517 { + uint32_t u32; + struct cvmx_pciercx_cfg517_s { + uint32_t phy_ctrl:32; + } s; + struct cvmx_pciercx_cfg517_s cn52xx; + struct cvmx_pciercx_cfg517_s cn52xxp1; + struct cvmx_pciercx_cfg517_s cn56xx; + struct cvmx_pciercx_cfg517_s cn56xxp1; +}; + +#endif diff --git a/arch/mips/include/asm/octeon/cvmx-pescx-defs.h b/arch/mips/include/asm/octeon/cvmx-pescx-defs.h new file mode 100644 index 0000000..f40cfaf --- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-pescx-defs.h @@ -0,0 +1,410 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_PESCX_DEFS_H__ +#define __CVMX_PESCX_DEFS_H__ + +#define CVMX_PESCX_BIST_STATUS(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000018ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_BIST_STATUS2(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000418ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_CFG_RD(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000030ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_CFG_WR(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000028ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_CPL_LUT_VALID(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000098ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_CTL_STATUS(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000000ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_CTL_STATUS2(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000400ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_DBG_INFO(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000008ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_DBG_INFO_EN(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C80000A0ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_DIAG_STATUS(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000020ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_P2N_BAR0_START(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000080ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_P2N_BAR1_START(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000088ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_P2N_BAR2_START(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000090ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_P2P_BARX_END(offset, block_id) \ 
+ CVMX_ADD_IO_SEG(0x00011800C8000048ull + (((offset) & 3) * 16) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_P2P_BARX_START(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000040ull + (((offset) & 3) * 16) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_TLP_CREDITS(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000038ull + (((block_id) & 1) * 0x8000000ull)) + +union cvmx_pescx_bist_status { + uint64_t u64; + struct cvmx_pescx_bist_status_s { + uint64_t reserved_13_63:51; + uint64_t rqdata5:1; + uint64_t ctlp_or:1; + uint64_t ntlp_or:1; + uint64_t ptlp_or:1; + uint64_t retry:1; + uint64_t rqdata0:1; + uint64_t rqdata1:1; + uint64_t rqdata2:1; + uint64_t rqdata3:1; + uint64_t rqdata4:1; + uint64_t rqhdr1:1; + uint64_t rqhdr0:1; + uint64_t sot:1; + } s; + struct cvmx_pescx_bist_status_s cn52xx; + struct cvmx_pescx_bist_status_cn52xxp1 { + uint64_t reserved_12_63:52; + uint64_t ctlp_or:1; + uint64_t ntlp_or:1; + uint64_t ptlp_or:1; + uint64_t retry:1; + uint64_t rqdata0:1; + uint64_t rqdata1:1; + uint64_t rqdata2:1; + uint64_t rqdata3:1; + uint64_t rqdata4:1; + uint64_t rqhdr1:1; + uint64_t rqhdr0:1; + uint64_t sot:1; + } cn52xxp1; + struct cvmx_pescx_bist_status_s cn56xx; + struct cvmx_pescx_bist_status_cn52xxp1 cn56xxp1; +}; + +union cvmx_pescx_bist_status2 { + uint64_t u64; + struct cvmx_pescx_bist_status2_s { + uint64_t reserved_14_63:50; + uint64_t cto_p2e:1; + uint64_t e2p_cpl:1; + uint64_t e2p_n:1; + uint64_t e2p_p:1; + uint64_t e2p_rsl:1; + uint64_t dbg_p2e:1; + uint64_t peai_p2e:1; + uint64_t rsl_p2e:1; + uint64_t pef_tpf1:1; + uint64_t pef_tpf0:1; + uint64_t pef_tnf:1; + uint64_t pef_tcf1:1; + uint64_t pef_tc0:1; + uint64_t ppf:1; + } s; + struct cvmx_pescx_bist_status2_s cn52xx; + struct cvmx_pescx_bist_status2_s cn52xxp1; + struct cvmx_pescx_bist_status2_s cn56xx; + struct cvmx_pescx_bist_status2_s cn56xxp1; +}; + +union cvmx_pescx_cfg_rd { + uint64_t u64; + struct cvmx_pescx_cfg_rd_s { + uint64_t data:32; + uint64_t addr:32; + } s; + struct cvmx_pescx_cfg_rd_s cn52xx; + struct cvmx_pescx_cfg_rd_s cn52xxp1; + struct cvmx_pescx_cfg_rd_s cn56xx; + struct cvmx_pescx_cfg_rd_s cn56xxp1; +}; + +union cvmx_pescx_cfg_wr { + uint64_t u64; + struct cvmx_pescx_cfg_wr_s { + uint64_t data:32; + uint64_t addr:32; + } s; + struct cvmx_pescx_cfg_wr_s cn52xx; + struct cvmx_pescx_cfg_wr_s cn52xxp1; + struct cvmx_pescx_cfg_wr_s cn56xx; + struct cvmx_pescx_cfg_wr_s cn56xxp1; +}; + +union cvmx_pescx_cpl_lut_valid { + uint64_t u64; + struct cvmx_pescx_cpl_lut_valid_s { + uint64_t reserved_32_63:32; + uint64_t tag:32; + } s; + struct cvmx_pescx_cpl_lut_valid_s cn52xx; + struct cvmx_pescx_cpl_lut_valid_s cn52xxp1; + struct cvmx_pescx_cpl_lut_valid_s cn56xx; + struct cvmx_pescx_cpl_lut_valid_s cn56xxp1; +}; + +union cvmx_pescx_ctl_status { + uint64_t u64; + struct cvmx_pescx_ctl_status_s { + uint64_t reserved_28_63:36; + uint64_t dnum:5; + uint64_t pbus:8; + uint64_t qlm_cfg:2; + uint64_t lane_swp:1; + uint64_t pm_xtoff:1; + uint64_t pm_xpme:1; + uint64_t ob_p_cmd:1; + uint64_t reserved_7_8:2; + uint64_t nf_ecrc:1; + uint64_t dly_one:1; + uint64_t lnk_enb:1; + uint64_t ro_ctlp:1; + uint64_t reserved_2_2:1; + uint64_t inv_ecrc:1; + uint64_t inv_lcrc:1; + } s; + struct cvmx_pescx_ctl_status_s cn52xx; + struct cvmx_pescx_ctl_status_s cn52xxp1; + struct cvmx_pescx_ctl_status_cn56xx { + uint64_t reserved_28_63:36; + uint64_t dnum:5; + uint64_t pbus:8; + uint64_t qlm_cfg:2; + uint64_t reserved_12_12:1; + uint64_t pm_xtoff:1; + uint64_t pm_xpme:1; + uint64_t ob_p_cmd:1; + uint64_t 
reserved_7_8:2; + uint64_t nf_ecrc:1; + uint64_t dly_one:1; + uint64_t lnk_enb:1; + uint64_t ro_ctlp:1; + uint64_t reserved_2_2:1; + uint64_t inv_ecrc:1; + uint64_t inv_lcrc:1; + } cn56xx; + struct cvmx_pescx_ctl_status_cn56xx cn56xxp1; +}; + +union cvmx_pescx_ctl_status2 { + uint64_t u64; + struct cvmx_pescx_ctl_status2_s { + uint64_t reserved_2_63:62; + uint64_t pclk_run:1; + uint64_t pcierst:1; + } s; + struct cvmx_pescx_ctl_status2_s cn52xx; + struct cvmx_pescx_ctl_status2_cn52xxp1 { + uint64_t reserved_1_63:63; + uint64_t pcierst:1; + } cn52xxp1; + struct cvmx_pescx_ctl_status2_s cn56xx; + struct cvmx_pescx_ctl_status2_cn52xxp1 cn56xxp1; +}; + +union cvmx_pescx_dbg_info { + uint64_t u64; + struct cvmx_pescx_dbg_info_s { + uint64_t reserved_31_63:33; + uint64_t ecrc_e:1; + uint64_t rawwpp:1; + uint64_t racpp:1; + uint64_t ramtlp:1; + uint64_t rarwdns:1; + uint64_t caar:1; + uint64_t racca:1; + uint64_t racur:1; + uint64_t rauc:1; + uint64_t rqo:1; + uint64_t fcuv:1; + uint64_t rpe:1; + uint64_t fcpvwt:1; + uint64_t dpeoosd:1; + uint64_t rtwdle:1; + uint64_t rdwdle:1; + uint64_t mre:1; + uint64_t rte:1; + uint64_t acto:1; + uint64_t rvdm:1; + uint64_t rumep:1; + uint64_t rptamrc:1; + uint64_t rpmerc:1; + uint64_t rfemrc:1; + uint64_t rnfemrc:1; + uint64_t rcemrc:1; + uint64_t rpoison:1; + uint64_t recrce:1; + uint64_t rtlplle:1; + uint64_t rtlpmal:1; + uint64_t spoison:1; + } s; + struct cvmx_pescx_dbg_info_s cn52xx; + struct cvmx_pescx_dbg_info_s cn52xxp1; + struct cvmx_pescx_dbg_info_s cn56xx; + struct cvmx_pescx_dbg_info_s cn56xxp1; +}; + +union cvmx_pescx_dbg_info_en { + uint64_t u64; + struct cvmx_pescx_dbg_info_en_s { + uint64_t reserved_31_63:33; + uint64_t ecrc_e:1; + uint64_t rawwpp:1; + uint64_t racpp:1; + uint64_t ramtlp:1; + uint64_t rarwdns:1; + uint64_t caar:1; + uint64_t racca:1; + uint64_t racur:1; + uint64_t rauc:1; + uint64_t rqo:1; + uint64_t fcuv:1; + uint64_t rpe:1; + uint64_t fcpvwt:1; + uint64_t dpeoosd:1; + uint64_t rtwdle:1; + uint64_t rdwdle:1; + uint64_t mre:1; + uint64_t rte:1; + uint64_t acto:1; + uint64_t rvdm:1; + uint64_t rumep:1; + uint64_t rptamrc:1; + uint64_t rpmerc:1; + uint64_t rfemrc:1; + uint64_t rnfemrc:1; + uint64_t rcemrc:1; + uint64_t rpoison:1; + uint64_t recrce:1; + uint64_t rtlplle:1; + uint64_t rtlpmal:1; + uint64_t spoison:1; + } s; + struct cvmx_pescx_dbg_info_en_s cn52xx; + struct cvmx_pescx_dbg_info_en_s cn52xxp1; + struct cvmx_pescx_dbg_info_en_s cn56xx; + struct cvmx_pescx_dbg_info_en_s cn56xxp1; +}; + +union cvmx_pescx_diag_status { + uint64_t u64; + struct cvmx_pescx_diag_status_s { + uint64_t reserved_4_63:60; + uint64_t pm_dst:1; + uint64_t pm_stat:1; + uint64_t pm_en:1; + uint64_t aux_en:1; + } s; + struct cvmx_pescx_diag_status_s cn52xx; + struct cvmx_pescx_diag_status_s cn52xxp1; + struct cvmx_pescx_diag_status_s cn56xx; + struct cvmx_pescx_diag_status_s cn56xxp1; +}; + +union cvmx_pescx_p2n_bar0_start { + uint64_t u64; + struct cvmx_pescx_p2n_bar0_start_s { + uint64_t addr:50; + uint64_t reserved_0_13:14; + } s; + struct cvmx_pescx_p2n_bar0_start_s cn52xx; + struct cvmx_pescx_p2n_bar0_start_s cn52xxp1; + struct cvmx_pescx_p2n_bar0_start_s cn56xx; + struct cvmx_pescx_p2n_bar0_start_s cn56xxp1; +}; + +union cvmx_pescx_p2n_bar1_start { + uint64_t u64; + struct cvmx_pescx_p2n_bar1_start_s { + uint64_t addr:38; + uint64_t reserved_0_25:26; + } s; + struct cvmx_pescx_p2n_bar1_start_s cn52xx; + struct cvmx_pescx_p2n_bar1_start_s cn52xxp1; + struct cvmx_pescx_p2n_bar1_start_s cn56xx; + struct cvmx_pescx_p2n_bar1_start_s cn56xxp1; +}; 
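+
+/*
+ * Illustrative sketch, not part of the original header: each CSR in
+ * this file pairs an address macro with a union that decodes the raw
+ * register value through bitfields.  The helper below shows that
+ * access pattern; it assumes cvmx_read_csr() from cvmx.h and is kept
+ * under #if 0 so it is never compiled.
+ */
+#if 0
+static inline int cvmx_pescx_link_out_of_reset(int pcie_port)
+{
+	union cvmx_pescx_ctl_status2 ctl;
+
+	/* Fetch the raw 64-bit CSR for the selected PCIe port. */
+	ctl.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
+
+	/* PCIERST is expected to read 1 while the core is held in reset. */
+	return ctl.s.pcierst == 0;
+}
+#endif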
+ +union cvmx_pescx_p2n_bar2_start { + uint64_t u64; + struct cvmx_pescx_p2n_bar2_start_s { + uint64_t addr:25; + uint64_t reserved_0_38:39; + } s; + struct cvmx_pescx_p2n_bar2_start_s cn52xx; + struct cvmx_pescx_p2n_bar2_start_s cn52xxp1; + struct cvmx_pescx_p2n_bar2_start_s cn56xx; + struct cvmx_pescx_p2n_bar2_start_s cn56xxp1; +}; + +union cvmx_pescx_p2p_barx_end { + uint64_t u64; + struct cvmx_pescx_p2p_barx_end_s { + uint64_t addr:52; + uint64_t reserved_0_11:12; + } s; + struct cvmx_pescx_p2p_barx_end_s cn52xx; + struct cvmx_pescx_p2p_barx_end_s cn52xxp1; + struct cvmx_pescx_p2p_barx_end_s cn56xx; + struct cvmx_pescx_p2p_barx_end_s cn56xxp1; +}; + +union cvmx_pescx_p2p_barx_start { + uint64_t u64; + struct cvmx_pescx_p2p_barx_start_s { + uint64_t addr:52; + uint64_t reserved_0_11:12; + } s; + struct cvmx_pescx_p2p_barx_start_s cn52xx; + struct cvmx_pescx_p2p_barx_start_s cn52xxp1; + struct cvmx_pescx_p2p_barx_start_s cn56xx; + struct cvmx_pescx_p2p_barx_start_s cn56xxp1; +}; + +union cvmx_pescx_tlp_credits { + uint64_t u64; + struct cvmx_pescx_tlp_credits_s { + uint64_t reserved_0_63:64; + } s; + struct cvmx_pescx_tlp_credits_cn52xx { + uint64_t reserved_56_63:8; + uint64_t peai_ppf:8; + uint64_t pesc_cpl:8; + uint64_t pesc_np:8; + uint64_t pesc_p:8; + uint64_t npei_cpl:8; + uint64_t npei_np:8; + uint64_t npei_p:8; + } cn52xx; + struct cvmx_pescx_tlp_credits_cn52xxp1 { + uint64_t reserved_38_63:26; + uint64_t peai_ppf:8; + uint64_t pesc_cpl:5; + uint64_t pesc_np:5; + uint64_t pesc_p:5; + uint64_t npei_cpl:5; + uint64_t npei_np:5; + uint64_t npei_p:5; + } cn52xxp1; + struct cvmx_pescx_tlp_credits_cn52xx cn56xx; + struct cvmx_pescx_tlp_credits_cn52xxp1 cn56xxp1; +}; + +#endif diff --git a/arch/mips/include/asm/octeon/cvmx-pexp-defs.h b/arch/mips/include/asm/octeon/cvmx-pexp-defs.h new file mode 100644 index 0000000..5ea5dc5 --- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-pexp-defs.h @@ -0,0 +1,229 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/** + * cvmx-pexp-defs.h + * + * Configuration and status register (CSR) definitions for + * OCTEON PEXP. 
+ * + */ +#ifndef __CVMX_PEXP_DEFS_H__ +#define __CVMX_PEXP_DEFS_H__ + +#define CVMX_PEXP_NPEI_BAR1_INDEXX(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000008000ull + (((offset) & 31) * 16)) +#define CVMX_PEXP_NPEI_BIST_STATUS \ + CVMX_ADD_IO_SEG(0x00011F0000008580ull) +#define CVMX_PEXP_NPEI_BIST_STATUS2 \ + CVMX_ADD_IO_SEG(0x00011F0000008680ull) +#define CVMX_PEXP_NPEI_CTL_PORT0 \ + CVMX_ADD_IO_SEG(0x00011F0000008250ull) +#define CVMX_PEXP_NPEI_CTL_PORT1 \ + CVMX_ADD_IO_SEG(0x00011F0000008260ull) +#define CVMX_PEXP_NPEI_CTL_STATUS \ + CVMX_ADD_IO_SEG(0x00011F0000008570ull) +#define CVMX_PEXP_NPEI_CTL_STATUS2 \ + CVMX_ADD_IO_SEG(0x00011F000000BC00ull) +#define CVMX_PEXP_NPEI_DATA_OUT_CNT \ + CVMX_ADD_IO_SEG(0x00011F00000085F0ull) +#define CVMX_PEXP_NPEI_DBG_DATA \ + CVMX_ADD_IO_SEG(0x00011F0000008510ull) +#define CVMX_PEXP_NPEI_DBG_SELECT \ + CVMX_ADD_IO_SEG(0x00011F0000008500ull) +#define CVMX_PEXP_NPEI_DMA0_INT_LEVEL \ + CVMX_ADD_IO_SEG(0x00011F00000085C0ull) +#define CVMX_PEXP_NPEI_DMA1_INT_LEVEL \ + CVMX_ADD_IO_SEG(0x00011F00000085D0ull) +#define CVMX_PEXP_NPEI_DMAX_COUNTS(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000008450ull + (((offset) & 7) * 16)) +#define CVMX_PEXP_NPEI_DMAX_DBELL(offset) \ + CVMX_ADD_IO_SEG(0x00011F00000083B0ull + (((offset) & 7) * 16)) +#define CVMX_PEXP_NPEI_DMAX_IBUFF_SADDR(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000008400ull + (((offset) & 7) * 16)) +#define CVMX_PEXP_NPEI_DMAX_NADDR(offset) \ + CVMX_ADD_IO_SEG(0x00011F00000084A0ull + (((offset) & 7) * 16)) +#define CVMX_PEXP_NPEI_DMA_CNTS \ + CVMX_ADD_IO_SEG(0x00011F00000085E0ull) +#define CVMX_PEXP_NPEI_DMA_CONTROL \ + CVMX_ADD_IO_SEG(0x00011F00000083A0ull) +#define CVMX_PEXP_NPEI_INT_A_ENB \ + CVMX_ADD_IO_SEG(0x00011F0000008560ull) +#define CVMX_PEXP_NPEI_INT_A_ENB2 \ + CVMX_ADD_IO_SEG(0x00011F000000BCE0ull) +#define CVMX_PEXP_NPEI_INT_A_SUM \ + CVMX_ADD_IO_SEG(0x00011F0000008550ull) +#define CVMX_PEXP_NPEI_INT_ENB \ + CVMX_ADD_IO_SEG(0x00011F0000008540ull) +#define CVMX_PEXP_NPEI_INT_ENB2 \ + CVMX_ADD_IO_SEG(0x00011F000000BCD0ull) +#define CVMX_PEXP_NPEI_INT_INFO \ + CVMX_ADD_IO_SEG(0x00011F0000008590ull) +#define CVMX_PEXP_NPEI_INT_SUM \ + CVMX_ADD_IO_SEG(0x00011F0000008530ull) +#define CVMX_PEXP_NPEI_INT_SUM2 \ + CVMX_ADD_IO_SEG(0x00011F000000BCC0ull) +#define CVMX_PEXP_NPEI_LAST_WIN_RDATA0 \ + CVMX_ADD_IO_SEG(0x00011F0000008600ull) +#define CVMX_PEXP_NPEI_LAST_WIN_RDATA1 \ + CVMX_ADD_IO_SEG(0x00011F0000008610ull) +#define CVMX_PEXP_NPEI_MEM_ACCESS_CTL \ + CVMX_ADD_IO_SEG(0x00011F00000084F0ull) +#define CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000008280ull + (((offset) & 31) * 16) - 16 * 12) +#define CVMX_PEXP_NPEI_MSI_ENB0 \ + CVMX_ADD_IO_SEG(0x00011F000000BC50ull) +#define CVMX_PEXP_NPEI_MSI_ENB1 \ + CVMX_ADD_IO_SEG(0x00011F000000BC60ull) +#define CVMX_PEXP_NPEI_MSI_ENB2 \ + CVMX_ADD_IO_SEG(0x00011F000000BC70ull) +#define CVMX_PEXP_NPEI_MSI_ENB3 \ + CVMX_ADD_IO_SEG(0x00011F000000BC80ull) +#define CVMX_PEXP_NPEI_MSI_RCV0 \ + CVMX_ADD_IO_SEG(0x00011F000000BC10ull) +#define CVMX_PEXP_NPEI_MSI_RCV1 \ + CVMX_ADD_IO_SEG(0x00011F000000BC20ull) +#define CVMX_PEXP_NPEI_MSI_RCV2 \ + CVMX_ADD_IO_SEG(0x00011F000000BC30ull) +#define CVMX_PEXP_NPEI_MSI_RCV3 \ + CVMX_ADD_IO_SEG(0x00011F000000BC40ull) +#define CVMX_PEXP_NPEI_MSI_RD_MAP \ + CVMX_ADD_IO_SEG(0x00011F000000BCA0ull) +#define CVMX_PEXP_NPEI_MSI_W1C_ENB0 \ + CVMX_ADD_IO_SEG(0x00011F000000BCF0ull) +#define CVMX_PEXP_NPEI_MSI_W1C_ENB1 \ + CVMX_ADD_IO_SEG(0x00011F000000BD00ull) +#define CVMX_PEXP_NPEI_MSI_W1C_ENB2 \ + 
CVMX_ADD_IO_SEG(0x00011F000000BD10ull) +#define CVMX_PEXP_NPEI_MSI_W1C_ENB3 \ + CVMX_ADD_IO_SEG(0x00011F000000BD20ull) +#define CVMX_PEXP_NPEI_MSI_W1S_ENB0 \ + CVMX_ADD_IO_SEG(0x00011F000000BD30ull) +#define CVMX_PEXP_NPEI_MSI_W1S_ENB1 \ + CVMX_ADD_IO_SEG(0x00011F000000BD40ull) +#define CVMX_PEXP_NPEI_MSI_W1S_ENB2 \ + CVMX_ADD_IO_SEG(0x00011F000000BD50ull) +#define CVMX_PEXP_NPEI_MSI_W1S_ENB3 \ + CVMX_ADD_IO_SEG(0x00011F000000BD60ull) +#define CVMX_PEXP_NPEI_MSI_WR_MAP \ + CVMX_ADD_IO_SEG(0x00011F000000BC90ull) +#define CVMX_PEXP_NPEI_PCIE_CREDIT_CNT \ + CVMX_ADD_IO_SEG(0x00011F000000BD70ull) +#define CVMX_PEXP_NPEI_PCIE_MSI_RCV \ + CVMX_ADD_IO_SEG(0x00011F000000BCB0ull) +#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B1 \ + CVMX_ADD_IO_SEG(0x00011F0000008650ull) +#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B2 \ + CVMX_ADD_IO_SEG(0x00011F0000008660ull) +#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B3 \ + CVMX_ADD_IO_SEG(0x00011F0000008670ull) +#define CVMX_PEXP_NPEI_PKTX_CNTS(offset) \ + CVMX_ADD_IO_SEG(0x00011F000000A400ull + (((offset) & 31) * 16)) +#define CVMX_PEXP_NPEI_PKTX_INSTR_BADDR(offset) \ + CVMX_ADD_IO_SEG(0x00011F000000A800ull + (((offset) & 31) * 16)) +#define CVMX_PEXP_NPEI_PKTX_INSTR_BAOFF_DBELL(offset) \ + CVMX_ADD_IO_SEG(0x00011F000000AC00ull + (((offset) & 31) * 16)) +#define CVMX_PEXP_NPEI_PKTX_INSTR_FIFO_RSIZE(offset) \ + CVMX_ADD_IO_SEG(0x00011F000000B000ull + (((offset) & 31) * 16)) +#define CVMX_PEXP_NPEI_PKTX_INSTR_HEADER(offset) \ + CVMX_ADD_IO_SEG(0x00011F000000B400ull + (((offset) & 31) * 16)) +#define CVMX_PEXP_NPEI_PKTX_IN_BP(offset) \ + CVMX_ADD_IO_SEG(0x00011F000000B800ull + (((offset) & 31) * 16)) +#define CVMX_PEXP_NPEI_PKTX_SLIST_BADDR(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000009400ull + (((offset) & 31) * 16)) +#define CVMX_PEXP_NPEI_PKTX_SLIST_BAOFF_DBELL(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000009800ull + (((offset) & 31) * 16)) +#define CVMX_PEXP_NPEI_PKTX_SLIST_FIFO_RSIZE(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000009C00ull + (((offset) & 31) * 16)) +#define CVMX_PEXP_NPEI_PKT_CNT_INT \ + CVMX_ADD_IO_SEG(0x00011F0000009110ull) +#define CVMX_PEXP_NPEI_PKT_CNT_INT_ENB \ + CVMX_ADD_IO_SEG(0x00011F0000009130ull) +#define CVMX_PEXP_NPEI_PKT_DATA_OUT_ES \ + CVMX_ADD_IO_SEG(0x00011F00000090B0ull) +#define CVMX_PEXP_NPEI_PKT_DATA_OUT_NS \ + CVMX_ADD_IO_SEG(0x00011F00000090A0ull) +#define CVMX_PEXP_NPEI_PKT_DATA_OUT_ROR \ + CVMX_ADD_IO_SEG(0x00011F0000009090ull) +#define CVMX_PEXP_NPEI_PKT_DPADDR \ + CVMX_ADD_IO_SEG(0x00011F0000009080ull) +#define CVMX_PEXP_NPEI_PKT_INPUT_CONTROL \ + CVMX_ADD_IO_SEG(0x00011F0000009150ull) +#define CVMX_PEXP_NPEI_PKT_INSTR_ENB \ + CVMX_ADD_IO_SEG(0x00011F0000009000ull) +#define CVMX_PEXP_NPEI_PKT_INSTR_RD_SIZE \ + CVMX_ADD_IO_SEG(0x00011F0000009190ull) +#define CVMX_PEXP_NPEI_PKT_INSTR_SIZE \ + CVMX_ADD_IO_SEG(0x00011F0000009020ull) +#define CVMX_PEXP_NPEI_PKT_INT_LEVELS \ + CVMX_ADD_IO_SEG(0x00011F0000009100ull) +#define CVMX_PEXP_NPEI_PKT_IN_BP \ + CVMX_ADD_IO_SEG(0x00011F00000086B0ull) +#define CVMX_PEXP_NPEI_PKT_IN_DONEX_CNTS(offset) \ + CVMX_ADD_IO_SEG(0x00011F000000A000ull + (((offset) & 31) * 16)) +#define CVMX_PEXP_NPEI_PKT_IN_INSTR_COUNTS \ + CVMX_ADD_IO_SEG(0x00011F00000086A0ull) +#define CVMX_PEXP_NPEI_PKT_IN_PCIE_PORT \ + CVMX_ADD_IO_SEG(0x00011F00000091A0ull) +#define CVMX_PEXP_NPEI_PKT_IPTR \ + CVMX_ADD_IO_SEG(0x00011F0000009070ull) +#define CVMX_PEXP_NPEI_PKT_OUTPUT_WMARK \ + CVMX_ADD_IO_SEG(0x00011F0000009160ull) +#define CVMX_PEXP_NPEI_PKT_OUT_BMODE \ + CVMX_ADD_IO_SEG(0x00011F00000090D0ull) +#define CVMX_PEXP_NPEI_PKT_OUT_ENB \ + 
CVMX_ADD_IO_SEG(0x00011F0000009010ull) +#define CVMX_PEXP_NPEI_PKT_PCIE_PORT \ + CVMX_ADD_IO_SEG(0x00011F00000090E0ull) +#define CVMX_PEXP_NPEI_PKT_PORT_IN_RST \ + CVMX_ADD_IO_SEG(0x00011F0000008690ull) +#define CVMX_PEXP_NPEI_PKT_SLIST_ES \ + CVMX_ADD_IO_SEG(0x00011F0000009050ull) +#define CVMX_PEXP_NPEI_PKT_SLIST_ID_SIZE \ + CVMX_ADD_IO_SEG(0x00011F0000009180ull) +#define CVMX_PEXP_NPEI_PKT_SLIST_NS \ + CVMX_ADD_IO_SEG(0x00011F0000009040ull) +#define CVMX_PEXP_NPEI_PKT_SLIST_ROR \ + CVMX_ADD_IO_SEG(0x00011F0000009030ull) +#define CVMX_PEXP_NPEI_PKT_TIME_INT \ + CVMX_ADD_IO_SEG(0x00011F0000009120ull) +#define CVMX_PEXP_NPEI_PKT_TIME_INT_ENB \ + CVMX_ADD_IO_SEG(0x00011F0000009140ull) +#define CVMX_PEXP_NPEI_RSL_INT_BLOCKS \ + CVMX_ADD_IO_SEG(0x00011F0000008520ull) +#define CVMX_PEXP_NPEI_SCRATCH_1 \ + CVMX_ADD_IO_SEG(0x00011F0000008270ull) +#define CVMX_PEXP_NPEI_STATE1 \ + CVMX_ADD_IO_SEG(0x00011F0000008620ull) +#define CVMX_PEXP_NPEI_STATE2 \ + CVMX_ADD_IO_SEG(0x00011F0000008630ull) +#define CVMX_PEXP_NPEI_STATE3 \ + CVMX_ADD_IO_SEG(0x00011F0000008640ull) +#define CVMX_PEXP_NPEI_WINDOW_CTL \ + CVMX_ADD_IO_SEG(0x00011F0000008380ull) + +#endif diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h index 03fddfa..e31e3fe 100644 --- a/arch/mips/include/asm/octeon/cvmx.h +++ b/arch/mips/include/asm/octeon/cvmx.h @@ -376,6 +376,18 @@ static inline uint64_t cvmx_get_cycle(void) } /** + * Wait for the specified number of cycles + * + */ +static inline void cvmx_wait(uint64_t cycles) +{ + uint64_t done = cvmx_get_cycle() + cycles; + + while (cvmx_get_cycle() < done) + ; /* Spin */ +} + +/** * Reads a chip global cycle counter. This counts CPU cycles since * chip reset. The counter is 64 bit. * This register does not exist on CN38XX pass 1 silicon diff --git a/arch/mips/include/asm/octeon/octeon-feature.h b/arch/mips/include/asm/octeon/octeon-feature.h index 04fac68..ef24a7b 100644 --- a/arch/mips/include/asm/octeon/octeon-feature.h +++ b/arch/mips/include/asm/octeon/octeon-feature.h @@ -57,6 +57,13 @@ enum octeon_feature { OCTEON_FEATURE_RAID, /* Octeon has a builtin USB */ OCTEON_FEATURE_USB, + /* Octeon IPD can run without using work queue entries */ + OCTEON_FEATURE_NO_WPTR, + /* Octeon has DFA state machines */ + OCTEON_FEATURE_DFA, + /* Octeon MDIO block supports clause 45 transactions for 10 + * Gig support */ + OCTEON_FEATURE_MDIO_CLAUSE_45, }; static inline int cvmx_fuse_read(int fuse); @@ -112,6 +119,26 @@ static inline int octeon_has_feature(enum octeon_feature feature) case OCTEON_FEATURE_USB: return !(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)); + case OCTEON_FEATURE_NO_WPTR: + return (OCTEON_IS_MODEL(OCTEON_CN56XX) + || OCTEON_IS_MODEL(OCTEON_CN52XX)) + && !OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) + && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X); + case OCTEON_FEATURE_DFA: + if (!OCTEON_IS_MODEL(OCTEON_CN38XX) + && !OCTEON_IS_MODEL(OCTEON_CN31XX) + && !OCTEON_IS_MODEL(OCTEON_CN58XX)) + return 0; + else if (OCTEON_IS_MODEL(OCTEON_CN3020)) + return 0; + else if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1)) + return 1; + else + return !cvmx_fuse_read(120); + case OCTEON_FEATURE_MDIO_CLAUSE_45: + return !(OCTEON_IS_MODEL(OCTEON_CN3XXX) + || OCTEON_IS_MODEL(OCTEON_CN58XX) + || OCTEON_IS_MODEL(OCTEON_CN50XX)); } return 0; } diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h index edc6760..cac9b1a 100644 --- a/arch/mips/include/asm/octeon/octeon.h +++ b/arch/mips/include/asm/octeon/octeon.h @@ -245,4 +245,6 @@ 
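The cvmx_wait() helper added to cvmx.h above busy-waits on the 64-bit cycle counter, so the delay it provides is measured in core-clock cycles, not wall time. A minimal sketch of converting microseconds to cycles for it; cpu_hz is a placeholder for however the platform obtains the core frequency, not an interface defined by this patch:

/* Illustrative only: spin for roughly "usec" microseconds. */
static inline void example_wait_usec(uint64_t usec, uint64_t cpu_hz)
{
	/* divide the frequency first to keep the product small */
	cvmx_wait(usec * (cpu_hz / 1000000));
}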
static inline uint32_t octeon_npi_read32(uint64_t address) return cvmx_read64_uint32(address ^ 4); } +extern struct cvmx_bootinfo *octeon_bootinfo; + #endif /* __ASM_OCTEON_OCTEON_H */ diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index 72c80d2..dc0eaa7 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -32,6 +32,11 @@ #define PAGE_SIZE (1UL << PAGE_SHIFT) #define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) +#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3) +#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) +#define HPAGE_MASK (~(HPAGE_SIZE - 1)) +#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) + #ifndef __ASSEMBLY__ #include <linux/pfn.h> diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h index 053e463..a68d111 100644 --- a/arch/mips/include/asm/pci.h +++ b/arch/mips/include/asm/pci.h @@ -142,19 +142,6 @@ extern void pcibios_resource_to_bus(struct pci_dev *dev, extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, struct pci_bus_region *region); -static inline struct resource * -pcibios_select_root(struct pci_dev *pdev, struct resource *res) -{ - struct resource *root = NULL; - - if (res->flags & IORESOURCE_IO) - root = &ioport_resource; - if (res->flags & IORESOURCE_MEM) - root = &iomem_resource; - - return root; -} - #define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index static inline int pci_proc_domain(struct pci_bus *bus) diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h index 51b34a4..1073e6d 100644 --- a/arch/mips/include/asm/pgtable-bits.h +++ b/arch/mips/include/asm/pgtable-bits.h @@ -72,6 +72,7 @@ #else #define _PAGE_R4KBUG (1<<5) /* workaround for r4k bug */ +#define _PAGE_HUGE (1<<5) /* huge tlb page */ #define _PAGE_GLOBAL (1<<6) #define _PAGE_VALID (1<<7) #define _PAGE_SILENT_READ (1<<7) /* synonym */ diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 6a0edf7..1a9f9b2 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -292,6 +292,16 @@ static inline pte_t pte_mkyoung(pte_t pte) pte_val(pte) |= _PAGE_SILENT_READ; return pte; } + +#ifdef _PAGE_HUGE +static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; } + +static inline pte_t pte_mkhuge(pte_t pte) +{ + pte_val(pte) |= _PAGE_HUGE; + return pte; +} +#endif /* _PAGE_HUGE */ #endif static inline int pte_special(pte_t pte) { return 0; } static inline pte_t pte_mkspecial(pte_t pte) { return pte; } diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index 4c140db..387bf59 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h @@ -399,6 +399,7 @@ __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16) __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32) __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32) __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32) +__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64) __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64) __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64) __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128) diff --git a/arch/mips/include/asm/suspend.h b/arch/mips/include/asm/suspend.h new file mode 100644 index 0000000..294cdb6 --- /dev/null +++ b/arch/mips/include/asm/suspend.h @@ 
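The HPAGE_SHIFT definition added to page.h above encodes exactly one PMD entry's span: a page-table page holds PAGE_SIZE / sizeof(pte_t) = 2^(PAGE_SHIFT - 3) PTEs (the -3 assumes 8-byte PTEs), each mapping 2^PAGE_SHIFT bytes, so with 4 KB base pages a huge page is 2^(12 + 9) = 2 MB. A compile-time restatement of that identity, with example_* names invented purely for illustration:

#define EXAMPLE_PTE_SHIFT	3	/* log2(sizeof(pte_t)) for 64-bit PTEs */
#define EXAMPLE_PTRS_PER_PTE	(1UL << (PAGE_SHIFT - EXAMPLE_PTE_SHIFT))

/* Fails to compile unless HPAGE_SIZE covers one full last-level table. */
extern char example_hpage_check[
	HPAGE_SIZE == EXAMPLE_PTRS_PER_PTE * PAGE_SIZE ? 1 : -1];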
-0,0 +1,9 @@ +#ifndef __ASM_SUSPEND_H +#define __ASM_SUSPEND_H + +static inline int arch_prepare_suspend(void) { return 0; } + +/* References to section boundaries */ +extern const void __nosave_begin, __nosave_end; + +#endif /* __ASM_SUSPEND_H */ diff --git a/arch/mips/include/asm/txx9/dmac.h b/arch/mips/include/asm/txx9/dmac.h new file mode 100644 index 0000000..5e9151f --- /dev/null +++ b/arch/mips/include/asm/txx9/dmac.h @@ -0,0 +1,51 @@ +/* + * TXx9 SoC DMA Controller + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ASM_TXX9_DMAC_H +#define __ASM_TXX9_DMAC_H + +#include <linux/dmaengine.h> + +#define TXX9_DMA_MAX_NR_CHANNELS 4 + +/** + * struct txx9dmac_platform_data - Controller configuration parameters + * @memcpy_chan: Channel used for DMA_MEMCPY + * @have_64bit_regs: DMAC has 64-bit registers + */ +struct txx9dmac_platform_data { + int memcpy_chan; + bool have_64bit_regs; +}; + +/** + * struct txx9dmac_chan_platform_data - Channel configuration parameters + * @dmac_dev: A platform device for DMAC + */ +struct txx9dmac_chan_platform_data { + struct platform_device *dmac_dev; }; + +/** + * struct txx9dmac_slave - Controller-specific information about a slave + * @tx_reg: physical address of data register used for + * memory-to-peripheral transfers + * @rx_reg: physical address of data register used for + * peripheral-to-memory transfers + * @reg_width: peripheral register width + */ +struct txx9dmac_slave { + u64 tx_reg; + u64 rx_reg; + unsigned int reg_width; +}; + +void txx9_dmac_init(int id, unsigned long baseaddr, int irq, + const struct txx9dmac_platform_data *pdata); + +#endif /* __ASM_TXX9_DMAC_H */ diff --git a/arch/mips/include/asm/txx9/generic.h b/arch/mips/include/asm/txx9/generic.h index 9cde009..827dc22 100644 --- a/arch/mips/include/asm/txx9/generic.h +++ b/arch/mips/include/asm/txx9/generic.h @@ -91,4 +91,10 @@ void txx9_7segled_init(unsigned int num, void (*putc)(unsigned int pos, unsigned char val)); int txx9_7segled_putc(unsigned int pos, char c); +void __init txx9_aclc_init(unsigned long baseaddr, int irq, + unsigned int dmac_id, + unsigned int dma_chan_out, + unsigned int dma_chan_in); +void __init txx9_sramc_init(struct resource *r); + #endif /* __ASM_TXX9_GENERIC_H */ diff --git a/arch/mips/include/asm/txx9/tx4927.h b/arch/mips/include/asm/txx9/tx4927.h index 7d813f1..18c98c5 100644 --- a/arch/mips/include/asm/txx9/tx4927.h +++ b/arch/mips/include/asm/txx9/tx4927.h @@ -41,6 +41,7 @@ #define TX4927_SDRAMC_REG (TX4927_REG_BASE + 0x8000) #define TX4927_EBUSC_REG (TX4927_REG_BASE + 0x9000) +#define TX4927_DMA_REG (TX4927_REG_BASE + 0xb000) #define TX4927_PCIC_REG (TX4927_REG_BASE + 0xd000) #define TX4927_CCFG_REG (TX4927_REG_BASE + 0xe000) #define TX4927_IRC_REG (TX4927_REG_BASE + 0xf600) @@ -49,6 +50,7 @@ #define TX4927_NR_SIO 2 #define TX4927_SIO_REG(ch) (TX4927_REG_BASE + 0xf300 + (ch) * 0x100) #define TX4927_PIO_REG (TX4927_REG_BASE + 0xf500) +#define TX4927_ACLC_REG (TX4927_REG_BASE + 0xf700) #define TX4927_IR_ECCERR 0 #define TX4927_IR_WTOERR 1 @@ -265,5 +267,7 @@ int tx4927_pciclk66_setup(void); void tx4927_setup_pcierr_irq(void); void tx4927_irq_init(void); void tx4927_mtd_init(int ch); +void tx4927_dmac_init(int memcpy_chan); +void tx4927_aclc_init(unsigned int dma_chan_out, unsigned int dma_chan_in); #endif /* __ASM_TXX9_TX4927_H */ diff --git a/arch/mips/include/asm/txx9/tx4938.h 
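The txx9dmac platform data above is what board code hands to txx9_dmac_init(); each channel device later finds its controller through the dmac_dev back-pointer. A hedged sketch of the board-side call; the base address, IRQ and channel number are placeholders, not real TX49xx values:

static void __init example_board_dmac_setup(void)
{
	struct txx9dmac_platform_data pdata = {
		.memcpy_chan     = 3,	/* reserve channel 3 for DMA_MEMCPY */
		.have_64bit_regs = true,
	};

	/* controller id 0; placeholder base address and IRQ */
	txx9_dmac_init(0, 0x1ff00800, 17, &pdata);
}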
b/arch/mips/include/asm/txx9/tx4938.h index cd8bc20..8a178f1 100644 --- a/arch/mips/include/asm/txx9/tx4938.h +++ b/arch/mips/include/asm/txx9/tx4938.h @@ -305,5 +305,8 @@ struct tx4938ide_platform_info { }; void tx4938_ata_init(unsigned int irq, unsigned int shift, int tune); +void tx4938_dmac_init(int memcpy_chan0, int memcpy_chan1); +void tx4938_aclc_init(void); +void tx4938_sramc_init(void); #endif diff --git a/arch/mips/include/asm/txx9/tx4939.h b/arch/mips/include/asm/txx9/tx4939.h index f02c50b..d4f342c 100644 --- a/arch/mips/include/asm/txx9/tx4939.h +++ b/arch/mips/include/asm/txx9/tx4939.h @@ -45,6 +45,8 @@ #define TX4939_RTC_REG (TX4939_REG_BASE + 0xfb00) #define TX4939_CIR_REG (TX4939_REG_BASE + 0xfc00) +#define TX4939_RNG_REG (TX4939_CRYPTO_REG + 0xb0) + struct tx4939_le_reg { __u32 r; __u32 unused; @@ -544,5 +546,9 @@ void tx4939_ata_init(void); void tx4939_rtc_init(void); void tx4939_ndfmc_init(unsigned int hold, unsigned int spw, unsigned char ch_mask, unsigned char wide_mask); +void tx4939_dmac_init(int memcpy_chan0, int memcpy_chan1); +void tx4939_aclc_init(void); +void tx4939_sramc_init(void); +void tx4939_rng_init(void); #endif /* __ASM_TXX9_TX4939_H */ diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index c901c22..8d006ec 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -14,6 +14,7 @@ #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/kbuild.h> +#include <linux/suspend.h> #include <asm/ptrace.h> #include <asm/processor.h> @@ -326,3 +327,15 @@ void output_octeon_cop2_state_defines(void) BLANK(); } #endif + +#ifdef CONFIG_HIBERNATION +void output_pbe_defines(void) +{ + COMMENT(" Linux struct pbe offsets. "); + OFFSET(PBE_ADDRESS, pbe, address); + OFFSET(PBE_ORIG_ADDRESS, pbe, orig_address); + OFFSET(PBE_NEXT, pbe, next); + DEFINE(PBE_SIZE, sizeof(struct pbe)); + BLANK(); +} +#endif diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c index 2e911e3..0037f21 100644 --- a/arch/mips/kernel/cevt-txx9.c +++ b/arch/mips/kernel/cevt-txx9.c @@ -20,22 +20,29 @@ #define TIMER_CCD 0 /* 1/2 */ #define TIMER_CLK(imclk) ((imclk) / (2 << TIMER_CCD)) -static struct txx9_tmr_reg __iomem *txx9_cs_tmrptr; +struct txx9_clocksource { + struct clocksource cs; + struct txx9_tmr_reg __iomem *tmrptr; +}; static cycle_t txx9_cs_read(struct clocksource *cs) { - return __raw_readl(&txx9_cs_tmrptr->trr); + struct txx9_clocksource *txx9_cs = + container_of(cs, struct txx9_clocksource, cs); + return __raw_readl(&txx9_cs->tmrptr->trr); } /* Use 1 bit smaller width to use full bits in that width */ #define TXX9_CLOCKSOURCE_BITS (TXX9_TIMER_BITS - 1) -static struct clocksource txx9_clocksource = { - .name = "TXx9", - .rating = 200, - .read = txx9_cs_read, - .mask = CLOCKSOURCE_MASK(TXX9_CLOCKSOURCE_BITS), - .flags = CLOCK_SOURCE_IS_CONTINUOUS, +static struct txx9_clocksource txx9_clocksource = { + .cs = { + .name = "TXx9", + .rating = 200, + .read = txx9_cs_read, + .mask = CLOCKSOURCE_MASK(TXX9_CLOCKSOURCE_BITS), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + }, }; void __init txx9_clocksource_init(unsigned long baseaddr, @@ -43,8 +50,8 @@ void __init txx9_clocksource_init(unsigned long baseaddr, { struct txx9_tmr_reg __iomem *tmrptr; - clocksource_set_clock(&txx9_clocksource, TIMER_CLK(imbusclk)); - clocksource_register(&txx9_clocksource); + clocksource_set_clock(&txx9_clocksource.cs, TIMER_CLK(imbusclk)); + clocksource_register(&txx9_clocksource.cs); tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg)); 
__raw_writel(TCR_BASE, &tmrptr->tcr); @@ -53,10 +60,13 @@ void __init txx9_clocksource_init(unsigned long baseaddr, __raw_writel(TXx9_TMITMR_TZCE, &tmrptr->itmr); __raw_writel(1 << TXX9_CLOCKSOURCE_BITS, &tmrptr->cpra); __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr); - txx9_cs_tmrptr = tmrptr; + txx9_clocksource.tmrptr = tmrptr; } -static struct txx9_tmr_reg __iomem *txx9_tmrptr; +struct txx9_clock_event_device { + struct clock_event_device cd; + struct txx9_tmr_reg __iomem *tmrptr; +}; static void txx9tmr_stop_and_clear(struct txx9_tmr_reg __iomem *tmrptr) { @@ -69,7 +79,9 @@ static void txx9tmr_stop_and_clear(struct txx9_tmr_reg __iomem *tmrptr) static void txx9tmr_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { - struct txx9_tmr_reg __iomem *tmrptr = txx9_tmrptr; + struct txx9_clock_event_device *txx9_cd = + container_of(evt, struct txx9_clock_event_device, cd); + struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr; txx9tmr_stop_and_clear(tmrptr); switch (mode) { @@ -99,7 +111,9 @@ static void txx9tmr_set_mode(enum clock_event_mode mode, static int txx9tmr_set_next_event(unsigned long delta, struct clock_event_device *evt) { - struct txx9_tmr_reg __iomem *tmrptr = txx9_tmrptr; + struct txx9_clock_event_device *txx9_cd = + container_of(evt, struct txx9_clock_event_device, cd); + struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr; txx9tmr_stop_and_clear(tmrptr); /* start timer */ @@ -108,18 +122,22 @@ static int txx9tmr_set_next_event(unsigned long delta, return 0; } -static struct clock_event_device txx9tmr_clock_event_device = { - .name = "TXx9", - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .rating = 200, - .set_mode = txx9tmr_set_mode, - .set_next_event = txx9tmr_set_next_event, +static struct txx9_clock_event_device txx9_clock_event_device = { + .cd = { + .name = "TXx9", + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .rating = 200, + .set_mode = txx9tmr_set_mode, + .set_next_event = txx9tmr_set_next_event, + }, }; static irqreturn_t txx9tmr_interrupt(int irq, void *dev_id) { - struct clock_event_device *cd = &txx9tmr_clock_event_device; - struct txx9_tmr_reg __iomem *tmrptr = txx9_tmrptr; + struct txx9_clock_event_device *txx9_cd = dev_id; + struct clock_event_device *cd = &txx9_cd->cd; + struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr; __raw_writel(0, &tmrptr->tisr); /* ack interrupt */ cd->event_handler(cd); @@ -130,19 +148,20 @@ static struct irqaction txx9tmr_irq = { .handler = txx9tmr_interrupt, .flags = IRQF_DISABLED | IRQF_PERCPU, .name = "txx9tmr", + .dev_id = &txx9_clock_event_device, }; void __init txx9_clockevent_init(unsigned long baseaddr, int irq, unsigned int imbusclk) { - struct clock_event_device *cd = &txx9tmr_clock_event_device; + struct clock_event_device *cd = &txx9_clock_event_device.cd; struct txx9_tmr_reg __iomem *tmrptr; tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg)); txx9tmr_stop_and_clear(tmrptr); __raw_writel(TIMER_CCD, &tmrptr->ccdr); __raw_writel(0, &tmrptr->itmr); - txx9_tmrptr = tmrptr; + txx9_clock_event_device.tmrptr = tmrptr; clockevent_set_clock(cd, TIMER_CLK(imbusclk)); cd->max_delta_ns = diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 5f5af7d..37d51cd 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c @@ -924,6 +924,7 @@ void ipi_decode(struct smtc_ipi *pipi) int irq = MIPS_CPU_IRQ_BASE + 1; smtc_ipi_nq(&freeIPIq, pipi); + switch (type_copy) { case SMTC_CLOCK_TICK: irq_enter(); diff --git a/arch/mips/kernel/traps.c 
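The cevt-txx9.c rework above trades file-scope register pointers for per-instance state: the timer registers travel inside a wrapper struct, and every callback recovers the wrapper from the embedded clock_event_device with container_of(). The idiom in isolation, with example_* names standing in for any driver:

struct example_timer {
	struct clock_event_device cd;	/* embedded; this is what callbacks see */
	void __iomem *regs;		/* per-instance private data */
};

static int example_set_next_event(unsigned long delta,
				  struct clock_event_device *evt)
{
	/* subtract the member offset to get back to the wrapper */
	struct example_timer *t = container_of(evt, struct example_timer, cd);

	__raw_writel(delta, t->regs);	/* touches only this instance */
	return 0;
}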
b/arch/mips/kernel/traps.c index e83da17..08f1edf 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1502,7 +1502,7 @@ void __cpuinit per_cpu_trap_init(void) status_set); if (cpu_has_mips_r2) { - unsigned int enable = 0x0000000f; + unsigned int enable = 0x0000000f | cpu_hwrena_impl_bits; if (!noulri && cpu_has_userlocal) enable |= (1 << 29); @@ -1510,10 +1510,6 @@ void __cpuinit per_cpu_trap_init(void) write_c0_hwrena(enable); } -#ifdef CONFIG_CPU_CAVIUM_OCTEON - write_c0_hwrena(0xc000000f); /* Octeon has register 30 and 31 */ -#endif - #ifdef CONFIG_MIPS_MT_SMTC if (!secondaryTC) { #endif /* CONFIG_MIPS_MT_SMTC */ diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c index f69c6b5..6b3b1de9 100644 --- a/arch/mips/lib/delay.c +++ b/arch/mips/lib/delay.c @@ -43,7 +43,7 @@ void __udelay(unsigned long us) { unsigned int lpj = current_cpu_data.udelay_val; - __delay((us * 0x000010c7 * HZ * lpj) >> 32); + __delay((us * 0x000010c7ull * HZ * lpj) >> 32); } EXPORT_SYMBOL(__udelay); @@ -51,6 +51,6 @@ void __ndelay(unsigned long ns) { unsigned int lpj = current_cpu_data.udelay_val; - __delay((us * 0x00000005 * HZ * lpj) >> 32); + __delay((ns * 0x00000005ull * HZ * lpj) >> 32); } EXPORT_SYMBOL(__ndelay); diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile index d7ec955..f0e4355 100644 --- a/arch/mips/mm/Makefile +++ b/arch/mips/mm/Makefile @@ -8,6 +8,7 @@ obj-y += cache.o dma-default.o extable.o fault.o \ obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o obj-$(CONFIG_64BIT) += pgtable-64.o obj-$(CONFIG_HIGHMEM) += highmem.o +obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_CPU_LOONGSON2) += c-r4k.o cex-gen.o tlb-r4k.o obj-$(CONFIG_CPU_MIPS32) += c-r4k.o cex-gen.o tlb-r4k.o diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 171951d..71fe4cb 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -100,6 +100,12 @@ static inline void r4k_blast_dcache_page_dc32(unsigned long addr) blast_dcache32_page(addr); } +static inline void r4k_blast_dcache_page_dc64(unsigned long addr) +{ + R4600_HIT_CACHEOP_WAR_IMPL; + blast_dcache64_page(addr); +} + static void __cpuinit r4k_blast_dcache_page_setup(void) { unsigned long dc_lsize = cpu_dcache_line_size(); @@ -110,6 +116,8 @@ static void __cpuinit r4k_blast_dcache_page_setup(void) r4k_blast_dcache_page = blast_dcache16_page; else if (dc_lsize == 32) r4k_blast_dcache_page = r4k_blast_dcache_page_dc32; + else if (dc_lsize == 64) + r4k_blast_dcache_page = r4k_blast_dcache_page_dc64; } static void (* r4k_blast_dcache_page_indexed)(unsigned long addr); @@ -124,6 +132,8 @@ static void __cpuinit r4k_blast_dcache_page_indexed_setup(void) r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed; else if (dc_lsize == 32) r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed; + else if (dc_lsize == 64) + r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed; } static void (* r4k_blast_dcache)(void); @@ -138,6 +148,8 @@ static void __cpuinit r4k_blast_dcache_setup(void) r4k_blast_dcache = blast_dcache16; else if (dc_lsize == 32) r4k_blast_dcache = blast_dcache32; + else if (dc_lsize == 64) + r4k_blast_dcache = blast_dcache64; } /* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */ diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index 4fdb7f5..7e48e76 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -20,9 +20,10 @@ #include <dma-coherence.h> -static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr) +static inline unsigned long 
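The delay.c hunk above fixes two real bugs at once: __ndelay referenced the wrong variable (us instead of its ns argument), and both products were evaluated in 32 bits before the >> 32, discarding the high half. The constants are fixed-point reciprocals, 0x10c7 = ceil(2^32 / 10^6) and 0x5 = ceil(2^32 / 10^9), so (x * c * HZ * lpj) >> 32 approximates x * HZ * lpj / 10^6 (or / 10^9); the ull suffix forces a 64-bit product. The same computation in isolation:

/* loops to spin = us * loops_per_second / 10^6, in 32.32 fixed point */
static unsigned long example_udelay_loops(unsigned long us,
					  unsigned int lpj, unsigned int hz)
{
	/* 0x10c7 ~= 2^32 / 1000000, rounded up; hz * lpj = loops/second */
	return (us * 0x000010c7ull * hz * lpj) >> 32;
}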
dma_addr_to_virt(struct device *dev, + dma_addr_t dma_addr) { - unsigned long addr = plat_dma_addr_to_phys(dma_addr); + unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr); return (unsigned long)phys_to_virt(addr); } @@ -111,7 +112,7 @@ EXPORT_SYMBOL(dma_alloc_coherent); void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) { - plat_unmap_dma_mem(dev, dma_handle); + plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); free_pages((unsigned long) vaddr, get_order(size)); } @@ -122,7 +123,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr, { unsigned long addr = (unsigned long) vaddr; - plat_unmap_dma_mem(dev, dma_handle); + plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); if (!plat_device_is_coherent(dev)) addr = CAC_ADDR(addr); @@ -170,10 +171,10 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction direction) { if (cpu_is_noncoherent_r10000(dev)) - __dma_sync(dma_addr_to_virt(dma_addr), size, + __dma_sync(dma_addr_to_virt(dev, dma_addr), size, direction); - plat_unmap_dma_mem(dev, dma_addr); + plat_unmap_dma_mem(dev, dma_addr, size, direction); } EXPORT_SYMBOL(dma_unmap_single); @@ -232,7 +233,7 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, if (addr) __dma_sync(addr, sg->length, direction); } - plat_unmap_dma_mem(dev, sg->dma_address); + plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction); } } @@ -246,7 +247,7 @@ void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, if (cpu_is_noncoherent_r10000(dev)) { unsigned long addr; - addr = dma_addr_to_virt(dma_handle); + addr = dma_addr_to_virt(dev, dma_handle); __dma_sync(addr, size, direction); } } @@ -262,7 +263,7 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, if (!plat_device_is_coherent(dev)) { unsigned long addr; - addr = dma_addr_to_virt(dma_handle); + addr = dma_addr_to_virt(dev, dma_handle); __dma_sync(addr, size, direction); } } @@ -277,7 +278,7 @@ void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, if (cpu_is_noncoherent_r10000(dev)) { unsigned long addr; - addr = dma_addr_to_virt(dma_handle); + addr = dma_addr_to_virt(dev, dma_handle); __dma_sync(addr + offset, size, direction); } } @@ -293,7 +294,7 @@ void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, if (!plat_device_is_coherent(dev)) { unsigned long addr; - addr = dma_addr_to_virt(dma_handle); + addr = dma_addr_to_virt(dev, dma_handle); __dma_sync(addr + offset, size, direction); } } diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c new file mode 100644 index 0000000..471c09a --- /dev/null +++ b/arch/mips/mm/hugetlbpage.c @@ -0,0 +1,101 @@ +/* + * MIPS Huge TLB Page Support for Kernel. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com> + * Copyright 2005, Embedded Alley Solutions, Inc. + * Matt Porter <mporter@embeddedalley.com> + * Copyright (C) 2008, 2009 Cavium Networks, Inc. 
+ */ + +#include <linux/init.h> +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/hugetlb.h> +#include <linux/pagemap.h> +#include <linux/smp_lock.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/sysctl.h> +#include <asm/mman.h> +#include <asm/tlb.h> +#include <asm/tlbflush.h> + +pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, + unsigned long sz) +{ + pgd_t *pgd; + pud_t *pud; + pte_t *pte = NULL; + + pgd = pgd_offset(mm, addr); + pud = pud_alloc(mm, pgd, addr); + if (pud) + pte = (pte_t *)pmd_alloc(mm, pud, addr); + + return pte; +} + +pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd = NULL; + + pgd = pgd_offset(mm, addr); + if (pgd_present(*pgd)) { + pud = pud_offset(pgd, addr); + if (pud_present(*pud)) + pmd = pmd_offset(pud, addr); + } + return (pte_t *) pmd; +} + +int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) +{ + return 0; +} + +/* + * This function checks for proper alignment of input addr and len parameters. + */ +int is_aligned_hugepage_range(unsigned long addr, unsigned long len) +{ + if (len & ~HPAGE_MASK) + return -EINVAL; + if (addr & ~HPAGE_MASK) + return -EINVAL; + return 0; +} + +struct page * +follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) +{ + return ERR_PTR(-EINVAL); +} + +int pmd_huge(pmd_t pmd) +{ + return (pmd_val(pmd) & _PAGE_HUGE) != 0; +} + +int pud_huge(pud_t pud) +{ + return (pud_val(pud) & _PAGE_HUGE) != 0; +} + +struct page * +follow_huge_pmd(struct mm_struct *mm, unsigned long address, + pmd_t *pmd, int write) +{ + struct page *page; + + page = pte_page(*(pte_t *)pmd); + if (page) + page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT); + return page; +} + diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index 892be42..f60fe51 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -11,6 +11,7 @@ #include <linux/init.h> #include <linux/sched.h> #include <linux/mm.h> +#include <linux/hugetlb.h> #include <asm/cpu.h> #include <asm/bootinfo.h> @@ -295,21 +296,41 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) pudp = pud_offset(pgdp, address); pmdp = pmd_offset(pudp, address); idx = read_c0_index(); - ptep = pte_offset_map(pmdp, address); +#ifdef CONFIG_HUGETLB_PAGE + /* this could be a huge page */ + if (pmd_huge(*pmdp)) { + unsigned long lo; + write_c0_pagemask(PM_HUGE_MASK); + ptep = (pte_t *)pmdp; + lo = pte_val(*ptep) >> 6; + write_c0_entrylo0(lo); + write_c0_entrylo1(lo + (HPAGE_SIZE >> 7)); + + mtc0_tlbw_hazard(); + if (idx < 0) + tlb_write_random(); + else + tlb_write_indexed(); + write_c0_pagemask(PM_DEFAULT_MASK); + } else +#endif + { + ptep = pte_offset_map(pmdp, address); #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) - write_c0_entrylo0(ptep->pte_high); - ptep++; - write_c0_entrylo1(ptep->pte_high); + write_c0_entrylo0(ptep->pte_high); + ptep++; + write_c0_entrylo1(ptep->pte_high); #else - write_c0_entrylo0(pte_val(*ptep++) >> 6); - write_c0_entrylo1(pte_val(*ptep) >> 6); + write_c0_entrylo0(pte_val(*ptep++) >> 6); + write_c0_entrylo1(pte_val(*ptep) >> 6); #endif - mtc0_tlbw_hazard(); - if (idx < 0) - tlb_write_random(); - else - tlb_write_indexed(); + mtc0_tlbw_hazard(); + if (idx < 0) + tlb_write_random(); + else + tlb_write_indexed(); + } tlbw_use_hazard(); FLUSH_ITLB_VM(vma); EXIT_CRITICAL(flags); diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 0615b62..8f606ea 100644 --- a/arch/mips/mm/tlbex.c +++ 
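In the __update_tlb() huge-page branch above, a single TLB entry maps the whole huge page as an even/odd pair: EntryLo0 gets the first half, EntryLo1 the second. Assuming the usual R4000-style encoding, where the PFN counts 4 KB frames and sits at bit 6 of EntryLo, advancing by half a huge page adds ((HPAGE_SIZE / 2) >> 12) << 6, which simplifies to the HPAGE_SIZE >> 7 increment the code uses:

/* Sketch of the EntryLo pairing under the encoding assumed above. */
static unsigned long example_huge_entrylo1(unsigned long entrylo0)
{
	/* ((HPAGE_SIZE / 2) >> 12) << 6 == HPAGE_SIZE >> 7 */
	return entrylo0 + (HPAGE_SIZE >> 7);
}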
b/arch/mips/mm/tlbex.c @@ -6,8 +6,9 @@ * Synthesize TLB refill handlers at runtime. * * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer - * Copyright (C) 2005, 2007 Maciej W. Rozycki + * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 2008, 2009 Cavium Networks, Inc. * * ... and the days got worse and worse and now you see * I've gone completely out of my mind. @@ -19,6 +20,7 @@ * (Condolences to Napoleon XIV) */ +#include <linux/bug.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> @@ -82,6 +84,9 @@ enum label_id { label_nopage_tlbm, label_smp_pgtable_change, label_r3000_write_probe_fail, +#ifdef CONFIG_HUGETLB_PAGE + label_tlb_huge_update, +#endif }; UASM_L_LA(_second_part) @@ -98,6 +103,9 @@ UASM_L_LA(_nopage_tlbs) UASM_L_LA(_nopage_tlbm) UASM_L_LA(_smp_pgtable_change) UASM_L_LA(_r3000_write_probe_fail) +#ifdef CONFIG_HUGETLB_PAGE +UASM_L_LA(_tlb_huge_update) +#endif /* * For debug purposes. @@ -125,6 +133,7 @@ static inline void dump_handler(const u32 *handler, int count) #define C0_TCBIND 2, 2 #define C0_ENTRYLO1 3, 0 #define C0_CONTEXT 4, 0 +#define C0_PAGEMASK 5, 0 #define C0_BADVADDR 8, 0 #define C0_ENTRYHI 10, 0 #define C0_EPC 14, 0 @@ -258,7 +267,8 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l, } if (cpu_has_mips_r2) { - uasm_i_ehb(p); + if (cpu_has_mips_r2_exec_hazard) + uasm_i_ehb(p); tlbw(p); return; } @@ -310,7 +320,6 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l, case CPU_BCM3302: case CPU_BCM4710: case CPU_LOONGSON2: - case CPU_CAVIUM_OCTEON: case CPU_R5500: if (m4kc_tlbp_war()) uasm_i_nop(p); @@ -382,6 +391,98 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l, } } +#ifdef CONFIG_HUGETLB_PAGE +static __cpuinit void build_huge_tlb_write_entry(u32 **p, + struct uasm_label **l, + struct uasm_reloc **r, + unsigned int tmp, + enum tlb_write_entry wmode) +{ + /* Set huge page tlb entry size */ + uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16); + uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff); + uasm_i_mtc0(p, tmp, C0_PAGEMASK); + + build_tlb_write_entry(p, l, r, wmode); + + /* Reset default page size */ + if (PM_DEFAULT_MASK >> 16) { + uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16); + uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff); + uasm_il_b(p, r, label_leave); + uasm_i_mtc0(p, tmp, C0_PAGEMASK); + } else if (PM_DEFAULT_MASK) { + uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK); + uasm_il_b(p, r, label_leave); + uasm_i_mtc0(p, tmp, C0_PAGEMASK); + } else { + uasm_il_b(p, r, label_leave); + uasm_i_mtc0(p, 0, C0_PAGEMASK); + } +} + +/* + * Check if Huge PTE is present, if so then jump to LABEL. + */ +static void __cpuinit +build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp, + unsigned int pmd, int lid) +{ + UASM_i_LW(p, tmp, 0, pmd); + uasm_i_andi(p, tmp, tmp, _PAGE_HUGE); + uasm_il_bnez(p, r, tmp, lid); +} + +static __cpuinit void build_huge_update_entries(u32 **p, + unsigned int pte, + unsigned int tmp) +{ + int small_sequence; + + /* + * A huge PTE describes an area the size of the + * configured huge page size. This is twice the + * size of the large TLB entry we intend to use. + * A TLB entry half the size of the configured + * huge page size is configured into entrylo0 + * and entrylo1 to cover the contiguous huge PTE + * address space. + */ + small_sequence = (HPAGE_SIZE >> 7) < 0x10000; + + /* We can clobber tmp. 
It isn't used after this.*/ + if (!small_sequence) + uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16)); + + UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */ + uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* load it */ + /* convert to entrylo1 */ + if (small_sequence) + UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7); + else + UASM_i_ADDU(p, pte, pte, tmp); + + uasm_i_mtc0(p, pte, C0_ENTRYLO1); /* load it */ +} + +static __cpuinit void build_huge_handler_tail(u32 **p, + struct uasm_reloc **r, + struct uasm_label **l, + unsigned int pte, + unsigned int ptr) +{ +#ifdef CONFIG_SMP + UASM_i_SC(p, pte, 0, ptr); + uasm_il_beqz(p, r, pte, label_tlb_huge_update); + UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */ +#else + UASM_i_SW(p, pte, 0, ptr); +#endif + build_huge_update_entries(p, pte, ptr); + build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed); +} +#endif /* CONFIG_HUGETLB_PAGE */ + #ifdef CONFIG_64BIT /* * TMP and PTR are scratch. @@ -649,6 +750,14 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp, #endif } +/* + * For a 64-bit kernel, we are using the 64-bit XTLB refill exception + * because EXL == 0. If we wrap, we can also use the 32 instruction + * slots before the XTLB refill exception handler which belong to the + * unused TLB refill exception. + */ +#define MIPS64_REFILL_INSNS 32 + static void __cpuinit build_r4000_tlb_refill_handler(void) { u32 *p = tlb_handler; @@ -680,12 +789,23 @@ static void __cpuinit build_r4000_tlb_refill_handler(void) build_get_pgde32(&p, K0, K1); /* get pgd in K1 */ #endif +#ifdef CONFIG_HUGETLB_PAGE + build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update); +#endif + build_get_ptep(&p, K0, K1); build_update_entries(&p, K0, K1); build_tlb_write_entry(&p, &l, &r, tlb_random); uasm_l_leave(&l, p); uasm_i_eret(&p); /* return from trap */ +#ifdef CONFIG_HUGETLB_PAGE + uasm_l_tlb_huge_update(&l, p); + UASM_i_LW(&p, K0, 0, K1); + build_huge_update_entries(&p, K0, K1); + build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random); +#endif + #ifdef CONFIG_64BIT build_get_pgd_vmalloc64(&p, &l, &r, K0, K1); #endif @@ -702,9 +822,10 @@ static void __cpuinit build_r4000_tlb_refill_handler(void) if ((p - tlb_handler) > 64) panic("TLB refill handler space exceeded"); #else - if (((p - tlb_handler) > 63) - || (((p - tlb_handler) > 61) - && uasm_insn_has_bdelay(relocs, tlb_handler + 29))) + if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1) + || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3) + && uasm_insn_has_bdelay(relocs, + tlb_handler + MIPS64_REFILL_INSNS - 3))) panic("TLB refill handler space exceeded"); #endif @@ -717,39 +838,74 @@ static void __cpuinit build_r4000_tlb_refill_handler(void) uasm_copy_handler(relocs, labels, tlb_handler, p, f); final_len = p - tlb_handler; #else /* CONFIG_64BIT */ - f = final_handler + 32; - if ((p - tlb_handler) <= 32) { + f = final_handler + MIPS64_REFILL_INSNS; + if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) { /* Just copy the handler. */ uasm_copy_handler(relocs, labels, tlb_handler, p, f); final_len = p - tlb_handler; } else { - u32 *split = tlb_handler + 30; +#if defined(CONFIG_HUGETLB_PAGE) + const enum label_id ls = label_tlb_huge_update; +#elif defined(MODULE_START) + const enum label_id ls = label_module_alloc; +#else + const enum label_id ls = label_vmalloc; +#endif + u32 *split; + int ov = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++) + ; + BUG_ON(i == ARRAY_SIZE(labels)); + split = labels[i].addr; /* - * Find the split point. 
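The small_sequence flag above is an immediate-range check: the EntryLo delta HPAGE_SIZE >> 7 can be folded into a single ADDIU only while it fits the instruction's 16-bit immediate field; otherwise the constant is first built in a scratch register with LUI and added with ADDU. What the emitted instructions compute, written as C for reading along (pte_lo is the already-converted EntryLo value):

static void example_huge_update_entries(unsigned long pte_lo)
{
	unsigned long step = HPAGE_SIZE >> 7;	/* EntryLo delta between halves */

	write_c0_entrylo0(pte_lo);
	/* emitted as one ADDIU when step < 0x10000, else LUI + ADDU */
	write_c0_entrylo1(pte_lo + step);
}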
+ * See if we have overflown one way or the other. */ - if (uasm_insn_has_bdelay(relocs, split - 1)) - split--; - + if (split > tlb_handler + MIPS64_REFILL_INSNS || + split < p - MIPS64_REFILL_INSNS) + ov = 1; + + if (ov) { + /* + * Split two instructions before the end. One + * for the branch and one for the instruction + * in the delay slot. + */ + split = tlb_handler + MIPS64_REFILL_INSNS - 2; + + /* + * If the branch would fall in a delay slot, + * we must back up an additional instruction + * so that it is no longer in a delay slot. + */ + if (uasm_insn_has_bdelay(relocs, split - 1)) + split--; + } /* Copy first part of the handler. */ uasm_copy_handler(relocs, labels, tlb_handler, split, f); f += split - tlb_handler; - /* Insert branch. */ - uasm_l_split(&l, final_handler); - uasm_il_b(&f, &r, label_split); - if (uasm_insn_has_bdelay(relocs, split)) - uasm_i_nop(&f); - else { - uasm_copy_handler(relocs, labels, split, split + 1, f); - uasm_move_labels(labels, f, f + 1, -1); - f++; - split++; + if (ov) { + /* Insert branch. */ + uasm_l_split(&l, final_handler); + uasm_il_b(&f, &r, label_split); + if (uasm_insn_has_bdelay(relocs, split)) + uasm_i_nop(&f); + else { + uasm_copy_handler(relocs, labels, + split, split + 1, f); + uasm_move_labels(labels, f, f + 1, -1); + f++; + split++; + } } /* Copy the rest of the handler. */ uasm_copy_handler(relocs, labels, split, p, final_handler); - final_len = (f - (final_handler + 32)) + (p - split); + final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) + + (p - split); } #endif /* CONFIG_64BIT */ @@ -782,7 +938,7 @@ u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned; u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned; static void __cpuinit -iPTE_LW(u32 **p, struct uasm_label **l, unsigned int pte, unsigned int ptr) +iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) { #ifdef CONFIG_SMP # ifdef CONFIG_64BIT_PHYS_ADDR @@ -862,13 +1018,13 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, * with it's original value. */ static void __cpuinit -build_pte_present(u32 **p, struct uasm_label **l, struct uasm_reloc **r, +build_pte_present(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, enum label_id lid) { uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); uasm_il_bnez(p, r, pte, lid); - iPTE_LW(p, l, pte, ptr); + iPTE_LW(p, pte, ptr); } /* Make PTE valid, store result in PTR. */ @@ -886,13 +1042,13 @@ build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte, * restore PTE with value from PTR when done. */ static void __cpuinit -build_pte_writable(u32 **p, struct uasm_label **l, struct uasm_reloc **r, +build_pte_writable(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, enum label_id lid) { uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE); uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE); uasm_il_bnez(p, r, pte, lid); - iPTE_LW(p, l, pte, ptr); + iPTE_LW(p, pte, ptr); } /* Make PTE writable, update software status bits as well, then store @@ -913,12 +1069,12 @@ build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte, * restore PTE with value from PTR when done. 
*/ static void __cpuinit -build_pte_modifiable(u32 **p, struct uasm_label **l, struct uasm_reloc **r, +build_pte_modifiable(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, enum label_id lid) { uasm_i_andi(p, pte, pte, _PAGE_WRITE); uasm_il_beqz(p, r, pte, lid); - iPTE_LW(p, l, pte, ptr); + iPTE_LW(p, pte, ptr); } /* @@ -994,7 +1150,7 @@ static void __cpuinit build_r3000_tlb_load_handler(void) memset(relocs, 0, sizeof(relocs)); build_r3000_tlbchange_handler_head(&p, K0, K1); - build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl); + build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); uasm_i_nop(&p); /* load delay */ build_make_valid(&p, &r, K0, K1); build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); @@ -1024,7 +1180,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void) memset(relocs, 0, sizeof(relocs)); build_r3000_tlbchange_handler_head(&p, K0, K1); - build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs); + build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs); uasm_i_nop(&p); /* load delay */ build_make_write(&p, &r, K0, K1); build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); @@ -1054,7 +1210,7 @@ static void __cpuinit build_r3000_tlb_modify_handler(void) memset(relocs, 0, sizeof(relocs)); build_r3000_tlbchange_handler_head(&p, K0, K1); - build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm); + build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm); uasm_i_nop(&p); /* load delay */ build_make_write(&p, &r, K0, K1); build_r3000_pte_reload_tlbwi(&p, K0, K1); @@ -1087,6 +1243,15 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, build_get_pgde32(p, pte, ptr); /* get pgd in ptr */ #endif +#ifdef CONFIG_HUGETLB_PAGE + /* + * For huge tlb entries, pmd doesn't contain an address but + * instead contains the tlb pte. Check the PAGE_HUGE bit and + * see if we need to jump to huge tlb processing. + */ + build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update); +#endif + UASM_i_MFC0(p, pte, C0_BADVADDR); UASM_i_LW(p, ptr, 0, ptr); UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); @@ -1096,7 +1261,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, #ifdef CONFIG_SMP uasm_l_smp_pgtable_change(l, *p); #endif - iPTE_LW(p, l, pte, ptr); /* get even pte */ + iPTE_LW(p, pte, ptr); /* get even pte */ if (!m4kc_tlbp_war()) build_tlb_probe_entry(p); } @@ -1138,12 +1303,25 @@ static void __cpuinit build_r4000_tlb_load_handler(void) } build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); - build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl); + build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); if (m4kc_tlbp_war()) build_tlb_probe_entry(&p); build_make_valid(&p, &r, K0, K1); build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); +#ifdef CONFIG_HUGETLB_PAGE + /* + * This is the entry point when build_r4000_tlbchange_handler_head + * spots a huge page. 
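Each of the huge-page tails added to the load, store and modify handlers follows the same shape as the normal fast path: reload the PTE that lives in the PMD slot, re-run the permission check, set the status bits with one ORI, then write the indexed TLB entry. For the load handler the generated code is roughly this C sketch, where pmdp stands for the pointer left in the scratch register:

static int example_huge_tlbl_path(pmd_t *pmdp)
{
	unsigned long entry = pmd_val(*pmdp);	/* the huge PTE lives in the PMD */

	if ((entry & (_PAGE_PRESENT | _PAGE_READ)) !=
	    (_PAGE_PRESENT | _PAGE_READ))
		return -EFAULT;		/* label_nopage_tlbl: hand off to do_page_fault */

	entry |= _PAGE_ACCESSED | _PAGE_VALID;	/* the single uasm_i_ori */
	pmd_val(*pmdp) = entry;		/* plain SW, or an LL/SC retry loop on SMP */
	return 0;			/* then write the TLB entry and ERET */
}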
+ */ + uasm_l_tlb_huge_update(&l, p); + iPTE_LW(&p, K0, K1); + build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); + build_tlb_probe_entry(&p); + uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID)); + build_huge_handler_tail(&p, &r, &l, K0, K1); +#endif + uasm_l_nopage_tlbl(&l, p); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); uasm_i_nop(&p); @@ -1169,12 +1347,26 @@ static void __cpuinit build_r4000_tlb_store_handler(void) memset(relocs, 0, sizeof(relocs)); build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); - build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs); + build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs); if (m4kc_tlbp_war()) build_tlb_probe_entry(&p); build_make_write(&p, &r, K0, K1); build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); +#ifdef CONFIG_HUGETLB_PAGE + /* + * This is the entry point when + * build_r4000_tlbchange_handler_head spots a huge page. + */ + uasm_l_tlb_huge_update(&l, p); + iPTE_LW(&p, K0, K1); + build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs); + build_tlb_probe_entry(&p); + uasm_i_ori(&p, K0, K0, + _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); + build_huge_handler_tail(&p, &r, &l, K0, K1); +#endif + uasm_l_nopage_tlbs(&l, p); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_nop(&p); @@ -1200,13 +1392,27 @@ static void __cpuinit build_r4000_tlb_modify_handler(void) memset(relocs, 0, sizeof(relocs)); build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); - build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm); + build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm); if (m4kc_tlbp_war()) build_tlb_probe_entry(&p); /* Present and writable bits set, set accessed and dirty bits. */ build_make_write(&p, &r, K0, K1); build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); +#ifdef CONFIG_HUGETLB_PAGE + /* + * This is the entry point when + * build_r4000_tlbchange_handler_head spots a huge page. + */ + uasm_l_tlb_huge_update(&l, p); + iPTE_LW(&p, K0, K1); + build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm); + build_tlb_probe_entry(&p); + uasm_i_ori(&p, K0, K0, + _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); + build_huge_handler_tail(&p, &r, &l, K0, K1); +#endif + uasm_l_nopage_tlbm(&l, p); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_nop(&p); diff --git a/arch/mips/power/Makefile b/arch/mips/power/Makefile new file mode 100644 index 0000000..73d56b8 --- /dev/null +++ b/arch/mips/power/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_HIBERNATION) += cpu.o hibernate.o diff --git a/arch/mips/power/cpu.c b/arch/mips/power/cpu.c new file mode 100644 index 0000000..7995df4 --- /dev/null +++ b/arch/mips/power/cpu.c @@ -0,0 +1,43 @@ +/* + * Suspend support specific for mips. + * + * Licensed under the GPLv2 + * + * Copyright (C) 2009 Lemote Inc. 
& Institute of Computing Technology + * Author: Hu Hongbing <huhb@lemote.com> + * Wu Zhangjin <wuzj@lemote.com> + */ +#include <asm/suspend.h> +#include <asm/fpu.h> +#include <asm/dsp.h> + +static u32 saved_status; +struct pt_regs saved_regs; + +void save_processor_state(void) +{ + saved_status = read_c0_status(); + + if (is_fpu_owner()) + save_fp(current); + if (cpu_has_dsp) + save_dsp(current); +} + +void restore_processor_state(void) +{ + write_c0_status(saved_status); + + if (is_fpu_owner()) + restore_fp(current); + if (cpu_has_dsp) + restore_dsp(current); +} + +int pfn_is_nosave(unsigned long pfn) +{ + unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); + unsigned long nosave_end_pfn = PFN_UP(__pa(&__nosave_end)); + + return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn); +} diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S new file mode 100644 index 0000000..486bd3f --- /dev/null +++ b/arch/mips/power/hibernate.S @@ -0,0 +1,70 @@ +/* + * Hibernation support specific for mips - temporary page tables + * + * Licensed under the GPLv2 + * + * Copyright (C) 2009 Lemote Inc. & Institute of Computing Technology + * Author: Hu Hongbing <huhb@lemote.com> + * Wu Zhangjin <wuzj@lemote.com> + */ +#include <asm/asm-offsets.h> +#include <asm/regdef.h> +#include <asm/asm.h> + +.text LEAF(swsusp_arch_suspend) + PTR_LA t0, saved_regs + PTR_S ra, PT_R31(t0) + PTR_S sp, PT_R29(t0) + PTR_S fp, PT_R30(t0) + PTR_S gp, PT_R28(t0) + PTR_S s0, PT_R16(t0) + PTR_S s1, PT_R17(t0) + PTR_S s2, PT_R18(t0) + PTR_S s3, PT_R19(t0) + PTR_S s4, PT_R20(t0) + PTR_S s5, PT_R21(t0) + PTR_S s6, PT_R22(t0) + PTR_S s7, PT_R23(t0) + j swsusp_save +END(swsusp_arch_suspend) + +LEAF(swsusp_arch_resume) + PTR_L t0, restore_pblist +0: + PTR_L t1, PBE_ADDRESS(t0) /* source */ + PTR_L t2, PBE_ORIG_ADDRESS(t0) /* destination */ + PTR_ADDIU t3, t1, _PAGE_SIZE +1: + REG_L t8, (t1) + REG_S t8, (t2) + PTR_ADDIU t1, t1, SZREG + PTR_ADDIU t2, t2, SZREG + bne t1, t3, 1b + PTR_L t0, PBE_NEXT(t0) + bnez t0, 0b + /* flush caches to make sure context is in memory */ + PTR_L t0, __flush_cache_all + jalr t0 + /* flush tlb entries */ +#ifdef CONFIG_SMP + jal flush_tlb_all +#else + jal local_flush_tlb_all +#endif + PTR_LA t0, saved_regs + PTR_L ra, PT_R31(t0) + PTR_L sp, PT_R29(t0) + PTR_L fp, PT_R30(t0) + PTR_L gp, PT_R28(t0) + PTR_L s0, PT_R16(t0) + PTR_L s1, PT_R17(t0) + PTR_L s2, PT_R18(t0) + PTR_L s3, PT_R19(t0) + PTR_L s4, PT_R20(t0) + PTR_L s5, PT_R21(t0) + PTR_L s6, PT_R22(t0) + PTR_L s7, PT_R23(t0) + PTR_LI v0, 0x0 + jr ra +END(swsusp_arch_resume) diff --git a/arch/mips/rb532/irq.c b/arch/mips/rb532/irq.c index 53eeb5e..f078820 100644 --- a/arch/mips/rb532/irq.c +++ b/arch/mips/rb532/irq.c @@ -151,7 +151,8 @@ static void rb532_disable_irq(unsigned int irq_nr) mask |= intr_bit; WRITE_MASK(addr, mask); - if (group == GPIO_MAPPED_IRQ_GROUP) + /* There is a maximum of 14 GPIO interrupts */ + if (group == GPIO_MAPPED_IRQ_GROUP && irq_nr <= (GROUP4_IRQ_BASE + 13)) rb532_gpio_set_istat(0, irq_nr - GPIO_MAPPED_IRQ_BASE); /* @@ -174,7 +175,7 @@ static int rb532_set_type(unsigned int irq_nr, unsigned type) int gpio = irq_nr - GPIO_MAPPED_IRQ_BASE; int group = irq_to_group(irq_nr); - if (group != GPIO_MAPPED_IRQ_GROUP) + if (group != GPIO_MAPPED_IRQ_GROUP || irq_nr > (GROUP4_IRQ_BASE + 13)) return (type == IRQ_TYPE_LEVEL_HIGH) ? 
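swsusp_arch_resume above is a direct transliteration of the generic restore walk, which is why the asm-offsets.c hunk earlier had to export the struct pbe field offsets: restore_pblist is a linked list whose nodes pair each saved page copy with its original location. In C the loop reads:

static void example_restore_pages(void)
{
	struct pbe *p;

	for (p = restore_pblist; p; p = p->next)
		/* copy the saved page back over the original */
		memcpy(p->orig_address, p->address, PAGE_SIZE);

	__flush_cache_all();	/* restored image must reach memory */
	local_flush_tlb_all();	/* flush_tlb_all() on SMP */
}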
0 : -EINVAL; switch (type) { diff --git a/arch/mips/sibyte/Kconfig b/arch/mips/sibyte/Kconfig index 366b19d..3e639bd 100644 --- a/arch/mips/sibyte/Kconfig +++ b/arch/mips/sibyte/Kconfig @@ -75,6 +75,8 @@ config SIBYTE_SB1xxx_SOC select SWAP_IO_SPACE select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_64BIT_KERNEL + select CFE + select SYS_HAS_EARLY_PRINTK choice prompt "SiByte SOC Stepping" @@ -128,13 +130,6 @@ config SIBYTE_ENABLE_LDT_IF_PCI bool select SIBYTE_HAS_LDT if PCI -config SIMULATION - bool "Running under simulation" - depends on SIBYTE_SB1xxx_SOC - help - Build a kernel suitable for running under the GDB simulator. - Primarily adjusts the kernel's notion of time. - config SB1_CEX_ALWAYS_FATAL bool "All cache exceptions considered fatal (no recovery attempted)" depends on SIBYTE_SB1xxx_SOC @@ -143,34 +138,14 @@ config SB1_CERR_STALL bool "Stall (rather than panic) on fatal cache error" depends on SIBYTE_SB1xxx_SOC -config SIBYTE_CFE - bool "Booting from CFE" - depends on SIBYTE_SB1xxx_SOC - select CFE - select SYS_HAS_EARLY_PRINTK - help - Make use of the CFE API for enumerating available memory, - controlling secondary CPUs, and possibly console output. - config SIBYTE_CFE_CONSOLE bool "Use firmware console" - depends on SIBYTE_CFE + depends on SIBYTE_SB1xxx_SOC help Use the CFE API's console write routines during boot. Other console options (VT console, sb1250 duart console, etc.) should not be configured. -config SIBYTE_STANDALONE - bool - depends on SIBYTE_SB1xxx_SOC && !SIBYTE_CFE - select SYS_HAS_EARLY_PRINTK - default y - -config SIBYTE_STANDALONE_RAM_SIZE - int "Memory size (in megabytes)" - depends on SIBYTE_STANDALONE - default "32" - config SIBYTE_BUS_WATCHER bool "Support for Bus Watcher statistics" depends on SIBYTE_SB1xxx_SOC diff --git a/arch/mips/sibyte/cfe/Makefile b/arch/mips/sibyte/cfe/Makefile deleted file mode 100644 index 02b32e1..0000000 --- a/arch/mips/sibyte/cfe/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -lib-y = setup.o -lib-$(CONFIG_SIBYTE_CFE_CONSOLE) += console.o diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile index 48a91b9..4f65983 100644 --- a/arch/mips/sibyte/common/Makefile +++ b/arch/mips/sibyte/common/Makefile @@ -1,5 +1,5 @@ -obj-y := - +obj-y := cfe.o +obj-$(CONFIG_SIBYTE_CFE_CONSOLE) += cfe_console.o obj-$(CONFIG_SIBYTE_TBPROF) += sb_tbprof.o EXTRA_CFLAGS += -Werror diff --git a/arch/mips/sibyte/cfe/setup.c b/arch/mips/sibyte/common/cfe.c index eb5396c..eb5396c 100644 --- a/arch/mips/sibyte/cfe/setup.c +++ b/arch/mips/sibyte/common/cfe.c diff --git a/arch/mips/sibyte/cfe/console.c b/arch/mips/sibyte/common/cfe_console.c index 81e3d54..81e3d54 100644 --- a/arch/mips/sibyte/cfe/console.c +++ b/arch/mips/sibyte/common/cfe_console.c diff --git a/arch/mips/sibyte/sb1250/Makefile b/arch/mips/sibyte/sb1250/Makefile index 6977937..1896f4e 100644 --- a/arch/mips/sibyte/sb1250/Makefile +++ b/arch/mips/sibyte/sb1250/Makefile @@ -1,7 +1,6 @@ obj-y := setup.o irq.o time.o obj-$(CONFIG_SMP) += smp.o -obj-$(CONFIG_SIBYTE_STANDALONE) += prom.o obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o EXTRA_CFLAGS += -Werror diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c index 409dec7..5e7f201 100644 --- a/arch/mips/sibyte/sb1250/irq.c +++ b/arch/mips/sibyte/sb1250/irq.c @@ -111,11 +111,6 @@ static int sb1250_set_affinity(unsigned int irq, const struct cpumask *mask) i = cpumask_first(mask); - if (cpumask_weight(mask) > 1) { - printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq); - 
return -1; - } - /* Convert logical CPU to physical CPU */ cpu = cpu_logical_map(i); diff --git a/arch/mips/sibyte/sb1250/prom.c b/arch/mips/sibyte/sb1250/prom.c deleted file mode 100644 index 65b1af6..0000000 --- a/arch/mips/sibyte/sb1250/prom.c +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (C) 2000, 2001 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - */ - -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/blkdev.h> -#include <linux/bootmem.h> -#include <linux/smp.h> -#include <linux/initrd.h> -#include <linux/pm.h> - -#include <asm/bootinfo.h> -#include <asm/reboot.h> - -#define MAX_RAM_SIZE ((CONFIG_SIBYTE_STANDALONE_RAM_SIZE * 1024 * 1024) - 1) - -static __init void prom_meminit(void) -{ -#ifdef CONFIG_BLK_DEV_INITRD - unsigned long initrd_pstart; - unsigned long initrd_pend; - - initrd_pstart = __pa(initrd_start); - initrd_pend = __pa(initrd_end); - if (initrd_start && - ((initrd_pstart > MAX_RAM_SIZE) - || (initrd_pend > MAX_RAM_SIZE))) { - panic("initrd out of addressable memory"); - } - - add_memory_region(0, initrd_pstart, - BOOT_MEM_RAM); - add_memory_region(initrd_pstart, initrd_pend - initrd_pstart, - BOOT_MEM_RESERVED); - add_memory_region(initrd_pend, - (CONFIG_SIBYTE_STANDALONE_RAM_SIZE * 1024 * 1024) - initrd_pend, - BOOT_MEM_RAM); -#else - add_memory_region(0, CONFIG_SIBYTE_STANDALONE_RAM_SIZE * 1024 * 1024, - BOOT_MEM_RAM); -#endif -} - -void prom_cpu0_exit(void *unused) -{ - while (1) ; -} - -static void prom_linux_exit(void) -{ -#ifdef CONFIG_SMP - if (smp_processor_id()) { - smp_call_function(prom_cpu0_exit, NULL, 1); - } -#endif - while(1); -} - -/* - * prom_init is called just after the cpu type is determined, from setup_arch() - */ -void __init prom_init(void) -{ - _machine_restart = (void (*)(char *))prom_linux_exit; - _machine_halt = prom_linux_exit; - pm_power_off = prom_linux_exit; - - strcpy(arcs_cmdline, "root=/dev/ram0 "); - - prom_meminit(); -} - -void __init prom_free_prom_memory(void) -{ - /* Not sure what I'm supposed to do here. 
Nothing, I think */ -} - -void prom_putchar(char c) -{ -} diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c index 080c966..672e45d 100644 --- a/arch/mips/sibyte/swarm/setup.c +++ b/arch/mips/sibyte/swarm/setup.c @@ -136,20 +136,6 @@ void __init plat_mem_setup(void) if (m41t81_probe()) swarm_rtc_type = RTC_M4LT81; - printk("This kernel optimized for " -#ifdef CONFIG_SIMULATION - "simulation" -#else - "board" -#endif - " runs " -#ifdef CONFIG_SIBYTE_CFE - "with" -#else - "without" -#endif - " CFE\n"); - #ifdef CONFIG_VT screen_info = (struct screen_info) { 0, 0, /* orig-x, orig-y */ diff --git a/arch/mips/txx9/Kconfig b/arch/mips/txx9/Kconfig index 0db7cf3..852ae4b 100644 --- a/arch/mips/txx9/Kconfig +++ b/arch/mips/txx9/Kconfig @@ -69,6 +69,7 @@ config SOC_TX4927 select IRQ_TXX9 select PCI_TX4927 select GPIO_TXX9 + select HAS_TXX9_ACLC config SOC_TX4938 bool @@ -78,6 +79,7 @@ config SOC_TX4938 select IRQ_TXX9 select PCI_TX4927 select GPIO_TXX9 + select HAS_TXX9_ACLC config SOC_TX4939 bool @@ -85,6 +87,7 @@ config SOC_TX4939 select HAS_TXX9_SERIAL select HW_HAS_PCI select PCI_TX4927 + select HAS_TXX9_ACLC config TXX9_7SEGLED bool diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c index 8a266c6..3b7d77d 100644 --- a/arch/mips/txx9/generic/setup.c +++ b/arch/mips/txx9/generic/setup.c @@ -24,6 +24,7 @@ #include <linux/serial_core.h> #include <linux/mtd/physmap.h> #include <linux/leds.h> +#include <linux/sysdev.h> #include <asm/bootinfo.h> #include <asm/time.h> #include <asm/reboot.h> @@ -33,6 +34,7 @@ #include <asm/txx9/pci.h> #include <asm/txx9tmr.h> #include <asm/txx9/ndfmc.h> +#include <asm/txx9/dmac.h> #ifdef CONFIG_CPU_TX49XX #include <asm/txx9/tx4938.h> #endif @@ -821,3 +823,176 @@ void __init txx9_iocled_init(unsigned long baseaddr, { } #endif /* CONFIG_LEDS_GPIO */ + +void __init txx9_dmac_init(int id, unsigned long baseaddr, int irq, + const struct txx9dmac_platform_data *pdata) +{ +#if defined(CONFIG_TXX9_DMAC) || defined(CONFIG_TXX9_DMAC_MODULE) + struct resource res[] = { + { + .start = baseaddr, + .end = baseaddr + 0x800 - 1, + .flags = IORESOURCE_MEM, +#ifndef CONFIG_MACH_TX49XX + }, { + .start = irq, + .flags = IORESOURCE_IRQ, +#endif + } + }; +#ifdef CONFIG_MACH_TX49XX + struct resource chan_res[] = { + { + .flags = IORESOURCE_IRQ, + } + }; +#endif + struct platform_device *pdev = platform_device_alloc("txx9dmac", id); + struct txx9dmac_chan_platform_data cpdata; + int i; + + if (!pdev || + platform_device_add_resources(pdev, res, ARRAY_SIZE(res)) || + platform_device_add_data(pdev, pdata, sizeof(*pdata)) || + platform_device_add(pdev)) { + platform_device_put(pdev); + return; + } + memset(&cpdata, 0, sizeof(cpdata)); + cpdata.dmac_dev = pdev; + for (i = 0; i < TXX9_DMA_MAX_NR_CHANNELS; i++) { +#ifdef CONFIG_MACH_TX49XX + chan_res[0].start = irq + i; +#endif + pdev = platform_device_alloc("txx9dmac-chan", + id * TXX9_DMA_MAX_NR_CHANNELS + i); + if (!pdev || +#ifdef CONFIG_MACH_TX49XX + platform_device_add_resources(pdev, chan_res, + ARRAY_SIZE(chan_res)) || +#endif + platform_device_add_data(pdev, &cpdata, sizeof(cpdata)) || + platform_device_add(pdev)) + platform_device_put(pdev); + } +#endif +} + +void __init txx9_aclc_init(unsigned long baseaddr, int irq, + unsigned int dmac_id, + unsigned int dma_chan_out, + unsigned int dma_chan_in) +{ +#if defined(CONFIG_SND_SOC_TXX9ACLC) || \ + defined(CONFIG_SND_SOC_TXX9ACLC_MODULE) + unsigned int dma_base = dmac_id * TXX9_DMA_MAX_NR_CHANNELS; + struct resource res[] = { + { + .start = 
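txx9_dmac_init() above uses the standard four-step platform-device dance: allocate, attach resources, attach platform data, add; any failure funnels into a single platform_device_put(), which is NULL-safe and releases everything the earlier steps copied. Reduced to its skeleton:

static void __init example_register(struct resource *res, int nres,
				    const void *pdata, size_t pdata_size)
{
	struct platform_device *pdev =
		platform_device_alloc("example-dev", -1);

	if (!pdev ||
	    platform_device_add_resources(pdev, res, nres) ||
	    platform_device_add_data(pdev, pdata, pdata_size) ||
	    platform_device_add(pdev))
		platform_device_put(pdev);	/* safe even when pdev is NULL */
}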
baseaddr, + .end = baseaddr + 0x100 - 1, + .flags = IORESOURCE_MEM, + }, { + .start = irq, + .flags = IORESOURCE_IRQ, + }, { + .name = "txx9dmac-chan", + .start = dma_base + dma_chan_out, + .flags = IORESOURCE_DMA, + }, { + .name = "txx9dmac-chan", + .start = dma_base + dma_chan_in, + .flags = IORESOURCE_DMA, + } + }; + struct platform_device *pdev = + platform_device_alloc("txx9aclc-ac97", -1); + + if (!pdev || + platform_device_add_resources(pdev, res, ARRAY_SIZE(res)) || + platform_device_add(pdev)) + platform_device_put(pdev); +#endif +} + +static struct sysdev_class txx9_sramc_sysdev_class; + +struct txx9_sramc_sysdev { + struct sys_device dev; + struct bin_attribute bindata_attr; + void __iomem *base; +}; + +static ssize_t txx9_sram_read(struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t pos, size_t size) +{ + struct txx9_sramc_sysdev *dev = bin_attr->private; + size_t ramsize = bin_attr->size; + + if (pos >= ramsize) + return 0; + if (pos + size > ramsize) + size = ramsize - pos; + memcpy_fromio(buf, dev->base + pos, size); + return size; +} + +static ssize_t txx9_sram_write(struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t pos, size_t size) +{ + struct txx9_sramc_sysdev *dev = bin_attr->private; + size_t ramsize = bin_attr->size; + + if (pos >= ramsize) + return 0; + if (pos + size > ramsize) + size = ramsize - pos; + memcpy_toio(dev->base + pos, buf, size); + return size; +} + +void __init txx9_sramc_init(struct resource *r) +{ + struct txx9_sramc_sysdev *dev; + size_t size; + int err; + + if (!txx9_sramc_sysdev_class.name) { + txx9_sramc_sysdev_class.name = "txx9_sram"; + err = sysdev_class_register(&txx9_sramc_sysdev_class); + if (err) { + txx9_sramc_sysdev_class.name = NULL; + return; + } + } + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return; + size = resource_size(r); + dev->base = ioremap(r->start, size); + if (!dev->base) + goto exit; + dev->dev.cls = &txx9_sramc_sysdev_class; + dev->bindata_attr.attr.name = "bindata"; + dev->bindata_attr.attr.mode = S_IRUSR | S_IWUSR; + dev->bindata_attr.read = txx9_sram_read; + dev->bindata_attr.write = txx9_sram_write; + dev->bindata_attr.size = size; + dev->bindata_attr.private = dev; + err = sysdev_register(&dev->dev); + if (err) + goto exit; + err = sysfs_create_bin_file(&dev->dev.kobj, &dev->bindata_attr); + if (err) { + sysdev_unregister(&dev->dev); + goto exit; + } + return; +exit: + if (dev) { + if (dev->base) + iounmap(dev->base); + kfree(dev); + } +} diff --git a/arch/mips/txx9/generic/setup_tx4927.c b/arch/mips/txx9/generic/setup_tx4927.c index 1093549..3418b2a 100644 --- a/arch/mips/txx9/generic/setup_tx4927.c +++ b/arch/mips/txx9/generic/setup_tx4927.c @@ -22,6 +22,7 @@ #include <asm/txx9tmr.h> #include <asm/txx9pio.h> #include <asm/txx9/generic.h> +#include <asm/txx9/dmac.h> #include <asm/txx9/tx4927.h> static void __init tx4927_wdr_init(void) @@ -253,6 +254,60 @@ void __init tx4927_mtd_init(int ch) txx9_physmap_flash_init(ch, start, size, &pdata); } +void __init tx4927_dmac_init(int memcpy_chan) +{ + struct txx9dmac_platform_data plat_data = { + .memcpy_chan = memcpy_chan, + .have_64bit_regs = true, + }; + + txx9_dmac_init(0, TX4927_DMA_REG & 0xfffffffffULL, + TXX9_IRQ_BASE + TX4927_IR_DMA(0), &plat_data); +} + +void __init tx4927_aclc_init(unsigned int dma_chan_out, + unsigned int dma_chan_in) +{ + u64 pcfg = __raw_readq(&tx4927_ccfgptr->pcfg); + __u64 dmasel_mask = 0, dmasel = 0; + unsigned long flags; + + if (!(pcfg & TX4927_PCFG_SEL2)) + return; + /* setup 
DMASEL (playback:ACLC ch0, capture:ACLC ch1) */ + switch (dma_chan_out) { + case 0: + dmasel_mask |= TX4927_PCFG_DMASEL0_MASK; + dmasel |= TX4927_PCFG_DMASEL0_ACL0; + break; + case 2: + dmasel_mask |= TX4927_PCFG_DMASEL2_MASK; + dmasel |= TX4927_PCFG_DMASEL2_ACL0; + break; + default: + return; + } + switch (dma_chan_in) { + case 1: + dmasel_mask |= TX4927_PCFG_DMASEL1_MASK; + dmasel |= TX4927_PCFG_DMASEL1_ACL1; + break; + case 3: + dmasel_mask |= TX4927_PCFG_DMASEL3_MASK; + dmasel |= TX4927_PCFG_DMASEL3_ACL1; + break; + default: + return; + } + local_irq_save(flags); + txx9_clear64(&tx4927_ccfgptr->pcfg, dmasel_mask); + txx9_set64(&tx4927_ccfgptr->pcfg, dmasel); + local_irq_restore(flags); + txx9_aclc_init(TX4927_ACLC_REG & 0xfffffffffULL, + TXX9_IRQ_BASE + TX4927_IR_ACLC, + 0, dma_chan_out, dma_chan_in); +} + static void __init tx4927_stop_unused_modules(void) { __u64 pcfg, rst = 0, ckd = 0; diff --git a/arch/mips/txx9/generic/setup_tx4938.c b/arch/mips/txx9/generic/setup_tx4938.c index 3925219..eb20801 100644 --- a/arch/mips/txx9/generic/setup_tx4938.c +++ b/arch/mips/txx9/generic/setup_tx4938.c @@ -24,6 +24,7 @@ #include <asm/txx9pio.h> #include <asm/txx9/generic.h> #include <asm/txx9/ndfmc.h> +#include <asm/txx9/dmac.h> #include <asm/txx9/tx4938.h> static void __init tx4938_wdr_init(void) @@ -239,11 +240,6 @@ void __init tx4938_setup(void) for (i = 0; i < TX4938_NR_TMR; i++) txx9_tmr_init(TX4938_TMR_REG(i) & 0xfffffffffULL); - /* DMA */ - for (i = 0; i < 2; i++) - ____raw_writeq(TX4938_DMA_MCR_MSTEN, - (void __iomem *)(TX4938_DMA_REG(i) + 0x50)); - /* PIO */ txx9_gpio_init(TX4938_PIO_REG & 0xfffffffffULL, 0, TX4938_NUM_PIO); __raw_writel(0, &tx4938_pioptr->maskcpu); @@ -403,6 +399,38 @@ void __init tx4938_ndfmc_init(unsigned int hold, unsigned int spw) txx9_ndfmc_init(baseaddr, &plat_data); } +void __init tx4938_dmac_init(int memcpy_chan0, int memcpy_chan1) +{ + struct txx9dmac_platform_data plat_data = { + .have_64bit_regs = true, + }; + int i; + + for (i = 0; i < 2; i++) { + plat_data.memcpy_chan = i ? 
memcpy_chan1 : memcpy_chan0; + txx9_dmac_init(i, TX4938_DMA_REG(i) & 0xfffffffffULL, + TXX9_IRQ_BASE + TX4938_IR_DMA(i, 0), + &plat_data); + } +} + +void __init tx4938_aclc_init(void) +{ + u64 pcfg = __raw_readq(&tx4938_ccfgptr->pcfg); + + if ((pcfg & TX4938_PCFG_SEL2) && + !(pcfg & TX4938_PCFG_ETH0_SEL)) + txx9_aclc_init(TX4938_ACLC_REG & 0xfffffffffULL, + TXX9_IRQ_BASE + TX4938_IR_ACLC, + 1, 0, 1); +} + +void __init tx4938_sramc_init(void) +{ + if (tx4938_sram_resource.start) + txx9_sramc_init(&tx4938_sram_resource); +} + static void __init tx4938_stop_unused_modules(void) { __u64 pcfg, rst = 0, ckd = 0; diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c index c2bf150..3dc19f4 100644 --- a/arch/mips/txx9/generic/setup_tx4939.c +++ b/arch/mips/txx9/generic/setup_tx4939.c @@ -28,6 +28,7 @@ #include <asm/txx9tmr.h> #include <asm/txx9/generic.h> #include <asm/txx9/ndfmc.h> +#include <asm/txx9/dmac.h> #include <asm/txx9/tx4939.h> static void __init tx4939_wdr_init(void) @@ -259,11 +260,6 @@ void __init tx4939_setup(void) for (i = 0; i < TX4939_NR_TMR; i++) txx9_tmr_init(TX4939_TMR_REG(i) & 0xfffffffffULL); - /* DMA */ - for (i = 0; i < 2; i++) - ____raw_writeq(TX4938_DMA_MCR_MSTEN, - (void __iomem *)(TX4939_DMA_REG(i) + 0x50)); - /* set PCIC1 reset (required to prevent hangup on BIST) */ txx9_set64(&tx4939_ccfgptr->clkctr, TX4939_CLKCTR_PCI1RST); pcfg = ____raw_readq(&tx4939_ccfgptr->pcfg); @@ -474,6 +470,53 @@ void __init tx4939_ndfmc_init(unsigned int hold, unsigned int spw, txx9_ndfmc_init(TX4939_NDFMC_REG & 0xfffffffffULL, &plat_data); } +void __init tx4939_dmac_init(int memcpy_chan0, int memcpy_chan1) +{ + struct txx9dmac_platform_data plat_data = { + .have_64bit_regs = true, + }; + int i; + + for (i = 0; i < 2; i++) { + plat_data.memcpy_chan = i ? 
memcpy_chan1 : memcpy_chan0; + txx9_dmac_init(i, TX4939_DMA_REG(i) & 0xfffffffffULL, + TXX9_IRQ_BASE + TX4939_IR_DMA(i, 0), + &plat_data); + } +} + +void __init tx4939_aclc_init(void) +{ + u64 pcfg = __raw_readq(&tx4939_ccfgptr->pcfg); + + if ((pcfg & TX4939_PCFG_I2SMODE_MASK) == TX4939_PCFG_I2SMODE_ACLC) + txx9_aclc_init(TX4939_ACLC_REG & 0xfffffffffULL, + TXX9_IRQ_BASE + TX4939_IR_ACLC, 1, 0, 1); +} + +void __init tx4939_sramc_init(void) +{ + if (tx4939_sram_resource.start) + txx9_sramc_init(&tx4939_sram_resource); +} + +void __init tx4939_rng_init(void) +{ + static struct resource res = { + .start = TX4939_RNG_REG & 0xfffffffffULL, + .end = (TX4939_RNG_REG & 0xfffffffffULL) + 0x30 - 1, + .flags = IORESOURCE_MEM, + }; + static struct platform_device pdev = { + .name = "tx4939-rng", + .id = -1, + .num_resources = 1, + .resource = &res, + }; + + platform_device_register(&pdev); +} + static void __init tx4939_stop_unused_modules(void) { __u64 pcfg, rst = 0, ckd = 0; diff --git a/arch/mips/txx9/rbtx4927/setup.c b/arch/mips/txx9/rbtx4927/setup.c index 01129a9..ee468ea 100644 --- a/arch/mips/txx9/rbtx4927/setup.c +++ b/arch/mips/txx9/rbtx4927/setup.c @@ -337,6 +337,14 @@ static void __init rbtx4927_device_init(void) rbtx4927_ne_init(); tx4927_wdt_init(); rbtx4927_mtd_init(); + if (TX4927_REV_PCODE() == 0x4927) { + tx4927_dmac_init(2); + tx4927_aclc_init(0, 1); + } else { + tx4938_dmac_init(0, 2); + tx4938_aclc_init(); + } + platform_device_register_simple("txx9aclc-generic", -1, NULL, 0); txx9_iocled_init(RBTX4927_LED_ADDR - IO_BASE, -1, 3, 1, "green", NULL); rbtx4927_gpioled_init(); } diff --git a/arch/mips/txx9/rbtx4938/setup.c b/arch/mips/txx9/rbtx4938/setup.c index 65d13df..d66509b 100644 --- a/arch/mips/txx9/rbtx4938/setup.c +++ b/arch/mips/txx9/rbtx4938/setup.c @@ -355,6 +355,10 @@ static void __init rbtx4938_device_init(void) /* TC58DVM82A1FT: tDH=10ns, tWP=tRP=tREADID=35ns */ tx4938_ndfmc_init(10, 35); tx4938_ata_init(RBTX4938_IRQ_IOC_ATA, 0, 1); + tx4938_dmac_init(0, 2); + tx4938_aclc_init(); + platform_device_register_simple("txx9aclc-generic", -1, NULL, 0); + tx4938_sramc_init(); txx9_iocled_init(RBTX4938_LED_ADDR - IO_BASE, -1, 8, 1, "green", NULL); } diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c index 4199c6f..c033ffe 100644 --- a/arch/mips/txx9/rbtx4939/setup.c +++ b/arch/mips/txx9/rbtx4939/setup.c @@ -498,6 +498,11 @@ static void __init rbtx4939_device_init(void) tx4939_wdt_init(); tx4939_ata_init(); tx4939_rtc_init(); + tx4939_dmac_init(0, 2); + tx4939_aclc_init(); + platform_device_register_simple("txx9aclc-generic", -1, NULL, 0); + tx4939_sramc_init(); + tx4939_rng_init(); } static void __init rbtx4939_setup(void) diff --git a/arch/mn10300/include/asm/elf.h b/arch/mn10300/include/asm/elf.h index 4910546..75a70aa 100644 --- a/arch/mn10300/include/asm/elf.h +++ b/arch/mn10300/include/asm/elf.h @@ -28,6 +28,8 @@ #define R_MN10300_PCREL8 6 /* PC-relative 8-bit signed. */ #define R_MN10300_24 9 /* Direct 24 bit. */ #define R_MN10300_RELATIVE 23 /* Adjust by program base. */ +#define R_MN10300_SYM_DIFF 33 /* Adjustment when relaxing. */ +#define R_MN10300_ALIGN 34 /* Alignment requirement. */ /* * ELF register definitions.. 
diff --git a/arch/mn10300/include/asm/pci.h b/arch/mn10300/include/asm/pci.h index 0517b45..e58b9a4 100644 --- a/arch/mn10300/include/asm/pci.h +++ b/arch/mn10300/include/asm/pci.h @@ -106,19 +106,6 @@ extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, struct pci_bus_region *region); -static inline struct resource * -pcibios_select_root(struct pci_dev *pdev, struct resource *res) -{ - struct resource *root = NULL; - - if (res->flags & IORESOURCE_IO) - root = &ioport_resource; - if (res->flags & IORESOURCE_MEM) - root = &iomem_resource; - - return root; -} - #define pcibios_scan_all_fns(a, b) 0 static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) diff --git a/arch/mn10300/kernel/module.c b/arch/mn10300/kernel/module.c index 4fa0e36..6aea7fd 100644 --- a/arch/mn10300/kernel/module.c +++ b/arch/mn10300/kernel/module.c @@ -1,6 +1,6 @@ /* MN10300 Kernel module helper routines * - * Copyright (C) 2007, 2008 Red Hat, Inc. All Rights Reserved. + * Copyright (C) 2007, 2008, 2009 Red Hat, Inc. All Rights Reserved. * Written by Mark Salter (msalter@redhat.com) * - Derived from arch/i386/kernel/module.c * @@ -103,10 +103,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, unsigned int relsec, struct module *me) { - unsigned int i; + unsigned int i, sym_diff_seen = 0; Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr; Elf32_Sym *sym; - Elf32_Addr relocation; + Elf32_Addr relocation, sym_diff_val = 0; uint8_t *location; uint32_t value; @@ -126,6 +126,22 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, /* this is the adjustment to be made */ relocation = sym->st_value + rel[i].r_addend; + if (sym_diff_seen) { + switch (ELF32_R_TYPE(rel[i].r_info)) { + case R_MN10300_32: + case R_MN10300_24: + case R_MN10300_16: + case R_MN10300_8: + relocation -= sym_diff_val; + sym_diff_seen = 0; + break; + default: + printk(KERN_ERR "module %s: Unexpected SYM_DIFF relocation: %u\n", + me->name, ELF32_R_TYPE(rel[i].r_info)); + return -ENOEXEC; + } + } + switch (ELF32_R_TYPE(rel[i].r_info)) { /* for the first four relocation types, we simply * store the adjustment at the location given */ @@ -157,12 +173,29 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, *location = relocation - (uint32_t) location; break; + case R_MN10300_SYM_DIFF: + /* This is used to adjust the next reloc as required + * by relaxation. */ + sym_diff_seen = 1; + sym_diff_val = sym->st_value; + break; + + case R_MN10300_ALIGN: + /* Just ignore the ALIGN relocs. + * Only interesting if the kernel performed relaxation.
*/ + continue; + default: printk(KERN_ERR "module %s: Unknown relocation: %u\n", me->name, ELF32_R_TYPE(rel[i].r_info)); return -ENOEXEC; } } + if (sym_diff_seen) { + printk(KERN_ERR "module %s: Nothing follows SYM_DIFF relocation: %u\n", + me->name, ELF32_R_TYPE(rel[i].r_info)); + return -ENOEXEC; + } return 0; } diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c index 234cf34..892cce8 100644 --- a/arch/mn10300/kernel/process.c +++ b/arch/mn10300/kernel/process.c @@ -281,9 +281,6 @@ asmlinkage long sys_execve(char __user *name, error = PTR_ERR(filename); if (!IS_ERR(filename)) { error = do_execve(filename, argv, envp, __frame); - if (error == 0) - current->ptrace &= ~PT_DTRACE; - putname(filename); } diff --git a/arch/parisc/hpux/fs.c b/arch/parisc/hpux/fs.c index 5cbe9f9..5407536 100644 --- a/arch/parisc/hpux/fs.c +++ b/arch/parisc/hpux/fs.c @@ -44,11 +44,6 @@ int hpux_execve(struct pt_regs *regs) error = do_execve(filename, (char __user * __user *) regs->gr[25], (char __user * __user *) regs->gr[24], regs); - if (error == 0) { - task_lock(current); - current->ptrace &= ~PT_DTRACE; - task_unlock(current); - } putname(filename); out: diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h index 4ba868f..7d842d6 100644 --- a/arch/parisc/include/asm/pci.h +++ b/arch/parisc/include/asm/pci.h @@ -268,19 +268,6 @@ extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, struct pci_bus_region *region); -static inline struct resource * -pcibios_select_root(struct pci_dev *pdev, struct resource *res) -{ - struct resource *root = NULL; - - if (res->flags & IORESOURCE_IO) - root = &ioport_resource; - if (res->flags & IORESOURCE_MEM) - root = &iomem_resource; - - return root; -} - static inline void pcibios_penalize_isa_irq(int irq, int active) { /* We don't need to penalize isa irq's */ diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index 6f69101..61c0707 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -349,11 +349,6 @@ asmlinkage int sys_execve(struct pt_regs *regs) goto out; error = do_execve(filename, (char __user * __user *) regs->gr[25], (char __user * __user *) regs->gr[24], regs); - if (error == 0) { - task_lock(current); - current->ptrace &= ~PT_DTRACE; - task_unlock(current); - } putname(filename); out: diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c index 0838155..1adb40c 100644 --- a/arch/parisc/kernel/sys_parisc32.c +++ b/arch/parisc/kernel/sys_parisc32.c @@ -77,11 +77,6 @@ asmlinkage int sys32_execve(struct pt_regs *regs) goto out; error = compat_do_execve(filename, compat_ptr(regs->gr[25]), compat_ptr(regs->gr[24]), regs); - if (error == 0) { - task_lock(current); - current->ptrace &= ~PT_DTRACE; - task_unlock(current); - } putname(filename); out: diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index ba17d5d..d9483c5 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -195,19 +195,6 @@ extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, struct pci_bus_region *region); -static inline struct resource *pcibios_select_root(struct pci_dev *pdev, - struct resource *res) -{ - struct resource *root = NULL; - - if (res->flags & IORESOURCE_IO) - root = &ioport_resource; - if (res->flags & IORESOURCE_MEM) - root = &iomem_resource; - - return root; -} - extern void pcibios_claim_one_bus(struct pci_bus *b); extern void pcibios_finish_adding_to_bus(struct 
pci_bus *bus); diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index ef6f649..a538824 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -1947,8 +1947,47 @@ static void __init fixup_device_tree_maple(void) prom_setprop(isa, name, "ranges", isa_ranges, sizeof(isa_ranges)); } + +#define CPC925_MC_START 0xf8000000 +#define CPC925_MC_LENGTH 0x1000000 +/* The values for memory-controller don't have the right number of cells */ +static void __init fixup_device_tree_maple_memory_controller(void) +{ + phandle mc; + u32 mc_reg[4]; + char *name = "/hostbridge@f8000000"; + struct prom_t *_prom = &RELOC(prom); + u32 ac, sc; + + mc = call_prom("finddevice", 1, 1, ADDR(name)); + if (!PHANDLE_VALID(mc)) + return; + + if (prom_getproplen(mc, "reg") != 8) + return; + + prom_getprop(_prom->root, "#address-cells", &ac, sizeof(ac)); + prom_getprop(_prom->root, "#size-cells", &sc, sizeof(sc)); + if ((ac != 2) || (sc != 2)) + return; + + if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR) + return; + + if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH) + return; + + prom_printf("Fixing up bogus hostbridge on Maple...\n"); + + mc_reg[0] = 0x0; + mc_reg[1] = CPC925_MC_START; + mc_reg[2] = 0x0; + mc_reg[3] = CPC925_MC_LENGTH; + prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg)); +} #else #define fixup_device_tree_maple() +#define fixup_device_tree_maple_memory_controller() #endif #ifdef CONFIG_PPC_CHRP @@ -2189,6 +2228,7 @@ static void __init fixup_device_tree_efika(void) static void __init fixup_device_tree(void) { fixup_device_tree_maple(); + fixup_device_tree_maple_memory_controller(); fixup_device_tree_chrp(); fixup_device_tree_pmac(); fixup_device_tree_efika(); diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c index bfd60e4..0636a3d 100644 --- a/arch/powerpc/platforms/maple/setup.c +++ b/arch/powerpc/platforms/maple/setup.c @@ -335,3 +335,62 @@ define_machine(maple) { .progress = maple_progress, .power_save = power4_idle, }; + +#ifdef CONFIG_EDAC +/* + * Register a platform device for the CPC925 memory controller on + * the Motorola ATCA-6101 blade.
+ */ +#define MAPLE_CPC925_MODEL "Motorola,ATCA-6101" +static int __init maple_cpc925_edac_setup(void) +{ + struct platform_device *pdev; + struct device_node *np = NULL; + struct resource r; + const unsigned char *model; + int ret; + + np = of_find_node_by_path("/"); + if (!np) { + printk(KERN_ERR "%s: Unable to get root node\n", __func__); + return -ENODEV; + } + + model = (const unsigned char *)of_get_property(np, "model", NULL); + if (!model) { + printk(KERN_ERR "%s: Unable to get model info\n", __func__); + return -ENODEV; + } + + ret = strcmp(model, MAPLE_CPC925_MODEL); + of_node_put(np); + + if (ret != 0) + return 0; + + np = of_find_node_by_type(NULL, "memory-controller"); + if (!np) { + printk(KERN_ERR "%s: Unable to find memory-controller node\n", + __func__); + return -ENODEV; + } + + ret = of_address_to_resource(np, 0, &r); + of_node_put(np); + + if (ret < 0) { + printk(KERN_ERR "%s: Unable to get memory-controller reg\n", + __func__); + return -ENODEV; + } + + pdev = platform_device_register_simple("cpc925_edac", 0, &r, 1); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); + + printk(KERN_INFO "%s: CPC925 platform device created\n", __func__); + + return 0; +} +machine_device_initcall(maple, maple_cpc925_edac_setup); +#endif diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 002c70d..9ab188d 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -461,9 +461,6 @@ asmlinkage long sys32_execve(void) result = rc; goto out_putname; } - task_lock(current); - current->ptrace &= ~PT_DTRACE; - task_unlock(current); current->thread.fp_regs.fpc=0; asm volatile("sfpc %0,0" : : "d" (0)); result = regs->gprs[2]; diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 355f7a3..5a43f27 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -266,9 +266,6 @@ SYSCALL_DEFINE0(vfork) asmlinkage void execve_tail(void) { - task_lock(current); - current->ptrace &= ~PT_DTRACE; - task_unlock(current); current->thread.fp_regs.fpc = 0; if (MACHINE_HAS_IEEE) asm volatile("sfpc %0,%0" : : "d" (0)); diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h index ae0da6f..d3633f5 100644 --- a/arch/sh/include/asm/pci.h +++ b/arch/sh/include/asm/pci.h @@ -137,19 +137,6 @@ extern void pcibios_resource_to_bus(struct pci_dev *dev, extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, struct pci_bus_region *region); -static inline struct resource * -pcibios_select_root(struct pci_dev *pdev, struct resource *res) -{ - struct resource *root = NULL; - - if (res->flags & IORESOURCE_IO) - root = &ioport_resource; - if (res->flags & IORESOURCE_MEM) - root = &iomem_resource; - - return root; -} - /* Chances are this interrupt is wired PC-style ...
*/ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) { diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index eea4cf9..92d7740 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -365,11 +365,6 @@ asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv, goto out; error = do_execve(filename, uargv, uenvp, regs); - if (error == 0) { - task_lock(current); - current->ptrace &= ~PT_DTRACE; - task_unlock(current); - } putname(filename); out: return error; diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c index 44c8077..24de742 100644 --- a/arch/sh/kernel/process_64.c +++ b/arch/sh/kernel/process_64.c @@ -524,11 +524,6 @@ asmlinkage int sys_execve(char *ufilename, char **uargv, (char __user * __user *)uargv, (char __user * __user *)uenvp, pregs); - if (error == 0) { - task_lock(current); - current->ptrace &= ~PT_DTRACE; - task_unlock(current); - } putname(filename); out: return error; diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h index 4f79a54..7a1e356 100644 --- a/arch/sparc/include/asm/pci_64.h +++ b/arch/sparc/include/asm/pci_64.h @@ -191,8 +191,6 @@ extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, struct pci_bus_region *region); -extern struct resource *pcibios_select_root(struct pci_dev *, struct resource *); - static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) { return PCI_IRQ_NONE; diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 4638fba..57859ad 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -711,19 +711,6 @@ void __devinit pcibios_fixup_bus(struct pci_bus *pbus) pbus->resource[1] = &pbm->mem_space; } -struct resource *pcibios_select_root(struct pci_dev *pdev, struct resource *r) -{ - struct pci_pbm_info *pbm = pdev->bus->sysdata; - struct resource *root = NULL; - - if (r->flags & IORESOURCE_IO) - root = &pbm->io_space; - if (r->flags & IORESOURCE_MEM) - root = &pbm->mem_space; - - return root; -} - void pcibios_update_irq(struct pci_dev *pdev, int irq) { } diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index cf42fc3..73c0bda 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -28,6 +28,7 @@ config X86 select HAVE_KPROBES select ARCH_WANT_OPTIONAL_GPIOLIB select ARCH_WANT_FRAME_POINTERS + select HAVE_DMA_ATTRS select HAVE_KRETPROBES select HAVE_FTRACE_MCOUNT_RECORD select HAVE_DYNAMIC_FTRACE diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index 8d16ada..ec749c2 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -70,6 +70,7 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \ $(call cc-option, -mpreferred-stack-boundary=2) KBUILD_CFLAGS += $(call cc-option, -m32) KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ +GCOV_PROFILE := n $(obj)/bzImage: asflags-y := $(SVGA_MODE) diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 49c8a4c..e2ff504 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -15,6 +15,7 @@ KBUILD_CFLAGS += $(call cc-option,-ffreestanding) KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ +GCOV_PROFILE := n LDFLAGS := -m elf_$(UTS_MACHINE) LDFLAGS_vmlinux := -T diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index b93405b..1c3f943 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ 
-33,6 +33,8 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) #endif } +#include <asm-generic/dma-mapping-common.h> + /* Make sure we keep the same behaviour */ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { @@ -53,177 +55,6 @@ extern int dma_set_mask(struct device *dev, u64 mask); extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t flag); -static inline dma_addr_t -dma_map_single(struct device *hwdev, void *ptr, size_t size, - enum dma_data_direction dir) -{ - struct dma_map_ops *ops = get_dma_ops(hwdev); - dma_addr_t addr; - - BUG_ON(!valid_dma_direction(dir)); - kmemcheck_mark_initialized(ptr, size); - addr = ops->map_page(hwdev, virt_to_page(ptr), - (unsigned long)ptr & ~PAGE_MASK, size, - dir, NULL); - debug_dma_map_page(hwdev, virt_to_page(ptr), - (unsigned long)ptr & ~PAGE_MASK, size, - dir, addr, true); - return addr; -} - -static inline void -dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, - enum dma_data_direction dir) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (ops->unmap_page) - ops->unmap_page(dev, addr, size, dir, NULL); - debug_dma_unmap_page(dev, addr, size, dir, true); -} - -static inline int -dma_map_sg(struct device *hwdev, struct scatterlist *sg, - int nents, enum dma_data_direction dir) -{ - struct dma_map_ops *ops = get_dma_ops(hwdev); - int ents; - struct scatterlist *s; - int i; - - BUG_ON(!valid_dma_direction(dir)); - for_each_sg(sg, s, nents, i) - kmemcheck_mark_initialized(sg_virt(s), s->length); - ents = ops->map_sg(hwdev, sg, nents, dir, NULL); - debug_dma_map_sg(hwdev, sg, nents, ents, dir); - - return ents; -} - -static inline void -dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, - enum dma_data_direction dir) -{ - struct dma_map_ops *ops = get_dma_ops(hwdev); - - BUG_ON(!valid_dma_direction(dir)); - debug_dma_unmap_sg(hwdev, sg, nents, dir); - if (ops->unmap_sg) - ops->unmap_sg(hwdev, sg, nents, dir, NULL); -} - -static inline void -dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction dir) -{ - struct dma_map_ops *ops = get_dma_ops(hwdev); - - BUG_ON(!valid_dma_direction(dir)); - if (ops->sync_single_for_cpu) - ops->sync_single_for_cpu(hwdev, dma_handle, size, dir); - debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir); - flush_write_buffers(); -} - -static inline void -dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction dir) -{ - struct dma_map_ops *ops = get_dma_ops(hwdev); - - BUG_ON(!valid_dma_direction(dir)); - if (ops->sync_single_for_device) - ops->sync_single_for_device(hwdev, dma_handle, size, dir); - debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir); - flush_write_buffers(); -} - -static inline void -dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction dir) -{ - struct dma_map_ops *ops = get_dma_ops(hwdev); - - BUG_ON(!valid_dma_direction(dir)); - if (ops->sync_single_range_for_cpu) - ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, - size, dir); - debug_dma_sync_single_range_for_cpu(hwdev, dma_handle, - offset, size, dir); - flush_write_buffers(); -} - -static inline void -dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction dir) -{ - struct 
dma_map_ops *ops = get_dma_ops(hwdev); - - BUG_ON(!valid_dma_direction(dir)); - if (ops->sync_single_range_for_device) - ops->sync_single_range_for_device(hwdev, dma_handle, - offset, size, dir); - debug_dma_sync_single_range_for_device(hwdev, dma_handle, - offset, size, dir); - flush_write_buffers(); -} - -static inline void -dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir) -{ - struct dma_map_ops *ops = get_dma_ops(hwdev); - - BUG_ON(!valid_dma_direction(dir)); - if (ops->sync_sg_for_cpu) - ops->sync_sg_for_cpu(hwdev, sg, nelems, dir); - debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir); - flush_write_buffers(); -} - -static inline void -dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir) -{ - struct dma_map_ops *ops = get_dma_ops(hwdev); - - BUG_ON(!valid_dma_direction(dir)); - if (ops->sync_sg_for_device) - ops->sync_sg_for_device(hwdev, sg, nelems, dir); - debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir); - - flush_write_buffers(); -} - -static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, - size_t offset, size_t size, - enum dma_data_direction dir) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - dma_addr_t addr; - - BUG_ON(!valid_dma_direction(dir)); - kmemcheck_mark_initialized(page_address(page) + offset, size); - addr = ops->map_page(dev, page, offset, size, dir, NULL); - debug_dma_map_page(dev, page, offset, size, dir, addr, false); - - return addr; -} - -static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (ops->unmap_page) - ops->unmap_page(dev, addr, size, dir, NULL); - debug_dma_unmap_page(dev, addr, size, dir, false); -} - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction dir) diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index f3477bb..6c327b8 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -24,6 +24,8 @@ CFLAGS_vsyscall_64.o := $(PROFILING) -g0 $(nostackp) CFLAGS_hpet.o := $(nostackp) CFLAGS_tsc.o := $(nostackp) CFLAGS_paravirt.o := $(nostackp) +GCOV_PROFILE_vsyscall_64.o := n +GCOV_PROFILE_hpet.o := n obj-y := process_$(BITS).o signal.o entry_$(BITS).o obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile index 167bc16..6a564ac 100644 --- a/arch/x86/kernel/acpi/realmode/Makefile +++ b/arch/x86/kernel/acpi/realmode/Makefile @@ -42,6 +42,7 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \ $(call cc-option, -mpreferred-stack-boundary=2) KBUILD_CFLAGS += $(call cc-option, -m32) KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ +GCOV_PROFILE := n WAKEUP_OBJS = $(addprefix $(obj)/,$(wakeup-y)) diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index cf52215..81cbe64 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -1,3 +1,4 @@ + /* * (c) 2003-2006 Advanced Micro Devices, Inc. 
* Your use of this code is subject to the terms and conditions of the @@ -117,20 +118,17 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data) u32 i = 0; if (cpu_family == CPU_HW_PSTATE) { - if (data->currpstate == HW_PSTATE_INVALID) { - /* read (initial) hw pstate if not yet set */ - rdmsr(MSR_PSTATE_STATUS, lo, hi); - i = lo & HW_PSTATE_MASK; - - /* - * a workaround for family 11h erratum 311 might cause - * an "out-of-range Pstate if the core is in Pstate-0 - */ - if (i >= data->numps) - data->currpstate = HW_PSTATE_0; - else - data->currpstate = i; - } + rdmsr(MSR_PSTATE_STATUS, lo, hi); + i = lo & HW_PSTATE_MASK; + data->currpstate = i; + + /* + * a workaround for family 11h erratum 311 might cause + * an "out-of-range Pstate if the core is in Pstate-0 + */ + if ((boot_cpu_data.x86 == 0x11) && (i >= data->numps)) + data->currpstate = HW_PSTATE_0; + return 0; } do { @@ -510,41 +508,34 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, return 0; } -static int check_supported_cpu(unsigned int cpu) +static void check_supported_cpu(void *_rc) { - cpumask_t oldmask; u32 eax, ebx, ecx, edx; - unsigned int rc = 0; - - oldmask = current->cpus_allowed; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); + int *rc = _rc; - if (smp_processor_id() != cpu) { - printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu); - goto out; - } + *rc = -ENODEV; if (current_cpu_data.x86_vendor != X86_VENDOR_AMD) - goto out; + return; eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); if (((eax & CPUID_XFAM) != CPUID_XFAM_K8) && ((eax & CPUID_XFAM) < CPUID_XFAM_10H)) - goto out; + return; if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) { if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) || ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) { printk(KERN_INFO PFX "Processor cpuid %x not supported\n", eax); - goto out; + return; } eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES); if (eax < CPUID_FREQ_VOLT_CAPABILITIES) { printk(KERN_INFO PFX "No frequency change capabilities detected\n"); - goto out; + return; } cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); @@ -552,21 +543,17 @@ static int check_supported_cpu(unsigned int cpu) != P_STATE_TRANSITION_CAPABLE) { printk(KERN_INFO PFX "Power state transitions not supported\n"); - goto out; + return; } } else { /* must be a HW Pstate capable processor */ cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); if ((edx & USE_HW_PSTATE) == USE_HW_PSTATE) cpu_family = CPU_HW_PSTATE; else - goto out; + return; } - rc = 1; - -out: - set_cpus_allowed_ptr(current, &oldmask); - return rc; + *rc = 0; } static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, @@ -823,13 +810,14 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE)) return; - control = data->acpi_data.states[index].control; data->irt = (control - >> IRT_SHIFT) & IRT_MASK; data->rvo = (control >> - RVO_SHIFT) & RVO_MASK; data->exttype = (control - >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; - data->plllock = (control >> PLL_L_SHIFT) & PLL_L_MASK; data->vidmvs = 1 - << ((control >> MVS_SHIFT) & MVS_MASK); data->vstable = - (control >> VST_SHIFT) & VST_MASK; } + control = data->acpi_data.states[index].control; + data->irt = (control >> IRT_SHIFT) & IRT_MASK; + data->rvo = (control >> RVO_SHIFT) & RVO_MASK; + data->exttype = (control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; + data->plllock = (control >> PLL_L_SHIFT) & PLL_L_MASK; + data->vidmvs = 1 << ((control >> 
MVS_SHIFT) & MVS_MASK); + data->vstable = (control >> VST_SHIFT) & VST_MASK; +} static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { @@ -1046,6 +1034,19 @@ static int get_transition_latency(struct powernow_k8_data *data) if (cur_latency > max_latency) max_latency = cur_latency; } + if (max_latency == 0) { + /* + * Fam 11h always returns 0 as transition latency. + * This is intended and means "very fast". While cpufreq core + * and governors currently can handle that gracefully, better + * set it to 1 to avoid problems in the future. + * For all others it's a BIOS bug. + */ + if (boot_cpu_data.x86 != 0x11) + printk(KERN_ERR FW_WARN PFX "Invalid zero transition " + "latency\n"); + max_latency = 1; + } /* value in usecs, needs to be in nanoseconds */ return 1000 * max_latency; } @@ -1093,7 +1094,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, freqs.old = find_khz_freq_from_fid(data->currfid); freqs.new = find_khz_freq_from_fid(fid); - for_each_cpu_mask_nr(i, *(data->available_cores)) { + for_each_cpu(i, data->available_cores) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } @@ -1101,7 +1102,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, res = transition_fid_vid(data, fid, vid); freqs.new = find_khz_freq_from_fid(data->currfid); - for_each_cpu_mask_nr(i, *(data->available_cores)) { + for_each_cpu(i, data->available_cores) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } @@ -1126,7 +1127,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, data->currpstate); freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); - for_each_cpu_mask_nr(i, *(data->available_cores)) { + for_each_cpu(i, data->available_cores) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } @@ -1134,7 +1135,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, res = transition_pstate(data, pstate); freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); - for_each_cpu_mask_nr(i, *(data->available_cores)) { + for_each_cpu(i, data->available_cores) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } @@ -1235,21 +1236,47 @@ static int powernowk8_verify(struct cpufreq_policy *pol) return cpufreq_frequency_table_verify(pol, data->powernow_table); } -static const char ACPI_PSS_BIOS_BUG_MSG[] = - KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n" - KERN_ERR FW_BUG PFX "Try again with latest BIOS.\n"; +struct init_on_cpu { + struct powernow_k8_data *data; + int rc; +}; + +static void __cpuinit powernowk8_cpu_init_on_cpu(void *_init_on_cpu) +{ + struct init_on_cpu *init_on_cpu = _init_on_cpu; + + if (pending_bit_stuck()) { + printk(KERN_ERR PFX "failing init, change pending bit set\n"); + init_on_cpu->rc = -ENODEV; + return; + } + + if (query_current_values_with_pending_wait(init_on_cpu->data)) { + init_on_cpu->rc = -ENODEV; + return; + } + + if (cpu_family == CPU_OPTERON) + fidvid_msr_init(); + + init_on_cpu->rc = 0; +} /* per CPU init entry point to the driver */ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) { + static const char ACPI_PSS_BIOS_BUG_MSG[] = + KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n" + KERN_ERR FW_BUG PFX "Try again with latest BIOS.\n"; struct powernow_k8_data *data; - cpumask_t oldmask; + struct init_on_cpu init_on_cpu; int rc; if (!cpu_online(pol->cpu)) return -ENODEV; - if (!check_supported_cpu(pol->cpu)) +
smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1); + if (rc) return -ENODEV; data = kzalloc(sizeof(struct powernow_k8_data), GFP_KERNEL); @@ -1289,27 +1316,12 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) pol->cpuinfo.transition_latency = get_transition_latency(data); /* only run on specific CPU from here on */ - oldmask = current->cpus_allowed; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu)); - - if (smp_processor_id() != pol->cpu) { - printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); - goto err_out_unmask; - } - - if (pending_bit_stuck()) { - printk(KERN_ERR PFX "failing init, change pending bit set\n"); - goto err_out_unmask; - } - - if (query_current_values_with_pending_wait(data)) - goto err_out_unmask; - - if (cpu_family == CPU_OPTERON) - fidvid_msr_init(); - - /* run on any CPU again */ - set_cpus_allowed_ptr(current, &oldmask); + init_on_cpu.data = data; + smp_call_function_single(data->cpu, powernowk8_cpu_init_on_cpu, + &init_on_cpu, 1); + rc = init_on_cpu.rc; + if (rc != 0) + goto err_out_exit_acpi; if (cpu_family == CPU_HW_PSTATE) cpumask_copy(pol->cpus, cpumask_of(pol->cpu)); @@ -1346,8 +1358,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) return 0; -err_out_unmask: - set_cpus_allowed_ptr(current, &oldmask); +err_out_exit_acpi: powernow_k8_cpu_exit_acpi(data); err_out: @@ -1372,28 +1383,25 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol) return 0; } +static void query_values_on_cpu(void *_err) +{ + int *err = _err; + struct powernow_k8_data *data = __get_cpu_var(powernow_data); + + *err = query_current_values_with_pending_wait(data); +} + static unsigned int powernowk8_get(unsigned int cpu) { - struct powernow_k8_data *data; - cpumask_t oldmask = current->cpus_allowed; + struct powernow_k8_data *data = per_cpu(powernow_data, cpu); unsigned int khz = 0; - unsigned int first; - - first = cpumask_first(cpu_core_mask(cpu)); - data = per_cpu(powernow_data, first); + int err; if (!data) return -EINVAL; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); - if (smp_processor_id() != cpu) { - printk(KERN_ERR PFX - "limiting to CPU %d failed in powernowk8_get\n", cpu); - set_cpus_allowed_ptr(current, &oldmask); - return 0; - } - - if (query_current_values_with_pending_wait(data)) + smp_call_function_single(cpu, query_values_on_cpu, &err, true); + if (err) goto out; if (cpu_family == CPU_HW_PSTATE) @@ -1404,7 +1412,6 @@ static unsigned int powernowk8_get(unsigned int cpu) out: - set_cpus_allowed_ptr(current, &oldmask); return khz; } @@ -1430,7 +1437,9 @@ static int __cpuinit powernowk8_init(void) unsigned int i, supported_cpus = 0; for_each_online_cpu(i) { - if (check_supported_cpu(i)) + int rc; + smp_call_function_single(i, check_supported_cpu, &rc, 1); + if (rc == 0) supported_cpus++; } diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h index 6c6698f..c9c1190 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h @@ -223,14 +223,3 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table); static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table); - -#ifdef CONFIG_SMP -static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[]) -{ -} -#else -static inline void 
define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[]) -{ - cpu_set(0, cpu_sharedcore_mask[0]); -} -#endif diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index 55c831e..8d672ef 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c @@ -323,14 +323,8 @@ static unsigned int get_cur_freq(unsigned int cpu) { unsigned l, h; unsigned clock_freq; - cpumask_t saved_mask; - saved_mask = current->cpus_allowed; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); - if (smp_processor_id() != cpu) - return 0; - - rdmsr(MSR_IA32_PERF_STATUS, l, h); + rdmsr_on_cpu(cpu, MSR_IA32_PERF_STATUS, &l, &h); clock_freq = extract_clock(l, cpu, 0); if (unlikely(clock_freq == 0)) { @@ -340,11 +334,9 @@ static unsigned int get_cur_freq(unsigned int cpu) * P-state transition (like TM2). Get the last freq set * in PERF_CTL. */ - rdmsr(MSR_IA32_PERF_CTL, l, h); + rdmsr_on_cpu(cpu, MSR_IA32_PERF_CTL, &l, &h); clock_freq = extract_clock(l, cpu, 1); } - - set_cpus_allowed_ptr(current, &saved_mask); return clock_freq; } @@ -467,15 +459,10 @@ static int centrino_target (struct cpufreq_policy *policy, struct cpufreq_freqs freqs; int retval = 0; unsigned int j, k, first_cpu, tmp; - cpumask_var_t saved_mask, covered_cpus; + cpumask_var_t covered_cpus; - if (unlikely(!alloc_cpumask_var(&saved_mask, GFP_KERNEL))) - return -ENOMEM; - if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) { - free_cpumask_var(saved_mask); + if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) return -ENOMEM; - } - cpumask_copy(saved_mask, ¤t->cpus_allowed); if (unlikely(per_cpu(centrino_model, cpu) == NULL)) { retval = -ENODEV; @@ -493,7 +480,7 @@ static int centrino_target (struct cpufreq_policy *policy, first_cpu = 1; for_each_cpu(j, policy->cpus) { - const struct cpumask *mask; + int good_cpu; /* cpufreq holds the hotplug lock, so we are safe here */ if (!cpu_online(j)) @@ -504,32 +491,30 @@ static int centrino_target (struct cpufreq_policy *policy, * Make sure we are running on CPU that wants to change freq */ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) - mask = policy->cpus; + good_cpu = cpumask_any_and(policy->cpus, + cpu_online_mask); else - mask = cpumask_of(j); + good_cpu = j; - set_cpus_allowed_ptr(current, mask); - preempt_disable(); - if (unlikely(!cpu_isset(smp_processor_id(), *mask))) { + if (good_cpu >= nr_cpu_ids) { dprintk("couldn't limit to CPUs in this domain\n"); retval = -EAGAIN; if (first_cpu) { /* We haven't started the transition yet. 
*/ - goto migrate_end; + goto out; } - preempt_enable(); break; } msr = per_cpu(centrino_model, cpu)->op_points[newstate].index; if (first_cpu) { - rdmsr(MSR_IA32_PERF_CTL, oldmsr, h); + rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h); if (msr == (oldmsr & 0xffff)) { dprintk("no change needed - msr was and needs " "to be %x\n", oldmsr); retval = 0; - goto migrate_end; + goto out; } freqs.old = extract_clock(oldmsr, cpu, 0); @@ -553,14 +538,11 @@ static int centrino_target (struct cpufreq_policy *policy, oldmsr |= msr; } - wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); - if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { - preempt_enable(); + wrmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, oldmsr, h); + if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) break; - } - cpu_set(j, *covered_cpus); - preempt_enable(); + cpumask_set_cpu(j, covered_cpus); } for_each_cpu(k, policy->cpus) { @@ -578,10 +560,8 @@ static int centrino_target (struct cpufreq_policy *policy, * Best effort undo.. */ - for_each_cpu_mask_nr(j, *covered_cpus) { - set_cpus_allowed_ptr(current, &cpumask_of_cpu(j)); - wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); - } + for_each_cpu(j, covered_cpus) + wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h); tmp = freqs.new; freqs.new = freqs.old; @@ -593,15 +573,9 @@ static int centrino_target (struct cpufreq_policy *policy, cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } } - set_cpus_allowed_ptr(current, saved_mask); retval = 0; - goto out; -migrate_end: - preempt_enable(); - set_cpus_allowed_ptr(current, saved_mask); out: - free_cpumask_var(saved_mask); free_cpumask_var(covered_cpus); return retval; } diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c index 016c1a4..6911e91 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c @@ -89,7 +89,8 @@ static int speedstep_find_register(void) * speedstep_set_state - set the SpeedStep state * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) * - * Tries to change the SpeedStep state. + * Tries to change the SpeedStep state. Can be called from + * smp_call_function_single. */ static void speedstep_set_state(unsigned int state) { @@ -143,6 +144,11 @@ static void speedstep_set_state(unsigned int state) return; } +/* Wrapper for smp_call_function_single. */ +static void _speedstep_set_state(void *_state) +{ + speedstep_set_state(*(unsigned int *)_state); +} /** * speedstep_activate - activate SpeedStep control in the chipset @@ -226,22 +232,28 @@ static unsigned int speedstep_detect_chipset(void) return 0; } -static unsigned int _speedstep_get(const struct cpumask *cpus) -{ +struct get_freq_data { unsigned int speed; - cpumask_t cpus_allowed; - - cpus_allowed = current->cpus_allowed; - set_cpus_allowed_ptr(current, cpus); - speed = speedstep_get_frequency(speedstep_processor); - set_cpus_allowed_ptr(current, &cpus_allowed); - dprintk("detected %u kHz as current frequency\n", speed); - return speed; + unsigned int processor; +}; + +static void get_freq_data(void *_data) +{ + struct get_freq_data *data = _data; + + data->speed = speedstep_get_frequency(data->processor); } static unsigned int speedstep_get(unsigned int cpu) { - return _speedstep_get(cpumask_of(cpu)); + struct get_freq_data data = { .processor = cpu }; + + /* You're supposed to ensure CPU is online. 
*/ + if (smp_call_function_single(cpu, get_freq_data, &data, 1) != 0) + BUG(); + + dprintk("detected %u kHz as current frequency\n", data.speed); + return data.speed; } /** @@ -257,16 +269,16 @@ static int speedstep_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { - unsigned int newstate = 0; + unsigned int newstate = 0, policy_cpu; struct cpufreq_freqs freqs; - cpumask_t cpus_allowed; int i; if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate)) return -EINVAL; - freqs.old = _speedstep_get(policy->cpus); + policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask); + freqs.old = speedstep_get(policy_cpu); freqs.new = speedstep_freqs[newstate].frequency; freqs.cpu = policy->cpu; @@ -276,20 +288,13 @@ static int speedstep_target(struct cpufreq_policy *policy, if (freqs.old == freqs.new) return 0; - cpus_allowed = current->cpus_allowed; - for_each_cpu(i, policy->cpus) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } - /* switch to physical CPU where state is to be changed */ - set_cpus_allowed_ptr(current, policy->cpus); - - speedstep_set_state(newstate); - - /* allow to be run on all CPUs */ - set_cpus_allowed_ptr(current, &cpus_allowed); + smp_call_function_single(policy_cpu, _speedstep_set_state, &newstate, + true); for_each_cpu(i, policy->cpus) { freqs.cpu = i; @@ -312,33 +317,43 @@ static int speedstep_verify(struct cpufreq_policy *policy) return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]); } +struct get_freqs { + struct cpufreq_policy *policy; + int ret; +}; + +static void get_freqs_on_cpu(void *_get_freqs) +{ + struct get_freqs *get_freqs = _get_freqs; + + get_freqs->ret = + speedstep_get_freqs(speedstep_processor, + &speedstep_freqs[SPEEDSTEP_LOW].frequency, + &speedstep_freqs[SPEEDSTEP_HIGH].frequency, + &get_freqs->policy->cpuinfo.transition_latency, + &speedstep_set_state); +} static int speedstep_cpu_init(struct cpufreq_policy *policy) { - int result = 0; - unsigned int speed; - cpumask_t cpus_allowed; + int result; + unsigned int policy_cpu, speed; + struct get_freqs gf; /* only run on CPU to be set, or on its sibling */ #ifdef CONFIG_SMP cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); #endif - - cpus_allowed = current->cpus_allowed; - set_cpus_allowed_ptr(current, policy->cpus); + policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask); /* detect low and high frequency and transition latency */ - result = speedstep_get_freqs(speedstep_processor, - &speedstep_freqs[SPEEDSTEP_LOW].frequency, - &speedstep_freqs[SPEEDSTEP_HIGH].frequency, - &policy->cpuinfo.transition_latency, - &speedstep_set_state); - set_cpus_allowed_ptr(current, &cpus_allowed); - if (result) - return result; + gf.policy = policy; + smp_call_function_single(policy_cpu, get_freqs_on_cpu, &gf, 1); + if (gf.ret) + return gf.ret; /* get current speed setting */ - speed = _speedstep_get(policy->cpus); + speed = speedstep_get(policy_cpu); if (!speed) return -EIO; diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c index 2e3c686..f4c290b 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c @@ -226,6 +226,7 @@ static unsigned int pentium4_get_frequency(void) } +/* Warning: may get called from smp_call_function_single. 
*/ unsigned int speedstep_get_frequency(unsigned int processor) { switch (processor) { diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index ae3180c..b0597ad 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -632,17 +632,15 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_freqs *freq = data; - unsigned long *lpj, dummy; + unsigned long *lpj; if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC)) return 0; - lpj = &dummy; - if (!(freq->flags & CPUFREQ_CONST_LOOPS)) + lpj = &boot_cpu_data.loops_per_jiffy; #ifdef CONFIG_SMP + if (!(freq->flags & CPUFREQ_CONST_LOOPS)) lpj = &cpu_data(freq->cpu).loops_per_jiffy; -#else - lpj = &boot_cpu_data.loops_per_jiffy; #endif if (!ref_freq) { diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index a85bef2..0fb56db 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c @@ -116,7 +116,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list) struct pci_bus *bus; struct pci_dev *dev; int idx; - struct resource *r, *pr; + struct resource *r; /* Depth-First Search on bus tree */ list_for_each_entry(bus, bus_list, node) { @@ -126,9 +126,8 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list) r = &dev->resource[idx]; if (!r->flags) continue; - pr = pci_find_parent_resource(dev, r); - if (!r->start || !pr || - request_resource(pr, r) < 0) { + if (!r->start || + pci_claim_resource(dev, idx) < 0) { dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx); /* * Something is wrong with the region. @@ -149,7 +148,7 @@ static void __init pcibios_allocate_resources(int pass) struct pci_dev *dev = NULL; int idx, disabled; u16 command; - struct resource *r, *pr; + struct resource *r; for_each_pci_dev(dev) { pci_read_config_word(dev, PCI_COMMAND, &command); @@ -168,8 +167,7 @@ static void __init pcibios_allocate_resources(int pass) (unsigned long long) r->start, (unsigned long long) r->end, r->flags, disabled, pass); - pr = pci_find_parent_resource(dev, r); - if (!pr || request_resource(pr, r) < 0) { + if (pci_claim_resource(dev, idx) < 0) { dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx); /* We'll assign a new address later */ r->end -= r->start; @@ -197,7 +195,7 @@ static void __init pcibios_allocate_resources(int pass) static int __init pcibios_assign_resources(void) { struct pci_dev *dev = NULL; - struct resource *r, *pr; + struct resource *r; if (!(pci_probe & PCI_ASSIGN_ROMS)) { /* @@ -209,8 +207,7 @@ static int __init pcibios_assign_resources(void) r = &dev->resource[PCI_ROM_RESOURCE]; if (!r->flags || !r->start) continue; - pr = pci_find_parent_resource(dev, r); - if (!pr || request_resource(pr, r) < 0) { + if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) { r->end -= r->start; r->start = 0; } diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile index 16a9020..88112b49 100644 --- a/arch/x86/vdso/Makefile +++ b/arch/x86/vdso/Makefile @@ -123,6 +123,7 @@ quiet_cmd_vdso = VDSO $@ -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) VDSO_LDFLAGS = -fPIC -shared $(call ld-option, -Wl$(comma)--hash-style=sysv) +GCOV_PROFILE := n # # Install the unstripped copy of vdso*.so listed in $(vdso-install-y). 
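The cpufreq conversions above (powernow-k8, speedstep-centrino, speedstep-ich) all follow one idiom: instead of temporarily rebinding the current task with set_cpus_allowed_ptr() so that an MSR access happens on the right processor, the access is pushed to the target CPU with smp_call_function_single(), which runs a short callback there and, when the wait argument is set, blocks until the callback has finished. The sketch below shows the shape of that idiom; read_status_on_cpu() and its result struct are hypothetical helpers, not code from this patch:

#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/types.h>

struct status_result {
	u32 value;
	int rc;
};

/* Runs on the target CPU in IPI context with interrupts disabled:
 * keep it short and never sleep here. */
static void read_status_on_cpu(void *_res)
{
	struct status_result *res = _res;

	res->value = 0;		/* e.g. a local rdmsr() would go here */
	res->rc = 0;
}

static int read_status(unsigned int cpu, u32 *value)
{
	struct status_result res;

	/* wait=1: do not return until the callback has run on 'cpu' */
	if (smp_call_function_single(cpu, read_status_on_cpu, &res, 1))
		return -ENXIO;	/* target CPU was offline */
	if (res.rc)
		return res.rc;
	*value = res.value;
	return 0;
}

The same pattern also removes the preempt_disable()/cpu_isset() dance the old code needed, since the callback is guaranteed to execute on the requested CPU or the call fails outright.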
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index 031f366..e1a04a3 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -331,11 +331,6 @@ long xtensa_execve(char __user *name, char __user * __user *argv, if (IS_ERR(filename)) goto out; error = do_execve(filename, argv, envp, regs); - if (error == 0) { - task_lock(current); - current->ptrace &= ~PT_DTRACE; - task_unlock(current); - } putname(filename); out: return error; diff --git a/drivers/Kconfig b/drivers/Kconfig index a442c8f..48bbdbe 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -52,6 +52,8 @@ source "drivers/i2c/Kconfig" source "drivers/spi/Kconfig" +source "drivers/pps/Kconfig" + source "drivers/gpio/Kconfig" source "drivers/w1/Kconfig" diff --git a/drivers/Makefile b/drivers/Makefile index 00b44f4..bc4205d 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -72,6 +72,7 @@ obj-$(CONFIG_INPUT) += input/ obj-$(CONFIG_I2O) += message/ obj-$(CONFIG_RTC_LIB) += rtc/ obj-y += i2c/ media/ +obj-$(CONFIG_PPS) += pps/ obj-$(CONFIG_W1) += w1/ obj-$(CONFIG_POWER_SUPPLY) += power/ obj-$(CONFIG_HWMON) += hwmon/ diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 30bae6d..0bd01f4 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -807,7 +807,7 @@ if RTC_LIB=n config RTC tristate "Enhanced Real Time Clock Support (legacy PC RTC driver)" depends on !PPC && !PARISC && !IA64 && !M68K && !SPARC && !FRV \ - && !ARM && !SUPERH && !S390 && !AVR32 + && !ARM && !SUPERH && !S390 && !AVR32 && !BLACKFIN ---help--- If you say Y here and create a character special file /dev/rtc with major number 10 and minor number 135 using mknod ("man mknod"), you diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c index 183ac3f..9c7e234 100644 --- a/drivers/char/agp/hp-agp.c +++ b/drivers/char/agp/hp-agp.c @@ -518,8 +518,9 @@ zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret) if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa)) return AE_OK; - printk(KERN_INFO PFX "Detected HP ZX1 %s AGP chipset (ioc=%lx, lba=%lx)\n", - (char *) context, sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa); + printk(KERN_INFO PFX "Detected HP ZX1 %s AGP chipset " + "(ioc=%llx, lba=%llx)\n", (char *)context, + sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa); hp_zx1_gart_found = 1; return AE_CTRL_TERMINATE; /* we only support one bridge; quit looking */ diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index f4b3f72..ce66a70 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -149,6 +149,19 @@ config HW_RANDOM_VIRTIO To compile this driver as a module, choose M here: the module will be called virtio-rng. If unsure, say N. +config HW_RANDOM_TX4939 + tristate "TX4939 Random Number Generator support" + depends on HW_RANDOM && SOC_TX4939 + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on the TX4939 SoC. + + To compile this driver as a module, choose M here: the + module will be called tx4939-rng. + + If unsure, say Y.
+ config HW_RANDOM_MXC_RNGA tristate "Freescale i.MX RNGA Random Number Generator" depends on HW_RANDOM && ARCH_HAS_RNGA diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index fd1ecd2..676828b 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -15,4 +15,5 @@ obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o +obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o diff --git a/drivers/char/hw_random/tx4939-rng.c b/drivers/char/hw_random/tx4939-rng.c new file mode 100644 index 0000000..544d908 --- /dev/null +++ b/drivers/char/hw_random/tx4939-rng.c @@ -0,0 +1,184 @@ +/* + * RNG driver for TX4939 Random Number Generators (RNG) + * + * Copyright (C) 2009 Atsushi Nemoto <anemo@mba.ocn.ne.jp> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/hw_random.h> + +#define TX4939_RNG_RCSR 0x00000000 +#define TX4939_RNG_ROR(n) (0x00000018 + (n) * 8) + +#define TX4939_RNG_RCSR_INTE 0x00000008 +#define TX4939_RNG_RCSR_RST 0x00000004 +#define TX4939_RNG_RCSR_FIN 0x00000002 +#define TX4939_RNG_RCSR_ST 0x00000001 + +struct tx4939_rng { + struct hwrng rng; + void __iomem *base; + u64 databuf[3]; + unsigned int data_avail; +}; + +static void rng_io_start(void) +{ +#ifndef CONFIG_64BIT + /* + * readq is reading a 64-bit register using a 64-bit load. On + * a 32-bit kernel however interrupts or any other processor + * exception would clobber the upper 32-bit of the processor + * register so interrupts need to be disabled. 
+ */ + local_irq_disable(); +#endif +} + +static void rng_io_end(void) +{ +#ifndef CONFIG_64BIT + local_irq_enable(); +#endif +} + +static u64 read_rng(void __iomem *base, unsigned int offset) +{ + return ____raw_readq(base + offset); +} + +static void write_rng(u64 val, void __iomem *base, unsigned int offset) +{ + return ____raw_writeq(val, base + offset); +} + +static int tx4939_rng_data_present(struct hwrng *rng, int wait) +{ + struct tx4939_rng *rngdev = container_of(rng, struct tx4939_rng, rng); + int i; + + if (rngdev->data_avail) + return rngdev->data_avail; + for (i = 0; i < 20; i++) { + rng_io_start(); + if (!(read_rng(rngdev->base, TX4939_RNG_RCSR) + & TX4939_RNG_RCSR_ST)) { + rngdev->databuf[0] = + read_rng(rngdev->base, TX4939_RNG_ROR(0)); + rngdev->databuf[1] = + read_rng(rngdev->base, TX4939_RNG_ROR(1)); + rngdev->databuf[2] = + read_rng(rngdev->base, TX4939_RNG_ROR(2)); + rngdev->data_avail = + sizeof(rngdev->databuf) / sizeof(u32); + /* Start RNG */ + write_rng(TX4939_RNG_RCSR_ST, + rngdev->base, TX4939_RNG_RCSR); + wait = 0; + } + rng_io_end(); + if (!wait) + break; + /* 90 bus clock cycles by default for generation */ + ndelay(90 * 5); + } + return rngdev->data_avail; +} + +static int tx4939_rng_data_read(struct hwrng *rng, u32 *buffer) +{ + struct tx4939_rng *rngdev = container_of(rng, struct tx4939_rng, rng); + + rngdev->data_avail--; + *buffer = *((u32 *)&rngdev->databuf + rngdev->data_avail); + return sizeof(u32); +} + +static int __init tx4939_rng_probe(struct platform_device *dev) +{ + struct tx4939_rng *rngdev; + struct resource *r; + int i; + + r = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!r) + return -EBUSY; + rngdev = devm_kzalloc(&dev->dev, sizeof(*rngdev), GFP_KERNEL); + if (!rngdev) + return -ENOMEM; + if (!devm_request_mem_region(&dev->dev, r->start, resource_size(r), + dev_name(&dev->dev))) + return -EBUSY; + rngdev->base = devm_ioremap(&dev->dev, r->start, resource_size(r)); + if (!rngdev->base) + return -EBUSY; + + rngdev->rng.name = dev_name(&dev->dev); + rngdev->rng.data_present = tx4939_rng_data_present; + rngdev->rng.data_read = tx4939_rng_data_read; + + rng_io_start(); + /* Reset RNG */ + write_rng(TX4939_RNG_RCSR_RST, rngdev->base, TX4939_RNG_RCSR); + write_rng(0, rngdev->base, TX4939_RNG_RCSR); + /* Start RNG */ + write_rng(TX4939_RNG_RCSR_ST, rngdev->base, TX4939_RNG_RCSR); + rng_io_end(); + /* + * Drop first two results. From the datasheet: + * The quality of the random numbers generated immediately + * after reset can be insufficient. Therefore, do not use + * random numbers obtained from the first and second + * generations; use the ones from the third or subsequent + * generation. 
+ */ + for (i = 0; i < 2; i++) { + rngdev->data_avail = 0; + if (!tx4939_rng_data_present(&rngdev->rng, 1)) + return -EIO; + } + + platform_set_drvdata(dev, rngdev); + return hwrng_register(&rngdev->rng); +} + +static int __exit tx4939_rng_remove(struct platform_device *dev) +{ + struct tx4939_rng *rngdev = platform_get_drvdata(dev); + + hwrng_unregister(&rngdev->rng); + platform_set_drvdata(dev, NULL); + return 0; +} + +static struct platform_driver tx4939_rng_driver = { + .driver = { + .name = "tx4939-rng", + .owner = THIS_MODULE, + }, + .remove = tx4939_rng_remove, +}; + +static int __init tx4939rng_init(void) +{ + return platform_driver_probe(&tx4939_rng_driver, tx4939_rng_probe); +} + +static void __exit tx4939rng_exit(void) +{ + platform_driver_unregister(&tx4939_rng_driver); +} + +module_init(tx4939rng_init); +module_exit(tx4939rng_exit); + +MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver for TX4939"); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c index 4d745a8..4159292 100644 --- a/drivers/char/isicom.c +++ b/drivers/char/isicom.c @@ -1593,7 +1593,7 @@ static unsigned int card_count; static int __devinit isicom_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - unsigned int signature, index; + unsigned int uninitialized_var(signature), index; int retval = -EPERM; struct isi_board *board = NULL; diff --git a/drivers/char/mem.c b/drivers/char/mem.c index f96d0be..afa8813 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -863,59 +863,58 @@ static const struct file_operations kmsg_fops = { .write = kmsg_write, }; -static int memory_open(struct inode * inode, struct file * filp) -{ - int ret = 0; - - lock_kernel(); - switch (iminor(inode)) { - case 1: - filp->f_op = &mem_fops; - filp->f_mapping->backing_dev_info = - &directly_mappable_cdev_bdi; - break; +static const struct { + unsigned int minor; + char *name; + umode_t mode; + const struct file_operations *fops; + struct backing_dev_info *dev_info; +} devlist[] = { /* list of minor devices */ + {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops, + &directly_mappable_cdev_bdi}, #ifdef CONFIG_DEVKMEM - case 2: - filp->f_op = &kmem_fops; - filp->f_mapping->backing_dev_info = - &directly_mappable_cdev_bdi; - break; + {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops, + &directly_mappable_cdev_bdi}, #endif - case 3: - filp->f_op = &null_fops; - break; + {3, "null", S_IRUGO | S_IWUGO, &null_fops, NULL}, #ifdef CONFIG_DEVPORT - case 4: - filp->f_op = &port_fops; - break; + {4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops, NULL}, #endif - case 5: - filp->f_mapping->backing_dev_info = &zero_bdi; - filp->f_op = &zero_fops; - break; - case 7: - filp->f_op = &full_fops; - break; - case 8: - filp->f_op = &random_fops; - break; - case 9: - filp->f_op = &urandom_fops; - break; - case 11: - filp->f_op = &kmsg_fops; - break; + {5, "zero", S_IRUGO | S_IWUGO, &zero_fops, &zero_bdi}, + {7, "full", S_IRUGO | S_IWUGO, &full_fops, NULL}, + {8, "random", S_IRUGO | S_IWUSR, &random_fops, NULL}, + {9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops, NULL}, + {11,"kmsg", S_IRUGO | S_IWUSR, &kmsg_fops, NULL}, #ifdef CONFIG_CRASH_DUMP - case 12: - filp->f_op = &oldmem_fops; - break; + {12,"oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops, NULL}, #endif - default: - unlock_kernel(); - return -ENXIO; +}; + +static int memory_open(struct inode *inode, struct file *filp) +{ + int ret = 0; + int i; + + lock_kernel(); + + for (i = 0; i < ARRAY_SIZE(devlist); i++) { + if (devlist[i].minor == 
iminor(inode)) { + filp->f_op = devlist[i].fops; + if (devlist[i].dev_info) { + filp->f_mapping->backing_dev_info = + devlist[i].dev_info; + } + + break; + } } - if (filp->f_op && filp->f_op->open) - ret = filp->f_op->open(inode,filp); + + if (i == ARRAY_SIZE(devlist)) + ret = -ENXIO; + else + if (filp->f_op && filp->f_op->open) + ret = filp->f_op->open(inode, filp); + unlock_kernel(); return ret; } @@ -924,30 +923,6 @@ static const struct file_operations memory_fops = { .open = memory_open, /* just a selector for the real open */ }; -static const struct { - unsigned int minor; - char *name; - umode_t mode; - const struct file_operations *fops; -} devlist[] = { /* list of minor devices */ - {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops}, -#ifdef CONFIG_DEVKMEM - {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops}, -#endif - {3, "null", S_IRUGO | S_IWUGO, &null_fops}, -#ifdef CONFIG_DEVPORT - {4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops}, -#endif - {5, "zero", S_IRUGO | S_IWUGO, &zero_fops}, - {7, "full", S_IRUGO | S_IWUGO, &full_fops}, - {8, "random", S_IRUGO | S_IWUSR, &random_fops}, - {9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops}, - {11,"kmsg", S_IRUGO | S_IWUSR, &kmsg_fops}, -#ifdef CONFIG_CRASH_DUMP - {12,"oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops}, -#endif -}; - static struct class *mem_class; static int __init chr_dev_init(void) diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index c84c34f..432655b 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c @@ -114,8 +114,7 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count, if (!(pp->flags & PP_CLAIMED)) { /* Don't have the port claimed */ - printk (KERN_DEBUG CHRDEV "%x: claim the port first\n", - minor); + pr_debug(CHRDEV "%x: claim the port first\n", minor); return -EINVAL; } @@ -198,8 +197,7 @@ static ssize_t pp_write (struct file * file, const char __user * buf, if (!(pp->flags & PP_CLAIMED)) { /* Don't have the port claimed */ - printk (KERN_DEBUG CHRDEV "%x: claim the port first\n", - minor); + pr_debug(CHRDEV "%x: claim the port first\n", minor); return -EINVAL; } @@ -313,7 +311,7 @@ static int register_device (int minor, struct pp_struct *pp) } pp->pdev = pdev; - printk (KERN_DEBUG "%s: registered pardevice\n", name); + pr_debug("%s: registered pardevice\n", name); return 0; } @@ -343,8 +341,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) int ret; if (pp->flags & PP_CLAIMED) { - printk (KERN_DEBUG CHRDEV - "%x: you've already got it!\n", minor); + pr_debug(CHRDEV "%x: you've already got it!\n", minor); return -EINVAL; } @@ -379,7 +376,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } case PPEXCL: if (pp->pdev) { - printk (KERN_DEBUG CHRDEV "%x: too late for PPEXCL; " + pr_debug(CHRDEV "%x: too late for PPEXCL; " "already registered\n", minor); if (pp->flags & PP_EXCL) /* But it's not really an error. */ @@ -491,8 +488,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) /* Everything else requires the port to be claimed, so check * that now. */ if ((pp->flags & PP_CLAIMED) == 0) { - printk (KERN_DEBUG CHRDEV "%x: claim the port first\n", - minor); + pr_debug(CHRDEV "%x: claim the port first\n", minor); return -EINVAL; } @@ -624,8 +620,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return 0; default: - printk (KERN_DEBUG CHRDEV "%x: What? (cmd=0x%x)\n", minor, - cmd); + pr_debug(CHRDEV "%x: What? 
(cmd=0x%x)\n", minor, cmd); return -EINVAL; } @@ -698,9 +693,8 @@ static int pp_release (struct inode * inode, struct file * file) } if (compat_negot) { parport_negotiate (pp->pdev->port, IEEE1284_MODE_COMPAT); - printk (KERN_DEBUG CHRDEV - "%x: negotiated back to compatibility mode because " - "user-space forgot\n", minor); + pr_debug(CHRDEV "%x: negotiated back to compatibility " + "mode because user-space forgot\n", minor); } if (pp->flags & PP_CLAIMED) { @@ -713,7 +707,7 @@ static int pp_release (struct inode * inode, struct file * file) info->phase = pp->saved_state.phase; parport_release (pp->pdev); if (compat_negot != 1) { - printk (KERN_DEBUG CHRDEV "%x: released pardevice " + pr_debug(CHRDEV "%x: released pardevice " "because user-space forgot\n", minor); } } @@ -723,8 +717,7 @@ static int pp_release (struct inode * inode, struct file * file) parport_unregister_device (pp->pdev); kfree (name); pp->pdev = NULL; - printk (KERN_DEBUG CHRDEV "%x: unregistered pardevice\n", - minor); + pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); } kfree (pp); diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index 39a05b5..0db3585 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c @@ -121,20 +121,17 @@ static struct sysrq_key_op sysrq_unraw_op = { #define sysrq_unraw_op (*(struct sysrq_key_op *)0) #endif /* CONFIG_VT */ -#ifdef CONFIG_KEXEC -static void sysrq_handle_crashdump(int key, struct tty_struct *tty) +static void sysrq_handle_crash(int key, struct tty_struct *tty) { - crash_kexec(get_irq_regs()); + char *killer = NULL; + *killer = 1; } static struct sysrq_key_op sysrq_crashdump_op = { - .handler = sysrq_handle_crashdump, - .help_msg = "Crashdump", - .action_msg = "Trigger a crashdump", + .handler = sysrq_handle_crash, + .help_msg = "Crash", + .action_msg = "Trigger a crash", .enable_mask = SYSRQ_ENABLE_DUMP, }; -#else -#define sysrq_crashdump_op (*(struct sysrq_key_op *)0) -#endif static void sysrq_handle_reboot(int key, struct tty_struct *tty) { diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 7a74d17..7fc58af 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -42,27 +42,12 @@ * this governor will not work. * All times here are in uS. */ -static unsigned int def_sampling_rate; #define MIN_SAMPLING_RATE_RATIO (2) -/* for correct statistics, we need at least 10 ticks between each measure */ -#define MIN_STAT_SAMPLING_RATE \ - (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10)) -#define MIN_SAMPLING_RATE \ - (def_sampling_rate / MIN_SAMPLING_RATE_RATIO) -/* Above MIN_SAMPLING_RATE will vanish with its sysfs file soon - * Define the minimal settable sampling rate to the greater of: - * - "HW transition latency" * 100 (same as default sampling / 10) - * - MIN_STAT_SAMPLING_RATE - * To avoid that userspace shoots itself. 
-*/ -static unsigned int minimum_sampling_rate(void) -{ - return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE); -} -/* This will also vanish soon with removing sampling_rate_max */ -#define MAX_SAMPLING_RATE (500 * def_sampling_rate) +static unsigned int min_sampling_rate; + #define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (100) #define DEF_SAMPLING_DOWN_FACTOR (1) #define MAX_SAMPLING_DOWN_FACTOR (10) #define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) @@ -182,27 +167,14 @@ static struct notifier_block dbs_cpufreq_notifier_block = { /************************** sysfs interface ************************/ static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) { - static int print_once; - - if (!print_once) { - printk(KERN_INFO "CPUFREQ: conservative sampling_rate_max " - "sysfs file is deprecated - used by: %s\n", - current->comm); - print_once = 1; - } - return sprintf(buf, "%u\n", MAX_SAMPLING_RATE); + printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max " + "sysfs file is deprecated - used by: %s\n", current->comm); + return sprintf(buf, "%u\n", -1U); } static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf) { - static int print_once; - - if (!print_once) { - printk(KERN_INFO "CPUFREQ: conservative sampling_rate_max " - "sysfs file is deprecated - used by: %s\n", current->comm); - print_once = 1; - } - return sprintf(buf, "%u\n", MIN_SAMPLING_RATE); + return sprintf(buf, "%u\n", min_sampling_rate); } #define define_one_ro(_name) \ @@ -254,7 +226,7 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused, return -EINVAL; mutex_lock(&dbs_mutex); - dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate()); + dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); mutex_unlock(&dbs_mutex); return count; @@ -601,11 +573,18 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, if (latency == 0) latency = 1; - def_sampling_rate = - max(latency * LATENCY_MULTIPLIER, - MIN_STAT_SAMPLING_RATE); - - dbs_tuners_ins.sampling_rate = def_sampling_rate; + /* + * conservative does not implement micro-accounting like the + * ondemand governor, thus we are bound to jiffies/HZ + */ + min_sampling_rate = + MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); + /* Bring kernel and HW constraints together */ + min_sampling_rate = max(min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + dbs_tuners_ins.sampling_rate = + max(min_sampling_rate, + latency * LATENCY_MULTIPLIER); cpufreq_register_notifier( &dbs_cpufreq_notifier_block, diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index e741c33..1911d17 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -32,6 +32,7 @@ #define DEF_FREQUENCY_UP_THRESHOLD (80) #define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) #define MICRO_FREQUENCY_UP_THRESHOLD (95) +#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) #define MIN_FREQUENCY_UP_THRESHOLD (11) #define MAX_FREQUENCY_UP_THRESHOLD (100) @@ -45,27 +46,12 @@ * this governor will not work. * All times here are in uS. 
*/ -static unsigned int def_sampling_rate; #define MIN_SAMPLING_RATE_RATIO (2) -/* for correct statistics, we need at least 10 ticks between each measure */ -#define MIN_STAT_SAMPLING_RATE \ - (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10)) -#define MIN_SAMPLING_RATE \ - (def_sampling_rate / MIN_SAMPLING_RATE_RATIO) -/* Above MIN_SAMPLING_RATE will vanish with its sysfs file soon - * Define the minimal settable sampling rate to the greater of: - * - "HW transition latency" * 100 (same as default sampling / 10) - * - MIN_STAT_SAMPLING_RATE - * To avoid that userspace shoots itself. -*/ -static unsigned int minimum_sampling_rate(void) -{ - return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE); -} -/* This will also vanish soon with removing sampling_rate_max */ -#define MAX_SAMPLING_RATE (500 * def_sampling_rate) +static unsigned int min_sampling_rate; + #define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (100) #define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) static void do_dbs_timer(struct work_struct *work); @@ -219,28 +205,14 @@ static void ondemand_powersave_bias_init(void) /************************** sysfs interface ************************/ static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) { - static int print_once; - - if (!print_once) { - printk(KERN_INFO "CPUFREQ: ondemand sampling_rate_max " - "sysfs file is deprecated - used by: %s\n", - current->comm); - print_once = 1; - } - return sprintf(buf, "%u\n", MAX_SAMPLING_RATE); + printk_once(KERN_INFO "CPUFREQ: ondemand sampling_rate_max " + "sysfs file is deprecated - used by: %s\n", current->comm); + return sprintf(buf, "%u\n", -1U); } static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf) { - static int print_once; - - if (!print_once) { - printk(KERN_INFO "CPUFREQ: ondemand sampling_rate_min " - "sysfs file is deprecated - used by: %s\n", - current->comm); - print_once = 1; - } - return sprintf(buf, "%u\n", MIN_SAMPLING_RATE); + return sprintf(buf, "%u\n", min_sampling_rate); } #define define_one_ro(_name) \ @@ -274,7 +246,7 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused, mutex_unlock(&dbs_mutex); return -EINVAL; } - dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate()); + dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); mutex_unlock(&dbs_mutex); return count; @@ -619,12 +591,12 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, latency = policy->cpuinfo.transition_latency / 1000; if (latency == 0) latency = 1; - - def_sampling_rate = - max(latency * LATENCY_MULTIPLIER, - MIN_STAT_SAMPLING_RATE); - - dbs_tuners_ins.sampling_rate = def_sampling_rate; + /* Bring kernel and HW constraints together */ + min_sampling_rate = max(min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + dbs_tuners_ins.sampling_rate = + max(min_sampling_rate, + latency * LATENCY_MULTIPLIER); } dbs_timer_init(this_dbs_info); @@ -678,6 +650,16 @@ static int __init cpufreq_gov_dbs_init(void) dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; dbs_tuners_ins.down_differential = MICRO_FREQUENCY_DOWN_DIFFERENTIAL; + /* + * In no_hz/micro accounting case we set the minimum frequency + * not depending on HZ, but fixed (very low). The deferred + * timer might skip some samples if idle/sleeping as needed. 
+ */ + min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; + } else { + /* For correct statistics, we need 10 ticks for each measure */ + min_sampling_rate = + MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); } kondemand_wq = create_workqueue("kondemand"); diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 3b3c01b..070357a 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -81,6 +81,14 @@ config MX3_IPU_IRQS To avoid bloating the irq_desc[] array we allocate a sufficient number of IRQ slots and map them dynamically to specific sources. +config TXX9_DMAC + tristate "Toshiba TXx9 SoC DMA support" + depends on MACH_TX49XX || MACH_TX39XX + select DMA_ENGINE + help + Support the TXx9 SoC internal DMA controller. This can be + integrated in chips such as the Toshiba TX4927/38/39. + config DMA_ENGINE bool diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 2e5dc96..a0b6564 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -8,3 +8,4 @@ obj-$(CONFIG_FSL_DMA) += fsldma.o obj-$(CONFIG_MV_XOR) += mv_xor.o obj-$(CONFIG_DW_DMAC) += dw_dmac.o obj-$(CONFIG_MX3_IPU) += ipu/ +obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c new file mode 100644 index 0000000..9aa9ea9 --- /dev/null +++ b/drivers/dma/txx9dmac.c @@ -0,0 +1,1354 @@ +/* + * Driver for the TXx9 SoC DMA Controller + * + * Copyright (C) 2009 Atsushi Nemoto + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/dma-mapping.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/scatterlist.h> +#include "txx9dmac.h" + +static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan) +{ + return container_of(chan, struct txx9dmac_chan, chan); +} + +static struct txx9dmac_cregs __iomem *__dma_regs(const struct txx9dmac_chan *dc) +{ + return dc->ch_regs; +} + +static struct txx9dmac_cregs32 __iomem *__dma_regs32( + const struct txx9dmac_chan *dc) +{ + return dc->ch_regs; +} + +#define channel64_readq(dc, name) \ + __raw_readq(&(__dma_regs(dc)->name)) +#define channel64_writeq(dc, name, val) \ + __raw_writeq((val), &(__dma_regs(dc)->name)) +#define channel64_readl(dc, name) \ + __raw_readl(&(__dma_regs(dc)->name)) +#define channel64_writel(dc, name, val) \ + __raw_writel((val), &(__dma_regs(dc)->name)) + +#define channel32_readl(dc, name) \ + __raw_readl(&(__dma_regs32(dc)->name)) +#define channel32_writel(dc, name, val) \ + __raw_writel((val), &(__dma_regs32(dc)->name)) + +#define channel_readq(dc, name) channel64_readq(dc, name) +#define channel_writeq(dc, name, val) channel64_writeq(dc, name, val) +#define channel_readl(dc, name) \ + (is_dmac64(dc) ? \ + channel64_readl(dc, name) : channel32_readl(dc, name)) +#define channel_writel(dc, name, val) \ + (is_dmac64(dc) ? 
\ + channel64_writel(dc, name, val) : channel32_writel(dc, name, val)) + +static dma_addr_t channel64_read_CHAR(const struct txx9dmac_chan *dc) +{ + if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64)) + return channel64_readq(dc, CHAR); + else + return channel64_readl(dc, CHAR); +} + +static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val) +{ + if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64)) + channel64_writeq(dc, CHAR, val); + else + channel64_writel(dc, CHAR, val); +} + +static void channel64_clear_CHAR(const struct txx9dmac_chan *dc) +{ +#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR) + channel64_writel(dc, CHAR, 0); + channel64_writel(dc, __pad_CHAR, 0); +#else + channel64_writeq(dc, CHAR, 0); +#endif +} + +static dma_addr_t channel_read_CHAR(const struct txx9dmac_chan *dc) +{ + if (is_dmac64(dc)) + return channel64_read_CHAR(dc); + else + return channel32_readl(dc, CHAR); +} + +static void channel_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val) +{ + if (is_dmac64(dc)) + channel64_write_CHAR(dc, val); + else + channel32_writel(dc, CHAR, val); +} + +static struct txx9dmac_regs __iomem *__txx9dmac_regs( + const struct txx9dmac_dev *ddev) +{ + return ddev->regs; +} + +static struct txx9dmac_regs32 __iomem *__txx9dmac_regs32( + const struct txx9dmac_dev *ddev) +{ + return ddev->regs; +} + +#define dma64_readl(ddev, name) \ + __raw_readl(&(__txx9dmac_regs(ddev)->name)) +#define dma64_writel(ddev, name, val) \ + __raw_writel((val), &(__txx9dmac_regs(ddev)->name)) + +#define dma32_readl(ddev, name) \ + __raw_readl(&(__txx9dmac_regs32(ddev)->name)) +#define dma32_writel(ddev, name, val) \ + __raw_writel((val), &(__txx9dmac_regs32(ddev)->name)) + +#define dma_readl(ddev, name) \ + (__is_dmac64(ddev) ? \ + dma64_readl(ddev, name) : dma32_readl(ddev, name)) +#define dma_writel(ddev, name, val) \ + (__is_dmac64(ddev) ? \ + dma64_writel(ddev, name, val) : dma32_writel(ddev, name, val)) + +static struct device *chan2dev(struct dma_chan *chan) +{ + return &chan->dev->device; +} +static struct device *chan2parent(struct dma_chan *chan) +{ + return chan->dev->device.parent; +} + +static struct txx9dmac_desc * +txd_to_txx9dmac_desc(struct dma_async_tx_descriptor *txd) +{ + return container_of(txd, struct txx9dmac_desc, txd); +} + +static dma_addr_t desc_read_CHAR(const struct txx9dmac_chan *dc, + const struct txx9dmac_desc *desc) +{ + return is_dmac64(dc) ? 
desc->hwdesc.CHAR : desc->hwdesc32.CHAR; +} + +static void desc_write_CHAR(const struct txx9dmac_chan *dc, + struct txx9dmac_desc *desc, dma_addr_t val) +{ + if (is_dmac64(dc)) + desc->hwdesc.CHAR = val; + else + desc->hwdesc32.CHAR = val; +} + +#define TXX9_DMA_MAX_COUNT 0x04000000 + +#define TXX9_DMA_INITIAL_DESC_COUNT 64 + +static struct txx9dmac_desc *txx9dmac_first_active(struct txx9dmac_chan *dc) +{ + return list_entry(dc->active_list.next, + struct txx9dmac_desc, desc_node); +} + +static struct txx9dmac_desc *txx9dmac_last_active(struct txx9dmac_chan *dc) +{ + return list_entry(dc->active_list.prev, + struct txx9dmac_desc, desc_node); +} + +static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc) +{ + return list_entry(dc->queue.next, struct txx9dmac_desc, desc_node); +} + +static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc) +{ + if (!list_empty(&desc->txd.tx_list)) + desc = list_entry(desc->txd.tx_list.prev, + struct txx9dmac_desc, desc_node); + return desc; +} + +static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx); + +static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc, + gfp_t flags) +{ + struct txx9dmac_dev *ddev = dc->ddev; + struct txx9dmac_desc *desc; + + desc = kzalloc(sizeof(*desc), flags); + if (!desc) + return NULL; + dma_async_tx_descriptor_init(&desc->txd, &dc->chan); + desc->txd.tx_submit = txx9dmac_tx_submit; + /* txd.flags will be overwritten in prep funcs */ + desc->txd.flags = DMA_CTRL_ACK; + desc->txd.phys = dma_map_single(chan2parent(&dc->chan), &desc->hwdesc, + ddev->descsize, DMA_TO_DEVICE); + return desc; +} + +static struct txx9dmac_desc *txx9dmac_desc_get(struct txx9dmac_chan *dc) +{ + struct txx9dmac_desc *desc, *_desc; + struct txx9dmac_desc *ret = NULL; + unsigned int i = 0; + + spin_lock_bh(&dc->lock); + list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) { + if (async_tx_test_ack(&desc->txd)) { + list_del(&desc->desc_node); + ret = desc; + break; + } + dev_dbg(chan2dev(&dc->chan), "desc %p not ACKed\n", desc); + i++; + } + spin_unlock_bh(&dc->lock); + + dev_vdbg(chan2dev(&dc->chan), "scanned %u descriptors on freelist\n", + i); + if (!ret) { + ret = txx9dmac_desc_alloc(dc, GFP_ATOMIC); + if (ret) { + spin_lock_bh(&dc->lock); + dc->descs_allocated++; + spin_unlock_bh(&dc->lock); + } else + dev_err(chan2dev(&dc->chan), + "not enough descriptors available\n"); + } + return ret; +} + +static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc, + struct txx9dmac_desc *desc) +{ + struct txx9dmac_dev *ddev = dc->ddev; + struct txx9dmac_desc *child; + + list_for_each_entry(child, &desc->txd.tx_list, desc_node) + dma_sync_single_for_cpu(chan2parent(&dc->chan), + child->txd.phys, ddev->descsize, + DMA_TO_DEVICE); + dma_sync_single_for_cpu(chan2parent(&dc->chan), + desc->txd.phys, ddev->descsize, + DMA_TO_DEVICE); +} + +/* + * Move a descriptor, including any children, to the free list. + * `desc' must not be on any lists. 
+ */ +static void txx9dmac_desc_put(struct txx9dmac_chan *dc, + struct txx9dmac_desc *desc) +{ + if (desc) { + struct txx9dmac_desc *child; + + txx9dmac_sync_desc_for_cpu(dc, desc); + + spin_lock_bh(&dc->lock); + list_for_each_entry(child, &desc->txd.tx_list, desc_node) + dev_vdbg(chan2dev(&dc->chan), + "moving child desc %p to freelist\n", + child); + list_splice_init(&desc->txd.tx_list, &dc->free_list); + dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n", + desc); + list_add(&desc->desc_node, &dc->free_list); + spin_unlock_bh(&dc->lock); + } +} + +/* Called with dc->lock held and bh disabled */ +static dma_cookie_t +txx9dmac_assign_cookie(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc) +{ + dma_cookie_t cookie = dc->chan.cookie; + + if (++cookie < 0) + cookie = 1; + + dc->chan.cookie = cookie; + desc->txd.cookie = cookie; + + return cookie; +} + +/*----------------------------------------------------------------------*/ + +static void txx9dmac_dump_regs(struct txx9dmac_chan *dc) +{ + if (is_dmac64(dc)) + dev_err(chan2dev(&dc->chan), + " CHAR: %#llx SAR: %#llx DAR: %#llx CNTR: %#x" + " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n", + (u64)channel64_read_CHAR(dc), + channel64_readq(dc, SAR), + channel64_readq(dc, DAR), + channel64_readl(dc, CNTR), + channel64_readl(dc, SAIR), + channel64_readl(dc, DAIR), + channel64_readl(dc, CCR), + channel64_readl(dc, CSR)); + else + dev_err(chan2dev(&dc->chan), + " CHAR: %#x SAR: %#x DAR: %#x CNTR: %#x" + " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n", + channel32_readl(dc, CHAR), + channel32_readl(dc, SAR), + channel32_readl(dc, DAR), + channel32_readl(dc, CNTR), + channel32_readl(dc, SAIR), + channel32_readl(dc, DAIR), + channel32_readl(dc, CCR), + channel32_readl(dc, CSR)); +} + +static void txx9dmac_reset_chan(struct txx9dmac_chan *dc) +{ + channel_writel(dc, CCR, TXX9_DMA_CCR_CHRST); + if (is_dmac64(dc)) { + channel64_clear_CHAR(dc); + channel_writeq(dc, SAR, 0); + channel_writeq(dc, DAR, 0); + } else { + channel_writel(dc, CHAR, 0); + channel_writel(dc, SAR, 0); + channel_writel(dc, DAR, 0); + } + channel_writel(dc, CNTR, 0); + channel_writel(dc, SAIR, 0); + channel_writel(dc, DAIR, 0); + channel_writel(dc, CCR, 0); + mmiowb(); +} + +/* Called with dc->lock held and bh disabled */ +static void txx9dmac_dostart(struct txx9dmac_chan *dc, + struct txx9dmac_desc *first) +{ + struct txx9dmac_slave *ds = dc->chan.private; + u32 sai, dai; + + dev_vdbg(chan2dev(&dc->chan), "dostart %u %p\n", + first->txd.cookie, first); + /* ASSERT: channel is idle */ + if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) { + dev_err(chan2dev(&dc->chan), + "BUG: Attempted to start non-idle channel\n"); + txx9dmac_dump_regs(dc); + /* The tasklet will hopefully advance the queue... 
*/ + return; + } + + if (is_dmac64(dc)) { + channel64_writel(dc, CNTR, 0); + channel64_writel(dc, CSR, 0xffffffff); + if (ds) { + if (ds->tx_reg) { + sai = ds->reg_width; + dai = 0; + } else { + sai = 0; + dai = ds->reg_width; + } + } else { + sai = 8; + dai = 8; + } + channel64_writel(dc, SAIR, sai); + channel64_writel(dc, DAIR, dai); + /* All 64-bit DMAC supports SMPCHN */ + channel64_writel(dc, CCR, dc->ccr); + /* Writing a non zero value to CHAR will assert XFACT */ + channel64_write_CHAR(dc, first->txd.phys); + } else { + channel32_writel(dc, CNTR, 0); + channel32_writel(dc, CSR, 0xffffffff); + if (ds) { + if (ds->tx_reg) { + sai = ds->reg_width; + dai = 0; + } else { + sai = 0; + dai = ds->reg_width; + } + } else { + sai = 4; + dai = 4; + } + channel32_writel(dc, SAIR, sai); + channel32_writel(dc, DAIR, dai); + if (txx9_dma_have_SMPCHN()) { + channel32_writel(dc, CCR, dc->ccr); + /* Writing a non zero value to CHAR will assert XFACT */ + channel32_writel(dc, CHAR, first->txd.phys); + } else { + channel32_writel(dc, CHAR, first->txd.phys); + channel32_writel(dc, CCR, dc->ccr); + } + } +} + +/*----------------------------------------------------------------------*/ + +static void +txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, + struct txx9dmac_desc *desc) +{ + dma_async_tx_callback callback; + void *param; + struct dma_async_tx_descriptor *txd = &desc->txd; + struct txx9dmac_slave *ds = dc->chan.private; + + dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", + txd->cookie, desc); + + dc->completed = txd->cookie; + callback = txd->callback; + param = txd->callback_param; + + txx9dmac_sync_desc_for_cpu(dc, desc); + list_splice_init(&txd->tx_list, &dc->free_list); + list_move(&desc->desc_node, &dc->free_list); + + /* + * We use dma_unmap_page() regardless of how the buffers were + * mapped before they were submitted... + */ + if (!ds) { + dma_addr_t dmaaddr; + if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + dmaaddr = is_dmac64(dc) ? + desc->hwdesc.DAR : desc->hwdesc32.DAR; + dma_unmap_page(chan2parent(&dc->chan), dmaaddr, + desc->len, DMA_FROM_DEVICE); + } + if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + dmaaddr = is_dmac64(dc) ? + desc->hwdesc.SAR : desc->hwdesc32.SAR; + dma_unmap_page(chan2parent(&dc->chan), dmaaddr, + desc->len, DMA_TO_DEVICE); + } + } + + /* + * The API requires that no submissions are done from a + * callback, so we don't need to drop the lock here + */ + if (callback) + callback(param); + dma_run_dependencies(txd); +} + +static void txx9dmac_dequeue(struct txx9dmac_chan *dc, struct list_head *list) +{ + struct txx9dmac_dev *ddev = dc->ddev; + struct txx9dmac_desc *desc; + struct txx9dmac_desc *prev = NULL; + + BUG_ON(!list_empty(list)); + do { + desc = txx9dmac_first_queued(dc); + if (prev) { + desc_write_CHAR(dc, prev, desc->txd.phys); + dma_sync_single_for_device(chan2parent(&dc->chan), + prev->txd.phys, ddev->descsize, + DMA_TO_DEVICE); + } + prev = txx9dmac_last_child(desc); + list_move_tail(&desc->desc_node, list); + /* Make chain-completion interrupt happen */ + if ((desc->txd.flags & DMA_PREP_INTERRUPT) && + !txx9dmac_chan_INTENT(dc)) + break; + } while (!list_empty(&dc->queue)); +} + +static void txx9dmac_complete_all(struct txx9dmac_chan *dc) +{ + struct txx9dmac_desc *desc, *_desc; + LIST_HEAD(list); + + /* + * Submit queued descriptors ASAP, i.e. before we go through + * the completed ones. 
+ */ + list_splice_init(&dc->active_list, &list); + if (!list_empty(&dc->queue)) { + txx9dmac_dequeue(dc, &dc->active_list); + txx9dmac_dostart(dc, txx9dmac_first_active(dc)); + } + + list_for_each_entry_safe(desc, _desc, &list, desc_node) + txx9dmac_descriptor_complete(dc, desc); +} + +static void txx9dmac_dump_desc(struct txx9dmac_chan *dc, + struct txx9dmac_hwdesc *desc) +{ + if (is_dmac64(dc)) { +#ifdef TXX9_DMA_USE_SIMPLE_CHAIN + dev_crit(chan2dev(&dc->chan), + " desc: ch%#llx s%#llx d%#llx c%#x\n", + (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR); +#else + dev_crit(chan2dev(&dc->chan), + " desc: ch%#llx s%#llx d%#llx c%#x" + " si%#x di%#x cc%#x cs%#x\n", + (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR, + desc->SAIR, desc->DAIR, desc->CCR, desc->CSR); +#endif + } else { + struct txx9dmac_hwdesc32 *d = (struct txx9dmac_hwdesc32 *)desc; +#ifdef TXX9_DMA_USE_SIMPLE_CHAIN + dev_crit(chan2dev(&dc->chan), + " desc: ch%#x s%#x d%#x c%#x\n", + d->CHAR, d->SAR, d->DAR, d->CNTR); +#else + dev_crit(chan2dev(&dc->chan), + " desc: ch%#x s%#x d%#x c%#x" + " si%#x di%#x cc%#x cs%#x\n", + d->CHAR, d->SAR, d->DAR, d->CNTR, + d->SAIR, d->DAIR, d->CCR, d->CSR); +#endif + } +} + +static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr) +{ + struct txx9dmac_desc *bad_desc; + struct txx9dmac_desc *child; + u32 errors; + + /* + * The descriptor currently at the head of the active list is + * borked. Since we don't have any way to report errors, we'll + * just have to scream loudly and try to carry on. + */ + dev_crit(chan2dev(&dc->chan), "Abnormal Chain Completion\n"); + txx9dmac_dump_regs(dc); + + bad_desc = txx9dmac_first_active(dc); + list_del_init(&bad_desc->desc_node); + + /* Clear all error flags and try to restart the controller */ + errors = csr & (TXX9_DMA_CSR_ABCHC | + TXX9_DMA_CSR_CFERR | TXX9_DMA_CSR_CHERR | + TXX9_DMA_CSR_DESERR | TXX9_DMA_CSR_SORERR); + channel_writel(dc, CSR, errors); + + if (list_empty(&dc->active_list) && !list_empty(&dc->queue)) + txx9dmac_dequeue(dc, &dc->active_list); + if (!list_empty(&dc->active_list)) + txx9dmac_dostart(dc, txx9dmac_first_active(dc)); + + dev_crit(chan2dev(&dc->chan), + "Bad descriptor submitted for DMA! 
(cookie: %d)\n", + bad_desc->txd.cookie); + txx9dmac_dump_desc(dc, &bad_desc->hwdesc); + list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node) + txx9dmac_dump_desc(dc, &child->hwdesc); + /* Pretend the descriptor completed successfully */ + txx9dmac_descriptor_complete(dc, bad_desc); +} + +static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc) +{ + dma_addr_t chain; + struct txx9dmac_desc *desc, *_desc; + struct txx9dmac_desc *child; + u32 csr; + + if (is_dmac64(dc)) { + chain = channel64_read_CHAR(dc); + csr = channel64_readl(dc, CSR); + channel64_writel(dc, CSR, csr); + } else { + chain = channel32_readl(dc, CHAR); + csr = channel32_readl(dc, CSR); + channel32_writel(dc, CSR, csr); + } + /* For dynamic chain, we should look at XFACT instead of NCHNC */ + if (!(csr & (TXX9_DMA_CSR_XFACT | TXX9_DMA_CSR_ABCHC))) { + /* Everything we've submitted is done */ + txx9dmac_complete_all(dc); + return; + } + if (!(csr & TXX9_DMA_CSR_CHNEN)) + chain = 0; /* last descriptor of this chain */ + + dev_vdbg(chan2dev(&dc->chan), "scan_descriptors: char=%#llx\n", + (u64)chain); + + list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) { + if (desc_read_CHAR(dc, desc) == chain) { + /* This one is currently in progress */ + if (csr & TXX9_DMA_CSR_ABCHC) + goto scan_done; + return; + } + + list_for_each_entry(child, &desc->txd.tx_list, desc_node) + if (desc_read_CHAR(dc, child) == chain) { + /* Currently in progress */ + if (csr & TXX9_DMA_CSR_ABCHC) + goto scan_done; + return; + } + + /* + * No descriptors so far seem to be in progress, i.e. + * this one must be done. + */ + txx9dmac_descriptor_complete(dc, desc); + } +scan_done: + if (csr & TXX9_DMA_CSR_ABCHC) { + txx9dmac_handle_error(dc, csr); + return; + } + + dev_err(chan2dev(&dc->chan), + "BUG: All descriptors done, but channel not idle!\n"); + + /* Try to continue after resetting the channel... */ + txx9dmac_reset_chan(dc); + + if (!list_empty(&dc->queue)) { + txx9dmac_dequeue(dc, &dc->active_list); + txx9dmac_dostart(dc, txx9dmac_first_active(dc)); + } +} + +static void txx9dmac_chan_tasklet(unsigned long data) +{ + int irq; + u32 csr; + struct txx9dmac_chan *dc; + + dc = (struct txx9dmac_chan *)data; + csr = channel_readl(dc, CSR); + dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr); + + spin_lock(&dc->lock); + if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC | + TXX9_DMA_CSR_NTRNFC)) + txx9dmac_scan_descriptors(dc); + spin_unlock(&dc->lock); + irq = dc->irq; + + enable_irq(irq); +} + +static irqreturn_t txx9dmac_chan_interrupt(int irq, void *dev_id) +{ + struct txx9dmac_chan *dc = dev_id; + + dev_vdbg(chan2dev(&dc->chan), "interrupt: status=%#x\n", + channel_readl(dc, CSR)); + + tasklet_schedule(&dc->tasklet); + /* + * Just disable the interrupts. We'll turn them back on in the + * softirq handler. 
+ */ + disable_irq_nosync(irq); + + return IRQ_HANDLED; +} + +static void txx9dmac_tasklet(unsigned long data) +{ + int irq; + u32 csr; + struct txx9dmac_chan *dc; + + struct txx9dmac_dev *ddev = (struct txx9dmac_dev *)data; + u32 mcr; + int i; + + mcr = dma_readl(ddev, MCR); + dev_vdbg(ddev->chan[0]->dma.dev, "tasklet: mcr=%x\n", mcr); + for (i = 0; i < TXX9_DMA_MAX_NR_CHANNELS; i++) { + if ((mcr >> (24 + i)) & 0x11) { + dc = ddev->chan[i]; + csr = channel_readl(dc, CSR); + dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", + csr); + spin_lock(&dc->lock); + if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC | + TXX9_DMA_CSR_NTRNFC)) + txx9dmac_scan_descriptors(dc); + spin_unlock(&dc->lock); + } + } + irq = ddev->irq; + + enable_irq(irq); +} + +static irqreturn_t txx9dmac_interrupt(int irq, void *dev_id) +{ + struct txx9dmac_dev *ddev = dev_id; + + dev_vdbg(ddev->chan[0]->dma.dev, "interrupt: status=%#x\n", + dma_readl(ddev, MCR)); + + tasklet_schedule(&ddev->tasklet); + /* + * Just disable the interrupts. We'll turn them back on in the + * softirq handler. + */ + disable_irq_nosync(irq); + + return IRQ_HANDLED; +} + +/*----------------------------------------------------------------------*/ + +static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct txx9dmac_desc *desc = txd_to_txx9dmac_desc(tx); + struct txx9dmac_chan *dc = to_txx9dmac_chan(tx->chan); + dma_cookie_t cookie; + + spin_lock_bh(&dc->lock); + cookie = txx9dmac_assign_cookie(dc, desc); + + dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n", + desc->txd.cookie, desc); + + list_add_tail(&desc->desc_node, &dc->queue); + spin_unlock_bh(&dc->lock); + + return cookie; +} + +static struct dma_async_tx_descriptor * +txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); + struct txx9dmac_dev *ddev = dc->ddev; + struct txx9dmac_desc *desc; + struct txx9dmac_desc *first; + struct txx9dmac_desc *prev; + size_t xfer_count; + size_t offset; + + dev_vdbg(chan2dev(chan), "prep_dma_memcpy d%#llx s%#llx l%#zx f%#lx\n", + (u64)dest, (u64)src, len, flags); + + if (unlikely(!len)) { + dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); + return NULL; + } + + prev = first = NULL; + + for (offset = 0; offset < len; offset += xfer_count) { + xfer_count = min_t(size_t, len - offset, TXX9_DMA_MAX_COUNT); + /* + * Workaround for ERT-TX49H2-033, ERT-TX49H3-020, + * ERT-TX49H4-016 (slightly conservative) + */ + if (__is_dmac64(ddev)) { + if (xfer_count > 0x100 && + (xfer_count & 0xff) >= 0xfa && + (xfer_count & 0xff) <= 0xff) + xfer_count -= 0x20; + } else { + if (xfer_count > 0x80 && + (xfer_count & 0x7f) >= 0x7e && + (xfer_count & 0x7f) <= 0x7f) + xfer_count -= 0x20; + } + + desc = txx9dmac_desc_get(dc); + if (!desc) { + txx9dmac_desc_put(dc, first); + return NULL; + } + + if (__is_dmac64(ddev)) { + desc->hwdesc.SAR = src + offset; + desc->hwdesc.DAR = dest + offset; + desc->hwdesc.CNTR = xfer_count; + txx9dmac_desc_set_nosimple(ddev, desc, 8, 8, + dc->ccr | TXX9_DMA_CCR_XFACT); + } else { + desc->hwdesc32.SAR = src + offset; + desc->hwdesc32.DAR = dest + offset; + desc->hwdesc32.CNTR = xfer_count; + txx9dmac_desc_set_nosimple(ddev, desc, 4, 4, + dc->ccr | TXX9_DMA_CCR_XFACT); + } + + /* + * The descriptors on tx_list are not reachable from + * the dc->queue list or dc->active_list after a + * submit. 
If we put all descriptors on active_list, + * calling of callback on the completion will be more + * complex. + */ + if (!first) { + first = desc; + } else { + desc_write_CHAR(dc, prev, desc->txd.phys); + dma_sync_single_for_device(chan2parent(&dc->chan), + prev->txd.phys, ddev->descsize, + DMA_TO_DEVICE); + list_add_tail(&desc->desc_node, + &first->txd.tx_list); + } + prev = desc; + } + + /* Trigger interrupt after last block */ + if (flags & DMA_PREP_INTERRUPT) + txx9dmac_desc_set_INTENT(ddev, prev); + + desc_write_CHAR(dc, prev, 0); + dma_sync_single_for_device(chan2parent(&dc->chan), + prev->txd.phys, ddev->descsize, + DMA_TO_DEVICE); + + first->txd.flags = flags; + first->len = len; + + return &first->txd; +} + +static struct dma_async_tx_descriptor * +txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_data_direction direction, + unsigned long flags) +{ + struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); + struct txx9dmac_dev *ddev = dc->ddev; + struct txx9dmac_slave *ds = chan->private; + struct txx9dmac_desc *prev; + struct txx9dmac_desc *first; + unsigned int i; + struct scatterlist *sg; + + dev_vdbg(chan2dev(chan), "prep_dma_slave\n"); + + BUG_ON(!ds || !ds->reg_width); + if (ds->tx_reg) + BUG_ON(direction != DMA_TO_DEVICE); + else + BUG_ON(direction != DMA_FROM_DEVICE); + if (unlikely(!sg_len)) + return NULL; + + prev = first = NULL; + + for_each_sg(sgl, sg, sg_len, i) { + struct txx9dmac_desc *desc; + dma_addr_t mem; + u32 sai, dai; + + desc = txx9dmac_desc_get(dc); + if (!desc) { + txx9dmac_desc_put(dc, first); + return NULL; + } + + mem = sg_dma_address(sg); + + if (__is_dmac64(ddev)) { + if (direction == DMA_TO_DEVICE) { + desc->hwdesc.SAR = mem; + desc->hwdesc.DAR = ds->tx_reg; + } else { + desc->hwdesc.SAR = ds->rx_reg; + desc->hwdesc.DAR = mem; + } + desc->hwdesc.CNTR = sg_dma_len(sg); + } else { + if (direction == DMA_TO_DEVICE) { + desc->hwdesc32.SAR = mem; + desc->hwdesc32.DAR = ds->tx_reg; + } else { + desc->hwdesc32.SAR = ds->rx_reg; + desc->hwdesc32.DAR = mem; + } + desc->hwdesc32.CNTR = sg_dma_len(sg); + } + if (direction == DMA_TO_DEVICE) { + sai = ds->reg_width; + dai = 0; + } else { + sai = 0; + dai = ds->reg_width; + } + txx9dmac_desc_set_nosimple(ddev, desc, sai, dai, + dc->ccr | TXX9_DMA_CCR_XFACT); + + if (!first) { + first = desc; + } else { + desc_write_CHAR(dc, prev, desc->txd.phys); + dma_sync_single_for_device(chan2parent(&dc->chan), + prev->txd.phys, + ddev->descsize, + DMA_TO_DEVICE); + list_add_tail(&desc->desc_node, + &first->txd.tx_list); + } + prev = desc; + } + + /* Trigger interrupt after last block */ + if (flags & DMA_PREP_INTERRUPT) + txx9dmac_desc_set_INTENT(ddev, prev); + + desc_write_CHAR(dc, prev, 0); + dma_sync_single_for_device(chan2parent(&dc->chan), + prev->txd.phys, ddev->descsize, + DMA_TO_DEVICE); + + first->txd.flags = flags; + first->len = 0; + + return &first->txd; +} + +static void txx9dmac_terminate_all(struct dma_chan *chan) +{ + struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); + struct txx9dmac_desc *desc, *_desc; + LIST_HEAD(list); + + dev_vdbg(chan2dev(chan), "terminate_all\n"); + spin_lock_bh(&dc->lock); + + txx9dmac_reset_chan(dc); + + /* active_list entries will end up before queued entries */ + list_splice_init(&dc->queue, &list); + list_splice_init(&dc->active_list, &list); + + spin_unlock_bh(&dc->lock); + + /* Flush all pending and queued descriptors */ + list_for_each_entry_safe(desc, _desc, &list, desc_node) + txx9dmac_descriptor_complete(dc, desc); +} + +static 
enum dma_status +txx9dmac_is_tx_complete(struct dma_chan *chan, + dma_cookie_t cookie, + dma_cookie_t *done, dma_cookie_t *used) +{ + struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); + dma_cookie_t last_used; + dma_cookie_t last_complete; + int ret; + + last_complete = dc->completed; + last_used = chan->cookie; + + ret = dma_async_is_complete(cookie, last_complete, last_used); + if (ret != DMA_SUCCESS) { + spin_lock_bh(&dc->lock); + txx9dmac_scan_descriptors(dc); + spin_unlock_bh(&dc->lock); + + last_complete = dc->completed; + last_used = chan->cookie; + + ret = dma_async_is_complete(cookie, last_complete, last_used); + } + + if (done) + *done = last_complete; + if (used) + *used = last_used; + + return ret; +} + +static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc, + struct txx9dmac_desc *prev) +{ + struct txx9dmac_dev *ddev = dc->ddev; + struct txx9dmac_desc *desc; + LIST_HEAD(list); + + prev = txx9dmac_last_child(prev); + txx9dmac_dequeue(dc, &list); + desc = list_entry(list.next, struct txx9dmac_desc, desc_node); + desc_write_CHAR(dc, prev, desc->txd.phys); + dma_sync_single_for_device(chan2parent(&dc->chan), + prev->txd.phys, ddev->descsize, + DMA_TO_DEVICE); + mmiowb(); + if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) && + channel_read_CHAR(dc) == prev->txd.phys) + /* Restart chain DMA */ + channel_write_CHAR(dc, desc->txd.phys); + list_splice_tail(&list, &dc->active_list); +} + +static void txx9dmac_issue_pending(struct dma_chan *chan) +{ + struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); + + spin_lock_bh(&dc->lock); + + if (!list_empty(&dc->active_list)) + txx9dmac_scan_descriptors(dc); + if (!list_empty(&dc->queue)) { + if (list_empty(&dc->active_list)) { + txx9dmac_dequeue(dc, &dc->active_list); + txx9dmac_dostart(dc, txx9dmac_first_active(dc)); + } else if (txx9_dma_have_SMPCHN()) { + struct txx9dmac_desc *prev = txx9dmac_last_active(dc); + + if (!(prev->txd.flags & DMA_PREP_INTERRUPT) || + txx9dmac_chan_INTENT(dc)) + txx9dmac_chain_dynamic(dc, prev); + } + } + + spin_unlock_bh(&dc->lock); +} + +static int txx9dmac_alloc_chan_resources(struct dma_chan *chan) +{ + struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); + struct txx9dmac_slave *ds = chan->private; + struct txx9dmac_desc *desc; + int i; + + dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); + + /* ASSERT: channel is idle */ + if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) { + dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); + return -EIO; + } + + dc->completed = chan->cookie = 1; + + dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE; + txx9dmac_chan_set_SMPCHN(dc); + if (!txx9_dma_have_SMPCHN() || (dc->ccr & TXX9_DMA_CCR_SMPCHN)) + dc->ccr |= TXX9_DMA_CCR_INTENC; + if (chan->device->device_prep_dma_memcpy) { + if (ds) + return -EINVAL; + dc->ccr |= TXX9_DMA_CCR_XFSZ_X8; + } else { + if (!ds || + (ds->tx_reg && ds->rx_reg) || (!ds->tx_reg && !ds->rx_reg)) + return -EINVAL; + dc->ccr |= TXX9_DMA_CCR_EXTRQ | + TXX9_DMA_CCR_XFSZ(__ffs(ds->reg_width)); + txx9dmac_chan_set_INTENT(dc); + } + + spin_lock_bh(&dc->lock); + i = dc->descs_allocated; + while (dc->descs_allocated < TXX9_DMA_INITIAL_DESC_COUNT) { + spin_unlock_bh(&dc->lock); + + desc = txx9dmac_desc_alloc(dc, GFP_KERNEL); + if (!desc) { + dev_info(chan2dev(chan), + "only allocated %d descriptors\n", i); + spin_lock_bh(&dc->lock); + break; + } + txx9dmac_desc_put(dc, desc); + + spin_lock_bh(&dc->lock); + i = ++dc->descs_allocated; + } + spin_unlock_bh(&dc->lock); + + dev_dbg(chan2dev(chan), + "alloc_chan_resources allocated %d 
descriptors\n", i); + + return i; +} + +static void txx9dmac_free_chan_resources(struct dma_chan *chan) +{ + struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); + struct txx9dmac_dev *ddev = dc->ddev; + struct txx9dmac_desc *desc, *_desc; + LIST_HEAD(list); + + dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n", + dc->descs_allocated); + + /* ASSERT: channel is idle */ + BUG_ON(!list_empty(&dc->active_list)); + BUG_ON(!list_empty(&dc->queue)); + BUG_ON(channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT); + + spin_lock_bh(&dc->lock); + list_splice_init(&dc->free_list, &list); + dc->descs_allocated = 0; + spin_unlock_bh(&dc->lock); + + list_for_each_entry_safe(desc, _desc, &list, desc_node) { + dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); + dma_unmap_single(chan2parent(chan), desc->txd.phys, + ddev->descsize, DMA_TO_DEVICE); + kfree(desc); + } + + dev_vdbg(chan2dev(chan), "free_chan_resources done\n"); +} + +/*----------------------------------------------------------------------*/ + +static void txx9dmac_off(struct txx9dmac_dev *ddev) +{ + dma_writel(ddev, MCR, 0); + mmiowb(); +} + +static int __init txx9dmac_chan_probe(struct platform_device *pdev) +{ + struct txx9dmac_chan_platform_data *cpdata = pdev->dev.platform_data; + struct platform_device *dmac_dev = cpdata->dmac_dev; + struct txx9dmac_platform_data *pdata = dmac_dev->dev.platform_data; + struct txx9dmac_chan *dc; + int err; + int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS; + int irq; + + dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL); + if (!dc) + return -ENOMEM; + + dc->dma.dev = &pdev->dev; + dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources; + dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources; + dc->dma.device_terminate_all = txx9dmac_terminate_all; + dc->dma.device_is_tx_complete = txx9dmac_is_tx_complete; + dc->dma.device_issue_pending = txx9dmac_issue_pending; + if (pdata && pdata->memcpy_chan == ch) { + dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy; + dma_cap_set(DMA_MEMCPY, dc->dma.cap_mask); + } else { + dc->dma.device_prep_slave_sg = txx9dmac_prep_slave_sg; + dma_cap_set(DMA_SLAVE, dc->dma.cap_mask); + dma_cap_set(DMA_PRIVATE, dc->dma.cap_mask); + } + + INIT_LIST_HEAD(&dc->dma.channels); + dc->ddev = platform_get_drvdata(dmac_dev); + if (dc->ddev->irq < 0) { + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + tasklet_init(&dc->tasklet, txx9dmac_chan_tasklet, + (unsigned long)dc); + dc->irq = irq; + err = devm_request_irq(&pdev->dev, dc->irq, + txx9dmac_chan_interrupt, 0, dev_name(&pdev->dev), dc); + if (err) + return err; + } else + dc->irq = -1; + dc->ddev->chan[ch] = dc; + dc->chan.device = &dc->dma; + list_add_tail(&dc->chan.device_node, &dc->chan.device->channels); + dc->chan.cookie = dc->completed = 1; + + if (is_dmac64(dc)) + dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch]; + else + dc->ch_regs = &__txx9dmac_regs32(dc->ddev)->CHAN[ch]; + spin_lock_init(&dc->lock); + + INIT_LIST_HEAD(&dc->active_list); + INIT_LIST_HEAD(&dc->queue); + INIT_LIST_HEAD(&dc->free_list); + + txx9dmac_reset_chan(dc); + + platform_set_drvdata(pdev, dc); + + err = dma_async_device_register(&dc->dma); + if (err) + return err; + dev_dbg(&pdev->dev, "TXx9 DMA Channel (dma%d%s%s)\n", + dc->dma.dev_id, + dma_has_cap(DMA_MEMCPY, dc->dma.cap_mask) ? " memcpy" : "", + dma_has_cap(DMA_SLAVE, dc->dma.cap_mask) ? 
" slave" : ""); + + return 0; +} + +static int __exit txx9dmac_chan_remove(struct platform_device *pdev) +{ + struct txx9dmac_chan *dc = platform_get_drvdata(pdev); + + dma_async_device_unregister(&dc->dma); + if (dc->irq >= 0) + tasklet_kill(&dc->tasklet); + dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL; + return 0; +} + +static int __init txx9dmac_probe(struct platform_device *pdev) +{ + struct txx9dmac_platform_data *pdata = pdev->dev.platform_data; + struct resource *io; + struct txx9dmac_dev *ddev; + u32 mcr; + int err; + + io = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!io) + return -EINVAL; + + ddev = devm_kzalloc(&pdev->dev, sizeof(*ddev), GFP_KERNEL); + if (!ddev) + return -ENOMEM; + + if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io), + dev_name(&pdev->dev))) + return -EBUSY; + + ddev->regs = devm_ioremap(&pdev->dev, io->start, resource_size(io)); + if (!ddev->regs) + return -ENOMEM; + ddev->have_64bit_regs = pdata->have_64bit_regs; + if (__is_dmac64(ddev)) + ddev->descsize = sizeof(struct txx9dmac_hwdesc); + else + ddev->descsize = sizeof(struct txx9dmac_hwdesc32); + + /* force dma off, just in case */ + txx9dmac_off(ddev); + + ddev->irq = platform_get_irq(pdev, 0); + if (ddev->irq >= 0) { + tasklet_init(&ddev->tasklet, txx9dmac_tasklet, + (unsigned long)ddev); + err = devm_request_irq(&pdev->dev, ddev->irq, + txx9dmac_interrupt, 0, dev_name(&pdev->dev), ddev); + if (err) + return err; + } + + mcr = TXX9_DMA_MCR_MSTEN | MCR_LE; + if (pdata && pdata->memcpy_chan >= 0) + mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan); + dma_writel(ddev, MCR, mcr); + + platform_set_drvdata(pdev, ddev); + return 0; +} + +static int __exit txx9dmac_remove(struct platform_device *pdev) +{ + struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); + + txx9dmac_off(ddev); + if (ddev->irq >= 0) + tasklet_kill(&ddev->tasklet); + return 0; +} + +static void txx9dmac_shutdown(struct platform_device *pdev) +{ + struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); + + txx9dmac_off(ddev); +} + +static int txx9dmac_suspend_late(struct platform_device *pdev, + pm_message_t mesg) +{ + struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); + + txx9dmac_off(ddev); + return 0; +} + +static int txx9dmac_resume_early(struct platform_device *pdev) +{ + struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); + struct txx9dmac_platform_data *pdata = pdev->dev.platform_data; + u32 mcr; + + mcr = TXX9_DMA_MCR_MSTEN | MCR_LE; + if (pdata && pdata->memcpy_chan >= 0) + mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan); + dma_writel(ddev, MCR, mcr); + return 0; + +} + +static struct platform_driver txx9dmac_chan_driver = { + .remove = __exit_p(txx9dmac_chan_remove), + .driver = { + .name = "txx9dmac-chan", + }, +}; + +static struct platform_driver txx9dmac_driver = { + .remove = __exit_p(txx9dmac_remove), + .shutdown = txx9dmac_shutdown, + .suspend_late = txx9dmac_suspend_late, + .resume_early = txx9dmac_resume_early, + .driver = { + .name = "txx9dmac", + }, +}; + +static int __init txx9dmac_init(void) +{ + int rc; + + rc = platform_driver_probe(&txx9dmac_driver, txx9dmac_probe); + if (!rc) { + rc = platform_driver_probe(&txx9dmac_chan_driver, + txx9dmac_chan_probe); + if (rc) + platform_driver_unregister(&txx9dmac_driver); + } + return rc; +} +module_init(txx9dmac_init); + +static void __exit txx9dmac_exit(void) +{ + platform_driver_unregister(&txx9dmac_chan_driver); + platform_driver_unregister(&txx9dmac_driver); +} +module_exit(txx9dmac_exit); + +MODULE_LICENSE("GPL"); 
+MODULE_DESCRIPTION("TXx9 DMA Controller driver"); +MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>"); diff --git a/drivers/dma/txx9dmac.h b/drivers/dma/txx9dmac.h new file mode 100644 index 0000000..c907ff0 --- /dev/null +++ b/drivers/dma/txx9dmac.h @@ -0,0 +1,307 @@ +/* + * Driver for the TXx9 SoC DMA Controller + * + * Copyright (C) 2009 Atsushi Nemoto + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef TXX9DMAC_H +#define TXX9DMAC_H + +#include <linux/dmaengine.h> +#include <asm/txx9/dmac.h> + +/* + * Design Notes: + * + * This DMAC have four channels and one FIFO buffer. Each channel can + * be configured for memory-memory or device-memory transfer, but only + * one channel can do alignment-free memory-memory transfer at a time + * while the channel should occupy the FIFO buffer for effective + * transfers. + * + * Instead of dynamically assign the FIFO buffer to channels, I chose + * make one dedicated channel for memory-memory transfer. The + * dedicated channel is public. Other channels are private and used + * for slave transfer. Some devices in the SoC are wired to certain + * DMA channel. + */ + +#ifdef CONFIG_MACH_TX49XX +static inline bool txx9_dma_have_SMPCHN(void) +{ + return true; +} +#define TXX9_DMA_USE_SIMPLE_CHAIN +#else +static inline bool txx9_dma_have_SMPCHN(void) +{ + return false; +} +#endif + +#ifdef __LITTLE_ENDIAN +#ifdef CONFIG_MACH_TX49XX +#define CCR_LE TXX9_DMA_CCR_LE +#define MCR_LE 0 +#else +#define CCR_LE 0 +#define MCR_LE TXX9_DMA_MCR_LE +#endif +#else +#define CCR_LE 0 +#define MCR_LE 0 +#endif + +/* + * Redefine this macro to handle differences between 32- and 64-bit + * addressing, big vs. little endian, etc. + */ +#ifdef __BIG_ENDIAN +#define TXX9_DMA_REG32(name) u32 __pad_##name; u32 name +#else +#define TXX9_DMA_REG32(name) u32 name; u32 __pad_##name +#endif + +/* Hardware register definitions. 
*/ +struct txx9dmac_cregs { +#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR) + TXX9_DMA_REG32(CHAR); /* Chain Address Register */ +#else + u64 CHAR; /* Chain Address Register */ +#endif + u64 SAR; /* Source Address Register */ + u64 DAR; /* Destination Address Register */ + TXX9_DMA_REG32(CNTR); /* Count Register */ + TXX9_DMA_REG32(SAIR); /* Source Address Increment Register */ + TXX9_DMA_REG32(DAIR); /* Destination Address Increment Register */ + TXX9_DMA_REG32(CCR); /* Channel Control Register */ + TXX9_DMA_REG32(CSR); /* Channel Status Register */ +}; +struct txx9dmac_cregs32 { + u32 CHAR; + u32 SAR; + u32 DAR; + u32 CNTR; + u32 SAIR; + u32 DAIR; + u32 CCR; + u32 CSR; +}; + +struct txx9dmac_regs { + /* per-channel registers */ + struct txx9dmac_cregs CHAN[TXX9_DMA_MAX_NR_CHANNELS]; + u64 __pad[9]; + u64 MFDR; /* Memory Fill Data Register */ + TXX9_DMA_REG32(MCR); /* Master Control Register */ +}; +struct txx9dmac_regs32 { + struct txx9dmac_cregs32 CHAN[TXX9_DMA_MAX_NR_CHANNELS]; + u32 __pad[9]; + u32 MFDR; + u32 MCR; +}; + +/* bits for MCR */ +#define TXX9_DMA_MCR_EIS(ch) (0x10000000<<(ch)) +#define TXX9_DMA_MCR_DIS(ch) (0x01000000<<(ch)) +#define TXX9_DMA_MCR_RSFIF 0x00000080 +#define TXX9_DMA_MCR_FIFUM(ch) (0x00000008<<(ch)) +#define TXX9_DMA_MCR_LE 0x00000004 +#define TXX9_DMA_MCR_RPRT 0x00000002 +#define TXX9_DMA_MCR_MSTEN 0x00000001 + +/* bits for CCRn */ +#define TXX9_DMA_CCR_IMMCHN 0x20000000 +#define TXX9_DMA_CCR_USEXFSZ 0x10000000 +#define TXX9_DMA_CCR_LE 0x08000000 +#define TXX9_DMA_CCR_DBINH 0x04000000 +#define TXX9_DMA_CCR_SBINH 0x02000000 +#define TXX9_DMA_CCR_CHRST 0x01000000 +#define TXX9_DMA_CCR_RVBYTE 0x00800000 +#define TXX9_DMA_CCR_ACKPOL 0x00400000 +#define TXX9_DMA_CCR_REQPL 0x00200000 +#define TXX9_DMA_CCR_EGREQ 0x00100000 +#define TXX9_DMA_CCR_CHDN 0x00080000 +#define TXX9_DMA_CCR_DNCTL 0x00060000 +#define TXX9_DMA_CCR_EXTRQ 0x00010000 +#define TXX9_DMA_CCR_INTRQD 0x0000e000 +#define TXX9_DMA_CCR_INTENE 0x00001000 +#define TXX9_DMA_CCR_INTENC 0x00000800 +#define TXX9_DMA_CCR_INTENT 0x00000400 +#define TXX9_DMA_CCR_CHNEN 0x00000200 +#define TXX9_DMA_CCR_XFACT 0x00000100 +#define TXX9_DMA_CCR_SMPCHN 0x00000020 +#define TXX9_DMA_CCR_XFSZ(order) (((order) << 2) & 0x0000001c) +#define TXX9_DMA_CCR_XFSZ_1 TXX9_DMA_CCR_XFSZ(0) +#define TXX9_DMA_CCR_XFSZ_2 TXX9_DMA_CCR_XFSZ(1) +#define TXX9_DMA_CCR_XFSZ_4 TXX9_DMA_CCR_XFSZ(2) +#define TXX9_DMA_CCR_XFSZ_8 TXX9_DMA_CCR_XFSZ(3) +#define TXX9_DMA_CCR_XFSZ_X4 TXX9_DMA_CCR_XFSZ(4) +#define TXX9_DMA_CCR_XFSZ_X8 TXX9_DMA_CCR_XFSZ(5) +#define TXX9_DMA_CCR_XFSZ_X16 TXX9_DMA_CCR_XFSZ(6) +#define TXX9_DMA_CCR_XFSZ_X32 TXX9_DMA_CCR_XFSZ(7) +#define TXX9_DMA_CCR_MEMIO 0x00000002 +#define TXX9_DMA_CCR_SNGAD 0x00000001 + +/* bits for CSRn */ +#define TXX9_DMA_CSR_CHNEN 0x00000400 +#define TXX9_DMA_CSR_STLXFER 0x00000200 +#define TXX9_DMA_CSR_XFACT 0x00000100 +#define TXX9_DMA_CSR_ABCHC 0x00000080 +#define TXX9_DMA_CSR_NCHNC 0x00000040 +#define TXX9_DMA_CSR_NTRNFC 0x00000020 +#define TXX9_DMA_CSR_EXTDN 0x00000010 +#define TXX9_DMA_CSR_CFERR 0x00000008 +#define TXX9_DMA_CSR_CHERR 0x00000004 +#define TXX9_DMA_CSR_DESERR 0x00000002 +#define TXX9_DMA_CSR_SORERR 0x00000001 + +struct txx9dmac_chan { + struct dma_chan chan; + struct dma_device dma; + struct txx9dmac_dev *ddev; + void __iomem *ch_regs; + struct tasklet_struct tasklet; + int irq; + u32 ccr; + + spinlock_t lock; + + /* these other elements are all protected by lock */ + dma_cookie_t completed; + struct list_head active_list; + struct list_head queue; + struct 
list_head free_list; + + unsigned int descs_allocated; +}; + +struct txx9dmac_dev { + void __iomem *regs; + struct tasklet_struct tasklet; + int irq; + struct txx9dmac_chan *chan[TXX9_DMA_MAX_NR_CHANNELS]; + bool have_64bit_regs; + unsigned int descsize; +}; + +static inline bool __is_dmac64(const struct txx9dmac_dev *ddev) +{ + return ddev->have_64bit_regs; +} + +static inline bool is_dmac64(const struct txx9dmac_chan *dc) +{ + return __is_dmac64(dc->ddev); +} + +#ifdef TXX9_DMA_USE_SIMPLE_CHAIN +/* Hardware descriptor definition. (for simple-chain) */ +struct txx9dmac_hwdesc { +#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR) + TXX9_DMA_REG32(CHAR); +#else + u64 CHAR; +#endif + u64 SAR; + u64 DAR; + TXX9_DMA_REG32(CNTR); +}; +struct txx9dmac_hwdesc32 { + u32 CHAR; + u32 SAR; + u32 DAR; + u32 CNTR; +}; +#else +#define txx9dmac_hwdesc txx9dmac_cregs +#define txx9dmac_hwdesc32 txx9dmac_cregs32 +#endif + +struct txx9dmac_desc { + /* FIRST values the hardware uses */ + union { + struct txx9dmac_hwdesc hwdesc; + struct txx9dmac_hwdesc32 hwdesc32; + }; + + /* THEN values for driver housekeeping */ + struct list_head desc_node ____cacheline_aligned; + struct dma_async_tx_descriptor txd; + size_t len; +}; + +#ifdef TXX9_DMA_USE_SIMPLE_CHAIN + +static inline bool txx9dmac_chan_INTENT(struct txx9dmac_chan *dc) +{ + return (dc->ccr & TXX9_DMA_CCR_INTENT) != 0; +} + +static inline void txx9dmac_chan_set_INTENT(struct txx9dmac_chan *dc) +{ + dc->ccr |= TXX9_DMA_CCR_INTENT; +} + +static inline void txx9dmac_desc_set_INTENT(struct txx9dmac_dev *ddev, + struct txx9dmac_desc *desc) +{ +} + +static inline void txx9dmac_chan_set_SMPCHN(struct txx9dmac_chan *dc) +{ + dc->ccr |= TXX9_DMA_CCR_SMPCHN; +} + +static inline void txx9dmac_desc_set_nosimple(struct txx9dmac_dev *ddev, + struct txx9dmac_desc *desc, + u32 sair, u32 dair, u32 ccr) +{ +} + +#else /* TXX9_DMA_USE_SIMPLE_CHAIN */ + +static inline bool txx9dmac_chan_INTENT(struct txx9dmac_chan *dc) +{ + return true; +} + +static void txx9dmac_chan_set_INTENT(struct txx9dmac_chan *dc) +{ +} + +static inline void txx9dmac_desc_set_INTENT(struct txx9dmac_dev *ddev, + struct txx9dmac_desc *desc) +{ + if (__is_dmac64(ddev)) + desc->hwdesc.CCR |= TXX9_DMA_CCR_INTENT; + else + desc->hwdesc32.CCR |= TXX9_DMA_CCR_INTENT; +} + +static inline void txx9dmac_chan_set_SMPCHN(struct txx9dmac_chan *dc) +{ +} + +static inline void txx9dmac_desc_set_nosimple(struct txx9dmac_dev *ddev, + struct txx9dmac_desc *desc, + u32 sai, u32 dai, u32 ccr) +{ + if (__is_dmac64(ddev)) { + desc->hwdesc.SAIR = sai; + desc->hwdesc.DAIR = dai; + desc->hwdesc.CCR = ccr; + } else { + desc->hwdesc32.SAIR = sai; + desc->hwdesc32.DAIR = dai; + desc->hwdesc32.CCR = ccr; + } +} + +#endif /* TXX9_DMA_USE_SIMPLE_CHAIN */ + +#endif /* TXX9DMAC_H */ diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index ab4f359..4339b1a 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -5,7 +5,7 @@ # menuconfig EDAC - bool "EDAC - error detection and reporting" + bool "EDAC (Error Detection And Correction) reporting" depends on HAS_IOMEM depends on X86 || PPC help @@ -232,4 +232,13 @@ config EDAC_AMD8111 Note, add more Kconfig dependency if it's adopted on some machine other than Maple. +config EDAC_CPC925 + tristate "IBM CPC925 Memory Controller (PPC970FX)" + depends on EDAC_MM_EDAC && PPC64 + help + Support for error detection and correction on the + IBM CPC925 Bridge and Memory Controller, which is + a companion chip to the PowerPC 970 family of + processors. 
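As a rough illustration (not part of the patch), a PPC64 configuration that builds the new driver as a module would carry a fragment like:

	# .config fragment (hypothetical)
	CONFIG_EDAC=y
	CONFIG_EDAC_MM_EDAC=m
	CONFIG_EDAC_CPC925=m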
+ endif # EDAC diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index 633dc56..98aa4a7 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -18,6 +18,7 @@ edac_core-objs += edac_pci.o edac_pci_sysfs.o endif obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o +obj-$(CONFIG_EDAC_CPC925) += cpc925_edac.o obj-$(CONFIG_EDAC_I5000) += i5000_edac.o obj-$(CONFIG_EDAC_I5100) += i5100_edac.o obj-$(CONFIG_EDAC_I5400) += i5400_edac.o diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c index 2cb58ef..35b78d0 100644 --- a/drivers/edac/amd8111_edac.c +++ b/drivers/edac/amd8111_edac.c @@ -37,7 +37,6 @@ #define AMD8111_EDAC_MOD_STR "amd8111_edac" #define PCI_DEVICE_ID_AMD_8111_PCI 0x7460 -static int edac_dev_idx; enum amd8111_edac_devs { LPC_BRIDGE = 0, @@ -377,7 +376,7 @@ static int amd8111_dev_probe(struct pci_dev *dev, * edac_device_ctl_info, but make use of existing * one instead. */ - dev_info->edac_idx = edac_dev_idx++; + dev_info->edac_idx = edac_device_alloc_index(); dev_info->edac_dev = edac_device_alloc_ctl_info(0, dev_info->ctl_name, 1, NULL, 0, 0, diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c index cb0f639..c973004 100644 --- a/drivers/edac/cell_edac.c +++ b/drivers/edac/cell_edac.c @@ -227,7 +227,7 @@ static struct platform_driver cell_edac_driver = { .owner = THIS_MODULE, }, .probe = cell_edac_probe, - .remove = cell_edac_remove, + .remove = __devexit_p(cell_edac_remove), }; static int __init cell_edac_init(void) diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c new file mode 100644 index 0000000..8c54196 --- /dev/null +++ b/drivers/edac/cpc925_edac.c @@ -0,0 +1,1017 @@ +/* + * cpc925_edac.c, EDAC driver for IBM CPC925 Bridge and Memory Controller. + * + * Copyright (c) 2008 Wind River Systems, Inc. + * + * Authors: Cao Qingtao <qingtao.cao@windriver.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/edac.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +#include "edac_core.h" +#include "edac_module.h" + +#define CPC925_EDAC_REVISION " Ver: 1.0.0 " __DATE__ +#define CPC925_EDAC_MOD_STR "cpc925_edac" + +#define cpc925_printk(level, fmt, arg...) \ + edac_printk(level, "CPC925", fmt, ##arg) + +#define cpc925_mc_printk(mci, level, fmt, arg...) \ + edac_mc_chipset_printk(mci, level, "CPC925", fmt, ##arg) + +/* + * CPC925 registers are of 32 bits with bit0 defined at the + * most significant bit and bit31 at that of least significant. + */ +#define CPC925_BITS_PER_REG 32 +#define CPC925_BIT(nr) (1UL << (CPC925_BITS_PER_REG - 1 - nr)) + +/* + * EDAC device names for the error detections of + * CPU Interface and Hypertransport Link. 
+ */ +#define CPC925_CPU_ERR_DEV "cpu" +#define CPC925_HT_LINK_DEV "htlink" + +/* Suppose DDR Refresh cycle is 15.6 microsecond */ +#define CPC925_REF_FREQ 0xFA69 +#define CPC925_SCRUB_BLOCK_SIZE 64 /* bytes */ +#define CPC925_NR_CSROWS 8 + +/* + * All registers and bits definitions are taken from + * "CPC925 Bridge and Memory Controller User Manual, SA14-2761-02". + */ + +/* + * CPU and Memory Controller Registers + */ +/************************************************************ + * Processor Interface Exception Mask Register (APIMASK) + ************************************************************/ +#define REG_APIMASK_OFFSET 0x30070 +enum apimask_bits { + APIMASK_DART = CPC925_BIT(0), /* DART Exception */ + APIMASK_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */ + APIMASK_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */ + APIMASK_STAT = CPC925_BIT(3), /* Status Exception */ + APIMASK_DERR = CPC925_BIT(4), /* Data Error Exception */ + APIMASK_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */ + APIMASK_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */ + /* BIT(7) Reserved */ + APIMASK_ECC_UE_H = CPC925_BIT(8), /* UECC upper */ + APIMASK_ECC_CE_H = CPC925_BIT(9), /* CECC upper */ + APIMASK_ECC_UE_L = CPC925_BIT(10), /* UECC lower */ + APIMASK_ECC_CE_L = CPC925_BIT(11), /* CECC lower */ + + CPU_MASK_ENABLE = (APIMASK_DART | APIMASK_ADI0 | APIMASK_ADI1 | + APIMASK_STAT | APIMASK_DERR | APIMASK_ADRS0 | + APIMASK_ADRS1), + ECC_MASK_ENABLE = (APIMASK_ECC_UE_H | APIMASK_ECC_CE_H | + APIMASK_ECC_UE_L | APIMASK_ECC_CE_L), +}; + +/************************************************************ + * Processor Interface Exception Register (APIEXCP) + ************************************************************/ +#define REG_APIEXCP_OFFSET 0x30060 +enum apiexcp_bits { + APIEXCP_DART = CPC925_BIT(0), /* DART Exception */ + APIEXCP_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */ + APIEXCP_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */ + APIEXCP_STAT = CPC925_BIT(3), /* Status Exception */ + APIEXCP_DERR = CPC925_BIT(4), /* Data Error Exception */ + APIEXCP_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */ + APIEXCP_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */ + /* BIT(7) Reserved */ + APIEXCP_ECC_UE_H = CPC925_BIT(8), /* UECC upper */ + APIEXCP_ECC_CE_H = CPC925_BIT(9), /* CECC upper */ + APIEXCP_ECC_UE_L = CPC925_BIT(10), /* UECC lower */ + APIEXCP_ECC_CE_L = CPC925_BIT(11), /* CECC lower */ + + CPU_EXCP_DETECTED = (APIEXCP_DART | APIEXCP_ADI0 | APIEXCP_ADI1 | + APIEXCP_STAT | APIEXCP_DERR | APIEXCP_ADRS0 | + APIEXCP_ADRS1), + UECC_EXCP_DETECTED = (APIEXCP_ECC_UE_H | APIEXCP_ECC_UE_L), + CECC_EXCP_DETECTED = (APIEXCP_ECC_CE_H | APIEXCP_ECC_CE_L), + ECC_EXCP_DETECTED = (UECC_EXCP_DETECTED | CECC_EXCP_DETECTED), +}; + +/************************************************************ + * Memory Bus Configuration Register (MBCR) +************************************************************/ +#define REG_MBCR_OFFSET 0x2190 +#define MBCR_64BITCFG_SHIFT 23 +#define MBCR_64BITCFG_MASK (1UL << MBCR_64BITCFG_SHIFT) +#define MBCR_64BITBUS_SHIFT 22 +#define MBCR_64BITBUS_MASK (1UL << MBCR_64BITBUS_SHIFT) + +/************************************************************ + * Memory Bank Mode Register (MBMR) +************************************************************/ +#define REG_MBMR_OFFSET 0x21C0 +#define MBMR_MODE_MAX_VALUE 0xF +#define MBMR_MODE_SHIFT 25 +#define MBMR_MODE_MASK (MBMR_MODE_MAX_VALUE << MBMR_MODE_SHIFT) +#define MBMR_BBA_SHIFT 24 +#define 
MBMR_BBA_MASK (1UL << MBMR_BBA_SHIFT) + +/************************************************************ + * Memory Bank Boundary Address Register (MBBAR) + ************************************************************/ +#define REG_MBBAR_OFFSET 0x21D0 +#define MBBAR_BBA_MAX_VALUE 0xFF +#define MBBAR_BBA_SHIFT 24 +#define MBBAR_BBA_MASK (MBBAR_BBA_MAX_VALUE << MBBAR_BBA_SHIFT) + +/************************************************************ + * Memory Scrub Control Register (MSCR) + ************************************************************/ +#define REG_MSCR_OFFSET 0x2400 +#define MSCR_SCRUB_MOD_MASK 0xC0000000 /* scrub_mod - bit0:1*/ +#define MSCR_BACKGR_SCRUB 0x40000000 /* 01 */ +#define MSCR_SI_SHIFT 16 /* si - bit8:15*/ +#define MSCR_SI_MAX_VALUE 0xFF +#define MSCR_SI_MASK (MSCR_SI_MAX_VALUE << MSCR_SI_SHIFT) + +/************************************************************ + * Memory Scrub Range Start Register (MSRSR) + ************************************************************/ +#define REG_MSRSR_OFFSET 0x2410 + +/************************************************************ + * Memory Scrub Range End Register (MSRER) + ************************************************************/ +#define REG_MSRER_OFFSET 0x2420 + +/************************************************************ + * Memory Scrub Pattern Register (MSPR) + ************************************************************/ +#define REG_MSPR_OFFSET 0x2430 + +/************************************************************ + * Memory Check Control Register (MCCR) + ************************************************************/ +#define REG_MCCR_OFFSET 0x2440 +enum mccr_bits { + MCCR_ECC_EN = CPC925_BIT(0), /* ECC high and low check */ +}; + +/************************************************************ + * Memory Check Range End Register (MCRER) + ************************************************************/ +#define REG_MCRER_OFFSET 0x2450 + +/************************************************************ + * Memory Error Address Register (MEAR) + ************************************************************/ +#define REG_MEAR_OFFSET 0x2460 +#define MEAR_BCNT_MAX_VALUE 0x3 +#define MEAR_BCNT_SHIFT 30 +#define MEAR_BCNT_MASK (MEAR_BCNT_MAX_VALUE << MEAR_BCNT_SHIFT) +#define MEAR_RANK_MAX_VALUE 0x7 +#define MEAR_RANK_SHIFT 27 +#define MEAR_RANK_MASK (MEAR_RANK_MAX_VALUE << MEAR_RANK_SHIFT) +#define MEAR_COL_MAX_VALUE 0x7FF +#define MEAR_COL_SHIFT 16 +#define MEAR_COL_MASK (MEAR_COL_MAX_VALUE << MEAR_COL_SHIFT) +#define MEAR_BANK_MAX_VALUE 0x3 +#define MEAR_BANK_SHIFT 14 +#define MEAR_BANK_MASK (MEAR_BANK_MAX_VALUE << MEAR_BANK_SHIFT) +#define MEAR_ROW_MASK 0x00003FFF + +/************************************************************ + * Memory Error Syndrome Register (MESR) + ************************************************************/ +#define REG_MESR_OFFSET 0x2470 +#define MESR_ECC_SYN_H_MASK 0xFF00 +#define MESR_ECC_SYN_L_MASK 0x00FF + +/************************************************************ + * Memory Mode Control Register (MMCR) + ************************************************************/ +#define REG_MMCR_OFFSET 0x2500 +enum mmcr_bits { + MMCR_REG_DIMM_MODE = CPC925_BIT(3), +}; + +/* + * HyperTransport Link Registers + */ +/************************************************************ + * Error Handling/Enumeration Scratch Pad Register (ERRCTRL) + ************************************************************/ +#define REG_ERRCTRL_OFFSET 0x70140 +enum errctrl_bits { /* nonfatal interrupts for */ + ERRCTRL_SERR_NF = 
CPC925_BIT(0), /* system error */ + ERRCTRL_CRC_NF = CPC925_BIT(1), /* CRC error */ + ERRCTRL_RSP_NF = CPC925_BIT(2), /* Response error */ + ERRCTRL_EOC_NF = CPC925_BIT(3), /* End-Of-Chain error */ + ERRCTRL_OVF_NF = CPC925_BIT(4), /* Overflow error */ + ERRCTRL_PROT_NF = CPC925_BIT(5), /* Protocol error */ + + ERRCTRL_RSP_ERR = CPC925_BIT(6), /* Response error received */ + ERRCTRL_CHN_FAL = CPC925_BIT(7), /* Sync flooding detected */ + + HT_ERRCTRL_ENABLE = (ERRCTRL_SERR_NF | ERRCTRL_CRC_NF | + ERRCTRL_RSP_NF | ERRCTRL_EOC_NF | + ERRCTRL_OVF_NF | ERRCTRL_PROT_NF), + HT_ERRCTRL_DETECTED = (ERRCTRL_RSP_ERR | ERRCTRL_CHN_FAL), +}; + +/************************************************************ + * Link Configuration and Link Control Register (LINKCTRL) + ************************************************************/ +#define REG_LINKCTRL_OFFSET 0x70110 +enum linkctrl_bits { + LINKCTRL_CRC_ERR = (CPC925_BIT(22) | CPC925_BIT(23)), + LINKCTRL_LINK_FAIL = CPC925_BIT(27), + + HT_LINKCTRL_DETECTED = (LINKCTRL_CRC_ERR | LINKCTRL_LINK_FAIL), +}; + +/************************************************************ + * Link FreqCap/Error/Freq/Revision ID Register (LINKERR) + ************************************************************/ +#define REG_LINKERR_OFFSET 0x70120 +enum linkerr_bits { + LINKERR_EOC_ERR = CPC925_BIT(17), /* End-Of-Chain error */ + LINKERR_OVF_ERR = CPC925_BIT(18), /* Receive Buffer Overflow */ + LINKERR_PROT_ERR = CPC925_BIT(19), /* Protocol error */ + + HT_LINKERR_DETECTED = (LINKERR_EOC_ERR | LINKERR_OVF_ERR | + LINKERR_PROT_ERR), +}; + +/************************************************************ + * Bridge Control Register (BRGCTRL) + ************************************************************/ +#define REG_BRGCTRL_OFFSET 0x70300 +enum brgctrl_bits { + BRGCTRL_DETSERR = CPC925_BIT(0), /* SERR on Secondary Bus */ + BRGCTRL_SECBUSRESET = CPC925_BIT(9), /* Secondary Bus Reset */ +}; + +/* Private structure for edac memory controller */ +struct cpc925_mc_pdata { + void __iomem *vbase; + unsigned long total_mem; + const char *name; + int edac_idx; +}; + +/* Private structure for common edac device */ +struct cpc925_dev_info { + void __iomem *vbase; + struct platform_device *pdev; + char *ctl_name; + int edac_idx; + struct edac_device_ctl_info *edac_dev; + void (*init)(struct cpc925_dev_info *dev_info); + void (*exit)(struct cpc925_dev_info *dev_info); + void (*check)(struct edac_device_ctl_info *edac_dev); +}; + +/* Get total memory size from Open Firmware DTB */ +static void get_total_mem(struct cpc925_mc_pdata *pdata) +{ + struct device_node *np = NULL; + const unsigned int *reg, *reg_end; + int len, sw, aw; + unsigned long start, size; + + np = of_find_node_by_type(NULL, "memory"); + if (!np) + return; + + aw = of_n_addr_cells(np); + sw = of_n_size_cells(np); + reg = (const unsigned int *)of_get_property(np, "reg", &len); + reg_end = reg + len/4; + + pdata->total_mem = 0; + do { + start = of_read_number(reg, aw); + reg += aw; + size = of_read_number(reg, sw); + reg += sw; + debugf1("%s: start 0x%lx, size 0x%lx\n", __func__, + start, size); + pdata->total_mem += size; + } while (reg < reg_end); + + of_node_put(np); + debugf0("%s: total_mem 0x%lx\n", __func__, pdata->total_mem); +} + +static void cpc925_init_csrows(struct mem_ctl_info *mci) +{ + struct cpc925_mc_pdata *pdata = mci->pvt_info; + struct csrow_info *csrow; + int index; + u32 mbmr, mbbar, bba; + unsigned long row_size, last_nr_pages = 0; + + get_total_mem(pdata); + + for (index = 0; index < mci->nr_csrows; index++) { 
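	/*
	 * Worked example of the sizing below, assuming 4 KB pages
	 * (PAGE_SHIFT == 12): a concatenated bba of 2 describes a
	 * 512 MB csrow, since row_size = 2 * (1UL << 28) = 0x20000000
	 * bytes, giving nr_pages = 0x20000000 >> 12 = 131072.
	 */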
+ mbmr = __raw_readl(pdata->vbase + REG_MBMR_OFFSET + + 0x20 * index); + mbbar = __raw_readl(pdata->vbase + REG_MBBAR_OFFSET + + 0x20 + index); + bba = (((mbmr & MBMR_BBA_MASK) >> MBMR_BBA_SHIFT) << 8) | + ((mbbar & MBBAR_BBA_MASK) >> MBBAR_BBA_SHIFT); + + if (bba == 0) + continue; /* not populated */ + + csrow = &mci->csrows[index]; + + row_size = bba * (1UL << 28); /* 256M */ + csrow->first_page = last_nr_pages; + csrow->nr_pages = row_size >> PAGE_SHIFT; + csrow->last_page = csrow->first_page + csrow->nr_pages - 1; + last_nr_pages = csrow->last_page + 1; + + csrow->mtype = MEM_RDDR; + csrow->edac_mode = EDAC_SECDED; + + switch (csrow->nr_channels) { + case 1: /* Single channel */ + csrow->grain = 32; /* four-beat burst of 32 bytes */ + break; + case 2: /* Dual channel */ + default: + csrow->grain = 64; /* four-beat burst of 64 bytes */ + break; + } + + switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) { + case 6: /* 0110, no way to differentiate X8 VS X16 */ + case 5: /* 0101 */ + case 8: /* 1000 */ + csrow->dtype = DEV_X16; + break; + case 7: /* 0111 */ + case 9: /* 1001 */ + csrow->dtype = DEV_X8; + break; + default: + csrow->dtype = DEV_UNKNOWN; + break; + } + } +} + +/* Enable memory controller ECC detection */ +static void cpc925_mc_init(struct mem_ctl_info *mci) +{ + struct cpc925_mc_pdata *pdata = mci->pvt_info; + u32 apimask; + u32 mccr; + + /* Enable various ECC error exceptions */ + apimask = __raw_readl(pdata->vbase + REG_APIMASK_OFFSET); + if ((apimask & ECC_MASK_ENABLE) == 0) { + apimask |= ECC_MASK_ENABLE; + __raw_writel(apimask, pdata->vbase + REG_APIMASK_OFFSET); + } + + /* Enable ECC detection */ + mccr = __raw_readl(pdata->vbase + REG_MCCR_OFFSET); + if ((mccr & MCCR_ECC_EN) == 0) { + mccr |= MCCR_ECC_EN; + __raw_writel(mccr, pdata->vbase + REG_MCCR_OFFSET); + } +} + +/* Disable memory controller ECC detection */ +static void cpc925_mc_exit(struct mem_ctl_info *mci) +{ + /* + * WARNING: + * We are supposed to clear the ECC error detection bits, + * and it will be no problem to do so. However, once they + * are cleared here if we want to re-install CPC925 EDAC + * module later, setting them up in cpc925_mc_init() will + * trigger machine check exception. + * Also, it's ok to leave ECC error detection bits enabled, + * since they are reset to 1 by default or by boot loader. + */ + + return; +} + +/* + * Revert DDR column/row/bank addresses into page frame number and + * offset in page. 
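 *
 * (The PA numbers below use IBM bit numbering, bit 0 being the MSB of
 * the 64-bit address, so PA bit n is bit (63 - n) in LSB-0 terms. For
 * example CA0 comes from PA59, i.e. 1UL << 4, which is why the column
 * loop in cpc925_mc_get_pfn() shifts column bits into positions 14
 * down through 4.)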
+ * + * Suppose memory mode is 0x0111(128-bit mode, identical DIMM pairs), + * physical address(PA) bits to column address(CA) bits mappings are: + * CA 0 1 2 3 4 5 6 7 8 9 10 + * PA 59 58 57 56 55 54 53 52 51 50 49 + * + * physical address(PA) bits to bank address(BA) bits mappings are: + * BA 0 1 + * PA 43 44 + * + * physical address(PA) bits to row address(RA) bits mappings are: + * RA 0 1 2 3 4 5 6 7 8 9 10 11 12 + * PA 36 35 34 48 47 46 45 40 41 42 39 38 37 + */ +static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear, + unsigned long *pfn, unsigned long *offset, int *csrow) +{ + u32 bcnt, rank, col, bank, row; + u32 c; + unsigned long pa; + int i; + + bcnt = (mear & MEAR_BCNT_MASK) >> MEAR_BCNT_SHIFT; + rank = (mear & MEAR_RANK_MASK) >> MEAR_RANK_SHIFT; + col = (mear & MEAR_COL_MASK) >> MEAR_COL_SHIFT; + bank = (mear & MEAR_BANK_MASK) >> MEAR_BANK_SHIFT; + row = mear & MEAR_ROW_MASK; + + *csrow = rank; + +#ifdef CONFIG_EDAC_DEBUG + if (mci->csrows[rank].first_page == 0) { + cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a " + "non-populated csrow, broken hardware?\n"); + return; + } +#endif + + /* Revert csrow number */ + pa = mci->csrows[rank].first_page << PAGE_SHIFT; + + /* Revert column address */ + col += bcnt; + for (i = 0; i < 11; i++) { + c = col & 0x1; + col >>= 1; + pa |= c << (14 - i); + } + + /* Revert bank address */ + pa |= bank << 19; + + /* Revert row address, in 4 steps */ + for (i = 0; i < 3; i++) { + c = row & 0x1; + row >>= 1; + pa |= c << (26 - i); + } + + for (i = 0; i < 3; i++) { + c = row & 0x1; + row >>= 1; + pa |= c << (21 + i); + } + + for (i = 0; i < 4; i++) { + c = row & 0x1; + row >>= 1; + pa |= c << (18 - i); + } + + for (i = 0; i < 3; i++) { + c = row & 0x1; + row >>= 1; + pa |= c << (29 - i); + } + + *offset = pa & (PAGE_SIZE - 1); + *pfn = pa >> PAGE_SHIFT; + + debugf0("%s: ECC physical address 0x%lx\n", __func__, pa); +} + +static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome) +{ + if ((syndrome & MESR_ECC_SYN_H_MASK) == 0) + return 0; + + if ((syndrome & MESR_ECC_SYN_L_MASK) == 0) + return 1; + + cpc925_mc_printk(mci, KERN_INFO, "Unexpected syndrome value: 0x%x\n", + syndrome); + return 1; +} + +/* Check memory controller registers for ECC errors */ +static void cpc925_mc_check(struct mem_ctl_info *mci) +{ + struct cpc925_mc_pdata *pdata = mci->pvt_info; + u32 apiexcp; + u32 mear; + u32 mesr; + u16 syndrome; + unsigned long pfn = 0, offset = 0; + int csrow = 0, channel = 0; + + /* APIEXCP is cleared when read */ + apiexcp = __raw_readl(pdata->vbase + REG_APIEXCP_OFFSET); + if ((apiexcp & ECC_EXCP_DETECTED) == 0) + return; + + mesr = __raw_readl(pdata->vbase + REG_MESR_OFFSET); + syndrome = mesr | (MESR_ECC_SYN_H_MASK | MESR_ECC_SYN_L_MASK); + + mear = __raw_readl(pdata->vbase + REG_MEAR_OFFSET); + + /* Revert column/row addresses into page frame number, etc */ + cpc925_mc_get_pfn(mci, mear, &pfn, &offset, &csrow); + + if (apiexcp & CECC_EXCP_DETECTED) { + cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n"); + channel = cpc925_mc_find_channel(mci, syndrome); + edac_mc_handle_ce(mci, pfn, offset, syndrome, + csrow, channel, mci->ctl_name); + } + + if (apiexcp & UECC_EXCP_DETECTED) { + cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n"); + edac_mc_handle_ue(mci, pfn, offset, csrow, mci->ctl_name); + } + + cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n"); + cpc925_mc_printk(mci, KERN_INFO, "APIMASK 0x%08x\n", + __raw_readl(pdata->vbase + REG_APIMASK_OFFSET)); + cpc925_mc_printk(mci, KERN_INFO, "APIEXCP 0x%08x\n", + 
apiexcp); + cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Ctrl 0x%08x\n", + __raw_readl(pdata->vbase + REG_MSCR_OFFSET)); + cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge Start 0x%08x\n", + __raw_readl(pdata->vbase + REG_MSRSR_OFFSET)); + cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge End 0x%08x\n", + __raw_readl(pdata->vbase + REG_MSRER_OFFSET)); + cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Pattern 0x%08x\n", + __raw_readl(pdata->vbase + REG_MSPR_OFFSET)); + cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Ctrl 0x%08x\n", + __raw_readl(pdata->vbase + REG_MCCR_OFFSET)); + cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Rge End 0x%08x\n", + __raw_readl(pdata->vbase + REG_MCRER_OFFSET)); + cpc925_mc_printk(mci, KERN_INFO, "Mem Err Address 0x%08x\n", + mesr); + cpc925_mc_printk(mci, KERN_INFO, "Mem Err Syndrome 0x%08x\n", + syndrome); +} + +/******************** CPU err device********************************/ +/* Enable CPU Errors detection */ +static void cpc925_cpu_init(struct cpc925_dev_info *dev_info) +{ + u32 apimask; + + apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET); + if ((apimask & CPU_MASK_ENABLE) == 0) { + apimask |= CPU_MASK_ENABLE; + __raw_writel(apimask, dev_info->vbase + REG_APIMASK_OFFSET); + } +} + +/* Disable CPU Errors detection */ +static void cpc925_cpu_exit(struct cpc925_dev_info *dev_info) +{ + /* + * WARNING: + * We are supposed to clear the CPU error detection bits, + * and it will be no problem to do so. However, once they + * are cleared here if we want to re-install CPC925 EDAC + * module later, setting them up in cpc925_cpu_init() will + * trigger machine check exception. + * Also, it's ok to leave CPU error detection bits enabled, + * since they are reset to 1 by default. + */ + + return; +} + +/* Check for CPU Errors */ +static void cpc925_cpu_check(struct edac_device_ctl_info *edac_dev) +{ + struct cpc925_dev_info *dev_info = edac_dev->pvt_info; + u32 apiexcp; + u32 apimask; + + /* APIEXCP is cleared when read */ + apiexcp = __raw_readl(dev_info->vbase + REG_APIEXCP_OFFSET); + if ((apiexcp & CPU_EXCP_DETECTED) == 0) + return; + + apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET); + cpc925_printk(KERN_INFO, "Processor Interface Fault\n" + "Processor Interface register dump:\n"); + cpc925_printk(KERN_INFO, "APIMASK 0x%08x\n", apimask); + cpc925_printk(KERN_INFO, "APIEXCP 0x%08x\n", apiexcp); + + edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name); +} + +/******************** HT Link err device****************************/ +/* Enable HyperTransport Link Error detection */ +static void cpc925_htlink_init(struct cpc925_dev_info *dev_info) +{ + u32 ht_errctrl; + + ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET); + if ((ht_errctrl & HT_ERRCTRL_ENABLE) == 0) { + ht_errctrl |= HT_ERRCTRL_ENABLE; + __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET); + } +} + +/* Disable HyperTransport Link Error detection */ +static void cpc925_htlink_exit(struct cpc925_dev_info *dev_info) +{ + u32 ht_errctrl; + + ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET); + ht_errctrl &= ~HT_ERRCTRL_ENABLE; + __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET); +} + +/* Check for HyperTransport Link errors */ +static void cpc925_htlink_check(struct edac_device_ctl_info *edac_dev) +{ + struct cpc925_dev_info *dev_info = edac_dev->pvt_info; + u32 brgctrl = __raw_readl(dev_info->vbase + REG_BRGCTRL_OFFSET); + u32 linkctrl = __raw_readl(dev_info->vbase + REG_LINKCTRL_OFFSET); + u32 errctrl = __raw_readl(dev_info->vbase + 
REG_ERRCTRL_OFFSET); + u32 linkerr = __raw_readl(dev_info->vbase + REG_LINKERR_OFFSET); + + if (!((brgctrl & BRGCTRL_DETSERR) || + (linkctrl & HT_LINKCTRL_DETECTED) || + (errctrl & HT_ERRCTRL_DETECTED) || + (linkerr & HT_LINKERR_DETECTED))) + return; + + cpc925_printk(KERN_INFO, "HT Link Fault\n" + "HT register dump:\n"); + cpc925_printk(KERN_INFO, "Bridge Ctrl 0x%08x\n", + brgctrl); + cpc925_printk(KERN_INFO, "Link Config Ctrl 0x%08x\n", + linkctrl); + cpc925_printk(KERN_INFO, "Error Enum and Ctrl 0x%08x\n", + errctrl); + cpc925_printk(KERN_INFO, "Link Error 0x%08x\n", + linkerr); + + /* Clear by write 1 */ + if (brgctrl & BRGCTRL_DETSERR) + __raw_writel(BRGCTRL_DETSERR, + dev_info->vbase + REG_BRGCTRL_OFFSET); + + if (linkctrl & HT_LINKCTRL_DETECTED) + __raw_writel(HT_LINKCTRL_DETECTED, + dev_info->vbase + REG_LINKCTRL_OFFSET); + + /* Initiate Secondary Bus Reset to clear the chain failure */ + if (errctrl & ERRCTRL_CHN_FAL) + __raw_writel(BRGCTRL_SECBUSRESET, + dev_info->vbase + REG_BRGCTRL_OFFSET); + + if (errctrl & ERRCTRL_RSP_ERR) + __raw_writel(ERRCTRL_RSP_ERR, + dev_info->vbase + REG_ERRCTRL_OFFSET); + + if (linkerr & HT_LINKERR_DETECTED) + __raw_writel(HT_LINKERR_DETECTED, + dev_info->vbase + REG_LINKERR_OFFSET); + + edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name); +} + +static struct cpc925_dev_info cpc925_devs[] = { + { + .ctl_name = CPC925_CPU_ERR_DEV, + .init = cpc925_cpu_init, + .exit = cpc925_cpu_exit, + .check = cpc925_cpu_check, + }, + { + .ctl_name = CPC925_HT_LINK_DEV, + .init = cpc925_htlink_init, + .exit = cpc925_htlink_exit, + .check = cpc925_htlink_check, + }, + {0}, /* Terminated by NULL */ +}; + +/* + * Add CPU Err detection and HyperTransport Link Err detection + * as common "edac_device", they have no corresponding device + * nodes in the Open Firmware DTB and we have to add platform + * devices for them. Also, they will share the MMIO with that + * of memory controller. + */ +static void cpc925_add_edac_devices(void __iomem *vbase) +{ + struct cpc925_dev_info *dev_info; + + if (!vbase) { + cpc925_printk(KERN_ERR, "MMIO not established yet\n"); + return; + } + + for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) { + dev_info->vbase = vbase; + dev_info->pdev = platform_device_register_simple( + dev_info->ctl_name, 0, NULL, 0); + if (IS_ERR(dev_info->pdev)) { + cpc925_printk(KERN_ERR, + "Can't register platform device for %s\n", + dev_info->ctl_name); + continue; + } + + /* + * Don't have to allocate private structure but + * make use of cpc925_devs[] instead. 
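 * (Each entry is registered as its own small platform device so that
 * the EDAC core has a struct device to attach sysfs entries to; the
 * register mapping itself is still the memory controller's vbase.)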
+ */ + dev_info->edac_idx = edac_device_alloc_index(); + dev_info->edac_dev = + edac_device_alloc_ctl_info(0, dev_info->ctl_name, + 1, NULL, 0, 0, NULL, 0, dev_info->edac_idx); + if (!dev_info->edac_dev) { + cpc925_printk(KERN_ERR, "No memory for edac device\n"); + goto err1; + } + + dev_info->edac_dev->pvt_info = dev_info; + dev_info->edac_dev->dev = &dev_info->pdev->dev; + dev_info->edac_dev->ctl_name = dev_info->ctl_name; + dev_info->edac_dev->mod_name = CPC925_EDAC_MOD_STR; + dev_info->edac_dev->dev_name = dev_name(&dev_info->pdev->dev); + + if (edac_op_state == EDAC_OPSTATE_POLL) + dev_info->edac_dev->edac_check = dev_info->check; + + if (dev_info->init) + dev_info->init(dev_info); + + if (edac_device_add_device(dev_info->edac_dev) > 0) { + cpc925_printk(KERN_ERR, + "Unable to add edac device for %s\n", + dev_info->ctl_name); + goto err2; + } + + debugf0("%s: Successfully added edac device for %s\n", + __func__, dev_info->ctl_name); + + continue; + +err2: + if (dev_info->exit) + dev_info->exit(dev_info); + edac_device_free_ctl_info(dev_info->edac_dev); +err1: + platform_device_unregister(dev_info->pdev); + } +} + +/* + * Delete the common "edac_device" for CPU Err Detection + * and HyperTransport Link Err Detection + */ +static void cpc925_del_edac_devices(void) +{ + struct cpc925_dev_info *dev_info; + + for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) { + if (dev_info->edac_dev) { + edac_device_del_device(dev_info->edac_dev->dev); + edac_device_free_ctl_info(dev_info->edac_dev); + platform_device_unregister(dev_info->pdev); + } + + if (dev_info->exit) + dev_info->exit(dev_info); + + debugf0("%s: Successfully deleted edac device for %s\n", + __func__, dev_info->ctl_name); + } +} + +/* Convert the current background scrub rate into bytes/sec bandwidth */ +static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw) +{ + struct cpc925_mc_pdata *pdata = mci->pvt_info; + u32 mscr; + u8 si; + + mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET); + si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT; + + debugf0("%s, Mem Scrub Ctrl Register 0x%x\n", __func__, mscr); + + if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) || + (si == 0)) { + cpc925_mc_printk(mci, KERN_INFO, "Scrub mode not enabled\n"); + *bw = 0; + } else + *bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si; + + return 0; +} + +/* Return 0 for single channel; 1 for dual channel */ +static int cpc925_mc_get_channels(void __iomem *vbase) +{ + int dual = 0; + u32 mbcr; + + mbcr = __raw_readl(vbase + REG_MBCR_OFFSET); + + /* + * Dual channel only when 128-bit wide physical bus + * and 128-bit configuration. + */ + if (((mbcr & MBCR_64BITCFG_MASK) == 0) && + ((mbcr & MBCR_64BITBUS_MASK) == 0)) + dual = 1; + + debugf0("%s: %s channel\n", __func__, + (dual > 0) ? 
"Dual" : "Single"); + + return dual; +} + +static int __devinit cpc925_probe(struct platform_device *pdev) +{ + static int edac_mc_idx; + struct mem_ctl_info *mci; + void __iomem *vbase; + struct cpc925_mc_pdata *pdata; + struct resource *r; + int res = 0, nr_channels; + + debugf0("%s: %s platform device found!\n", __func__, pdev->name); + + if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) { + res = -ENOMEM; + goto out; + } + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + cpc925_printk(KERN_ERR, "Unable to get resource\n"); + res = -ENOENT; + goto err1; + } + + if (!devm_request_mem_region(&pdev->dev, + r->start, + r->end - r->start + 1, + pdev->name)) { + cpc925_printk(KERN_ERR, "Unable to request mem region\n"); + res = -EBUSY; + goto err1; + } + + vbase = devm_ioremap(&pdev->dev, r->start, r->end - r->start + 1); + if (!vbase) { + cpc925_printk(KERN_ERR, "Unable to ioremap device\n"); + res = -ENOMEM; + goto err2; + } + + nr_channels = cpc925_mc_get_channels(vbase); + mci = edac_mc_alloc(sizeof(struct cpc925_mc_pdata), + CPC925_NR_CSROWS, nr_channels + 1, edac_mc_idx); + if (!mci) { + cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n"); + res = -ENOMEM; + goto err2; + } + + pdata = mci->pvt_info; + pdata->vbase = vbase; + pdata->edac_idx = edac_mc_idx++; + pdata->name = pdev->name; + + mci->dev = &pdev->dev; + platform_set_drvdata(pdev, mci); + mci->dev_name = dev_name(&pdev->dev); + mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; + mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; + mci->edac_cap = EDAC_FLAG_SECDED; + mci->mod_name = CPC925_EDAC_MOD_STR; + mci->mod_ver = CPC925_EDAC_REVISION; + mci->ctl_name = pdev->name; + + if (edac_op_state == EDAC_OPSTATE_POLL) + mci->edac_check = cpc925_mc_check; + + mci->ctl_page_to_phys = NULL; + mci->scrub_mode = SCRUB_SW_SRC; + mci->set_sdram_scrub_rate = NULL; + mci->get_sdram_scrub_rate = cpc925_get_sdram_scrub_rate; + + cpc925_init_csrows(mci); + + /* Setup memory controller registers */ + cpc925_mc_init(mci); + + if (edac_mc_add_mc(mci) > 0) { + cpc925_mc_printk(mci, KERN_ERR, "Failed edac_mc_add_mc()\n"); + goto err3; + } + + cpc925_add_edac_devices(vbase); + + /* get this far and it's successful */ + debugf0("%s: success\n", __func__); + + res = 0; + goto out; + +err3: + cpc925_mc_exit(mci); + edac_mc_free(mci); +err2: + devm_release_mem_region(&pdev->dev, r->start, r->end-r->start+1); +err1: + devres_release_group(&pdev->dev, cpc925_probe); +out: + return res; +} + +static int cpc925_remove(struct platform_device *pdev) +{ + struct mem_ctl_info *mci = platform_get_drvdata(pdev); + + /* + * Delete common edac devices before edac mc, because + * the former share the MMIO of the latter. 
+ */ + cpc925_del_edac_devices(); + cpc925_mc_exit(mci); + + edac_mc_del_mc(&pdev->dev); + edac_mc_free(mci); + + return 0; +} + +static struct platform_driver cpc925_edac_driver = { + .probe = cpc925_probe, + .remove = cpc925_remove, + .driver = { + .name = "cpc925_edac", + } +}; + +static int __init cpc925_edac_init(void) +{ + int ret = 0; + + printk(KERN_INFO "IBM CPC925 EDAC driver " CPC925_EDAC_REVISION "\n"); + printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc\n"); + + /* Only support POLL mode so far */ + edac_op_state = EDAC_OPSTATE_POLL; + + ret = platform_driver_register(&cpc925_edac_driver); + if (ret) { + printk(KERN_WARNING "Failed to register %s\n", + CPC925_EDAC_MOD_STR); + } + + return ret; +} + +static void __exit cpc925_edac_exit(void) +{ + platform_driver_unregister(&cpc925_edac_driver); +} + +module_init(cpc925_edac_init); +module_exit(cpc925_edac_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>"); +MODULE_DESCRIPTION("IBM CPC925 Bridge and MC EDAC kernel module"); diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h index 48d3b14..3493c6b 100644 --- a/drivers/edac/edac_core.h +++ b/drivers/edac/edac_core.h @@ -841,6 +841,7 @@ extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev, int inst_nr, int block_nr, const char *msg); extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev, int inst_nr, int block_nr, const char *msg); +extern int edac_device_alloc_index(void); /* * edac_pci APIs diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c index a7d2c71..b02a6a6 100644 --- a/drivers/edac/edac_device.c +++ b/drivers/edac/edac_device.c @@ -490,6 +490,20 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev, mutex_unlock(&device_ctls_mutex); } +/* + * edac_device_alloc_index: Allocate a unique device index number + * + * Return: + * allocated index number + */ +int edac_device_alloc_index(void) +{ + static atomic_t device_indexes = ATOMIC_INIT(0); + + return atomic_inc_return(&device_indexes) - 1; +} +EXPORT_SYMBOL_GPL(edac_device_alloc_index); + /** * edac_device_add_device: Insert the 'edac_dev' structure into the * edac_device global list and create sysfs entries associated with diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c index 58e9f8e..51e0e2d 100644 --- a/drivers/firmware/pcdp.c +++ b/drivers/firmware/pcdp.c @@ -28,10 +28,10 @@ setup_serial_console(struct pcdp_uart *uart) char parity; mmio = (uart->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY); - p += sprintf(p, "uart8250,%s,0x%lx", + p += sprintf(p, "uart8250,%s,0x%llx", mmio ? 
"mmio" : "io", uart->addr.address); if (uart->baud) { - p += sprintf(p, ",%lu", uart->baud); + p += sprintf(p, ",%llu", uart->baud); if (uart->bits) { switch (uart->parity) { case 0x2: parity = 'e'; break; diff --git a/drivers/gpio/max7301.c b/drivers/gpio/max7301.c index 3e7f4e0..7b82eaa 100644 --- a/drivers/gpio/max7301.c +++ b/drivers/gpio/max7301.c @@ -287,7 +287,7 @@ exit_destroy: return ret; } -static int max7301_remove(struct spi_device *spi) +static int __devexit max7301_remove(struct spi_device *spi) { struct max7301 *ts; int ret; diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c index 8dc0164..cdb6574 100644 --- a/drivers/gpio/pca953x.c +++ b/drivers/gpio/pca953x.c @@ -15,6 +15,10 @@ #include <linux/init.h> #include <linux/i2c.h> #include <linux/i2c/pca953x.h> +#ifdef CONFIG_OF_GPIO +#include <linux/of_platform.h> +#include <linux/of_gpio.h> +#endif #include <asm/gpio.h> @@ -32,6 +36,7 @@ static const struct i2c_device_id pca953x_id[] = { { "pca9539", 16, }, { "pca9554", 8, }, { "pca9555", 16, }, + { "pca9556", 8, }, { "pca9557", 8, }, { "max7310", 8, }, @@ -49,7 +54,9 @@ struct pca953x_chip { uint16_t reg_direction; struct i2c_client *client; + struct pca953x_platform_data *dyn_pdata; struct gpio_chip gpio_chip; + char **names; }; static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val) @@ -192,8 +199,57 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios) gc->label = chip->client->name; gc->dev = &chip->client->dev; gc->owner = THIS_MODULE; + gc->names = chip->names; } +/* + * Handlers for alternative sources of platform_data + */ +#ifdef CONFIG_OF_GPIO +/* + * Translate OpenFirmware node properties into platform_data + */ +static struct pca953x_platform_data * +pca953x_get_alt_pdata(struct i2c_client *client) +{ + struct pca953x_platform_data *pdata; + struct device_node *node; + const uint16_t *val; + + node = dev_archdata_get_node(&client->dev.archdata); + if (node == NULL) + return NULL; + + pdata = kzalloc(sizeof(struct pca953x_platform_data), GFP_KERNEL); + if (pdata == NULL) { + dev_err(&client->dev, "Unable to allocate platform_data\n"); + return NULL; + } + + pdata->gpio_base = -1; + val = of_get_property(node, "linux,gpio-base", NULL); + if (val) { + if (*val < 0) + dev_warn(&client->dev, + "invalid gpio-base in device tree\n"); + else + pdata->gpio_base = *val; + } + + val = of_get_property(node, "polarity", NULL); + if (val) + pdata->invert = *val; + + return pdata; +} +#else +static struct pca953x_platform_data * +pca953x_get_alt_pdata(struct i2c_client *client) +{ + return NULL; +} +#endif + static int __devinit pca953x_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -201,20 +257,32 @@ static int __devinit pca953x_probe(struct i2c_client *client, struct pca953x_chip *chip; int ret; + chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL); + if (chip == NULL) + return -ENOMEM; + pdata = client->dev.platform_data; if (pdata == NULL) { - dev_dbg(&client->dev, "no platform data\n"); - return -EINVAL; + pdata = pca953x_get_alt_pdata(client); + /* + * Unlike normal platform_data, this is allocated + * dynamically and must be freed in the driver + */ + chip->dyn_pdata = pdata; } - chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL); - if (chip == NULL) - return -ENOMEM; + if (pdata == NULL) { + dev_dbg(&client->dev, "no platform data\n"); + ret = -EINVAL; + goto out_failed; + } chip->client = client; chip->gpio_start = pdata->gpio_base; + chip->names = pdata->names; + /* initialize cached 
registers from their original values. * we can't share this chip with another i2c master. */ @@ -249,6 +317,7 @@ static int __devinit pca953x_probe(struct i2c_client *client, return 0; out_failed: + kfree(chip->dyn_pdata); kfree(chip); return ret; } @@ -276,6 +345,7 @@ static int pca953x_remove(struct i2c_client *client) return ret; } + kfree(chip->dyn_pdata); kfree(chip); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index b0ce44b..64f42b1 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -176,7 +176,7 @@ int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master) /* prebuild the SAREA */ sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE); ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, - _DRM_CONTAINS_LOCK|_DRM_DRIVER, + _DRM_CONTAINS_LOCK, &master_priv->sarea); if (ret) { DRM_ERROR("SAREA setup failed\n"); diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index 34d54e7..de4aad0 100644 --- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c @@ -1300,7 +1300,7 @@ isdn_net_start_xmit(struct sk_buff *skb, struct net_device *ndev) netif_stop_queue(ndev); } } - return 1; + return NETDEV_TX_BUSY; } /* diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c index 8695809..87d88db 100644 --- a/drivers/md/faulty.c +++ b/drivers/md/faulty.c @@ -255,14 +255,14 @@ static void status(struct seq_file *seq, mddev_t *mddev) } -static int reconfig(mddev_t *mddev, int layout, int chunk_size) +static int reshape(mddev_t *mddev) { - int mode = layout & ModeMask; - int count = layout >> ModeShift; + int mode = mddev->new_layout & ModeMask; + int count = mddev->new_layout >> ModeShift; conf_t *conf = mddev->private; - if (chunk_size != -1) - return -EINVAL; + if (mddev->new_layout < 0) + return 0; /* new layout */ if (mode == ClearFaults) @@ -279,6 +279,7 @@ static int reconfig(mddev_t *mddev, int layout, int chunk_size) atomic_set(&conf->counters[mode], count); } else return -EINVAL; + mddev->new_layout = -1; mddev->layout = -1; /* makes sure further changes come through */ return 0; } @@ -298,8 +299,12 @@ static int run(mddev_t *mddev) { mdk_rdev_t *rdev; int i; + conf_t *conf; + + if (md_check_no_bitmap(mddev)) + return -EINVAL; - conf_t *conf = kmalloc(sizeof(*conf), GFP_KERNEL); + conf = kmalloc(sizeof(*conf), GFP_KERNEL); if (!conf) return -ENOMEM; @@ -315,7 +320,7 @@ static int run(mddev_t *mddev) md_set_array_sectors(mddev, faulty_size(mddev, 0, 0)); mddev->private = conf; - reconfig(mddev, mddev->layout, -1); + reshape(mddev); return 0; } @@ -338,7 +343,7 @@ static struct mdk_personality faulty_personality = .run = run, .stop = stop, .status = status, - .reconfig = reconfig, + .check_reshape = reshape, .size = faulty_size, }; diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 64f1f3e0..15c8b7b 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -27,19 +27,27 @@ */ static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector) { - dev_info_t *hash; - linear_conf_t *conf = mddev_to_conf(mddev); - sector_t idx = sector >> conf->sector_shift; + int lo, mid, hi; + linear_conf_t *conf; + + lo = 0; + hi = mddev->raid_disks - 1; + conf = rcu_dereference(mddev->private); /* - * sector_div(a,b) returns the remainer and sets a to a/b + * Binary Search */ - (void)sector_div(idx, conf->spacing); - hash = conf->hash_table[idx]; - while (sector >= hash->num_sectors + hash->start_sector) - hash++; - return hash; + while (hi > lo) { + + mid = 
(hi + lo) / 2; + if (sector < conf->disks[mid].end_sector) + hi = mid; + else + lo = mid + 1; + } + + return conf->disks + lo; } /** @@ -59,8 +67,10 @@ static int linear_mergeable_bvec(struct request_queue *q, unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9; sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); + rcu_read_lock(); dev0 = which_dev(mddev, sector); - maxsectors = dev0->num_sectors - (sector - dev0->start_sector); + maxsectors = dev0->end_sector - sector; + rcu_read_unlock(); if (maxsectors < bio_sectors) maxsectors = 0; @@ -79,46 +89,57 @@ static int linear_mergeable_bvec(struct request_queue *q, static void linear_unplug(struct request_queue *q) { mddev_t *mddev = q->queuedata; - linear_conf_t *conf = mddev_to_conf(mddev); + linear_conf_t *conf; int i; + rcu_read_lock(); + conf = rcu_dereference(mddev->private); + for (i=0; i < mddev->raid_disks; i++) { struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev); blk_unplug(r_queue); } + rcu_read_unlock(); } static int linear_congested(void *data, int bits) { mddev_t *mddev = data; - linear_conf_t *conf = mddev_to_conf(mddev); + linear_conf_t *conf; int i, ret = 0; + rcu_read_lock(); + conf = rcu_dereference(mddev->private); + for (i = 0; i < mddev->raid_disks && !ret ; i++) { struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev); ret |= bdi_congested(&q->backing_dev_info, bits); } + + rcu_read_unlock(); return ret; } static sector_t linear_size(mddev_t *mddev, sector_t sectors, int raid_disks) { - linear_conf_t *conf = mddev_to_conf(mddev); + linear_conf_t *conf; + sector_t array_sectors; + rcu_read_lock(); + conf = rcu_dereference(mddev->private); WARN_ONCE(sectors || raid_disks, "%s does not support generic reshape\n", __func__); + array_sectors = conf->array_sectors; + rcu_read_unlock(); - return conf->array_sectors; + return array_sectors; } static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) { linear_conf_t *conf; - dev_info_t **table; mdk_rdev_t *rdev; - int i, nb_zone, cnt; - sector_t min_sectors; - sector_t curr_sector; + int i, cnt; conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t), GFP_KERNEL); @@ -131,6 +152,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) list_for_each_entry(rdev, &mddev->disks, same_set) { int j = rdev->raid_disk; dev_info_t *disk = conf->disks + j; + sector_t sectors; if (j < 0 || j >= raid_disks || disk->rdev) { printk("linear: disk numbering problem. Aborting!\n"); @@ -138,6 +160,11 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) } disk->rdev = rdev; + if (mddev->chunk_sectors) { + sectors = rdev->sectors; + sector_div(sectors, mddev->chunk_sectors); + rdev->sectors = sectors * mddev->chunk_sectors; + } blk_queue_stack_limits(mddev->queue, rdev->bdev->bd_disk->queue); @@ -149,102 +176,24 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); - disk->num_sectors = rdev->sectors; conf->array_sectors += rdev->sectors; - cnt++; + } if (cnt != raid_disks) { printk("linear: not enough drives present. Aborting!\n"); goto out; } - min_sectors = conf->array_sectors; - sector_div(min_sectors, PAGE_SIZE/sizeof(struct dev_info *)); - if (min_sectors == 0) - min_sectors = 1; - - /* min_sectors is the minimum spacing that will fit the hash - * table in one PAGE. This may be much smaller than needed. 
- * We find the smallest non-terminal set of consecutive devices - * that is larger than min_sectors and use the size of that as - * the actual spacing - */ - conf->spacing = conf->array_sectors; - for (i=0; i < cnt-1 ; i++) { - sector_t tmp = 0; - int j; - for (j = i; j < cnt - 1 && tmp < min_sectors; j++) - tmp += conf->disks[j].num_sectors; - if (tmp >= min_sectors && tmp < conf->spacing) - conf->spacing = tmp; - } - - /* spacing may be too large for sector_div to work with, - * so we might need to pre-shift - */ - conf->sector_shift = 0; - if (sizeof(sector_t) > sizeof(u32)) { - sector_t space = conf->spacing; - while (space > (sector_t)(~(u32)0)) { - space >>= 1; - conf->sector_shift++; - } - } /* - * This code was restructured to work around a gcc-2.95.3 internal - * compiler error. Alter it with care. + * Here we calculate the device offsets. */ - { - sector_t sz; - unsigned round; - unsigned long base; - - sz = conf->array_sectors >> conf->sector_shift; - sz += 1; /* force round-up */ - base = conf->spacing >> conf->sector_shift; - round = sector_div(sz, base); - nb_zone = sz + (round ? 1 : 0); - } - BUG_ON(nb_zone > PAGE_SIZE / sizeof(struct dev_info *)); - - conf->hash_table = kmalloc (sizeof (struct dev_info *) * nb_zone, - GFP_KERNEL); - if (!conf->hash_table) - goto out; + conf->disks[0].end_sector = conf->disks[0].rdev->sectors; - /* - * Here we generate the linear hash table - * First calculate the device offsets. - */ - conf->disks[0].start_sector = 0; for (i = 1; i < raid_disks; i++) - conf->disks[i].start_sector = - conf->disks[i-1].start_sector + - conf->disks[i-1].num_sectors; - - table = conf->hash_table; - i = 0; - for (curr_sector = 0; - curr_sector < conf->array_sectors; - curr_sector += conf->spacing) { - - while (i < raid_disks-1 && - curr_sector >= conf->disks[i+1].start_sector) - i++; - - *table ++ = conf->disks + i; - } - - if (conf->sector_shift) { - conf->spacing >>= conf->sector_shift; - /* round spacing up so that when we divide by it, - * we err on the side of "too-low", which is safest. - */ - conf->spacing++; - } - - BUG_ON(table - conf->hash_table > nb_zone); + conf->disks[i].end_sector = + conf->disks[i-1].end_sector + + conf->disks[i].rdev->sectors; return conf; @@ -257,6 +206,8 @@ static int linear_run (mddev_t *mddev) { linear_conf_t *conf; + if (md_check_no_bitmap(mddev)) + return -EINVAL; mddev->queue->queue_lock = &mddev->queue->__queue_lock; conf = linear_conf(mddev, mddev->raid_disks); @@ -272,6 +223,12 @@ static int linear_run (mddev_t *mddev) return 0; } +static void free_conf(struct rcu_head *head) +{ + linear_conf_t *conf = container_of(head, linear_conf_t, rcu); + kfree(conf); +} + static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev) { /* Adding a drive to a linear array allows the array to grow. @@ -282,7 +239,7 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev) * The current one is never freed until the array is stopped. * This avoids races. 
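 * With this patch the switch is done via rcu_assign_pointer() and the
 * old conf is freed through call_rcu()/free_conf(), so readers follow
 * the usual pattern, as elsewhere in this file:
 *
 *	rcu_read_lock();
 *	conf = rcu_dereference(mddev->private);
 *	... use conf ...
 *	rcu_read_unlock();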
*/ - linear_conf_t *newconf; + linear_conf_t *newconf, *oldconf; if (rdev->saved_raid_disk != mddev->raid_disks) return -EINVAL; @@ -294,25 +251,29 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev) if (!newconf) return -ENOMEM; - newconf->prev = mddev_to_conf(mddev); - mddev->private = newconf; + oldconf = rcu_dereference(mddev->private); mddev->raid_disks++; + rcu_assign_pointer(mddev->private, newconf); md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); set_capacity(mddev->gendisk, mddev->array_sectors); + call_rcu(&oldconf->rcu, free_conf); return 0; } static int linear_stop (mddev_t *mddev) { - linear_conf_t *conf = mddev_to_conf(mddev); - + linear_conf_t *conf = mddev->private; + + /* + * We do not require rcu protection here since + * we hold reconfig_mutex for both linear_add and + * linear_stop, so they cannot race. + * We should make sure any old 'conf's are properly + * freed though. + */ + rcu_barrier(); blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ - do { - linear_conf_t *t = conf->prev; - kfree(conf->hash_table); - kfree(conf); - conf = t; - } while (conf); + kfree(conf); return 0; } @@ -322,6 +283,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio) const int rw = bio_data_dir(bio); mddev_t *mddev = q->queuedata; dev_info_t *tmp_dev; + sector_t start_sector; int cpu; if (unlikely(bio_barrier(bio))) { @@ -335,33 +297,36 @@ static int linear_make_request (struct request_queue *q, struct bio *bio) bio_sectors(bio)); part_stat_unlock(); + rcu_read_lock(); tmp_dev = which_dev(mddev, bio->bi_sector); - - if (unlikely(bio->bi_sector >= (tmp_dev->num_sectors + - tmp_dev->start_sector) - || (bio->bi_sector < - tmp_dev->start_sector))) { + start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors; + + + if (unlikely(bio->bi_sector >= (tmp_dev->end_sector) + || (bio->bi_sector < start_sector))) { char b[BDEVNAME_SIZE]; printk("linear_make_request: Sector %llu out of bounds on " "dev %s: %llu sectors, offset %llu\n", (unsigned long long)bio->bi_sector, bdevname(tmp_dev->rdev->bdev, b), - (unsigned long long)tmp_dev->num_sectors, - (unsigned long long)tmp_dev->start_sector); + (unsigned long long)tmp_dev->rdev->sectors, + (unsigned long long)start_sector); + rcu_read_unlock(); bio_io_error(bio); return 0; } if (unlikely(bio->bi_sector + (bio->bi_size >> 9) > - tmp_dev->start_sector + tmp_dev->num_sectors)) { + tmp_dev->end_sector)) { /* This bio crosses a device boundary, so we have to * split it. 
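linear_add() now fully initialises the new conf before publishing it with rcu_assign_pointer(), and frees the old one through call_rcu(); linear_stop() runs rcu_barrier() so pending callbacks finish before the final kfree(). The userspace sketch below imitates only the publish/read ordering with C11 atomics (release store paired with acquire load); it is an analogy for the kernel primitives, with grace-period reclamation deliberately elided.

/* Publish-side ordering: build the object, then make it visible with a
 * release store; readers take a snapshot with an acquire load. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct conf {
    int raid_disks;
    long array_sectors;
};

static _Atomic(struct conf *) live_conf;

static void publish(struct conf *newconf)
{
    /* everything written to *newconf above is visible to readers */
    atomic_store_explicit(&live_conf, newconf, memory_order_release);
}

static struct conf *reader_snapshot(void)
{
    return atomic_load_explicit(&live_conf, memory_order_acquire);
}

int main(void)
{
    struct conf *first = malloc(sizeof(*first));

    first->raid_disks = 2;
    first->array_sectors = 350;
    publish(first);

    struct conf *c = reader_snapshot();
    printf("disks=%d sectors=%ld\n", c->raid_disks, c->array_sectors);

    /* a real linear_add() would call_rcu() the previous conf here, and
     * linear_stop() would rcu_barrier() before freeing the last one */
    free(first);
    return 0;
}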
*/ struct bio_pair *bp; + sector_t end_sector = tmp_dev->end_sector; + + rcu_read_unlock(); - bp = bio_split(bio, - tmp_dev->start_sector + tmp_dev->num_sectors - - bio->bi_sector); + bp = bio_split(bio, end_sector - bio->bi_sector); if (linear_make_request(q, &bp->bio1)) generic_make_request(&bp->bio1); @@ -372,8 +337,9 @@ static int linear_make_request (struct request_queue *q, struct bio *bio) } bio->bi_bdev = tmp_dev->rdev->bdev; - bio->bi_sector = bio->bi_sector - tmp_dev->start_sector + bio->bi_sector = bio->bi_sector - start_sector + tmp_dev->rdev->data_offset; + rcu_read_unlock(); return 1; } @@ -381,7 +347,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio) static void linear_status (struct seq_file *seq, mddev_t *mddev) { - seq_printf(seq, " %dk rounding", mddev->chunk_size/1024); + seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2); } diff --git a/drivers/md/linear.h b/drivers/md/linear.h index bf81795..0ce29b6 100644 --- a/drivers/md/linear.h +++ b/drivers/md/linear.h @@ -3,27 +3,19 @@ struct dev_info { mdk_rdev_t *rdev; - sector_t num_sectors; - sector_t start_sector; + sector_t end_sector; }; typedef struct dev_info dev_info_t; struct linear_private_data { - struct linear_private_data *prev; /* earlier version */ - dev_info_t **hash_table; - sector_t spacing; sector_t array_sectors; - int sector_shift; /* shift before dividing - * by spacing - */ dev_info_t disks[0]; + struct rcu_head rcu; }; typedef struct linear_private_data linear_conf_t; -#define mddev_to_conf(mddev) ((linear_conf_t *) mddev->private) - #endif diff --git a/drivers/md/md.c b/drivers/md/md.c index 20f6ac3..09be637 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -440,15 +440,6 @@ static inline sector_t calc_dev_sboffset(struct block_device *bdev) return MD_NEW_SIZE_SECTORS(num_sectors); } -static sector_t calc_num_sectors(mdk_rdev_t *rdev, unsigned chunk_size) -{ - sector_t num_sectors = rdev->sb_start; - - if (chunk_size) - num_sectors &= ~((sector_t)chunk_size/512 - 1); - return num_sectors; -} - static int alloc_disk_sb(mdk_rdev_t * rdev) { if (rdev->sb_page) @@ -745,6 +736,24 @@ struct super_type { }; /* + * Check that the given mddev has no bitmap. + * + * This function is called from the run method of all personalities that do not + * support bitmaps. It prints an error message and returns non-zero if mddev + * has a bitmap. Otherwise, it returns 0. 
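With start_sector gone from dev_info_t, linear_make_request() derives it as end_sector - rdev->sectors and splits any bio that crosses end_sector. A standalone sketch of that boundary arithmetic, with made-up example values:

/* Boundary check and split length, as in linear_make_request(). */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

int main(void)
{
    sector_t end_sector  = 150;  /* device's cumulative end */
    sector_t dev_sectors = 50;   /* device length           */
    sector_t bi_sector   = 140;  /* bio start               */
    sector_t bi_len      = 16;   /* bio length in sectors   */

    sector_t start_sector = end_sector - dev_sectors;

    if (bi_sector < start_sector || bi_sector >= end_sector)
        printf("out of bounds\n");
    else if (bi_sector + bi_len > end_sector)
        /* bio crosses the device boundary: first fragment ends there */
        printf("split after %llu sectors\n",
               (unsigned long long)(end_sector - bi_sector));   /* 10 */
    else
        printf("fits entirely\n");
    return 0;
}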
+ * + */ +int md_check_no_bitmap(mddev_t *mddev) +{ + if (!mddev->bitmap_file && !mddev->bitmap_offset) + return 0; + printk(KERN_ERR "%s: bitmaps are not supported for %s\n", + mdname(mddev), mddev->pers->name); + return 1; +} +EXPORT_SYMBOL(md_check_no_bitmap); + +/* * load_super for 0.90.0 */ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) @@ -797,17 +806,6 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version rdev->data_offset = 0; rdev->sb_size = MD_SB_BYTES; - if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) { - if (sb->level != 1 && sb->level != 4 - && sb->level != 5 && sb->level != 6 - && sb->level != 10) { - /* FIXME use a better test */ - printk(KERN_WARNING - "md: bitmaps not supported for this level.\n"); - goto abort; - } - } - if (sb->level == LEVEL_MULTIPATH) rdev->desc_nr = -1; else @@ -836,7 +834,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version else ret = 0; } - rdev->sectors = calc_num_sectors(rdev, sb->chunk_size); + rdev->sectors = rdev->sb_start; if (rdev->sectors < sb->size * 2 && sb->level > 1) /* "this cannot possibly happen" ... */ @@ -866,7 +864,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) mddev->minor_version = sb->minor_version; mddev->patch_version = sb->patch_version; mddev->external = 0; - mddev->chunk_size = sb->chunk_size; + mddev->chunk_sectors = sb->chunk_size >> 9; mddev->ctime = sb->ctime; mddev->utime = sb->utime; mddev->level = sb->level; @@ -883,13 +881,13 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) mddev->delta_disks = sb->delta_disks; mddev->new_level = sb->new_level; mddev->new_layout = sb->new_layout; - mddev->new_chunk = sb->new_chunk; + mddev->new_chunk_sectors = sb->new_chunk >> 9; } else { mddev->reshape_position = MaxSector; mddev->delta_disks = 0; mddev->new_level = mddev->level; mddev->new_layout = mddev->layout; - mddev->new_chunk = mddev->chunk_size; + mddev->new_chunk_sectors = mddev->chunk_sectors; } if (sb->state & (1<<MD_SB_CLEAN)) @@ -1004,7 +1002,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) sb->new_level = mddev->new_level; sb->delta_disks = mddev->delta_disks; sb->new_layout = mddev->new_layout; - sb->new_chunk = mddev->new_chunk; + sb->new_chunk = mddev->new_chunk_sectors << 9; } mddev->minor_version = sb->minor_version; if (mddev->in_sync) @@ -1018,7 +1016,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) sb->recovery_cp = 0; sb->layout = mddev->layout; - sb->chunk_size = mddev->chunk_size; + sb->chunk_size = mddev->chunk_sectors << 9; if (mddev->bitmap && mddev->bitmap_file == NULL) sb->state |= (1<<MD_SB_BITMAP_PRESENT); @@ -1185,17 +1183,6 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) bdevname(rdev->bdev,b)); return -EINVAL; } - if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)) { - if (sb->level != cpu_to_le32(1) && - sb->level != cpu_to_le32(4) && - sb->level != cpu_to_le32(5) && - sb->level != cpu_to_le32(6) && - sb->level != cpu_to_le32(10)) { - printk(KERN_WARNING - "md: bitmaps not supported for this level.\n"); - return -EINVAL; - } - } rdev->preferred_minor = 0xffff; rdev->data_offset = le64_to_cpu(sb->data_offset); @@ -1248,9 +1235,6 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) if (rdev->sectors < le64_to_cpu(sb->data_size)) return -EINVAL; rdev->sectors = le64_to_cpu(sb->data_size); - if (le32_to_cpu(sb->chunksize)) - rdev->sectors &= 
~((sector_t)le32_to_cpu(sb->chunksize) - 1); - if (le64_to_cpu(sb->size) > rdev->sectors) return -EINVAL; return ret; @@ -1271,7 +1255,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) mddev->major_version = 1; mddev->patch_version = 0; mddev->external = 0; - mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9; + mddev->chunk_sectors = le32_to_cpu(sb->chunksize); mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1); mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1); mddev->level = le32_to_cpu(sb->level); @@ -1297,13 +1281,13 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) mddev->delta_disks = le32_to_cpu(sb->delta_disks); mddev->new_level = le32_to_cpu(sb->new_level); mddev->new_layout = le32_to_cpu(sb->new_layout); - mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9; + mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk); } else { mddev->reshape_position = MaxSector; mddev->delta_disks = 0; mddev->new_level = mddev->level; mddev->new_layout = mddev->layout; - mddev->new_chunk = mddev->chunk_size; + mddev->new_chunk_sectors = mddev->chunk_sectors; } } else if (mddev->pers == NULL) { @@ -1375,7 +1359,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) sb->raid_disks = cpu_to_le32(mddev->raid_disks); sb->size = cpu_to_le64(mddev->dev_sectors); - sb->chunksize = cpu_to_le32(mddev->chunk_size >> 9); + sb->chunksize = cpu_to_le32(mddev->chunk_sectors); sb->level = cpu_to_le32(mddev->level); sb->layout = cpu_to_le32(mddev->layout); @@ -1402,7 +1386,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) sb->new_layout = cpu_to_le32(mddev->new_layout); sb->delta_disks = cpu_to_le32(mddev->delta_disks); sb->new_level = cpu_to_le32(mddev->new_level); - sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9); + sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors); } max_dev = 0; @@ -1897,6 +1881,7 @@ static void md_update_sb(mddev_t * mddev, int force_change) int sync_req; int nospares = 0; + mddev->utime = get_seconds(); if (mddev->external) return; repeat: @@ -1926,7 +1911,6 @@ repeat: nospares = 0; sync_req = mddev->in_sync; - mddev->utime = get_seconds(); /* If this is just a dirty<->clean transition, and the array is clean * and 'events' is odd, we can roll back to the previous clean state */ @@ -2597,15 +2581,6 @@ static void analyze_sbs(mddev_t * mddev) clear_bit(In_sync, &rdev->flags); } } - - - - if (mddev->recovery_cp != MaxSector && - mddev->level >= 1) - printk(KERN_ERR "md: %s: raid array is not clean" - " -- starting background reconstruction\n", - mdname(mddev)); - } static void md_safemode_timeout(unsigned long data); @@ -2746,7 +2721,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len) if (IS_ERR(priv)) { mddev->new_level = mddev->level; mddev->new_layout = mddev->layout; - mddev->new_chunk = mddev->chunk_size; + mddev->new_chunk_sectors = mddev->chunk_sectors; mddev->raid_disks -= mddev->delta_disks; mddev->delta_disks = 0; module_put(pers->owner); @@ -2764,7 +2739,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len) strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); mddev->level = mddev->new_level; mddev->layout = mddev->new_layout; - mddev->chunk_size = mddev->new_chunk; + mddev->chunk_sectors = mddev->new_chunk_sectors; mddev->delta_disks = 0; pers->run(mddev); mddev_resume(mddev); @@ -2800,11 +2775,14 @@ layout_store(mddev_t *mddev, const char *buf, size_t len) if (mddev->pers) { int err; - if (mddev->pers->reconfig == NULL) + if (mddev->pers->check_reshape == NULL) return -EBUSY; - err = 
mddev->pers->reconfig(mddev, n, -1); - if (err) + mddev->new_layout = n; + err = mddev->pers->check_reshape(mddev); + if (err) { + mddev->new_layout = mddev->layout; return err; + } } else { mddev->new_layout = n; if (mddev->reshape_position == MaxSector) @@ -2857,10 +2835,11 @@ static ssize_t chunk_size_show(mddev_t *mddev, char *page) { if (mddev->reshape_position != MaxSector && - mddev->chunk_size != mddev->new_chunk) - return sprintf(page, "%d (%d)\n", mddev->new_chunk, - mddev->chunk_size); - return sprintf(page, "%d\n", mddev->chunk_size); + mddev->chunk_sectors != mddev->new_chunk_sectors) + return sprintf(page, "%d (%d)\n", + mddev->new_chunk_sectors << 9, + mddev->chunk_sectors << 9); + return sprintf(page, "%d\n", mddev->chunk_sectors << 9); } static ssize_t @@ -2874,15 +2853,18 @@ chunk_size_store(mddev_t *mddev, const char *buf, size_t len) if (mddev->pers) { int err; - if (mddev->pers->reconfig == NULL) + if (mddev->pers->check_reshape == NULL) return -EBUSY; - err = mddev->pers->reconfig(mddev, -1, n); - if (err) + mddev->new_chunk_sectors = n >> 9; + err = mddev->pers->check_reshape(mddev); + if (err) { + mddev->new_chunk_sectors = mddev->chunk_sectors; return err; + } } else { - mddev->new_chunk = n; + mddev->new_chunk_sectors = n >> 9; if (mddev->reshape_position == MaxSector) - mddev->chunk_size = n; + mddev->chunk_sectors = n >> 9; } return len; } @@ -3527,8 +3509,9 @@ min_sync_store(mddev_t *mddev, const char *buf, size_t len) return -EBUSY; /* Must be a multiple of chunk_size */ - if (mddev->chunk_size) { - if (min & (sector_t)((mddev->chunk_size>>9)-1)) + if (mddev->chunk_sectors) { + sector_t temp = min; + if (sector_div(temp, mddev->chunk_sectors)) return -EINVAL; } mddev->resync_min = min; @@ -3564,8 +3547,9 @@ max_sync_store(mddev_t *mddev, const char *buf, size_t len) return -EBUSY; /* Must be a multiple of chunk_size */ - if (mddev->chunk_size) { - if (max & (sector_t)((mddev->chunk_size>>9)-1)) + if (mddev->chunk_sectors) { + sector_t temp = max; + if (sector_div(temp, mddev->chunk_sectors)) return -EINVAL; } mddev->resync_max = max; @@ -3656,7 +3640,7 @@ reshape_position_store(mddev_t *mddev, const char *buf, size_t len) mddev->delta_disks = 0; mddev->new_level = mddev->level; mddev->new_layout = mddev->layout; - mddev->new_chunk = mddev->chunk_size; + mddev->new_chunk_sectors = mddev->chunk_sectors; return len; } @@ -3976,11 +3960,9 @@ static int start_dirty_degraded; static int do_md_run(mddev_t * mddev) { int err; - int chunk_size; mdk_rdev_t *rdev; struct gendisk *disk; struct mdk_personality *pers; - char b[BDEVNAME_SIZE]; if (list_empty(&mddev->disks)) /* cannot run an array with no devices.. 
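min_sync_store()/max_sync_store() in this hunk switch from a mask test to sector_div() so that non-power-of-two chunk sizes are handled correctly, while the sysfs chunk_size stays in bytes and mddev->chunk_sectors holds sectors. A userspace sketch contrasting the two tests ('%' standing in for sector_div()):

/* Multiple-of-chunk tests: mask vs. remainder. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

static int old_mask_test(sector_t x, unsigned int chunk)  /* pow2 only */
{
    return (x & (sector_t)(chunk - 1)) == 0;
}

static int new_div_test(sector_t x, unsigned int chunk)   /* any chunk */
{
    return x % chunk == 0;
}

int main(void)
{
    unsigned int chunk = 24;   /* non-power-of-two, legal after this series */

    /* 48 is a multiple of 24, yet 48 & 23 == 16, so the mask test lies */
    printf("mask=%d div=%d\n",
           old_mask_test(48, chunk), new_div_test(48, chunk));  /* 0 1 */

    /* sysfs takes bytes; the kernel now stores sectors */
    printf("chunk_sectors=%u\n", 12288u >> 9);                  /* 24  */
    return 0;
}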
*/ @@ -3998,38 +3980,6 @@ static int do_md_run(mddev_t * mddev) analyze_sbs(mddev); } - chunk_size = mddev->chunk_size; - - if (chunk_size) { - if (chunk_size > MAX_CHUNK_SIZE) { - printk(KERN_ERR "too big chunk_size: %d > %d\n", - chunk_size, MAX_CHUNK_SIZE); - return -EINVAL; - } - /* - * chunk-size has to be a power of 2 - */ - if ( (1 << ffz(~chunk_size)) != chunk_size) { - printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size); - return -EINVAL; - } - - /* devices must have minimum size of one chunk */ - list_for_each_entry(rdev, &mddev->disks, same_set) { - if (test_bit(Faulty, &rdev->flags)) - continue; - if (rdev->sectors < chunk_size / 512) { - printk(KERN_WARNING - "md: Dev %s smaller than chunk_size:" - " %llu < %d\n", - bdevname(rdev->bdev,b), - (unsigned long long)rdev->sectors, - chunk_size / 512); - return -EINVAL; - } - } - } - if (mddev->level != LEVEL_NONE) request_module("md-level-%d", mddev->level); else if (mddev->clevel[0]) @@ -4405,7 +4355,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) mddev->flags = 0; mddev->ro = 0; mddev->metadata_type[0] = 0; - mddev->chunk_size = 0; + mddev->chunk_sectors = 0; mddev->ctime = mddev->utime = 0; mddev->layout = 0; mddev->max_disks = 0; @@ -4413,7 +4363,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) mddev->delta_disks = 0; mddev->new_level = LEVEL_NONE; mddev->new_layout = 0; - mddev->new_chunk = 0; + mddev->new_chunk_sectors = 0; mddev->curr_resync = 0; mddev->resync_mismatches = 0; mddev->suspend_lo = mddev->suspend_hi = 0; @@ -4618,7 +4568,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg) info.spare_disks = spare; info.layout = mddev->layout; - info.chunk_size = mddev->chunk_size; + info.chunk_size = mddev->chunk_sectors << 9; if (copy_to_user(arg, &info, sizeof(info))) return -EFAULT; @@ -4843,7 +4793,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; } else rdev->sb_start = calc_dev_sboffset(rdev->bdev); - rdev->sectors = calc_num_sectors(rdev, mddev->chunk_size); + rdev->sectors = rdev->sb_start; err = bind_rdev_to_array(rdev, mddev); if (err) { @@ -4913,7 +4863,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev) else rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; - rdev->sectors = calc_num_sectors(rdev, mddev->chunk_size); + rdev->sectors = rdev->sb_start; if (test_bit(Faulty, &rdev->flags)) { printk(KERN_WARNING @@ -5062,7 +5012,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) mddev->external = 0; mddev->layout = info->layout; - mddev->chunk_size = info->chunk_size; + mddev->chunk_sectors = info->chunk_size >> 9; mddev->max_disks = MD_SB_DISKS; @@ -5081,7 +5031,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) get_random_bytes(mddev->uuid, 16); mddev->new_level = mddev->level; - mddev->new_chunk = mddev->chunk_size; + mddev->new_chunk_sectors = mddev->chunk_sectors; mddev->new_layout = mddev->layout; mddev->delta_disks = 0; @@ -5191,7 +5141,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) mddev->level != info->level || /* mddev->layout != info->layout || */ !mddev->persistent != info->not_persistent|| - mddev->chunk_size != info->chunk_size || + mddev->chunk_sectors != info->chunk_size >> 9 || /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ ((state^info->state) & 0xfffffe00) ) @@ -5215,10 +5165,15 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) * we don't need to 
do anything at the md level, the * personality will take care of it all. */ - if (mddev->pers->reconfig == NULL) + if (mddev->pers->check_reshape == NULL) return -EINVAL; - else - return mddev->pers->reconfig(mddev, info->layout, -1); + else { + mddev->new_layout = info->layout; + rv = mddev->pers->check_reshape(mddev); + if (rv) + mddev->new_layout = mddev->layout; + return rv; + } } if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) rv = update_size(mddev, (sector_t)info->size * 2); @@ -6717,7 +6672,8 @@ void md_check_recovery(mddev_t *mddev) */ if (mddev->reshape_position != MaxSector) { - if (mddev->pers->check_reshape(mddev) != 0) + if (mddev->pers->check_reshape == NULL || + mddev->pers->check_reshape(mddev) != 0) /* Cannot proceed */ goto unlock; set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); diff --git a/drivers/md/md.h b/drivers/md/md.h index 8227ab9..9430a11 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -30,13 +30,6 @@ typedef struct mddev_s mddev_t; typedef struct mdk_rdev_s mdk_rdev_t; /* - * options passed in raidrun: - */ - -/* Currently this must fit in an 'int' */ -#define MAX_CHUNK_SIZE (1<<30) - -/* * MD's 'extended' device */ struct mdk_rdev_s @@ -145,7 +138,7 @@ struct mddev_s int external; /* metadata is * managed externally */ char metadata_type[17]; /* externally set*/ - int chunk_size; + int chunk_sectors; time_t ctime, utime; int level, layout; char clevel[16]; @@ -166,7 +159,8 @@ struct mddev_s * If reshape_position is MaxSector, then no reshape is happening (yet). */ sector_t reshape_position; - int delta_disks, new_level, new_layout, new_chunk; + int delta_disks, new_level, new_layout; + int new_chunk_sectors; struct mdk_thread_s *thread; /* management thread */ struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */ @@ -325,7 +319,6 @@ struct mdk_personality int (*check_reshape) (mddev_t *mddev); int (*start_reshape) (mddev_t *mddev); void (*finish_reshape) (mddev_t *mddev); - int (*reconfig) (mddev_t *mddev, int layout, int chunk_size); /* quiesce moves between quiescence states * 0 - fully active * 1 - no new requests allowed @@ -437,5 +430,6 @@ extern void md_new_event(mddev_t *mddev); extern int md_allow_write(mddev_t *mddev); extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev); extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors); +extern int md_check_no_bitmap(mddev_t *mddev); #endif /* _MD_MD_H */ diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 4ee31aa..cbe368f 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -58,7 +58,7 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh) { unsigned long flags; mddev_t *mddev = mp_bh->mddev; - multipath_conf_t *conf = mddev_to_conf(mddev); + multipath_conf_t *conf = mddev->private; spin_lock_irqsave(&conf->device_lock, flags); list_add(&mp_bh->retry_list, &conf->retry_list); @@ -75,7 +75,7 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh) static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err) { struct bio *bio = mp_bh->master_bio; - multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev); + multipath_conf_t *conf = mp_bh->mddev->private; bio_endio(bio, err); mempool_free(mp_bh, conf->pool); @@ -85,7 +85,7 @@ static void multipath_end_request(struct bio *bio, int error) { int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private); - multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev); + 
multipath_conf_t *conf = mp_bh->mddev->private; mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev; if (uptodate) @@ -107,7 +107,7 @@ static void multipath_end_request(struct bio *bio, int error) static void unplug_slaves(mddev_t *mddev) { - multipath_conf_t *conf = mddev_to_conf(mddev); + multipath_conf_t *conf = mddev->private; int i; rcu_read_lock(); @@ -138,7 +138,7 @@ static void multipath_unplug(struct request_queue *q) static int multipath_make_request (struct request_queue *q, struct bio * bio) { mddev_t *mddev = q->queuedata; - multipath_conf_t *conf = mddev_to_conf(mddev); + multipath_conf_t *conf = mddev->private; struct multipath_bh * mp_bh; struct multipath_info *multipath; const int rw = bio_data_dir(bio); @@ -180,7 +180,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio) static void multipath_status (struct seq_file *seq, mddev_t *mddev) { - multipath_conf_t *conf = mddev_to_conf(mddev); + multipath_conf_t *conf = mddev->private; int i; seq_printf (seq, " [%d/%d] [", conf->raid_disks, @@ -195,7 +195,7 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev) static int multipath_congested(void *data, int bits) { mddev_t *mddev = data; - multipath_conf_t *conf = mddev_to_conf(mddev); + multipath_conf_t *conf = mddev->private; int i, ret = 0; rcu_read_lock(); @@ -220,7 +220,7 @@ static int multipath_congested(void *data, int bits) */ static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev) { - multipath_conf_t *conf = mddev_to_conf(mddev); + multipath_conf_t *conf = mddev->private; if (conf->working_disks <= 1) { /* @@ -367,7 +367,7 @@ static void multipathd (mddev_t *mddev) struct multipath_bh *mp_bh; struct bio *bio; unsigned long flags; - multipath_conf_t *conf = mddev_to_conf(mddev); + multipath_conf_t *conf = mddev->private; struct list_head *head = &conf->retry_list; md_check_recovery(mddev); @@ -421,6 +421,9 @@ static int multipath_run (mddev_t *mddev) struct multipath_info *disk; mdk_rdev_t *rdev; + if (md_check_no_bitmap(mddev)) + return -EINVAL; + if (mddev->level != LEVEL_MULTIPATH) { printk("multipath: %s: raid level not set to multipath IO (%d)\n", mdname(mddev), mddev->level); @@ -531,7 +534,7 @@ out: static int multipath_stop (mddev_t *mddev) { - multipath_conf_t *conf = mddev_to_conf(mddev); + multipath_conf_t *conf = mddev->private; md_unregister_thread(mddev->thread); mddev->thread = NULL; diff --git a/drivers/md/multipath.h b/drivers/md/multipath.h index 6fa70b4..d1c2a8d 100644 --- a/drivers/md/multipath.h +++ b/drivers/md/multipath.h @@ -19,12 +19,6 @@ struct multipath_private_data { typedef struct multipath_private_data multipath_conf_t; /* - * this is the only point in the RAID code where we violate - * C type safety. mddev->private is an 'opaque' pointer. - */ -#define mddev_to_conf(mddev) ((multipath_conf_t *) mddev->private) - -/* * this is our 'private' 'collective' MULTIPATH buffer head. 
* it contains information about what kind of IO operations were started * for this MULTIPATH operation, and about their status: diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 925507e..ab4a489 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -26,8 +26,8 @@ static void raid0_unplug(struct request_queue *q) { mddev_t *mddev = q->queuedata; - raid0_conf_t *conf = mddev_to_conf(mddev); - mdk_rdev_t **devlist = conf->strip_zone[0].dev; + raid0_conf_t *conf = mddev->private; + mdk_rdev_t **devlist = conf->devlist; int i; for (i=0; i<mddev->raid_disks; i++) { @@ -40,8 +40,8 @@ static void raid0_unplug(struct request_queue *q) static int raid0_congested(void *data, int bits) { mddev_t *mddev = data; - raid0_conf_t *conf = mddev_to_conf(mddev); - mdk_rdev_t **devlist = conf->strip_zone[0].dev; + raid0_conf_t *conf = mddev->private; + mdk_rdev_t **devlist = conf->devlist; int i, ret = 0; for (i = 0; i < mddev->raid_disks && !ret ; i++) { @@ -52,27 +52,60 @@ static int raid0_congested(void *data, int bits) return ret; } +/* + * inform the user of the raid configuration +*/ +static void dump_zones(mddev_t *mddev) +{ + int j, k, h; + sector_t zone_size = 0; + sector_t zone_start = 0; + char b[BDEVNAME_SIZE]; + raid0_conf_t *conf = mddev->private; + printk(KERN_INFO "******* %s configuration *********\n", + mdname(mddev)); + h = 0; + for (j = 0; j < conf->nr_strip_zones; j++) { + printk(KERN_INFO "zone%d=[", j); + for (k = 0; k < conf->strip_zone[j].nb_dev; k++) + printk("%s/", + bdevname(conf->devlist[j*mddev->raid_disks + + k]->bdev, b)); + printk("]\n"); + + zone_size = conf->strip_zone[j].zone_end - zone_start; + printk(KERN_INFO " zone offset=%llukb " + "device offset=%llukb size=%llukb\n", + (unsigned long long)zone_start>>1, + (unsigned long long)conf->strip_zone[j].dev_start>>1, + (unsigned long long)zone_size>>1); + zone_start = conf->strip_zone[j].zone_end; + } + printk(KERN_INFO "**********************************\n\n"); +} -static int create_strip_zones (mddev_t *mddev) +static int create_strip_zones(mddev_t *mddev) { - int i, c, j; - sector_t current_start, curr_zone_start; - sector_t min_spacing; - raid0_conf_t *conf = mddev_to_conf(mddev); - mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev; + int i, c, j, err; + sector_t curr_zone_end, sectors; + mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev, **dev; struct strip_zone *zone; int cnt; char b[BDEVNAME_SIZE]; - - /* - * The number of 'same size groups' - */ - conf->nr_strip_zones = 0; - + raid0_conf_t *conf = kzalloc(sizeof(*conf), GFP_KERNEL); + + if (!conf) + return -ENOMEM; list_for_each_entry(rdev1, &mddev->disks, same_set) { printk(KERN_INFO "raid0: looking at %s\n", bdevname(rdev1->bdev,b)); c = 0; + + /* round size to chunk_size */ + sectors = rdev1->sectors; + sector_div(sectors, mddev->chunk_sectors); + rdev1->sectors = sectors * mddev->chunk_sectors; + list_for_each_entry(rdev2, &mddev->disks, same_set) { printk(KERN_INFO "raid0: comparing %s(%llu)", bdevname(rdev1->bdev,b), @@ -103,16 +136,16 @@ static int create_strip_zones (mddev_t *mddev) } } printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones); - + err = -ENOMEM; conf->strip_zone = kzalloc(sizeof(struct strip_zone)* conf->nr_strip_zones, GFP_KERNEL); if (!conf->strip_zone) - return 1; + goto abort; conf->devlist = kzalloc(sizeof(mdk_rdev_t*)* conf->nr_strip_zones*mddev->raid_disks, GFP_KERNEL); if (!conf->devlist) - return 1; + goto abort; /* The first zone must contain all devices, so here we check that * there is a proper alignment of slots to 
devices and find them all @@ -120,7 +153,8 @@ static int create_strip_zones (mddev_t *mddev) zone = &conf->strip_zone[0]; cnt = 0; smallest = NULL; - zone->dev = conf->devlist; + dev = conf->devlist; + err = -EINVAL; list_for_each_entry(rdev1, &mddev->disks, same_set) { int j = rdev1->raid_disk; @@ -129,12 +163,12 @@ static int create_strip_zones (mddev_t *mddev) "aborting!\n", j); goto abort; } - if (zone->dev[j]) { + if (dev[j]) { printk(KERN_ERR "raid0: multiple devices for %d - " "aborting!\n", j); goto abort; } - zone->dev[j] = rdev1; + dev[j] = rdev1; blk_queue_stack_limits(mddev->queue, rdev1->bdev->bd_disk->queue); @@ -157,34 +191,32 @@ static int create_strip_zones (mddev_t *mddev) goto abort; } zone->nb_dev = cnt; - zone->sectors = smallest->sectors * cnt; - zone->zone_start = 0; + zone->zone_end = smallest->sectors * cnt; - current_start = smallest->sectors; - curr_zone_start = zone->sectors; + curr_zone_end = zone->zone_end; /* now do the other zones */ for (i = 1; i < conf->nr_strip_zones; i++) { zone = conf->strip_zone + i; - zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks; + dev = conf->devlist + i * mddev->raid_disks; printk(KERN_INFO "raid0: zone %d\n", i); - zone->dev_start = current_start; + zone->dev_start = smallest->sectors; smallest = NULL; c = 0; for (j=0; j<cnt; j++) { char b[BDEVNAME_SIZE]; - rdev = conf->strip_zone[0].dev[j]; + rdev = conf->devlist[j]; printk(KERN_INFO "raid0: checking %s ...", bdevname(rdev->bdev, b)); - if (rdev->sectors <= current_start) { + if (rdev->sectors <= zone->dev_start) { printk(KERN_INFO " nope.\n"); continue; } printk(KERN_INFO " contained as device %d\n", c); - zone->dev[c] = rdev; + dev[c] = rdev; c++; if (!smallest || rdev->sectors < smallest->sectors) { smallest = rdev; @@ -194,47 +226,39 @@ static int create_strip_zones (mddev_t *mddev) } zone->nb_dev = c; - zone->sectors = (smallest->sectors - current_start) * c; + sectors = (smallest->sectors - zone->dev_start) * c; printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n", - zone->nb_dev, (unsigned long long)zone->sectors); + zone->nb_dev, (unsigned long long)sectors); - zone->zone_start = curr_zone_start; - curr_zone_start += zone->sectors; + curr_zone_end += sectors; + zone->zone_end = curr_zone_end; - current_start = smallest->sectors; printk(KERN_INFO "raid0: current zone start: %llu\n", - (unsigned long long)current_start); - } - - /* Now find appropriate hash spacing. - * We want a number which causes most hash entries to cover - * at most two strips, but the hash table must be at most - * 1 PAGE. We choose the smallest strip, or contiguous collection - * of strips, that has big enough size. We never consider the last - * strip though as it's size has no bearing on the efficacy of the hash - * table. 
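With the hash-spacing code removed, create_strip_zones() only records each zone's zone_end (cumulative end in array sectors) and dev_start. The userspace sketch below reproduces that accounting under simplifying assumptions stated in the comments (device sizes pre-sorted and already rounded to the chunk size); it is not the kernel routine itself.

/* Zone accounting with zone_end: each zone spans all devices that
 * still have capacity left, to the depth of the smallest of them. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

int main(void)
{
    sector_t size[] = { 100, 200, 200 };   /* per-device sectors, ascending */
    int ndisks = 3;

    sector_t dev_start = 0, zone_end = 0;
    int i = 0;

    while (i < ndisks) {
        int nb_dev = ndisks - i;               /* devices in this zone      */
        sector_t depth = size[i] - dev_start;  /* zone depth on each device */

        zone_end += depth * nb_dev;
        printf("zone: nb_dev=%d dev_start=%llu zone_end=%llu\n",
               nb_dev, (unsigned long long)dev_start,
               (unsigned long long)zone_end);

        dev_start = size[i];
        while (i < ndisks && size[i] == dev_start)
            i++;                               /* these devices are used up */
    }
    return 0;
}

For the sizes above this prints zones ending at 300 and 500 sectors (100*3 + 100*2), matching the dev_start/zone_end fields the patch keeps.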
- */ - conf->spacing = curr_zone_start; - min_spacing = curr_zone_start; - sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*)); - for (i=0; i < conf->nr_strip_zones-1; i++) { - sector_t s = 0; - for (j = i; j < conf->nr_strip_zones - 1 && - s < min_spacing; j++) - s += conf->strip_zone[j].sectors; - if (s >= min_spacing && s < conf->spacing) - conf->spacing = s; + (unsigned long long)smallest->sectors); } - mddev->queue->unplug_fn = raid0_unplug; - mddev->queue->backing_dev_info.congested_fn = raid0_congested; mddev->queue->backing_dev_info.congested_data = mddev; + /* + * now since we have the hard sector sizes, we can make sure + * chunk size is a multiple of that sector size + */ + if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) { + printk(KERN_ERR "%s chunk_size of %d not valid\n", + mdname(mddev), + mddev->chunk_sectors << 9); + goto abort; + } printk(KERN_INFO "raid0: done.\n"); + mddev->private = conf; return 0; - abort: - return 1; +abort: + kfree(conf->strip_zone); + kfree(conf->devlist); + kfree(conf); + mddev->private = NULL; + return err; } /** @@ -252,10 +276,15 @@ static int raid0_mergeable_bvec(struct request_queue *q, mddev_t *mddev = q->queuedata; sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); int max; - unsigned int chunk_sectors = mddev->chunk_size >> 9; + unsigned int chunk_sectors = mddev->chunk_sectors; unsigned int bio_sectors = bvm->bi_size >> 9; - max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; + if (is_power_of_2(chunk_sectors)) + max = (chunk_sectors - ((sector & (chunk_sectors-1)) + + bio_sectors)) << 9; + else + max = (chunk_sectors - (sector_div(sector, chunk_sectors) + + bio_sectors)) << 9; if (max < 0) max = 0; /* bio_add cannot handle a negative return */ if (max <= biovec->bv_len && bio_sectors == 0) return biovec->bv_len; @@ -277,84 +306,28 @@ static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks) return array_sectors; } -static int raid0_run (mddev_t *mddev) +static int raid0_run(mddev_t *mddev) { - unsigned cur=0, i=0, nb_zone; - s64 sectors; - raid0_conf_t *conf; + int ret; - if (mddev->chunk_size == 0) { - printk(KERN_ERR "md/raid0: non-zero chunk size required.\n"); + if (mddev->chunk_sectors == 0) { + printk(KERN_ERR "md/raid0: chunk size must be set.\n"); return -EINVAL; } - printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n", - mdname(mddev), - mddev->chunk_size >> 9, - (mddev->chunk_size>>1)-1); - blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9); - blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1); + if (md_check_no_bitmap(mddev)) + return -EINVAL; + blk_queue_max_sectors(mddev->queue, mddev->chunk_sectors); mddev->queue->queue_lock = &mddev->queue->__queue_lock; - conf = kmalloc(sizeof (raid0_conf_t), GFP_KERNEL); - if (!conf) - goto out; - mddev->private = (void *)conf; - - conf->strip_zone = NULL; - conf->devlist = NULL; - if (create_strip_zones (mddev)) - goto out_free_conf; + ret = create_strip_zones(mddev); + if (ret < 0) + return ret; /* calculate array device size */ md_set_array_sectors(mddev, raid0_size(mddev, 0, 0)); printk(KERN_INFO "raid0 : md_size is %llu sectors.\n", (unsigned long long)mddev->array_sectors); - printk(KERN_INFO "raid0 : conf->spacing is %llu sectors.\n", - (unsigned long long)conf->spacing); - { - sector_t s = raid0_size(mddev, 0, 0); - sector_t space = conf->spacing; - int round; - conf->sector_shift = 0; - if (sizeof(sector_t) > sizeof(u32)) { - /*shift down 
space and s so that sector_div will work */ - while (space > (sector_t) (~(u32)0)) { - s >>= 1; - space >>= 1; - s += 1; /* force round-up */ - conf->sector_shift++; - } - } - round = sector_div(s, (u32)space) ? 1 : 0; - nb_zone = s + round; - } - printk(KERN_INFO "raid0 : nb_zone is %d.\n", nb_zone); - - printk(KERN_INFO "raid0 : Allocating %zu bytes for hash.\n", - nb_zone*sizeof(struct strip_zone*)); - conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL); - if (!conf->hash_table) - goto out_free_conf; - sectors = conf->strip_zone[cur].sectors; - - conf->hash_table[0] = conf->strip_zone + cur; - for (i=1; i< nb_zone; i++) { - while (sectors <= conf->spacing) { - cur++; - sectors += conf->strip_zone[cur].sectors; - } - sectors -= conf->spacing; - conf->hash_table[i] = conf->strip_zone + cur; - } - if (conf->sector_shift) { - conf->spacing >>= conf->sector_shift; - /* round spacing up so when we divide by it, we - * err on the side of too-low, which is safest - */ - conf->spacing++; - } - /* calculate the max read-ahead size. * For read-ahead of large files to be effective, we need to * readahead at least twice a whole stripe. i.e. number of devices @@ -365,48 +338,107 @@ static int raid0_run (mddev_t *mddev) * chunksize should be used in that case. */ { - int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE; + int stripe = mddev->raid_disks * + (mddev->chunk_sectors << 9) / PAGE_SIZE; if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) mddev->queue->backing_dev_info.ra_pages = 2* stripe; } - blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec); + dump_zones(mddev); return 0; +} -out_free_conf: +static int raid0_stop(mddev_t *mddev) +{ + raid0_conf_t *conf = mddev->private; + + blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ kfree(conf->strip_zone); kfree(conf->devlist); kfree(conf); mddev->private = NULL; -out: - return -ENOMEM; + return 0; } -static int raid0_stop (mddev_t *mddev) +/* Find the zone which holds a particular offset + * Update *sectorp to be an offset in that zone + */ +static struct strip_zone *find_zone(struct raid0_private_data *conf, + sector_t *sectorp) { - raid0_conf_t *conf = mddev_to_conf(mddev); + int i; + struct strip_zone *z = conf->strip_zone; + sector_t sector = *sectorp; + + for (i = 0; i < conf->nr_strip_zones; i++) + if (sector < z[i].zone_end) { + if (i) + *sectorp = sector - z[i-1].zone_end; + return z + i; + } + BUG(); +} - blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ - kfree(conf->hash_table); - conf->hash_table = NULL; - kfree(conf->strip_zone); - conf->strip_zone = NULL; - kfree(conf); - mddev->private = NULL; +/* + * remaps the bio to the target device. we separate two flows. 
+ * power 2 flow and a general flow for the sake of performance +*/ +static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone, + sector_t sector, sector_t *sector_offset) +{ + unsigned int sect_in_chunk; + sector_t chunk; + raid0_conf_t *conf = mddev->private; + unsigned int chunk_sects = mddev->chunk_sectors; + + if (is_power_of_2(chunk_sects)) { + int chunksect_bits = ffz(~chunk_sects); + /* find the sector offset inside the chunk */ + sect_in_chunk = sector & (chunk_sects - 1); + sector >>= chunksect_bits; + /* chunk in zone */ + chunk = *sector_offset; + /* quotient is the chunk in real device*/ + sector_div(chunk, zone->nb_dev << chunksect_bits); + } else{ + sect_in_chunk = sector_div(sector, chunk_sects); + chunk = *sector_offset; + sector_div(chunk, chunk_sects * zone->nb_dev); + } + /* + * position the bio over the real device + * real sector = chunk in device + starting of zone + * + the position in the chunk + */ + *sector_offset = (chunk * chunk_sects) + sect_in_chunk; + return conf->devlist[(zone - conf->strip_zone)*mddev->raid_disks + + sector_div(sector, zone->nb_dev)]; +} - return 0; +/* + * Is the io distributed over one or more chunks? +*/ +static inline int is_io_in_chunk_boundary(mddev_t *mddev, + unsigned int chunk_sects, struct bio *bio) +{ + if (likely(is_power_of_2(chunk_sects))) { + return chunk_sects >= ((bio->bi_sector & (chunk_sects-1)) + + (bio->bi_size >> 9)); + } else{ + sector_t sector = bio->bi_sector; + return chunk_sects >= (sector_div(sector, chunk_sects) + + (bio->bi_size >> 9)); + } } -static int raid0_make_request (struct request_queue *q, struct bio *bio) +static int raid0_make_request(struct request_queue *q, struct bio *bio) { mddev_t *mddev = q->queuedata; - unsigned int sect_in_chunk, chunksect_bits, chunk_sects; - raid0_conf_t *conf = mddev_to_conf(mddev); + unsigned int chunk_sects; + sector_t sector_offset; struct strip_zone *zone; mdk_rdev_t *tmp_dev; - sector_t chunk; - sector_t sector, rsect; const int rw = bio_data_dir(bio); int cpu; @@ -421,11 +453,9 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio) bio_sectors(bio)); part_stat_unlock(); - chunk_sects = mddev->chunk_size >> 9; - chunksect_bits = ffz(~chunk_sects); - sector = bio->bi_sector; - - if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) { + chunk_sects = mddev->chunk_sectors; + if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) { + sector_t sector = bio->bi_sector; struct bio_pair *bp; /* Sanity check -- queue functions should prevent this happening */ if (bio->bi_vcnt != 1 || @@ -434,7 +464,12 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio) bio->bi_idx != 0) goto bad_map; /* This is a one page bio that upper layers * refuse to split for us, so we need to split it. */
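find_zone() walks the zone_end array while adjusting the offset, and map_sector() then chooses the device and on-device sector, with a shift/mask fast path when the chunk size is a power of two (ffz(~chunk_sects) is just log2) and a division path otherwise. The userspace sketch below shows the two flows agreeing on one example; the map() helper is illustrative and __builtin_ctz (a gcc/clang builtin) stands in for the kernel's ffz(~x).

/* Both raid0 mapping flows: shift/mask for power-of-two chunks,
 * divide/modulo for the general case. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

static void map(sector_t zone_offset, unsigned int chunk, unsigned int nb_dev,
                int pow2, sector_t *dev, sector_t *dev_sector)
{
    sector_t chunk_no, sect_in_chunk, stripe;

    if (pow2) {
        unsigned int bits = __builtin_ctz(chunk);  /* log2 of chunk */
        sect_in_chunk = zone_offset & (chunk - 1);
        chunk_no = zone_offset >> bits;
    } else {
        chunk_no = zone_offset / chunk;
        sect_in_chunk = zone_offset % chunk;
    }
    stripe = chunk_no / nb_dev;      /* which stripe on the device   */
    *dev = chunk_no % nb_dev;        /* which device inside the zone */
    *dev_sector = stripe * chunk + sect_in_chunk;
}

int main(void)
{
    sector_t dev, off;

    map(300, 16, 3, 1, &dev, &off);  /* power-of-two path */
    printf("pow2:    dev=%llu off=%llu\n",
           (unsigned long long)dev, (unsigned long long)off);

    map(300, 16, 3, 0, &dev, &off);  /* general path, same answer */
    printf("general: dev=%llu off=%llu\n",
           (unsigned long long)dev, (unsigned long long)off);
    return 0;
}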
- bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1))); + if (likely(is_power_of_2(chunk_sects))) + bp = bio_split(bio, chunk_sects - (sector & + (chunk_sects-1))); + else + bp = bio_split(bio, chunk_sects - + sector_div(sector, chunk_sects)); if (raid0_make_request(q, &bp->bio1)) generic_make_request(&bp->bio1); if (raid0_make_request(q, &bp->bio2)) @@ -443,34 +478,14 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio) bio_pair_release(bp); return 0; } - - - { - sector_t x = sector >> conf->sector_shift; - sector_div(x, (u32)conf->spacing); - zone = conf->hash_table[x]; - } - while (sector >= zone->zone_start + zone->sectors) - zone++; - - sect_in_chunk = bio->bi_sector & (chunk_sects - 1); - - - { - sector_t x = (sector - zone->zone_start) >> chunksect_bits; - - sector_div(x, zone->nb_dev); - chunk = x; - - x = sector >> chunksect_bits; - tmp_dev = zone->dev[sector_div(x, zone->nb_dev)]; - } - rsect = (chunk << chunksect_bits) + zone->dev_start + sect_in_chunk; - + sector_offset = bio->bi_sector; + zone = find_zone(mddev->private, &sector_offset); + tmp_dev = map_sector(mddev, zone, bio->bi_sector, + &sector_offset); bio->bi_bdev = tmp_dev->bdev; - bio->bi_sector = rsect + tmp_dev->data_offset; - + bio->bi_sector = sector_offset + zone->dev_start + + tmp_dev->data_offset; /* * Let the main block layer submit the IO and resolve recursion: */ @@ -485,31 +500,35 @@ bad_map: return 0; } -static void raid0_status (struct seq_file *seq, mddev_t *mddev) +static void raid0_status(struct seq_file *seq, mddev_t *mddev) { #undef MD_DEBUG #ifdef MD_DEBUG int j, k, h; char b[BDEVNAME_SIZE]; - raid0_conf_t *conf = mddev_to_conf(mddev); + raid0_conf_t *conf = mddev->private; + sector_t zone_size; + sector_t zone_start = 0; h = 0; + for (j = 0; j < conf->nr_strip_zones; j++) { seq_printf(seq, " z%d", j); - if (conf->hash_table[h] == conf->strip_zone+j) - seq_printf(seq, "(h%d)", h++); seq_printf(seq, "=["); for (k = 0; k < conf->strip_zone[j].nb_dev; k++) seq_printf(seq, "%s/", bdevname( - conf->strip_zone[j].dev[k]->bdev,b)); - - seq_printf(seq, "] zs=%d ds=%d s=%d\n", - conf->strip_zone[j].zone_start, - conf->strip_zone[j].dev_start, - conf->strip_zone[j].sectors); + conf->devlist[j*mddev->raid_disks + k] + ->bdev, b)); + + zone_size = conf->strip_zone[j].zone_end - zone_start; + seq_printf(seq, "] ze=%lld ds=%lld s=%lld\n", + (unsigned long long)zone_start>>1, + (unsigned long long)conf->strip_zone[j].dev_start>>1, + (unsigned long long)zone_size>>1); + zone_start = conf->strip_zone[j].zone_end; } #endif - seq_printf(seq, " %dk chunks", mddev->chunk_size/1024); + seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2); return; } diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h index 824b12e..91f8e87 100644 --- a/drivers/md/raid0.h +++ b/drivers/md/raid0.h @@ -3,26 +3,18 @@ struct strip_zone { - sector_t zone_start; /* Zone offset in md_dev (in sectors) */ + sector_t zone_end; /* Start of the next zone (in sectors) */ sector_t dev_start; /* Zone offset in real dev (in sectors) */ - sector_t sectors; /* Zone size in sectors */ int nb_dev; /* # of devices attached to the zone */ - mdk_rdev_t **dev; /* Devices attached to the zone */ }; struct raid0_private_data { - struct strip_zone **hash_table; /* Table of indexes into strip_zone */ struct strip_zone *strip_zone; mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */ int nr_strip_zones; - - sector_t spacing; - int sector_shift; /* shift this before divide by spacing */ }; typedef struct 
raid0_private_data raid0_conf_t; -#define mddev_to_conf(mddev) ((raid0_conf_t *) mddev->private) - #endif diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index e23758b..89939a7 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -182,7 +182,7 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio) static void free_r1bio(r1bio_t *r1_bio) { - conf_t *conf = mddev_to_conf(r1_bio->mddev); + conf_t *conf = r1_bio->mddev->private; /* * Wake up any possible resync thread that waits for the device @@ -196,7 +196,7 @@ static void free_r1bio(r1bio_t *r1_bio) static void put_buf(r1bio_t *r1_bio) { - conf_t *conf = mddev_to_conf(r1_bio->mddev); + conf_t *conf = r1_bio->mddev->private; int i; for (i=0; i<conf->raid_disks; i++) { @@ -214,7 +214,7 @@ static void reschedule_retry(r1bio_t *r1_bio) { unsigned long flags; mddev_t *mddev = r1_bio->mddev; - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; spin_lock_irqsave(&conf->device_lock, flags); list_add(&r1_bio->retry_list, &conf->retry_list); @@ -253,7 +253,7 @@ static void raid_end_bio_io(r1bio_t *r1_bio) */ static inline void update_head_pos(int disk, r1bio_t *r1_bio) { - conf_t *conf = mddev_to_conf(r1_bio->mddev); + conf_t *conf = r1_bio->mddev->private; conf->mirrors[disk].head_position = r1_bio->sector + (r1_bio->sectors); @@ -264,7 +264,7 @@ static void raid1_end_read_request(struct bio *bio, int error) int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); int mirror; - conf_t *conf = mddev_to_conf(r1_bio->mddev); + conf_t *conf = r1_bio->mddev->private; mirror = r1_bio->read_disk; /* @@ -309,7 +309,7 @@ static void raid1_end_write_request(struct bio *bio, int error) int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state); - conf_t *conf = mddev_to_conf(r1_bio->mddev); + conf_t *conf = r1_bio->mddev->private; struct bio *to_put = NULL; @@ -541,7 +541,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) static void unplug_slaves(mddev_t *mddev) { - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; int i; rcu_read_lock(); @@ -573,7 +573,7 @@ static void raid1_unplug(struct request_queue *q) static int raid1_congested(void *data, int bits) { mddev_t *mddev = data; - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; int i, ret = 0; rcu_read_lock(); @@ -772,7 +772,7 @@ do_sync_io: static int make_request(struct request_queue *q, struct bio * bio) { mddev_t *mddev = q->queuedata; - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; mirror_info_t *mirror; r1bio_t *r1_bio; struct bio *read_bio; @@ -991,7 +991,7 @@ static int make_request(struct request_queue *q, struct bio * bio) static void status(struct seq_file *seq, mddev_t *mddev) { - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; int i; seq_printf(seq, " [%d/%d] [", conf->raid_disks, @@ -1010,7 +1010,7 @@ static void status(struct seq_file *seq, mddev_t *mddev) static void error(mddev_t *mddev, mdk_rdev_t *rdev) { char b[BDEVNAME_SIZE]; - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; /* * If it is not operational, then we have already marked it as dead @@ -1214,7 +1214,7 @@ static void end_sync_write(struct bio *bio, int error) int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); mddev_t *mddev = r1_bio->mddev; - conf_t *conf = 
mddev_to_conf(mddev); + conf_t *conf = mddev->private; int i; int mirror=0; @@ -1248,7 +1248,7 @@ static void end_sync_write(struct bio *bio, int error) static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) { - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; int i; int disks = conf->raid_disks; struct bio *bio, *wbio; @@ -1562,7 +1562,7 @@ static void raid1d(mddev_t *mddev) r1bio_t *r1_bio; struct bio *bio; unsigned long flags; - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; struct list_head *head = &conf->retry_list; int unplug=0; mdk_rdev_t *rdev; @@ -1585,7 +1585,7 @@ static void raid1d(mddev_t *mddev) spin_unlock_irqrestore(&conf->device_lock, flags); mddev = r1_bio->mddev; - conf = mddev_to_conf(mddev); + conf = mddev->private; if (test_bit(R1BIO_IsSync, &r1_bio->state)) { sync_request_write(mddev, r1_bio); unplug = 1; @@ -1706,7 +1706,7 @@ static int init_resync(conf_t *conf) static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) { - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; r1bio_t *r1_bio; struct bio *bio; sector_t max_sector, nr_sectors; @@ -2052,6 +2052,10 @@ static int run(mddev_t *mddev) goto out_free_conf; } + if (mddev->recovery_cp != MaxSector) + printk(KERN_NOTICE "raid1: %s is not clean" + " -- starting background reconstruction\n", + mdname(mddev)); printk(KERN_INFO "raid1: raid set %s active with %d out of %d mirrors\n", mdname(mddev), mddev->raid_disks - mddev->degraded, @@ -2087,7 +2091,7 @@ out: static int stop(mddev_t *mddev) { - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; struct bitmap *bitmap = mddev->bitmap; int behind_wait = 0; @@ -2155,16 +2159,16 @@ static int raid1_reshape(mddev_t *mddev) mempool_t *newpool, *oldpool; struct pool_info *newpoolinfo; mirror_info_t *newmirrors; - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; int cnt, raid_disks; unsigned long flags; int d, d2, err; /* Cannot change chunk_size, layout, or level */ - if (mddev->chunk_size != mddev->new_chunk || + if (mddev->chunk_sectors != mddev->new_chunk_sectors || mddev->layout != mddev->new_layout || mddev->level != mddev->new_level) { - mddev->new_chunk = mddev->chunk_size; + mddev->new_chunk_sectors = mddev->chunk_sectors; mddev->new_layout = mddev->layout; mddev->new_level = mddev->level; return -EINVAL; @@ -2252,7 +2256,7 @@ static int raid1_reshape(mddev_t *mddev) static void raid1_quiesce(mddev_t *mddev, int state) { - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; switch(state) { case 1: diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index 1620eea..e87b84d 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h @@ -64,12 +64,6 @@ struct r1_private_data_s { typedef struct r1_private_data_s conf_t; /* - * this is the only point in the RAID code where we violate - * C type safety. mddev->private is an 'opaque' pointer. - */ -#define mddev_to_conf(mddev) ((conf_t *) mddev->private) - -/* * this is our 'private' RAID1 bio. 
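raid1_reshape() above only allows the device count to change; a pending new chunk size, layout or level is rolled back to the live value before returning -EINVAL. A compact userspace sketch of that validate-and-revert pattern, with a hypothetical struct in place of mddev_s:

/* Validate-and-revert, as in raid1_reshape(). */
#include <errno.h>
#include <stdio.h>

struct md {
    int chunk_sectors, new_chunk_sectors;
    int layout, new_layout;
    int level, new_level;
};

static int reshape_check(struct md *m)
{
    if (m->new_chunk_sectors != m->chunk_sectors ||
        m->new_layout != m->layout ||
        m->new_level != m->level) {
        /* reject, and undo the pending request */
        m->new_chunk_sectors = m->chunk_sectors;
        m->new_layout = m->layout;
        m->new_level = m->level;
        return -EINVAL;
    }
    return 0;
}

int main(void)
{
    struct md m = { 128, 64, 0, 0, 1, 1 };   /* pending chunk change */

    printf("%d\n", reshape_check(&m));       /* -22 (EINVAL)         */
    printf("%d\n", m.new_chunk_sectors);     /* 128, rolled back     */
    return 0;
}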
* * it contains information about what kind of IO operations were started diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 750550c..ae12cea 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -188,7 +188,7 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio) static void free_r10bio(r10bio_t *r10_bio) { - conf_t *conf = mddev_to_conf(r10_bio->mddev); + conf_t *conf = r10_bio->mddev->private; /* * Wake up any possible resync thread that waits for the device @@ -202,7 +202,7 @@ static void free_r10bio(r10bio_t *r10_bio) static void put_buf(r10bio_t *r10_bio) { - conf_t *conf = mddev_to_conf(r10_bio->mddev); + conf_t *conf = r10_bio->mddev->private; mempool_free(r10_bio, conf->r10buf_pool); @@ -213,7 +213,7 @@ static void reschedule_retry(r10bio_t *r10_bio) { unsigned long flags; mddev_t *mddev = r10_bio->mddev; - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; spin_lock_irqsave(&conf->device_lock, flags); list_add(&r10_bio->retry_list, &conf->retry_list); @@ -245,7 +245,7 @@ static void raid_end_bio_io(r10bio_t *r10_bio) */ static inline void update_head_pos(int slot, r10bio_t *r10_bio) { - conf_t *conf = mddev_to_conf(r10_bio->mddev); + conf_t *conf = r10_bio->mddev->private; conf->mirrors[r10_bio->devs[slot].devnum].head_position = r10_bio->devs[slot].addr + (r10_bio->sectors); @@ -256,7 +256,7 @@ static void raid10_end_read_request(struct bio *bio, int error) int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); int slot, dev; - conf_t *conf = mddev_to_conf(r10_bio->mddev); + conf_t *conf = r10_bio->mddev->private; slot = r10_bio->read_slot; @@ -297,7 +297,7 @@ static void raid10_end_write_request(struct bio *bio, int error) int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); int slot, dev; - conf_t *conf = mddev_to_conf(r10_bio->mddev); + conf_t *conf = r10_bio->mddev->private; for (slot = 0; slot < conf->copies; slot++) if (r10_bio->devs[slot].bio == bio) @@ -461,7 +461,7 @@ static int raid10_mergeable_bvec(struct request_queue *q, mddev_t *mddev = q->queuedata; sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); int max; - unsigned int chunk_sectors = mddev->chunk_size >> 9; + unsigned int chunk_sectors = mddev->chunk_sectors; unsigned int bio_sectors = bvm->bi_size >> 9; max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; @@ -596,7 +596,7 @@ rb_out: static void unplug_slaves(mddev_t *mddev) { - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; int i; rcu_read_lock(); @@ -628,7 +628,7 @@ static void raid10_unplug(struct request_queue *q) static int raid10_congested(void *data, int bits) { mddev_t *mddev = data; - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; int i, ret = 0; rcu_read_lock(); @@ -788,7 +788,7 @@ static void unfreeze_array(conf_t *conf) static int make_request(struct request_queue *q, struct bio * bio) { mddev_t *mddev = q->queuedata; - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; mirror_info_t *mirror; r10bio_t *r10_bio; struct bio *read_bio; @@ -981,11 +981,11 @@ static int make_request(struct request_queue *q, struct bio * bio) static void status(struct seq_file *seq, mddev_t *mddev) { - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; int i; if (conf->near_copies < conf->raid_disks) - seq_printf(seq, " %dK chunks", mddev->chunk_size/1024); + seq_printf(seq, " %dK chunks", 
mddev->chunk_sectors / 2); if (conf->near_copies > 1) seq_printf(seq, " %d near-copies", conf->near_copies); if (conf->far_copies > 1) { @@ -1006,7 +1006,7 @@ static void status(struct seq_file *seq, mddev_t *mddev) static void error(mddev_t *mddev, mdk_rdev_t *rdev) { char b[BDEVNAME_SIZE]; - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; /* * If it is not operational, then we have already marked it as dead @@ -1215,7 +1215,7 @@ abort: static void end_sync_read(struct bio *bio, int error) { r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); - conf_t *conf = mddev_to_conf(r10_bio->mddev); + conf_t *conf = r10_bio->mddev->private; int i,d; for (i=0; i<conf->copies; i++) @@ -1253,7 +1253,7 @@ static void end_sync_write(struct bio *bio, int error) int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); mddev_t *mddev = r10_bio->mddev; - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; int i,d; for (i = 0; i < conf->copies; i++) @@ -1300,7 +1300,7 @@ static void end_sync_write(struct bio *bio, int error) */ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio) { - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; int i, first; struct bio *tbio, *fbio; @@ -1400,7 +1400,7 @@ done: static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio) { - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; int i, d; struct bio *bio, *wbio; @@ -1549,7 +1549,7 @@ static void raid10d(mddev_t *mddev) r10bio_t *r10_bio; struct bio *bio; unsigned long flags; - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; struct list_head *head = &conf->retry_list; int unplug=0; mdk_rdev_t *rdev; @@ -1572,7 +1572,7 @@ static void raid10d(mddev_t *mddev) spin_unlock_irqrestore(&conf->device_lock, flags); mddev = r10_bio->mddev; - conf = mddev_to_conf(mddev); + conf = mddev->private; if (test_bit(R10BIO_IsSync, &r10_bio->state)) { sync_request_write(mddev, r10_bio); unplug = 1; @@ -1680,7 +1680,7 @@ static int init_resync(conf_t *conf) static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) { - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; r10bio_t *r10_bio; struct bio *biolist = NULL, *bio; sector_t max_sector, nr_sectors; @@ -2026,7 +2026,7 @@ static sector_t raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks) { sector_t size; - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; if (!raid_disks) raid_disks = mddev->raid_disks; @@ -2050,9 +2050,10 @@ static int run(mddev_t *mddev) int nc, fc, fo; sector_t stride, size; - if (mddev->chunk_size < PAGE_SIZE) { + if (mddev->chunk_sectors < (PAGE_SIZE >> 9) || + !is_power_of_2(mddev->chunk_sectors)) { printk(KERN_ERR "md/raid10: chunk size must be " - "at least PAGE_SIZE(%ld).\n", PAGE_SIZE); + "at least PAGE_SIZE(%ld) and be a power of 2.\n", PAGE_SIZE); return -EINVAL; } @@ -2095,8 +2096,8 @@ static int run(mddev_t *mddev) conf->far_copies = fc; conf->copies = nc*fc; conf->far_offset = fo; - conf->chunk_mask = (sector_t)(mddev->chunk_size>>9)-1; - conf->chunk_shift = ffz(~mddev->chunk_size) - 9; + conf->chunk_mask = mddev->chunk_sectors - 1; + conf->chunk_shift = ffz(~mddev->chunk_sectors); size = mddev->dev_sectors >> conf->chunk_shift; sector_div(size, fc); size = size * conf->raid_disks; @@ -2185,6 +2186,10 @@ static int run(mddev_t *mddev) goto out_free_conf; } + if (mddev->recovery_cp != MaxSector) + 
printk(KERN_NOTICE "raid10: %s is not clean" + " -- starting background reconstruction\n", + mdname(mddev)); printk(KERN_INFO "raid10: raid set %s active with %d out of %d devices\n", mdname(mddev), mddev->raid_disks - mddev->degraded, @@ -2204,7 +2209,8 @@ static int run(mddev_t *mddev) * maybe... */ { - int stripe = conf->raid_disks * (mddev->chunk_size / PAGE_SIZE); + int stripe = conf->raid_disks * + ((mddev->chunk_sectors << 9) / PAGE_SIZE); stripe /= conf->near_copies; if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) mddev->queue->backing_dev_info.ra_pages = 2* stripe; @@ -2227,7 +2233,7 @@ out: static int stop(mddev_t *mddev) { - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; raise_barrier(conf, 0); lower_barrier(conf); @@ -2245,7 +2251,7 @@ static int stop(mddev_t *mddev) static void raid10_quiesce(mddev_t *mddev, int state) { - conf_t *conf = mddev_to_conf(mddev); + conf_t *conf = mddev->private; switch(state) { case 1: diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h index 244dbe5..59cd1ef 100644 --- a/drivers/md/raid10.h +++ b/drivers/md/raid10.h @@ -62,12 +62,6 @@ struct r10_private_data_s { typedef struct r10_private_data_s conf_t; /* - * this is the only point in the RAID code where we violate - * C type safety. mddev->private is an 'opaque' pointer. - */ -#define mddev_to_conf(mddev) ((conf_t *) mddev->private) - -/* * this is our 'private' RAID10 bio. * * it contains information about what kind of IO operations were started diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index bef8766..f9f991e 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1274,8 +1274,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, sector_t new_sector; int algorithm = previous ? conf->prev_algo : conf->algorithm; - int sectors_per_chunk = previous ? (conf->prev_chunk >> 9) - : (conf->chunk_size >> 9); + int sectors_per_chunk = previous ? conf->prev_chunk_sectors + : conf->chunk_sectors; int raid_disks = previous ? conf->previous_raid_disks : conf->raid_disks; int data_disks = raid_disks - conf->max_degraded; @@ -1480,8 +1480,8 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) int raid_disks = sh->disks; int data_disks = raid_disks - conf->max_degraded; sector_t new_sector = sh->sector, check; - int sectors_per_chunk = previous ? (conf->prev_chunk >> 9) - : (conf->chunk_size >> 9); + int sectors_per_chunk = previous ? conf->prev_chunk_sectors + : conf->chunk_sectors; int algorithm = previous ? conf->prev_algo : conf->algorithm; sector_t stripe; @@ -1997,8 +1997,7 @@ static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous, struct stripe_head *sh) { int sectors_per_chunk = - previous ? (conf->prev_chunk >> 9) - : (conf->chunk_size >> 9); + previous ? conf->prev_chunk_sectors : conf->chunk_sectors; int dd_idx; int chunk_offset = sector_div(stripe, sectors_per_chunk); int disks = previous ? 
conf->previous_raid_disks : conf->raid_disks; @@ -3284,7 +3283,7 @@ static void activate_bit_delay(raid5_conf_t *conf) static void unplug_slaves(mddev_t *mddev) { - raid5_conf_t *conf = mddev_to_conf(mddev); + raid5_conf_t *conf = mddev->private; int i; rcu_read_lock(); @@ -3308,7 +3307,7 @@ static void unplug_slaves(mddev_t *mddev) static void raid5_unplug_device(struct request_queue *q) { mddev_t *mddev = q->queuedata; - raid5_conf_t *conf = mddev_to_conf(mddev); + raid5_conf_t *conf = mddev->private; unsigned long flags; spin_lock_irqsave(&conf->device_lock, flags); @@ -3327,7 +3326,7 @@ static void raid5_unplug_device(struct request_queue *q) static int raid5_congested(void *data, int bits) { mddev_t *mddev = data; - raid5_conf_t *conf = mddev_to_conf(mddev); + raid5_conf_t *conf = mddev->private; /* No difference between reads and writes. Just check * how busy the stripe_cache is @@ -3352,14 +3351,14 @@ static int raid5_mergeable_bvec(struct request_queue *q, mddev_t *mddev = q->queuedata; sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); int max; - unsigned int chunk_sectors = mddev->chunk_size >> 9; + unsigned int chunk_sectors = mddev->chunk_sectors; unsigned int bio_sectors = bvm->bi_size >> 9; if ((bvm->bi_rw & 1) == WRITE) return biovec->bv_len; /* always allow writes to be mergeable */ - if (mddev->new_chunk < mddev->chunk_size) - chunk_sectors = mddev->new_chunk >> 9; + if (mddev->new_chunk_sectors < mddev->chunk_sectors) + chunk_sectors = mddev->new_chunk_sectors; max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; if (max < 0) max = 0; if (max <= biovec->bv_len && bio_sectors == 0) @@ -3372,11 +3371,11 @@ static int raid5_mergeable_bvec(struct request_queue *q, static int in_chunk_boundary(mddev_t *mddev, struct bio *bio) { sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); - unsigned int chunk_sectors = mddev->chunk_size >> 9; + unsigned int chunk_sectors = mddev->chunk_sectors; unsigned int bio_sectors = bio->bi_size >> 9; - if (mddev->new_chunk < mddev->chunk_size) - chunk_sectors = mddev->new_chunk >> 9; + if (mddev->new_chunk_sectors < mddev->chunk_sectors) + chunk_sectors = mddev->new_chunk_sectors; return chunk_sectors >= ((sector & (chunk_sectors - 1)) + bio_sectors); } @@ -3440,7 +3439,7 @@ static void raid5_align_endio(struct bio *bi, int error) bio_put(bi); mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata; - conf = mddev_to_conf(mddev); + conf = mddev->private; rdev = (void*)raid_bi->bi_next; raid_bi->bi_next = NULL; @@ -3482,7 +3481,7 @@ static int bio_fits_rdev(struct bio *bi) static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio) { mddev_t *mddev = q->queuedata; - raid5_conf_t *conf = mddev_to_conf(mddev); + raid5_conf_t *conf = mddev->private; unsigned int dd_idx; struct bio* align_bi; mdk_rdev_t *rdev; @@ -3599,7 +3598,7 @@ static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf) static int make_request(struct request_queue *q, struct bio * bi) { mddev_t *mddev = q->queuedata; - raid5_conf_t *conf = mddev_to_conf(mddev); + raid5_conf_t *conf = mddev->private; int dd_idx; sector_t new_sector; sector_t logical_sector, last_sector; @@ -3696,6 +3695,7 @@ static int make_request(struct request_queue *q, struct bio * bi) spin_unlock_irq(&conf->device_lock); if (must_retry) { release_stripe(sh); + schedule(); goto retry; } } @@ -3791,10 +3791,10 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped * If old and new chunk sizes differ, we need to 
process the * largest of these */ - if (mddev->new_chunk > mddev->chunk_size) - reshape_sectors = mddev->new_chunk / 512; + if (mddev->new_chunk_sectors > mddev->chunk_sectors) + reshape_sectors = mddev->new_chunk_sectors; else - reshape_sectors = mddev->chunk_size / 512; + reshape_sectors = mddev->chunk_sectors; /* we update the metadata when there is more than 3Meg * in the block range (that is rather arbitrary, should @@ -3917,7 +3917,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped 1, &dd_idx, NULL); last_sector = raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) - *(new_data_disks) - 1), + * new_data_disks - 1), 1, &dd_idx, NULL); if (last_sector >= mddev->dev_sectors) last_sector = mddev->dev_sectors - 1; @@ -3946,7 +3946,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped wait_event(conf->wait_for_overlap, atomic_read(&conf->reshape_stripes) == 0); mddev->reshape_position = conf->reshape_progress; - mddev->curr_resync_completed = mddev->curr_resync; + mddev->curr_resync_completed = mddev->curr_resync + reshape_sectors; conf->reshape_checkpoint = jiffies; set_bit(MD_CHANGE_DEVS, &mddev->flags); md_wakeup_thread(mddev->thread); @@ -4129,7 +4129,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) static void raid5d(mddev_t *mddev) { struct stripe_head *sh; - raid5_conf_t *conf = mddev_to_conf(mddev); + raid5_conf_t *conf = mddev->private; int handled; pr_debug("+++ raid5d active\n"); @@ -4185,7 +4185,7 @@ static void raid5d(mddev_t *mddev) static ssize_t raid5_show_stripe_cache_size(mddev_t *mddev, char *page) { - raid5_conf_t *conf = mddev_to_conf(mddev); + raid5_conf_t *conf = mddev->private; if (conf) return sprintf(page, "%d\n", conf->max_nr_stripes); else @@ -4195,7 +4195,7 @@ raid5_show_stripe_cache_size(mddev_t *mddev, char *page) static ssize_t raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) { - raid5_conf_t *conf = mddev_to_conf(mddev); + raid5_conf_t *conf = mddev->private; unsigned long new; int err; @@ -4233,7 +4233,7 @@ raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, static ssize_t raid5_show_preread_threshold(mddev_t *mddev, char *page) { - raid5_conf_t *conf = mddev_to_conf(mddev); + raid5_conf_t *conf = mddev->private; if (conf) return sprintf(page, "%d\n", conf->bypass_threshold); else @@ -4243,7 +4243,7 @@ raid5_show_preread_threshold(mddev_t *mddev, char *page) static ssize_t raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len) { - raid5_conf_t *conf = mddev_to_conf(mddev); + raid5_conf_t *conf = mddev->private; unsigned long new; if (len >= PAGE_SIZE) return -EINVAL; @@ -4267,7 +4267,7 @@ raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, static ssize_t stripe_cache_active_show(mddev_t *mddev, char *page) { - raid5_conf_t *conf = mddev_to_conf(mddev); + raid5_conf_t *conf = mddev->private; if (conf) return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); else @@ -4291,7 +4291,7 @@ static struct attribute_group raid5_attrs_group = { static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks) { - raid5_conf_t *conf = mddev_to_conf(mddev); + raid5_conf_t *conf = mddev->private; if (!sectors) sectors = mddev->dev_sectors; @@ -4303,8 +4303,8 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks) raid_disks = conf->previous_raid_disks; } - sectors &= ~((sector_t)mddev->chunk_size/512 - 1); - sectors &= ~((sector_t)mddev->new_chunk/512 - 1); + 
sectors &= ~((sector_t)mddev->chunk_sectors - 1); + sectors &= ~((sector_t)mddev->new_chunk_sectors - 1); return sectors * (raid_disks - conf->max_degraded); } @@ -4336,9 +4336,11 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) return ERR_PTR(-EINVAL); } - if (!mddev->new_chunk || mddev->new_chunk % PAGE_SIZE) { + if (!mddev->new_chunk_sectors || + (mddev->new_chunk_sectors << 9) % PAGE_SIZE || + !is_power_of_2(mddev->new_chunk_sectors)) { printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", - mddev->new_chunk, mdname(mddev)); + mddev->new_chunk_sectors << 9, mdname(mddev)); return ERR_PTR(-EINVAL); } @@ -4401,7 +4403,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) conf->fullsync = 1; } - conf->chunk_size = mddev->new_chunk; + conf->chunk_sectors = mddev->new_chunk_sectors; conf->level = mddev->new_level; if (conf->level == 6) conf->max_degraded = 2; @@ -4411,7 +4413,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) conf->max_nr_stripes = NR_STRIPES; conf->reshape_progress = mddev->reshape_position; if (conf->reshape_progress != MaxSector) { - conf->prev_chunk = mddev->chunk_size; + conf->prev_chunk_sectors = mddev->chunk_sectors; conf->prev_algo = mddev->layout; } @@ -4453,6 +4455,10 @@ static int run(mddev_t *mddev) int working_disks = 0; mdk_rdev_t *rdev; + if (mddev->recovery_cp != MaxSector) + printk(KERN_NOTICE "raid5: %s is not clean" + " -- starting background reconstruction\n", + mdname(mddev)); if (mddev->reshape_position != MaxSector) { /* Check that we can continue the reshape. * Currently only disks can change, it must @@ -4475,7 +4481,7 @@ static int run(mddev_t *mddev) * geometry. */ here_new = mddev->reshape_position; - if (sector_div(here_new, (mddev->new_chunk>>9)* + if (sector_div(here_new, mddev->new_chunk_sectors * (mddev->raid_disks - max_degraded))) { printk(KERN_ERR "raid5: reshape_position not " "on a stripe boundary\n"); @@ -4483,7 +4489,7 @@ static int run(mddev_t *mddev) } /* here_new is the stripe we will write to */ here_old = mddev->reshape_position; - sector_div(here_old, (mddev->chunk_size>>9)* + sector_div(here_old, mddev->chunk_sectors * (old_disks-max_degraded)); /* here_old is the first stripe that we might need to read * from */ @@ -4498,7 +4504,7 @@ static int run(mddev_t *mddev) } else { BUG_ON(mddev->level != mddev->new_level); BUG_ON(mddev->layout != mddev->new_layout); - BUG_ON(mddev->chunk_size != mddev->new_chunk); + BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors); BUG_ON(mddev->delta_disks != 0); } @@ -4532,7 +4538,7 @@ static int run(mddev_t *mddev) } /* device size must be a multiple of chunk size */ - mddev->dev_sectors &= ~(mddev->chunk_size / 512 - 1); + mddev->dev_sectors &= ~(mddev->chunk_sectors - 1); mddev->resync_max_sectors = mddev->dev_sectors; if (mddev->degraded > 0 && @@ -4581,7 +4587,7 @@ static int run(mddev_t *mddev) { int data_disks = conf->previous_raid_disks - conf->max_degraded; int stripe = data_disks * - (mddev->chunk_size / PAGE_SIZE); + ((mddev->chunk_sectors << 9) / PAGE_SIZE); if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) mddev->queue->backing_dev_info.ra_pages = 2 * stripe; } @@ -4678,7 +4684,8 @@ static void status(struct seq_file *seq, mddev_t *mddev) raid5_conf_t *conf = (raid5_conf_t *) mddev->private; int i; - seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout); + seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, + mddev->chunk_sectors / 2, mddev->layout); seq_printf (seq, " [%d/%d] [", 
conf->raid_disks, conf->raid_disks - mddev->degraded); for (i = 0; i < conf->raid_disks; i++) seq_printf (seq, "%s", @@ -4826,7 +4833,7 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors) * any io in the removed space completes, but it hardly seems * worth it. */ - sectors &= ~((sector_t)mddev->chunk_size/512 - 1); + sectors &= ~((sector_t)mddev->chunk_sectors - 1); md_set_array_sectors(mddev, raid5_size(mddev, sectors, mddev->raid_disks)); if (mddev->array_sectors > @@ -4843,14 +4850,37 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors) return 0; } -static int raid5_check_reshape(mddev_t *mddev) +static int check_stripe_cache(mddev_t *mddev) { - raid5_conf_t *conf = mddev_to_conf(mddev); + /* Can only proceed if there are plenty of stripe_heads. + * We need a minimum of one full stripe, and for sensible progress + * it is best to have about 4 times that. + * If we require 4 times, then the default 256 4K stripe_heads will + * allow for chunk sizes up to 256K, which is probably OK. + * If the chunk size is greater, user-space should request more + * stripe_heads first. + */ + raid5_conf_t *conf = mddev->private; + if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4 + > conf->max_nr_stripes || + ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4 + > conf->max_nr_stripes) { + printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n", + ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) + / STRIPE_SIZE)*4); + return 0; + } + return 1; +} + +static int check_reshape(mddev_t *mddev) +{ + raid5_conf_t *conf = mddev->private; if (mddev->delta_disks == 0 && mddev->new_layout == mddev->layout && - mddev->new_chunk == mddev->chunk_size) - return -EINVAL; /* nothing to do */ + mddev->new_chunk_sectors == mddev->chunk_sectors) + return 0; /* nothing to do */ if (mddev->bitmap) /* Cannot grow a bitmap yet */ return -EBUSY; @@ -4869,28 +4899,15 @@ static int raid5_check_reshape(mddev_t *mddev) return -EINVAL; } - /* Can only proceed if there are plenty of stripe_heads. - * We need a minimum of one full stripe, and for sensible progress - * it is best to have about 4 times that. - * If we require 4 times, then the default 256 4K stripe_heads will - * allow for chunk sizes up to 256K, which is probably OK. - * If the chunk size is greater, user-space should request more - * stripe_heads first. - */ - if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes || - (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) { - printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n", - (max(mddev->chunk_size, mddev->new_chunk) - / STRIPE_SIZE)*4); + if (!check_stripe_cache(mddev)) return -ENOSPC; - } return resize_stripes(conf, conf->raid_disks + mddev->delta_disks); }
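/*
 * (Editorial worked example, not part of the patch: with STRIPE_SIZE
 * equal to one 4K page and the default max_nr_stripes of 256, the
 * check in check_stripe_cache() allows chunks up to 256 * 4K / 4 =
 * 256K.  A 512K chunk would need (512K / 4K) * 4 = 512 stripe_heads,
 * so the user must first raise stripe_cache_size through sysfs.)
 */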
static int raid5_start_reshape(mddev_t *mddev) { - raid5_conf_t *conf = mddev_to_conf(mddev); + raid5_conf_t *conf = mddev->private; mdk_rdev_t *rdev; int spares = 0; int added_devices = 0; @@ -4899,6 +4916,9 @@ static int raid5_start_reshape(mddev_t *mddev) if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) return -EBUSY; + if (!check_stripe_cache(mddev)) + return -ENOSPC; + list_for_each_entry(rdev, &mddev->disks, same_set) if (rdev->raid_disk < 0 && !test_bit(Faulty, &rdev->flags)) @@ -4925,8 +4945,8 @@ static int raid5_start_reshape(mddev_t *mddev) spin_lock_irq(&conf->device_lock); conf->previous_raid_disks = conf->raid_disks; conf->raid_disks += mddev->delta_disks; - conf->prev_chunk = conf->chunk_size; - conf->chunk_size = mddev->new_chunk; + conf->prev_chunk_sectors = conf->chunk_sectors; + conf->chunk_sectors = mddev->new_chunk_sectors; conf->prev_algo = conf->algorithm; conf->algorithm = mddev->new_layout; if (mddev->delta_disks < 0) @@ -5008,7 +5028,7 @@ static void end_reshape(raid5_conf_t *conf) */ { int data_disks = conf->raid_disks - conf->max_degraded; - int stripe = data_disks * (conf->chunk_size + int stripe = data_disks * ((conf->chunk_sectors << 9) / PAGE_SIZE); if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; @@ -5022,7 +5042,7 @@ static void end_reshape(raid5_conf_t *conf) static void raid5_finish_reshape(mddev_t *mddev) { struct block_device *bdev; - raid5_conf_t *conf = mddev_to_conf(mddev); + raid5_conf_t *conf = mddev->private; if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { @@ -5053,7 +5073,7 @@ static void raid5_finish_reshape(mddev_t *mddev) raid5_remove_disk(mddev, d); } mddev->layout = conf->algorithm; - mddev->chunk_size = conf->chunk_size; + mddev->chunk_sectors = conf->chunk_sectors; mddev->reshape_position = MaxSector; mddev->delta_disks = 0; } @@ -5061,7 +5081,7 @@ static void raid5_quiesce(mddev_t *mddev, int state) static void raid5_quiesce(mddev_t *mddev, int state) { - raid5_conf_t *conf = mddev_to_conf(mddev); + raid5_conf_t *conf = mddev->private; switch(state) { case 2: /* resume for a suspend */ @@ -5111,7 +5131,7 @@ static void *raid5_takeover_raid1(mddev_t *mddev) mddev->new_level = 5; mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; - mddev->new_chunk = chunksect << 9; + mddev->new_chunk_sectors = chunksect; return setup_conf(mddev); } @@ -5150,24 +5170,24 @@ static void *raid5_takeover_raid6(mddev_t *mddev) } -static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk) +static int raid5_check_reshape(mddev_t *mddev) { /* For a 2-drive array, the layout and chunk size can be changed * immediately as no restriping is needed. * For larger arrays we record the new value - after validation * to be used by a reshape pass.
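 *
 * (Editorial note, for illustration: user space typically reaches
 * this hook by writing the md sysfs files "layout" and "chunk_size",
 * which fill in mddev->new_layout and mddev->new_chunk_sectors
 * before the personality's check_reshape is called.)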
*/ - raid5_conf_t *conf = mddev_to_conf(mddev); + raid5_conf_t *conf = mddev->private; + int new_chunk = mddev->new_chunk_sectors; - if (new_layout >= 0 && !algorithm_valid_raid5(new_layout)) + if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout)) return -EINVAL; if (new_chunk > 0) { - if (new_chunk & (new_chunk-1)) - /* not a power of 2 */ + if (!is_power_of_2(new_chunk)) return -EINVAL; - if (new_chunk < PAGE_SIZE) + if (new_chunk < (PAGE_SIZE>>9)) return -EINVAL; - if (mddev->array_sectors & ((new_chunk>>9)-1)) + if (mddev->array_sectors & (new_chunk-1)) /* not factor of array size */ return -EINVAL; } @@ -5175,49 +5195,39 @@ static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk) /* They look valid */ if (mddev->raid_disks == 2) { - - if (new_layout >= 0) { - conf->algorithm = new_layout; - mddev->layout = mddev->new_layout = new_layout; + /* can make the change immediately */ + if (mddev->new_layout >= 0) { + conf->algorithm = mddev->new_layout; + mddev->layout = mddev->new_layout; } if (new_chunk > 0) { - conf->chunk_size = new_chunk; - mddev->chunk_size = mddev->new_chunk = new_chunk; + conf->chunk_sectors = new_chunk; + mddev->chunk_sectors = new_chunk; } set_bit(MD_CHANGE_DEVS, &mddev->flags); md_wakeup_thread(mddev->thread); - } else { - if (new_layout >= 0) - mddev->new_layout = new_layout; - if (new_chunk > 0) - mddev->new_chunk = new_chunk; } - return 0; + return check_reshape(mddev); } -static int raid6_reconfig(mddev_t *mddev, int new_layout, int new_chunk) +static int raid6_check_reshape(mddev_t *mddev) { - if (new_layout >= 0 && !algorithm_valid_raid6(new_layout)) + int new_chunk = mddev->new_chunk_sectors; + + if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout)) return -EINVAL; if (new_chunk > 0) { - if (new_chunk & (new_chunk-1)) - /* not a power of 2 */ + if (!is_power_of_2(new_chunk)) return -EINVAL; - if (new_chunk < PAGE_SIZE) + if (new_chunk < (PAGE_SIZE >> 9)) return -EINVAL; - if (mddev->array_sectors & ((new_chunk>>9)-1)) + if (mddev->array_sectors & (new_chunk-1)) /* not factor of array size */ return -EINVAL; } /* They look valid */ - - if (new_layout >= 0) - mddev->new_layout = new_layout; - if (new_chunk > 0) - mddev->new_chunk = new_chunk; - - return 0; + return check_reshape(mddev); } static void *raid5_takeover(mddev_t *mddev) @@ -5227,8 +5237,6 @@ static void *raid5_takeover(mddev_t *mddev) * raid1 - if there are two drives. We need to know the chunk size * raid4 - trivial - just use a raid4 layout.
* raid6 - Providing it is a *_6 layout - * - * For now, just do raid1 */ if (mddev->level == 1) @@ -5310,12 +5318,11 @@ static struct mdk_personality raid6_personality = .sync_request = sync_request, .resize = raid5_resize, .size = raid5_size, - .check_reshape = raid5_check_reshape, + .check_reshape = raid6_check_reshape, .start_reshape = raid5_start_reshape, .finish_reshape = raid5_finish_reshape, .quiesce = raid5_quiesce, .takeover = raid6_takeover, - .reconfig = raid6_reconfig, }; static struct mdk_personality raid5_personality = { @@ -5338,7 +5345,6 @@ static struct mdk_personality raid5_personality = .finish_reshape = raid5_finish_reshape, .quiesce = raid5_quiesce, .takeover = raid5_takeover, - .reconfig = raid5_reconfig, }; static struct mdk_personality raid4_personality = diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 52ba999..9459689 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -334,7 +334,8 @@ struct raid5_private_data { struct hlist_head *stripe_hashtbl; mddev_t *mddev; struct disk_info *spare; - int chunk_size, level, algorithm; + int chunk_sectors; + int level, algorithm; int max_degraded; int raid_disks; int max_nr_stripes; @@ -350,7 +351,8 @@ struct raid5_private_data { */ sector_t reshape_safe; int previous_raid_disks; - int prev_chunk, prev_algo; + int prev_chunk_sectors; + int prev_algo; short generation; /* increments with every reshape */ unsigned long reshape_checkpoint; /* Time we last updated * metadata */ @@ -408,8 +410,6 @@ struct raid5_private_data { typedef struct raid5_private_data raid5_conf_t; -#define mddev_to_conf(mddev) ((raid5_conf_t *) mddev->private) - /* * Our supported algorithms */ diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index ee3927a..491ac0f 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -30,6 +30,7 @@ config MFD_SM501_GPIO config MFD_ASIC3 bool "Support for Compaq ASIC3" depends on GENERIC_HARDIRQS && GPIOLIB && ARM + select MFD_CORE ---help--- This driver supports the ASIC3 multifunction chip found on many PDAs (mainly iPAQ and HTC based ones) @@ -152,7 +153,7 @@ config MFD_WM8400 depends on I2C help Support for the Wolfson Microelectronics WM8400 PMIC and audio - CODEC. This driver adds provides common support for accessing + CODEC. This driver provides common support for accessing the device, additional drivers must be enabled in order to use the functionality of the device. @@ -241,6 +242,27 @@ config PCF50633_GPIO Say yes here if you want to include GPIO support for pins on the PCF50633 chip. +config AB3100_CORE + tristate "ST-Ericsson AB3100 Mixed Signal Circuit core functions" + depends on I2C + default y if ARCH_U300 + help + Select this to enable the AB3100 Mixed Signal IC core + functionality. This connects to an AB3100 on the I2C bus + and exposes a number of symbols needed for dependent devices + to read and write registers and subscribe to events from + this multi-functional IC. This is needed to use other features + of the AB3100 such as battery-backed RTC, charging control, + LEDs, vibrator, system power and temperature, power management + and ALSA sound. + +config EZX_PCAP + bool "PCAP Support" + depends on GENERIC_HARDIRQS && SPI_MASTER + help + This enables the PCAP ASIC present on EZX Phones. This is + needed for MMC, TouchScreen, Sound, USB, etc.
+ endmenu menu "Multimedia Capabilities Port drivers" diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 3afb519..6f8a9a1af 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -26,6 +26,8 @@ obj-$(CONFIG_TWL4030_CORE) += twl4030-core.o twl4030-irq.o obj-$(CONFIG_MFD_CORE) += mfd-core.o +obj-$(CONFIG_EZX_PCAP) += ezx-pcap.o + obj-$(CONFIG_MCP) += mcp-core.o obj-$(CONFIG_MCP_SA11X0) += mcp-sa11x0.o obj-$(CONFIG_MCP_UCB1200) += ucb1x00-core.o @@ -40,4 +42,5 @@ obj-$(CONFIG_PMIC_DA903X) += da903x.o obj-$(CONFIG_MFD_PCF50633) += pcf50633-core.o obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o -obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
\ No newline at end of file +obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o +obj-$(CONFIG_AB3100_CORE) += ab3100-core.o diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c new file mode 100644 index 0000000..13e7d7b --- /dev/null +++ b/drivers/mfd/ab3100-core.c @@ -0,0 +1,991 @@ +/* + * Copyright (C) 2007-2009 ST-Ericsson + * License terms: GNU General Public License (GPL) version 2 + * Low-level core for exclusive access to the AB3100 IC on the I2C bus + * and some basic chip-configuration. + * Author: Linus Walleij <linus.walleij@stericsson.com> + */ + +#include <linux/i2c.h> +#include <linux/mutex.h> +#include <linux/list.h> +#include <linux/notifier.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/uaccess.h> +#include <linux/mfd/ab3100.h> + +/* These are the only registers inside AB3100 used in this main file */ + +/* Interrupt event registers */ +#define AB3100_EVENTA1 0x21 +#define AB3100_EVENTA2 0x22 +#define AB3100_EVENTA3 0x23 + +/* AB3100 DAC converter registers */ +#define AB3100_DIS 0x00 +#define AB3100_D0C 0x01 +#define AB3100_D1C 0x02 +#define AB3100_D2C 0x03 +#define AB3100_D3C 0x04 + +/* Chip ID register */ +#define AB3100_CID 0x20 + +/* AB3100 interrupt registers */ +#define AB3100_IMRA1 0x24 +#define AB3100_IMRA2 0x25 +#define AB3100_IMRA3 0x26 +#define AB3100_IMRB1 0x2B +#define AB3100_IMRB2 0x2C +#define AB3100_IMRB3 0x2D + +/* System Power Monitoring and control registers */ +#define AB3100_MCA 0x2E +#define AB3100_MCB 0x2F + +/* SIM power up */ +#define AB3100_SUP 0x50 + +/* + * I2C communication + * + * The AB3100 is usually assigned address 0x48 (7-bit) + * The chip is defined in the platform i2c_board_data section. + */ +static unsigned short normal_i2c[] = { 0x48, I2C_CLIENT_END }; +I2C_CLIENT_INSMOD_1(ab3100); + +u8 ab3100_get_chip_type(struct ab3100 *ab3100) +{ + u8 chip = ABUNKNOWN; + + switch (ab3100->chip_id & 0xf0) { + case 0xa0: + chip = AB3000; + break; + case 0xc0: + chip = AB3100; + break; + } + return chip; +} +EXPORT_SYMBOL(ab3100_get_chip_type); + +int ab3100_set_register(struct ab3100 *ab3100, u8 reg, u8 regval) +{ + u8 regandval[2] = {reg, regval}; + int err; + + err = mutex_lock_interruptible(&ab3100->access_mutex); + if (err) + return err; + + /* + * A two-byte write message with the first byte containing the register + * number and the second byte containing the value to be written + * effectively sets a register in the AB3100. + */ + err = i2c_master_send(ab3100->i2c_client, regandval, 2); + if (err < 0) { + dev_err(ab3100->dev, + "write error (write register): %d\n", + err); + } else if (err != 2) { + dev_err(ab3100->dev, + "write error (write register) " + "%d bytes transferred (expected 2)\n", + err); + err = -EIO; + } else { + /* All is well */ + err = 0; + } + mutex_unlock(&ab3100->access_mutex); + return err; +} +EXPORT_SYMBOL(ab3100_set_register); + +/* + * The test registers exist at an I2C bus address up one + * from the ordinary base. They are not supposed to be used + * in production code, but sometimes you have to do that + * anyway. It's currently only used from this file so declare + * it static and do not export.
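+ * (For instance, with the chip at its usual address 0x48 the test
+ * registers answer at 0x49; see the i2c_new_dummy() call in the
+ * probe routine below, which binds a second dummy client at
+ * client->addr + 1 for exactly this purpose.)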
+ */ +static int ab3100_set_test_register(struct ab3100 *ab3100, + u8 reg, u8 regval) +{ + u8 regandval[2] = {reg, regval}; + int err; + + err = mutex_lock_interruptible(&ab3100->access_mutex); + if (err) + return err; + + err = i2c_master_send(ab3100->testreg_client, regandval, 2); + if (err < 0) { + dev_err(ab3100->dev, + "write error (write test register): %d\n", + err); + } else if (err != 2) { + dev_err(ab3100->dev, + "write error (write test register) " + "%d bytes transferred (expected 2)\n", + err); + err = -EIO; + } else { + /* All is well */ + err = 0; + } + mutex_unlock(&ab3100->access_mutex); + + return err; +} + +int ab3100_get_register(struct ab3100 *ab3100, u8 reg, u8 *regval) +{ + int err; + + err = mutex_lock_interruptible(&ab3100->access_mutex); + if (err) + return err; + + /* + * The AB3100 requires an I2C "stop" command between each message, else + * it will not work. The only way of achieving this with the + * message transport layer is to send the read and write messages + * separately. + */ + err = i2c_master_send(ab3100->i2c_client, &reg, 1); + if (err < 0) { + dev_err(ab3100->dev, + "write error (send register address): %d\n", + err); + goto get_reg_out_unlock; + } else if (err != 1) { + dev_err(ab3100->dev, + "write error (send register address) " + "%d bytes transferred (expected 1)\n", + err); + err = -EIO; + goto get_reg_out_unlock; + } else { + /* All is well */ + err = 0; + } + + err = i2c_master_recv(ab3100->i2c_client, regval, 1); + if (err < 0) { + dev_err(ab3100->dev, + "write error (read register): %d\n", + err); + goto get_reg_out_unlock; + } else if (err != 1) { + dev_err(ab3100->dev, + "write error (read register) " + "%d bytes transferred (expected 1)\n", + err); + err = -EIO; + goto get_reg_out_unlock; + } else { + /* All is well */ + err = 0; + } + + get_reg_out_unlock: + mutex_unlock(&ab3100->access_mutex); + return err; +} +EXPORT_SYMBOL(ab3100_get_register); + +int ab3100_get_register_page(struct ab3100 *ab3100, + u8 first_reg, u8 *regvals, u8 numregs) +{ + int err; + + if (ab3100->chip_id == 0xa0 || + ab3100->chip_id == 0xa1) + /* These don't support paged reads */ + return -EIO; + + err = mutex_lock_interruptible(&ab3100->access_mutex); + if (err) + return err; + + /* + * Paged reads also require an I2C "stop" command.
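+ * (The interrupt worker below relies on this primitive to fetch the
+ * three event registers AB3100_EVENTA1..AB3100_EVENTA3, 0x21..0x23,
+ * in a single paged read.)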
+ */ + err = i2c_master_send(ab3100->i2c_client, &first_reg, 1); + if (err < 0) { + dev_err(ab3100->dev, + "write error (send first register address): %d\n", + err); + goto get_reg_page_out_unlock; + } else if (err != 1) { + dev_err(ab3100->dev, + "write error (send first register address) " + "%d bytes transferred (expected 1)\n", + err); + err = -EIO; + goto get_reg_page_out_unlock; + } + + err = i2c_master_recv(ab3100->i2c_client, regvals, numregs); + if (err < 0) { + dev_err(ab3100->dev, + "write error (read register page): %d\n", + err); + goto get_reg_page_out_unlock; + } else if (err != numregs) { + dev_err(ab3100->dev, + "write error (read register page) " + "%d bytes transferred (expected %d)\n", + err, numregs); + err = -EIO; + goto get_reg_page_out_unlock; + } + + /* All is well */ + err = 0; + + get_reg_page_out_unlock: + mutex_unlock(&ab3100->access_mutex); + return err; +} +EXPORT_SYMBOL(ab3100_get_register_page); + +int ab3100_mask_and_set_register(struct ab3100 *ab3100, + u8 reg, u8 andmask, u8 ormask) +{ + u8 regandval[2] = {reg, 0}; + int err; + + err = mutex_lock_interruptible(&ab3100->access_mutex); + if (err) + return err; + + /* First read out the target register */ + err = i2c_master_send(ab3100->i2c_client, &reg, 1); + if (err < 0) { + dev_err(ab3100->dev, + "write error (maskset send address): %d\n", + err); + goto get_maskset_unlock; + } else if (err != 1) { + dev_err(ab3100->dev, + "write error (maskset send address) " + "%d bytes transferred (expected 1)\n", + err); + err = -EIO; + goto get_maskset_unlock; + } + + err = i2c_master_recv(ab3100->i2c_client, &regandval[1], 1); + if (err < 0) { + dev_err(ab3100->dev, + "write error (maskset read register): %d\n", + err); + goto get_maskset_unlock; + } else if (err != 1) { + dev_err(ab3100->dev, + "write error (maskset read register) " + "%d bytes transferred (expected 1)\n", + err); + err = -EIO; + goto get_maskset_unlock; + } + + /* Modify the register */ + regandval[1] &= andmask; + regandval[1] |= ormask; + + /* Write the register */ + err = i2c_master_send(ab3100->i2c_client, regandval, 2); + if (err < 0) { + dev_err(ab3100->dev, + "write error (write register): %d\n", + err); + goto get_maskset_unlock; + } else if (err != 2) { + dev_err(ab3100->dev, + "write error (write register) " + "%d bytes transferred (expected 2)\n", + err); + err = -EIO; + goto get_maskset_unlock; + } + + /* All is well */ + err = 0; + + get_maskset_unlock: + mutex_unlock(&ab3100->access_mutex); + return err; +} +EXPORT_SYMBOL(ab3100_mask_and_set_register); + +/* + * Register a simple callback for handling any AB3100 events. + */ +int ab3100_event_register(struct ab3100 *ab3100, + struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&ab3100->event_subscribers, + nb); +} +EXPORT_SYMBOL(ab3100_event_register); + +/* + * Remove a previously registered callback.
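+ *
+ * A subscriber pairs this with ab3100_event_register(); a minimal
+ * sketch of hypothetical client code (names invented for
+ * illustration, not part of this patch):
+ *
+ *   static int my_event(struct notifier_block *nb,
+ *                       unsigned long event, void *data)
+ *   {
+ *           return NOTIFY_OK;
+ *   }
+ *   static struct notifier_block my_nb = {
+ *           .notifier_call = my_event,
+ *   };
+ *
+ *   ab3100_event_register(ab3100, &my_nb);
+ *   ...
+ *   ab3100_event_unregister(ab3100, &my_nb);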
+ */ +int ab3100_event_unregister(struct ab3100 *ab3100, + struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&ab3100->event_subscribers, + nb); +} +EXPORT_SYMBOL(ab3100_event_unregister); + + +int ab3100_event_registers_startup_state_get(struct ab3100 *ab3100, + u32 *fatevent) +{ + if (!ab3100->startup_events_read) + return -EAGAIN; /* Try again later */ + *fatevent = ab3100->startup_events; + return 0; +} +EXPORT_SYMBOL(ab3100_event_registers_startup_state_get); + +/* Interrupt handling worker */ +static void ab3100_work(struct work_struct *work) +{ + struct ab3100 *ab3100 = container_of(work, struct ab3100, work); + u8 event_regs[3]; + u32 fatevent; + int err; + + err = ab3100_get_register_page(ab3100, AB3100_EVENTA1, + event_regs, 3); + if (err) + goto err_event_wq; + + fatevent = (event_regs[0] << 16) | + (event_regs[1] << 8) | + event_regs[2]; + + if (!ab3100->startup_events_read) { + ab3100->startup_events = fatevent; + ab3100->startup_events_read = true; + } + /* + * The notified parties will have to mask out the events + * they're interested in and react to them. They will be + * notified on all events, then they use the fatevent value + * to determine if they're interested. + */ + blocking_notifier_call_chain(&ab3100->event_subscribers, + fatevent, NULL); + + dev_dbg(ab3100->dev, + "IRQ Event: 0x%08x\n", fatevent); + + /* By now the IRQ should be acked and deasserted so enable it again */ + enable_irq(ab3100->i2c_client->irq); + return; + + err_event_wq: + dev_dbg(ab3100->dev, + "error in event workqueue\n"); + /* Enable the IRQ anyway, what choice do we have? */ + enable_irq(ab3100->i2c_client->irq); + return; +} + +static irqreturn_t ab3100_irq_handler(int irq, void *data) +{ + struct ab3100 *ab3100 = data; + /* + * Disable the IRQ and dispatch a worker to handle the + * event. Since the chip resides on I2C this is slow + * stuff and we will re-enable the interrupts once the + * worker has finished.
+ */ + disable_irq(ab3100->i2c_client->irq); + schedule_work(&ab3100->work); + return IRQ_HANDLED; +} + +#ifdef CONFIG_DEBUG_FS +/* + * Some debugfs entries only exposed if we're using debug + */ +static int ab3100_registers_print(struct seq_file *s, void *p) +{ + struct ab3100 *ab3100 = s->private; + u8 value; + u8 reg; + + seq_printf(s, "AB3100 registers:\n"); + + for (reg = 0; reg < 0xff; reg++) { + ab3100_get_register(ab3100, reg, &value); + seq_printf(s, "[0x%x]: 0x%x\n", reg, value); + } + return 0; +} + +static int ab3100_registers_open(struct inode *inode, struct file *file) +{ + return single_open(file, ab3100_registers_print, inode->i_private); +} + +static const struct file_operations ab3100_registers_fops = { + .open = ab3100_registers_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +struct ab3100_get_set_reg_priv { + struct ab3100 *ab3100; + bool mode; +}; + +static int ab3100_get_set_reg_open_file(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static int ab3100_get_set_reg(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ab3100_get_set_reg_priv *priv = file->private_data; + struct ab3100 *ab3100 = priv->ab3100; + char buf[32]; + int buf_size; + int regp; + unsigned long user_reg; + int err; + int i = 0; + + /* Get userspace string and assure termination */ + buf_size = min(count, (sizeof(buf)-1)); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + buf[buf_size] = 0; + + /* + * The idea here is to parse a string which is either + * "0xnn" for reading a register, or "0xaa 0xbb" for + * writing 0xbb to the register 0xaa. First move past + * whitespace and then begin to parse the register. + */ + while ((i < buf_size) && (buf[i] == ' ')) + i++; + regp = i; + + /* + * Advance pointer to end of string then terminate + * the register string. This is needed to satisfy + * the strict_strtoul() function. + */ + while ((i < buf_size) && (buf[i] != ' ')) + i++; + buf[i] = '\0'; + + err = strict_strtoul(&buf[regp], 16, &user_reg); + if (err) + return err; + if (user_reg > 0xff) + return -EINVAL; + + /* Either we read or we write a register here */ + if (!priv->mode) { + /* Reading */ + u8 reg = (u8) user_reg; + u8 regvalue; + + ab3100_get_register(ab3100, reg, &regvalue); + + dev_info(ab3100->dev, + "debug read AB3100 reg[0x%02x]: 0x%02x\n", + reg, regvalue); + } else { + int valp; + unsigned long user_value; + u8 reg = (u8) user_reg; + u8 value; + u8 regvalue; + + /* + * Writing, we need some value to write to + * the register so keep parsing the string + * from userspace.
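+ * (Usage illustration, assuming debugfs is mounted at
+ * /sys/kernel/debug: writing the string "0x50 0x00" to
+ * /sys/kernel/debug/ab3100/set_reg sets register 0x50 to 0x00
+ * and logs the value read back.)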
+ */ + i++; + while ((i < buf_size) && (buf[i] == ' ')) + i++; + valp = i; + while ((i < buf_size) && (buf[i] != ' ')) + i++; + buf[i] = '\0'; + + err = strict_strtoul(&buf[valp], 16, &user_value); + if (err) + return err; + if (user_value > 0xff) + return -EINVAL; + + value = (u8) user_value; + ab3100_set_register(ab3100, reg, value); + ab3100_get_register(ab3100, reg, &regvalue); + + dev_info(ab3100->dev, + "debug write reg[0x%02x] with 0x%02x, " + "after readback: 0x%02x\n", + reg, value, regvalue); + } + return buf_size; +} + +static const struct file_operations ab3100_get_set_reg_fops = { + .open = ab3100_get_set_reg_open_file, + .write = ab3100_get_set_reg, +}; + +static struct dentry *ab3100_dir; +static struct dentry *ab3100_reg_file; +static struct ab3100_get_set_reg_priv ab3100_get_priv; +static struct dentry *ab3100_get_reg_file; +static struct ab3100_get_set_reg_priv ab3100_set_priv; +static struct dentry *ab3100_set_reg_file; + +static void ab3100_setup_debugfs(struct ab3100 *ab3100) +{ + int err; + + ab3100_dir = debugfs_create_dir("ab3100", NULL); + if (!ab3100_dir) + goto exit_no_debugfs; + + ab3100_reg_file = debugfs_create_file("registers", + S_IRUGO, ab3100_dir, ab3100, + &ab3100_registers_fops); + if (!ab3100_reg_file) { + err = -ENOMEM; + goto exit_destroy_dir; + } + + ab3100_get_priv.ab3100 = ab3100; + ab3100_get_priv.mode = false; + ab3100_get_reg_file = debugfs_create_file("get_reg", + S_IWUGO, ab3100_dir, &ab3100_get_priv, + &ab3100_get_set_reg_fops); + if (!ab3100_get_reg_file) { + err = -ENOMEM; + goto exit_destroy_reg; + } + + ab3100_set_priv.ab3100 = ab3100; + ab3100_set_priv.mode = true; + ab3100_set_reg_file = debugfs_create_file("set_reg", + S_IWUGO, ab3100_dir, &ab3100_set_priv, + &ab3100_get_set_reg_fops); + if (!ab3100_set_reg_file) { + err = -ENOMEM; + goto exit_destroy_get_reg; + } + return; + + exit_destroy_get_reg: + debugfs_remove(ab3100_get_reg_file); + exit_destroy_reg: + debugfs_remove(ab3100_reg_file); + exit_destroy_dir: + debugfs_remove(ab3100_dir); + exit_no_debugfs: + return; +} +static inline void ab3100_remove_debugfs(void) +{ + debugfs_remove(ab3100_set_reg_file); + debugfs_remove(ab3100_get_reg_file); + debugfs_remove(ab3100_reg_file); + debugfs_remove(ab3100_dir); +} +#else +static inline void ab3100_setup_debugfs(struct ab3100 *ab3100) +{ +} +static inline void ab3100_remove_debugfs(void) +{ +} +#endif + +/* + * Basic set-up, data structure creation/destruction and I2C interface. + * This sets up a default config in the AB3100 chip so that it + * will work as expected.
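+ * (ab3100_setup() below simply walks the ab3100_init_settings table
+ * and writes each register/value pair to the chip over I2C.)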
+ */ + +struct ab3100_init_setting { + u8 abreg; + u8 setting; +}; + +static const struct ab3100_init_setting __initdata +ab3100_init_settings[] = { + { + .abreg = AB3100_MCA, + .setting = 0x01 + }, { + .abreg = AB3100_MCB, + .setting = 0x30 + }, { + .abreg = AB3100_IMRA1, + .setting = 0x00 + }, { + .abreg = AB3100_IMRA2, + .setting = 0xFF + }, { + .abreg = AB3100_IMRA3, + .setting = 0x01 + }, { + .abreg = AB3100_IMRB1, + .setting = 0xFF + }, { + .abreg = AB3100_IMRB2, + .setting = 0xFF + }, { + .abreg = AB3100_IMRB3, + .setting = 0xFF + }, { + .abreg = AB3100_SUP, + .setting = 0x00 + }, { + .abreg = AB3100_DIS, + .setting = 0xF0 + }, { + .abreg = AB3100_D0C, + .setting = 0x00 + }, { + .abreg = AB3100_D1C, + .setting = 0x00 + }, { + .abreg = AB3100_D2C, + .setting = 0x00 + }, { + .abreg = AB3100_D3C, + .setting = 0x00 + }, +}; + +static int __init ab3100_setup(struct ab3100 *ab3100) +{ + int err = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(ab3100_init_settings); i++) { + err = ab3100_set_register(ab3100, + ab3100_init_settings[i].abreg, + ab3100_init_settings[i].setting); + if (err) + goto exit_no_setup; + } + + /* + * Special trick to make the AB3100 use the 32kHz clock (RTC): + * bit 3 in test register 0x02 is a special, undocumented test + * register bit that only exists in AB3100 P1E + */ + if (ab3100->chip_id == 0xc4) { + dev_warn(ab3100->dev, + "AB3100 P1E variant detected, " + "forcing chip to 32KHz\n"); + err = ab3100_set_test_register(ab3100, 0x02, 0x08); + } + + exit_no_setup: + return err; +} + +/* + * Here we define all the platform devices that appear + * as children of the AB3100. These are regular platform + * devices with the IORESOURCE_IO .start and .end set + * to correspond to the internal AB3100 register range + * mapping to the corresponding subdevice. + */ + +#define AB3100_DEVICE(devname, devid) \ +static struct platform_device ab3100_##devname##_device = { \ + .name = devid, \ + .id = -1, \ +} + +/* + * This lists all the subdevices and corresponding register + * ranges.
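+ *
+ * For example, AB3100_DEVICE(rtc, "ab3100-rtc") expands to:
+ *
+ *   static struct platform_device ab3100_rtc_device = {
+ *           .name = "ab3100-rtc",
+ *           .id = -1,
+ *   };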
+ */ +AB3100_DEVICE(dac, "ab3100-dac"); +AB3100_DEVICE(leds, "ab3100-leds"); +AB3100_DEVICE(power, "ab3100-power"); +AB3100_DEVICE(regulators, "ab3100-regulators"); +AB3100_DEVICE(sim, "ab3100-sim"); +AB3100_DEVICE(uart, "ab3100-uart"); +AB3100_DEVICE(rtc, "ab3100-rtc"); +AB3100_DEVICE(charger, "ab3100-charger"); +AB3100_DEVICE(boost, "ab3100-boost"); +AB3100_DEVICE(adc, "ab3100-adc"); +AB3100_DEVICE(fuelgauge, "ab3100-fuelgauge"); +AB3100_DEVICE(vibrator, "ab3100-vibrator"); +AB3100_DEVICE(otp, "ab3100-otp"); +AB3100_DEVICE(codec, "ab3100-codec"); + +static struct platform_device * +ab3100_platform_devs[] = { + &ab3100_dac_device, + &ab3100_leds_device, + &ab3100_power_device, + &ab3100_regulators_device, + &ab3100_sim_device, + &ab3100_uart_device, + &ab3100_rtc_device, + &ab3100_charger_device, + &ab3100_boost_device, + &ab3100_adc_device, + &ab3100_fuelgauge_device, + &ab3100_vibrator_device, + &ab3100_otp_device, + &ab3100_codec_device, +}; + +struct ab_family_id { + u8 id; + char *name; +}; + +static const struct ab_family_id ids[] __initdata = { + /* AB3100 */ + { + .id = 0xc0, + .name = "P1A" + }, { + .id = 0xc1, + .name = "P1B" + }, { + .id = 0xc2, + .name = "P1C" + }, { + .id = 0xc3, + .name = "P1D" + }, { + .id = 0xc4, + .name = "P1E" + }, { + .id = 0xc5, + .name = "P1F/R1A" + }, { + .id = 0xc6, + .name = "P1G/R1A" + }, { + .id = 0xc7, + .name = "P2A/R2A" + }, { + .id = 0xc8, + .name = "P2B/R2B" + }, + /* AB3000 variants, not supported */ + { + .id = 0xa0 + }, { + .id = 0xa1 + }, { + .id = 0xa2 + }, { + .id = 0xa3 + }, { + .id = 0xa4 + }, { + .id = 0xa5 + }, { + .id = 0xa6 + }, { + .id = 0xa7 + }, + /* Terminator */ + { + .id = 0x00, + }, +}; + +static int __init ab3100_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct ab3100 *ab3100; + int err; + int i; + + ab3100 = kzalloc(sizeof(struct ab3100), GFP_KERNEL); + if (!ab3100) { + dev_err(&client->dev, "could not allocate AB3100 device\n"); + return -ENOMEM; + } + + /* Initialize data structure */ + mutex_init(&ab3100->access_mutex); + BLOCKING_INIT_NOTIFIER_HEAD(&ab3100->event_subscribers); + + ab3100->i2c_client = client; + ab3100->dev = &ab3100->i2c_client->dev; + + i2c_set_clientdata(client, ab3100); + + /* Read chip ID register */ + err = ab3100_get_register(ab3100, AB3100_CID, + &ab3100->chip_id); + if (err) { + dev_err(&client->dev, + "could not communicate with the AB3100 analog " + "baseband chip\n"); + goto exit_no_detect; + } + + for (i = 0; ids[i].id != 0x0; i++) { + if (ids[i].id == ab3100->chip_id) { + if (ids[i].name != NULL) { + snprintf(&ab3100->chip_name[0], + sizeof(ab3100->chip_name) - 1, + "AB3100 %s", + ids[i].name); + break; + } else { + dev_err(&client->dev, + "AB3000 is not supported\n"); + goto exit_no_detect; + } + } + } + + if (ids[i].id == 0x0) { + dev_err(&client->dev, "unknown analog baseband chip id: 0x%x\n", + ab3100->chip_id); + dev_err(&client->dev, "accepting it anyway. 
Please update " + "the driver.\n"); + goto exit_no_detect; + } + + dev_info(&client->dev, "Detected chip: %s\n", + &ab3100->chip_name[0]); + + /* Attach a second dummy i2c_client to the test register address */ + ab3100->testreg_client = i2c_new_dummy(client->adapter, + client->addr + 1); + if (!ab3100->testreg_client) { + err = -ENOMEM; + goto exit_no_testreg_client; + } + + strlcpy(ab3100->testreg_client->name, id->name, + sizeof(ab3100->testreg_client->name)); + + err = ab3100_setup(ab3100); + if (err) + goto exit_no_setup; + + INIT_WORK(&ab3100->work, ab3100_work); + + /* This real unpredictable IRQ is of course sampled for entropy */ + err = request_irq(client->irq, ab3100_irq_handler, + IRQF_DISABLED | IRQF_SAMPLE_RANDOM, + "AB3100 IRQ", ab3100); + if (err) + goto exit_no_irq; + + /* Set parent and a pointer back to the container in device data */ + for (i = 0; i < ARRAY_SIZE(ab3100_platform_devs); i++) { + ab3100_platform_devs[i]->dev.parent = + &client->dev; + platform_set_drvdata(ab3100_platform_devs[i], ab3100); + } + + /* Register the platform devices */ + platform_add_devices(ab3100_platform_devs, + ARRAY_SIZE(ab3100_platform_devs)); + + ab3100_setup_debugfs(ab3100); + + return 0; + + exit_no_irq: + exit_no_setup: + i2c_unregister_device(ab3100->testreg_client); + exit_no_testreg_client: + exit_no_detect: + kfree(ab3100); + return err; +} + +static int __exit ab3100_remove(struct i2c_client *client) +{ + struct ab3100 *ab3100 = i2c_get_clientdata(client); + int i; + + /* Unregister subdevices */ + for (i = 0; i < ARRAY_SIZE(ab3100_platform_devs); i++) + platform_device_unregister(ab3100_platform_devs[i]); + + ab3100_remove_debugfs(); + i2c_unregister_device(ab3100->testreg_client); + + /* + * At this point, all subscribers should have unregistered + * their notifiers so deactivate IRQ + */ + free_irq(client->irq, ab3100); + kfree(ab3100); + return 0; +} + +static const struct i2c_device_id ab3100_id[] = { + { "ab3100", ab3100 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, ab3100_id); + +static struct i2c_driver ab3100_driver = { + .driver = { + .name = "ab3100", + .owner = THIS_MODULE, + }, + .id_table = ab3100_id, + .probe = ab3100_probe, + .remove = __exit_p(ab3100_remove), +}; + +static int __init ab3100_i2c_init(void) +{ + return i2c_add_driver(&ab3100_driver); +} + +static void __exit ab3100_i2c_exit(void) +{ + i2c_del_driver(&ab3100_driver); +} + +subsys_initcall(ab3100_i2c_init); +module_exit(ab3100_i2c_exit); + +MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>"); +MODULE_DESCRIPTION("AB3100 core driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index 9e48545..63a2a66 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c @@ -17,6 +17,7 @@ */ #include <linux/kernel.h> +#include <linux/delay.h> #include <linux/irq.h> #include <linux/gpio.h> #include <linux/io.h> @@ -24,6 +25,51 @@ #include <linux/platform_device.h> #include <linux/mfd/asic3.h> +#include <linux/mfd/core.h> +#include <linux/mfd/ds1wm.h> +#include <linux/mfd/tmio.h> + +enum { + ASIC3_CLOCK_SPI, + ASIC3_CLOCK_OWM, + ASIC3_CLOCK_PWM0, + ASIC3_CLOCK_PWM1, + ASIC3_CLOCK_LED0, + ASIC3_CLOCK_LED1, + ASIC3_CLOCK_LED2, + ASIC3_CLOCK_SD_HOST, + ASIC3_CLOCK_SD_BUS, + ASIC3_CLOCK_SMBUS, + ASIC3_CLOCK_EX0, + ASIC3_CLOCK_EX1, +}; + +struct asic3_clk { + int enabled; + unsigned int cdex; + unsigned long rate; +}; + +#define INIT_CDEX(_name, _rate) \ + [ASIC3_CLOCK_##_name] = { \ + .cdex = CLOCK_CDEX_##_name, \ + .rate = _rate, \ + } + +struct asic3_clk asic3_clk_init[] 
__initdata = { + INIT_CDEX(SPI, 0), + INIT_CDEX(OWM, 5000000), + INIT_CDEX(PWM0, 0), + INIT_CDEX(PWM1, 0), + INIT_CDEX(LED0, 0), + INIT_CDEX(LED1, 0), + INIT_CDEX(LED2, 0), + INIT_CDEX(SD_HOST, 24576000), + INIT_CDEX(SD_BUS, 12288000), + INIT_CDEX(SMBUS, 0), + INIT_CDEX(EX0, 32768), + INIT_CDEX(EX1, 24576000), +}; struct asic3 { void __iomem *mapping; @@ -34,6 +80,8 @@ struct asic3 { u16 irq_bothedge[4]; struct gpio_chip gpio; struct device *dev; + + struct asic3_clk clocks[ARRAY_SIZE(asic3_clk_init)]; }; static int asic3_gpio_get(struct gpio_chip *chip, unsigned offset); @@ -52,6 +100,21 @@ static inline u32 asic3_read_register(struct asic3 *asic, (reg >> asic->bus_shift)); } +void asic3_set_register(struct asic3 *asic, u32 reg, u32 bits, bool set) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(&asic->lock, flags); + val = asic3_read_register(asic, reg); + if (set) + val |= bits; + else + val &= ~bits; + asic3_write_register(asic, reg, val); + spin_unlock_irqrestore(&asic->lock, flags); +} + /* IRQs */ #define MAX_ASIC_ISR_LOOPS 20 #define ASIC3_GPIO_BASE_INCR \ @@ -525,6 +588,240 @@ static int asic3_gpio_remove(struct platform_device *pdev) return gpiochip_remove(&asic->gpio); } +static int asic3_clk_enable(struct asic3 *asic, struct asic3_clk *clk) +{ + unsigned long flags; + u32 cdex; + + spin_lock_irqsave(&asic->lock, flags); + if (clk->enabled++ == 0) { + cdex = asic3_read_register(asic, ASIC3_OFFSET(CLOCK, CDEX)); + cdex |= clk->cdex; + asic3_write_register(asic, ASIC3_OFFSET(CLOCK, CDEX), cdex); + } + spin_unlock_irqrestore(&asic->lock, flags); + + return 0; +} + +static void asic3_clk_disable(struct asic3 *asic, struct asic3_clk *clk) +{ + unsigned long flags; + u32 cdex; + + WARN_ON(clk->enabled == 0); + + spin_lock_irqsave(&asic->lock, flags); + if (--clk->enabled == 0) { + cdex = asic3_read_register(asic, ASIC3_OFFSET(CLOCK, CDEX)); + cdex &= ~clk->cdex; + asic3_write_register(asic, ASIC3_OFFSET(CLOCK, CDEX), cdex); + } + spin_unlock_irqrestore(&asic->lock, flags); +} + +/* MFD cells (SPI, PWM, LED, DS1WM, MMC) */ +static struct ds1wm_driver_data ds1wm_pdata = { + .active_high = 1, +}; + +static struct resource ds1wm_resources[] = { + { + .start = ASIC3_OWM_BASE, + .end = ASIC3_OWM_BASE + 0x13, + .flags = IORESOURCE_MEM, + }, + { + .start = ASIC3_IRQ_OWM, + .end = ASIC3_IRQ_OWM, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, + }, +}; + +static int ds1wm_enable(struct platform_device *pdev) +{ + struct asic3 *asic = dev_get_drvdata(pdev->dev.parent); + + /* Turn on external clocks and the OWM clock */ + asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_EX0]); + asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_EX1]); + asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_OWM]); + msleep(1); + + /* Reset and enable DS1WM */ + asic3_set_register(asic, ASIC3_OFFSET(EXTCF, RESET), + ASIC3_EXTCF_OWM_RESET, 1); + msleep(1); + asic3_set_register(asic, ASIC3_OFFSET(EXTCF, RESET), + ASIC3_EXTCF_OWM_RESET, 0); + msleep(1); + asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT), + ASIC3_EXTCF_OWM_EN, 1); + msleep(1); + + return 0; +} + +static int ds1wm_disable(struct platform_device *pdev) +{ + struct asic3 *asic = dev_get_drvdata(pdev->dev.parent); + + asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT), + ASIC3_EXTCF_OWM_EN, 0); + + asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_OWM]); + asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_EX0]); + asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_EX1]); + + return 0; +} +
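+/*
+ * (Editorial note: asic3_clk_enable()/asic3_clk_disable() refcount
+ * each clock through clk->enabled, so nested enables from several
+ * cells touch the CDEX register only on the first enable and the
+ * matching last disable.)
+ */
+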
.name = "ds1wm", + .enable = ds1wm_enable, + .disable = ds1wm_disable, + .driver_data = &ds1wm_pdata, + .num_resources = ARRAY_SIZE(ds1wm_resources), + .resources = ds1wm_resources, +}; + +static struct tmio_mmc_data asic3_mmc_data = { + .hclk = 24576000, +}; + +static struct resource asic3_mmc_resources[] = { + { + .start = ASIC3_SD_CTRL_BASE, + .end = ASIC3_SD_CTRL_BASE + 0x3ff, + .flags = IORESOURCE_MEM, + }, + { + .start = ASIC3_SD_CONFIG_BASE, + .end = ASIC3_SD_CONFIG_BASE + 0x1ff, + .flags = IORESOURCE_MEM, + }, + { + .start = 0, + .end = 0, + .flags = IORESOURCE_IRQ, + }, +}; + +static int asic3_mmc_enable(struct platform_device *pdev) +{ + struct asic3 *asic = dev_get_drvdata(pdev->dev.parent); + + /* Not sure if it must be done bit by bit, but leaving as-is */ + asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF), + ASIC3_SDHWCTRL_LEVCD, 1); + asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF), + ASIC3_SDHWCTRL_LEVWP, 1); + asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF), + ASIC3_SDHWCTRL_SUSPEND, 0); + asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF), + ASIC3_SDHWCTRL_PCLR, 0); + + asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_EX0]); + /* CLK32 used for card detection and for interruption detection + * when HCLK is stopped. + */ + asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_EX1]); + msleep(1); + + /* HCLK 24.576 MHz, BCLK 12.288 MHz: */ + asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL), + CLOCK_SEL_CX | CLOCK_SEL_SD_HCLK_SEL); + + asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_SD_HOST]); + asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_SD_BUS]); + msleep(1); + + asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT), + ASIC3_EXTCF_SD_MEM_ENABLE, 1); + + /* Enable SD card slot 3.3V power supply */ + asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF), + ASIC3_SDHWCTRL_SDPWR, 1); + + return 0; +} + +static int asic3_mmc_disable(struct platform_device *pdev) +{ + struct asic3 *asic = dev_get_drvdata(pdev->dev.parent); + + /* Put in suspend mode */ + asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF), + ASIC3_SDHWCTRL_SUSPEND, 1); + + /* Disable clocks */ + asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_SD_HOST]); + asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_SD_BUS]); + asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_EX0]); + asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_EX1]); + return 0; +} + +static struct mfd_cell asic3_cell_mmc = { + .name = "tmio-mmc", + .enable = asic3_mmc_enable, + .disable = asic3_mmc_disable, + .driver_data = &asic3_mmc_data, + .num_resources = ARRAY_SIZE(asic3_mmc_resources), + .resources = asic3_mmc_resources, +}; + +static int __init asic3_mfd_probe(struct platform_device *pdev, + struct resource *mem) +{ + struct asic3 *asic = platform_get_drvdata(pdev); + struct resource *mem_sdio; + int irq, ret; + + mem_sdio = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!mem_sdio) + dev_dbg(asic->dev, "no SDIO MEM resource\n"); + + irq = platform_get_irq(pdev, 1); + if (irq < 0) + dev_dbg(asic->dev, "no SDIO IRQ resource\n"); + + /* DS1WM */ + asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT), + ASIC3_EXTCF_OWM_SMB, 0); + + ds1wm_resources[0].start >>= asic->bus_shift; + ds1wm_resources[0].end >>= asic->bus_shift; + + asic3_cell_ds1wm.platform_data = &asic3_cell_ds1wm; + asic3_cell_ds1wm.data_size = sizeof(asic3_cell_ds1wm); + + /* MMC */ + asic3_mmc_resources[0].start >>= asic->bus_shift; + asic3_mmc_resources[0].end >>= asic->bus_shift; + asic3_mmc_resources[1].start >>= asic->bus_shift; + 
asic3_mmc_resources[1].end >>= asic->bus_shift; + + asic3_cell_mmc.platform_data = &asic3_cell_mmc; + asic3_cell_mmc.data_size = sizeof(asic3_cell_mmc); + + ret = mfd_add_devices(&pdev->dev, pdev->id, + &asic3_cell_ds1wm, 1, mem, asic->irq_base); + if (ret < 0) + goto out; + + if (mem_sdio && (irq >= 0)) + ret = mfd_add_devices(&pdev->dev, pdev->id, + &asic3_cell_mmc, 1, mem_sdio, irq); + + out: + return ret; +} + +static void asic3_mfd_remove(struct platform_device *pdev) +{ + mfd_remove_devices(&pdev->dev); +} /* Core */ static int __init asic3_probe(struct platform_device *pdev) @@ -533,7 +830,6 @@ static int __init asic3_probe(struct platform_device *pdev) struct asic3 *asic; struct resource *mem; unsigned long clksel; - int map_size; int ret = 0; asic = kzalloc(sizeof(struct asic3), GFP_KERNEL); @@ -553,8 +849,7 @@ static int __init asic3_probe(struct platform_device *pdev) goto out_free; } - map_size = mem->end - mem->start + 1; - asic->mapping = ioremap(mem->start, map_size); + asic->mapping = ioremap(mem->start, resource_size(mem)); if (!asic->mapping) { ret = -ENOMEM; dev_err(asic->dev, "Couldn't ioremap\n"); @@ -564,7 +859,7 @@ static int __init asic3_probe(struct platform_device *pdev) asic->irq_base = pdata->irq_base; /* calculate bus shift from mem resource */ - asic->bus_shift = 2 - (map_size >> 12); + asic->bus_shift = 2 - (resource_size(mem) >> 12); clksel = 0; asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL), clksel); @@ -590,6 +885,13 @@ static int __init asic3_probe(struct platform_device *pdev) goto out_irq; } + /* Making a per-device copy is only needed for the + * theoretical case of multiple ASIC3s on one board: + */ + memcpy(asic->clocks, asic3_clk_init, sizeof(asic3_clk_init)); + + asic3_mfd_probe(pdev, mem); + dev_info(asic->dev, "ASIC3 Core driver\n"); return 0; @@ -611,6 +913,8 @@ static int asic3_remove(struct platform_device *pdev) int ret; struct asic3 *asic = platform_get_drvdata(pdev); + asic3_mfd_remove(pdev); + ret = asic3_gpio_remove(pdev); if (ret < 0) return ret; diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c index 7283d88..e5ffe56 100644 --- a/drivers/mfd/da903x.c +++ b/drivers/mfd/da903x.c @@ -561,7 +561,7 @@ static int __init da903x_init(void) { return i2c_add_driver(&da903x_driver); } -module_init(da903x_init); +subsys_initcall(da903x_init); static void __exit da903x_exit(void) { diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c new file mode 100644 index 0000000..671a7ef --- /dev/null +++ b/drivers/mfd/ezx-pcap.c @@ -0,0 +1,505 @@ +/* + * Driver for Motorola PCAP2 as present in EZX phones + * + * Copyright (C) 2006 Harald Welte <laforge@openezx.org> + * Copyright (C) 2009 Daniel Ribeiro <drwyrm@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
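The ioremap() conversion above is purely mechanical: resource_size() from <linux/ioport.h> computes exactly what the removed map_size variable did,

        static inline resource_size_t resource_size(const struct resource *res)
        {
                return res->end - res->start + 1;
        }

so the later bus_shift derivation from the mapping size is unchanged as well.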
+ * + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/mfd/ezx-pcap.h> +#include <linux/spi/spi.h> + +#define PCAP_ADC_MAXQ 8 +struct pcap_adc_request { + u8 bank; + u8 ch[2]; + u32 flags; + void (*callback)(void *, u16[]); + void *data; +}; + +struct pcap_adc_sync_request { + u16 res[2]; + struct completion completion; +}; + +struct pcap_chip { + struct spi_device *spi; + + /* IO */ + u32 buf; + struct mutex io_mutex; + + /* IRQ */ + unsigned int irq_base; + u32 msr; + struct work_struct isr_work; + struct work_struct msr_work; + struct workqueue_struct *workqueue; + + /* ADC */ + struct pcap_adc_request *adc_queue[PCAP_ADC_MAXQ]; + u8 adc_head; + u8 adc_tail; + struct mutex adc_mutex; +}; + +/* IO */ +static int ezx_pcap_putget(struct pcap_chip *pcap, u32 *data) +{ + struct spi_transfer t; + struct spi_message m; + int status; + + memset(&t, 0, sizeof t); + spi_message_init(&m); + t.len = sizeof(u32); + spi_message_add_tail(&t, &m); + + pcap->buf = *data; + t.tx_buf = (u8 *) &pcap->buf; + t.rx_buf = (u8 *) &pcap->buf; + status = spi_sync(pcap->spi, &m); + + if (status == 0) + *data = pcap->buf; + + return status; +} + +int ezx_pcap_write(struct pcap_chip *pcap, u8 reg_num, u32 value) +{ + int ret; + + mutex_lock(&pcap->io_mutex); + value &= PCAP_REGISTER_VALUE_MASK; + value |= PCAP_REGISTER_WRITE_OP_BIT + | (reg_num << PCAP_REGISTER_ADDRESS_SHIFT); + ret = ezx_pcap_putget(pcap, &value); + mutex_unlock(&pcap->io_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(ezx_pcap_write); + +int ezx_pcap_read(struct pcap_chip *pcap, u8 reg_num, u32 *value) +{ + int ret; + + mutex_lock(&pcap->io_mutex); + *value = PCAP_REGISTER_READ_OP_BIT + | (reg_num << PCAP_REGISTER_ADDRESS_SHIFT); + + ret = ezx_pcap_putget(pcap, value); + mutex_unlock(&pcap->io_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(ezx_pcap_read); + +/* IRQ */ +static inline unsigned int irq2pcap(struct pcap_chip *pcap, int irq) +{ + return 1 << (irq - pcap->irq_base); +} + +int pcap_to_irq(struct pcap_chip *pcap, int irq) +{ + return pcap->irq_base + irq; +} +EXPORT_SYMBOL_GPL(pcap_to_irq); + +static void pcap_mask_irq(unsigned int irq) +{ + struct pcap_chip *pcap = get_irq_chip_data(irq); + + pcap->msr |= irq2pcap(pcap, irq); + queue_work(pcap->workqueue, &pcap->msr_work); +} + +static void pcap_unmask_irq(unsigned int irq) +{ + struct pcap_chip *pcap = get_irq_chip_data(irq); + + pcap->msr &= ~irq2pcap(pcap, irq); + queue_work(pcap->workqueue, &pcap->msr_work); +} + +static struct irq_chip pcap_irq_chip = { + .name = "pcap", + .mask = pcap_mask_irq, + .unmask = pcap_unmask_irq, +}; + +static void pcap_msr_work(struct work_struct *work) +{ + struct pcap_chip *pcap = container_of(work, struct pcap_chip, msr_work); + + ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr); +} + +static void pcap_isr_work(struct work_struct *work) +{ + struct pcap_chip *pcap = container_of(work, struct pcap_chip, isr_work); + struct pcap_platform_data *pdata = pcap->spi->dev.platform_data; + u32 msr, isr, int_sel, service; + int irq; + + ezx_pcap_read(pcap, PCAP_REG_MSR, &msr); + ezx_pcap_read(pcap, PCAP_REG_ISR, &isr); + + /* We cant service/ack irqs that are assigned to port 2 */ + if (!(pdata->config & PCAP_SECOND_PORT)) { + ezx_pcap_read(pcap, PCAP_REG_INT_SEL, &int_sel); + isr &= ~int_sel; + } + ezx_pcap_write(pcap, PCAP_REG_ISR, isr); + + local_irq_disable(); + service = isr & ~msr; + + for (irq = pcap->irq_base; service; service >>= 1, irq++) { + 
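Note that io_mutex serializes individual SPI transfers only; the driver exports no read-modify-write primitive, so a caller that must update register bits atomically with respect to other users needs its own locking on top. A hedged sketch of such a caller (the helper name is hypothetical; the accessor signatures are the ones exported above):

        /* Hypothetical helper: OR bits into a PCAP register. Not atomic
         * against other writers of the same register. */
        static int pcap_set_bits_example(struct pcap_chip *pcap, u8 reg, u32 bits)
        {
                u32 val;
                int ret;

                ret = ezx_pcap_read(pcap, reg, &val);
                if (ret)
                        return ret;
                return ezx_pcap_write(pcap, reg, val | bits);
        }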
if (service & 1) { + struct irq_desc *desc = irq_to_desc(irq); + + if (WARN(!desc, KERN_WARNING + "Invalid PCAP IRQ %d\n", irq)) + break; + + if (desc->status & IRQ_DISABLED) + note_interrupt(irq, desc, IRQ_NONE); + else + desc->handle_irq(irq, desc); + } + } + local_irq_enable(); +} + +static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc) +{ + struct pcap_chip *pcap = get_irq_data(irq); + + desc->chip->ack(irq); + queue_work(pcap->workqueue, &pcap->isr_work); + return; +} + +/* ADC */ +static void pcap_disable_adc(struct pcap_chip *pcap) +{ + u32 tmp; + + ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp); + tmp &= ~(PCAP_ADC_ADEN|PCAP_ADC_BATT_I_ADC|PCAP_ADC_BATT_I_POLARITY); + ezx_pcap_write(pcap, PCAP_REG_ADC, tmp); +} + +static void pcap_adc_trigger(struct pcap_chip *pcap) +{ + u32 tmp; + u8 head; + + mutex_lock(&pcap->adc_mutex); + head = pcap->adc_head; + if (!pcap->adc_queue[head]) { + /* queue is empty, save power */ + pcap_disable_adc(pcap); + mutex_unlock(&pcap->adc_mutex); + return; + } + mutex_unlock(&pcap->adc_mutex); + + /* start conversion on requested bank */ + tmp = pcap->adc_queue[head]->flags | PCAP_ADC_ADEN; + + if (pcap->adc_queue[head]->bank == PCAP_ADC_BANK_1) + tmp |= PCAP_ADC_AD_SEL1; + + ezx_pcap_write(pcap, PCAP_REG_ADC, tmp); + ezx_pcap_write(pcap, PCAP_REG_ADR, PCAP_ADR_ASC); +} + +static irqreturn_t pcap_adc_irq(int irq, void *_pcap) +{ + struct pcap_chip *pcap = _pcap; + struct pcap_adc_request *req; + u16 res[2]; + u32 tmp; + + mutex_lock(&pcap->adc_mutex); + req = pcap->adc_queue[pcap->adc_head]; + + if (WARN(!req, KERN_WARNING "adc irq without pending request\n")) + return IRQ_HANDLED; + + /* read requested channels results */ + ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp); + tmp &= ~(PCAP_ADC_ADA1_MASK | PCAP_ADC_ADA2_MASK); + tmp |= (req->ch[0] << PCAP_ADC_ADA1_SHIFT); + tmp |= (req->ch[1] << PCAP_ADC_ADA2_SHIFT); + ezx_pcap_write(pcap, PCAP_REG_ADC, tmp); + ezx_pcap_read(pcap, PCAP_REG_ADR, &tmp); + res[0] = (tmp & PCAP_ADR_ADD1_MASK) >> PCAP_ADR_ADD1_SHIFT; + res[1] = (tmp & PCAP_ADR_ADD2_MASK) >> PCAP_ADR_ADD2_SHIFT; + + pcap->adc_queue[pcap->adc_head] = NULL; + pcap->adc_head = (pcap->adc_head + 1) & (PCAP_ADC_MAXQ - 1); + mutex_unlock(&pcap->adc_mutex); + + /* pass the results and release memory */ + req->callback(req->data, res); + kfree(req); + + /* trigger next conversion (if any) on queue */ + pcap_adc_trigger(pcap); + + return IRQ_HANDLED; +} + +int pcap_adc_async(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[], + void *callback, void *data) +{ + struct pcap_adc_request *req; + + /* This will be freed after we have a result */ + req = kmalloc(sizeof(struct pcap_adc_request), GFP_KERNEL); + if (!req) + return -ENOMEM; + + req->bank = bank; + req->flags = flags; + req->ch[0] = ch[0]; + req->ch[1] = ch[1]; + req->callback = callback; + req->data = data; + + mutex_lock(&pcap->adc_mutex); + if (pcap->adc_queue[pcap->adc_tail]) { + mutex_unlock(&pcap->adc_mutex); + kfree(req); + return -EBUSY; + } + pcap->adc_queue[pcap->adc_tail] = req; + pcap->adc_tail = (pcap->adc_tail + 1) & (PCAP_ADC_MAXQ - 1); + mutex_unlock(&pcap->adc_mutex); + + /* start conversion */ + pcap_adc_trigger(pcap); + + return 0; +} +EXPORT_SYMBOL_GPL(pcap_adc_async); + +static void pcap_adc_sync_cb(void *param, u16 res[]) +{ + struct pcap_adc_sync_request *req = param; + + req->res[0] = res[0]; + req->res[1] = res[1]; + complete(&req->completion); +} + +int pcap_adc_sync(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[], + u16 res[]) +{ + struct pcap_adc_sync_request 
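pcap_adc_async() queues up to PCAP_ADC_MAXQ two-channel conversions and delivers results from the ADC-done interrupt path. A usage sketch; the function names and channel numbers are illustrative, and PCAP_ADC_BANK_0 is assumed to be defined in <linux/mfd/ezx-pcap.h> alongside the PCAP_ADC_BANK_1 used above:

        static void example_adc_done(void *data, u16 res[])
        {
                pr_info("pcap adc: ch0=%u ch1=%u\n", res[0], res[1]);
        }

        static int example_adc_start(struct pcap_chip *pcap)
        {
                u8 ch[2] = { 0, 1 };    /* bank-relative channel pair */

                return pcap_adc_async(pcap, PCAP_ADC_BANK_0, 0, ch,
                                      example_adc_done, NULL);
        }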
sync_data; + int ret; + + init_completion(&sync_data.completion); + ret = pcap_adc_async(pcap, bank, flags, ch, pcap_adc_sync_cb, + &sync_data); + if (ret) + return ret; + wait_for_completion(&sync_data.completion); + res[0] = sync_data.res[0]; + res[1] = sync_data.res[1]; + + return 0; +} +EXPORT_SYMBOL_GPL(pcap_adc_sync); + +/* subdevs */ +static int pcap_remove_subdev(struct device *dev, void *unused) +{ + platform_device_unregister(to_platform_device(dev)); + return 0; +} + +static int __devinit pcap_add_subdev(struct pcap_chip *pcap, + struct pcap_subdev *subdev) +{ + struct platform_device *pdev; + + pdev = platform_device_alloc(subdev->name, subdev->id); + pdev->dev.parent = &pcap->spi->dev; + pdev->dev.platform_data = subdev->platform_data; + platform_set_drvdata(pdev, pcap); + + return platform_device_add(pdev); +} + +static int __devexit ezx_pcap_remove(struct spi_device *spi) +{ + struct pcap_chip *pcap = dev_get_drvdata(&spi->dev); + struct pcap_platform_data *pdata = spi->dev.platform_data; + int i, adc_irq; + + /* remove all registered subdevs */ + device_for_each_child(&spi->dev, NULL, pcap_remove_subdev); + + /* cleanup ADC */ + adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ? + PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE); + free_irq(adc_irq, pcap); + mutex_lock(&pcap->adc_mutex); + for (i = 0; i < PCAP_ADC_MAXQ; i++) + kfree(pcap->adc_queue[i]); + mutex_unlock(&pcap->adc_mutex); + + /* cleanup irqchip */ + for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) + set_irq_chip_and_handler(i, NULL, NULL); + + destroy_workqueue(pcap->workqueue); + + kfree(pcap); + + return 0; +} + +static int __devinit ezx_pcap_probe(struct spi_device *spi) +{ + struct pcap_platform_data *pdata = spi->dev.platform_data; + struct pcap_chip *pcap; + int i, adc_irq; + int ret = -ENODEV; + + /* platform data is required */ + if (!pdata) + goto ret; + + pcap = kzalloc(sizeof(*pcap), GFP_KERNEL); + if (!pcap) { + ret = -ENOMEM; + goto ret; + } + + mutex_init(&pcap->io_mutex); + mutex_init(&pcap->adc_mutex); + INIT_WORK(&pcap->isr_work, pcap_isr_work); + INIT_WORK(&pcap->msr_work, pcap_msr_work); + dev_set_drvdata(&spi->dev, pcap); + + /* setup spi */ + spi->bits_per_word = 32; + spi->mode = SPI_MODE_0 | (pdata->config & PCAP_CS_AH ? SPI_CS_HIGH : 0); + ret = spi_setup(spi); + if (ret) + goto free_pcap; + + pcap->spi = spi; + + /* setup irq */ + pcap->irq_base = pdata->irq_base; + pcap->workqueue = create_singlethread_workqueue("pcapd"); + if (!pcap->workqueue) { + dev_err(&spi->dev, "cant create pcap thread\n"); + goto free_pcap; + } + + /* redirect interrupts to AP, except adcdone2 */ + if (!(pdata->config & PCAP_SECOND_PORT)) + ezx_pcap_write(pcap, PCAP_REG_INT_SEL, + (1 << PCAP_IRQ_ADCDONE2)); + + /* setup irq chip */ + for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) { + set_irq_chip_and_handler(i, &pcap_irq_chip, handle_simple_irq); + set_irq_chip_data(i, pcap); +#ifdef CONFIG_ARM + set_irq_flags(i, IRQF_VALID); +#else + set_irq_noprobe(i); +#endif + } + + /* mask/ack all PCAP interrupts */ + ezx_pcap_write(pcap, PCAP_REG_MSR, PCAP_MASK_ALL_INTERRUPT); + ezx_pcap_write(pcap, PCAP_REG_ISR, PCAP_CLEAR_INTERRUPT_REGISTER); + pcap->msr = PCAP_MASK_ALL_INTERRUPT; + + set_irq_type(spi->irq, IRQ_TYPE_EDGE_RISING); + set_irq_data(spi->irq, pcap); + set_irq_chained_handler(spi->irq, pcap_irq_handler); + set_irq_wake(spi->irq, 1); + + /* ADC */ + adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ? 
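The synchronous wrapper above simply parks the caller on a completion until that callback fires, so it must not be called from atomic context; usage reduces to (channels again illustrative):

        u16 res[2];
        u8 ch[2] = { 0, 1 };

        if (pcap_adc_sync(pcap, PCAP_ADC_BANK_0, 0, ch, res) == 0)
                pr_debug("pcap adc: %u %u\n", res[0], res[1]);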
+ PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE); + + ret = request_irq(adc_irq, pcap_adc_irq, 0, "ADC", pcap); + if (ret) + goto free_irqchip; + + /* setup subdevs */ + for (i = 0; i < pdata->num_subdevs; i++) { + ret = pcap_add_subdev(pcap, &pdata->subdevs[i]); + if (ret) + goto remove_subdevs; + } + + /* board specific quirks */ + if (pdata->init) + pdata->init(pcap); + + return 0; + +remove_subdevs: + device_for_each_child(&spi->dev, NULL, pcap_remove_subdev); +/* free_adc: */ + free_irq(adc_irq, pcap); +free_irqchip: + for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) + set_irq_chip_and_handler(i, NULL, NULL); +/* destroy_workqueue: */ + destroy_workqueue(pcap->workqueue); +free_pcap: + kfree(pcap); +ret: + return ret; +} + +static struct spi_driver ezxpcap_driver = { + .probe = ezx_pcap_probe, + .remove = __devexit_p(ezx_pcap_remove), + .driver = { + .name = "ezx-pcap", + .owner = THIS_MODULE, + }, +}; + +static int __init ezx_pcap_init(void) +{ + return spi_register_driver(&ezxpcap_driver); +} + +static void __exit ezx_pcap_exit(void) +{ + spi_unregister_driver(&ezxpcap_driver); +} + +module_init(ezx_pcap_init); +module_exit(ezx_pcap_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Daniel Ribeiro / Harald Welte"); +MODULE_DESCRIPTION("Motorola PCAP2 ASIC Driver"); diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c index 082c197..8d3c38b 100644 --- a/drivers/mfd/pcf50633-core.c +++ b/drivers/mfd/pcf50633-core.c @@ -705,5 +705,5 @@ MODULE_DESCRIPTION("I2C chip driver for NXP PCF50633 PMU"); MODULE_AUTHOR("Harald Welte <laforge@openmoko.org>"); MODULE_LICENSE("GPL"); -module_init(pcf50633_init); +subsys_initcall(pcf50633_init); module_exit(pcf50633_exit); diff --git a/drivers/mfd/pcf50633-gpio.c b/drivers/mfd/pcf50633-gpio.c index 2fa2eca..9ab19a8 100644 --- a/drivers/mfd/pcf50633-gpio.c +++ b/drivers/mfd/pcf50633-gpio.c @@ -15,6 +15,7 @@ */ #include <linux/kernel.h> +#include <linux/module.h> #include <linux/mfd/pcf50633/core.h> #include <linux/mfd/pcf50633/gpio.h> @@ -116,3 +117,5 @@ int pcf50633_gpio_power_supply_set(struct pcf50633 *pcf, return pcf50633_reg_set_bit_mask(pcf, reg, mask, val); } EXPORT_SYMBOL_GPL(pcf50633_gpio_power_supply_set); + +MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c index 875f7a8..0a255c1 100644 --- a/drivers/mfd/t7l66xb.c +++ b/drivers/mfd/t7l66xb.c @@ -108,7 +108,7 @@ static int t7l66xb_mmc_disable(struct platform_device *mmc) /*--------------------------------------------------------------------------*/ -static const struct tmio_mmc_data t7166xb_mmc_data = { +static struct tmio_mmc_data t7166xb_mmc_data = { .hclk = 24000000, }; diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c index c3993ac..3280ab33 100644 --- a/drivers/mfd/tc6387xb.c +++ b/drivers/mfd/tc6387xb.c @@ -75,7 +75,7 @@ static int tc6387xb_mmc_disable(struct platform_device *mmc) /*--------------------------------------------------------------------------*/ -const static struct tmio_mmc_data tc6387xb_mmc_data = { +static struct tmio_mmc_data tc6387xb_mmc_data = { .hclk = 24000000, }; diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c index 9d2abb5..1429a73 100644 --- a/drivers/mfd/tc6393xb.c +++ b/drivers/mfd/tc6393xb.c @@ -136,7 +136,7 @@ static int tc6393xb_nand_enable(struct platform_device *nand) return 0; } -const static struct tmio_mmc_data tc6393xb_mmc_data = { +static struct tmio_mmc_data tc6393xb_mmc_data = { .hclk = 24000000, }; diff --git a/drivers/mfd/twl4030-core.c b/drivers/mfd/twl4030-core.c 
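The `const` removals from the tmio_mmc_data instances above are forced by the cell plumbing rather than taste: mfd_cell.driver_data is a plain void pointer, so a const-qualified object cannot be assigned to it without discarding the qualifier. Abridged shape of the structure, as assumed from <linux/mfd/core.h> of this kernel:

        struct mfd_cell {
                const char      *name;

                int             (*enable)(struct platform_device *dev);
                int             (*disable)(struct platform_device *dev);
                int             (*suspend)(struct platform_device *dev);
                int             (*resume)(struct platform_device *dev);

                /* driver-private data; deliberately not const */
                void            *driver_data;

                void            *platform_data;
                size_t          data_size;

                int             num_resources;
                const struct resource *resources;
        };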
index ec90e95..cd1008c 100644 --- a/drivers/mfd/twl4030-core.c +++ b/drivers/mfd/twl4030-core.c @@ -647,7 +647,7 @@ static inline int __init unprotect_pm_master(void) return e; } -static void __init clocks_init(struct device *dev) +static void clocks_init(struct device *dev) { int e = 0; struct clk *osc; diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c index aca2670..bae61b2 100644 --- a/drivers/mfd/twl4030-irq.c +++ b/drivers/mfd/twl4030-irq.c @@ -255,7 +255,7 @@ static int twl4030_irq_thread(void *data) * thread. All we do here is acknowledge and mask the interrupt and wakeup * the kernel thread. */ -static void handle_twl4030_pih(unsigned int irq, irq_desc_t *desc) +static void handle_twl4030_pih(unsigned int irq, struct irq_desc *desc) { /* Acknowledge, clear *AND* mask the interrupt... */ desc->chip->ack(irq); diff --git a/drivers/mfd/wm8350-regmap.c b/drivers/mfd/wm8350-regmap.c index 9a4cc95..7ccc1ea 100644 --- a/drivers/mfd/wm8350-regmap.c +++ b/drivers/mfd/wm8350-regmap.c @@ -3186,7 +3186,7 @@ const struct wm8350_reg_access wm8350_reg_io_map[] = { /* read write volatile */ { 0xFFFF, 0xFFFF, 0xFFFF }, /* R0 - Reset/ID */ { 0x7CFF, 0x0C00, 0x7FFF }, /* R1 - ID */ - { 0x0000, 0x0000, 0x0000 }, /* R2 */ + { 0x007F, 0x0000, 0x0000 }, /* R2 - ROM Mask ID */ { 0xBE3B, 0xBE3B, 0x8000 }, /* R3 - System Control 1 */ { 0xFEF7, 0xFEF7, 0xF800 }, /* R4 - System Control 2 */ { 0x80FF, 0x80FF, 0x8000 }, /* R5 - System Hibernate */ @@ -3411,7 +3411,7 @@ const struct wm8350_reg_access wm8350_reg_io_map[] = { { 0x0000, 0x0000, 0x0000 }, /* R224 */ { 0x8F3F, 0x0000, 0xFFFF }, /* R225 - DCDC/LDO status */ { 0x0000, 0x0000, 0xFFFF }, /* R226 - Charger status */ - { 0x0000, 0x0000, 0xFFFF }, /* R227 */ + { 0x34FE, 0x0000, 0xFFFF }, /* R227 */ { 0x0000, 0x0000, 0x0000 }, /* R228 */ { 0x0000, 0x0000, 0x0000 }, /* R229 */ { 0xFFFF, 0x1FFF, 0xFFFF }, /* R230 - GPIO Pin Status */ diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c index 7c21bf7..ecfc8bb 100644 --- a/drivers/mfd/wm8400-core.c +++ b/drivers/mfd/wm8400-core.c @@ -460,7 +460,7 @@ static int __init wm8400_module_init(void) return ret; } -module_init(wm8400_module_init); +subsys_initcall(wm8400_module_init); static void __exit wm8400_module_exit(void) { diff --git a/drivers/misc/sgi-gru/Makefile b/drivers/misc/sgi-gru/Makefile index bcd8136..7c4c306 100644 --- a/drivers/misc/sgi-gru/Makefile +++ b/drivers/misc/sgi-gru/Makefile @@ -3,5 +3,5 @@ ifdef CONFIG_SGI_GRU_DEBUG endif obj-$(CONFIG_SGI_GRU) := gru.o -gru-y := grufile.o grumain.o grufault.o grutlbpurge.o gruprocfs.o grukservices.o gruhandles.o +gru-y := grufile.o grumain.o grufault.o grutlbpurge.o gruprocfs.o grukservices.o gruhandles.o grukdump.o diff --git a/drivers/misc/sgi-gru/gru_instructions.h b/drivers/misc/sgi-gru/gru_instructions.h index 3fde33c..3c9c066 100644 --- a/drivers/misc/sgi-gru/gru_instructions.h +++ b/drivers/misc/sgi-gru/gru_instructions.h @@ -81,6 +81,8 @@ struct control_block_extended_exc_detail { int exopc; long exceptdet0; int exceptdet1; + int cbrstate; + int cbrexecstatus; }; /* @@ -107,7 +109,8 @@ struct gru_instruction_bits { unsigned char reserved2: 2; unsigned char istatus: 2; unsigned char isubstatus:4; - unsigned char reserved3: 2; + unsigned char reserved3: 1; + unsigned char tlb_fault_color: 1; /* DW 1 */ unsigned long idef4; /* 42 bits: TRi1, BufSize */ /* DW 2-6 */ @@ -250,17 +253,37 @@ struct gru_instruction { #define CBE_CAUSE_HA_RESPONSE_FATAL (1 << 13) #define CBE_CAUSE_HA_RESPONSE_NON_FATAL (1 << 14) #define 
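The module_init() to subsys_initcall() promotions in this series (da903x, pcf50633, and wm8400 above) move these PMIC cores to an earlier boot stage, so consumers of their regulators and GPIOs find them present by normal device-init time. For reference, the initcall ladder from <linux/init.h> (abridged; for built-in code module_init() is device_initcall()):

        #define core_initcall(fn)               __define_initcall("1", fn, 1)
        #define postcore_initcall(fn)           __define_initcall("2", fn, 2)
        #define arch_initcall(fn)               __define_initcall("3", fn, 3)
        #define subsys_initcall(fn)             __define_initcall("4", fn, 4)
        #define fs_initcall(fn)                 __define_initcall("5", fn, 5)
        #define device_initcall(fn)             __define_initcall("6", fn, 6)
        #define late_initcall(fn)               __define_initcall("7", fn, 7)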
CBE_CAUSE_ADDRESS_SPACE_DECODE_ERROR (1 << 15) -#define CBE_CAUSE_RESPONSE_DATA_ERROR (1 << 16) -#define CBE_CAUSE_PROTOCOL_STATE_DATA_ERROR (1 << 17) +#define CBE_CAUSE_PROTOCOL_STATE_DATA_ERROR (1 << 16) +#define CBE_CAUSE_RA_RESPONSE_DATA_ERROR (1 << 17) +#define CBE_CAUSE_HA_RESPONSE_DATA_ERROR (1 << 18) + +/* CBE cbrexecstatus bits */ +#define CBR_EXS_ABORT_OCC_BIT 0 +#define CBR_EXS_INT_OCC_BIT 1 +#define CBR_EXS_PENDING_BIT 2 +#define CBR_EXS_QUEUED_BIT 3 +#define CBR_EXS_TLB_INVAL_BIT 4 +#define CBR_EXS_EXCEPTION_BIT 5 + +#define CBR_EXS_ABORT_OCC (1 << CBR_EXS_ABORT_OCC_BIT) +#define CBR_EXS_INT_OCC (1 << CBR_EXS_INT_OCC_BIT) +#define CBR_EXS_PENDING (1 << CBR_EXS_PENDING_BIT) +#define CBR_EXS_QUEUED (1 << CBR_EXS_QUEUED_BIT) +#define CBR_TLB_INVAL (1 << CBR_EXS_TLB_INVAL_BIT) +#define CBR_EXS_EXCEPTION (1 << CBR_EXS_EXCEPTION_BIT) /* * Exceptions are retried for the following cases. If any OTHER bits are set * in ecause, the exception is not retryable. */ -#define EXCEPTION_RETRY_BITS (CBE_CAUSE_RESPONSE_DATA_ERROR | \ - CBE_CAUSE_RA_REQUEST_TIMEOUT | \ +#define EXCEPTION_RETRY_BITS (CBE_CAUSE_EXECUTION_HW_ERROR | \ CBE_CAUSE_TLBHW_ERROR | \ - CBE_CAUSE_HA_REQUEST_TIMEOUT) + CBE_CAUSE_RA_REQUEST_TIMEOUT | \ + CBE_CAUSE_RA_RESPONSE_NON_FATAL | \ + CBE_CAUSE_HA_RESPONSE_NON_FATAL | \ + CBE_CAUSE_RA_RESPONSE_DATA_ERROR | \ + CBE_CAUSE_HA_RESPONSE_DATA_ERROR \ + ) /* Message queue head structure */ union gru_mesqhead { @@ -600,9 +623,11 @@ static inline int gru_get_cb_substatus(void *cb) return cbs->isubstatus; } -/* Check the status of a CB. If the CB is in UPM mode, call the - * OS to handle the UPM status. - * Returns the CB status field value (0 for normal completion) +/* + * User interface to check an instruction status. UPM and exceptions + * are handled automatically. However, this function does NOT wait + * for an active instruction to complete. + * */ static inline int gru_check_status(void *cb) { @@ -610,34 +635,31 @@ static inline int gru_check_status(void *cb) int ret; ret = cbs->istatus; - if (ret == CBS_CALL_OS) + if (ret != CBS_ACTIVE) ret = gru_check_status_proc(cb); return ret; } -/* Wait for CB to complete. - * Returns the CB status field value (0 for normal completion) +/* + * User interface (via inline function) to wait for an instruction + * to complete. Completion status (IDLE or EXCEPTION is returned + * to the user. Exception due to hardware errors are automatically + * retried before returning an exception. + * */ static inline int gru_wait(void *cb) { - struct gru_control_block_status *cbs = (void *)cb; - int ret = cbs->istatus; - - if (ret != CBS_IDLE) - ret = gru_wait_proc(cb); - return ret; + return gru_wait_proc(cb); } -/* Wait for CB to complete. Aborts program if error. (Note: error does NOT +/* + * Wait for CB to complete. Aborts program if error. (Note: error does NOT * mean TLB mis - only fatal errors such as memory parity error or user * bugs will cause termination. */ static inline void gru_wait_abort(void *cb) { - struct gru_control_block_status *cbs = (void *)cb; - - if (cbs->istatus != CBS_IDLE) - gru_wait_abort_proc(cb); + gru_wait_abort_proc(cb); } diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c index ab11855..679e017 100644 --- a/drivers/misc/sgi-gru/grufault.c +++ b/drivers/misc/sgi-gru/grufault.c @@ -166,7 +166,8 @@ static inline struct gru_state *irq_to_gru(int irq) * the GRU, atomic operations must be used to clear bits. 
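The net effect of the gru_check_status()/gru_wait() rework here is that callers always reach the out-of-line *_proc() helpers, which transparently retry hardware-error exceptions; a caller only has to distinguish CBS_IDLE from CBS_EXCEPTION. The typical kernel-side pattern, mirroring the quicktest0() added later in this patch (variable setup elided):

        gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
        if (gru_wait(cb) != CBS_IDLE) {
                struct control_block_extended_exc_detail excdet;

                gru_get_cb_exception_detail(cb, &excdet);
                return -EIO;            /* non-retryable failure */
        }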
*/ static void get_clear_fault_map(struct gru_state *gru, - struct gru_tlb_fault_map *map) + struct gru_tlb_fault_map *imap, + struct gru_tlb_fault_map *dmap) { unsigned long i, k; struct gru_tlb_fault_map *tfm; @@ -177,7 +178,11 @@ static void get_clear_fault_map(struct gru_state *gru, k = tfm->fault_bits[i]; if (k) k = xchg(&tfm->fault_bits[i], 0UL); - map->fault_bits[i] = k; + imap->fault_bits[i] = k; + k = tfm->done_bits[i]; + if (k) + k = xchg(&tfm->done_bits[i], 0UL); + dmap->fault_bits[i] = k; } /* @@ -334,6 +339,12 @@ static int gru_try_dropin(struct gru_thread_state *gts, * Might be a hardware race OR a stupid user. Ignore FMM because FMM * is a transient state. */ + if (tfh->status != TFHSTATUS_EXCEPTION) { + gru_flush_cache(tfh); + if (tfh->status != TFHSTATUS_EXCEPTION) + goto failnoexception; + STAT(tfh_stale_on_fault); + } if (tfh->state == TFHSTATE_IDLE) goto failidle; if (tfh->state == TFHSTATE_MISS_FMM && cb) @@ -401,8 +412,17 @@ failfmm: gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state); return 0; +failnoexception: + /* TFH status did not show exception pending */ + gru_flush_cache(tfh); + if (cb) + gru_flush_cache(cb); + STAT(tlb_dropin_fail_no_exception); + gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n", tfh, tfh->status, tfh->state); + return 0; + failidle: - /* TFH was idle - no miss pending */ + /* TFH state was idle - no miss pending */ gru_flush_cache(tfh); if (cb) gru_flush_cache(cb); @@ -438,7 +458,7 @@ failactive: irqreturn_t gru_intr(int irq, void *dev_id) { struct gru_state *gru; - struct gru_tlb_fault_map map; + struct gru_tlb_fault_map imap, dmap; struct gru_thread_state *gts; struct gru_tlb_fault_handle *tfh = NULL; int cbrnum, ctxnum; @@ -451,11 +471,15 @@ irqreturn_t gru_intr(int irq, void *dev_id) raw_smp_processor_id(), irq); return IRQ_NONE; } - get_clear_fault_map(gru, &map); - gru_dbg(grudev, "irq %d, gru %x, map 0x%lx\n", irq, gru->gs_gid, - map.fault_bits[0]); + get_clear_fault_map(gru, &imap, &dmap); + + for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) { + complete(gru->gs_blade->bs_async_wq); + gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n", + gru->gs_gid, cbrnum, gru->gs_blade->bs_async_wq->done); + } - for_each_cbr_in_tfm(cbrnum, map.fault_bits) { + for_each_cbr_in_tfm(cbrnum, imap.fault_bits) { tfh = get_tfh_by_index(gru, cbrnum); prefetchw(tfh); /* Helps on hdw, required for emulator */ @@ -472,7 +496,9 @@ irqreturn_t gru_intr(int irq, void *dev_id) * This is running in interrupt context. Trylock the mmap_sem. * If it fails, retry the fault in user context. */ - if (down_read_trylock(>s->ts_mm->mmap_sem)) { + if (!gts->ts_force_cch_reload && + down_read_trylock(>s->ts_mm->mmap_sem)) { + gts->ustats.fmm_tlbdropin++; gru_try_dropin(gts, tfh, NULL); up_read(>s->ts_mm->mmap_sem); } else { @@ -491,6 +517,7 @@ static int gru_user_dropin(struct gru_thread_state *gts, struct gru_mm_struct *gms = gts->ts_gms; int ret; + gts->ustats.upm_tlbdropin++; while (1) { wait_event(gms->ms_wait_queue, atomic_read(&gms->ms_range_active) == 0); @@ -546,8 +573,8 @@ int gru_handle_user_call_os(unsigned long cb) * CCH may contain stale data if ts_force_cch_reload is set. 
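get_clear_fault_map() now snapshots two bitmaps per interrupt: the TLB fault bits (imap) and the new instruction-done bits (dmap) that drive the async-CBR completions. gru_intr() walks each with for_each_cbr_in_tfm(); the real macro lives in grutables.h, but it is equivalent to this sketch:

        #define for_each_cbr_in_tfm(i, map)                             \
                for ((i) = find_first_bit(map, GRU_NUM_CBE);            \
                     (i) < GRU_NUM_CBE;                                 \
                     (i) = find_next_bit(map, GRU_NUM_CBE, (i) + 1))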
*/ if (gts->ts_gru && gts->ts_force_cch_reload) { - gru_update_cch(gts, 0); gts->ts_force_cch_reload = 0; + gru_update_cch(gts, 0); } ret = -EAGAIN; @@ -589,20 +616,26 @@ int gru_get_exception_detail(unsigned long arg) } else if (gts->ts_gru) { cbrnum = thread_cbr_number(gts, ucbnum); cbe = get_cbe_by_index(gts->ts_gru, cbrnum); - prefetchw(cbe);/* Harmless on hardware, required for emulator */ + gru_flush_cache(cbe); /* CBE not coherent */ excdet.opc = cbe->opccpy; excdet.exopc = cbe->exopccpy; excdet.ecause = cbe->ecause; excdet.exceptdet0 = cbe->idef1upd; excdet.exceptdet1 = cbe->idef3upd; + excdet.cbrstate = cbe->cbrstate; + excdet.cbrexecstatus = cbe->cbrexecstatus; + gru_flush_cache(cbe); ret = 0; } else { ret = -EAGAIN; } gru_unlock_gts(gts); - gru_dbg(grudev, "address 0x%lx, ecause 0x%x\n", excdet.cb, - excdet.ecause); + gru_dbg(grudev, + "cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, ecause 0x%x, " + "exdet0 0x%lx, exdet1 0x%x\n", + excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate, excdet.cbrexecstatus, + excdet.ecause, excdet.exceptdet0, excdet.exceptdet1); if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet))) ret = -EFAULT; return ret; @@ -627,7 +660,7 @@ static int gru_unload_all_contexts(void) if (gts && mutex_trylock(>s->ts_ctxlock)) { spin_unlock(&gru->gs_lock); gru_unload_context(gts, 1); - gru_unlock_gts(gts); + mutex_unlock(>s->ts_ctxlock); spin_lock(&gru->gs_lock); } } @@ -669,6 +702,7 @@ int gru_user_flush_tlb(unsigned long arg) { struct gru_thread_state *gts; struct gru_flush_tlb_req req; + struct gru_mm_struct *gms; STAT(user_flush_tlb); if (copy_from_user(&req, (void __user *)arg, sizeof(req))) @@ -681,8 +715,34 @@ int gru_user_flush_tlb(unsigned long arg) if (!gts) return -EINVAL; - gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.len); + gms = gts->ts_gms; gru_unlock_gts(gts); + gru_flush_tlb_range(gms, req.vaddr, req.len); + + return 0; +} + +/* + * Fetch GSEG statisticss + */ +long gru_get_gseg_statistics(unsigned long arg) +{ + struct gru_thread_state *gts; + struct gru_get_gseg_statistics_req req; + + if (copy_from_user(&req, (void __user *)arg, sizeof(req))) + return -EFAULT; + + gts = gru_find_lock_gts(req.gseg); + if (gts) { + memcpy(&req.stats, >s->ustats, sizeof(gts->ustats)); + gru_unlock_gts(gts); + } else { + memset(&req.stats, 0, sizeof(gts->ustats)); + } + + if (copy_to_user((void __user *)arg, &req, sizeof(req))) + return -EFAULT; return 0; } @@ -691,18 +751,34 @@ int gru_user_flush_tlb(unsigned long arg) * Register the current task as the user of the GSEG slice. * Needed for TLB fault interrupt targeting. 
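gru_get_gseg_statistics() exports the new per-GSEG TLB-dropin counters (incremented as ustats.fmm_tlbdropin and ustats.upm_tlbdropin in the fault paths above) to user space. A hypothetical caller, assuming the request layout from grulib.h and gru_fd open on the GRU device:

        struct gru_get_gseg_statistics_req req = {
                .gseg = (unsigned long)gseg_base,       /* GSEG vaddr */
        };

        if (ioctl(gru_fd, GRU_GET_GSEG_STATISTICS, &req) == 0)
                printf("fmm dropins %lu, upm dropins %lu\n",
                       (unsigned long)req.stats.fmm_tlbdropin,
                       (unsigned long)req.stats.upm_tlbdropin);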
*/ -int gru_set_task_slice(long address) +int gru_set_context_option(unsigned long arg) { struct gru_thread_state *gts; + struct gru_set_context_option_req req; + int ret = 0; + + STAT(set_context_option); + if (copy_from_user(&req, (void __user *)arg, sizeof(req))) + return -EFAULT; + gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1); - STAT(set_task_slice); - gru_dbg(grudev, "address 0x%lx\n", address); - gts = gru_alloc_locked_gts(address); + gts = gru_alloc_locked_gts(req.gseg); if (!gts) return -EINVAL; - gts->ts_tgid_owner = current->tgid; + switch (req.op) { + case sco_gseg_owner: + /* Register the current task as the GSEG owner */ + gts->ts_tgid_owner = current->tgid; + break; + case sco_cch_req_slice: + /* Set the CCH slice option */ + gts->ts_cch_req_slice = req.val1 & 3; + break; + default: + ret = -EINVAL; + } gru_unlock_gts(gts); - return 0; + return ret; } diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c index 3ce2920..fa2d93a 100644 --- a/drivers/misc/sgi-gru/grufile.c +++ b/drivers/misc/sgi-gru/grufile.c @@ -46,6 +46,7 @@ struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly; unsigned long gru_start_paddr __read_mostly; +void *gru_start_vaddr __read_mostly; unsigned long gru_end_paddr __read_mostly; unsigned int gru_max_gids __read_mostly; struct gru_stats_s gru_stats; @@ -135,11 +136,9 @@ static int gru_create_new_context(unsigned long arg) if (copy_from_user(&req, (void __user *)arg, sizeof(req))) return -EFAULT; - if (req.data_segment_bytes == 0 || - req.data_segment_bytes > max_user_dsr_bytes) + if (req.data_segment_bytes > max_user_dsr_bytes) return -EINVAL; - if (!req.control_blocks || !req.maximum_thread_count || - req.control_blocks > max_user_cbrs) + if (req.control_blocks > max_user_cbrs || !req.maximum_thread_count) return -EINVAL; if (!(req.options & GRU_OPT_MISS_MASK)) @@ -184,41 +183,6 @@ static long gru_get_config_info(unsigned long arg) } /* - * Get GRU chiplet status - */ -static long gru_get_chiplet_status(unsigned long arg) -{ - struct gru_state *gru; - struct gru_chiplet_info info; - - if (copy_from_user(&info, (void __user *)arg, sizeof(info))) - return -EFAULT; - - if (info.node == -1) - info.node = numa_node_id(); - if (info.node >= num_possible_nodes() || - info.chiplet >= GRU_CHIPLETS_PER_HUB || - info.node < 0 || info.chiplet < 0) - return -EINVAL; - - info.blade = uv_node_to_blade_id(info.node); - gru = get_gru(info.blade, info.chiplet); - - info.total_dsr_bytes = GRU_NUM_DSR_BYTES; - info.total_cbr = GRU_NUM_CB; - info.total_user_dsr_bytes = GRU_NUM_DSR_BYTES - - gru->gs_reserved_dsr_bytes; - info.total_user_cbr = GRU_NUM_CB - gru->gs_reserved_cbrs; - info.free_user_dsr_bytes = hweight64(gru->gs_dsr_map) * - GRU_DSR_AU_BYTES; - info.free_user_cbr = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE; - - if (copy_to_user((void __user *)arg, &info, sizeof(info))) - return -EFAULT; - return 0; -} - -/* * gru_file_unlocked_ioctl * * Called to update file attributes via IOCTL calls. 
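gru_set_context_option() generalizes the single-purpose gru_set_task_slice() it replaces: the request now carries an op code plus a value, so further per-context knobs (such as the CCH slice request handled above) need no new ioctl numbers. A hedged user-space sketch, with the struct and enum assumed from grulib.h and field names taken from the kernel side:

        struct gru_set_context_option_req req = {
                .gseg = (unsigned long)gseg_base,
                .op   = sco_gseg_owner,         /* current task owns the GSEG */
                .val1 = 0,
        };

        if (ioctl(gru_fd, GRU_SET_CONTEXT_OPTION, &req) < 0)
                perror("GRU_SET_CONTEXT_OPTION");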
@@ -234,8 +198,8 @@ static long gru_file_unlocked_ioctl(struct file *file, unsigned int req, case GRU_CREATE_CONTEXT: err = gru_create_new_context(arg); break; - case GRU_SET_TASK_SLICE: - err = gru_set_task_slice(arg); + case GRU_SET_CONTEXT_OPTION: + err = gru_set_context_option(arg); break; case GRU_USER_GET_EXCEPTION_DETAIL: err = gru_get_exception_detail(arg); @@ -243,18 +207,24 @@ static long gru_file_unlocked_ioctl(struct file *file, unsigned int req, case GRU_USER_UNLOAD_CONTEXT: err = gru_user_unload_context(arg); break; - case GRU_GET_CHIPLET_STATUS: - err = gru_get_chiplet_status(arg); - break; case GRU_USER_FLUSH_TLB: err = gru_user_flush_tlb(arg); break; case GRU_USER_CALL_OS: err = gru_handle_user_call_os(arg); break; + case GRU_GET_GSEG_STATISTICS: + err = gru_get_gseg_statistics(arg); + break; + case GRU_KTEST: + err = gru_ktest(arg); + break; case GRU_GET_CONFIG_INFO: err = gru_get_config_info(arg); break; + case GRU_DUMP_CHIPLET_STATE: + err = gru_dump_chiplet_request(arg); + break; } return err; } @@ -282,7 +252,6 @@ static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr, gru_dbg(grudev, "bid %d, nid %d, gid %d, vaddr %p (0x%lx)\n", bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr, gru->gs_gru_base_paddr); - gru_kservices_init(gru); } static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr) @@ -309,6 +278,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr) memset(gru_base[bid], 0, sizeof(struct gru_blade_state)); gru_base[bid]->bs_lru_gru = &gru_base[bid]->bs_grus[0]; spin_lock_init(&gru_base[bid]->bs_lock); + init_rwsem(&gru_base[bid]->bs_kgts_sema); dsrbytes = 0; cbrs = 0; @@ -372,7 +342,6 @@ static int __init gru_init(void) { int ret, irq, chip; char id[10]; - void *gru_start_vaddr; if (!is_uv_system()) return 0; @@ -422,6 +391,7 @@ static int __init gru_init(void) printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR); goto exit3; } + gru_kservices_init(); printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR, GRU_DRIVER_VERSION_STR); @@ -440,7 +410,7 @@ exit1: static void __exit gru_exit(void) { - int i, bid, gid; + int i, bid; int order = get_order(sizeof(struct gru_state) * GRU_CHIPLETS_PER_BLADE); @@ -449,10 +419,7 @@ static void __exit gru_exit(void) for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++) free_irq(IRQ_GRU + i, NULL); - - foreach_gid(gid) - gru_kservices_exit(GID_TO_GRU(gid)); - + gru_kservices_exit(); for (bid = 0; bid < GRU_MAX_BLADES; bid++) free_pages((unsigned long)gru_base[bid], order); diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c index 9b7ccb3..37e7cfc 100644 --- a/drivers/misc/sgi-gru/gruhandles.c +++ b/drivers/misc/sgi-gru/gruhandles.c @@ -57,7 +57,7 @@ static void start_instruction(void *h) static int wait_instruction_complete(void *h, enum mcs_op opc) { int status; - cycles_t start_time = get_cycles(); + unsigned long start_time = get_cycles(); while (1) { cpu_relax(); @@ -65,25 +65,16 @@ static int wait_instruction_complete(void *h, enum mcs_op opc) if (status != CCHSTATUS_ACTIVE) break; if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time)) - panic("GRU %p is malfunctioning\n", h); + panic("GRU %p is malfunctioning: start %ld, end %ld\n", + h, start_time, (unsigned long)get_cycles()); } if (gru_options & OPT_STATS) update_mcs_stats(opc, get_cycles() - start_time); return status; } -int cch_allocate(struct gru_context_configuration_handle *cch, - int asidval, int sizeavail, unsigned long cbrmap, - unsigned long dsrmap) +int 
cch_allocate(struct gru_context_configuration_handle *cch) { - int i; - - for (i = 0; i < 8; i++) { - cch->asid[i] = (asidval++); - cch->sizeavail[i] = sizeavail; - } - cch->dsr_allocation_map = dsrmap; - cch->cbr_allocation_map = cbrmap; cch->opc = CCHOP_ALLOCATE; start_instruction(cch); return wait_instruction_complete(cch, cchop_allocate); diff --git a/drivers/misc/sgi-gru/gruhandles.h b/drivers/misc/sgi-gru/gruhandles.h index 1ed74d7..f441122 100644 --- a/drivers/misc/sgi-gru/gruhandles.h +++ b/drivers/misc/sgi-gru/gruhandles.h @@ -39,7 +39,6 @@ #define GRU_NUM_CBE 128 #define GRU_NUM_TFH 128 #define GRU_NUM_CCH 16 -#define GRU_NUM_GSH 1 /* Maximum resource counts that can be reserved by user programs */ #define GRU_NUM_USER_CBR GRU_NUM_CBE @@ -56,7 +55,6 @@ #define GRU_CBE_BASE (GRU_MCS_BASE + 0x10000) #define GRU_TFH_BASE (GRU_MCS_BASE + 0x18000) #define GRU_CCH_BASE (GRU_MCS_BASE + 0x20000) -#define GRU_GSH_BASE (GRU_MCS_BASE + 0x30000) /* User gseg constants */ #define GRU_GSEG_STRIDE (4 * 1024 * 1024) @@ -251,15 +249,15 @@ struct gru_tlb_fault_handle { unsigned int fill1:9; unsigned int status:2; - unsigned int fill2:1; - unsigned int color:1; + unsigned int fill2:2; unsigned int state:3; unsigned int fill3:1; - unsigned int cause:7; /* DW 0 - high 32 */ + unsigned int cause:6; + unsigned int cb_int:1; unsigned int fill4:1; - unsigned int indexway:12; + unsigned int indexway:12; /* DW 0 - high 32 */ unsigned int fill5:4; unsigned int ctxnum:4; @@ -457,21 +455,7 @@ enum gru_cbr_state { CBRSTATE_BUSY_INTERRUPT, }; -/* CBE cbrexecstatus bits */ -#define CBR_EXS_ABORT_OCC_BIT 0 -#define CBR_EXS_INT_OCC_BIT 1 -#define CBR_EXS_PENDING_BIT 2 -#define CBR_EXS_QUEUED_BIT 3 -#define CBR_EXS_TLBHW_BIT 4 -#define CBR_EXS_EXCEPTION_BIT 5 - -#define CBR_EXS_ABORT_OCC (1 << CBR_EXS_ABORT_OCC_BIT) -#define CBR_EXS_INT_OCC (1 << CBR_EXS_INT_OCC_BIT) -#define CBR_EXS_PENDING (1 << CBR_EXS_PENDING_BIT) -#define CBR_EXS_QUEUED (1 << CBR_EXS_QUEUED_BIT) -#define CBR_EXS_TLBHW (1 << CBR_EXS_TLBHW_BIT) -#define CBR_EXS_EXCEPTION (1 << CBR_EXS_EXCEPTION_BIT) - +/* CBE cbrexecstatus bits - defined in gru_instructions.h*/ /* CBE ecause bits - defined in gru_instructions.h */ /* @@ -495,9 +479,7 @@ enum gru_cbr_state { /* minimum TLB purge count to ensure a full purge */ #define GRUMAXINVAL 1024UL -int cch_allocate(struct gru_context_configuration_handle *cch, - int asidval, int sizeavail, unsigned long cbrmap, unsigned long dsrmap); - +int cch_allocate(struct gru_context_configuration_handle *cch); int cch_start(struct gru_context_configuration_handle *cch); int cch_interrupt(struct gru_context_configuration_handle *cch); int cch_deallocate(struct gru_context_configuration_handle *cch); diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c new file mode 100644 index 0000000..55eabfa --- /dev/null +++ b/drivers/misc/sgi-gru/grukdump.c @@ -0,0 +1,232 @@ +/* + * SN Platform GRU Driver + * + * Dump GRU State + * + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
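With the simplification above, cch_allocate() only issues the CCHOP_ALLOCATE op; the ASID, sizeavail, and resource-map fields that used to be passed as parameters are now the caller's responsibility. The call sites therefore take roughly this shape (a sketch of the new convention, not a literal hunk from this patch):

        cch->asid[0] = asid;                    /* per-slice ASIDs */
        cch->sizeavail[0] = sizeavail;
        cch->cbr_allocation_map = cbr_map;
        cch->dsr_allocation_map = dsr_map;
        if (cch_allocate(cch))
                BUG();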
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/spinlock.h> +#include <linux/uaccess.h> +#include <linux/delay.h> +#include <linux/bitops.h> +#include <asm/uv/uv_hub.h> +#include "gru.h" +#include "grutables.h" +#include "gruhandles.h" +#include "grulib.h" + +#define CCH_LOCK_ATTEMPTS 10 + +static int gru_user_copy_handle(void __user **dp, void *s) +{ + if (copy_to_user(*dp, s, GRU_HANDLE_BYTES)) + return -1; + *dp += GRU_HANDLE_BYTES; + return 0; +} + +static int gru_dump_context_data(void *grubase, + struct gru_context_configuration_handle *cch, + void __user *ubuf, int ctxnum, int dsrcnt) +{ + void *cb, *cbe, *tfh, *gseg; + int i, scr; + + gseg = grubase + ctxnum * GRU_GSEG_STRIDE; + cb = gseg + GRU_CB_BASE; + cbe = grubase + GRU_CBE_BASE; + tfh = grubase + GRU_TFH_BASE; + + for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) { + if (gru_user_copy_handle(&ubuf, cb)) + goto fail; + if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE)) + goto fail; + if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE)) + goto fail; + cb += GRU_HANDLE_STRIDE; + } + if (dsrcnt) + memcpy(ubuf, gseg + GRU_DS_BASE, dsrcnt * GRU_HANDLE_STRIDE); + return 0; + +fail: + return -EFAULT; +} + +static int gru_dump_tfm(struct gru_state *gru, + void __user *ubuf, void __user *ubufend) +{ + struct gru_tlb_fault_map *tfm; + int i, ret, bytes; + + bytes = GRU_NUM_TFM * GRU_CACHE_LINE_BYTES; + if (bytes > ubufend - ubuf) + ret = -EFBIG; + + for (i = 0; i < GRU_NUM_TFM; i++) { + tfm = get_tfm(gru->gs_gru_base_vaddr, i); + if (gru_user_copy_handle(&ubuf, tfm)) + goto fail; + } + return GRU_NUM_TFM * GRU_CACHE_LINE_BYTES; + +fail: + return -EFAULT; +} + +static int gru_dump_tgh(struct gru_state *gru, + void __user *ubuf, void __user *ubufend) +{ + struct gru_tlb_global_handle *tgh; + int i, ret, bytes; + + bytes = GRU_NUM_TGH * GRU_CACHE_LINE_BYTES; + if (bytes > ubufend - ubuf) + ret = -EFBIG; + + for (i = 0; i < GRU_NUM_TGH; i++) { + tgh = get_tgh(gru->gs_gru_base_vaddr, i); + if (gru_user_copy_handle(&ubuf, tgh)) + goto fail; + } + return GRU_NUM_TGH * GRU_CACHE_LINE_BYTES; + +fail: + return -EFAULT; +} + +static int gru_dump_context(struct gru_state *gru, int ctxnum, + void __user *ubuf, void __user *ubufend, char data_opt, + char lock_cch) +{ + struct gru_dump_context_header hdr; + struct gru_dump_context_header __user *uhdr = ubuf; + struct gru_context_configuration_handle *cch, *ubufcch; + struct gru_thread_state *gts; + int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0; + void *grubase; + + memset(&hdr, 0, sizeof(hdr)); + grubase = gru->gs_gru_base_vaddr; + cch = get_cch(grubase, ctxnum); + for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) { + cch_locked = trylock_cch_handle(cch); + if (cch_locked) + break; + msleep(1); + } + + ubuf += sizeof(hdr); + ubufcch = ubuf; + if (gru_user_copy_handle(&ubuf, cch)) + goto fail; + if (cch_locked) + ubufcch->delresp = 0; + bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES; + + if (cch_locked || !lock_cch) { + gts = gru->gs_gts[ctxnum]; + if (gts && gts->ts_vma) { + hdr.pid = gts->ts_tgid_owner; + hdr.vaddr = gts->ts_vma->vm_start; + } + if (cch->state != CCHSTATE_INACTIVE) { + cbrcnt = hweight64(cch->cbr_allocation_map) * + GRU_CBR_AU_SIZE; + dsrcnt = data_opt ? 
hweight32(cch->dsr_allocation_map) * + GRU_DSR_AU_CL : 0; + } + bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES; + if (bytes > ubufend - ubuf) + ret = -EFBIG; + else + ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum, + dsrcnt); + + } + if (cch_locked) + unlock_cch_handle(cch); + if (ret) + return ret; + + hdr.magic = GRU_DUMP_MAGIC; + hdr.gid = gru->gs_gid; + hdr.ctxnum = ctxnum; + hdr.cbrcnt = cbrcnt; + hdr.dsrcnt = dsrcnt; + hdr.cch_locked = cch_locked; + if (!ret && copy_to_user((void __user *)uhdr, &hdr, sizeof(hdr))) + ret = -EFAULT; + + return ret ? ret : bytes; + +fail: + unlock_cch_handle(cch); + return -EFAULT; +} + +int gru_dump_chiplet_request(unsigned long arg) +{ + struct gru_state *gru; + struct gru_dump_chiplet_state_req req; + void __user *ubuf; + void __user *ubufend; + int ctxnum, ret, cnt = 0; + + if (copy_from_user(&req, (void __user *)arg, sizeof(req))) + return -EFAULT; + + /* Currently, only dump by gid is implemented */ + if (req.gid >= gru_max_gids || req.gid < 0) + return -EINVAL; + + gru = GID_TO_GRU(req.gid); + ubuf = req.buf; + ubufend = req.buf + req.buflen; + + ret = gru_dump_tfm(gru, ubuf, ubufend); + if (ret < 0) + goto fail; + ubuf += ret; + + ret = gru_dump_tgh(gru, ubuf, ubufend); + if (ret < 0) + goto fail; + ubuf += ret; + + for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) { + if (req.ctxnum == ctxnum || req.ctxnum < 0) { + ret = gru_dump_context(gru, ctxnum, ubuf, ubufend, + req.data_opt, req.lock_cch); + if (ret < 0) + goto fail; + ubuf += ret; + cnt++; + } + } + + if (copy_to_user((void __user *)arg, &req, sizeof(req))) + return -EFAULT; + return cnt; + +fail: + return ret; +} diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c index d8bd7d8..eedbf9c 100644 --- a/drivers/misc/sgi-gru/grukservices.c +++ b/drivers/misc/sgi-gru/grukservices.c @@ -31,6 +31,7 @@ #include <linux/proc_fs.h> #include <linux/interrupt.h> #include <linux/uaccess.h> +#include <linux/delay.h> #include "gru.h" #include "grulib.h" #include "grutables.h" @@ -45,18 +46,66 @@ * resources. This will likely be replaced when we better understand the * kernel/user requirements. * - * At boot time, the kernel permanently reserves a fixed number of - * CBRs/DSRs for each cpu to use. The resources are all taken from - * the GRU chiplet 1 on the blade. This leaves the full set of resources - * of chiplet 0 available to be allocated to a single user. + * Blade percpu resources reserved for kernel use. These resources are + * reserved whenever the the kernel context for the blade is loaded. Note + * that the kernel context is not guaranteed to be always available. It is + * loaded on demand & can be stolen by a user if the user demand exceeds the + * kernel demand. The kernel can always reload the kernel context but + * a SLEEP may be required!!!. + * + * Async Overview: + * + * Each blade has one "kernel context" that owns GRU kernel resources + * located on the blade. Kernel drivers use GRU resources in this context + * for sending messages, zeroing memory, etc. + * + * The kernel context is dynamically loaded on demand. If it is not in + * use by the kernel, the kernel context can be unloaded & given to a user. + * The kernel context will be reloaded when needed. This may require that + * a context be stolen from a user. + * NOTE: frequent unloading/reloading of the kernel context is + * expensive. We are depending on batch schedulers, cpusets, sane + * drivers or some other mechanism to prevent the need for frequent + * stealing/reloading. 
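On the consuming side, the buffer returned for GRU_DUMP_CHIPLET_STATE holds the TFM and TGH handle arrays first, then one record per selected context: a gru_dump_context_header, the CCH image, CB/TFH/CBE handles for each allocated CBR, and optionally the DSR lines. A hypothetical user-space walk over the context records, with the record size reconstructed from the bytes arithmetic in gru_dump_context():

        struct gru_dump_context_header *hdr = ctx_records;

        while ((void *)hdr < bufend && hdr->magic == GRU_DUMP_MAGIC) {
                printf("gid %d ctx %d pid %d cbrs %d dsrs %d%s\n",
                       hdr->gid, hdr->ctxnum, hdr->pid,
                       hdr->cbrcnt, hdr->dsrcnt,
                       hdr->cch_locked ? "" : " (unlocked, may be stale)");
                hdr = (void *)hdr + sizeof(*hdr) + GRU_CACHE_LINE_BYTES +
                      (3 * hdr->cbrcnt + hdr->dsrcnt) * GRU_CACHE_LINE_BYTES;
        }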
+ * + * The kernel context consists of two parts: + * - 1 CB & a few DSRs that are reserved for each cpu on the blade. + * Each cpu has it's own private resources & does not share them + * with other cpus. These resources are used serially, ie, + * locked, used & unlocked on each call to a function in + * grukservices. + * (Now that we have dynamic loading of kernel contexts, I + * may rethink this & allow sharing between cpus....) + * + * - Additional resources can be reserved long term & used directly + * by UV drivers located in the kernel. Drivers using these GRU + * resources can use asynchronous GRU instructions that send + * interrupts on completion. + * - these resources must be explicitly locked/unlocked + * - locked resources prevent (obviously) the kernel + * context from being unloaded. + * - drivers using these resource directly issue their own + * GRU instruction and must wait/check completion. + * + * When these resources are reserved, the caller can optionally + * associate a wait_queue with the resources and use asynchronous + * GRU instructions. When an async GRU instruction completes, the + * driver will do a wakeup on the event. + * */ -/* Blade percpu resources PERMANENTLY reserved for kernel use */ + +#define ASYNC_HAN_TO_BID(h) ((h) - 1) +#define ASYNC_BID_TO_HAN(b) ((b) + 1) +#define ASYNC_HAN_TO_BS(h) gru_base[ASYNC_HAN_TO_BID(h)] +#define KCB_TO_GID(cb) ((cb - gru_start_vaddr) / \ + (GRU_SIZE * GRU_CHIPLETS_PER_BLADE)) +#define KCB_TO_BS(cb) gru_base[KCB_TO_GID(cb)] + #define GRU_NUM_KERNEL_CBR 1 #define GRU_NUM_KERNEL_DSR_BYTES 256 #define GRU_NUM_KERNEL_DSR_CL (GRU_NUM_KERNEL_DSR_BYTES / \ GRU_CACHE_LINE_BYTES) -#define KERNEL_CTXNUM 15 /* GRU instruction attributes for all instructions */ #define IMA IMA_CB_DELAY @@ -98,6 +147,108 @@ struct message_header { #define HSTATUS(mq, h) ((mq) + offsetof(struct message_queue, hstatus[h])) +/* + * Reload the blade's kernel context into a GRU chiplet. Called holding + * the bs_kgts_sema for READ. Will steal user contexts if necessary. + */ +static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id) +{ + struct gru_state *gru; + struct gru_thread_state *kgts; + void *vaddr; + int ctxnum, ncpus; + + up_read(&bs->bs_kgts_sema); + down_write(&bs->bs_kgts_sema); + + if (!bs->bs_kgts) + bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0); + kgts = bs->bs_kgts; + + if (!kgts->ts_gru) { + STAT(load_kernel_context); + ncpus = uv_blade_nr_possible_cpus(blade_id); + kgts->ts_cbr_au_count = GRU_CB_COUNT_TO_AU( + GRU_NUM_KERNEL_CBR * ncpus + bs->bs_async_cbrs); + kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU( + GRU_NUM_KERNEL_DSR_BYTES * ncpus + + bs->bs_async_dsr_bytes); + while (!gru_assign_gru_context(kgts, blade_id)) { + msleep(1); + gru_steal_context(kgts, blade_id); + } + gru_load_context(kgts); + gru = bs->bs_kgts->ts_gru; + vaddr = gru->gs_gru_base_vaddr; + ctxnum = kgts->ts_ctxnum; + bs->kernel_cb = get_gseg_base_address_cb(vaddr, ctxnum, 0); + bs->kernel_dsr = get_gseg_base_address_ds(vaddr, ctxnum, 0); + } + downgrade_write(&bs->bs_kgts_sema); +} + +/* + * Free all kernel contexts that are not currently in use. + * Returns 0 if all freed, else number of inuse context. 
+ */ +static int gru_free_kernel_contexts(void) +{ + struct gru_blade_state *bs; + struct gru_thread_state *kgts; + int bid, ret = 0; + + for (bid = 0; bid < GRU_MAX_BLADES; bid++) { + bs = gru_base[bid]; + if (!bs) + continue; + if (down_write_trylock(&bs->bs_kgts_sema)) { + kgts = bs->bs_kgts; + if (kgts && kgts->ts_gru) + gru_unload_context(kgts, 0); + kfree(kgts); + bs->bs_kgts = NULL; + up_write(&bs->bs_kgts_sema); + } else { + ret++; + } + } + return ret; +} + +/* + * Lock & load the kernel context for the specified blade. + */ +static struct gru_blade_state *gru_lock_kernel_context(int blade_id) +{ + struct gru_blade_state *bs; + + STAT(lock_kernel_context); + bs = gru_base[blade_id]; + + down_read(&bs->bs_kgts_sema); + if (!bs->bs_kgts || !bs->bs_kgts->ts_gru) + gru_load_kernel_context(bs, blade_id); + return bs; + +} + +/* + * Unlock the kernel context for the specified blade. Context is not + * unloaded but may be stolen before next use. + */ +static void gru_unlock_kernel_context(int blade_id) +{ + struct gru_blade_state *bs; + + bs = gru_base[blade_id]; + up_read(&bs->bs_kgts_sema); + STAT(unlock_kernel_context); +} + +/* + * Reserve & get pointers to the DSR/CBRs reserved for the current cpu. + * - returns with preemption disabled + */ static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr) { struct gru_blade_state *bs; @@ -105,30 +256,148 @@ static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr) BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES); preempt_disable(); - bs = gru_base[uv_numa_blade_id()]; + bs = gru_lock_kernel_context(uv_numa_blade_id()); lcpu = uv_blade_processor_id(); *cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE; *dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES; return 0; } +/* + * Free the current cpus reserved DSR/CBR resources. + */ static void gru_free_cpu_resources(void *cb, void *dsr) { + gru_unlock_kernel_context(uv_numa_blade_id()); preempt_enable(); } +/* + * Reserve GRU resources to be used asynchronously. + * Note: currently supports only 1 reservation per blade. + * + * input: + * blade_id - blade on which resources should be reserved + * cbrs - number of CBRs + * dsr_bytes - number of DSR bytes needed + * output: + * handle to identify resource + * (0 = async resources already reserved) + */ +unsigned long gru_reserve_async_resources(int blade_id, int cbrs, int dsr_bytes, + struct completion *cmp) +{ + struct gru_blade_state *bs; + struct gru_thread_state *kgts; + int ret = 0; + + bs = gru_base[blade_id]; + + down_write(&bs->bs_kgts_sema); + + /* Verify no resources already reserved */ + if (bs->bs_async_dsr_bytes + bs->bs_async_cbrs) + goto done; + bs->bs_async_dsr_bytes = dsr_bytes; + bs->bs_async_cbrs = cbrs; + bs->bs_async_wq = cmp; + kgts = bs->bs_kgts; + + /* Resources changed. Unload context if already loaded */ + if (kgts && kgts->ts_gru) + gru_unload_context(kgts, 0); + ret = ASYNC_BID_TO_HAN(blade_id); + +done: + up_write(&bs->bs_kgts_sema); + return ret; +} + +/* + * Release async resources previously reserved. + * + * input: + * han - handle to identify resources + */ +void gru_release_async_resources(unsigned long han) +{ + struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han); + + down_write(&bs->bs_kgts_sema); + bs->bs_async_dsr_bytes = 0; + bs->bs_async_cbrs = 0; + bs->bs_async_wq = NULL; + up_write(&bs->bs_kgts_sema); +} + +/* + * Wait for async GRU instructions to complete. 
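Putting the async pieces together (quicktest2() later in this patch exercises exactly this flow), a driver-side skeleton looks like the following; blade_id, ncbrs, and buf are assumed to be set up by the caller:

        static DECLARE_COMPLETION(cmp);
        unsigned long han;
        void *cb;

        han = gru_reserve_async_resources(blade_id, ncbrs, 0, &cmp);
        if (!han)
                return -EBUSY;          /* blade's async slot already taken */

        gru_lock_async_resource(han, &cb, NULL);
        gru_vset(cb, uv_gpa(buf), 0, XTYPE_DW, 4, 1, IMA_INTERRUPT);
        gru_wait_async_cbr(han);        /* sleeps until the CBR interrupts */
        gru_unlock_async_resource(han);
        gru_release_async_resources(han);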
+ * + * input: + * han - handle to identify resources + */ +void gru_wait_async_cbr(unsigned long han) +{ + struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han); + + wait_for_completion(bs->bs_async_wq); + mb(); +} + +/* + * Lock previous reserved async GRU resources + * + * input: + * han - handle to identify resources + * output: + * cb - pointer to first CBR + * dsr - pointer to first DSR + */ +void gru_lock_async_resource(unsigned long han, void **cb, void **dsr) +{ + struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han); + int blade_id = ASYNC_HAN_TO_BID(han); + int ncpus; + + gru_lock_kernel_context(blade_id); + ncpus = uv_blade_nr_possible_cpus(blade_id); + if (cb) + *cb = bs->kernel_cb + ncpus * GRU_HANDLE_STRIDE; + if (dsr) + *dsr = bs->kernel_dsr + ncpus * GRU_NUM_KERNEL_DSR_BYTES; +} + +/* + * Unlock previous reserved async GRU resources + * + * input: + * han - handle to identify resources + */ +void gru_unlock_async_resource(unsigned long han) +{ + int blade_id = ASYNC_HAN_TO_BID(han); + + gru_unlock_kernel_context(blade_id); +} + +/*----------------------------------------------------------------------*/ int gru_get_cb_exception_detail(void *cb, struct control_block_extended_exc_detail *excdet) { struct gru_control_block_extended *cbe; + struct gru_blade_state *bs; + int cbrnum; - cbe = get_cbe(GRUBASE(cb), get_cb_number(cb)); - prefetchw(cbe); /* Harmless on hardware, required for emulator */ + bs = KCB_TO_BS(cb); + cbrnum = thread_cbr_number(bs->bs_kgts, get_cb_number(cb)); + cbe = get_cbe(GRUBASE(cb), cbrnum); + gru_flush_cache(cbe); /* CBE not coherent */ excdet->opc = cbe->opccpy; excdet->exopc = cbe->exopccpy; excdet->ecause = cbe->ecause; excdet->exceptdet0 = cbe->idef1upd; excdet->exceptdet1 = cbe->idef3upd; + gru_flush_cache(cbe); return 0; } @@ -167,13 +436,13 @@ static int gru_retry_exception(void *cb) int retry = EXCEPTION_RETRY_LIMIT; while (1) { - if (gru_get_cb_message_queue_substatus(cb)) - break; if (gru_wait_idle_or_exception(gen) == CBS_IDLE) return CBS_IDLE; - + if (gru_get_cb_message_queue_substatus(cb)) + return CBS_EXCEPTION; gru_get_cb_exception_detail(cb, &excdet); - if (excdet.ecause & ~EXCEPTION_RETRY_BITS) + if ((excdet.ecause & ~EXCEPTION_RETRY_BITS) || + (excdet.cbrexecstatus & CBR_EXS_ABORT_OCC)) break; if (retry-- == 0) break; @@ -416,6 +685,29 @@ static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd) mqd->interrupt_vector); } +/* + * Handle a PUT failure. Note: if message was a 2-line message, one of the + * lines might have successfully have been written. Before sending the + * message, "present" must be cleared in BOTH lines to prevent the receiver + * from prematurely seeing the full message. + */ +static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd, + void *mesg, int lines) +{ + unsigned long m; + + m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6); + if (lines == 2) { + gru_vset(cb, m, 0, XTYPE_CL, lines, 1, IMA); + if (gru_wait(cb) != CBS_IDLE) + return MQE_UNEXPECTED_CB_ERR; + } + gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA); + if (gru_wait(cb) != CBS_IDLE) + return MQE_UNEXPECTED_CB_ERR; + send_message_queue_interrupt(mqd); + return MQE_OK; +} /* * Handle a gru_mesq failure. 
Some of these failures are software recoverable @@ -425,7 +717,6 @@ static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd, void *mesg, int lines) { int substatus, ret = 0; - unsigned long m; substatus = gru_get_cb_message_queue_substatus(cb); switch (substatus) { @@ -447,14 +738,7 @@ static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd, break; case CBSS_PUT_NACKED: STAT(mesq_send_put_nacked); - m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6); - gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA); - if (gru_wait(cb) == CBS_IDLE) { - ret = MQE_OK; - send_message_queue_interrupt(mqd); - } else { - ret = MQE_UNEXPECTED_CB_ERR; - } + ret = send_message_put_nacked(cb, mqd, mesg, lines); break; default: BUG(); @@ -597,115 +881,177 @@ EXPORT_SYMBOL_GPL(gru_copy_gpa); /* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/ /* Temp - will delete after we gain confidence in the GRU */ -static __cacheline_aligned unsigned long word0; -static __cacheline_aligned unsigned long word1; -static int quicktest(struct gru_state *gru) +static int quicktest0(unsigned long arg) { + unsigned long word0; + unsigned long word1; void *cb; - void *ds; + void *dsr; unsigned long *p; + int ret = -EIO; - cb = get_gseg_base_address_cb(gru->gs_gru_base_vaddr, KERNEL_CTXNUM, 0); - ds = get_gseg_base_address_ds(gru->gs_gru_base_vaddr, KERNEL_CTXNUM, 0); - p = ds; + if (gru_get_cpu_resources(GRU_CACHE_LINE_BYTES, &cb, &dsr)) + return MQE_BUG_NO_RESOURCES; + p = dsr; word0 = MAGIC; + word1 = 0; - gru_vload(cb, uv_gpa(&word0), 0, XTYPE_DW, 1, 1, IMA); - if (gru_wait(cb) != CBS_IDLE) - BUG(); + gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA); + if (gru_wait(cb) != CBS_IDLE) { + printk(KERN_DEBUG "GRU quicktest0: CBR failure 1\n"); + goto done; + } - if (*(unsigned long *)ds != MAGIC) - BUG(); - gru_vstore(cb, uv_gpa(&word1), 0, XTYPE_DW, 1, 1, IMA); - if (gru_wait(cb) != CBS_IDLE) - BUG(); + if (*p != MAGIC) { + printk(KERN_DEBUG "GRU: quicktest0 bad magic 0x%lx\n", *p); + goto done; + } + gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA); + if (gru_wait(cb) != CBS_IDLE) { + printk(KERN_DEBUG "GRU quicktest0: CBR failure 2\n"); + goto done; + } - if (word0 != word1 || word0 != MAGIC) { - printk - ("GRU quicktest err: gid %d, found 0x%lx, expected 0x%lx\n", - gru->gs_gid, word1, MAGIC); - BUG(); /* ZZZ should not be fatal */ + if (word0 != word1 || word1 != MAGIC) { + printk(KERN_DEBUG + "GRU quicktest0 err: found 0x%lx, expected 0x%lx\n", + word1, MAGIC); + goto done; } + ret = 0; - return 0; +done: + gru_free_cpu_resources(cb, dsr); + return ret; } +#define ALIGNUP(p, q) ((void *)(((unsigned long)(p) + (q) - 1) & ~(q - 1))) -int gru_kservices_init(struct gru_state *gru) +static int quicktest1(unsigned long arg) { - struct gru_blade_state *bs; - struct gru_context_configuration_handle *cch; - unsigned long cbr_map, dsr_map; - int err, num, cpus_possible; - - /* - * Currently, resources are reserved ONLY on the second chiplet - * on each blade. This leaves ALL resources on chiplet 0 available - * for user code. 
- */ - bs = gru->gs_blade; - if (gru != &bs->bs_grus[1]) - return 0; - - cpus_possible = uv_blade_nr_possible_cpus(gru->gs_blade_id); - - num = GRU_NUM_KERNEL_CBR * cpus_possible; - cbr_map = gru_reserve_cb_resources(gru, GRU_CB_COUNT_TO_AU(num), NULL); - gru->gs_reserved_cbrs += num; - - num = GRU_NUM_KERNEL_DSR_BYTES * cpus_possible; - dsr_map = gru_reserve_ds_resources(gru, GRU_DS_BYTES_TO_AU(num), NULL); - gru->gs_reserved_dsr_bytes += num; - - gru->gs_active_contexts++; - __set_bit(KERNEL_CTXNUM, &gru->gs_context_map); - cch = get_cch(gru->gs_gru_base_vaddr, KERNEL_CTXNUM); - - bs->kernel_cb = get_gseg_base_address_cb(gru->gs_gru_base_vaddr, - KERNEL_CTXNUM, 0); - bs->kernel_dsr = get_gseg_base_address_ds(gru->gs_gru_base_vaddr, - KERNEL_CTXNUM, 0); - - lock_cch_handle(cch); - cch->tfm_fault_bit_enable = 0; - cch->tlb_int_enable = 0; - cch->tfm_done_bit_enable = 0; - cch->unmap_enable = 1; - err = cch_allocate(cch, 0, 0, cbr_map, dsr_map); - if (err) { - gru_dbg(grudev, - "Unable to allocate kernel CCH: gid %d, err %d\n", - gru->gs_gid, err); - BUG(); + struct gru_message_queue_desc mqd; + void *p, *mq; + unsigned long *dw; + int i, ret = -EIO; + char mes[GRU_CACHE_LINE_BYTES], *m; + + /* Need a 1K cacheline-aligned buffer that does not cross a page boundary */ + p = kmalloc(4096, GFP_KERNEL); + if (!p) + return -ENOMEM; + mq = ALIGNUP(p, 1024); + memset(mes, 0xee, sizeof(mes)); + dw = mq; + + gru_create_message_queue(&mqd, mq, 8 * GRU_CACHE_LINE_BYTES, 0, 0, 0); + for (i = 0; i < 6; i++) { + mes[8] = i; + do { + ret = gru_send_message_gpa(&mqd, mes, sizeof(mes)); + } while (ret == MQE_CONGESTION); + if (ret) + break; } - if (cch_start(cch)) { - gru_dbg(grudev, "Unable to start kernel CCH: gid %d, err %d\n", - gru->gs_gid, err); - BUG(); + if (ret != MQE_QUEUE_FULL || i != 4) + goto done; + + for (i = 0; i < 6; i++) { + m = gru_get_next_message(&mqd); + if (!m || m[8] != i) + break; + gru_free_message(&mqd, m); } - unlock_cch_handle(cch); + ret = (i == 4) ? 0 : -EIO; - if (gru_options & GRU_QUICKLOOK) - quicktest(gru); - return 0; +done: + kfree(p); + return ret; } -void gru_kservices_exit(struct gru_state *gru) +static int quicktest2(unsigned long arg) { - struct gru_context_configuration_handle *cch; - struct gru_blade_state *bs; + static DECLARE_COMPLETION(cmp); + unsigned long han; + int blade_id = 0; + int numcb = 4; + int ret = 0; + unsigned long *buf; + void *cb0, *cb; + int i, k, istatus, bytes; + + bytes = numcb * 4 * 8; + buf = kmalloc(bytes, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = -EBUSY; + han = gru_reserve_async_resources(blade_id, numcb, 0, &cmp); + if (!han) + goto done; + + gru_lock_async_resource(han, &cb0, NULL); + memset(buf, 0xee, bytes); + for (i = 0; i < numcb; i++) + gru_vset(cb0 + i * GRU_HANDLE_STRIDE, uv_gpa(&buf[i * 4]), 0, + XTYPE_DW, 4, 1, IMA_INTERRUPT); + + ret = 0; + for (k = 0; k < numcb; k++) { + gru_wait_async_cbr(han); + for (i = 0; i < numcb; i++) { + cb = cb0 + i * GRU_HANDLE_STRIDE; + istatus = gru_check_status(cb); + if (istatus == CBS_ACTIVE) + continue; + if (istatus == CBS_EXCEPTION) + ret = -EFAULT; + else if (buf[i] || buf[i + 1] || buf[i + 2] || + buf[i + 3]) + ret = -EIO; + } + } + BUG_ON(cmp.done); - bs = gru->gs_blade; - if (gru != &bs->bs_grus[1]) - return; + gru_unlock_async_resource(han); + gru_release_async_resources(han); +done: + kfree(buf); + return ret; +} - cch = get_cch(gru->gs_gru_base_vaddr, KERNEL_CTXNUM); - lock_cch_handle(cch); - if (cch_interrupt_sync(cch)) - BUG(); - if (cch_deallocate(cch)) +/* + * Debugging only.
User hook for various kernel tests + * of driver & gru. + */ +int gru_ktest(unsigned long arg) +{ + int ret = -EINVAL; + + switch (arg & 0xff) { + case 0: + ret = quicktest0(arg); + break; + case 1: + ret = quicktest1(arg); + break; + case 2: + ret = quicktest2(arg); + break; + case 99: + ret = gru_free_kernel_contexts(); + break; + } + return ret; +} + +int gru_kservices_init(void) +{ + return 0; +} + +void gru_kservices_exit(void) +{ + if (gru_free_kernel_contexts()) BUG(); - unlock_cch_handle(cch); } diff --git a/drivers/misc/sgi-gru/grukservices.h b/drivers/misc/sgi-gru/grukservices.h index 747ed31..d60d34b 100644 --- a/drivers/misc/sgi-gru/grukservices.h +++ b/drivers/misc/sgi-gru/grukservices.h @@ -146,4 +146,55 @@ extern void *gru_get_next_message(struct gru_message_queue_desc *mqd); extern int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa, unsigned int bytes); +/* + * Reserve GRU resources to be used asynchronously. + * + * input: + * blade_id - blade on which resources should be reserved + * cbrs - number of CBRs + * dsr_bytes - number of DSR bytes needed + * cmp - completion structure for waiting for + * async completions + * output: + * handle to identify resource + * (0 = no resources) + */ +extern unsigned long gru_reserve_async_resources(int blade_id, int cbrs, int dsr_bytes, + struct completion *cmp); + +/* + * Release async resources previously reserved. + * + * input: + * han - handle to identify resources + */ +extern void gru_release_async_resources(unsigned long han); + +/* + * Wait for async GRU instructions to complete. + * + * input: + * han - handle to identify resources + */ +extern void gru_wait_async_cbr(unsigned long han); + +/* + * Lock previously reserved async GRU resources + * + * input: + * han - handle to identify resources + * output: + * cb - pointer to first CBR + * dsr - pointer to first DSR + */ +extern void gru_lock_async_resource(unsigned long han, void **cb, void **dsr); + +/* + * Unlock previously reserved async GRU resources + * + * input: + * han - handle to identify resources + */ +extern void gru_unlock_async_resource(unsigned long han); + #endif /* __GRU_KSERVICES_H_ */ diff --git a/drivers/misc/sgi-gru/grulib.h b/drivers/misc/sgi-gru/grulib.h index e56e196..889bc44 100644 --- a/drivers/misc/sgi-gru/grulib.h +++ b/drivers/misc/sgi-gru/grulib.h @@ -32,8 +32,8 @@ /* Set Number of Request Blocks */ #define GRU_CREATE_CONTEXT _IOWR(GRU_IOCTL_NUM, 1, void *) -/* Register task as using the slice */ -#define GRU_SET_TASK_SLICE _IOWR(GRU_IOCTL_NUM, 5, void *) +/* Set Context Options */ +#define GRU_SET_CONTEXT_OPTION _IOWR(GRU_IOCTL_NUM, 4, void *) /* Fetch exception detail */ #define GRU_USER_GET_EXCEPTION_DETAIL _IOWR(GRU_IOCTL_NUM, 6, void *) @@ -44,8 +44,11 @@ /* For user unload context */ #define GRU_USER_UNLOAD_CONTEXT _IOWR(GRU_IOCTL_NUM, 9, void *) -/* For fetching GRU chiplet status */ -#define GRU_GET_CHIPLET_STATUS _IOWR(GRU_IOCTL_NUM, 10, void *) +/* For dumping GRU chiplet state */ +#define GRU_DUMP_CHIPLET_STATE _IOWR(GRU_IOCTL_NUM, 11, void *) + +/* For getting gseg statistics */ +#define GRU_GET_GSEG_STATISTICS _IOWR(GRU_IOCTL_NUM, 12, void *) /* For user TLB flushing (primarily for tests) */ #define GRU_USER_FLUSH_TLB _IOWR(GRU_IOCTL_NUM, 50, void *) @@ -53,8 +56,26 @@ /* Get some config options (primarily for tests & emulator) */ #define GRU_GET_CONFIG_INFO _IOWR(GRU_IOCTL_NUM, 51, void *) +/* Various kernel self-tests */ +#define GRU_KTEST _IOWR(GRU_IOCTL_NUM, 52, void *) + #define CONTEXT_WINDOW_BYTES(th)
(GRU_GSEG_PAGESIZE * (th)) #define THREAD_POINTER(p, th) (p + GRU_GSEG_PAGESIZE * (th)) #define GSEG_START(cb) ((void *)((unsigned long)(cb) & ~(GRU_GSEG_PAGESIZE - 1))) + +/* + * Statistics kept on a per-GTS basis. + */ +struct gts_statistics { + unsigned long fmm_tlbdropin; + unsigned long upm_tlbdropin; + unsigned long context_stolen; +}; + +struct gru_get_gseg_statistics_req { + unsigned long gseg; + struct gts_statistics stats; +}; /* * Structure used to pass TLB flush parameters to the driver @@ -75,6 +96,16 @@ struct gru_unload_context_req { }; /* + * Structure used to set context options + */ +enum {sco_gseg_owner, sco_cch_req_slice}; +struct gru_set_context_option_req { + unsigned long gseg; + int op; + unsigned long val1; +}; + +/* * Structure used to pass TLB flush parameters to the driver */ struct gru_flush_tlb_req { @@ -84,6 +115,36 @@ }; /* + * Structure used to dump GRU chiplet state + */ +enum {dcs_pid, dcs_gid}; +struct gru_dump_chiplet_state_req { + unsigned int op; + unsigned int gid; + int ctxnum; + char data_opt; + char lock_cch; + pid_t pid; + void *buf; + size_t buflen; + /* ---- output --- */ + unsigned int num_contexts; +}; + +#define GRU_DUMP_MAGIC 0x3474ab6c +struct gru_dump_context_header { + unsigned int magic; + unsigned int gid; + unsigned char ctxnum; + unsigned char cbrcnt; + unsigned char dsrcnt; + pid_t pid; + unsigned long vaddr; + int cch_locked; + unsigned long data[0]; +}; + +/* * GRU configuration info (temp - for testing) */ struct gru_config_info { diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c index ec3f7a1..3bc643d 100644 --- a/drivers/misc/sgi-gru/grumain.c +++ b/drivers/misc/sgi-gru/grumain.c @@ -3,11 +3,21 @@ * * DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. * - * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> @@ -96,7 +106,7 @@ static int gru_reset_asid_limit(struct gru_state *gru, int asid) gid = gru->gs_gid; again: for (i = 0; i < GRU_NUM_CCH; i++) { - if (!gru->gs_gts[i]) + if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i])) continue; inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid; gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n", @@ -150,7 +160,7 @@ static unsigned long reserve_resources(unsigned long *p, int n, int mmax, unsigned long bits = 0; int i; - do { + while (n--) { i = find_first_bit(p, mmax); if (i == mmax) BUG(); @@ -158,7 +168,7 @@ static unsigned long reserve_resources(unsigned long *p, int n, int mmax, __set_bit(i, &bits); if (idx) *idx++ = i; - } while (--n); + } return bits; } @@ -299,38 +309,39 @@ static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data /* * Allocate a thread state structure. */ -static struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma, - struct gru_vma_data *vdata, - int tsid) +struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma, + int cbr_au_count, int dsr_au_count, int options, int tsid) { struct gru_thread_state *gts; int bytes; - bytes = DSR_BYTES(vdata->vd_dsr_au_count) + - CBR_BYTES(vdata->vd_cbr_au_count); + bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count); bytes += sizeof(struct gru_thread_state); - gts = kzalloc(bytes, GFP_KERNEL); + gts = kmalloc(bytes, GFP_KERNEL); if (!gts) return NULL; STAT(gts_alloc); + memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */ atomic_set(&gts->ts_refcnt, 1); mutex_init(&gts->ts_ctxlock); - gts->ts_cbr_au_count = vdata->vd_cbr_au_count; - gts->ts_dsr_au_count = vdata->vd_dsr_au_count; - gts->ts_user_options = vdata->vd_user_options; + gts->ts_cbr_au_count = cbr_au_count; + gts->ts_dsr_au_count = dsr_au_count; + gts->ts_user_options = options; gts->ts_tsid = tsid; - gts->ts_user_options = vdata->vd_user_options; gts->ts_ctxnum = NULLCTX; - gts->ts_mm = current->mm; - gts->ts_vma = vma; gts->ts_tlb_int_select = -1; - gts->ts_gms = gru_register_mmu_notifier(); + gts->ts_cch_req_slice = -1; gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT); - if (!gts->ts_gms) - goto err; + if (vma) { + gts->ts_mm = current->mm; + gts->ts_vma = vma; + gts->ts_gms = gru_register_mmu_notifier(); + if (!gts->ts_gms) + goto err; + } - gru_dbg(grudev, "alloc vdata %p, new gts %p\n", vdata, gts); + gru_dbg(grudev, "alloc gts %p\n", gts); return gts; err: @@ -381,7 +392,8 @@ struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma, struct gru_vma_data *vdata = vma->vm_private_data; struct gru_thread_state *gts, *ngts; - gts = gru_alloc_gts(vma, vdata, tsid); + gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count, vdata->vd_dsr_au_count, + vdata->vd_user_options, tsid); if (!gts) return NULL; @@ -458,7 +470,8 @@ static void gru_prefetch_context(void *gseg, void *cb, void *cbe, } static void gru_load_context_data(void *save, void *grubase, int ctxnum, - unsigned long cbrmap, unsigned long dsrmap) + unsigned long cbrmap, unsigned long dsrmap, + int data_valid) { void *gseg, *cb, *cbe; unsigned long length; @@ -471,12 +484,22 @@ static void gru_load_context_data(void *save, void *grubase, int ctxnum, gru_prefetch_context(gseg, cb, cbe, cbrmap, length); for_each_cbr_in_allocation_map(i,
&cbrmap, scr) { - save += gru_copy_handle(cb, save); - save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE, save); + if (data_valid) { + save += gru_copy_handle(cb, save); + save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE, + save); + } else { + memset(cb, 0, GRU_CACHE_LINE_BYTES); + memset(cbe + i * GRU_HANDLE_STRIDE, 0, + GRU_CACHE_LINE_BYTES); + } cb += GRU_HANDLE_STRIDE; } - memcpy(gseg + GRU_DS_BASE, save, length); + if (data_valid) + memcpy(gseg + GRU_DS_BASE, save, length); + else + memset(gseg + GRU_DS_BASE, 0, length); } static void gru_unload_context_data(void *save, void *grubase, int ctxnum, @@ -506,7 +529,8 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate) struct gru_context_configuration_handle *cch; int ctxnum = gts->ts_ctxnum; - zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE); + if (!is_kernel_context(gts)) + zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE); cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); gru_dbg(grudev, "gts %p\n", gts); @@ -514,11 +538,14 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate) if (cch_interrupt_sync(cch)) BUG(); - gru_unload_mm_tracker(gru, gts); - if (savestate) + if (!is_kernel_context(gts)) + gru_unload_mm_tracker(gru, gts); + if (savestate) { gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum, gts->ts_cbr_map, gts->ts_dsr_map); + gts->ts_data_valid = 1; + } if (cch_deallocate(cch)) BUG(); @@ -526,24 +553,22 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate) unlock_cch_handle(cch); gru_free_gru_context(gts); - STAT(unload_context); } /* * Load a GRU context by copying it from the thread data structure in memory * to the GRU. */ -static void gru_load_context(struct gru_thread_state *gts) +void gru_load_context(struct gru_thread_state *gts) { struct gru_state *gru = gts->ts_gru; struct gru_context_configuration_handle *cch; - int err, asid, ctxnum = gts->ts_ctxnum; + int i, err, asid, ctxnum = gts->ts_ctxnum; gru_dbg(grudev, "gts %p\n", gts); cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); lock_cch_handle(cch); - asid = gru_load_mm_tracker(gru, gts); cch->tfm_fault_bit_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR); @@ -552,9 +577,32 @@ static void gru_load_context(struct gru_thread_state *gts) gts->ts_tlb_int_select = gru_cpu_fault_map_id(); cch->tlb_int_select = gts->ts_tlb_int_select; } + if (gts->ts_cch_req_slice >= 0) { + cch->req_slice_set_enable = 1; + cch->req_slice = gts->ts_cch_req_slice; + } else { + cch->req_slice_set_enable =0; + } cch->tfm_done_bit_enable = 0; - err = cch_allocate(cch, asid, gts->ts_sizeavail, gts->ts_cbr_map, - gts->ts_dsr_map); + cch->dsr_allocation_map = gts->ts_dsr_map; + cch->cbr_allocation_map = gts->ts_cbr_map; + + if (is_kernel_context(gts)) { + cch->unmap_enable = 1; + cch->tfm_done_bit_enable = 1; + cch->cb_int_enable = 1; + } else { + cch->unmap_enable = 0; + cch->tfm_done_bit_enable = 0; + cch->cb_int_enable = 0; + asid = gru_load_mm_tracker(gru, gts); + for (i = 0; i < 8; i++) { + cch->asid[i] = asid + i; + cch->sizeavail[i] = gts->ts_sizeavail; + } + } + + err = cch_allocate(cch); if (err) { gru_dbg(grudev, "err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n", @@ -563,13 +611,11 @@ static void gru_load_context(struct gru_thread_state *gts) } gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum, - gts->ts_cbr_map, gts->ts_dsr_map); + gts->ts_cbr_map, gts->ts_dsr_map, gts->ts_data_valid); if (cch_start(cch)) BUG(); 
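/* Note on the sequence above: the CCH handle stays locked from lock_cch_handle() through cch_start(), i.e. across the interrupt/slice setup, cch_allocate() and the CB/DSR data load, so the handle lock serializes this load sequence against a concurrent unload of the same context. */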
unlock_cch_handle(cch); - - STAT(load_context); } /* @@ -599,6 +645,9 @@ int gru_update_cch(struct gru_thread_state *gts, int force_unload) cch->sizeavail[i] = gts->ts_sizeavail; gts->ts_tlb_int_select = gru_cpu_fault_map_id(); cch->tlb_int_select = gru_cpu_fault_map_id(); + cch->tfm_fault_bit_enable = + (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL + || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR); } else { for (i = 0; i < 8; i++) cch->asid[i] = 0; @@ -642,7 +691,28 @@ static int gru_retarget_intr(struct gru_thread_state *gts) #define next_gru(b, g) (((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? \ ((g)+1) : &(b)->bs_grus[0]) -static void gru_steal_context(struct gru_thread_state *gts) +static int is_gts_stealable(struct gru_thread_state *gts, + struct gru_blade_state *bs) +{ + if (is_kernel_context(gts)) + return down_write_trylock(&bs->bs_kgts_sema); + else + return mutex_trylock(&gts->ts_ctxlock); +} + +static void gts_stolen(struct gru_thread_state *gts, + struct gru_blade_state *bs) +{ + if (is_kernel_context(gts)) { + up_write(&bs->bs_kgts_sema); + STAT(steal_kernel_context); + } else { + mutex_unlock(&gts->ts_ctxlock); + STAT(steal_user_context); + } +} + +void gru_steal_context(struct gru_thread_state *gts, int blade_id) { struct gru_blade_state *blade; struct gru_state *gru, *gru0; @@ -652,8 +722,7 @@ static void gru_steal_context(struct gru_thread_state *gts) cbr = gts->ts_cbr_au_count; dsr = gts->ts_dsr_au_count; - preempt_disable(); - blade = gru_base[uv_numa_blade_id()]; + blade = gru_base[blade_id]; spin_lock(&blade->bs_lock); ctxnum = next_ctxnum(blade->bs_lru_ctxnum); @@ -676,7 +745,7 @@ static void gru_steal_context(struct gru_thread_state *gts) * success are high. If trylock fails, try to steal a * different GSEG. */ - if (ngts && mutex_trylock(&ngts->ts_ctxlock)) + if (ngts && is_gts_stealable(ngts, blade)) break; ngts = NULL; flag = 1; @@ -690,13 +759,12 @@ static void gru_steal_context(struct gru_thread_state *gts) blade->bs_lru_gru = gru; blade->bs_lru_ctxnum = ctxnum; spin_unlock(&blade->bs_lock); - preempt_enable(); if (ngts) { - STAT(steal_context); + gts->ustats.context_stolen++; ngts->ts_steal_jiffies = jiffies; - gru_unload_context(ngts, 1); - mutex_unlock(&ngts->ts_ctxlock); + gru_unload_context(ngts, is_kernel_context(ngts) ? 0 : 1); + gts_stolen(ngts, blade); } else { STAT(steal_context_failed); } @@ -710,17 +778,17 @@ static void gru_steal_context(struct gru_thread_state *gts) /* * Scan the GRUs on the local blade & assign a GRU context.
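 * The caller is now responsible for disabling preemption and passes the target blade id explicitly (see gru_fault()), rather than this function sampling uv_numa_blade_id() itself.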
*/ -static struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts) +struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts, + int blade) { struct gru_state *gru, *grux; int i, max_active_contexts; - preempt_disable(); again: gru = NULL; max_active_contexts = GRU_NUM_CCH; - for_each_gru_on_blade(grux, uv_numa_blade_id(), i) { + for_each_gru_on_blade(grux, blade, i) { if (check_gru_resources(grux, gts->ts_cbr_au_count, gts->ts_dsr_au_count, max_active_contexts)) { @@ -760,7 +828,6 @@ again: STAT(assign_context_failed); } - preempt_enable(); return gru; } @@ -775,6 +842,7 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct gru_thread_state *gts; unsigned long paddr, vaddr; + int blade_id; vaddr = (unsigned long)vmf->virtual_address; gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n", @@ -789,8 +857,10 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf) again: mutex_lock(&gts->ts_ctxlock); preempt_disable(); + blade_id = uv_numa_blade_id(); + if (gts->ts_gru) { - if (gts->ts_gru->gs_blade_id != uv_numa_blade_id()) { + if (gts->ts_gru->gs_blade_id != blade_id) { STAT(migrated_nopfn_unload); gru_unload_context(gts, 1); } else { @@ -800,12 +870,15 @@ again: } if (!gts->ts_gru) { - if (!gru_assign_gru_context(gts)) { - mutex_unlock(&gts->ts_ctxlock); + STAT(load_user_context); + if (!gru_assign_gru_context(gts, blade_id)) { preempt_enable(); + mutex_unlock(&gts->ts_ctxlock); + set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(GRU_ASSIGN_DELAY); /* true hack ZZZ */ + blade_id = uv_numa_blade_id(); if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies) - gru_steal_context(gts); + gru_steal_context(gts, blade_id); goto again; } gru_load_context(gts); @@ -815,8 +888,8 @@ again: vma->vm_page_prot); } - mutex_unlock(&gts->ts_ctxlock); preempt_enable(); + mutex_unlock(&gts->ts_ctxlock); return VM_FAULT_NOPAGE; } diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c index ee74821..9cbf95b 100644 --- a/drivers/misc/sgi-gru/gruprocfs.c +++ b/drivers/misc/sgi-gru/gruprocfs.c @@ -51,9 +51,12 @@ static int statistics_show(struct seq_file *s, void *p) printstat(s, assign_context); printstat(s, assign_context_failed); printstat(s, free_context); - printstat(s, load_context); - printstat(s, unload_context); - printstat(s, steal_context); + printstat(s, load_user_context); + printstat(s, load_kernel_context); + printstat(s, lock_kernel_context); + printstat(s, unlock_kernel_context); + printstat(s, steal_user_context); + printstat(s, steal_kernel_context); printstat(s, steal_context_failed); printstat(s, nopfn); printstat(s, break_cow); @@ -70,7 +73,7 @@ static int statistics_show(struct seq_file *s, void *p) printstat(s, user_flush_tlb); printstat(s, user_unload_context); printstat(s, user_exception); - printstat(s, set_task_slice); + printstat(s, set_context_option); printstat(s, migrate_check); printstat(s, migrated_retarget); printstat(s, migrated_unload); @@ -84,6 +87,9 @@ static int statistics_show(struct seq_file *s, void *p) printstat(s, tlb_dropin_fail_range_active); printstat(s, tlb_dropin_fail_idle); printstat(s, tlb_dropin_fail_fmm); + printstat(s, tlb_dropin_fail_no_exception); + printstat(s, tlb_dropin_fail_no_exception_war); + printstat(s, tfh_stale_on_fault); printstat(s, mmu_invalidate_range); printstat(s, mmu_invalidate_page); printstat(s, mmu_clear_flush_young); @@ -158,8 +164,7 @@ static ssize_t options_write(struct file *file, const char __user *userbuf, unsigned long val; char buf[80]; - if
(copy_from_user - (buf, userbuf, count < sizeof(buf) ? count : sizeof(buf))) + if (strncpy_from_user(buf, userbuf, sizeof(buf) - 1) < 0) return -EFAULT; buf[count - 1] = '\0'; if (!strict_strtoul(buf, 10, &val)) diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h index bf1eeb7..34ab3d4 100644 --- a/drivers/misc/sgi-gru/grutables.h +++ b/drivers/misc/sgi-gru/grutables.h @@ -148,11 +148,13 @@ #include <linux/wait.h> #include <linux/mmu_notifier.h> #include "gru.h" +#include "grulib.h" #include "gruhandles.h" extern struct gru_stats_s gru_stats; extern struct gru_blade_state *gru_base[]; extern unsigned long gru_start_paddr, gru_end_paddr; +extern void *gru_start_vaddr; extern unsigned int gru_max_gids; #define GRU_MAX_BLADES MAX_NUMNODES @@ -174,9 +176,12 @@ struct gru_stats_s { atomic_long_t assign_context; atomic_long_t assign_context_failed; atomic_long_t free_context; - atomic_long_t load_context; - atomic_long_t unload_context; - atomic_long_t steal_context; + atomic_long_t load_user_context; + atomic_long_t load_kernel_context; + atomic_long_t lock_kernel_context; + atomic_long_t unlock_kernel_context; + atomic_long_t steal_user_context; + atomic_long_t steal_kernel_context; atomic_long_t steal_context_failed; atomic_long_t nopfn; atomic_long_t break_cow; @@ -193,7 +198,7 @@ struct gru_stats_s { atomic_long_t user_flush_tlb; atomic_long_t user_unload_context; atomic_long_t user_exception; - atomic_long_t set_task_slice; + atomic_long_t set_context_option; atomic_long_t migrate_check; atomic_long_t migrated_retarget; atomic_long_t migrated_unload; @@ -207,6 +212,9 @@ struct gru_stats_s { atomic_long_t tlb_dropin_fail_range_active; atomic_long_t tlb_dropin_fail_idle; atomic_long_t tlb_dropin_fail_fmm; + atomic_long_t tlb_dropin_fail_no_exception; + atomic_long_t tlb_dropin_fail_no_exception_war; + atomic_long_t tfh_stale_on_fault; atomic_long_t mmu_invalidate_range; atomic_long_t mmu_invalidate_page; atomic_long_t mmu_clear_flush_young; @@ -253,7 +261,6 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last]; #define OPT_DPRINT 1 #define OPT_STATS 2 -#define GRU_QUICKLOOK 4 #define IRQ_GRU 110 /* Starting IRQ number for interrupts */ @@ -373,6 +380,7 @@ struct gru_thread_state { required for contest */ unsigned char ts_cbr_au_count;/* Number of CBR resources required for contest */ + char ts_cch_req_slice;/* CCH packet slice */ char ts_blade; /* If >= 0, migrate context if ref from diferent blade */ char ts_force_cch_reload; @@ -380,6 +388,9 @@ struct gru_thread_state { after migration */ char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each allocated CB */ + int ts_data_valid; /* Indicates if ts_gdata has + valid data */ + struct gts_statistics ustats; /* User statistics */ unsigned long ts_gdata[0]; /* save area for GRU data (CB, DS, CBE) */ }; @@ -452,6 +463,14 @@ struct gru_blade_state { reserved cb */ void *kernel_dsr; /* First kernel reserved DSR */ + struct rw_semaphore bs_kgts_sema; /* lock for kgts */ + struct gru_thread_state *bs_kgts; /* GTS for kernel use */ + + /* ---- the following are used for managing kernel async GRU CBRs --- */ + int bs_async_dsr_bytes; /* DSRs for async */ + int bs_async_cbrs; /* CBRs AU for async */ + struct completion *bs_async_wq; + /* ---- the following are protected by the bs_lock spinlock ---- */ spinlock_t bs_lock; /* lock used for stealing contexts */ @@ -552,6 +571,12 @@ struct gru_blade_state { /* Lock hierarchy checking enabled only in emulator */ +/* 0 = lock failed, 1 = locked */ +static inline int 
__trylock_handle(void *h) +{ + return !test_and_set_bit(1, h); +} + static inline void __lock_handle(void *h) { while (test_and_set_bit(1, h)) @@ -563,6 +588,11 @@ static inline void __unlock_handle(void *h) clear_bit(1, h); } +static inline int trylock_cch_handle(struct gru_context_configuration_handle *cch) +{ + return __trylock_handle(cch); +} + static inline void lock_cch_handle(struct gru_context_configuration_handle *cch) { __lock_handle(cch); @@ -584,6 +614,11 @@ static inline void unlock_tgh_handle(struct gru_tlb_global_handle *tgh) __unlock_handle(tgh); } +static inline int is_kernel_context(struct gru_thread_state *gts) +{ + return !gts->ts_mm; +} + /*----------------------------------------------------------------------------- * Function prototypes & externs */ @@ -598,24 +633,32 @@ extern struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma, int tsid); extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma, int tsid); +extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts, + int blade); +extern void gru_load_context(struct gru_thread_state *gts); +extern void gru_steal_context(struct gru_thread_state *gts, int blade_id); extern void gru_unload_context(struct gru_thread_state *gts, int savestate); extern int gru_update_cch(struct gru_thread_state *gts, int force_unload); extern void gts_drop(struct gru_thread_state *gts); extern void gru_tgh_flush_init(struct gru_state *gru); -extern int gru_kservices_init(struct gru_state *gru); -extern void gru_kservices_exit(struct gru_state *gru); +extern int gru_kservices_init(void); +extern void gru_kservices_exit(void); +extern int gru_dump_chiplet_request(unsigned long arg); +extern long gru_get_gseg_statistics(unsigned long arg); extern irqreturn_t gru_intr(int irq, void *dev_id); extern int gru_handle_user_call_os(unsigned long address); extern int gru_user_flush_tlb(unsigned long arg); extern int gru_user_unload_context(unsigned long arg); extern int gru_get_exception_detail(unsigned long arg); -extern int gru_set_task_slice(long address); +extern int gru_set_context_option(unsigned long address); extern int gru_cpu_fault_map_id(void); extern struct vm_area_struct *gru_find_vma(unsigned long vaddr); extern void gru_flush_all_tlb(struct gru_state *gru); extern int gru_proc_init(void); extern void gru_proc_exit(void); +extern struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma, + int cbr_au_count, int dsr_au_count, int options, int tsid); extern unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count, char *cbmap); extern unsigned long gru_reserve_ds_resources(struct gru_state *gru, @@ -624,6 +667,7 @@ extern int gru_fault(struct vm_area_struct *, struct vm_fault *vmf); extern struct gru_mm_struct *gru_register_mmu_notifier(void); extern void gru_drop_mmu_notifier(struct gru_mm_struct *gms); +extern int gru_ktest(unsigned long arg); extern void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start, unsigned long len); diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig index 3f06310..b1cd7a1 100644 --- a/drivers/mtd/ubi/Kconfig +++ b/drivers/mtd/ubi/Kconfig @@ -49,15 +49,16 @@ config MTD_UBI_BEB_RESERVE reserved. Leave the default value if unsure. 
config MTD_UBI_GLUEBI - bool "Emulate MTD devices" + tristate "MTD devices emulation driver (gluebi)" default n depends on MTD_UBI help - This option enables MTD devices emulation on top of UBI volumes: for - each UBI volumes an MTD device is created, and all I/O to this MTD - device is redirected to the UBI volume. This is handy to make - MTD-oriented software (like JFFS2) work on top of UBI. Do not enable - this if no legacy software will be used. + This option enables gluebi - an additional driver which emulates MTD + devices on top of UBI volumes: for each UBI volume an MTD device is + created, and all I/O to this MTD device is redirected to the UBI + volume. This is handy to make MTD-oriented software (like JFFS2) + work on top of UBI. Do not enable this unless you use legacy + software. source "drivers/mtd/ubi/Kconfig.debug" endmenu diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile index dd834e0..c9302a5 100644 --- a/drivers/mtd/ubi/Makefile +++ b/drivers/mtd/ubi/Makefile @@ -4,4 +4,4 @@ ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o scan.o ubi-y += misc.o ubi-$(CONFIG_MTD_UBI_DEBUG) += debug.o -ubi-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o +obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 4048db8..286ed59 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -41,6 +41,7 @@ #include <linux/miscdevice.h> #include <linux/log2.h> #include <linux/kthread.h> +#include <linux/reboot.h> #include "ubi.h" /* Maximum length of the 'mtd=' parameter */ @@ -122,6 +123,94 @@ static struct device_attribute dev_mtd_num = __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL); /** + * ubi_volume_notify - send a volume change notification. + * @ubi: UBI device description object + * @vol: volume description object of the changed volume + * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc) + * + * This is a helper function which notifies all subscribers about a volume + * change event (creation, removal, re-sizing, re-naming, updating). Returns + * zero in case of success and a negative error code in case of failure. + */ +int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype) +{ + struct ubi_notification nt; + + ubi_do_get_device_info(ubi, &nt.di); + ubi_do_get_volume_info(ubi, vol, &nt.vi); + return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt); +} + +/** + * ubi_notify_all - send a notification to all volumes. + * @ubi: UBI device description object + * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc) + * @nb: the notifier to call + * + * This function walks all volumes of UBI device @ubi and sends the @ntype + * notification for each volume. If @nb is %NULL, then all registered notifiers + * are called, otherwise only the @nb notifier is called. Returns the number of + * sent notifications. + */ +int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb) +{ + struct ubi_notification nt; + int i, count = 0; + + ubi_do_get_device_info(ubi, &nt.di); + + mutex_lock(&ubi->device_mutex); + for (i = 0; i < ubi->vtbl_slots; i++) { + /* + * Since the @ubi->device_mutex is locked, and we are not going to + * change @ubi->volumes, we do not have to lock + * @ubi->volumes_lock.
+ */ + if (!ubi->volumes[i]) + continue; + + ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi); + if (nb) + nb->notifier_call(nb, ntype, &nt); + else + blocking_notifier_call_chain(&ubi_notifiers, ntype, + &nt); + count += 1; + } + mutex_unlock(&ubi->device_mutex); + + return count; +} + +/** + * ubi_enumerate_volumes - send "add" notification for all existing volumes. + * @nb: the notifier to call + * + * This function walks all UBI devices and volumes and sends the + * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all + * registered notifiers are called, otherwise only the @nb notifier is called. + * Returns the number of sent notifications. + */ +int ubi_enumerate_volumes(struct notifier_block *nb) +{ + int i, count = 0; + + /* + * Since the @ubi_devices_mutex is locked, and we are not going to + * change @ubi_devices, we do not have to lock @ubi_devices_lock. + */ + for (i = 0; i < UBI_MAX_DEVICES; i++) { + struct ubi_device *ubi = ubi_devices[i]; + + if (!ubi) + continue; + count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb); + } + + return count; +} + +/** * ubi_get_device - get UBI device. * @ubi_num: UBI device number * @@ -380,7 +469,7 @@ static void free_user_volumes(struct ubi_device *ubi) * @ubi: UBI device description object * * This function returns zero in case of success and a negative error code in - * case of failure. Note, this function destroys all volumes if it failes. + * case of failure. Note, this function destroys all volumes if it fails. */ static int uif_init(struct ubi_device *ubi) { @@ -633,6 +722,15 @@ static int io_init(struct ubi_device *ubi) } /* + * Set the maximum number of erroneous physical eraseblocks to 10%. + * Erroneous PEBs are those that have read errors. + */ + ubi->max_erroneous = ubi->peb_count / 10; + if (ubi->max_erroneous < 16) + ubi->max_erroneous = 16; + dbg_msg("max_erroneous %d", ubi->max_erroneous); + + /* * It may happen that EC and VID headers are situated in one minimal * I/O unit. In this case we can only accept this UBI image in * read-only mode. @@ -726,6 +824,34 @@ static int autoresize(struct ubi_device *ubi, int vol_id) } /** + * ubi_reboot_notifier - halt UBI transactions immediately prior to a reboot. + * @n: reboot notifier object + * @state: SYS_RESTART, SYS_HALT, or SYS_POWER_OFF + * @cmd: pointer to command string for RESTART2 + * + * This function stops the UBI background thread so that the flash device + * remains quiescent when Linux restarts the system. Any queued work will be + * discarded, but this function will block until do_work() finishes if an + * operation is already in progress. + * + * This function solves a real-life problem observed on NOR flashes when a + * PEB erase operation starts, then the system is rebooted before the erase + * finishes, and the boot loader gets confused and dies. So we prefer to finish + * the ongoing operation before rebooting. + */ +static int ubi_reboot_notifier(struct notifier_block *n, unsigned long state, + void *cmd) +{ + struct ubi_device *ubi; + + ubi = container_of(n, struct ubi_device, reboot_notifier); + if (ubi->bgt_thread) + kthread_stop(ubi->bgt_thread); + ubi_sync(ubi->ubi_num); + return NOTIFY_DONE; +} + +/** + * ubi_attach_mtd_dev - attach an MTD device.
* @mtd: MTD device description object * @ubi_num: number to assign to the new UBI device @@ -806,8 +932,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) mutex_init(&ubi->buf_mutex); mutex_init(&ubi->ckvol_mutex); - mutex_init(&ubi->mult_mutex); - mutex_init(&ubi->volumes_mutex); + mutex_init(&ubi->device_mutex); spin_lock_init(&ubi->volumes_lock); ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num); @@ -825,7 +950,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) if (!ubi->peb_buf2) goto out_free; -#ifdef CONFIG_MTD_UBI_DEBUG +#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID mutex_init(&ubi->dbg_buf_mutex); ubi->dbg_peb_buf = vmalloc(ubi->peb_size); if (!ubi->dbg_peb_buf) @@ -872,11 +997,23 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) ubi->beb_rsvd_pebs); ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec); + /* + * The below lock makes sure we do not race with 'ubi_thread()' which + * checks @ubi->thread_enabled. Otherwise we may fail to wake it up. + */ + spin_lock(&ubi->wl_lock); if (!DBG_DISABLE_BGT) ubi->thread_enabled = 1; wake_up_process(ubi->bgt_thread); + spin_unlock(&ubi->wl_lock); + + /* Flash device priority is 0 - UBI needs to shut down first */ + ubi->reboot_notifier.priority = 1; + ubi->reboot_notifier.notifier_call = ubi_reboot_notifier; + register_reboot_notifier(&ubi->reboot_notifier); ubi_devices[ubi_num] = ubi; + ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL); return ubi_num; out_uif: @@ -892,7 +1029,7 @@ out_detach: out_free: vfree(ubi->peb_buf1); vfree(ubi->peb_buf2); -#ifdef CONFIG_MTD_UBI_DEBUG +#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID vfree(ubi->dbg_peb_buf); #endif kfree(ubi); @@ -919,13 +1056,13 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway) if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) return -EINVAL; - spin_lock(&ubi_devices_lock); - ubi = ubi_devices[ubi_num]; - if (!ubi) { - spin_unlock(&ubi_devices_lock); + ubi = ubi_get_device(ubi_num); + if (!ubi) return -EINVAL; - } + spin_lock(&ubi_devices_lock); + put_device(&ubi->dev); + ubi->ref_count -= 1; if (ubi->ref_count) { if (!anyway) { spin_unlock(&ubi_devices_lock); @@ -939,12 +1076,14 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway) spin_unlock(&ubi_devices_lock); ubi_assert(ubi_num == ubi->ubi_num); + ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL); dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num); /* * Before freeing anything, we have to stop the background thread to * prevent it from doing anything on this device while we are freeing. 
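 * The reboot notifier is unregistered first so that a reboot arriving in the
 * middle of the detach cannot call back into a device that is being torn down.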
*/ + unregister_reboot_notifier(&ubi->reboot_notifier); if (ubi->bgt_thread) kthread_stop(ubi->bgt_thread); @@ -961,7 +1100,7 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway) put_mtd_device(ubi->mtd); vfree(ubi->peb_buf1); vfree(ubi->peb_buf2); -#ifdef CONFIG_MTD_UBI_DEBUG +#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID vfree(ubi->dbg_peb_buf); #endif ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num); diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c index f8e0f68..f237ddb 100644 --- a/drivers/mtd/ubi/cdev.c +++ b/drivers/mtd/ubi/cdev.c @@ -113,7 +113,8 @@ static int vol_cdev_open(struct inode *inode, struct file *file) else mode = UBI_READONLY; - dbg_gen("open volume %d, mode %d", vol_id, mode); + dbg_gen("open device %d, volume %d, mode %d", + ubi_num, vol_id, mode); desc = ubi_open_volume(ubi_num, vol_id, mode); if (IS_ERR(desc)) @@ -128,7 +129,8 @@ static int vol_cdev_release(struct inode *inode, struct file *file) struct ubi_volume_desc *desc = file->private_data; struct ubi_volume *vol = desc->vol; - dbg_gen("release volume %d, mode %d", vol->vol_id, desc->mode); + dbg_gen("release device %d, volume %d, mode %d", + vol->ubi->ubi_num, vol->vol_id, desc->mode); if (vol->updating) { ubi_warn("update of volume %d not finished, volume is damaged", @@ -393,7 +395,7 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf, vol->corrupted = 1; } vol->checked = 1; - ubi_gluebi_updated(vol); + ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED); revoke_exclusive(desc, UBI_READWRITE); } @@ -558,7 +560,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd, break; } - /* Set volume property command*/ + /* Set volume property command */ case UBI_IOCSETPROP: { struct ubi_set_prop_req req; @@ -571,9 +573,9 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd, } switch (req.property) { case UBI_PROP_DIRECT_WRITE: - mutex_lock(&ubi->volumes_mutex); + mutex_lock(&ubi->device_mutex); desc->vol->direct_writes = !!req.value; - mutex_unlock(&ubi->volumes_mutex); + mutex_unlock(&ubi->device_mutex); break; default: err = -EINVAL; @@ -810,9 +812,9 @@ static int rename_volumes(struct ubi_device *ubi, re->desc->vol->vol_id, re->desc->vol->name); } - mutex_lock(&ubi->volumes_mutex); + mutex_lock(&ubi->device_mutex); err = ubi_rename_volumes(ubi, &rename_list); - mutex_unlock(&ubi->volumes_mutex); + mutex_unlock(&ubi->device_mutex); out_free: list_for_each_entry_safe(re, re1, &rename_list, list) { @@ -856,9 +858,9 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd, if (err) break; - mutex_lock(&ubi->volumes_mutex); + mutex_lock(&ubi->device_mutex); err = ubi_create_volume(ubi, &req); - mutex_unlock(&ubi->volumes_mutex); + mutex_unlock(&ubi->device_mutex); if (err) break; @@ -887,9 +889,9 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd, break; } - mutex_lock(&ubi->volumes_mutex); + mutex_lock(&ubi->device_mutex); err = ubi_remove_volume(desc, 0); - mutex_unlock(&ubi->volumes_mutex); + mutex_unlock(&ubi->device_mutex); /* * The volume is deleted (unless an error occurred), and the @@ -926,9 +928,9 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd, pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1, desc->vol->usable_leb_size); - mutex_lock(&ubi->volumes_mutex); + mutex_lock(&ubi->device_mutex); err = ubi_resize_volume(desc, pebs); - mutex_unlock(&ubi->volumes_mutex); + mutex_unlock(&ubi->device_mutex); ubi_close_volume(desc); break; } @@ -952,9 +954,7 @@ static long ubi_cdev_ioctl(struct file 
*file, unsigned int cmd, break; } - mutex_lock(&ubi->mult_mutex); err = rename_volumes(ubi, req); - mutex_unlock(&ubi->mult_mutex); kfree(req); break; } diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index 25def34..0f2034c 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -419,8 +419,9 @@ retry: * not implemented. */ if (err == UBI_IO_BAD_VID_HDR) { - ubi_warn("bad VID header at PEB %d, LEB" - "%d:%d", pnum, vol_id, lnum); + ubi_warn("corrupted VID header at PEB " + "%d, LEB %d:%d", pnum, vol_id, + lnum); err = -EBADMSG; } else ubi_ro_mode(ubi); @@ -940,6 +941,33 @@ write_error: } /** + * is_error_sane - check whether a read error is sane. + * @err: code of the error happened during reading + * + * This is a helper function for 'ubi_eba_copy_leb()' which is called when we + * cannot read data from the target PEB (an error @err happened). If the error + * code is sane, then we treat this error as non-fatal. Otherwise the error is + * fatal and UBI will be switched to R/O mode later. + * + * The idea is that we try not to switch to R/O mode if the read error is + * something which suggests there was a real read problem. E.g., %-EIO. Or a + * memory allocation failed (-%ENOMEM). Otherwise, it is safer to switch to R/O + * mode, simply because we do not know what happened at the MTD level, and we + * cannot handle this. E.g., the underlying driver may have become crazy, and + * it is safer to switch to R/O mode to preserve the data. + * + * And bear in mind, this is about reading from the target PEB, i.e. the PEB + * which we have just written. + */ +static int is_error_sane(int err) +{ + if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_VID_HDR || + err == -ETIMEDOUT) + return 0; + return 1; +} + +/** * ubi_eba_copy_leb - copy logical eraseblock. * @ubi: UBI device description object * @from: physical eraseblock number from where to copy @@ -950,12 +978,7 @@ write_error: * physical eraseblock @to. The @vid_hdr buffer may be changed by this * function. Returns: * o %0 in case of success; - * o %1 if the operation was canceled because the volume is being deleted - * or because the PEB was put meanwhile; - * o %2 if the operation was canceled because there was a write error to the - * target PEB; - * o %-EAGAIN if the operation was canceled because a bit-flip was detected - * in the target PEB; + * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_CANCEL_BITFLIPS, etc; * o a negative error code in case of failure. */ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, @@ -968,7 +991,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, vol_id = be32_to_cpu(vid_hdr->vol_id); lnum = be32_to_cpu(vid_hdr->lnum); - dbg_eba("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to); + dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to); if (vid_hdr->vol_type == UBI_VID_STATIC) { data_size = be32_to_cpu(vid_hdr->data_size); @@ -986,13 +1009,12 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish. 
*/ vol = ubi->volumes[idx]; + spin_unlock(&ubi->volumes_lock); if (!vol) { /* No need to do further work, cancel */ - dbg_eba("volume %d is being removed, cancel", vol_id); - spin_unlock(&ubi->volumes_lock); - return 1; + dbg_wl("volume %d is being removed, cancel", vol_id); + return MOVE_CANCEL_RACE; } - spin_unlock(&ubi->volumes_lock); /* * We do not want anybody to write to this logical eraseblock while we @@ -1004,12 +1026,13 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, * (@from). This task locks the LEB and goes sleep in the * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the - * LEB is already locked, we just do not move it and return %1. + * LEB is already locked, we just do not move it and return + * %MOVE_CANCEL_RACE, which means that UBI will re-try, but later. */ err = leb_write_trylock(ubi, vol_id, lnum); if (err) { - dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum); - return err; + dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum); + return MOVE_CANCEL_RACE; } /* @@ -1018,25 +1041,26 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, * cancel it. */ if (vol->eba_tbl[lnum] != from) { - dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to " - "PEB %d, cancel", vol_id, lnum, from, - vol->eba_tbl[lnum]); - err = 1; + dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to " + "PEB %d, cancel", vol_id, lnum, from, + vol->eba_tbl[lnum]); + err = MOVE_CANCEL_RACE; goto out_unlock_leb; } /* * OK, now the LEB is locked and we can safely start moving it. Since - * this function utilizes the @ubi->peb1_buf buffer which is shared - * with some other functions, so lock the buffer by taking the + * this function utilizes the @ubi->peb_buf1 buffer which is shared + * with some other functions - we lock the buffer by taking the * @ubi->buf_mutex. */ mutex_lock(&ubi->buf_mutex); - dbg_eba("read %d bytes of data", aldata_size); + dbg_wl("read %d bytes of data", aldata_size); err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size); if (err && err != UBI_IO_BITFLIPS) { ubi_warn("error %d while reading data from PEB %d", err, from); + err = MOVE_SOURCE_RD_ERR; goto out_unlock_buf; } @@ -1059,7 +1083,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, cond_resched(); /* - * It may turn out to me that the whole @from physical eraseblock + * It may turn out to be that the whole @from physical eraseblock * contains only 0xFF bytes. Then we have to only write the VID header * and do not write any data. This also means we should not set * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc. 
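The MOVE_* codes introduced here replace the old numeric returns (1, 2, -EAGAIN) with named results that the caller can dispatch on. The fragment below is only an illustrative sketch of such a dispatch; the recovery comments describe plausible policy, not code from this patch:

	int err = ubi_eba_copy_leb(ubi, from, to, vid_hdr);

	switch (err) {
	case 0:
		/* copied - the old PEB can now be erased */
		break;
	case MOVE_CANCEL_RACE:
		/* LEB was busy or vanished - retry the operation later */
		break;
	case MOVE_CANCEL_BITFLIPS:
		/* bit-flips in the target - pick another target PEB */
		break;
	case MOVE_SOURCE_RD_ERR:
		/* source PEB unreadable - account it as erroneous */
		break;
	case MOVE_TARGET_RD_ERR:
	case MOVE_TARGET_WR_ERR:
		/* target PEB looks bad - torture and possibly mark it bad */
		break;
	default:
		/* negative error codes are fatal - switch to R/O mode */
		break;
	}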
@@ -1074,7 +1098,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); if (err) { if (err == -EIO) - err = 2; + err = MOVE_TARGET_WR_ERR; goto out_unlock_buf; } @@ -1083,10 +1107,13 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, /* Read the VID header back and check if it was written correctly */ err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1); if (err) { - if (err != UBI_IO_BITFLIPS) - ubi_warn("cannot read VID header back from PEB %d", to); - else - err = -EAGAIN; + if (err != UBI_IO_BITFLIPS) { + ubi_warn("error %d while reading VID header back from " + "PEB %d", err, to); + if (is_error_sane(err)) + err = MOVE_TARGET_RD_ERR; + } else + err = MOVE_CANCEL_BITFLIPS; goto out_unlock_buf; } @@ -1094,7 +1121,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size); if (err) { if (err == -EIO) - err = 2; + err = MOVE_TARGET_WR_ERR; goto out_unlock_buf; } @@ -1107,11 +1134,13 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size); if (err) { - if (err != UBI_IO_BITFLIPS) - ubi_warn("cannot read data back from PEB %d", - to); - else - err = -EAGAIN; + if (err != UBI_IO_BITFLIPS) { + ubi_warn("error %d while reading data back " + "from PEB %d", err, to); + if (is_error_sane(err)) + err = MOVE_TARGET_RD_ERR; + } else + err = MOVE_CANCEL_BITFLIPS; goto out_unlock_buf; } diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c index 49cd55a..95aaac0 100644 --- a/drivers/mtd/ubi/gluebi.c +++ b/drivers/mtd/ubi/gluebi.c @@ -19,17 +19,71 @@ */ /* - * This file includes implementation of fake MTD devices for each UBI volume. - * This sounds strange, but it is in fact quite useful to make MTD-oriented - * software (including all the legacy software) to work on top of UBI. + * This is a small driver which implements fake MTD devices on top of UBI + * volumes. This sounds strange, but it is in fact quite useful to make + * MTD-oriented software (including all the legacy software) work on top of + * UBI. * * Gluebi emulates MTD devices of "MTD_UBIVOLUME" type. Their minimal I/O unit - * size (mtd->writesize) is equivalent to the UBI minimal I/O unit. The + * size (@mtd->writesize) is equivalent to the UBI minimal I/O unit. The * eraseblock size is equivalent to the logical eraseblock size of the volume. */ +#include <linux/err.h> +#include <linux/list.h> +#include <linux/sched.h> #include <linux/math64.h> -#include "ubi.h" +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/mtd/ubi.h> +#include <linux/mtd/mtd.h> +#include "ubi-media.h" + +#define err_msg(fmt, ...) \ + printk(KERN_DEBUG "gluebi (pid %d): %s: " fmt "\n", \ + current->pid, __func__, ##__VA_ARGS__) + +/** + * struct gluebi_device - a gluebi device description data structure. + * @mtd: emulated MTD device description object + * @refcnt: gluebi device reference count + * @desc: UBI volume descriptor + * @ubi_num: UBI device number this gluebi device works on + * @vol_id: ID of UBI volume this gluebi device works on + * @list: link in a list of gluebi devices + */ +struct gluebi_device { + struct mtd_info mtd; + int refcnt; + struct ubi_volume_desc *desc; + int ubi_num; + int vol_id; + struct list_head list; +}; + +/* List of all gluebi devices */ +static LIST_HEAD(gluebi_devices); +static DEFINE_MUTEX(devices_mutex); + +/** + * find_gluebi_nolock - find a gluebi device. 
+ * @ubi_num: UBI device number + * @vol_id: volume ID + * + * This function searches for a gluebi device corresponding to UBI device + * @ubi_num and UBI volume @vol_id. Returns the gluebi device description + * object in case of success and %NULL in case of failure. The caller has to + * have the &devices_mutex locked. + */ +static struct gluebi_device *find_gluebi_nolock(int ubi_num, int vol_id) +{ + struct gluebi_device *gluebi; + + list_for_each_entry(gluebi, &gluebi_devices, list) + if (gluebi->ubi_num == ubi_num && gluebi->vol_id == vol_id) + return gluebi; + return NULL; +} /** * gluebi_get_device - get MTD device reference. @@ -41,15 +95,18 @@ */ static int gluebi_get_device(struct mtd_info *mtd) { - struct ubi_volume *vol; + struct gluebi_device *gluebi; + int ubi_mode = UBI_READONLY; - vol = container_of(mtd, struct ubi_volume, gluebi_mtd); + if (!try_module_get(THIS_MODULE)) + return -ENODEV; - /* - * We do not introduce locks for gluebi reference count because the - * get_device()/put_device() calls are already serialized at MTD. - */ - if (vol->gluebi_refcount > 0) { + if (mtd->flags & MTD_WRITEABLE) + ubi_mode = UBI_READWRITE; + + gluebi = container_of(mtd, struct gluebi_device, mtd); + mutex_lock(&devices_mutex); + if (gluebi->refcnt > 0) { /* * The MTD device is already referenced and this is just one * more reference. MTD allows many users to open the same * open the UBI volume again - just increase the reference * counter and return. */ - vol->gluebi_refcount += 1; + gluebi->refcnt += 1; + mutex_unlock(&devices_mutex); return 0; } @@ -66,11 +124,15 @@ static int gluebi_get_device(struct mtd_info *mtd) * This is the first reference to this UBI volume via the MTD device * interface. Open the corresponding volume in read-write mode.
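 * (or read-only, if the underlying MTD device was registered without
 * MTD_WRITEABLE - see the @ubi_mode selection above).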
*/ - vol->gluebi_desc = ubi_open_volume(vol->ubi->ubi_num, vol->vol_id, - UBI_READWRITE); - if (IS_ERR(vol->gluebi_desc)) - return PTR_ERR(vol->gluebi_desc); - vol->gluebi_refcount += 1; + gluebi->desc = ubi_open_volume(gluebi->ubi_num, gluebi->vol_id, + ubi_mode); + if (IS_ERR(gluebi->desc)) { + mutex_unlock(&devices_mutex); + module_put(THIS_MODULE); + return PTR_ERR(gluebi->desc); + } + gluebi->refcnt += 1; + mutex_unlock(&devices_mutex); return 0; } @@ -83,13 +145,15 @@ static int gluebi_get_device(struct mtd_info *mtd) */ static void gluebi_put_device(struct mtd_info *mtd) { - struct ubi_volume *vol; - - vol = container_of(mtd, struct ubi_volume, gluebi_mtd); - vol->gluebi_refcount -= 1; - ubi_assert(vol->gluebi_refcount >= 0); - if (vol->gluebi_refcount == 0) - ubi_close_volume(vol->gluebi_desc); + struct gluebi_device *gluebi; + + gluebi = container_of(mtd, struct gluebi_device, mtd); + mutex_lock(&devices_mutex); + gluebi->refcnt -= 1; + if (gluebi->refcnt == 0) + ubi_close_volume(gluebi->desc); + module_put(THIS_MODULE); + mutex_unlock(&devices_mutex); } /** @@ -107,16 +171,12 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, unsigned char *buf) { int err = 0, lnum, offs, total_read; - struct ubi_volume *vol; - struct ubi_device *ubi; - - dbg_gen("read %zd bytes from offset %lld", len, from); + struct gluebi_device *gluebi; if (len < 0 || from < 0 || from + len > mtd->size) return -EINVAL; - vol = container_of(mtd, struct ubi_volume, gluebi_mtd); - ubi = vol->ubi; + gluebi = container_of(mtd, struct gluebi_device, mtd); lnum = div_u64_rem(from, mtd->erasesize, &offs); total_read = len; @@ -126,7 +186,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len, if (to_read > total_read) to_read = total_read; - err = ubi_eba_read_leb(ubi, vol, lnum, buf, offs, to_read, 0); + err = ubi_read(gluebi->desc, lnum, buf, offs, to_read); if (err) break; @@ -152,21 +212,17 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len, * case of failure. 
*/ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len, - size_t *retlen, const u_char *buf) + size_t *retlen, const u_char *buf) { int err = 0, lnum, offs, total_written; - struct ubi_volume *vol; - struct ubi_device *ubi; - - dbg_gen("write %zd bytes to offset %lld", len, to); + struct gluebi_device *gluebi; if (len < 0 || to < 0 || len + to > mtd->size) return -EINVAL; - vol = container_of(mtd, struct ubi_volume, gluebi_mtd); - ubi = vol->ubi; + gluebi = container_of(mtd, struct gluebi_device, mtd); - if (ubi->ro_mode) + if (!(mtd->flags & MTD_WRITEABLE)) return -EROFS; lnum = div_u64_rem(to, mtd->erasesize, &offs); @@ -181,8 +237,7 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len, if (to_write > total_written) to_write = total_written; - err = ubi_eba_write_leb(ubi, vol, lnum, buf, offs, to_write, - UBI_UNKNOWN); + err = ubi_write(gluebi->desc, lnum, buf, offs, to_write); if (err) break; @@ -207,41 +262,36 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len, static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr) { int err, i, lnum, count; - struct ubi_volume *vol; - struct ubi_device *ubi; - - dbg_gen("erase %llu bytes at offset %llu", (unsigned long long)instr->len, - (unsigned long long)instr->addr); + struct gluebi_device *gluebi; if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize) return -EINVAL; - if (instr->len < 0 || instr->addr + instr->len > mtd->size) return -EINVAL; - if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd)) return -EINVAL; lnum = mtd_div_by_eb(instr->addr, mtd); count = mtd_div_by_eb(instr->len, mtd); - vol = container_of(mtd, struct ubi_volume, gluebi_mtd); - ubi = vol->ubi; + gluebi = container_of(mtd, struct gluebi_device, mtd); - if (ubi->ro_mode) + if (!(mtd->flags & MTD_WRITEABLE)) return -EROFS; - for (i = 0; i < count; i++) { - err = ubi_eba_unmap_leb(ubi, vol, lnum + i); + for (i = 0; i < count - 1; i++) { + err = ubi_leb_unmap(gluebi->desc, lnum + i); if (err) goto out_err; } - /* * MTD erase operations are synchronous, so we have to make sure the * physical eraseblock is wiped out. + * + * Thus, for the last eraseblock perform leb_erase instead of leb_unmap - + * leb_erase is synchronous and waits for the erase operation to finish */ - err = ubi_wl_flush(ubi); + err = ubi_leb_erase(gluebi->desc, lnum + i); if (err) goto out_err; @@ -256,28 +306,38 @@ out_err: } /** - * ubi_create_gluebi - initialize gluebi for an UBI volume. - * @ubi: UBI device description object - * @vol: volume description object + * gluebi_create - create a gluebi device for an UBI volume. + * @di: UBI device description object + * @vi: UBI volume description object * - * This function is called when an UBI volume is created in order to create + * This function is called when a new UBI volume is created in order to create * corresponding fake MTD device. Returns zero in case of success and a * negative error code in case of failure.
*/ -int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol) +static int gluebi_create(struct ubi_device_info *di, + struct ubi_volume_info *vi) { - struct mtd_info *mtd = &vol->gluebi_mtd; + struct gluebi_device *gluebi, *g; + struct mtd_info *mtd; - mtd->name = kmemdup(vol->name, vol->name_len + 1, GFP_KERNEL); - if (!mtd->name) + gluebi = kzalloc(sizeof(struct gluebi_device), GFP_KERNEL); + if (!gluebi) return -ENOMEM; + mtd = &gluebi->mtd; + mtd->name = kmemdup(vi->name, vi->name_len + 1, GFP_KERNEL); + if (!mtd->name) { + kfree(gluebi); + return -ENOMEM; + } + + gluebi->vol_id = vi->vol_id; mtd->type = MTD_UBIVOLUME; - if (!ubi->ro_mode) + if (!di->ro_mode) mtd->flags = MTD_WRITEABLE; - mtd->writesize = ubi->min_io_size; mtd->owner = THIS_MODULE; - mtd->erasesize = vol->usable_leb_size; + mtd->writesize = di->min_io_size; + mtd->erasesize = vi->usable_leb_size; mtd->read = gluebi_read; mtd->write = gluebi_write; mtd->erase = gluebi_erase; @@ -285,60 +345,196 @@ int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol) mtd->put_device = gluebi_put_device; /* - * In case of dynamic volume, MTD device size is just volume size. In + * In case of a dynamic volume, MTD device size is just volume size. In * case of a static volume the size is equivalent to the amount of data * bytes. */ - if (vol->vol_type == UBI_DYNAMIC_VOLUME) - mtd->size = (long long)vol->usable_leb_size * vol->reserved_pebs; + if (vi->vol_type == UBI_DYNAMIC_VOLUME) + mtd->size = (unsigned long long)vi->usable_leb_size * vi->size; else - mtd->size = vol->used_bytes; + mtd->size = vi->used_bytes; + + /* Just a sanity check - make sure this gluebi device does not exist */ + mutex_lock(&devices_mutex); + g = find_gluebi_nolock(vi->ubi_num, vi->vol_id); + if (g) + err_msg("gluebi MTD device %d from UBI device %d volume %d " + "already exists", g->mtd.index, vi->ubi_num, + vi->vol_id); + mutex_unlock(&devices_mutex); if (add_mtd_device(mtd)) { - ubi_err("cannot not add MTD device"); + err_msg("cannot add MTD device"); kfree(mtd->name); + kfree(gluebi); return -ENFILE; } - dbg_gen("added mtd%d (\"%s\"), size %llu, EB size %u", - mtd->index, mtd->name, (unsigned long long)mtd->size, mtd->erasesize); + mutex_lock(&devices_mutex); + list_add_tail(&gluebi->list, &gluebi_devices); + mutex_unlock(&devices_mutex); return 0; } /** - * ubi_destroy_gluebi - close gluebi for an UBI volume. - * @vol: volume description object + * gluebi_remove - remove a gluebi device. + * @vi: UBI volume description object * - * This function is called when an UBI volume is removed in order to remove + * This function is called when an UBI volume is removed and it removes * corresponding fake MTD device. Returns zero in case of success and a * negative error code in case of failure.
*/ -int ubi_destroy_gluebi(struct ubi_volume *vol) +static int gluebi_remove(struct ubi_volume_info *vi) { - int err; - struct mtd_info *mtd = &vol->gluebi_mtd; + int err = 0; + struct mtd_info *mtd; + struct gluebi_device *gluebi; + + mutex_lock(&devices_mutex); + gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id); + if (!gluebi) { + err_msg("got remove notification for unknown UBI device %d " + "volume %d", vi->ubi_num, vi->vol_id); + err = -ENOENT; + } else if (gluebi->refcnt) + err = -EBUSY; + else + list_del(&gluebi->list); + mutex_unlock(&devices_mutex); + if (err) + return err; - dbg_gen("remove mtd%d", mtd->index); + mtd = &gluebi->mtd; err = del_mtd_device(mtd); - if (err) + if (err) { + err_msg("cannot remove fake MTD device %d, UBI device %d, " + "volume %d, error %d", mtd->index, gluebi->ubi_num, + gluebi->vol_id, err); + mutex_lock(&devices_mutex); + list_add_tail(&gluebi->list, &gluebi_devices); + mutex_unlock(&devices_mutex); return err; + } + kfree(mtd->name); + kfree(gluebi); return 0; } /** - * ubi_gluebi_updated - UBI volume was updated notifier. - * @vol: volume description object + * gluebi_updated - UBI volume was updated notifier. + * @vi: volume info structure * - * This function is called every time an UBI volume is updated. This function - * does nothing if volume @vol is dynamic, and changes MTD device size if the + * This function is called every time an UBI volume is updated. It does nothing + * if the volume @vi is dynamic, and changes MTD device size if the * volume is static. This is needed because static volumes cannot be read past - * data they contain. + * data they contain. This function returns zero in case of success and a + * negative error code in case of error. + */ +static int gluebi_updated(struct ubi_volume_info *vi) +{ + struct gluebi_device *gluebi; + + mutex_lock(&devices_mutex); + gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id); + if (!gluebi) { + mutex_unlock(&devices_mutex); + err_msg("got update notification for unknown UBI device %d " + "volume %d", vi->ubi_num, vi->vol_id); + return -ENOENT; + } + + if (vi->vol_type == UBI_STATIC_VOLUME) + gluebi->mtd.size = vi->used_bytes; + mutex_unlock(&devices_mutex); + return 0; +} + +/** + * gluebi_resized - UBI volume was re-sized notifier. + * @vi: volume info structure + * + * This function is called every time an UBI volume is re-sized. It changes the + * corresponding fake MTD device size. This function returns zero in case of + * success and a negative error code in case of error. + */ +static int gluebi_resized(struct ubi_volume_info *vi) +{ + struct gluebi_device *gluebi; + + mutex_lock(&devices_mutex); + gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id); + if (!gluebi) { + mutex_unlock(&devices_mutex); + err_msg("got re-size notification for unknown UBI device %d " + "volume %d", vi->ubi_num, vi->vol_id); + return -ENOENT; + } + gluebi->mtd.size = vi->used_bytes; + mutex_unlock(&devices_mutex); + return 0; +} + +/** + * gluebi_notify - UBI notification handler.
+ * @nb: registered notifier block + * @l: notification type + * @ns_ptr: pointer to the &struct ubi_notification object */ -void ubi_gluebi_updated(struct ubi_volume *vol) +static int gluebi_notify(struct notifier_block *nb, unsigned long l, + void *ns_ptr) { - struct mtd_info *mtd = &vol->gluebi_mtd; + struct ubi_notification *nt = ns_ptr; + + switch (l) { + case UBI_VOLUME_ADDED: + gluebi_create(&nt->di, &nt->vi); + break; + case UBI_VOLUME_REMOVED: + gluebi_remove(&nt->vi); + break; + case UBI_VOLUME_RESIZED: + gluebi_resized(&nt->vi); + break; + case UBI_VOLUME_UPDATED: + gluebi_updated(&nt->vi); + break; + default: + break; + } + return NOTIFY_OK; +} - if (vol->vol_type == UBI_STATIC_VOLUME) - mtd->size = vol->used_bytes; +static struct notifier_block gluebi_notifier = { + .notifier_call = gluebi_notify, +}; + +static int __init ubi_gluebi_init(void) +{ + return ubi_register_volume_notifier(&gluebi_notifier, 0); } + +static void __exit ubi_gluebi_exit(void) +{ + struct gluebi_device *gluebi, *g; + + list_for_each_entry_safe(gluebi, g, &gluebi_devices, list) { + int err; + struct mtd_info *mtd = &gluebi->mtd; + + err = del_mtd_device(mtd); + if (err) + err_msg("error %d while removing gluebi MTD device %d, " + "UBI device %d, volume %d - ignoring", err, + mtd->index, gluebi->ubi_num, gluebi->vol_id); + kfree(mtd->name); + kfree(gluebi); + } + ubi_unregister_volume_notifier(&gluebi_notifier); +} + +module_init(ubi_gluebi_init); +module_exit(ubi_gluebi_exit); +MODULE_DESCRIPTION("MTD emulation layer over UBI volumes"); +MODULE_AUTHOR("Artem Bityutskiy, Joern Engel"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index fe81039..effaff2 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c @@ -100,6 +100,7 @@ static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum, const struct ubi_vid_hdr *vid_hdr); static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len); +static int paranoid_check_empty(struct ubi_device *ubi, int pnum); #else #define paranoid_check_not_bad(ubi, pnum) 0 #define paranoid_check_peb_ec_hdr(ubi, pnum) 0 @@ -107,6 +108,7 @@ static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset, #define paranoid_check_peb_vid_hdr(ubi, pnum) 0 #define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0 #define paranoid_check_all_ff(ubi, pnum, offset, len) 0 +#define paranoid_check_empty(ubi, pnum) 0 #endif /** @@ -670,11 +672,6 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, if (read_err != -EBADMSG && check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) { /* The physical eraseblock is supposedly empty */ - - /* - * The below is just a paranoid check, it has to be - * compiled out if paranoid checks are disabled. - */ err = paranoid_check_all_ff(ubi, pnum, 0, ubi->peb_size); if (err) @@ -902,7 +899,7 @@ bad: * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected * and corrected by the flash driver; this is harmless but may indicate that * this eraseblock may become bad soon; - * o %UBI_IO_BAD_VID_HRD if the volume identifier header is corrupted (a CRC + * o %UBI_IO_BAD_VID_HDR if the volume identifier header is corrupted (a CRC * error detected); * o %UBI_IO_PEB_FREE if the physical eraseblock is free (i.e., there is no VID * header there); @@ -955,8 +952,7 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, * The below is just a paranoid check, it has to be * compiled out if paranoid checks are disabled.
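The gluebi module above is the first in-tree client of the new volume notifier interface. For illustration, a minimal out-of-tree client following the same pattern might look like the sketch below; the "example" names are hypothetical, while the registration calls, the notification types and the ubi_notification layout are the ones this patch introduces.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/mtd/ubi.h>

static int example_notify(struct notifier_block *nb, unsigned long l,
			  void *ns_ptr)
{
	struct ubi_notification *nt = ns_ptr;

	switch (l) {
	case UBI_VOLUME_ADDED:
		/* also delivered for pre-existing volumes, see below */
		printk(KERN_INFO "volume %d added on UBI device %d\n",
		       nt->vi.vol_id, nt->vi.ubi_num);
		break;
	case UBI_VOLUME_REMOVED:
		printk(KERN_INFO "volume %d removed\n", nt->vi.vol_id);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_notify,
};

static int __init example_init(void)
{
	/* 0: also get UBI_VOLUME_ADDED for volumes which already exist */
	return ubi_register_volume_notifier(&example_nb, 0);
}

static void __exit example_exit(void)
{
	ubi_unregister_volume_notifier(&example_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");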
*/ - err = paranoid_check_all_ff(ubi, pnum, ubi->leb_start, - ubi->leb_size); + err = paranoid_check_empty(ubi, pnum); if (err) return err > 0 ? UBI_IO_BAD_VID_HDR : err; @@ -1280,4 +1276,74 @@ error: return err; } +/** + * paranoid_check_empty - whether a PEB is empty. + * @ubi: UBI device description object + * @pnum: the physical eraseblock number to check + * + * This function makes sure PEB @pnum is empty, which means it contains only + * %0xFF data bytes. Returns zero if the PEB is empty, %1 if not, and a + * negative error code in case of failure. + * + * Empty PEBs have the EC header, and do not have the VID header. The caller of + * this function should have already made sure the PEB does not have the VID + * header. However, this function re-checks that, because it is possible that + * the header and data have already been written to the PEB. + * + * Let's consider a possible scenario. Suppose there are 2 tasks - A and B. + * Task A is in 'wear_leveling_worker()'. It is reading VID header of PEB X to + * find which LEB it corresponds to. PEB X is currently unmapped, and has no + * VID header. Task B is trying to write to PEB X. + * + * Task A: in 'ubi_io_read_vid_hdr()': reads the VID header from PEB X. The + * read data contain all 0xFF bytes; + * Task B: writes VID header and some data to PEB X; + * Task A: assumes PEB X is empty, calls 'paranoid_check_empty()'. If we did + * not re-read the VID header here, and did not cancel the check when + * the header is present, the check would fail. + */ +static int paranoid_check_empty(struct ubi_device *ubi, int pnum) +{ + int err, offs = ubi->vid_hdr_aloffset, len = ubi->vid_hdr_alsize; + size_t read; + uint32_t magic; + const struct ubi_vid_hdr *vid_hdr; + + mutex_lock(&ubi->dbg_buf_mutex); + err = ubi->mtd->read(ubi->mtd, offs, len, &read, ubi->dbg_peb_buf); + if (err && err != -EUCLEAN) { + ubi_err("error %d while reading %d bytes from PEB %d:%d, " + "read %zd bytes", err, len, pnum, offs, read); + goto error; + } + + vid_hdr = ubi->dbg_peb_buf; + magic = be32_to_cpu(vid_hdr->magic); + if (magic == UBI_VID_HDR_MAGIC) + /* The PEB contains VID header, so it is not empty */ + goto out; + + err = check_pattern(ubi->dbg_peb_buf, 0xFF, len); + if (err == 0) { + ubi_err("flash region at PEB %d:%d, length %d does not " + "contain all 0xFF bytes", pnum, offs, len); + goto fail; + } + +out: + mutex_unlock(&ubi->dbg_buf_mutex); + return 0; + +fail: + ubi_err("paranoid check failed for PEB %d", pnum); + ubi_msg("hex dump of the %d-%d region", offs, offs + len); + print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, + ubi->dbg_peb_buf, len, 1); + err = 1; +error: + ubi_dbg_dump_stack(); + mutex_unlock(&ubi->dbg_buf_mutex); + return err; +} + #endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c index 4abbe57..88a72e9 100644 --- a/drivers/mtd/ubi/kapi.c +++ b/drivers/mtd/ubi/kapi.c @@ -26,6 +26,24 @@ #include "ubi.h" /** + * ubi_do_get_device_info - get information about UBI device. + * @ubi: UBI device description object + * @di: the information is stored here + * + * This function is the same as 'ubi_get_device_info()', but it assumes the UBI + * device is locked and cannot disappear.
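The paranoid_check_empty() function above leans on the check_pattern() helper, which is not part of this patch. Judging purely by the call sites (a non-zero return means "every byte matches the pattern"), one plausible implementation would be the following sketch; the in-tree helper may differ in its details.

/* Hypothetical reconstruction of check_pattern(), inferred from its users. */
static int check_pattern(const void *buf, uint8_t patt, int size)
{
	int i;

	for (i = 0; i < size; i++)
		if (((const uint8_t *)buf)[i] != patt)
			return 0;
	return 1;
}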
+ */ +void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di) +{ + di->ubi_num = ubi->ubi_num; + di->leb_size = ubi->leb_size; + di->min_io_size = ubi->min_io_size; + di->ro_mode = ubi->ro_mode; + di->cdev = ubi->cdev.dev; +} +EXPORT_SYMBOL_GPL(ubi_do_get_device_info); + +/** * ubi_get_device_info - get information about UBI device. * @ubi_num: UBI device number * @di: the information is stored here @@ -39,33 +57,24 @@ int ubi_get_device_info(int ubi_num, struct ubi_device_info *di) if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) return -EINVAL; - ubi = ubi_get_device(ubi_num); if (!ubi) return -ENODEV; - - di->ubi_num = ubi->ubi_num; - di->leb_size = ubi->leb_size; - di->min_io_size = ubi->min_io_size; - di->ro_mode = ubi->ro_mode; - di->cdev = ubi->cdev.dev; - + ubi_do_get_device_info(ubi, di); ubi_put_device(ubi); return 0; } EXPORT_SYMBOL_GPL(ubi_get_device_info); /** - * ubi_get_volume_info - get information about UBI volume. - * @desc: volume descriptor + * ubi_do_get_volume_info - get information about UBI volume. + * @ubi: UBI device description object + * @vol: volume description object * @vi: the information is stored here */ -void ubi_get_volume_info(struct ubi_volume_desc *desc, - struct ubi_volume_info *vi) +void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol, + struct ubi_volume_info *vi) { - const struct ubi_volume *vol = desc->vol; - const struct ubi_device *ubi = vol->ubi; - vi->vol_id = vol->vol_id; vi->ubi_num = ubi->ubi_num; vi->size = vol->reserved_pebs; @@ -79,6 +88,17 @@ void ubi_get_volume_info(struct ubi_volume_desc *desc, vi->name = vol->name; vi->cdev = vol->cdev.dev; } + +/** + * ubi_get_volume_info - get information about UBI volume. + * @desc: volume descriptor + * @vi: the information is stored here + */ +void ubi_get_volume_info(struct ubi_volume_desc *desc, + struct ubi_volume_info *vi) +{ + ubi_do_get_volume_info(desc->vol->ubi, desc->vol, vi); +} EXPORT_SYMBOL_GPL(ubi_get_volume_info); /** @@ -106,7 +126,7 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode) struct ubi_device *ubi; struct ubi_volume *vol; - dbg_gen("open device %d volume %d, mode %d", ubi_num, vol_id, mode); + dbg_gen("open device %d, volume %d, mode %d", ubi_num, vol_id, mode); if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) return ERR_PTR(-EINVAL); @@ -196,6 +216,8 @@ out_free: kfree(desc); out_put_ubi: ubi_put_device(ubi); + dbg_err("cannot open device %d, volume %d, error %d", + ubi_num, vol_id, err); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(ubi_open_volume); @@ -215,7 +237,7 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name, struct ubi_device *ubi; struct ubi_volume_desc *ret; - dbg_gen("open volume %s, mode %d", name, mode); + dbg_gen("open device %d, volume %s, mode %d", ubi_num, name, mode); if (!name) return ERR_PTR(-EINVAL); @@ -266,7 +288,8 @@ void ubi_close_volume(struct ubi_volume_desc *desc) struct ubi_volume *vol = desc->vol; struct ubi_device *ubi = vol->ubi; - dbg_gen("close volume %d, mode %d", vol->vol_id, desc->mode); + dbg_gen("close device %d, volume %d, mode %d", + ubi->ubi_num, vol->vol_id, desc->mode); spin_lock(&ubi->volumes_lock); switch (desc->mode) { @@ -558,7 +581,7 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum) EXPORT_SYMBOL_GPL(ubi_leb_unmap); /** - * ubi_leb_map - map logical erasblock to a physical eraseblock. + * ubi_leb_map - map logical eraseblock to a physical eraseblock. 
* @desc: volume descriptor * @lnum: logical eraseblock number * @dtype: expected data type @@ -656,3 +679,59 @@ int ubi_sync(int ubi_num) return 0; } EXPORT_SYMBOL_GPL(ubi_sync); + +BLOCKING_NOTIFIER_HEAD(ubi_notifiers); + +/** + * ubi_register_volume_notifier - register a volume notifier. + * @nb: the notifier description object + * @ignore_existing: if non-zero, do not send "added" notification for all + * already existing volumes + * + * This function registers a volume notifier, which means that + * 'nb->notifier_call()' will be invoked when an UBI volume is created, + * removed, re-sized, re-named, or updated. The first argument of the function + * is the notification type. The second argument is a pointer to a + * &struct ubi_notification object which describes the notification event. + * Using UBI API from the volume notifier is prohibited. + * + * This function returns zero in case of success and a negative error code + * in case of failure. + */ +int ubi_register_volume_notifier(struct notifier_block *nb, + int ignore_existing) +{ + int err; + + err = blocking_notifier_chain_register(&ubi_notifiers, nb); + if (err != 0) + return err; + if (ignore_existing) + return 0; + + /* + * We are going to walk all UBI devices and all volumes, and + * notify the user about existing volumes by the %UBI_VOLUME_ADDED + * event. We have to lock the @ubi_devices_mutex to make sure UBI + * devices do not disappear. + */ + mutex_lock(&ubi_devices_mutex); + ubi_enumerate_volumes(nb); + mutex_unlock(&ubi_devices_mutex); + + return err; +} +EXPORT_SYMBOL_GPL(ubi_register_volume_notifier); + +/** + * ubi_unregister_volume_notifier - unregister the volume notifier. + * @nb: the notifier description object + * + * This function unregisters volume notifier @nb and returns zero in case of + * success and a negative error code in case of failure. + */ +int ubi_unregister_volume_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&ubi_notifiers, nb); +} +EXPORT_SYMBOL_GPL(ubi_unregister_volume_notifier); diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index c055511bb..28acd13 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -36,6 +36,7 @@ #include <linux/device.h> #include <linux/string.h> #include <linux/vmalloc.h> +#include <linux/notifier.h> #include <linux/mtd/mtd.h> #include <linux/mtd/ubi.h> @@ -100,6 +101,28 @@ enum { UBI_IO_BITFLIPS }; +/* + * Return codes of the 'ubi_eba_copy_leb()' function. + * + * MOVE_CANCEL_RACE: canceled because the volume is being deleted, the source + * PEB was put meanwhile, or there is I/O on the source PEB + * MOVE_SOURCE_RD_ERR: canceled because there was a read error from the source + * PEB + * MOVE_TARGET_RD_ERR: canceled because there was a read error from the target + * PEB + * MOVE_TARGET_WR_ERR: canceled because there was a write error to the target + * PEB + * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the + * target PEB + */ +enum { + MOVE_CANCEL_RACE = 1, + MOVE_SOURCE_RD_ERR, + MOVE_TARGET_RD_ERR, + MOVE_TARGET_WR_ERR, + MOVE_CANCEL_BITFLIPS, +}; + /** * struct ubi_wl_entry - wear-leveling entry.
* @u.rb: link in the corresponding (free/used) RB-tree @@ -208,10 +231,6 @@ struct ubi_volume_desc; * @changing_leb: %1 if the atomic LEB change ioctl command is in progress * @direct_writes: %1 if direct writes are enabled for this volume * - * @gluebi_desc: gluebi UBI volume descriptor - * @gluebi_refcount: reference count of the gluebi MTD device - * @gluebi_mtd: MTD device description object of the gluebi MTD device - * * The @corrupted field indicates that the volume's contents is corrupted. * Since UBI protects only static volumes, this field is not relevant to * dynamic volumes - it is user's responsibility to assure their data @@ -255,17 +274,6 @@ struct ubi_volume { unsigned int updating:1; unsigned int changing_leb:1; unsigned int direct_writes:1; - -#ifdef CONFIG_MTD_UBI_GLUEBI - /* - * Gluebi-related stuff may be compiled out. - * Note: this should not be built into UBI but should be a separate - * ubimtd driver which works on top of UBI and emulates MTD devices. - */ - struct ubi_volume_desc *gluebi_desc; - int gluebi_refcount; - struct mtd_info gluebi_mtd; -#endif }; /** @@ -305,9 +313,9 @@ struct ubi_wl_entry; * @vtbl_slots: how many slots are available in the volume table * @vtbl_size: size of the volume table in bytes * @vtbl: in-RAM volume table copy - * @volumes_mutex: protects on-flash volume table and serializes volume - * changes, like creation, deletion, update, re-size, - * re-name and set property + * @device_mutex: protects on-flash volume table and serializes volume - * creation, deletion, update, re-size, re-name and set + * property * * @max_ec: current highest erase counter value * @mean_ec: current mean erase counter value @@ -318,14 +326,15 @@ struct ubi_wl_entry; * @alc_mutex: serializes "atomic LEB change" operations * * @used: RB-tree of used physical eraseblocks + * @erroneous: RB-tree of erroneous used physical eraseblocks * @free: RB-tree of free physical eraseblocks * @scrub: RB-tree of physical eraseblocks which need scrubbing * @pq: protection queue (contain physical eraseblocks which are temporarily * protected from the wear-leveling worker) * @pq_head: protection queue head * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from, - * @move_to, @move_to_put @erase_pending, @wl_scheduled and @works - * fields + * @move_to, @move_to_put, @erase_pending, @wl_scheduled, @works, + * @erroneous, and @erroneous_peb_count fields * @move_mutex: serializes eraseblock moves * @work_sem: synchronizes the WL worker with use tasks * @wl_scheduled: non-zero if the wear-leveling was scheduled @@ -339,12 +348,15 @@ struct ubi_wl_entry; * @bgt_thread: background thread description object * @thread_enabled: if the background thread is enabled * @bgt_name: background thread name + * @reboot_notifier: notifier to terminate background thread before rebooting * * @flash_size: underlying MTD device size (in bytes) * @peb_count: count of physical eraseblocks on the MTD device * @peb_size: physical eraseblock size * @bad_peb_count: count of bad physical eraseblocks * @good_peb_count: count of good physical eraseblocks + * @erroneous_peb_count: count of erroneous physical eraseblocks in @erroneous + * @max_erroneous: maximum allowed amount of erroneous physical eraseblocks * @min_io_size: minimal input/output unit size of the underlying MTD device * @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers * @ro_mode: if the UBI device is in read-only mode @@ -366,7 +378,6 @@ struct ubi_wl_entry; * @peb_buf2: another buffer of PEB size used for
different purposes * @buf_mutex: protects @peb_buf1 and @peb_buf2 * @ckvol_mutex: serializes static volume checking when opening - * @mult_mutex: serializes operations on multiple volumes, like re-naming * @dbg_peb_buf: buffer of PEB size used for debugging * @dbg_buf_mutex: protects @dbg_peb_buf */ @@ -389,7 +400,7 @@ struct ubi_device { int vtbl_slots; int vtbl_size; struct ubi_vtbl_record *vtbl; - struct mutex volumes_mutex; + struct mutex device_mutex; int max_ec; /* Note, mean_ec is not updated run-time - should be fixed */ @@ -403,6 +414,7 @@ struct ubi_device { /* Wear-leveling sub-system's stuff */ struct rb_root used; + struct rb_root erroneous; struct rb_root free; struct rb_root scrub; struct list_head pq[UBI_PROT_QUEUE_LEN]; @@ -420,6 +432,7 @@ struct ubi_device { struct task_struct *bgt_thread; int thread_enabled; char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2]; + struct notifier_block reboot_notifier; /* I/O sub-system's stuff */ long long flash_size; @@ -427,6 +440,8 @@ struct ubi_device { int peb_size; int bad_peb_count; int good_peb_count; + int erroneous_peb_count; + int max_erroneous; int min_io_size; int hdrs_min_io_size; int ro_mode; @@ -444,8 +459,7 @@ struct ubi_device { void *peb_buf2; struct mutex buf_mutex; struct mutex ckvol_mutex; - struct mutex mult_mutex; -#ifdef CONFIG_MTD_UBI_DEBUG +#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID void *dbg_peb_buf; struct mutex dbg_buf_mutex; #endif @@ -457,6 +471,7 @@ extern const struct file_operations ubi_cdev_operations; extern const struct file_operations ubi_vol_cdev_operations; extern struct class *ubi_class; extern struct mutex ubi_devices_mutex; +extern struct blocking_notifier_head ubi_notifiers; /* vtbl.c */ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, @@ -489,17 +504,6 @@ int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int ubi_check_volume(struct ubi_device *ubi, int vol_id); void ubi_calculate_reserved(struct ubi_device *ubi); -/* gluebi.c */ -#ifdef CONFIG_MTD_UBI_GLUEBI -int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol); -int ubi_destroy_gluebi(struct ubi_volume *vol); -void ubi_gluebi_updated(struct ubi_volume *vol); -#else -#define ubi_create_gluebi(ubi, vol) 0 -#define ubi_destroy_gluebi(vol) 0 -#define ubi_gluebi_updated(vol) -#endif - /* eba.c */ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum); @@ -549,6 +553,16 @@ struct ubi_device *ubi_get_device(int ubi_num); void ubi_put_device(struct ubi_device *ubi); struct ubi_device *ubi_get_by_major(int major); int ubi_major2num(int major); +int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, + int ntype); +int ubi_notify_all(struct ubi_device *ubi, int ntype, + struct notifier_block *nb); +int ubi_enumerate_volumes(struct notifier_block *nb); + +/* kapi.c */ +void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di); +void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol, + struct ubi_volume_info *vi); /* * ubi_rb_for_each_entry - walk an RB-tree. 
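The ubi.h hunk above adds the @erroneous RB-tree next to @used, @free and @scrub. All of these trees hold struct ubi_wl_entry nodes linked through @u.rb, so any of them can be walked with the stock rbtree API - which is essentially what the ubi_rb_for_each_entry() helper wraps. A sketch (hypothetical function; the caller is assumed to hold @wl_lock):

#include <linux/rbtree.h>

static int count_erroneous(struct ubi_device *ubi)
{
	struct rb_node *node;
	struct ubi_wl_entry *e;
	int count = 0;

	for (node = rb_first(&ubi->erroneous); node; node = rb_next(node)) {
		e = rb_entry(node, struct ubi_wl_entry, u.rb);
		printk(KERN_DEBUG "erroneous PEB %d, EC %d\n",
		       e->pnum, e->ec);
		count += 1;
	}
	return count;
}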
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c index 6b4d1ae..74fdc40 100644 --- a/drivers/mtd/ubi/upd.c +++ b/drivers/mtd/ubi/upd.c @@ -68,10 +68,10 @@ static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol) sizeof(struct ubi_vtbl_record)); vtbl_rec.upd_marker = 1; - mutex_lock(&ubi->volumes_mutex); + mutex_lock(&ubi->device_mutex); err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec); - mutex_unlock(&ubi->volumes_mutex); vol->upd_marker = 1; + mutex_unlock(&ubi->device_mutex); return err; } @@ -109,10 +109,10 @@ static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol, vol->last_eb_bytes = vol->usable_leb_size; } - mutex_lock(&ubi->volumes_mutex); + mutex_lock(&ubi->device_mutex); err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec); - mutex_unlock(&ubi->volumes_mutex); vol->upd_marker = 0; + mutex_unlock(&ubi->device_mutex); return err; } diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index df54835..ab64cb5 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c @@ -198,7 +198,7 @@ static void volume_sysfs_close(struct ubi_volume *vol) * %UBI_VOL_NUM_AUTO, this function automatically assign ID to the new volume * and saves it in @req->vol_id. Returns zero in case of success and a negative * error code in case of failure. Note, the caller has to have the - * @ubi->volumes_mutex locked. + * @ubi->device_mutex locked. */ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) { @@ -232,8 +232,8 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) req->vol_id = vol_id; } - dbg_gen("volume ID %d, %llu bytes, type %d, name %s", - vol_id, (unsigned long long)req->bytes, + dbg_gen("create device %d, volume %d, %llu bytes, type %d, name %s", + ubi->ubi_num, vol_id, (unsigned long long)req->bytes, (int)req->vol_type, req->name); /* Ensure that this volume does not exist */ @@ -317,10 +317,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) goto out_mapping; } - err = ubi_create_gluebi(ubi, vol); - if (err) - goto out_cdev; - vol->dev.release = vol_release; vol->dev.parent = &ubi->dev; vol->dev.devt = dev; @@ -330,7 +326,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) err = device_register(&vol->dev); if (err) { ubi_err("cannot register device"); - goto out_gluebi; + goto out_cdev; } err = volume_sysfs_init(ubi, vol); @@ -358,7 +354,9 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) ubi->vol_count += 1; spin_unlock(&ubi->volumes_lock); - err = paranoid_check_volumes(ubi); + ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED); + if (paranoid_check_volumes(ubi)) + dbg_err("check failed while creating volume %d", vol_id); return err; out_sysfs: @@ -373,10 +371,6 @@ out_sysfs: do_free = 0; get_device(&vol->dev); volume_sysfs_close(vol); -out_gluebi: - if (ubi_destroy_gluebi(vol)) - dbg_err("cannot destroy gluebi for volume %d:%d", - ubi->ubi_num, vol_id); out_cdev: cdev_del(&vol->cdev); out_mapping: @@ -403,7 +397,7 @@ out_unlock: * * This function removes volume described by @desc. The volume has to be opened * in "exclusive" mode. Returns zero in case of success and a negative error - * code in case of failure. The caller has to have the @ubi->volumes_mutex + * code in case of failure. The caller has to have the @ubi->device_mutex * locked. 
*/ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl) @@ -412,7 +406,7 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl) { struct ubi_volume *vol = desc->vol; struct ubi_device *ubi = vol->ubi; int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs; - dbg_gen("remove UBI volume %d", vol_id); + dbg_gen("remove device %d, volume %d", ubi->ubi_num, vol_id); ubi_assert(desc->mode == UBI_EXCLUSIVE); ubi_assert(vol == ubi->volumes[vol_id]); @@ -431,10 +425,6 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl) ubi->volumes[vol_id] = NULL; spin_unlock(&ubi->volumes_lock); - err = ubi_destroy_gluebi(vol); - if (err) - goto out_err; - if (!no_vtbl) { err = ubi_change_vtbl_record(ubi, vol_id, NULL); if (err) @@ -465,8 +455,10 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl) ubi->vol_count -= 1; spin_unlock(&ubi->volumes_lock); - if (!no_vtbl) - err = paranoid_check_volumes(ubi); + ubi_volume_notify(ubi, vol, UBI_VOLUME_REMOVED); + if (!no_vtbl && paranoid_check_volumes(ubi)) + dbg_err("check failed while removing volume %d", vol_id); + return err; out_err: @@ -485,7 +477,7 @@ out_unlock: * * This function re-sizes the volume and returns zero in case of success, and a * negative error code in case of failure. The caller has to have the - * @ubi->volumes_mutex locked. + * @ubi->device_mutex locked. */ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) { @@ -498,8 +490,8 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) if (ubi->ro_mode) return -EROFS; - dbg_gen("re-size volume %d to from %d to %d PEBs", - vol_id, vol->reserved_pebs, reserved_pebs); + dbg_gen("re-size device %d, volume %d from %d to %d PEBs", + ubi->ubi_num, vol_id, vol->reserved_pebs, reserved_pebs); if (vol->vol_type == UBI_STATIC_VOLUME && reserved_pebs < vol->used_ebs) { @@ -587,7 +579,9 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) (long long)vol->used_ebs * vol->usable_leb_size; } - err = paranoid_check_volumes(ubi); + ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED); + if (paranoid_check_volumes(ubi)) + dbg_err("check failed while re-sizing volume %d", vol_id); return err; out_acc: @@ -632,11 +626,12 @@ int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list) vol->name_len = re->new_name_len; memcpy(vol->name, re->new_name, re->new_name_len + 1); spin_unlock(&ubi->volumes_lock); + ubi_volume_notify(ubi, vol, UBI_VOLUME_RENAMED); } } - if (!err) - err = paranoid_check_volumes(ubi); + if (!err && paranoid_check_volumes(ubi)) + dbg_err("check failed while re-naming volumes"); return err; } @@ -667,10 +662,6 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol) return err; } - err = ubi_create_gluebi(ubi, vol); - if (err) - goto out_cdev; - vol->dev.release = vol_release; vol->dev.parent = &ubi->dev; vol->dev.devt = dev; @@ -678,21 +669,19 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol) dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id); err = device_register(&vol->dev); if (err) - goto out_gluebi; + goto out_cdev; err = volume_sysfs_init(ubi, vol); if (err) { cdev_del(&vol->cdev); - err = ubi_destroy_gluebi(vol); volume_sysfs_close(vol); return err; } - err = paranoid_check_volumes(ubi); + if (paranoid_check_volumes(ubi)) + dbg_err("check failed while adding volume %d", vol_id); return err; -out_gluebi: - err = ubi_destroy_gluebi(vol); out_cdev: cdev_del(&vol->cdev); return err; @@ -708,12 +697,9 @@ out_cdev: */ void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol) { - int err; -
dbg_gen("free volume %d", vol->vol_id); ubi->volumes[vol->vol_id] = NULL; - err = ubi_destroy_gluebi(vol); cdev_del(&vol->cdev); volume_sysfs_close(vol); } @@ -868,6 +854,7 @@ fail: if (vol) ubi_dbg_dump_vol_info(vol); ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id); + dump_stack(); spin_unlock(&ubi->volumes_lock); return -EINVAL; } diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 891534f..2b24723 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -55,8 +55,8 @@ * * As it was said, for the UBI sub-system all physical eraseblocks are either * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while - * used eraseblocks are kept in @wl->used or @wl->scrub RB-trees, or - * (temporarily) in the @wl->pq queue. + * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub + * RB-trees, as well as (temporarily) in the @wl->pq queue. * * When the WL sub-system returns a physical eraseblock, the physical * eraseblock is protected from being moved for some "time". For this reason, @@ -83,6 +83,8 @@ * used. The former state corresponds to the @wl->free tree. The latter state * is split up on several sub-states: * o the WL movement is allowed (@wl->used tree); + * o the WL movement is disallowed (@wl->erroneous) because the PEB is + * erroneous - e.g., there was a read error; * o the WL movement is temporarily prohibited (@wl->pq queue); * o scrubbing is needed (@wl->scrub tree). * @@ -653,7 +655,8 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, int cancel) { - int err, scrubbing = 0, torture = 0; + int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; + int vol_id = -1, uninitialized_var(lnum); struct ubi_wl_entry *e1, *e2; struct ubi_vid_hdr *vid_hdr; @@ -738,68 +741,78 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, /* * We are trying to move PEB without a VID header. UBI * always write VID headers shortly after the PEB was - * given, so we have a situation when it did not have - * chance to write it down because it was preempted. - * Just re-schedule the work, so that next time it will - * likely have the VID header in place. + * given, so we have a situation when it has not yet + * had a chance to write it, because it was preempted. + * So add this PEB to the protection queue so far, + * because presumably more data will be written there + * (including the missing VID header), and then we'll + * move it. */ dbg_wl("PEB %d has no VID header", e1->pnum); + protect = 1; goto out_not_moved; } ubi_err("error %d while reading VID header from PEB %d", err, e1->pnum); - if (err > 0) - err = -EIO; goto out_error; } + vol_id = be32_to_cpu(vid_hdr->vol_id); + lnum = be32_to_cpu(vid_hdr->lnum); + err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); if (err) { - if (err == -EAGAIN) + if (err == MOVE_CANCEL_RACE) { + /* + * The LEB has not been moved because the volume is + * being deleted or the PEB has been put meanwhile. We + * should prevent this PEB from being selected for + * wear-leveling movement again, so put it to the + * protection queue. + */ + protect = 1; goto out_not_moved; - if (err < 0) - goto out_error; - if (err == 2) { - /* Target PEB write error, torture it */ + } + + if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR || + err == MOVE_TARGET_RD_ERR) { + /* + * Target PEB had bit-flips or write error - torture it. 
+ */ torture = 1; goto out_not_moved; } - /* - * The LEB has not been moved because the volume is being - * deleted or the PEB has been put meanwhile. We should prevent - * this PEB from being selected for wear-leveling movement - * again, so put it to the protection queue. - */ - - dbg_wl("canceled moving PEB %d", e1->pnum); - ubi_assert(err == 1); - - ubi_free_vid_hdr(ubi, vid_hdr); - vid_hdr = NULL; - - spin_lock(&ubi->wl_lock); - prot_queue_add(ubi, e1); - ubi_assert(!ubi->move_to_put); - ubi->move_from = ubi->move_to = NULL; - ubi->wl_scheduled = 0; - spin_unlock(&ubi->wl_lock); + if (err == MOVE_SOURCE_RD_ERR) { + /* + * An error happened while reading the source PEB. Do + * not switch to R/O mode in this case, and give the + * upper layers a possibility to recover from this, + * e.g. by unmapping corresponding LEB. Instead, just + * put this PEB to the @ubi->erroneous list to prevent + * UBI from trying to move it over and over again. + */ + if (ubi->erroneous_peb_count > ubi->max_erroneous) { + ubi_err("too many erroneous eraseblocks (%d)", + ubi->erroneous_peb_count); + goto out_error; + } + erroneous = 1; + goto out_not_moved; + } - e1 = NULL; - err = schedule_erase(ubi, e2, 0); - if (err) + if (err < 0) goto out_error; - mutex_unlock(&ubi->move_mutex); - return 0; + + ubi_assert(0); } /* The PEB has been successfully moved */ - ubi_free_vid_hdr(ubi, vid_hdr); - vid_hdr = NULL; if (scrubbing) - ubi_msg("scrubbed PEB %d, data moved to PEB %d", - e1->pnum, e2->pnum); + ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d", + e1->pnum, vol_id, lnum, e2->pnum); + ubi_free_vid_hdr(ubi, vid_hdr); spin_lock(&ubi->wl_lock); if (!ubi->move_to_put) { @@ -812,8 +825,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, err = schedule_erase(ubi, e1, 0); if (err) { - e1 = NULL; - goto out_error; + kmem_cache_free(ubi_wl_entry_slab, e1); + if (e2) + kmem_cache_free(ubi_wl_entry_slab, e2); + goto out_ro; } if (e2) { @@ -821,10 +836,13 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, * Well, the target PEB was put meanwhile, schedule it for * erasure. */ - dbg_wl("PEB %d was put meanwhile, erase", e2->pnum); + dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase", + e2->pnum, vol_id, lnum); err = schedule_erase(ubi, e2, 0); - if (err) - goto out_error; + if (err) { + kmem_cache_free(ubi_wl_entry_slab, e2); + goto out_ro; + } } dbg_wl("done"); @@ -837,11 +855,19 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, * have been changed, schedule it for erasure. 
*/ out_not_moved: - dbg_wl("canceled moving PEB %d", e1->pnum); - ubi_free_vid_hdr(ubi, vid_hdr); - vid_hdr = NULL; + if (vol_id != -1) + dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)", + e1->pnum, vol_id, lnum, e2->pnum, err); + else + dbg_wl("cancel moving PEB %d to PEB %d (%d)", + e1->pnum, e2->pnum, err); spin_lock(&ubi->wl_lock); - if (scrubbing) + if (protect) + prot_queue_add(ubi, e1); + else if (erroneous) { + wl_tree_add(e1, &ubi->erroneous); + ubi->erroneous_peb_count += 1; + } else if (scrubbing) wl_tree_add(e1, &ubi->scrub); else wl_tree_add(e1, &ubi->used); @@ -850,32 +876,36 @@ out_not_moved: ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); - e1 = NULL; + ubi_free_vid_hdr(ubi, vid_hdr); err = schedule_erase(ubi, e2, torture); - if (err) - goto out_error; - + if (err) { + kmem_cache_free(ubi_wl_entry_slab, e2); + goto out_ro; + } mutex_unlock(&ubi->move_mutex); return 0; out_error: - ubi_err("error %d while moving PEB %d to PEB %d", - err, e1->pnum, e2->pnum); - - ubi_free_vid_hdr(ubi, vid_hdr); + if (vol_id != -1) + ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d", + err, e1->pnum, vol_id, lnum, e2->pnum); + else + ubi_err("error %d while moving PEB %d to PEB %d", + err, e1->pnum, e2->pnum); spin_lock(&ubi->wl_lock); ubi->move_from = ubi->move_to = NULL; ubi->move_to_put = ubi->wl_scheduled = 0; spin_unlock(&ubi->wl_lock); - if (e1) - kmem_cache_free(ubi_wl_entry_slab, e1); - if (e2) - kmem_cache_free(ubi_wl_entry_slab, e2); - ubi_ro_mode(ubi); + ubi_free_vid_hdr(ubi, vid_hdr); + kmem_cache_free(ubi_wl_entry_slab, e1); + kmem_cache_free(ubi_wl_entry_slab, e2); +out_ro: + ubi_ro_mode(ubi); mutex_unlock(&ubi->move_mutex); - return err; + ubi_assert(err != 0); + return err < 0 ? err : -EIO; out_cancel: ubi->wl_scheduled = 0; @@ -1015,7 +1045,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, /* * If this is not %-EIO, we have no idea what to do. Scheduling * this physical eraseblock for erasure again would cause - * errors again and again. Well, lets switch to RO mode. + * errors again and again. Well, let's switch to R/O mode.
*/ goto out_ro; } @@ -1043,10 +1073,9 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, ubi_err("no reserved physical eraseblocks"); goto out_ro; } - spin_unlock(&ubi->volumes_lock); - ubi_msg("mark PEB %d as bad", pnum); + ubi_msg("mark PEB %d as bad", pnum); err = ubi_io_mark_bad(ubi, pnum); if (err) goto out_ro; @@ -1056,7 +1085,9 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, ubi->bad_peb_count += 1; ubi->good_peb_count -= 1; ubi_calculate_reserved(ubi); - if (ubi->beb_rsvd_pebs == 0) + if (ubi->beb_rsvd_pebs) + ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs); + else ubi_warn("last PEB from the reserved pool was used"); spin_unlock(&ubi->volumes_lock); @@ -1125,6 +1156,13 @@ retry: } else if (in_wl_tree(e, &ubi->scrub)) { paranoid_check_in_wl_tree(e, &ubi->scrub); rb_erase(&e->u.rb, &ubi->scrub); + } else if (in_wl_tree(e, &ubi->erroneous)) { + paranoid_check_in_wl_tree(e, &ubi->erroneous); + rb_erase(&e->u.rb, &ubi->erroneous); + ubi->erroneous_peb_count -= 1; + ubi_assert(ubi->erroneous_peb_count >= 0); + /* Erroneous PEBs should be tortured */ + torture = 1; } else { err = prot_queue_del(ubi, e->pnum); if (err) { @@ -1373,7 +1411,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) struct ubi_scan_leb *seb, *tmp; struct ubi_wl_entry *e; - ubi->used = ubi->free = ubi->scrub = RB_ROOT; + ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT; spin_lock_init(&ubi->wl_lock); mutex_init(&ubi->move_mutex); init_rwsem(&ubi->work_sem); @@ -1511,6 +1549,7 @@ void ubi_wl_close(struct ubi_device *ubi) cancel_pending(ubi); protection_queue_destroy(ubi); tree_destroy(&ubi->used); + tree_destroy(&ubi->erroneous); tree_destroy(&ubi->free); tree_destroy(&ubi->scrub); kfree(ubi->lookuptbl); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 3b63831..892a9e4 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -2272,8 +2272,9 @@ config BNX2 config CNIC tristate "Broadcom CNIC support" - depends on BNX2 - depends on UIO + depends on PCI + select BNX2 + select UIO help This driver supports offload features of Broadcom NetXtremeII gigabit Ethernet cards. 
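The driver hunks below (bnx2, e1000, ixgbe, mv643xx_eth, niu) are all the same mechanical conversion: the old netdev->uc_count / netdev->uc_list pair becomes the netdev->uc structure, which groups the unicast address list together with its count. A condensed sketch of the new access pattern (hypothetical helper; the caller is assumed to serialize against address-list updates, as the drivers do from their set_rx_mode handlers):

#include <linux/netdevice.h>

static void example_sync_unicast(struct net_device *dev,
				 void (*program)(const u8 *addr, int idx))
{
	struct netdev_hw_addr *ha;
	int idx = 0;

	/* dev->uc.count replaces the old dev->uc_count */
	if (dev->uc.count == 0)
		return;

	/* dev->uc.list replaces dev->uc_list; the entries are unchanged */
	list_for_each_entry(ha, &dev->uc.list, list)
		program(ha->addr, idx++);
}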
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 7e37381..38f1c33 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -3552,14 +3552,14 @@ bnx2_set_rx_mode(struct net_device *dev) sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN; } - if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) { + if (dev->uc.count > BNX2_MAX_UNICAST_ADDRESSES) { rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS; sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN | BNX2_RPM_SORT_USER0_PROM_VLAN; } else if (!(dev->flags & IFF_PROMISC)) { /* Add all entries into to the match filter list */ i = 0; - list_for_each_entry(ha, &dev->uc_list, list) { + list_for_each_entry(ha, &dev->uc.list, list) { bnx2_set_mac_addr(bp, ha->addr, i + BNX2_START_UNICAST_ADDRESS_INDEX); sort_mode |= (1 << diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c index 0e9b9f9..2df8fb0a 100644 --- a/drivers/net/davinci_emac.c +++ b/drivers/net/davinci_emac.c @@ -2767,7 +2767,6 @@ static int __devexit davinci_emac_remove(struct platform_device *pdev) dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n"); - clk_disable(emac_clk); platform_set_drvdata(pdev, NULL); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); mdiobus_unregister(priv->mii_bus); diff --git a/drivers/net/e100.c b/drivers/net/e100.c index f7929e8..efa680f 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c @@ -2895,12 +2895,13 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) static int __e100_power_off(struct pci_dev *pdev, bool wake) { - if (wake) { + if (wake) return pci_prepare_to_sleep(pdev); - } else { - pci_wake_from_d3(pdev, false); - return pci_set_power_state(pdev, PCI_D3hot); - } + + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; } #ifdef CONFIG_PM diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 8d36743..5e3356f 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -2370,7 +2370,7 @@ static void e1000_set_rx_mode(struct net_device *netdev) rctl |= E1000_RCTL_VFE; } - if (netdev->uc_count > rar_entries - 1) { + if (netdev->uc.count > rar_entries - 1) { rctl |= E1000_RCTL_UPE; } else if (!(netdev->flags & IFF_PROMISC)) { rctl &= ~E1000_RCTL_UPE; @@ -2394,7 +2394,7 @@ static void e1000_set_rx_mode(struct net_device *netdev) */ i = 1; if (use_uc) - list_for_each_entry(ha, &netdev->uc_list, list) { + list_for_each_entry(ha, &netdev->uc.list, list) { if (i == rar_entries) break; e1000_rar_set(hw, ha->addr, i++); diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index b60a304..1094d29 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -719,7 +719,8 @@ static const struct register_test nv_registers_test[] = { struct nv_skb_map { struct sk_buff *skb; dma_addr_t dma; - unsigned int dma_len; + unsigned int dma_len:31; + unsigned int dma_single:1; struct ring_desc_ex *first_tx_desc; struct nv_skb_map *next_tx_ctx; }; @@ -1912,6 +1913,7 @@ static void nv_init_tx(struct net_device *dev) np->tx_skb[i].skb = NULL; np->tx_skb[i].dma = 0; np->tx_skb[i].dma_len = 0; + np->tx_skb[i].dma_single = 0; np->tx_skb[i].first_tx_desc = NULL; np->tx_skb[i].next_tx_ctx = NULL; } @@ -1930,23 +1932,30 @@ static int nv_init_ring(struct net_device *dev) return nv_alloc_rx_optimized(dev); } -static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb) +static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb) { - struct fe_priv *np = netdev_priv(dev); - if (tx_skb->dma) { - 
pci_unmap_page(np->pci_dev, tx_skb->dma, - tx_skb->dma_len, - PCI_DMA_TODEVICE); + if (tx_skb->dma_single) + pci_unmap_single(np->pci_dev, tx_skb->dma, + tx_skb->dma_len, + PCI_DMA_TODEVICE); + else + pci_unmap_page(np->pci_dev, tx_skb->dma, + tx_skb->dma_len, + PCI_DMA_TODEVICE); tx_skb->dma = 0; } +} + +static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb) +{ + nv_unmap_txskb(np, tx_skb); if (tx_skb->skb) { dev_kfree_skb_any(tx_skb->skb); tx_skb->skb = NULL; return 1; - } else { - return 0; } + return 0; } static void nv_drain_tx(struct net_device *dev) @@ -1964,10 +1973,11 @@ static void nv_drain_tx(struct net_device *dev) np->tx_ring.ex[i].bufhigh = 0; np->tx_ring.ex[i].buflow = 0; } - if (nv_release_txskb(dev, &np->tx_skb[i])) + if (nv_release_txskb(np, &np->tx_skb[i])) dev->stats.tx_dropped++; np->tx_skb[i].dma = 0; np->tx_skb[i].dma_len = 0; + np->tx_skb[i].dma_single = 0; np->tx_skb[i].first_tx_desc = NULL; np->tx_skb[i].next_tx_ctx = NULL; } @@ -2171,6 +2181,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, PCI_DMA_TODEVICE); np->put_tx_ctx->dma_len = bcnt; + np->put_tx_ctx->dma_single = 1; put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); @@ -2196,6 +2207,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, PCI_DMA_TODEVICE); np->put_tx_ctx->dma_len = bcnt; + np->put_tx_ctx->dma_single = 0; put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); @@ -2291,6 +2303,7 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev) np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, PCI_DMA_TODEVICE); np->put_tx_ctx->dma_len = bcnt; + np->put_tx_ctx->dma_single = 1; put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); @@ -2317,6 +2330,7 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev) np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, PCI_DMA_TODEVICE); np->put_tx_ctx->dma_len = bcnt; + np->put_tx_ctx->dma_single = 0; put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); @@ -2434,10 +2448,7 @@ static int nv_tx_done(struct net_device *dev, int limit) dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n", dev->name, flags); - pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma, - np->get_tx_ctx->dma_len, - PCI_DMA_TODEVICE); - np->get_tx_ctx->dma = 0; + nv_unmap_txskb(np, np->get_tx_ctx); if (np->desc_ver == DESC_VER_1) { if (flags & NV_TX_LASTPACKET) { @@ -2502,10 +2513,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit) dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n", dev->name, flags); - pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma, - np->get_tx_ctx->dma_len, - PCI_DMA_TODEVICE); - np->get_tx_ctx->dma = 0; + nv_unmap_txskb(np, np->get_tx_ctx); if (flags & NV_TX2_LASTPACKET) { if (!(flags & NV_TX2_ERROR)) @@ -5091,7 +5099,7 @@ static int nv_loopback_test(struct net_device *dev) dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name); } - 
pci_unmap_page(np->pci_dev, test_dma_addr, + pci_unmap_single(np->pci_dev, test_dma_addr, (skb_end_pointer(tx_skb) - tx_skb->data), PCI_DMA_TODEVICE); dev_kfree_skb_any(tx_skb); diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index 5105548..abcd19a 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -260,7 +260,7 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev) */ if (!netif_running(dev)) { kfree_skb(skb); - return -ENODEV; + return NETDEV_TX_OK; } skb_pull(skb, 1); diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c index 8feda9f..1d3429a 100644 --- a/drivers/net/hp100.c +++ b/drivers/net/hp100.c @@ -1495,13 +1495,8 @@ static int hp100_start_xmit_bm(struct sk_buff *skb, struct net_device *dev) hp100_outw(0x4210, TRACE); printk("hp100: %s: start_xmit_bm\n", dev->name); #endif - - if (skb == NULL) { - return 0; - } - if (skb->len <= 0) - return 0; + goto drop; if (lp->chip == HP100_CHIPID_SHASTA && skb_padto(skb, ETH_ZLEN)) return 0; @@ -1514,10 +1509,10 @@ static int hp100_start_xmit_bm(struct sk_buff *skb, struct net_device *dev) #endif /* not waited long enough since last tx? */ if (time_before(jiffies, dev->trans_start + HZ)) - return -EAGAIN; + goto drop; if (hp100_check_lan(dev)) - return -EIO; + goto drop; if (lp->lan_type == HP100_LAN_100 && lp->hub_status < 0) { /* we have a 100Mb/s adapter but it isn't connected to hub */ @@ -1551,7 +1546,7 @@ static int hp100_start_xmit_bm(struct sk_buff *skb, struct net_device *dev) } dev->trans_start = jiffies; - return -EAGAIN; + goto drop; } /* @@ -1591,6 +1586,10 @@ static int hp100_start_xmit_bm(struct sk_buff *skb, struct net_device *dev) dev->trans_start = jiffies; return 0; + +drop: + dev_kfree_skb(skb); + return NETDEV_TX_OK; } @@ -1648,16 +1647,11 @@ static int hp100_start_xmit(struct sk_buff *skb, struct net_device *dev) hp100_outw(0x4212, TRACE); printk("hp100: %s: start_xmit\n", dev->name); #endif - - if (skb == NULL) { - return 0; - } - if (skb->len <= 0) - return 0; + goto drop; if (hp100_check_lan(dev)) - return -EIO; + goto drop; /* If there is not enough free memory on the card... 
*/ i = hp100_inl(TX_MEM_FREE) & 0x7fffffff; @@ -1671,7 +1665,7 @@ static int hp100_start_xmit(struct sk_buff *skb, struct net_device *dev) printk("hp100: %s: trans_start timing problem\n", dev->name); #endif - return -EAGAIN; + goto drop; } if (lp->lan_type == HP100_LAN_100 && lp->hub_status < 0) { /* we have a 100Mb/s adapter but it isn't connected to hub */ @@ -1705,7 +1699,7 @@ static int hp100_start_xmit(struct sk_buff *skb, struct net_device *dev) } } dev->trans_start = jiffies; - return -EAGAIN; + goto drop; } for (i = 0; i < 6000 && (hp100_inb(OPTION_MSW) & HP100_TX_CMD); i++) { @@ -1759,6 +1753,11 @@ static int hp100_start_xmit(struct sk_buff *skb, struct net_device *dev) #endif return 0; + +drop: + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index a551a96..e756e22 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -2321,7 +2321,7 @@ static void ixgbe_set_rx_mode(struct net_device *netdev) IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); /* reprogram secondary unicast list */ - hw->mac.ops.update_uc_addr_list(hw, &netdev->uc_list); + hw->mac.ops.update_uc_addr_list(hw, &netdev->uc.list); /* reprogram multicast list */ addr_count = netdev->mc_count; @@ -5261,7 +5261,7 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) /** * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding - * netdev->dev_addr_list + * netdev->dev_addrs * @netdev: network interface device structure * * Returns non-zero on failure @@ -5282,7 +5282,7 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev) /** * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding - * netdev->dev_addr_list + * netdev->dev_addrs * @netdev: network interface device structure * * Returns non-zero on failure diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index b4e18a5..745ae8b4 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c @@ -1729,7 +1729,7 @@ static u32 uc_addr_filter_mask(struct net_device *dev) return 0; nibbles = 1 << (dev->dev_addr[5] & 0x0f); - list_for_each_entry(ha, &dev->uc_list, list) { + list_for_each_entry(ha, &dev->uc.list, list) { if (memcmp(dev->dev_addr, ha->addr, 5)) return 0; if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0) diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index ab11c2b..970cede 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h @@ -169,6 +169,7 @@ #define MAX_NUM_CARDS 4 #define MAX_BUFFERS_PER_CMD 32 +#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + 4) /* * Following are the states of the Phantom. 
Phantom will set them and @@ -1436,7 +1437,7 @@ int netxen_nic_set_mac(struct net_device *netdev, void *p); struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev); void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, - struct nx_host_tx_ring *tx_ring, uint32_t crb_producer); + struct nx_host_tx_ring *tx_ring); /* * NetXen Board information @@ -1538,6 +1539,14 @@ dma_watchdog_wakeup(struct netxen_adapter *adapter) } +static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring) +{ + smp_mb(); + return find_diff_among(tx_ring->producer, + tx_ring->sw_consumer, tx_ring->num_desc); + +} + int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac); int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac); extern void netxen_change_ringparam(struct netxen_adapter *adapter); diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h index 7f0ddbf..3cc0478 100644 --- a/drivers/net/netxen/netxen_nic_hdr.h +++ b/drivers/net/netxen/netxen_nic_hdr.h @@ -355,6 +355,7 @@ enum { #define NETXEN_HW_CRB_HUB_AGT_ADR_LPC \ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_LPC_CRB_AGT_ADR) +#define NETXEN_SRE_MISC (NETXEN_CRB_SRE + 0x0002c) #define NETXEN_SRE_INT_STATUS (NETXEN_CRB_SRE + 0x00034) #define NETXEN_SRE_PBI_ACTIVE_STATUS (NETXEN_CRB_SRE + 0x01014) #define NETXEN_SRE_L1RE_CTL (NETXEN_CRB_SRE + 0x03000) diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index 42ffb825..ce3b89d 100644 --- a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c @@ -488,7 +488,7 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter, tx_ring->producer = producer; - netxen_nic_update_cmd_producer(adapter, tx_ring, producer); + netxen_nic_update_cmd_producer(adapter, tx_ring); netif_tx_unlock_bh(adapter->netdev); @@ -2041,8 +2041,8 @@ void netxen_nic_get_firmware_info(struct netxen_adapter *adapter) fw_major, fw_minor, fw_build); if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { - i = NXRD32(adapter, NETXEN_MIU_MN_CONTROL); - adapter->ahw.cut_through = (i & 0x4) ? 1 : 0; + i = NXRD32(adapter, NETXEN_SRE_MISC); + adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0; dev_info(&pdev->dev, "firmware running in %s mode\n", adapter->ahw.cut_through ? "cut-through" : "legacy"); } diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index 6f77ad5..bdb143d 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c @@ -1292,7 +1292,6 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter) return 1; sw_consumer = tx_ring->sw_consumer; - barrier(); /* hw_consumer can change underneath */ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); while (sw_consumer != hw_consumer) { @@ -1319,14 +1318,15 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter) break; } - tx_ring->sw_consumer = sw_consumer; - if (count && netif_running(netdev)) { + tx_ring->sw_consumer = sw_consumer; + smp_mb(); + if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) { netif_tx_lock(netdev); - netif_wake_queue(netdev); - smp_mb(); + if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) + netif_wake_queue(netdev); netif_tx_unlock(netdev); } } @@ -1343,7 +1343,6 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter) * There is still a possible race condition and the host could miss an * interrupt. The card has to take care of this. 
*/ - barrier(); /* hw_consumer can change underneath */ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); done = (sw_consumer == hw_consumer); spin_unlock(&adapter->tx_clean_lock); diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 98737ef..71daa3d 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -107,9 +107,14 @@ static uint32_t crb_cmd_producer[4] = { void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, - struct nx_host_tx_ring *tx_ring, u32 producer) + struct nx_host_tx_ring *tx_ring) { - NXWR32(adapter, tx_ring->crb_cmd_producer, producer); + NXWR32(adapter, tx_ring->crb_cmd_producer, tx_ring->producer); + + if (netxen_tx_avail(tx_ring) <= TX_STOP_THRESH) { + netif_stop_queue(adapter->netdev); + smp_mb(); + } } static uint32_t crb_cmd_consumer[4] = { @@ -119,9 +124,9 @@ static uint32_t crb_cmd_consumer[4] = { static inline void netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter, - struct nx_host_tx_ring *tx_ring, u32 consumer) + struct nx_host_tx_ring *tx_ring) { - NXWR32(adapter, tx_ring->crb_cmd_consumer, consumer); + NXWR32(adapter, tx_ring->crb_cmd_consumer, tx_ring->sw_consumer); } static uint32_t msi_tgt_status[8] = { @@ -900,8 +905,11 @@ netxen_nic_attach(struct netxen_adapter *adapter) tx_ring->crb_cmd_producer = crb_cmd_producer[adapter->portnum]; tx_ring->crb_cmd_consumer = crb_cmd_consumer[adapter->portnum]; - netxen_nic_update_cmd_producer(adapter, tx_ring, 0); - netxen_nic_update_cmd_consumer(adapter, tx_ring, 0); + tx_ring->producer = 0; + tx_ring->sw_consumer = 0; + + netxen_nic_update_cmd_producer(adapter, tx_ring); + netxen_nic_update_cmd_consumer(adapter, tx_ring); } for (ring = 0; ring < adapter->max_rds_rings; ring++) { @@ -1362,7 +1370,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) dma_addr_t temp_dma; int i, k; - u32 producer, consumer; + u32 producer; int frag_count, no_of_desc; u32 num_txd = tx_ring->num_desc; bool is_tso = false; @@ -1372,15 +1380,13 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) /* 4 fragments per cmd des */ no_of_desc = (frag_count + 3) >> 2; - producer = tx_ring->producer; - smp_mb(); - consumer = tx_ring->sw_consumer; - if ((no_of_desc+2) >= find_diff_among(producer, consumer, num_txd)) { + if (unlikely(no_of_desc + 2) > netxen_tx_avail(tx_ring)) { netif_stop_queue(netdev); - smp_mb(); return NETDEV_TX_BUSY; } + producer = tx_ring->producer; + hwdesc = &tx_ring->desc_head[producer]; netxen_clear_cmddesc((u64 *)hwdesc); pbuf = &tx_ring->cmd_buf_arr[producer]; @@ -1493,7 +1499,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) tx_ring->producer = producer; adapter->stats.txbytes += skb->len; - netxen_nic_update_cmd_producer(adapter, tx_ring, producer); + netxen_nic_update_cmd_producer(adapter, tx_ring); adapter->stats.xmitcalled++; diff --git a/drivers/net/niu.c b/drivers/net/niu.c index fa61a12..d2146d4 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c @@ -6376,7 +6376,7 @@ static void niu_set_rx_mode(struct net_device *dev) if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0)) np->flags |= NIU_FLAGS_MCAST; - alt_cnt = dev->uc_count; + alt_cnt = dev->uc.count; if (alt_cnt > niu_num_alt_addr(np)) { alt_cnt = 0; np->flags |= NIU_FLAGS_PROMISC; @@ -6385,7 +6385,7 @@ static void niu_set_rx_mode(struct net_device *dev) if (alt_cnt) { int index = 0; - list_for_each_entry(ha, &dev->uc_list, list) { + list_for_each_entry(ha, &dev->uc.list, list) { 
err = niu_set_alt_mac(np, index, ha->addr); if (err) printk(KERN_WARNING PFX "%s: Error %d " diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index a2ece89..eba937c 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -244,7 +244,7 @@ EXPORT_SYMBOL(get_phy_device); /** * phy_device_register - Register the phy device on the MDIO bus - * @phy_device: phy_device structure to be added to the MDIO bus + * @phydev: phy_device structure to be added to the MDIO bus */ int phy_device_register(struct phy_device *phydev) { diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 35196fa..4e22462 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -3811,22 +3811,11 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev) static void rtl8169_net_suspend(struct net_device *dev) { - struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; - if (!netif_running(dev)) return; netif_device_detach(dev); netif_stop_queue(dev); - - spin_lock_irq(&tp->lock); - - rtl8169_asic_down(ioaddr); - - rtl8169_rx_missed(dev, ioaddr); - - spin_unlock_irq(&tp->lock); } #ifdef CONFIG_PM @@ -3876,9 +3865,17 @@ static struct dev_pm_ops rtl8169_pm_ops = { static void rtl_shutdown(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; rtl8169_net_suspend(dev); + spin_lock_irq(&tp->lock); + + rtl8169_asic_down(ioaddr); + + spin_unlock_irq(&tp->lock); + if (system_state == SYSTEM_POWER_OFF) { pci_wake_from_d3(pdev, true); pci_set_power_state(pdev, PCI_D3hot); diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c index e224766..1f040e8 100644 --- a/drivers/net/sis190.c +++ b/drivers/net/sis190.c @@ -1281,7 +1281,7 @@ static u16 sis190_default_phy(struct net_device *dev) else if (phy_lan) phy_default = phy_lan; else - phy_default = list_entry(&tp->first_phy, + phy_default = list_first_entry(&tp->first_phy, struct sis190_phy, list); } diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 6b5946f..7681d28 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -50,7 +50,7 @@ #include "sky2.h" #define DRV_NAME "sky2" -#define DRV_VERSION "1.22" +#define DRV_VERSION "1.23" #define PFX DRV_NAME " " /* @@ -65,9 +65,9 @@ #define RX_DEF_PENDING RX_MAX_PENDING #define TX_RING_SIZE 512 -#define TX_DEF_PENDING (TX_RING_SIZE - 1) -#define TX_MIN_PENDING 64 +#define TX_DEF_PENDING 128 #define MAX_SKB_TX_LE (4 + (sizeof(dma_addr_t)/sizeof(u32))*MAX_SKB_FRAGS) +#define TX_MIN_PENDING (MAX_SKB_TX_LE+1) #define STATUS_RING_SIZE 2048 /* 2 ports * (TX + 2*RX) */ #define STATUS_LE_BYTES (STATUS_RING_SIZE*sizeof(struct sky2_status_le)) @@ -1151,7 +1151,14 @@ stopped: /* reset the Rx prefetch unit */ sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); - mmiowb(); + + /* Reset the RAM Buffer receive queue */ + sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_RST_SET); + + /* Reset Rx MAC FIFO */ + sky2_write8(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), GMF_RST_SET); + + sky2_read8(hw, B0_CTST); } /* Clean out receive buffer area, assumes receiver hardware stopped */ @@ -1169,6 +1176,7 @@ static void sky2_rx_clean(struct sky2_port *sky2) re->skb = NULL; } } + skb_queue_purge(&sky2->rx_recycle); } /* Basic MII support */ @@ -1245,6 +1253,12 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp } #endif +/* Amount of required worst case padding in rx buffer */ +static inline unsigned 
sky2_rx_pad(const struct sky2_hw *hw) +{ + return (hw->flags & SKY2_HW_RAM_BUFFER) ? 8 : 2; +} + /* * Allocate an skb for receiving. If the MTU is large enough * make the skb non-linear with a fragment list of pages. @@ -1254,6 +1268,13 @@ static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2) struct sk_buff *skb; int i; + skb = __skb_dequeue(&sky2->rx_recycle); + if (!skb) + skb = netdev_alloc_skb(sky2->netdev, sky2->rx_data_size + + sky2_rx_pad(sky2->hw)); + if (!skb) + goto nomem; + if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) { unsigned char *start; /* @@ -1262,18 +1283,10 @@ static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2) * The buffer returned from netdev_alloc_skb is * aligned except if slab debugging is enabled. */ - skb = netdev_alloc_skb(sky2->netdev, sky2->rx_data_size + 8); - if (!skb) - goto nomem; start = PTR_ALIGN(skb->data, 8); skb_reserve(skb, start - skb->data); - } else { - skb = netdev_alloc_skb(sky2->netdev, - sky2->rx_data_size + NET_IP_ALIGN); - if (!skb) - goto nomem; + } else skb_reserve(skb, NET_IP_ALIGN); - } for (i = 0; i < sky2->rx_nfrags; i++) { struct page *page = alloc_page(GFP_ATOMIC); @@ -1350,6 +1363,8 @@ static int sky2_rx_start(struct sky2_port *sky2) sky2->rx_data_size = size; + skb_queue_head_init(&sky2->rx_recycle); + /* Fill Rx ring */ for (i = 0; i < sky2->rx_pending; i++) { re = sky2->rx_ring + i; @@ -1488,6 +1503,7 @@ static int sky2_up(struct net_device *dev) imask = sky2_read32(hw, B0_IMSK); imask |= portirq_msk[port]; sky2_write32(hw, B0_IMSK, imask); + sky2_read32(hw, B0_IMSK); sky2_set_multicast(dev); @@ -1756,14 +1772,22 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) } if (le->ctrl & EOP) { + struct sk_buff *skb = re->skb; + if (unlikely(netif_msg_tx_done(sky2))) printk(KERN_DEBUG "%s: tx done %u\n", dev->name, idx); dev->stats.tx_packets++; - dev->stats.tx_bytes += re->skb->len; + dev->stats.tx_bytes += skb->len; + + if (skb_queue_len(&sky2->rx_recycle) < sky2->rx_pending + && skb_recycle_check(skb, sky2->rx_data_size + + sky2_rx_pad(sky2->hw))) + __skb_queue_head(&sky2->rx_recycle, skb); + else + dev_kfree_skb_any(skb); - dev_kfree_skb_any(re->skb); sky2->tx_next = RING_NEXT(idx, TX_RING_SIZE); } } @@ -1805,10 +1829,10 @@ static int sky2_down(struct net_device *dev) imask = sky2_read32(hw, B0_IMSK); imask &= ~portirq_msk[port]; sky2_write32(hw, B0_IMSK, imask); + sky2_read32(hw, B0_IMSK); - synchronize_irq(hw->pdev->irq); - - sky2_gmac_reset(hw, port); + /* Force flow control off */ + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); /* Stop transmitter */ sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP); @@ -1821,9 +1845,6 @@ static int sky2_down(struct net_device *dev) ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA); gma_write16(hw, port, GM_GP_CTRL, ctrl); - /* Make sure no packets are pending */ - napi_synchronize(&hw->napi); - sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); /* Workaround shared GMAC reset */ @@ -1854,6 +1875,15 @@ static int sky2_down(struct net_device *dev) sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); + /* Force any delayed status interrupt and NAPI */ + sky2_write32(hw, STAT_LEV_TIMER_CNT, 0); + sky2_write32(hw, STAT_TX_TIMER_CNT, 0); + sky2_write32(hw, STAT_ISR_TIMER_CNT, 0); + sky2_read8(hw, STAT_ISR_TIMER_CTRL); + + synchronize_irq(hw->pdev->irq); + napi_synchronize(&hw->napi); + sky2_phy_power_down(hw, port); /* turn off LED's */ @@ -2343,11 +2373,45 @@ static inline void
sky2_tx_done(struct net_device *dev, u16 last) } } +static inline void sky2_skb_rx(const struct sky2_port *sky2, + u32 status, struct sk_buff *skb) +{ +#ifdef SKY2_VLAN_TAG_USED + u16 vlan_tag = be16_to_cpu(sky2->rx_tag); + if (sky2->vlgrp && (status & GMR_FS_VLAN)) { + if (skb->ip_summed == CHECKSUM_NONE) + vlan_hwaccel_receive_skb(skb, sky2->vlgrp, vlan_tag); + else + vlan_gro_receive(&sky2->hw->napi, sky2->vlgrp, + vlan_tag, skb); + return; + } +#endif + if (skb->ip_summed == CHECKSUM_NONE) + netif_receive_skb(skb); + else + napi_gro_receive(&sky2->hw->napi, skb); +} + +static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port, + unsigned packets, unsigned bytes) +{ + if (packets) { + struct net_device *dev = hw->dev[port]; + + dev->stats.rx_packets += packets; + dev->stats.rx_bytes += bytes; + dev->last_rx = jiffies; + sky2_rx_update(netdev_priv(dev), rxqaddr[port]); + } +} + /* Process status response ring */ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) { int work_done = 0; - unsigned rx[2] = { 0, 0 }; + unsigned int total_bytes[2] = { 0 }; + unsigned int total_packets[2] = { 0 }; rmb(); do { @@ -2374,7 +2438,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) le->opcode = 0; switch (opcode & ~HW_OWNER) { case OP_RXSTAT: - ++rx[port]; + total_packets[port]++; + total_bytes[port] += length; skb = sky2_receive(dev, length, status); if (unlikely(!skb)) { dev->stats.rx_dropped++; @@ -2392,18 +2457,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) } skb->protocol = eth_type_trans(skb, dev); - dev->stats.rx_packets++; - dev->stats.rx_bytes += skb->len; - dev->last_rx = jiffies; -#ifdef SKY2_VLAN_TAG_USED - if (sky2->vlgrp && (status & GMR_FS_VLAN)) { - vlan_hwaccel_receive_skb(skb, - sky2->vlgrp, - be16_to_cpu(sky2->rx_tag)); - } else -#endif - netif_receive_skb(skb); + sky2_skb_rx(sky2, status, skb); /* Stop after net poll weight */ if (++work_done >= to_do) @@ -2473,11 +2528,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); exit_loop: - if (rx[0]) - sky2_rx_update(netdev_priv(hw->dev[0]), Q_R1); - - if (rx[1]) - sky2_rx_update(netdev_priv(hw->dev[1]), Q_R2); + sky2_rx_done(hw, 0, total_packets[0], total_bytes[0]); + sky2_rx_done(hw, 1, total_packets[1], total_bytes[1]); return work_done; } @@ -4364,6 +4416,22 @@ static int __devinit sky2_probe(struct pci_dev *pdev, goto err_out; } + /* Get configuration information + * Note: only regular PCI config access once to test for HW issues + * other PCI access through shared memory for speed and to + * avoid MMCONFIG problems. + */ + err = pci_read_config_dword(pdev, PCI_DEV_REG2, &reg); + if (err) { + dev_err(&pdev->dev, "PCI read config failed\n"); + goto err_out; + } + + if (~reg == 0) { + dev_err(&pdev->dev, "PCI configuration read error\n"); + goto err_out; + } + err = pci_request_regions(pdev, DRV_NAME); if (err) { dev_err(&pdev->dev, "cannot obtain PCI resources\n"); @@ -4389,21 +4457,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev, } } - /* Get configuration information * Note: only regular PCI config access once to test for HW issues * other PCI access through shared memory for speed and to * avoid MMCONFIG problems.
- */ - err = pci_read_config_dword(pdev, PCI_DEV_REG2, &reg); - if (err) { - dev_err(&pdev->dev, "PCI read config failed\n"); - goto err_out_free_regions; - } - - /* size of available VPD, only impact sysfs */ - err = pci_vpd_truncate(pdev, 1ul << (((reg & PCI_VPD_ROM_SZ) >> 14) + 8)); - if (err) - dev_warn(&pdev->dev, "Can't set VPD size\n"); #ifdef __BIG_ENDIAN /* The sk98lin vendor driver uses hardware byte swapping but diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index 92fb24b..b5549c9 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h @@ -2028,6 +2028,7 @@ struct sky2_port { u16 rx_pending; u16 rx_data_size; u16 rx_nfrags; + struct sk_buff_head rx_recycle; #ifdef SKY2_VLAN_TAG_USED u16 rx_tag; diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c index e4255d8..753a1fb 100644 --- a/drivers/net/sonic.c +++ b/drivers/net/sonic.c @@ -223,7 +223,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) if (!laddr) { printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name); dev_kfree_skb(skb); - return NETDEV_TX_BUSY + return NETDEV_TX_BUSY; } sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */ diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index e2f2e91..40c6eba 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -65,8 +65,6 @@ static DEFINE_SPINLOCK(ugeth_lock); -static void uec_configure_serdes(struct net_device *dev); - static struct { u32 msg_enable; } debug = { -1 }; @@ -1536,6 +1534,49 @@ static void adjust_link(struct net_device *dev) spin_unlock_irqrestore(&ugeth->lock, flags); } +/* Initialize TBI PHY interface for communicating with the + * SERDES lynx PHY on the chip. We communicate with this PHY + * through the MDIO bus on each controller, treating it as a + * "normal" PHY at the address found in the UTBIPA register. We assume + * that the UTBIPA register is valid. Either the MDIO bus code will set + * it to a value that doesn't conflict with other PHYs on the bus, or the + * value doesn't matter, as there are no other PHYs on the bus. + */ +static void uec_configure_serdes(struct net_device *dev) +{ + struct ucc_geth_private *ugeth = netdev_priv(dev); + struct ucc_geth_info *ug_info = ugeth->ug_info; + struct phy_device *tbiphy; + + if (!ug_info->tbi_node) { + dev_warn(&dev->dev, "SGMII mode requires that the device " + "tree specify a tbi-handle\n"); + return; + } + + tbiphy = of_phy_find_device(ug_info->tbi_node); + if (!tbiphy) { + dev_err(&dev->dev, "error: Could not get TBI device\n"); + return; + } + + /* + * If the link is already up, we must already be ok, and don't need to + * configure and reset the TBI<->SerDes link. Maybe U-Boot configured + * everything for us? Resetting it takes the link down and requires + * several seconds for it to come back. + */ + if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) + return; + + /* Single clk mode, mii mode off(for serdes communication) */ + phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS); + + phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT); + + phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS); +} + /* Configure the PHY for dev. * returns 0 if success. -1 if failure */ @@ -1577,41 +1618,7 @@ static int init_phy(struct net_device *dev) return 0; } -/* Initialize TBI PHY interface for communicating with the - * SERDES lynx PHY on the chip. We communicate with this PHY - * through the MDIO bus on each controller, treating it as a - * "normal" PHY at the address found in the UTBIPA register.
We assume - * that the UTBIPA register is valid. Either the MDIO bus code will set - * it to a value that doesn't conflict with other PHYs on the bus, or the - * value doesn't matter, as there are no other PHYs on the bus. - */ -static void uec_configure_serdes(struct net_device *dev) -{ - struct ucc_geth_private *ugeth = netdev_priv(dev); - - if (!ugeth->tbiphy) { - printk(KERN_WARNING "SGMII mode requires that the device " - "tree specify a tbi-handle\n"); - return; - } - /* - * If the link is already up, we must already be ok, and don't need to - * configure and reset the TBI<->SerDes link. Maybe U-Boot configured - * everything for us? Resetting it takes the link down and requires - * several seconds for it to come back. - */ - if (phy_read(ugeth->tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) - return; - - /* Single clk mode, mii mode off(for serdes communication) */ - phy_write(ugeth->tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS); - - phy_write(ugeth->tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT); - - phy_write(ugeth->tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS); - -} static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth) { @@ -3711,6 +3718,9 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma } ug_info->phy_node = phy; + /* Find the TBI PHY node. If it's not there, we don't support SGMII */ + ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0); + /* get the phy interface type, or default to MII */ prop = of_get_property(np, "phy-connection-type", NULL); if (!prop) { @@ -3818,37 +3828,6 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma ugeth->ndev = dev; ugeth->node = np; - /* Find the TBI PHY. If it's not there, we don't support SGMII */ - ph = of_get_property(np, "tbi-handle", NULL); - if (ph) { - struct device_node *tbi = of_find_node_by_phandle(*ph); - struct of_device *ofdev; - struct mii_bus *bus; - const unsigned int *id; - - if (!tbi) - return 0; - - mdio = of_get_parent(tbi); - if (!mdio) - return 0; - - ofdev = of_find_device_by_node(mdio); - - of_node_put(mdio); - - id = of_get_property(tbi, "reg", NULL); - if (!id) - return 0; - of_node_put(tbi); - - bus = dev_get_drvdata(&ofdev->dev); - if (!bus) - return 0; - - ugeth->tbiphy = bus->phy_map[*id]; - } - return 0; } diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h index 5beba4c..195ab26 100644 --- a/drivers/net/ucc_geth.h +++ b/drivers/net/ucc_geth.h @@ -1125,6 +1125,7 @@ struct ucc_geth_info { u16 pausePeriod; u16 extensionField; struct device_node *phy_node; + struct device_node *tbi_node; u8 weightfactor[NUM_TX_QUEUES]; u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES]; u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX]; @@ -1213,7 +1214,6 @@ struct ucc_geth_private { struct ugeth_mii_info *mii_info; struct phy_device *phydev; - struct phy_device *tbiphy; phy_interface_t phy_interface; int max_speed; uint32_t msg_enable; diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index e2a7725..b02f7ad 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -989,8 +989,10 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi if (ret < 0) goto err_iounmap; - if (velocity_get_link(dev)) + if (!velocity_get_link(dev)) { netif_carrier_off(dev); + vptr->mii_status |= VELOCITY_LINK_FAIL; + } velocity_print_info(vptr); pci_set_drvdata(pdev, dev); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 52198f6..2a6e81d 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ 
-709,7 +709,7 @@ static void virtnet_set_rx_mode(struct net_device *dev) allmulti ? "en" : "dis"); /* MAC filter - use one buffer for both lists */ - mac_data = buf = kzalloc(((dev->uc_count + dev->mc_count) * ETH_ALEN) + + mac_data = buf = kzalloc(((dev->uc.count + dev->mc_count) * ETH_ALEN) + (2 * sizeof(mac_data->entries)), GFP_ATOMIC); if (!buf) { dev_warn(&dev->dev, "No memory for MAC address buffer\n"); @@ -719,16 +719,16 @@ static void virtnet_set_rx_mode(struct net_device *dev) sg_init_table(sg, 2); /* Store the unicast list and count in the front of the buffer */ - mac_data->entries = dev->uc_count; + mac_data->entries = dev->uc.count; i = 0; - list_for_each_entry(ha, &dev->uc_list, list) + list_for_each_entry(ha, &dev->uc.list, list) memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); sg_set_buf(&sg[0], mac_data, - sizeof(mac_data->entries) + (dev->uc_count * ETH_ALEN)); + sizeof(mac_data->entries) + (dev->uc.count * ETH_ALEN)); /* multicast list and count fill the end */ - mac_data = (void *)&mac_data->macs[dev->uc_count][0]; + mac_data = (void *)&mac_data->macs[dev->uc.count][0]; mac_data->entries = dev->mc_count; addr = dev->mc_list; diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c index 26cde57..58d2551 100644 --- a/drivers/net/vxge/vxge-config.c +++ b/drivers/net/vxge/vxge-config.c @@ -454,7 +454,7 @@ __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev) return VXGE_HW_OK; } -static enum vxge_hw_status +enum vxge_hw_status __vxge_hw_device_is_privilaged(struct __vxge_hw_device *hldev) { if ((hldev->host_type == VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION || @@ -676,10 +676,12 @@ enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev) { enum vxge_hw_status status = VXGE_HW_OK; - /* Validate the pci-e link width and speed */ - status = __vxge_hw_verify_pci_e_info(hldev); - if (status != VXGE_HW_OK) - goto exit; + if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev)) { + /* Validate the pci-e link width and speed */ + status = __vxge_hw_verify_pci_e_info(hldev); + if (status != VXGE_HW_OK) + goto exit; + } vxge_hw_wrr_rebalance(hldev); exit: diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c index 6c838b3..6034497 100644 --- a/drivers/net/vxge/vxge-main.c +++ b/drivers/net/vxge/vxge-main.c @@ -4203,6 +4203,16 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) max_vpath_supported++; } + /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */ + if ((VXGE_HW_FUNCTION_MODE_SRIOV == + ll_config.device_hw_info.function_mode) && + (max_config_dev > 1) && (pdev->is_physfn)) { + ret = pci_enable_sriov(pdev, max_config_dev - 1); + if (ret) + vxge_debug_ll_config(VXGE_ERR, + "Failed to enable SRIOV: %d \n", ret); + } + /* * Configure vpaths and get driver configured number of vpaths * which is less than or equal to the maximum vpaths per function. 
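The SRIOV hunk above follows the usual PCI lifecycle: only a physical function (pdev->is_physfn) may enable virtual functions, normally once during probe, and every exit path out of the driver, including remove, must tear them down again with pci_disable_sriov(), exactly as the _exit5 and vxge_remove() hunks below do. A minimal sketch of that pattern, not taken from this patch (the foo_* names and the VF count are hypothetical):

#include <linux/pci.h>

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	/* Only a physical function may create virtual functions. */
	if (pdev->is_physfn) {
		ret = pci_enable_sriov(pdev, 7);	/* e.g. max_config_dev - 1 VFs */
		if (ret)
			dev_warn(&pdev->dev, "Failed to enable SRIOV: %d\n", ret);
	}
	return 0;
}

static void foo_remove(struct pci_dev *pdev)
{
	/* Mirror of probe: VFs must be gone before the PF is released. */
	pci_disable_sriov(pdev);
}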
@@ -4366,6 +4376,7 @@ _exit6: vxge_device_unregister(hldev); _exit5: + pci_disable_sriov(pdev); vxge_hw_device_terminate(hldev); _exit4: iounmap(attr.bar1); @@ -4429,6 +4440,8 @@ vxge_remove(struct pci_dev *pdev) iounmap(vdev->bar0); iounmap(vdev->bar1); + pci_disable_sriov(pdev); + /* we are safe to free it now */ free_netdev(dev); diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h index 7da02c5..82786ff 100644 --- a/drivers/net/vxge/vxge-version.h +++ b/drivers/net/vxge/vxge-version.h @@ -17,7 +17,7 @@ #define VXGE_VERSION_MAJOR "2" #define VXGE_VERSION_MINOR "0" -#define VXGE_VERSION_FIX "1" -#define VXGE_VERSION_BUILD "17129" +#define VXGE_VERSION_FIX "4" +#define VXGE_VERSION_BUILD "17795" #define VXGE_VERSION_FOR "k" #endif diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 2dd78d2..aff4f6bd 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -149,46 +149,40 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb) */ static int lapbeth_xmit(struct sk_buff *skb, struct net_device *dev) { - int err = -ENODEV; + int err; /* * Just to be *really* sure not to send anything if the interface * is down, the ethernet device may have gone. */ - if (!netif_running(dev)) { + if (!netif_running(dev)) goto drop; - } switch (skb->data[0]) { case 0x00: - err = 0; break; case 0x01: if ((err = lapb_connect_request(dev)) != LAPB_OK) printk(KERN_ERR "lapbeth: lapb_connect_request " "error: %d\n", err); - goto drop_ok; + goto drop; case 0x02: if ((err = lapb_disconnect_request(dev)) != LAPB_OK) printk(KERN_ERR "lapbeth: lapb_disconnect_request " "err: %d\n", err); /* Fall thru */ default: - goto drop_ok; + goto drop; } skb_pull(skb, 1); if ((err = lapb_data_request(dev, skb)) != LAPB_OK) { printk(KERN_ERR "lapbeth: lapb_data_request error - %d\n", err); - err = -ENOMEM; goto drop; } - err = 0; out: - return err; -drop_ok: - err = 0; + return NETDEV_TX_OK; drop: kfree_skb(skb); goto out; diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c index ec35503..2942f13 100644 --- a/drivers/net/wireless/ath/ath5k/pcu.c +++ b/drivers/net/wireless/ath/ath5k/pcu.c @@ -733,8 +733,9 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval) /* * Set the beacon register and enable all timers. 
*/ - /* When in AP mode zero timer0 to start TSF */ - if (ah->ah_op_mode == NL80211_IFTYPE_AP) + /* When in AP or Mesh Point mode zero timer0 to start TSF */ + if (ah->ah_op_mode == NL80211_IFTYPE_AP || + ah->ah_op_mode == NL80211_IFTYPE_MESH_POINT) ath5k_hw_reg_write(ah, 0, AR5K_TIMER0); ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0); diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index 0ed1ac3..2d79610 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -1,7 +1,6 @@ config ATH9K tristate "Atheros 802.11n wireless cards support" depends on PCI && MAC80211 && WLAN_80211 - depends on RFKILL || RFKILL=n select ATH_COMMON select MAC80211_LEDS select LEDS_CLASS diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 515880a..5efc934 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -21,7 +21,6 @@ #include <linux/device.h> #include <net/mac80211.h> #include <linux/leds.h> -#include <linux/rfkill.h> #include "hw.h" #include "rc.h" @@ -460,12 +459,6 @@ struct ath_led { bool registered; }; -struct ath_rfkill { - struct rfkill *rfkill; - struct rfkill_ops ops; - char rfkill_name[32]; -}; - /********************/ /* Main driver core */ /********************/ @@ -505,7 +498,6 @@ struct ath_rfkill { #define SC_OP_PROTECT_ENABLE BIT(6) #define SC_OP_RXFLUSH BIT(7) #define SC_OP_LED_ASSOCIATED BIT(8) -#define SC_OP_RFKILL_REGISTERED BIT(9) #define SC_OP_WAIT_FOR_BEACON BIT(12) #define SC_OP_LED_ON BIT(13) #define SC_OP_SCANNING BIT(14) @@ -591,7 +583,6 @@ struct ath_softc { int beacon_interval; - struct ath_rfkill rf_kill; struct ath_ani ani; struct ath9k_node_stats nodestats; #ifdef CONFIG_ATH9K_DEBUG @@ -677,6 +668,7 @@ static inline void ath9k_ps_restore(struct ath_softc *sc) if (atomic_dec_and_test(&sc->ps_usecount)) if ((sc->hw->conf.flags & IEEE80211_CONF_PS) && !(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON | + SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_PSPOLL_DATA | SC_OP_WAIT_FOR_TX_ACK))) ath9k_hw_setpower(sc->sc_ah, diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 1579c94..34935a8 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -2186,6 +2186,18 @@ static void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask); } +static void ath9k_enable_rfkill(struct ath_hw *ah) +{ + REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, + AR_GPIO_INPUT_EN_VAL_RFSILENT_BB); + + REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2, + AR_GPIO_INPUT_MUX2_RFSILENT); + + ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio); + REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB); +} + int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, bool bChannelChange) { @@ -2313,10 +2325,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, ath9k_hw_init_interrupt_masks(ah, ah->opmode); ath9k_hw_init_qos(ah); -#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT) ath9k_enable_rfkill(ah); -#endif + ath9k_hw_init_user_settings(ah); REG_WRITE(ah, AR_STA_ID1, @@ -3613,20 +3624,6 @@ void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val) AR_GPIO_BIT(gpio)); } -#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) -void ath9k_enable_rfkill(struct ath_hw *ah) -{ - REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, - AR_GPIO_INPUT_EN_VAL_RFSILENT_BB); - - REG_CLR_BIT(ah, 
AR_GPIO_INPUT_MUX2, - AR_GPIO_INPUT_MUX2_RFSILENT); - - ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio); - REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB); -} -#endif - u32 ath9k_hw_getdefantenna(struct ath_hw *ah) { return REG_READ(ah, AR_DEF_ANTENNA) & 0x7; diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index dd8508e..9d0b31a 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -565,9 +565,6 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio); void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio, u32 ah_signal_type); void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val); -#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) -void ath9k_enable_rfkill(struct ath_hw *ah); -#endif u32 ath9k_hw_getdefantenna(struct ath_hw *ah); void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna); bool ath9k_hw_setantennaswitch(struct ath_hw *ah, diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index f7baa40..9f49a32 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -231,6 +231,19 @@ static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band) } } +static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc, + struct ieee80211_hw *hw) +{ + struct ieee80211_channel *curchan = hw->conf.channel; + struct ath9k_channel *channel; + u8 chan_idx; + + chan_idx = curchan->hw_value; + channel = &sc->sc_ah->channels[chan_idx]; + ath9k_update_ichannel(sc, hw, channel); + return channel; +} + /* * Set/change channels. If the channel is really being changed, it's done * by reseting the chip. To accomplish this we must first cleanup any pending @@ -283,7 +296,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, "reset status %d\n", channel->center_freq, r); spin_unlock_bh(&sc->sc_resetlock); - return r; + goto ps_restore; } spin_unlock_bh(&sc->sc_resetlock); @@ -292,14 +305,17 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, if (ath_startrecv(sc) != 0) { DPRINTF(sc, ATH_DBG_FATAL, "Unable to restart recv logic\n"); - return -EIO; + r = -EIO; + goto ps_restore; } ath_cache_conf_rate(sc, &hw->conf); ath_update_txpow(sc); ath9k_hw_set_interrupts(ah, sc->imask); + + ps_restore: ath9k_ps_restore(sc); - return 0; + return r; } /* @@ -1110,6 +1126,9 @@ void ath_radio_enable(struct ath_softc *sc) ath9k_ps_wakeup(sc); ath9k_hw_configpcipowersave(ah, 0); + if (!ah->curchan) + ah->curchan = ath_get_curchannel(sc, sc->hw); + spin_lock_bh(&sc->sc_resetlock); r = ath9k_hw_reset(ah, ah->curchan, false); if (r) { @@ -1162,6 +1181,9 @@ void ath_radio_disable(struct ath_softc *sc) ath_stoprecv(sc); /* turn off frame recv */ ath_flushrecv(sc); /* flush recv queue */ + if (!ah->curchan) + ah->curchan = ath_get_curchannel(sc, sc->hw); + spin_lock_bh(&sc->sc_resetlock); r = ath9k_hw_reset(ah, ah->curchan, false); if (r) { @@ -1178,8 +1200,6 @@ void ath_radio_disable(struct ath_softc *sc) ath9k_ps_restore(sc); } -#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) - /*******************/ /* Rfkill */ /*******************/ @@ -1192,81 +1212,27 @@ static bool ath_is_rfkill_set(struct ath_softc *sc) ah->rfkill_polarity; } -/* s/w rfkill handlers */ -static int ath_rfkill_set_block(void *data, bool blocked) +static void ath9k_rfkill_poll_state(struct ieee80211_hw *hw) { - struct ath_softc *sc = data; - - if (blocked) - ath_radio_disable(sc); - else - ath_radio_enable(sc); - - return 0; -} - -static 
void ath_rfkill_poll_state(struct rfkill *rfkill, void *data) -{ - struct ath_softc *sc = data; + struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = aphy->sc; bool blocked = !!ath_is_rfkill_set(sc); - if (rfkill_set_hw_state(rfkill, blocked)) + wiphy_rfkill_set_hw_state(hw->wiphy, blocked); + + if (blocked) ath_radio_disable(sc); else ath_radio_enable(sc); } -/* Init s/w rfkill */ -static int ath_init_sw_rfkill(struct ath_softc *sc) -{ - sc->rf_kill.ops.set_block = ath_rfkill_set_block; - if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT) - sc->rf_kill.ops.poll = ath_rfkill_poll_state; - - snprintf(sc->rf_kill.rfkill_name, sizeof(sc->rf_kill.rfkill_name), - "ath9k-%s::rfkill", wiphy_name(sc->hw->wiphy)); - - sc->rf_kill.rfkill = rfkill_alloc(sc->rf_kill.rfkill_name, - wiphy_dev(sc->hw->wiphy), - RFKILL_TYPE_WLAN, - &sc->rf_kill.ops, sc); - if (!sc->rf_kill.rfkill) { - DPRINTF(sc, ATH_DBG_FATAL, "Failed to allocate rfkill\n"); - return -ENOMEM; - } - - return 0; -} - -/* Deinitialize rfkill */ -static void ath_deinit_rfkill(struct ath_softc *sc) -{ - if (sc->sc_flags & SC_OP_RFKILL_REGISTERED) { - rfkill_unregister(sc->rf_kill.rfkill); - rfkill_destroy(sc->rf_kill.rfkill); - sc->sc_flags &= ~SC_OP_RFKILL_REGISTERED; - } -} - -static int ath_start_rfkill_poll(struct ath_softc *sc) +static void ath_start_rfkill_poll(struct ath_softc *sc) { - if (!(sc->sc_flags & SC_OP_RFKILL_REGISTERED)) { - if (rfkill_register(sc->rf_kill.rfkill)) { - DPRINTF(sc, ATH_DBG_FATAL, - "Unable to register rfkill\n"); - rfkill_destroy(sc->rf_kill.rfkill); - - /* Deinitialize the device */ - ath_cleanup(sc); - return -EIO; - } else { - sc->sc_flags |= SC_OP_RFKILL_REGISTERED; - } - } + struct ath_hw *ah = sc->sc_ah; - return 0; + if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT) + wiphy_rfkill_start_polling(sc->hw->wiphy); } -#endif /* CONFIG_RFKILL */ void ath_cleanup(struct ath_softc *sc) { @@ -1286,9 +1252,6 @@ void ath_detach(struct ath_softc *sc) DPRINTF(sc, ATH_DBG_CONFIG, "Detach ATH hw\n"); -#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) - ath_deinit_rfkill(sc); -#endif ath_deinit_leds(sc); cancel_work_sync(&sc->chan_work); cancel_delayed_work_sync(&sc->wiphy_work); @@ -1626,13 +1589,6 @@ int ath_attach(u16 devid, struct ath_softc *sc) if (error != 0) goto error_attach; -#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) - /* Initialize s/w rfkill */ - error = ath_init_sw_rfkill(sc); - if (error) - goto error_attach; -#endif - INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work); INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work); sc->wiphy_scheduler_int = msecs_to_jiffies(500); @@ -1648,6 +1604,7 @@ int ath_attach(u16 devid, struct ath_softc *sc) /* Initialize LED control */ ath_init_leds(sc); + ath_start_rfkill_poll(sc); return 0; @@ -1920,7 +1877,7 @@ static int ath9k_start(struct ieee80211_hw *hw) struct ath_softc *sc = aphy->sc; struct ieee80211_channel *curchan = hw->conf.channel; struct ath9k_channel *init_channel; - int r, pos; + int r; DPRINTF(sc, ATH_DBG_CONFIG, "Starting driver with " "initial channel: %d MHz\n", curchan->center_freq); @@ -1950,11 +1907,9 @@ static int ath9k_start(struct ieee80211_hw *hw) /* setup initial channel */ - pos = curchan->hw_value; + sc->chan_idx = curchan->hw_value; - sc->chan_idx = pos; - init_channel = &sc->sc_ah->channels[pos]; - ath9k_update_ichannel(sc, hw, init_channel); + init_channel = ath_get_curchannel(sc, hw); /* Reset SERDES registers */ ath9k_hw_configpcipowersave(sc->sc_ah, 0); @@ -2018,10 +1973,6 @@ static int 
ath9k_start(struct ieee80211_hw *hw) ieee80211_wake_queues(hw); -#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) - r = ath_start_rfkill_poll(sc); -#endif - mutex_unlock: mutex_unlock(&sc->mutex); @@ -2159,7 +2110,7 @@ static void ath9k_stop(struct ieee80211_hw *hw) } else sc->rx.rxlink = NULL; - rfkill_pause_polling(sc->rf_kill.rfkill); + wiphy_rfkill_stop_polling(sc->hw->wiphy); /* disable HAL and put h/w to sleep */ ath9k_hw_disable(sc->sc_ah); @@ -2765,6 +2716,7 @@ struct ieee80211_ops ath9k_ops = { .ampdu_action = ath9k_ampdu_action, .sw_scan_start = ath9k_sw_scan_start, .sw_scan_complete = ath9k_sw_scan_complete, + .rfkill_poll = ath9k_rfkill_poll_state, }; static struct { diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 5014a19..f99f3a7 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -817,6 +817,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush) } if (unlikely(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON | + SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_PSPOLL_DATA))) ath_rx_ps(sc, skb); diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index a5637c4..6d1519e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -2152,7 +2152,6 @@ static int iwl_mac_start(struct ieee80211_hw *hw) /* we should be verifying the device is ready to be opened */ mutex_lock(&priv->mutex); - memset(&priv->staging_rxon, 0, sizeof(struct iwl_rxon_cmd)); /* fetch ucode file from disk, alloc and copy to bus-master buffers ... * ucode filename and max sizes are card-specific. */ diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index f9d16ca..6ab0716 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c @@ -629,13 +629,9 @@ u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv, if (!sta_ht_inf->ht_supported) return 0; } - - if (iwl_ht_conf->ht_protection & IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) - return 1; - else - return iwl_is_channel_extension(priv, priv->band, - le16_to_cpu(priv->staging_rxon.channel), - iwl_ht_conf->extension_chan_offset); + return iwl_is_channel_extension(priv, priv->band, + le16_to_cpu(priv->staging_rxon.channel), + iwl_ht_conf->extension_chan_offset); } EXPORT_SYMBOL(iwl_is_fat_tx_allowed); @@ -826,9 +822,18 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info) RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); if (iwl_is_fat_tx_allowed(priv, NULL)) { /* pure 40 fat */ - if (rxon->flags & RXON_FLG_FAT_PROT_MSK) + if (ht_info->ht_protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) { rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40; - else { + /* Note: control channel is opposite of extension channel */ + switch (ht_info->extension_chan_offset) { + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + break; + } + } else { /* Note: control channel is opposite of extension channel */ switch (ht_info->extension_chan_offset) { case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: @@ -2390,39 +2395,46 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw, priv->ibss_beacon = ieee80211_beacon_get(hw, vif); } - if ((changes & BSS_CHANGED_BSSID) && !iwl_is_rfkill(priv)) { - /* If there is currently a HW scan going on in the background - * then we need to cancel it else the RXON below will fail. 
*/ + if (changes & BSS_CHANGED_BEACON_INT) { + priv->beacon_int = bss_conf->beacon_int; + /* TODO: in AP mode, do something to make this take effect */ + } + + if (changes & BSS_CHANGED_BSSID) { + IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid); + + /* + * If there is currently a HW scan going on in the + * background then we need to cancel it else the RXON + * below/in post_associate will fail. + */ if (iwl_scan_cancel_timeout(priv, 100)) { - IWL_WARN(priv, "Aborted scan still in progress " - "after 100ms\n"); + IWL_WARN(priv, "Aborted scan still in progress after 100ms\n"); IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n"); mutex_unlock(&priv->mutex); return; } - memcpy(priv->staging_rxon.bssid_addr, - bss_conf->bssid, ETH_ALEN); - - /* TODO: Audit driver for usage of these members and see - * if mac80211 deprecates them (priv->bssid looks like it - * shouldn't be there, but I haven't scanned the IBSS code - * to verify) - jpk */ - memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN); - - if (priv->iw_mode == NL80211_IFTYPE_AP) - iwlcore_config_ap(priv); - else { - int rc = iwlcore_commit_rxon(priv); - if ((priv->iw_mode == NL80211_IFTYPE_STATION) && rc) - iwl_rxon_add_station( - priv, priv->active_rxon.bssid_addr, 1); + + /* mac80211 only sets assoc when in STATION mode */ + if (priv->iw_mode == NL80211_IFTYPE_ADHOC || + bss_conf->assoc) { + memcpy(priv->staging_rxon.bssid_addr, + bss_conf->bssid, ETH_ALEN); + + /* currently needed in a few places */ + memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN); + } else { + priv->staging_rxon.filter_flags &= + ~RXON_FILTER_ASSOC_MSK; } - } else if (!iwl_is_rfkill(priv)) { - iwl_scan_cancel_timeout(priv, 100); - priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; - iwlcore_commit_rxon(priv); + } + /* + * This needs to be after setting the BSSID in case + * mac80211 decides to do both changes at once because + * it will invoke post_associate. + */ if (priv->iw_mode == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON) { struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); @@ -2431,8 +2443,6 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw, iwl_mac_beacon_update(hw, beacon); } - mutex_unlock(&priv->mutex); - if (changes & BSS_CHANGED_ERP_PREAMBLE) { IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n", bss_conf->use_short_preamble); @@ -2450,6 +2460,23 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw, priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK; } + if (changes & BSS_CHANGED_BASIC_RATES) { + /* XXX use this information + * + * To do that, remove code from iwl_set_rate() and put something + * like this here: + * + if (A-band) + priv->staging_rxon.ofdm_basic_rates = + bss_conf->basic_rates; + else + priv->staging_rxon.ofdm_basic_rates = + bss_conf->basic_rates >> 4; + priv->staging_rxon.cck_basic_rates = + bss_conf->basic_rates & 0xF; + */ + } + if (changes & BSS_CHANGED_HT) { iwl_ht_conf(priv, bss_conf); @@ -2459,10 +2486,6 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw, if (changes & BSS_CHANGED_ASSOC) { IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc); - /* This should never happen as this function should - * never be called from interrupt context. 
*/ - if (WARN_ON_ONCE(in_interrupt())) - return; if (bss_conf->assoc) { priv->assoc_id = bss_conf->aid; priv->beacon_int = bss_conf->beacon_int; @@ -2470,27 +2493,35 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw, priv->timestamp = bss_conf->timestamp; priv->assoc_capability = bss_conf->assoc_capability; - /* we have just associated, don't start scan too early - * leave time for EAPOL exchange to complete + /* + * We have just associated, don't start scan too early + * leave time for EAPOL exchange to complete. + * + * XXX: do this in mac80211 */ priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN_AFTER_ASSOC; - mutex_lock(&priv->mutex); - priv->cfg->ops->lib->post_associate(priv); - mutex_unlock(&priv->mutex); - } else { + if (!iwl_is_rfkill(priv)) + priv->cfg->ops->lib->post_associate(priv); + } else priv->assoc_id = 0; - IWL_DEBUG_MAC80211(priv, "DISASSOC %d\n", bss_conf->assoc); + + } + + if (changes && iwl_is_associated(priv) && priv->assoc_id) { + IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n", + changes); + ret = iwl_send_rxon_assoc(priv); + if (!ret) { + /* Sync active_rxon with latest change. */ + memcpy((void *)&priv->active_rxon, + &priv->staging_rxon, + sizeof(struct iwl_rxon_cmd)); } - } else if (changes && iwl_is_associated(priv) && priv->assoc_id) { - IWL_DEBUG_MAC80211(priv, "Associated Changes %d\n", changes); - ret = iwl_send_rxon_assoc(priv); - if (!ret) - /* Sync active_rxon with latest change. */ - memcpy((void *)&priv->active_rxon, - &priv->staging_rxon, - sizeof(struct iwl_rxon_cmd)); } + + mutex_unlock(&priv->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); } EXPORT_SYMBOL(iwl_bss_info_changed); diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 83d3160..cb9bd4c 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -2498,8 +2498,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv) struct iwl3945_rxon_cmd *active_rxon = (struct iwl3945_rxon_cmd *)(&priv->active_rxon); - memcpy(&priv->staging_rxon, &priv->active_rxon, - sizeof(priv->staging_rxon)); + priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; } else { /* Initialize our rx_config data */ @@ -3147,7 +3146,6 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw) /* we should be verifying the device is ready to be opened */ mutex_lock(&priv->mutex); - memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon)); /* fetch ucode file from disk, alloc and copy to bus-master buffers ... * ucode filename and max sizes are card-specific. 
*/ diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c index 06a46d7..6564282 100644 --- a/drivers/net/wireless/libertas/if_spi.c +++ b/drivers/net/wireless/libertas/if_spi.c @@ -812,7 +812,6 @@ out: static void if_spi_e2h(struct if_spi_card *card) { int err = 0; - unsigned long flags; u32 cause; struct lbs_private *priv = card->priv; @@ -827,10 +826,7 @@ static void if_spi_e2h(struct if_spi_card *card) /* generate a card interrupt */ spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG, IF_SPI_CIC_HOST_EVENT); - spin_lock_irqsave(&priv->driver_lock, flags); lbs_queue_event(priv, cause & 0xff); - spin_unlock_irqrestore(&priv->driver_lock, flags); - out: if (err) lbs_pr_err("%s: error %d\n", __func__, err); @@ -875,7 +871,12 @@ static int lbs_spi_thread(void *data) err = if_spi_c2h_data(card); if (err) goto err; - if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY) { + + /* workaround: in PS mode, the card does not set the Command + * Download Ready bit, but it sets TX Download Ready. */ + if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY || + (card->priv->psstate != PS_STATE_FULL_POWER && + (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) { /* This means two things. First of all, * if there was a previous command sent, the card has * successfully received it. diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 3039fcb..1240351 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -99,11 +99,11 @@ void pci_update_resource(struct pci_dev *dev, int resno) int pci_claim_resource(struct pci_dev *dev, int resource) { struct resource *res = &dev->resource[resource]; - struct resource *root = NULL; + struct resource *root; char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge"; int err; - root = pcibios_select_root(dev, res); + root = pci_find_parent_resource(dev, res); err = -EINVAL; if (root != NULL) diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index 2faf0e1..74909c4 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -177,7 +177,7 @@ dell_send_request(struct calling_interface_buffer *buffer, int class, static int dell_rfkill_set(void *data, bool blocked) { struct calling_interface_buffer buffer; - int disable = blocked ? 0 : 1; + int disable = blocked ? 1 : 0; unsigned long radio = (unsigned long)data; memset(&buffer, 0, sizeof(struct calling_interface_buffer)); diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index e48d9a4..dafaa4a 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -1133,8 +1133,9 @@ static void sony_nc_rfkill_update() continue; if (hwblock) { - if (rfkill_set_hw_state(sony_rfkill_devices[i], true)) - sony_nc_rfkill_set((void *)i, true); + if (rfkill_set_hw_state(sony_rfkill_devices[i], true)) { + /* we already know we're blocked */ + } continue; } diff --git a/drivers/pps/Kconfig b/drivers/pps/Kconfig new file mode 100644 index 0000000..cc2eb8e --- /dev/null +++ b/drivers/pps/Kconfig @@ -0,0 +1,33 @@ +# +# PPS support configuration +# + +menu "PPS support" + +config PPS + tristate "PPS support" + depends on EXPERIMENTAL + ---help--- + PPS (Pulse Per Second) is a special pulse provided by some GPS + antennae. Userland can use it to get a high-precision time + reference. + + Some antennae's PPS signals are connected with the CD (Carrier + Detect) pin of the serial line they use to communicate with the + host. In this case use the SERIAL_LINE client support. 
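The userland counterpart of this help text: each registered source shows up as a PPS character device whose events are read with the PPS_FETCH ioctl handled by pps_cdev_ioctl() further down. A minimal sketch of a reader, assuming a device node at /dev/pps0 and the ioctl definitions from <linux/pps.h>, with most error handling omitted:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pps.h>

int main(void)
{
	struct pps_fdata fdata;
	int fd = open("/dev/pps0", O_RDONLY);

	if (fd < 0)
		return 1;

	memset(&fdata, 0, sizeof(fdata));
	fdata.timeout.flags = PPS_TIME_INVALID;	/* block until the next pulse */

	if (ioctl(fd, PPS_FETCH, &fdata) == 0)
		printf("assert #%u at %lld.%09d\n",
		       fdata.info.assert_sequence,
		       (long long)fdata.info.assert_tu.sec,
		       fdata.info.assert_tu.nsec);

	close(fd);
	return 0;
}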
+ + Some antennae's PPS signals are connected with some special host + inputs so you have to enable the corresponding client support. + + To compile this driver as a module, choose M here: the module + will be called pps_core.ko. + +config PPS_DEBUG + bool "PPS debugging messages" + depends on PPS + help + Say Y here if you want the PPS support to produce a bunch of debug + messages to the system log. Select this if you are having a + problem with PPS support and want to see more of what is going on. + +endmenu diff --git a/drivers/pps/Makefile b/drivers/pps/Makefile new file mode 100644 index 0000000..19ea582 --- /dev/null +++ b/drivers/pps/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for the PPS core. +# + +pps_core-y := pps.o kapi.o sysfs.o +obj-$(CONFIG_PPS) := pps_core.o + +ccflags-$(CONFIG_PPS_DEBUG) := -DDEBUG diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c new file mode 100644 index 0000000..35a0b19 --- /dev/null +++ b/drivers/pps/kapi.c @@ -0,0 +1,329 @@ +/* + * kernel API + * + * + * Copyright (C) 2005-2009 Rodolfo Giometti <giometti@linux.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/time.h> +#include <linux/spinlock.h> +#include <linux/idr.h> +#include <linux/fs.h> +#include <linux/pps_kernel.h> + +/* + * Global variables + */ + +DEFINE_SPINLOCK(pps_idr_lock); +DEFINE_IDR(pps_idr); + +/* + * Local functions + */ + +static void pps_add_offset(struct pps_ktime *ts, struct pps_ktime *offset) +{ + ts->nsec += offset->nsec; + while (ts->nsec >= NSEC_PER_SEC) { + ts->nsec -= NSEC_PER_SEC; + ts->sec++; + } + while (ts->nsec < 0) { + ts->nsec += NSEC_PER_SEC; + ts->sec--; + } + ts->sec += offset->sec; } + +/* + * Exported functions + */ + +/* pps_get_source - find a PPS source + * @source: the PPS source ID. + * + * This function is used to find an already registered PPS source in the + * system. + * + * The function returns NULL if nothing is found, otherwise it returns a pointer + * to the PPS source data struct (the refcounter is incremented by 1). + */ + +struct pps_device *pps_get_source(int source) +{ + struct pps_device *pps; + unsigned long flags; + + spin_lock_irqsave(&pps_idr_lock, flags); + + pps = idr_find(&pps_idr, source); + if (pps != NULL) + atomic_inc(&pps->usage); + + spin_unlock_irqrestore(&pps_idr_lock, flags); + + return pps; +} + +/* pps_put_source - free the PPS source data + * @pps: a pointer to the PPS source. + * + * This function is used to free a PPS data struct if its refcount is 0. + */ + +void pps_put_source(struct pps_device *pps) +{ + unsigned long flags; + + spin_lock_irqsave(&pps_idr_lock, flags); + BUG_ON(atomic_read(&pps->usage) == 0); + + if (!atomic_dec_and_test(&pps->usage)) { + pps = NULL; + goto exit; + } + + /* No more reference to the PPS source.
We can safely remove the + * PPS data struct. + */ + idr_remove(&pps_idr, pps->id); + +exit: + spin_unlock_irqrestore(&pps_idr_lock, flags); + kfree(pps); +} + +/* pps_register_source - add a PPS source in the system + * @info: the PPS info struct + * @default_params: the default PPS parameters of the new source + * + * This function is used to add a new PPS source in the system. The new + * source is described by info's fields and it will have, as default PPS + * parameters, the ones specified in default_params. + * + * The function returns, in case of success, the PPS source ID. + */ + +int pps_register_source(struct pps_source_info *info, int default_params) +{ + struct pps_device *pps; + int id; + int err; + + /* Sanity checks */ + if ((info->mode & default_params) != default_params) { + printk(KERN_ERR "pps: %s: unsupported default parameters\n", + info->name); + err = -EINVAL; + goto pps_register_source_exit; + } + if ((info->mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR)) != 0 && + info->echo == NULL) { + printk(KERN_ERR "pps: %s: echo function is not defined\n", + info->name); + err = -EINVAL; + goto pps_register_source_exit; + } + if ((info->mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) { + printk(KERN_ERR "pps: %s: unspecified time format\n", + info->name); + err = -EINVAL; + goto pps_register_source_exit; + } + + /* Allocate memory for the new PPS source struct */ + pps = kzalloc(sizeof(struct pps_device), GFP_KERNEL); + if (pps == NULL) { + err = -ENOMEM; + goto pps_register_source_exit; + } + + /* These initializations must be done before calling idr_get_new() + * in order to avoid races in pps_event(). + */ + pps->params.api_version = PPS_API_VERS; + pps->params.mode = default_params; + pps->info = *info; + + init_waitqueue_head(&pps->queue); + spin_lock_init(&pps->lock); + atomic_set(&pps->usage, 1); + + /* Get new ID for the new PPS source */ + if (idr_pre_get(&pps_idr, GFP_KERNEL) == 0) { + err = -ENOMEM; + goto kfree_pps; + } + + spin_lock_irq(&pps_idr_lock); + + /* Now really allocate the PPS source. + * After the idr_get_new() call the new source will be freely available + * in the kernel. + */ + err = idr_get_new(&pps_idr, pps, &id); + if (err < 0) { + spin_unlock_irq(&pps_idr_lock); + goto kfree_pps; + } + + id = id & MAX_ID_MASK; + if (id >= PPS_MAX_SOURCES) { + spin_unlock_irq(&pps_idr_lock); + + printk(KERN_ERR "pps: %s: too many PPS sources in the system\n", + info->name); + err = -EBUSY; + goto free_idr; + } + pps->id = id; + + spin_unlock_irq(&pps_idr_lock); + + /* Create the char device */ + err = pps_register_cdev(pps); + if (err < 0) { + printk(KERN_ERR "pps: %s: unable to create char device\n", + info->name); + goto free_idr; + } + + pr_info("new PPS source %s at ID %d\n", info->name, id); + + return id; + +free_idr: + spin_lock_irq(&pps_idr_lock); + idr_remove(&pps_idr, id); + spin_unlock_irq(&pps_idr_lock); + +kfree_pps: + kfree(pps); + +pps_register_source_exit: + printk(KERN_ERR "pps: %s: unable to register source\n", info->name); + + return err; +} +EXPORT_SYMBOL(pps_register_source); + +/* pps_unregister_source - remove a PPS source from the system + * @source: the PPS source ID + * + * This function is used to remove a previously registered PPS source from + * the system.
+ */ + +void pps_unregister_source(int source) +{ + struct pps_device *pps; + + spin_lock_irq(&pps_idr_lock); + pps = idr_find(&pps_idr, source); + + if (!pps) { + BUG(); + spin_unlock_irq(&pps_idr_lock); + return; + } + spin_unlock_irq(&pps_idr_lock); + + pps_unregister_cdev(pps); + pps_put_source(pps); +} +EXPORT_SYMBOL(pps_unregister_source); + +/* pps_event - register a PPS event into the system + * @source: the PPS source ID + * @ts: the event timestamp + * @event: the event type + * @data: userdef pointer + * + * This function is used by each PPS client in order to register a new + * PPS event into the system (it's usually called inside an IRQ handler). + * + * If an echo function is associated with the PPS source it will be called + * as: + * pps->info.echo(source, event, data); + */ + +void pps_event(int source, struct pps_ktime *ts, int event, void *data) +{ + struct pps_device *pps; + unsigned long flags; + + if ((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0) { + printk(KERN_ERR "pps: unknown event (%x) for source %d\n", + event, source); + return; + } + + pps = pps_get_source(source); + if (!pps) + return; + + pr_debug("PPS event on source %d at %llu.%06u\n", + pps->id, (unsigned long long) ts->sec, ts->nsec); + + spin_lock_irqsave(&pps->lock, flags); + + /* Must call the echo function? */ + if ((pps->params.mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR))) + pps->info.echo(source, event, data); + + /* Check the event */ + pps->current_mode = pps->params.mode; + if (event & PPS_CAPTUREASSERT) { + /* We have to add an offset? */ + if (pps->params.mode & PPS_OFFSETASSERT) + pps_add_offset(ts, &pps->params.assert_off_tu); + + /* Save the time stamp */ + pps->assert_tu = *ts; + pps->assert_sequence++; + pr_debug("capture assert seq #%u for source %d\n", + pps->assert_sequence, source); + } + if (event & PPS_CAPTURECLEAR) { + /* We have to add an offset? */ + if (pps->params.mode & PPS_OFFSETCLEAR) + pps_add_offset(ts, &pps->params.clear_off_tu); + + /* Save the time stamp */ + pps->clear_tu = *ts; + pps->clear_sequence++; + pr_debug("capture clear seq #%u for source %d\n", + pps->clear_sequence, source); + } + + pps->go = ~0; + wake_up_interruptible(&pps->queue); + + kill_fasync(&pps->async_queue, SIGIO, POLL_IN); + + spin_unlock_irqrestore(&pps->lock, flags); + + /* Now we can release the PPS source for (possible) deregistration */ + pps_put_source(pps); +} +EXPORT_SYMBOL(pps_event); diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c new file mode 100644 index 0000000..ac8cc8c --- /dev/null +++ b/drivers/pps/pps.c @@ -0,0 +1,312 @@ +/* + * PPS core file + * + * + * Copyright (C) 2005-2009 Rodolfo Giometti <giometti@linux.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
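
pps_event() is the client-facing notification hook. A typical caller, sketched below for an assumed GPIO-style interrupt source, takes the timestamp as early as possible in the handler to keep jitter low; 'source' is the ID obtained from pps_register_source() as in the registration sketch earlier.

/* Hypothetical interrupt handler feeding the source registered above */
static irqreturn_t pps_gpio_irq(int irq, void *dev_id)
{
	struct timespec ts;
	struct pps_ktime pps_ts;

	/* timestamp first, everything else after */
	getnstimeofday(&ts);
	pps_ts.sec = ts.tv_sec;
	pps_ts.nsec = ts.tv_nsec;

	pps_event(source, &pps_ts, PPS_CAPTUREASSERT, NULL);

	return IRQ_HANDLED;
}
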
+ */ + + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/uaccess.h> +#include <linux/idr.h> +#include <linux/cdev.h> +#include <linux/poll.h> +#include <linux/pps_kernel.h> + +/* + * Local variables + */ + +static dev_t pps_devt; +static struct class *pps_class; + +/* + * Char device methods + */ + +static unsigned int pps_cdev_poll(struct file *file, poll_table *wait) +{ + struct pps_device *pps = file->private_data; + + poll_wait(file, &pps->queue, wait); + + return POLLIN | POLLRDNORM; +} + +static int pps_cdev_fasync(int fd, struct file *file, int on) +{ + struct pps_device *pps = file->private_data; + return fasync_helper(fd, file, on, &pps->async_queue); +} + +static long pps_cdev_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct pps_device *pps = file->private_data; + struct pps_kparams params; + struct pps_fdata fdata; + unsigned long ticks; + void __user *uarg = (void __user *) arg; + int __user *iuarg = (int __user *) arg; + int err; + + switch (cmd) { + case PPS_GETPARAMS: + pr_debug("PPS_GETPARAMS: source %d\n", pps->id); + + /* Return current parameters */ + err = copy_to_user(uarg, &pps->params, + sizeof(struct pps_kparams)); + if (err) + return -EFAULT; + + break; + + case PPS_SETPARAMS: + pr_debug("PPS_SETPARAMS: source %d\n", pps->id); + + /* Check the capabilities */ + if (!capable(CAP_SYS_TIME)) + return -EPERM; + + err = copy_from_user(¶ms, uarg, sizeof(struct pps_kparams)); + if (err) + return -EFAULT; + if (!(params.mode & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR))) { + pr_debug("capture mode unspecified (%x)\n", + params.mode); + return -EINVAL; + } + + /* Check for supported capabilities */ + if ((params.mode & ~pps->info.mode) != 0) { + pr_debug("unsupported capabilities (%x)\n", + params.mode); + return -EINVAL; + } + + spin_lock_irq(&pps->lock); + + /* Save the new parameters */ + pps->params = params; + + /* Restore the read only parameters */ + if ((params.mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) { + /* section 3.3 of RFC 2783 interpreted */ + pr_debug("time format unspecified (%x)\n", + params.mode); + pps->params.mode |= PPS_TSFMT_TSPEC; + } + if (pps->info.mode & PPS_CANWAIT) + pps->params.mode |= PPS_CANWAIT; + pps->params.api_version = PPS_API_VERS; + + spin_unlock_irq(&pps->lock); + + break; + + case PPS_GETCAP: + pr_debug("PPS_GETCAP: source %d\n", pps->id); + + err = put_user(pps->info.mode, iuarg); + if (err) + return -EFAULT; + + break; + + case PPS_FETCH: + pr_debug("PPS_FETCH: source %d\n", pps->id); + + err = copy_from_user(&fdata, uarg, sizeof(struct pps_fdata)); + if (err) + return -EFAULT; + + pps->go = 0; + + /* Manage the timeout */ + if (fdata.timeout.flags & PPS_TIME_INVALID) + err = wait_event_interruptible(pps->queue, pps->go); + else { + pr_debug("timeout %lld.%09d\n", + (long long) fdata.timeout.sec, + fdata.timeout.nsec); + ticks = fdata.timeout.sec * HZ; + ticks += fdata.timeout.nsec / (NSEC_PER_SEC / HZ); + + if (ticks != 0) { + err = wait_event_interruptible_timeout( + pps->queue, pps->go, ticks); + if (err == 0) + return -ETIMEDOUT; + } + } + + /* Check for pending signals */ + if (err == -ERESTARTSYS) { + pr_debug("pending signal caught\n"); + return -EINTR; + } + + /* Return the fetched timestamp */ + spin_lock_irq(&pps->lock); + + fdata.info.assert_sequence = pps->assert_sequence; + fdata.info.clear_sequence = pps->clear_sequence; + fdata.info.assert_tu = pps->assert_tu; + fdata.info.clear_tu = pps->clear_tu; + 
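
The PPS_FETCH branch above implements the blocking fetch that RFC 2783's time_pps_fetch() maps onto. A minimal user-space consumer might look like the sketch below, assuming the pps.h header exported to user space carries struct pps_fdata and the ioctl numbers used by this handler.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pps.h>

int main(void)
{
	struct pps_fdata fdata;
	int fd = open("/dev/pps0", O_RDONLY);

	if (fd < 0)
		return 1;

	/* wait up to 2 s; setting PPS_TIME_INVALID in timeout.flags
	 * would request an indefinite wait instead */
	fdata.timeout.sec = 2;
	fdata.timeout.nsec = 0;
	fdata.timeout.flags = 0;

	if (ioctl(fd, PPS_FETCH, &fdata) < 0) {
		perror("PPS_FETCH");
		close(fd);
		return 1;
	}

	printf("assert %lld.%09d #%u\n",
	       (long long)fdata.info.assert_tu.sec,
	       fdata.info.assert_tu.nsec,
	       fdata.info.assert_sequence);
	close(fd);
	return 0;
}
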
fdata.info.current_mode = pps->current_mode; + + spin_unlock_irq(&pps->lock); + + err = copy_to_user(uarg, &fdata, sizeof(struct pps_fdata)); + if (err) + return -EFAULT; + + break; + + default: + return -ENOTTY; + break; + } + + return 0; +} + +static int pps_cdev_open(struct inode *inode, struct file *file) +{ + struct pps_device *pps = container_of(inode->i_cdev, + struct pps_device, cdev); + int found; + + found = pps_get_source(pps->id) != 0; + if (!found) + return -ENODEV; + + file->private_data = pps; + + return 0; +} + +static int pps_cdev_release(struct inode *inode, struct file *file) +{ + struct pps_device *pps = file->private_data; + + /* Free the PPS source and wake up (possible) deregistration */ + pps_put_source(pps); + + return 0; +} + +/* + * Char device stuff + */ + +static const struct file_operations pps_cdev_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .poll = pps_cdev_poll, + .fasync = pps_cdev_fasync, + .unlocked_ioctl = pps_cdev_ioctl, + .open = pps_cdev_open, + .release = pps_cdev_release, +}; + +int pps_register_cdev(struct pps_device *pps) +{ + int err; + + pps->devno = MKDEV(MAJOR(pps_devt), pps->id); + cdev_init(&pps->cdev, &pps_cdev_fops); + pps->cdev.owner = pps->info.owner; + + err = cdev_add(&pps->cdev, pps->devno, 1); + if (err) { + printk(KERN_ERR "pps: %s: failed to add char device %d:%d\n", + pps->info.name, MAJOR(pps_devt), pps->id); + return err; + } + pps->dev = device_create(pps_class, pps->info.dev, pps->devno, NULL, + "pps%d", pps->id); + if (err) + goto del_cdev; + dev_set_drvdata(pps->dev, pps); + + pr_debug("source %s got cdev (%d:%d)\n", pps->info.name, + MAJOR(pps_devt), pps->id); + + return 0; + +del_cdev: + cdev_del(&pps->cdev); + + return err; +} + +void pps_unregister_cdev(struct pps_device *pps) +{ + device_destroy(pps_class, pps->devno); + cdev_del(&pps->cdev); +} + +/* + * Module stuff + */ + +static void __exit pps_exit(void) +{ + class_destroy(pps_class); + unregister_chrdev_region(pps_devt, PPS_MAX_SOURCES); +} + +static int __init pps_init(void) +{ + int err; + + pps_class = class_create(THIS_MODULE, "pps"); + if (!pps_class) { + printk(KERN_ERR "pps: failed to allocate class\n"); + return -ENOMEM; + } + pps_class->dev_attrs = pps_attrs; + + err = alloc_chrdev_region(&pps_devt, 0, PPS_MAX_SOURCES, "pps"); + if (err < 0) { + printk(KERN_ERR "pps: failed to allocate char device region\n"); + goto remove_class; + } + + pr_info("LinuxPPS API ver. %d registered\n", PPS_API_VERS); + pr_info("Software ver. %s - Copyright 2005-2007 Rodolfo Giometti " + "<giometti@linux.it>\n", PPS_VERSION); + + return 0; + +remove_class: + class_destroy(pps_class); + + return err; +} + +subsys_initcall(pps_init); +module_exit(pps_exit); + +MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>"); +MODULE_DESCRIPTION("LinuxPPS support (RFC 2783) - ver. " PPS_VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/pps/sysfs.c b/drivers/pps/sysfs.c new file mode 100644 index 0000000..ef0978c --- /dev/null +++ b/drivers/pps/sysfs.c @@ -0,0 +1,98 @@ +/* + * PPS sysfs support + * + * + * Copyright (C) 2007-2009 Rodolfo Giometti <giometti@linux.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
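
One apparent slip in pps_register_cdev() above: err is never assigned from the device_create() call, so the following if (err) test cannot catch a failed device creation; device_create() reports failure through an ERR_PTR return value. Likewise in pps_init(), class_create() returns an ERR_PTR rather than NULL, so the !pps_class test never fires and IS_ERR(pps_class) is presumably intended. A corrected device-creation sequence would plausibly read:

	pps->dev = device_create(pps_class, pps->info.dev, pps->devno,
				 NULL, "pps%d", pps->id);
	if (IS_ERR(pps->dev)) {
		err = PTR_ERR(pps->dev);
		goto del_cdev;
	}
	dev_set_drvdata(pps->dev, pps);
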
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/pps_kernel.h> + +/* + * Attribute functions + */ + +static ssize_t pps_show_assert(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pps_device *pps = dev_get_drvdata(dev); + + if (!(pps->info.mode & PPS_CAPTUREASSERT)) + return 0; + + return sprintf(buf, "%lld.%09d#%d\n", + (long long) pps->assert_tu.sec, pps->assert_tu.nsec, + pps->assert_sequence); +} + +static ssize_t pps_show_clear(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pps_device *pps = dev_get_drvdata(dev); + + if (!(pps->info.mode & PPS_CAPTURECLEAR)) + return 0; + + return sprintf(buf, "%lld.%09d#%d\n", + (long long) pps->clear_tu.sec, pps->clear_tu.nsec, + pps->clear_sequence); +} + +static ssize_t pps_show_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pps_device *pps = dev_get_drvdata(dev); + + return sprintf(buf, "%4x\n", pps->info.mode); +} + +static ssize_t pps_show_echo(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pps_device *pps = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", !!pps->info.echo); +} + +static ssize_t pps_show_name(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pps_device *pps = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", pps->info.name); +} + +static ssize_t pps_show_path(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pps_device *pps = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", pps->info.path); +} + +struct device_attribute pps_attrs[] = { + __ATTR(assert, S_IRUGO, pps_show_assert, NULL), + __ATTR(clear, S_IRUGO, pps_show_clear, NULL), + __ATTR(mode, S_IRUGO, pps_show_mode, NULL), + __ATTR(echo, S_IRUGO, pps_show_echo, NULL), + __ATTR(name, S_IRUGO, pps_show_name, NULL), + __ATTR(path, S_IRUGO, pps_show_path, NULL), + __ATTR_NULL, +}; diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 277d35d..81adbdb 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -296,6 +296,15 @@ config RTC_DRV_RX8581 This driver can also be built as a module. If so the module will be called rtc-rx8581. +config RTC_DRV_RX8025 + tristate "Epson RX-8025SA/NB" + help + If you say yes here you get support for the Epson + RX-8025SA/NB RTC chips. + + This driver can also be built as a module. If so, the module + will be called rtc-rx8025. 
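
Back in the PPS sysfs attributes defined above, the assert and clear files print "<sec>.<nsec>#<seq>", matching the ABI document, and stay empty until the first event. A small user-space reader for that format (the pps0 path is an example):

#include <stdio.h>

int main(void)
{
	long long sec;
	int nsec;
	unsigned int seq;
	FILE *f = fopen("/sys/class/pps/pps0/assert", "r");

	if (!f)
		return 1;

	/* empty file means the source saw no assert edge yet */
	if (fscanf(f, "%lld.%d#%u", &sec, &nsec, &seq) == 3)
		printf("assert #%u at %lld.%09d\n", seq, sec, nsec);
	else
		printf("no assert event yet\n");

	fclose(f);
	return 0;
}
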
+ endif # I2C comment "SPI RTC drivers" diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 6c0639a..3c0f2b2 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -62,6 +62,7 @@ obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o +obj-$(CONFIG_RTC_DRV_RX8025) += rtc-rx8025.o obj-$(CONFIG_RTC_DRV_RX8581) += rtc-rx8581.o obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 2c4a653..8a6f9a9 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -31,6 +31,8 @@ enum ds_type { ds_1338, ds_1339, ds_1340, + ds_1388, + ds_3231, m41t00, rx_8025, // rs5c372 too? different address... @@ -66,6 +68,7 @@ enum ds_type { #define DS1337_REG_CONTROL 0x0e # define DS1337_BIT_nEOSC 0x80 # define DS1339_BIT_BBSQI 0x20 +# define DS3231_BIT_BBSQW 0x40 /* same as BBSQI */ # define DS1337_BIT_RS2 0x10 # define DS1337_BIT_RS1 0x08 # define DS1337_BIT_INTCN 0x04 @@ -94,6 +97,7 @@ enum ds_type { struct ds1307 { + u8 offset; /* register's offset */ u8 regs[11]; enum ds_type type; unsigned long flags; @@ -128,6 +132,9 @@ static const struct chip_desc chips[] = { }, [ds_1340] = { }, +[ds_3231] = { + .alarm = 1, +}, [m41t00] = { }, [rx_8025] = { @@ -138,7 +145,9 @@ static const struct i2c_device_id ds1307_id[] = { { "ds1337", ds_1337 }, { "ds1338", ds_1338 }, { "ds1339", ds_1339 }, + { "ds1388", ds_1388 }, { "ds1340", ds_1340 }, + { "ds3231", ds_3231 }, { "m41t00", m41t00 }, { "rx8025", rx_8025 }, { } @@ -291,7 +300,7 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t) /* read the RTC date and time registers all at once */ tmp = ds1307->read_block_data(ds1307->client, - DS1307_REG_SECS, 7, ds1307->regs); + ds1307->offset, 7, ds1307->regs); if (tmp != 7) { dev_err(dev, "%s error %d\n", "read", tmp); return -EIO; @@ -353,6 +362,7 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t) switch (ds1307->type) { case ds_1337: case ds_1339: + case ds_3231: buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY; break; case ds_1340: @@ -367,7 +377,8 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t) "write", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]); - result = ds1307->write_block_data(ds1307->client, 0, 7, buf); + result = ds1307->write_block_data(ds1307->client, + ds1307->offset, 7, buf); if (result < 0) { dev_err(dev, "%s error %d\n", "write", result); return result; @@ -624,6 +635,11 @@ static int __devinit ds1307_probe(struct i2c_client *client, struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); int want_irq = false; unsigned char *buf; + static const int bbsqi_bitpos[] = { + [ds_1337] = 0, + [ds_1339] = DS1339_BIT_BBSQI, + [ds_3231] = DS3231_BIT_BBSQW, + }; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA) && !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) @@ -632,9 +648,12 @@ static int __devinit ds1307_probe(struct i2c_client *client, if (!(ds1307 = kzalloc(sizeof(struct ds1307), GFP_KERNEL))) return -ENOMEM; - ds1307->client = client; i2c_set_clientdata(client, ds1307); - ds1307->type = id->driver_data; + + ds1307->client = client; + ds1307->type = id->driver_data; + ds1307->offset = 0; + buf = ds1307->regs; if (i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { ds1307->read_block_data = i2c_smbus_read_i2c_block_data; @@ -647,6 +666,7 @@ static 
int __devinit ds1307_probe(struct i2c_client *client, switch (ds1307->type) { case ds_1337: case ds_1339: + case ds_3231: /* has IRQ? */ if (ds1307->client->irq > 0 && chip->alarm) { INIT_WORK(&ds1307->work, ds1307_work); @@ -666,12 +686,12 @@ static int __devinit ds1307_probe(struct i2c_client *client, ds1307->regs[0] &= ~DS1337_BIT_nEOSC; /* Using IRQ? Disable the square wave and both alarms. - * For ds1339, be sure alarms can trigger when we're - * running on Vbackup (BBSQI); we assume ds1337 will - * ignore that bit + * For some variants, be sure alarms can trigger when we're + * running on Vbackup (BBSQI/BBSQW) */ if (want_irq) { - ds1307->regs[0] |= DS1337_BIT_INTCN | DS1339_BIT_BBSQI; + ds1307->regs[0] |= DS1337_BIT_INTCN + | bbsqi_bitpos[ds1307->type]; ds1307->regs[0] &= ~(DS1337_BIT_A2IE | DS1337_BIT_A1IE); } @@ -751,6 +771,9 @@ static int __devinit ds1307_probe(struct i2c_client *client, hour); } break; + case ds_1388: + ds1307->offset = 1; /* Seconds starts at 1 */ + break; default: break; } @@ -814,6 +837,8 @@ read_rtc: case rx_8025: case ds_1337: case ds_1339: + case ds_1388: + case ds_3231: break; } diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c index 38d472b..7172885 100644 --- a/drivers/rtc/rtc-ds1553.c +++ b/drivers/rtc/rtc-ds1553.c @@ -329,8 +329,7 @@ static int __devinit ds1553_rtc_probe(struct platform_device *pdev) if (pdata->irq > 0) { writeb(0, ioaddr + RTC_INTERRUPTS); if (request_irq(pdata->irq, ds1553_rtc_interrupt, - IRQF_DISABLED | IRQF_SHARED, - pdev->name, pdev) < 0) { + IRQF_DISABLED, pdev->name, pdev) < 0) { dev_warn(&pdev->dev, "interrupt not available.\n"); pdata->irq = 0; } diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c index 8bc8501..0924945 100644 --- a/drivers/rtc/rtc-ds1742.c +++ b/drivers/rtc/rtc-ds1742.c @@ -57,6 +57,7 @@ struct rtc_plat_data { size_t size; resource_size_t baseaddr; unsigned long last_jiffies; + struct bin_attribute nvram_attr; }; static int ds1742_rtc_set_time(struct device *dev, struct rtc_time *tm) @@ -157,18 +158,6 @@ static ssize_t ds1742_nvram_write(struct kobject *kobj, return count; } -static struct bin_attribute ds1742_nvram_attr = { - .attr = { - .name = "nvram", - .mode = S_IRUGO | S_IWUSR, - }, - .read = ds1742_nvram_read, - .write = ds1742_nvram_write, - /* REVISIT: size in sysfs won't match actual size... if it's - * not a constant, each RTC should have its own attribute. 
- */ -}; - static int __devinit ds1742_rtc_probe(struct platform_device *pdev) { struct rtc_device *rtc; @@ -199,6 +188,12 @@ static int __devinit ds1742_rtc_probe(struct platform_device *pdev) pdata->size_nvram = pdata->size - RTC_SIZE; pdata->ioaddr_rtc = ioaddr + pdata->size_nvram; + pdata->nvram_attr.attr.name = "nvram"; + pdata->nvram_attr.attr.mode = S_IRUGO | S_IWUSR; + pdata->nvram_attr.read = ds1742_nvram_read; + pdata->nvram_attr.write = ds1742_nvram_write; + pdata->nvram_attr.size = pdata->size_nvram; + /* turn RTC on if it was not on */ ioaddr = pdata->ioaddr_rtc; sec = readb(ioaddr + RTC_SECONDS); @@ -221,11 +216,13 @@ static int __devinit ds1742_rtc_probe(struct platform_device *pdev) pdata->rtc = rtc; pdata->last_jiffies = jiffies; platform_set_drvdata(pdev, pdata); - ds1742_nvram_attr.size = max(ds1742_nvram_attr.size, - pdata->size_nvram); - ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1742_nvram_attr); - if (ret) + + ret = sysfs_create_bin_file(&pdev->dev.kobj, &pdata->nvram_attr); + if (ret) { + dev_err(&pdev->dev, "creating nvram file in sysfs failed\n"); goto out; + } + return 0; out: if (pdata->rtc) @@ -242,7 +239,7 @@ static int __devexit ds1742_rtc_remove(struct platform_device *pdev) { struct rtc_plat_data *pdata = platform_get_drvdata(pdev); - sysfs_remove_bin_file(&pdev->dev.kobj, &ds1742_nvram_attr); + sysfs_remove_bin_file(&pdev->dev.kobj, &pdata->nvram_attr); rtc_device_unregister(pdata->rtc); iounmap(pdata->ioaddr_nvram); release_mem_region(pdata->baseaddr, pdata->size); diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c new file mode 100644 index 0000000..b1a29bc --- /dev/null +++ b/drivers/rtc/rtc-rx8025.c @@ -0,0 +1,688 @@ +/* + * Driver for Epson's RTC module RX-8025 SA/NB + * + * Copyright (C) 2009 Wolfgang Grandegger <wg@grandegger.com> + * + * Copyright (C) 2005 by Digi International Inc. + * All rights reserved. + * + * Modified by fengjh at rising.com.cn + * <http://lists.lm-sensors.org/mailman/listinfo/lm-sensors> + * 2006.11 + * + * Code cleanup by Sergei Poselenov, <sposelenov@emcraft.com> + * Converted to new style by Wolfgang Grandegger <wg@grandegger.com> + * Alarm and periodic interrupt added by Dmitry Rakhchev <rda@emcraft.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/bcd.h> +#include <linux/i2c.h> +#include <linux/list.h> +#include <linux/rtc.h> + +/* Register definitions */ +#define RX8025_REG_SEC 0x00 +#define RX8025_REG_MIN 0x01 +#define RX8025_REG_HOUR 0x02 +#define RX8025_REG_WDAY 0x03 +#define RX8025_REG_MDAY 0x04 +#define RX8025_REG_MONTH 0x05 +#define RX8025_REG_YEAR 0x06 +#define RX8025_REG_DIGOFF 0x07 +#define RX8025_REG_ALWMIN 0x08 +#define RX8025_REG_ALWHOUR 0x09 +#define RX8025_REG_ALWWDAY 0x0a +#define RX8025_REG_ALDMIN 0x0b +#define RX8025_REG_ALDHOUR 0x0c +/* 0x0d is reserved */ +#define RX8025_REG_CTRL1 0x0e +#define RX8025_REG_CTRL2 0x0f + +#define RX8025_BIT_CTRL1_CT (7 << 0) +/* 1 Hz periodic level irq */ +#define RX8025_BIT_CTRL1_CT_1HZ 4 +#define RX8025_BIT_CTRL1_TEST (1 << 3) +#define RX8025_BIT_CTRL1_1224 (1 << 5) +#define RX8025_BIT_CTRL1_DALE (1 << 6) +#define RX8025_BIT_CTRL1_WALE (1 << 7) + +#define RX8025_BIT_CTRL2_DAFG (1 << 0) +#define RX8025_BIT_CTRL2_WAFG (1 << 1) +#define RX8025_BIT_CTRL2_CTFG (1 << 2) +#define RX8025_BIT_CTRL2_PON (1 << 4) +#define RX8025_BIT_CTRL2_XST (1 << 5) +#define RX8025_BIT_CTRL2_VDET (1 << 6) + +/* Clock precision adjustment */ +#define RX8025_ADJ_RESOLUTION 3050 /* in ppb */ +#define RX8025_ADJ_DATA_MAX 62 +#define RX8025_ADJ_DATA_MIN -62 + +static const struct i2c_device_id rx8025_id[] = { + { "rx8025", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, rx8025_id); + +struct rx8025_data { + struct i2c_client *client; + struct rtc_device *rtc; + struct work_struct work; + u8 ctrl1; + unsigned exiting:1; +}; + +static int rx8025_read_reg(struct i2c_client *client, int number, u8 *value) +{ + int ret = i2c_smbus_read_byte_data(client, (number << 4) | 0x08); + + if (ret < 0) { + dev_err(&client->dev, "Unable to read register #%d\n", number); + return ret; + } + + *value = ret; + return 0; +} + +static int rx8025_read_regs(struct i2c_client *client, + int number, u8 length, u8 *values) +{ + int ret = i2c_smbus_read_i2c_block_data(client, (number << 4) | 0x08, + length, values); + + if (ret != length) { + dev_err(&client->dev, "Unable to read registers #%d..#%d\n", + number, number + length - 1); + return ret < 0 ? 
ret : -EIO; + } + + return 0; +} + +static int rx8025_write_reg(struct i2c_client *client, int number, u8 value) +{ + int ret = i2c_smbus_write_byte_data(client, number << 4, value); + + if (ret) + dev_err(&client->dev, "Unable to write register #%d\n", + number); + + return ret; +} + +static int rx8025_write_regs(struct i2c_client *client, + int number, u8 length, u8 *values) +{ + int ret = i2c_smbus_write_i2c_block_data(client, (number << 4) | 0x08, + length, values); + + if (ret) + dev_err(&client->dev, "Unable to write registers #%d..#%d\n", + number, number + length - 1); + + return ret; +} + +static irqreturn_t rx8025_irq(int irq, void *dev_id) +{ + struct i2c_client *client = dev_id; + struct rx8025_data *rx8025 = i2c_get_clientdata(client); + + disable_irq_nosync(irq); + schedule_work(&rx8025->work); + return IRQ_HANDLED; +} + +static void rx8025_work(struct work_struct *work) +{ + struct rx8025_data *rx8025 = container_of(work, struct rx8025_data, + work); + struct i2c_client *client = rx8025->client; + struct mutex *lock = &rx8025->rtc->ops_lock; + u8 status; + + mutex_lock(lock); + + if (rx8025_read_reg(client, RX8025_REG_CTRL2, &status)) + goto out; + + if (!(status & RX8025_BIT_CTRL2_XST)) + dev_warn(&client->dev, "Oscillation stop was detected," + "you may have to readjust the clock\n"); + + if (status & RX8025_BIT_CTRL2_CTFG) { + /* periodic */ + status &= ~RX8025_BIT_CTRL2_CTFG; + local_irq_disable(); + rtc_update_irq(rx8025->rtc, 1, RTC_PF | RTC_IRQF); + local_irq_enable(); + } + + if (status & RX8025_BIT_CTRL2_DAFG) { + /* alarm */ + status &= RX8025_BIT_CTRL2_DAFG; + if (rx8025_write_reg(client, RX8025_REG_CTRL1, + rx8025->ctrl1 & ~RX8025_BIT_CTRL1_DALE)) + goto out; + local_irq_disable(); + rtc_update_irq(rx8025->rtc, 1, RTC_AF | RTC_IRQF); + local_irq_enable(); + } + + /* acknowledge IRQ */ + rx8025_write_reg(client, RX8025_REG_CTRL2, + status | RX8025_BIT_CTRL2_XST); + +out: + if (!rx8025->exiting) + enable_irq(client->irq); + + mutex_unlock(lock); +} + +static int rx8025_get_time(struct device *dev, struct rtc_time *dt) +{ + struct rx8025_data *rx8025 = dev_get_drvdata(dev); + u8 date[7]; + int err; + + err = rx8025_read_regs(rx8025->client, RX8025_REG_SEC, 7, date); + if (err) + return err; + + dev_dbg(dev, "%s: read 0x%02x 0x%02x " + "0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n", __func__, + date[0], date[1], date[2], date[3], date[4], + date[5], date[6]); + + dt->tm_sec = bcd2bin(date[RX8025_REG_SEC] & 0x7f); + dt->tm_min = bcd2bin(date[RX8025_REG_MIN] & 0x7f); + if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224) + dt->tm_hour = bcd2bin(date[RX8025_REG_HOUR] & 0x3f); + else + dt->tm_hour = bcd2bin(date[RX8025_REG_HOUR] & 0x1f) % 12 + + (date[RX8025_REG_HOUR] & 0x20 ? 12 : 0); + + dt->tm_mday = bcd2bin(date[RX8025_REG_MDAY] & 0x3f); + dt->tm_mon = bcd2bin(date[RX8025_REG_MONTH] & 0x1f) - 1; + dt->tm_year = bcd2bin(date[RX8025_REG_YEAR]); + + if (dt->tm_year < 70) + dt->tm_year += 100; + + dev_dbg(dev, "%s: date %ds %dm %dh %dmd %dm %dy\n", __func__, + dt->tm_sec, dt->tm_min, dt->tm_hour, + dt->tm_mday, dt->tm_mon, dt->tm_year); + + return rtc_valid_tm(dt); +} + +static int rx8025_set_time(struct device *dev, struct rtc_time *dt) +{ + struct rx8025_data *rx8025 = dev_get_drvdata(dev); + u8 date[7]; + + /* + * BUG: The HW assumes every year that is a multiple of 4 to be a leap + * year. Next time this is wrong is 2100, which will not be a leap + * year. + */ + + /* + * Here the read-only bits are written as "0". I'm not sure if that + * is sound. 
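
The hour decoding in rx8025_get_time() above handles both register layouts: plain BCD in 24-hour mode, and BCD 1 to 12 plus a PM flag in bit 5 otherwise. The standalone sketch below replays the 12-hour arithmetic for two edge values:

#include <stdio.h>

/* same conversion <linux/bcd.h> provides */
static unsigned int bcd2bin(unsigned char val)
{
	return (val & 0x0f) + (val >> 4) * 10;
}

static unsigned int hour12_to_24(unsigned char reg)
{
	/* low 5 bits: BCD 1..12; bit 5: PM */
	return bcd2bin(reg & 0x1f) % 12 + ((reg & 0x20) ? 12 : 0);
}

int main(void)
{
	printf("%u\n", hour12_to_24(0x31));	/* PM 11 -> 23 */
	printf("%u\n", hour12_to_24(0x12));	/* AM 12 -> 0, midnight */
	return 0;
}
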
+ */ + date[RX8025_REG_SEC] = bin2bcd(dt->tm_sec); + date[RX8025_REG_MIN] = bin2bcd(dt->tm_min); + if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224) + date[RX8025_REG_HOUR] = bin2bcd(dt->tm_hour); + else + date[RX8025_REG_HOUR] = (dt->tm_hour >= 12 ? 0x20 : 0) + | bin2bcd((dt->tm_hour + 11) % 12 + 1); + + date[RX8025_REG_WDAY] = bin2bcd(dt->tm_wday); + date[RX8025_REG_MDAY] = bin2bcd(dt->tm_mday); + date[RX8025_REG_MONTH] = bin2bcd(dt->tm_mon + 1); + date[RX8025_REG_YEAR] = bin2bcd(dt->tm_year % 100); + + dev_dbg(dev, + "%s: write 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n", + __func__, + date[0], date[1], date[2], date[3], date[4], date[5], date[6]); + + return rx8025_write_regs(rx8025->client, RX8025_REG_SEC, 7, date); +} + +static int rx8025_init_client(struct i2c_client *client, int *need_reset) +{ + struct rx8025_data *rx8025 = i2c_get_clientdata(client); + u8 ctrl[2], ctrl2; + int need_clear = 0; + int err; + + err = rx8025_read_regs(rx8025->client, RX8025_REG_CTRL1, 2, ctrl); + if (err) + goto out; + + /* Keep test bit zero ! */ + rx8025->ctrl1 = ctrl[0] & ~RX8025_BIT_CTRL1_TEST; + + if (ctrl[1] & RX8025_BIT_CTRL2_PON) { + dev_warn(&client->dev, "power-on reset was detected, " + "you may have to readjust the clock\n"); + *need_reset = 1; + } + + if (ctrl[1] & RX8025_BIT_CTRL2_VDET) { + dev_warn(&client->dev, "a power voltage drop was detected, " + "you may have to readjust the clock\n"); + *need_reset = 1; + } + + if (!(ctrl[1] & RX8025_BIT_CTRL2_XST)) { + dev_warn(&client->dev, "Oscillation stop was detected," + "you may have to readjust the clock\n"); + *need_reset = 1; + } + + if (ctrl[1] & (RX8025_BIT_CTRL2_DAFG | RX8025_BIT_CTRL2_WAFG)) { + dev_warn(&client->dev, "Alarm was detected\n"); + need_clear = 1; + } + + if (!(ctrl[1] & RX8025_BIT_CTRL2_CTFG)) + need_clear = 1; + + if (*need_reset || need_clear) { + ctrl2 = ctrl[0]; + ctrl2 &= ~(RX8025_BIT_CTRL2_PON | RX8025_BIT_CTRL2_VDET | + RX8025_BIT_CTRL2_CTFG | RX8025_BIT_CTRL2_WAFG | + RX8025_BIT_CTRL2_DAFG); + ctrl2 |= RX8025_BIT_CTRL2_XST; + + err = rx8025_write_reg(client, RX8025_REG_CTRL2, ctrl2); + } +out: + return err; +} + +/* Alarm support */ +static int rx8025_read_alarm(struct device *dev, struct rtc_wkalrm *t) +{ + struct rx8025_data *rx8025 = dev_get_drvdata(dev); + struct i2c_client *client = rx8025->client; + u8 ctrl2, ald[2]; + int err; + + if (client->irq <= 0) + return -EINVAL; + + err = rx8025_read_regs(client, RX8025_REG_ALDMIN, 2, ald); + if (err) + return err; + + err = rx8025_read_reg(client, RX8025_REG_CTRL2, &ctrl2); + if (err) + return err; + + dev_dbg(dev, "%s: read alarm 0x%02x 0x%02x ctrl2 %02x\n", + __func__, ald[0], ald[1], ctrl2); + + /* Hardware alarms precision is 1 minute! */ + t->time.tm_sec = 0; + t->time.tm_min = bcd2bin(ald[0] & 0x7f); + if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224) + t->time.tm_hour = bcd2bin(ald[1] & 0x3f); + else + t->time.tm_hour = bcd2bin(ald[1] & 0x1f) % 12 + + (ald[1] & 0x20 ? 
12 : 0); + + t->time.tm_wday = -1; + t->time.tm_mday = -1; + t->time.tm_mon = -1; + t->time.tm_year = -1; + + dev_dbg(dev, "%s: date: %ds %dm %dh %dmd %dm %dy\n", + __func__, + t->time.tm_sec, t->time.tm_min, t->time.tm_hour, + t->time.tm_mday, t->time.tm_mon, t->time.tm_year); + t->enabled = !!(rx8025->ctrl1 & RX8025_BIT_CTRL1_DALE); + t->pending = (ctrl2 & RX8025_BIT_CTRL2_DAFG) && t->enabled; + + return err; +} + +static int rx8025_set_alarm(struct device *dev, struct rtc_wkalrm *t) +{ + struct i2c_client *client = to_i2c_client(dev); + struct rx8025_data *rx8025 = dev_get_drvdata(dev); + u8 ald[2]; + int err; + + if (client->irq <= 0) + return -EINVAL; + + /* Hardware alarm precision is 1 minute! */ + ald[0] = bin2bcd(t->time.tm_min); + if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224) + ald[1] = bin2bcd(t->time.tm_hour); + else + ald[1] = (t->time.tm_hour >= 12 ? 0x20 : 0) + | bin2bcd((t->time.tm_hour + 11) % 12 + 1); + + dev_dbg(dev, "%s: write 0x%02x 0x%02x\n", __func__, ald[0], ald[1]); + + if (rx8025->ctrl1 & RX8025_BIT_CTRL1_DALE) { + rx8025->ctrl1 &= ~RX8025_BIT_CTRL1_DALE; + err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1, + rx8025->ctrl1); + if (err) + return err; + } + err = rx8025_write_regs(rx8025->client, RX8025_REG_ALDMIN, 2, ald); + if (err) + return err; + + if (t->enabled) { + rx8025->ctrl1 |= RX8025_BIT_CTRL1_DALE; + err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1, + rx8025->ctrl1); + if (err) + return err; + } + + return 0; +} + +static int rx8025_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + struct rx8025_data *rx8025 = dev_get_drvdata(dev); + u8 ctrl1; + int err; + + ctrl1 = rx8025->ctrl1; + if (enabled) + ctrl1 |= RX8025_BIT_CTRL1_DALE; + else + ctrl1 &= ~RX8025_BIT_CTRL1_DALE; + + if (ctrl1 != rx8025->ctrl1) { + rx8025->ctrl1 = ctrl1; + err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1, + rx8025->ctrl1); + if (err) + return err; + } + return 0; +} + +static int rx8025_irq_set_state(struct device *dev, int enabled) +{ + struct i2c_client *client = to_i2c_client(dev); + struct rx8025_data *rx8025 = i2c_get_clientdata(client); + int ctrl1; + int err; + + if (client->irq <= 0) + return -ENXIO; + + ctrl1 = rx8025->ctrl1 & ~RX8025_BIT_CTRL1_CT; + if (enabled) + ctrl1 |= RX8025_BIT_CTRL1_CT_1HZ; + if (ctrl1 != rx8025->ctrl1) { + rx8025->ctrl1 = ctrl1; + err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1, + rx8025->ctrl1); + if (err) + return err; + } + + return 0; +} + +static struct rtc_class_ops rx8025_rtc_ops = { + .read_time = rx8025_get_time, + .set_time = rx8025_set_time, + .read_alarm = rx8025_read_alarm, + .set_alarm = rx8025_set_alarm, + .alarm_irq_enable = rx8025_alarm_irq_enable, + .irq_set_state = rx8025_irq_set_state, +}; + +/* + * Clock precision adjustment support + * + * According to the RX8025 SA/NB application manual the frequency and + * temperature charateristics can be approximated using the following + * equation: + * + * df = a * (ut - t)**2 + * + * df: Frequency deviation in any temperature + * a : Coefficient = (-35 +-5) * 10**-9 + * ut: Ultimate temperature in degree = +25 +-5 degree + * t : Any temperature in degree + * + * Note that the clock adjustment in ppb must be entered (which is + * the negative value of the deviation). + */ +static int rx8025_get_clock_adjust(struct device *dev, int *adj) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 digoff; + int err; + + err = rx8025_read_reg(client, RX8025_REG_DIGOFF, &digoff); + if (err) + return err; + + *adj = digoff >= 64 ? 
digoff - 128 : digoff; + if (*adj > 0) + (*adj)--; + *adj *= -RX8025_ADJ_RESOLUTION; + + return 0; +} + +static int rx8025_set_clock_adjust(struct device *dev, int adj) +{ + struct i2c_client *client = to_i2c_client(dev); + u8 digoff; + int err; + + adj /= -RX8025_ADJ_RESOLUTION; + if (adj > RX8025_ADJ_DATA_MAX) + adj = RX8025_ADJ_DATA_MAX; + else if (adj < RX8025_ADJ_DATA_MIN) + adj = RX8025_ADJ_DATA_MIN; + else if (adj > 0) + adj++; + else if (adj < 0) + adj += 128; + digoff = adj; + + err = rx8025_write_reg(client, RX8025_REG_DIGOFF, digoff); + if (err) + return err; + + dev_dbg(dev, "%s: write 0x%02x\n", __func__, digoff); + + return 0; +} + +static ssize_t rx8025_sysfs_show_clock_adjust(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int err, adj; + + err = rx8025_get_clock_adjust(dev, &adj); + if (err) + return err; + + return sprintf(buf, "%d\n", adj); +} + +static ssize_t rx8025_sysfs_store_clock_adjust(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int adj, err; + + if (sscanf(buf, "%i", &adj) != 1) + return -EINVAL; + + err = rx8025_set_clock_adjust(dev, adj); + + return err ? err : count; +} + +static DEVICE_ATTR(clock_adjust_ppb, S_IRUGO | S_IWUSR, + rx8025_sysfs_show_clock_adjust, + rx8025_sysfs_store_clock_adjust); + +static int rx8025_sysfs_register(struct device *dev) +{ + return device_create_file(dev, &dev_attr_clock_adjust_ppb); +} + +static void rx8025_sysfs_unregister(struct device *dev) +{ + device_remove_file(dev, &dev_attr_clock_adjust_ppb); +} + +static int __devinit rx8025_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); + struct rx8025_data *rx8025; + int err, need_reset = 0; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA + | I2C_FUNC_SMBUS_I2C_BLOCK)) { + dev_err(&adapter->dev, + "doesn't support required functionality\n"); + err = -EIO; + goto errout; + } + + rx8025 = kzalloc(sizeof(*rx8025), GFP_KERNEL); + if (!rx8025) { + dev_err(&adapter->dev, "failed to alloc memory\n"); + err = -ENOMEM; + goto errout; + } + + rx8025->client = client; + i2c_set_clientdata(client, rx8025); + INIT_WORK(&rx8025->work, rx8025_work); + + err = rx8025_init_client(client, &need_reset); + if (err) + goto errout_free; + + if (need_reset) { + struct rtc_time tm; + dev_info(&client->dev, + "bad conditions detected, resetting date\n"); + rtc_time_to_tm(0, &tm); /* 1970/1/1 */ + rx8025_set_time(&client->dev, &tm); + } + + rx8025->rtc = rtc_device_register(client->name, &client->dev, + &rx8025_rtc_ops, THIS_MODULE); + if (IS_ERR(rx8025->rtc)) { + err = PTR_ERR(rx8025->rtc); + dev_err(&client->dev, "unable to register the class device\n"); + goto errout_free; + } + + if (client->irq > 0) { + dev_info(&client->dev, "IRQ %d supplied\n", client->irq); + err = request_irq(client->irq, rx8025_irq, + 0, "rx8025", client); + if (err) { + dev_err(&client->dev, "unable to request IRQ\n"); + goto errout_reg; + } + } + + rx8025->rtc->irq_freq = 1; + rx8025->rtc->max_user_freq = 1; + + err = rx8025_sysfs_register(&client->dev); + if (err) + goto errout_irq; + + return 0; + +errout_irq: + if (client->irq > 0) + free_irq(client->irq, client); + +errout_reg: + rtc_device_unregister(rx8025->rtc); + +errout_free: + i2c_set_clientdata(client, NULL); + kfree(rx8025); + +errout: + dev_err(&adapter->dev, "probing for rx8025 failed\n"); + return err; +} + +static int __devexit rx8025_remove(struct i2c_client *client) +{ + struct 
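
The DIGOFF encoding in the two clock-adjust helpers above is worth checking numerically: positive step codes start at 2 (codes 0 and 1 both mean no adjustment), and negative steps are stored offset by 128. This user-space sketch mirrors both directions and round-trips two sample ppb values:

#include <stdio.h>

#define RESOLUTION 3050		/* ppb per register step */

/* mirror of rx8025_set_clock_adjust(): ppb -> DIGOFF code */
static unsigned char encode(int adj)
{
	adj /= -RESOLUTION;
	if (adj > 62)
		adj = 62;
	else if (adj < -62)
		adj = -62;
	else if (adj > 0)
		adj++;		/* positive steps start at code 2 */
	else if (adj < 0)
		adj += 128;	/* negative steps live in 64..127 */
	return adj;
}

/* mirror of rx8025_get_clock_adjust(): DIGOFF code -> ppb */
static int decode(unsigned char digoff)
{
	int adj = digoff >= 64 ? digoff - 128 : digoff;

	if (adj > 0)
		adj--;
	return adj * -RESOLUTION;
}

int main(void)
{
	/* expect "3 -6100" and "126 6100": both values round-trip */
	printf("%u %d\n", encode(-6100), decode(encode(-6100)));
	printf("%u %d\n", encode(6100), decode(encode(6100)));
	return 0;
}
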
rx8025_data *rx8025 = i2c_get_clientdata(client); + struct mutex *lock = &rx8025->rtc->ops_lock; + + if (client->irq > 0) { + mutex_lock(lock); + rx8025->exiting = 1; + mutex_unlock(lock); + + free_irq(client->irq, client); + flush_scheduled_work(); + } + + rx8025_sysfs_unregister(&client->dev); + rtc_device_unregister(rx8025->rtc); + i2c_set_clientdata(client, NULL); + kfree(rx8025); + return 0; +} + +static struct i2c_driver rx8025_driver = { + .driver = { + .name = "rtc-rx8025", + .owner = THIS_MODULE, + }, + .probe = rx8025_probe, + .remove = __devexit_p(rx8025_remove), + .id_table = rx8025_id, +}; + +static int __init rx8025_init(void) +{ + return i2c_add_driver(&rx8025_driver); +} + +static void __exit rx8025_exit(void) +{ + i2c_del_driver(&rx8025_driver); +} + +MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); +MODULE_DESCRIPTION("RX-8025 SA/NB RTC driver"); +MODULE_LICENSE("GPL"); + +module_init(rx8025_init); +module_exit(rx8025_exit); diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c index 4ee4857..4a6ed11 100644 --- a/drivers/rtc/rtc-tx4939.c +++ b/drivers/rtc/rtc-tx4939.c @@ -261,10 +261,8 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev) tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP); if (devm_request_irq(&pdev->dev, irq, tx4939_rtc_interrupt, - IRQF_DISABLED | IRQF_SHARED, - pdev->name, &pdev->dev) < 0) { + IRQF_DISABLED, pdev->name, &pdev->dev) < 0) return -EBUSY; - } rtc = rtc_device_register(pdev->name, &pdev->dev, &tx4939_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 81d7f26..691cecd 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -655,7 +655,7 @@ static void qeth_l2_set_multicast_list(struct net_device *dev) for (dm = dev->mc_list; dm; dm = dm->next) qeth_l2_add_mc(card, dm->da_addr, 0); - list_for_each_entry(ha, &dev->uc_list, list) + list_for_each_entry(ha, &dev->uc.list, list) qeth_l2_add_mc(card, ha->addr, 1); spin_unlock_bh(&card->mclock); diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 3ac27ee..2ccbd18 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -470,6 +470,12 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) if (!adapter) return -ENOMEM; + adapter->gs = kzalloc(sizeof(struct zfcp_wka_ports), GFP_KERNEL); + if (!adapter->gs) { + kfree(adapter); + return -ENOMEM; + } + ccw_device->handler = NULL; adapter->ccw_device = ccw_device; atomic_set(&adapter->refcount, 0); @@ -523,8 +529,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) goto sysfs_failed; atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); - - zfcp_fc_nameserver_init(adapter); + zfcp_fc_wka_ports_init(adapter); if (!zfcp_adapter_scsi_register(adapter)) return 0; @@ -571,6 +576,7 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) kfree(adapter->req_list); kfree(adapter->fc_stats); kfree(adapter->stats_reset_data); + kfree(adapter->gs); kfree(adapter); } diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 2074d45..49d0532 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -22,6 +22,8 @@ #include <linux/syscalls.h> #include <linux/scatterlist.h> #include <linux/ioctl.h> +#include <scsi/fc/fc_fs.h> +#include <scsi/fc/fc_gs.h> #include <scsi/scsi.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_cmnd.h> @@ -29,6 +31,7 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_transport.h> #include 
<scsi/scsi_transport_fc.h> +#include <scsi/scsi_bsg_fc.h> #include <asm/ccwdev.h> #include <asm/qdio.h> #include <asm/debug.h> @@ -228,11 +231,6 @@ struct zfcp_ls_adisc { /* FC-PH/FC-GS well-known address identifiers for generic services */ #define ZFCP_DID_WKA 0xFFFFF0 -#define ZFCP_DID_MANAGEMENT_SERVICE 0xFFFFFA -#define ZFCP_DID_TIME_SERVICE 0xFFFFFB -#define ZFCP_DID_DIRECTORY_SERVICE 0xFFFFFC -#define ZFCP_DID_ALIAS_SERVICE 0xFFFFF8 -#define ZFCP_DID_KEY_DISTRIBUTION_SERVICE 0xFFFFF7 /* remote port status */ #define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001 @@ -376,6 +374,14 @@ struct zfcp_wka_port { struct delayed_work work; }; +struct zfcp_wka_ports { + struct zfcp_wka_port ms; /* management service */ + struct zfcp_wka_port ts; /* time service */ + struct zfcp_wka_port ds; /* directory service */ + struct zfcp_wka_port as; /* alias service */ + struct zfcp_wka_port ks; /* key distribution service */ +}; + struct zfcp_qdio_queue { struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; u8 first; /* index of next free bfr in queue */ @@ -461,7 +467,7 @@ struct zfcp_adapter { actions */ u32 erp_low_mem_count; /* nr of erp actions waiting for memory */ - struct zfcp_wka_port nsp; /* adapter's nameserver */ + struct zfcp_wka_ports *gs; /* generic services */ debug_info_t *rec_dbf; debug_info_t *hba_dbf; debug_info_t *san_dbf; /* debug feature areas */ diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index e50ea46..8030e25 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -719,7 +719,7 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act) zfcp_qdio_close(adapter); zfcp_fsf_req_dismiss_all(adapter); adapter->fsf_req_seq_no = 0; - zfcp_fc_wka_port_force_offline(&adapter->nsp); + zfcp_fc_wka_port_force_offline(&adapter->gs->ds); /* all ports and units are closed */ zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL, ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 120a9a1..3044c60 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -106,8 +106,12 @@ extern int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *); extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); extern void zfcp_test_link(struct zfcp_port *); extern void zfcp_fc_link_test_work(struct work_struct *); -extern void zfcp_fc_nameserver_init(struct zfcp_adapter *); extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *); +extern void zfcp_fc_wka_ports_init(struct zfcp_adapter *); +extern int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *); +extern int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *); +extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *); + /* zfcp_fsf.c */ extern int zfcp_fsf_open_port(struct zfcp_erp_action *); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 35493a8..2f0705d 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -120,14 +120,13 @@ static void zfcp_wka_port_put(struct zfcp_wka_port *wka_port) schedule_delayed_work(&wka_port->work, HZ / 100); } -void zfcp_fc_nameserver_init(struct zfcp_adapter *adapter) +static void zfcp_fc_wka_port_init(struct zfcp_wka_port *wka_port, u32 d_id, + struct zfcp_adapter *adapter) { - struct zfcp_wka_port *wka_port = &adapter->nsp; - init_waitqueue_head(&wka_port->completion_wq); wka_port->adapter = adapter; - wka_port->d_id = ZFCP_DID_DIRECTORY_SERVICE; + wka_port->d_id = d_id; wka_port->status = 
ZFCP_WKA_PORT_OFFLINE; atomic_set(&wka_port->refcount, 0); @@ -143,6 +142,17 @@ void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka) mutex_unlock(&wka->mutex); } +void zfcp_fc_wka_ports_init(struct zfcp_adapter *adapter) +{ + struct zfcp_wka_ports *gs = adapter->gs; + + zfcp_fc_wka_port_init(&gs->ms, FC_FID_MGMT_SERV, adapter); + zfcp_fc_wka_port_init(&gs->ts, FC_FID_TIME_SERV, adapter); + zfcp_fc_wka_port_init(&gs->ds, FC_FID_DIR_SERV, adapter); + zfcp_fc_wka_port_init(&gs->as, FC_FID_ALIASES, adapter); + zfcp_fc_wka_port_init(&gs->ks, FC_FID_SEC_KEY, adapter); +} + static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, struct fcp_rscn_element *elem) { @@ -282,7 +292,7 @@ int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action, /* setup parameters for send generic command */ gid_pn->port = erp_action->port; - gid_pn->ct.wka_port = &adapter->nsp; + gid_pn->ct.wka_port = &adapter->gs->ds; gid_pn->ct.handler = zfcp_fc_ns_handler; gid_pn->ct.handler_data = (unsigned long) &compl_rec; gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT; @@ -329,13 +339,13 @@ int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *erp_action) memset(gid_pn, 0, sizeof(*gid_pn)); - ret = zfcp_wka_port_get(&adapter->nsp); + ret = zfcp_wka_port_get(&adapter->gs->ds); if (ret) goto out; ret = zfcp_fc_ns_gid_pn_request(erp_action, gid_pn); - zfcp_wka_port_put(&adapter->nsp); + zfcp_wka_port_put(&adapter->gs->ds); out: mempool_free(gid_pn, adapter->pool.data_gid_pn); return ret; @@ -525,7 +535,7 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft, req->fc4_type = ZFCP_CT_SCSI_FCP; /* prepare zfcp_send_ct */ - ct->wka_port = &adapter->nsp; + ct->wka_port = &adapter->gs->ds; ct->handler = zfcp_fc_ns_handler; ct->handler_data = (unsigned long)&compl_rec; ct->timeout = 10; @@ -644,7 +654,7 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter) fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV) return 0; - ret = zfcp_wka_port_get(&adapter->nsp); + ret = zfcp_wka_port_get(&adapter->gs->ds); if (ret) return ret; @@ -666,7 +676,7 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter) } zfcp_free_sg_env(gpn_ft, buf_num); out: - zfcp_wka_port_put(&adapter->nsp); + zfcp_wka_port_put(&adapter->gs->ds); return ret; } @@ -675,3 +685,158 @@ void _zfcp_scan_ports_later(struct work_struct *work) { zfcp_scan_ports(container_of(work, struct zfcp_adapter, scan_work)); } + +struct zfcp_els_fc_job { + struct zfcp_send_els els; + struct fc_bsg_job *job; +}; + +static void zfcp_fc_generic_els_handler(unsigned long data) +{ + struct zfcp_els_fc_job *els_fc_job = (struct zfcp_els_fc_job *) data; + struct fc_bsg_job *job = els_fc_job->job; + struct fc_bsg_reply *reply = job->reply; + + if (els_fc_job->els.status) { + /* request rejected or timed out */ + reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_REJECT; + goto out; + } + + reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; + reply->reply_payload_rcv_len = job->reply_payload.payload_len; + +out: + job->state_flags = FC_RQST_STATE_DONE; + job->job_done(job); + kfree(els_fc_job); +} + +int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *job) +{ + struct zfcp_els_fc_job *els_fc_job; + struct fc_rport *rport = job->rport; + struct Scsi_Host *shost; + struct zfcp_adapter *adapter; + struct zfcp_port *port; + u8 *port_did; + + shost = rport ? 
rport_to_shost(rport) : job->shost; + adapter = (struct zfcp_adapter *)shost->hostdata[0]; + + if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN)) + return -EINVAL; + + els_fc_job = kzalloc(sizeof(struct zfcp_els_fc_job), GFP_KERNEL); + if (!els_fc_job) + return -ENOMEM; + + els_fc_job->els.adapter = adapter; + if (rport) { + read_lock_irq(&zfcp_data.config_lock); + port = rport->dd_data; + if (port) + els_fc_job->els.d_id = port->d_id; + read_unlock_irq(&zfcp_data.config_lock); + if (!port) { + kfree(els_fc_job); + return -EINVAL; + } + } else { + port_did = job->request->rqst_data.h_els.port_id; + els_fc_job->els.d_id = (port_did[0] << 16) + + (port_did[1] << 8) + port_did[2]; + } + + els_fc_job->els.req = job->request_payload.sg_list; + els_fc_job->els.resp = job->reply_payload.sg_list; + els_fc_job->els.handler = zfcp_fc_generic_els_handler; + els_fc_job->els.handler_data = (unsigned long) els_fc_job; + els_fc_job->job = job; + + return zfcp_fsf_send_els(&els_fc_job->els); +} + +struct zfcp_ct_fc_job { + struct zfcp_send_ct ct; + struct fc_bsg_job *job; +}; + +static void zfcp_fc_generic_ct_handler(unsigned long data) +{ + struct zfcp_ct_fc_job *ct_fc_job = (struct zfcp_ct_fc_job *) data; + struct fc_bsg_job *job = ct_fc_job->job; + + job->reply->reply_data.ctels_reply.status = ct_fc_job->ct.status ? + FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK; + job->reply->reply_payload_rcv_len = job->reply_payload.payload_len; + job->state_flags = FC_RQST_STATE_DONE; + job->job_done(job); + + zfcp_wka_port_put(ct_fc_job->ct.wka_port); + + kfree(ct_fc_job); +} + +int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job) +{ + int ret; + u8 gs_type; + struct fc_rport *rport = job->rport; + struct Scsi_Host *shost; + struct zfcp_adapter *adapter; + struct zfcp_ct_fc_job *ct_fc_job; + u32 preamble_word1; + + shost = rport ? 
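
When no rport is attached, zfcp_fc_execute_els_fc_job() above assembles the 24-bit FC destination ID from the three address bytes carried in the BSG request. The arithmetic, isolated with an example address:

#include <stdio.h>

int main(void)
{
	/* FC destination IDs are 24 bits, sent as three bytes;
	 * 0x65, 0x01, 0x2c assembles to 0x65012c */
	unsigned char port_id[3] = { 0x65, 0x01, 0x2c };
	unsigned int d_id;

	d_id = (port_id[0] << 16) + (port_id[1] << 8) + port_id[2];
	printf("0x%06x\n", d_id);
	return 0;
}
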
rport_to_shost(rport) : job->shost; + + adapter = (struct zfcp_adapter *)shost->hostdata[0]; + if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN)) + return -EINVAL; + + ct_fc_job = kzalloc(sizeof(struct zfcp_ct_fc_job), GFP_KERNEL); + if (!ct_fc_job) + return -ENOMEM; + + preamble_word1 = job->request->rqst_data.r_ct.preamble_word1; + gs_type = (preamble_word1 & 0xff000000) >> 24; + + switch (gs_type) { + case FC_FST_ALIAS: + ct_fc_job->ct.wka_port = &adapter->gs->as; + break; + case FC_FST_MGMT: + ct_fc_job->ct.wka_port = &adapter->gs->ms; + break; + case FC_FST_TIME: + ct_fc_job->ct.wka_port = &adapter->gs->ts; + break; + case FC_FST_DIR: + ct_fc_job->ct.wka_port = &adapter->gs->ds; + break; + default: + kfree(ct_fc_job); + return -EINVAL; /* no such service */ + } + + ret = zfcp_wka_port_get(ct_fc_job->ct.wka_port); + if (ret) { + kfree(ct_fc_job); + return ret; + } + + ct_fc_job->ct.req = job->request_payload.sg_list; + ct_fc_job->ct.resp = job->reply_payload.sg_list; + ct_fc_job->ct.timeout = ZFCP_FSF_REQUEST_TIMEOUT; + ct_fc_job->ct.handler = zfcp_fc_generic_ct_handler; + ct_fc_job->ct.handler_data = (unsigned long) ct_fc_job; + ct_fc_job->ct.completion = NULL; + ct_fc_job->job = job; + + ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL, NULL); + if (ret) { + kfree(ct_fc_job); + zfcp_wka_port_put(ct_fc_job->ct.wka_port); + } + return ret; +} diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index e6dae37..c57658f 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -1146,7 +1146,8 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) case FSF_RESPONSE_SIZE_TOO_LARGE: break; case FSF_ACCESS_DENIED: - zfcp_fsf_access_denied_port(req, port); + if (port) + zfcp_fsf_access_denied_port(req, port); break; case FSF_SBAL_MISMATCH: /* should never occure, avoided in zfcp_fsf_send_els */ diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 7d0da23..967ede7 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -623,6 +623,20 @@ void zfcp_scsi_scan(struct work_struct *work) zfcp_unit_put(unit); } +static int zfcp_execute_fc_job(struct fc_bsg_job *job) +{ + switch (job->request->msgcode) { + case FC_BSG_RPT_ELS: + case FC_BSG_HST_ELS_NOLOGIN: + return zfcp_fc_execute_els_fc_job(job); + case FC_BSG_RPT_CT: + case FC_BSG_HST_CT: + return zfcp_fc_execute_ct_fc_job(job); + default: + return -EINVAL; + } +} + struct fc_function_template zfcp_transport_functions = { .show_starget_port_id = 1, .show_starget_port_name = 1, @@ -644,6 +658,7 @@ struct fc_function_template zfcp_transport_functions = { .dev_loss_tmo_callbk = zfcp_scsi_dev_loss_tmo_callbk, .terminate_rport_io = zfcp_scsi_terminate_rport_io, .show_host_port_state = 1, + .bsg_request = zfcp_execute_fc_job, /* no functions registered for following dynamic attributes but directly set by LLDD */ .show_host_port_type = 1, diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c index 0f829b3..75b2331 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm.c +++ b/drivers/scsi/aic7xxx/aic79xx_osm.c @@ -627,19 +627,15 @@ ahd_linux_target_alloc(struct scsi_target *starget) starget->id, &tstate); if ((flags & CFPACKETIZED) == 0) { - /* Do not negotiate packetized transfers */ - spi_rd_strm(starget) = 0; - spi_pcomp_en(starget) = 0; - spi_rti(starget) = 0; - spi_wr_flow(starget) = 0; - spi_hold_mcs(starget) = 0; + /* don't negotiate packetized (IU) transfers */ + spi_max_iu(starget) = 0; } else { if 
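
zfcp_fc_execute_ct_fc_job() above routes a CT request by the GS type byte it pulls from the top octet of preamble_word1. A standalone illustration of the extraction, using a hypothetical preamble value; 0xfc would select the directory-service case if the FC_FST_* codes mirror the low bytes of the well-known addresses, as they appear to:

#include <stdio.h>

int main(void)
{
	/* hypothetical CT preamble: GS type sits in the top octet */
	unsigned int preamble_word1 = 0xfc020000;
	unsigned char gs_type = (preamble_word1 & 0xff000000) >> 24;

	printf("0x%02x\n", gs_type);	/* 0xfc */
	return 0;
}
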
((ahd->features & AHD_RTI) == 0) spi_rti(starget) = 0; } if ((flags & CFQAS) == 0) - spi_qas(starget) = 0; + spi_max_qas(starget) = 0; /* Transinfo values have been set to BIOS settings */ spi_max_width(starget) = (flags & CFWIDEB) ? 1 : 0; diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig index 820d428..b62b482 100644 --- a/drivers/scsi/bnx2i/Kconfig +++ b/drivers/scsi/bnx2i/Kconfig @@ -2,6 +2,7 @@ config SCSI_BNX2_ISCSI tristate "Broadcom NetXtreme II iSCSI support" select SCSI_ISCSI_ATTRS select CNIC + depends on PCI ---help--- This driver supports iSCSI offload for the Broadcom NetXtreme II devices. diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 5405698..1877d98 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -457,10 +457,6 @@ struct lpfc_hba { void (*lpfc_scsi_prep_cmnd) (struct lpfc_vport *, struct lpfc_scsi_buf *, struct lpfc_nodelist *); - int (*lpfc_scsi_prep_task_mgmt_cmd) - (struct lpfc_vport *, struct lpfc_scsi_buf *, - unsigned int, uint8_t); - /* IOCB interface function jump table entries */ int (*__lpfc_sli_issue_iocb) (struct lpfc_hba *, uint32_t, diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index d73e677..fc07be5 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -3113,6 +3113,9 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; + if (phba->sli_rev >= LPFC_SLI_REV4) + return -EPERM; + if ((off + count) > FF_REG_AREA_SIZE) return -ERANGE; @@ -3163,6 +3166,9 @@ sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; + if (phba->sli_rev >= LPFC_SLI_REV4) + return -EPERM; + if (off > FF_REG_AREA_SIZE) return -ERANGE; diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 1dbccfd..0e532f0 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1732,7 +1732,9 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag) uint32_t *ptr, str[4]; uint8_t *fwname; - if (vp->rev.rBit) { + if (phba->sli_rev == LPFC_SLI_REV4) + sprintf(fwrevision, "%s", vp->rev.opFwName); + else if (vp->rev.rBit) { if (psli->sli_flag & LPFC_SLI_ACTIVE) rev = vp->rev.sli2FwRev; else diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 6bdeb14..f72fdf2 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -168,6 +168,19 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, if (elsiocb == NULL) return NULL; + /* + * If this command is for fabric controller and HBA running + * in FIP mode send FLOGI, FDISC and LOGO as FIP frames. 
+ */ + if ((did == Fabric_DID) && + bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags) && + ((elscmd == ELS_CMD_FLOGI) || + (elscmd == ELS_CMD_FDISC) || + (elscmd == ELS_CMD_LOGO))) + elsiocb->iocb_flag |= LPFC_FIP_ELS; + else + elsiocb->iocb_flag &= ~LPFC_FIP_ELS; + icmd = &elsiocb->iocb; /* fill in BDEs for command */ @@ -6108,9 +6121,17 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, icmd->un.elsreq64.myID = 0; icmd->un.elsreq64.fl = 1; - /* For FDISC, Let FDISC rsp set the NPortID for this VPI */ - icmd->ulpCt_h = 1; - icmd->ulpCt_l = 0; + if (phba->sli_rev == LPFC_SLI_REV4) { + /* FDISC needs to be 1 for WQE VPI */ + elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1; + elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ; + /* Set the ulpContext to the vpi */ + elsiocb->iocb.ulpContext = vport->vpi + phba->vpi_base; + } else { + /* For FDISC, Let FDISC rsp set the NPortID for this VPI */ + icmd->ulpCt_h = 1; + icmd->ulpCt_l = 0; + } pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 35c41ae..ed46b24 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1197,6 +1197,11 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, { struct lpfc_fcf_conn_entry *conn_entry; + /* If FCF not available return 0 */ + if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) || + !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record)) + return 0; + if (!phba->cfg_enable_fip) { *boot_flag = 0; *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, @@ -1216,6 +1221,14 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, *boot_flag = 0; *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record); + + /* + * When there are no FCF connect entries, use driver's default + * addressing mode - FPMA. + */ + if (*addr_mode & LPFC_FCF_FPMA) + *addr_mode = LPFC_FCF_FPMA; + *vlan_id = 0xFFFF; return 1; } @@ -1241,6 +1254,14 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, } /* + * If connection record does not support any addressing mode, + * skip the FCF record. + */ + if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record) + & (LPFC_FCF_FPMA | LPFC_FCF_SPMA))) + continue; + + /* * Check if the connection record specifies a required * addressing mode. */ @@ -1272,6 +1293,11 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, else *boot_flag = 0; + /* + * If user did not specify any addressing mode, or if the + * prefered addressing mode specified by user is not supported + * by FCF, allow fabric to pick the addressing mode. + */ *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record); /* @@ -1297,12 +1323,6 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && (*addr_mode & LPFC_FCF_FPMA)) *addr_mode = LPFC_FCF_FPMA; - /* - * If user did not specify any addressing mode, use FPMA if - * possible else use SPMA. 
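
The SLI4 FDISC path above spreads a two-bit context-type code across the IOCB's ulpCt_h and ulpCt_l bits. Without assuming the numeric value of SLI4_CT_VPI, which the patch does not show, the split works as sketched here:

#include <stdio.h>

int main(void)
{
	unsigned int ct;

	/* ct stands in for SLI4_CT_VPI; ulpCt_h carries bit 1,
	 * ulpCt_l carries bit 0 */
	for (ct = 0; ct < 4; ct++)
		printf("ct=%u -> ulpCt_h=%u ulpCt_l=%u\n",
		       ct, (ct >> 1) & 1, ct & 1);
	return 0;
}
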
- */ - else if (*addr_mode & LPFC_FCF_FPMA) - *addr_mode = LPFC_FCF_FPMA; if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) *vlan_id = conn_entry->conn_rec.vlan_tag; @@ -1864,7 +1884,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) vport->fc_flag &= ~FC_BYPASSED_MODE; spin_unlock_irq(shost->host_lock); - if (((phba->fc_eventTag + 1) < la->eventTag) || + if ((phba->fc_eventTag < la->eventTag) || (phba->fc_eventTag == la->eventTag)) { phba->fc_stat.LinkMultiEvent++; if (la->attType == AT_LINK_UP) @@ -2925,6 +2945,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_no_rpi(phba, ndlp); ndlp->nlp_rpi = 0; ndlp->nlp_flag &= ~NLP_RPI_VALID; + ndlp->nlp_flag &= ~NLP_NPR_ADISC; return 1; } return 0; diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 02aa016..8a3a026 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1183,7 +1183,6 @@ typedef struct { #define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 #define PCI_VENDOR_ID_SERVERENGINE 0x19a2 #define PCI_DEVICE_ID_TIGERSHARK 0x0704 -#define PCI_DEVICE_ID_TIGERSHARK_S 0x0705 #define JEDEC_ID_ADDRESS 0x0080001c #define FIREFLY_JEDEC_ID 0x1ACC diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 39c34b3..2995d12 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -422,9 +422,9 @@ struct lpfc_wqe_generic{ #define lpfc_wqe_gen_pri_WORD word10 uint32_t word11; #define lpfc_wqe_gen_cq_id_SHIFT 16 -#define lpfc_wqe_gen_cq_id_MASK 0x000003FF +#define lpfc_wqe_gen_cq_id_MASK 0x0000FFFF #define lpfc_wqe_gen_cq_id_WORD word11 -#define LPFC_WQE_CQ_ID_DEFAULT 0x3ff +#define LPFC_WQE_CQ_ID_DEFAULT 0xffff #define lpfc_wqe_gen_wqec_SHIFT 7 #define lpfc_wqe_gen_wqec_MASK 0x00000001 #define lpfc_wqe_gen_wqec_WORD word11 @@ -1128,7 +1128,7 @@ struct fcf_record { #define lpfc_fcf_record_mac_5_WORD word4 #define lpfc_fcf_record_fcf_avail_SHIFT 16 #define lpfc_fcf_record_fcf_avail_MASK 0x000000FF -#define lpfc_fcf_record_fc_avail_WORD word4 +#define lpfc_fcf_record_fcf_avail_WORD word4 #define lpfc_fcf_record_mac_addr_prov_SHIFT 24 #define lpfc_fcf_record_mac_addr_prov_MASK 0x000000FF #define lpfc_fcf_record_mac_addr_prov_WORD word4 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 2f5907f..fc67cc6 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -428,7 +428,8 @@ lpfc_config_port_post(struct lpfc_hba *phba) /* Reset the DFT_HBA_Q_DEPTH to the max xri */ if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) phba->cfg_hba_queue_depth = - mb->un.varRdConfig.max_xri + 1; + (mb->un.varRdConfig.max_xri + 1) - + lpfc_sli4_get_els_iocb_cnt(phba); phba->lmt = mb->un.varRdConfig.lmt; @@ -1646,10 +1647,6 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) oneConnect = 1; m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"}; break; - case PCI_DEVICE_ID_TIGERSHARK_S: - oneConnect = 1; - m = (typeof(m)) {"OCe10100-F-S", max_speed, "PCIe"}; - break; default: m = (typeof(m)){ NULL }; break; @@ -3543,6 +3540,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) /* Free the allocated rpi headers. 
*/ lpfc_sli4_remove_rpi_hdrs(phba); + lpfc_sli4_remove_rpis(phba); /* Free the ELS sgl list */ lpfc_free_active_sgl(phba); @@ -7184,16 +7182,19 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) { int max_xri = phba->sli4_hba.max_cfg_param.max_xri; - if (max_xri <= 100) - return 4; - else if (max_xri <= 256) - return 8; - else if (max_xri <= 512) - return 16; - else if (max_xri <= 1024) - return 32; - else - return 48; + if (phba->sli_rev == LPFC_SLI_REV4) { + if (max_xri <= 100) + return 4; + else if (max_xri <= 256) + return 8; + else if (max_xri <= 512) + return 16; + else if (max_xri <= 1024) + return 32; + else + return 48; + } else + return 0; } /** @@ -7642,7 +7643,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) switch (dev_id) { case PCI_DEVICE_ID_TIGERSHARK: - case PCI_DEVICE_ID_TIGERSHARK_S: rc = lpfc_pci_probe_one_s4(pdev, pid); break; default: @@ -7941,8 +7941,6 @@ static struct pci_device_id lpfc_id_table[] = { PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S, - PCI_ANY_ID, PCI_ANY_ID, }, { 0 } }; diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index b9b451c..3423571 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -1631,6 +1631,7 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox, /* In case of malloc fails, proceed with whatever we have */ if (!viraddr) break; + memset(viraddr, 0, PAGE_SIZE); mbox->sge_array->addr[pagen] = viraddr; /* Keep the first page for later sub-header construction */ if (pagen == 0) @@ -1715,8 +1716,10 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq) /* Set up host requested features. */ bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1); - /* Virtual fabrics and FIPs are not supported yet. */ - bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0); + if (phba->cfg_enable_fip) + bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0); + else + bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 1); /* Enable DIF (block guard) only if configured to do so. */ if (phba->cfg_enable_bg) diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 09f659f..3e74136 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -497,7 +497,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); else lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); - if ((ndlp->nlp_type & NLP_FABRIC) && + if ((ndlp->nlp_DID == Fabric_DID) && vport->port_type == LPFC_NPIV_PORT) { lpfc_linkdown_port(vport); mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 7991ba1..da59c4f 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -116,6 +116,27 @@ lpfc_debug_save_dif(struct scsi_cmnd *cmnd) } /** + * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge. + * @phba: Pointer to HBA object. + * @lpfc_cmd: lpfc scsi command object pointer. + * + * This function is called from the lpfc_prep_task_mgmt_cmd function to + * set the last bit in the response sge entry. 
+ **/ +static void +lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba, + struct lpfc_scsi_buf *lpfc_cmd) +{ + struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; + if (sgl) { + sgl += 1; + sgl->word2 = le32_to_cpu(sgl->word2); + bf_set(lpfc_sli4_sge_last, sgl, 1); + sgl->word2 = cpu_to_le32(sgl->word2); + } +} + +/** * lpfc_update_stats - Update statistical data for the command completion * @phba: Pointer to HBA object. * @lpfc_cmd: lpfc scsi command object pointer. @@ -1978,7 +1999,7 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, } /** - * lpfc_scsi_unprep_dma_buf_s3 - Un-map DMA mapping of SG-list for SLI3 dev + * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev * @phba: The HBA for which this call is being executed. * @psb: The scsi buffer which is going to be un-mapped. * @@ -1986,7 +2007,7 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, * field of @lpfc_cmd for device with SLI-3 interface spec. **/ static void -lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) +lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) { /* * There are only two special cases to consider. (1) the scsi command @@ -2003,36 +2024,6 @@ lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) } /** - * lpfc_scsi_unprep_dma_buf_s4 - Un-map DMA mapping of SG-list for SLI4 dev - * @phba: The Hba for which this call is being executed. - * @psb: The scsi buffer which is going to be un-mapped. - * - * This routine does DMA un-mapping of scatter gather list of scsi command - * field of @lpfc_cmd for device with SLI-4 interface spec. If we have to - * remove the sgl for this scsi buffer then we will do it here. For now - * we should be able to just call the sli3 unprep routine. - **/ -static void -lpfc_scsi_unprep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) -{ - lpfc_scsi_unprep_dma_buf_s3(phba, psb); -} - -/** - * lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list - * @phba: The Hba for which this call is being executed. - * @psb: The scsi buffer which is going to be un-mapped. - * - * This routine does DMA un-mapping of scatter gather list of scsi command - * field of @lpfc_cmd for device with SLI-4 interface spec. - **/ -static void -lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) -{ - phba->lpfc_scsi_unprep_dma_buf(phba, psb); -} - -/** * lpfc_handler_fcp_err - FCP response handler * @vport: The virtual port for which this call is being executed. * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. @@ -2461,7 +2452,7 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd) } /** - * lpfc_scsi_prep_cmnd_s3 - Convert scsi cmnd to FCP infor unit for SLI3 dev + * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit * @vport: The virtual port for which this call is being executed. * @lpfc_cmd: The scsi command which needs to send. * @pnode: Pointer to lpfc_nodelist. @@ -2470,7 +2461,7 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd) * to transfer for device with SLI3 interface spec. 
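lpfc_sli4_set_rsp_sgl_last above uses the usual read-modify-write discipline for a hardware descriptor word: convert from little-endian to CPU order, set the bit, convert back. A userspace sketch of the same round trip, assuming glibc's <endian.h> helpers and an illustrative bit position for the last flag:

#include <endian.h>
#include <stdint.h>

#define SGE_LAST_BIT (1u << 31) /* assumed bit position, for illustration */

/* Mark a little-endian SGE word as the final entry of its list. */
static void sge_set_last(uint32_t *word2_le)
{
        uint32_t v = le32toh(*word2_le); /* hardware order -> CPU order */
        v |= SGE_LAST_BIT;
        *word2_le = htole32(v);          /* CPU order -> hardware order */
}

int main(void)
{
        uint32_t w = 0;
        sge_set_last(&w);
        return w == htole32(SGE_LAST_BIT) ? 0 : 1;
}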
**/ static void -lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, +lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_nodelist *pnode) { struct lpfc_hba *phba = vport->phba; @@ -2558,46 +2549,7 @@ lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, } /** - * lpfc_scsi_prep_cmnd_s4 - Convert scsi cmnd to FCP infor unit for SLI4 dev - * @vport: The virtual port for which this call is being executed. - * @lpfc_cmd: The scsi command which needs to send. - * @pnode: Pointer to lpfc_nodelist. - * - * This routine initializes fcp_cmnd and iocb data structure from scsi command - * to transfer for device with SLI4 interface spec. - **/ -static void -lpfc_scsi_prep_cmnd_s4(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, - struct lpfc_nodelist *pnode) -{ - /* - * The prep cmnd routines do not touch the sgl or its - * entries. We may not have to do anything different. - * I will leave this function in place until we can - * run some IO through the driver and determine if changes - * are needed. - */ - return lpfc_scsi_prep_cmnd_s3(vport, lpfc_cmd, pnode); -} - -/** - * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit - * @vport: The virtual port for which this call is being executed. - * @lpfc_cmd: The scsi command which needs to send. - * @pnode: Pointer to lpfc_nodelist. - * - * This routine wraps the actual convert SCSI cmnd function pointer from - * the lpfc_hba struct. - **/ -static inline void -lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, - struct lpfc_nodelist *pnode) -{ - vport->phba->lpfc_scsi_prep_cmnd(vport, lpfc_cmd, pnode); -} - -/** - * lpfc_scsi_prep_task_mgmt_cmnd_s3 - Convert SLI3 scsi TM cmd to FCP info unit + * lpfc_scsi_prep_task_mgmt_cmnd - Convert SLI3 scsi TM cmd to FCP info unit * @vport: The virtual port for which this call is being executed. * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. * @lun: Logical unit number. @@ -2611,7 +2563,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, * 1 - Success **/ static int -lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport, +lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, unsigned int lun, uint8_t task_mgmt_cmd) @@ -2653,68 +2605,13 @@ lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport, * The driver will provide the timeout mechanism. */ piocb->ulpTimeout = 0; - } else { + } else piocb->ulpTimeout = lpfc_cmd->timeout; - } - - return 1; -} - -/** - * lpfc_scsi_prep_task_mgmt_cmnd_s4 - Convert SLI4 scsi TM cmd to FCP info unit - * @vport: The virtual port for which this call is being executed. - * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. - * @lun: Logical unit number. - * @task_mgmt_cmd: SCSI task management command. - * - * This routine creates FCP information unit corresponding to @task_mgmt_cmd - * for device with SLI-4 interface spec. - * - * Return codes: - * 0 - Error - * 1 - Success - **/ -static int -lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport, - struct lpfc_scsi_buf *lpfc_cmd, - unsigned int lun, - uint8_t task_mgmt_cmd) -{ - /* - * The prep cmnd routines do not touch the sgl or its - * entries. We may not have to do anything different. - * I will leave this function in place until we can - * run some IO through the driver and determine if changes - * are needed. 
- */ - return lpfc_scsi_prep_task_mgmt_cmd_s3(vport, lpfc_cmd, lun, - task_mgmt_cmd); -} -/** - * lpfc_scsi_prep_task_mgmt_cmnd - Wrapper func convert scsi TM cmd to FCP info - * @vport: The virtual port for which this call is being executed. - * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. - * @lun: Logical unit number. - * @task_mgmt_cmd: SCSI task management command. - * - * This routine wraps the actual convert SCSI TM to FCP information unit - * function pointer from the lpfc_hba struct. - * - * Return codes: - * 0 - Error - * 1 - Success - **/ -static inline int -lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, - struct lpfc_scsi_buf *lpfc_cmd, - unsigned int lun, - uint8_t task_mgmt_cmd) -{ - struct lpfc_hba *phba = vport->phba; + if (vport->phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd); - return phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun, - task_mgmt_cmd); + return 1; } /** @@ -2730,23 +2627,19 @@ int lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) { + phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf; + phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd; + phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf; + switch (dev_grp) { case LPFC_PCI_DEV_LP: phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3; phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3; - phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s3; - phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s3; - phba->lpfc_scsi_prep_task_mgmt_cmd = - lpfc_scsi_prep_task_mgmt_cmd_s3; phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; break; case LPFC_PCI_DEV_OC: phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4; phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4; - phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s4; - phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s4; - phba->lpfc_scsi_prep_task_mgmt_cmd = - lpfc_scsi_prep_task_mgmt_cmd_s4; phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4; break; default: @@ -2783,72 +2676,6 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba, } /** - * lpfc_scsi_tgt_reset - Target reset handler - * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure - * @vport: The virtual port for which this call is being executed. - * @tgt_id: Target ID. - * @lun: Lun number. - * @rdata: Pointer to lpfc_rport_data. - * - * This routine issues a TARGET RESET iocb to reset a target with @tgt_id ID. - * - * Return Code: - * 0x2003 - Error - * 0x2002 - Success. 
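The wrappers deleted above (and the lpfc_scsi_api_table_setup hunk later in this file) replace per-revision function-pointer indirection with one shared routine plus a small sli_rev branch, keeping pointers only where SLI-3 and SLI-4 genuinely differ. A toy model of that refactor; all names are illustrative:

#include <stdio.h>

struct hba {
        int sli_rev;
        int (*prep_dma_buf)(struct hba *); /* kept: behaviour really differs */
};

static int prep_dma_buf_s3(struct hba *h) { (void)h; return 3; }
static int prep_dma_buf_s4(struct hba *h) { (void)h; return 4; }

/* One shared routine replaces the old _s3/_s4 pair and their wrapper. */
static int prep_task_mgmt_cmd(struct hba *h)
{
        /* ... common setup, identical for both revisions ... */
        if (h->sli_rev == 4) {
                /* revision-specific fixup, e.g. set the last response SGE */
        }
        return 1;
}

int main(void)
{
        struct hba h = { .sli_rev = 4 };
        h.prep_dma_buf = (h.sli_rev == 4) ? prep_dma_buf_s4 : prep_dma_buf_s3;
        printf("%d %d\n", h.prep_dma_buf(&h), prep_task_mgmt_cmd(&h));
        return 0;
}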
- **/ -static int -lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport, - unsigned tgt_id, unsigned int lun, - struct lpfc_rport_data *rdata) -{ - struct lpfc_hba *phba = vport->phba; - struct lpfc_iocbq *iocbq; - struct lpfc_iocbq *iocbqrsp; - int ret; - int status; - - if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode)) - return FAILED; - - lpfc_cmd->rdata = rdata; - status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun, - FCP_TARGET_RESET); - if (!status) - return FAILED; - - iocbq = &lpfc_cmd->cur_iocbq; - iocbqrsp = lpfc_sli_get_iocbq(phba); - - if (!iocbqrsp) - return FAILED; - - /* Issue Target Reset to TGT <num> */ - lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, - "0702 Issue Target Reset to TGT %d Data: x%x x%x\n", - tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); - status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, - iocbq, iocbqrsp, lpfc_cmd->timeout); - if (status != IOCB_SUCCESS) { - if (status == IOCB_TIMEDOUT) { - iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; - ret = TIMEOUT_ERROR; - } else - ret = FAILED; - lpfc_cmd->status = IOSTAT_DRIVER_REJECT; - } else { - ret = SUCCESS; - lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4]; - lpfc_cmd->status = iocbqrsp->iocb.ulpStatus; - if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && - (lpfc_cmd->result & IOERR_DRVR_MASK)) - lpfc_cmd->status = IOSTAT_DRIVER_REJECT; - } - - lpfc_sli_release_iocbq(phba, iocbqrsp); - return ret; -} - -/** * lpfc_info - Info entry point of scsi_host_template data structure * @host: The scsi host for which this call is being executed. * @@ -3228,156 +3055,334 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) return ret; } +static char * +lpfc_taskmgmt_name(uint8_t task_mgmt_cmd) +{ + switch (task_mgmt_cmd) { + case FCP_ABORT_TASK_SET: + return "ABORT_TASK_SET"; + case FCP_CLEAR_TASK_SET: + return "FCP_CLEAR_TASK_SET"; + case FCP_BUS_RESET: + return "FCP_BUS_RESET"; + case FCP_LUN_RESET: + return "FCP_LUN_RESET"; + case FCP_TARGET_RESET: + return "FCP_TARGET_RESET"; + case FCP_CLEAR_ACA: + return "FCP_CLEAR_ACA"; + case FCP_TERMINATE_TASK: + return "FCP_TERMINATE_TASK"; + default: + return "unknown"; + } +} + /** - * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point - * @cmnd: Pointer to scsi_cmnd data structure. + * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler + * @vport: The virtual port for which this call is being executed. + * @rdata: Pointer to remote port local data + * @tgt_id: Target ID of remote device. + * @lun_id: Lun number for the TMF + * @task_mgmt_cmd: type of TMF to send * - * This routine does a device reset by sending a TARGET_RESET task management - * command. + * This routine builds and sends a TMF (SCSI Task Mgmt Function) to + * a remote port. * - * Return code : - * 0x2003 - Error - * 0x2002 - Success + * Return Code: + * 0x2003 - Error + * 0x2002 - Success. 
**/ static int -lpfc_device_reset_handler(struct scsi_cmnd *cmnd) +lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, + unsigned tgt_id, unsigned int lun_id, + uint8_t task_mgmt_cmd) { - struct Scsi_Host *shost = cmnd->device->host; - struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; struct lpfc_scsi_buf *lpfc_cmd; - struct lpfc_iocbq *iocbq, *iocbqrsp; - struct lpfc_rport_data *rdata = cmnd->device->hostdata; - struct lpfc_nodelist *pnode = rdata->pnode; - unsigned long later; - int ret = SUCCESS; + struct lpfc_iocbq *iocbq; + struct lpfc_iocbq *iocbqrsp; + int ret; int status; - int cnt; - struct lpfc_scsi_event_header scsi_event; - lpfc_block_error_handler(cmnd); - /* - * If target is not in a MAPPED state, delay the reset until - * target is rediscovered or devloss timeout expires. - */ - later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; - while (time_after(later, jiffies)) { - if (!pnode || !NLP_CHK_NODE_ACT(pnode)) - return FAILED; - if (pnode->nlp_state == NLP_STE_MAPPED_NODE) - break; - schedule_timeout_uninterruptible(msecs_to_jiffies(500)); - rdata = cmnd->device->hostdata; - if (!rdata) - break; - pnode = rdata->pnode; - } - - scsi_event.event_type = FC_REG_SCSI_EVENT; - scsi_event.subcategory = LPFC_EVENT_TGTRESET; - scsi_event.lun = 0; - memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); - memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); - - fc_host_post_vendor_event(shost, - fc_get_event_number(), - sizeof(scsi_event), - (char *)&scsi_event, - LPFC_NL_VENDOR_ID); - - if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, - "0721 LUN Reset rport " - "failure: msec x%x rdata x%p\n", - jiffies_to_msecs(jiffies - later), rdata); + if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode)) return FAILED; - } + lpfc_cmd = lpfc_get_scsi_buf(phba); if (lpfc_cmd == NULL) return FAILED; lpfc_cmd->timeout = 60; lpfc_cmd->rdata = rdata; - status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, - cmnd->device->lun, - FCP_TARGET_RESET); + status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, + task_mgmt_cmd); if (!status) { lpfc_release_scsi_buf(phba, lpfc_cmd); return FAILED; } - iocbq = &lpfc_cmd->cur_iocbq; - /* get a buffer for this IOCB command response */ + iocbq = &lpfc_cmd->cur_iocbq; iocbqrsp = lpfc_sli_get_iocbq(phba); if (iocbqrsp == NULL) { lpfc_release_scsi_buf(phba, lpfc_cmd); return FAILED; } + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, - "0703 Issue target reset to TGT %d LUN %d " - "rpi x%x nlp_flag x%x\n", cmnd->device->id, - cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); + "0702 Issue %s to TGT %d LUN %d " + "rpi x%x nlp_flag x%x\n", + lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, + rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); + status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, iocbq, iocbqrsp, lpfc_cmd->timeout); - if (status == IOCB_TIMEDOUT) { - iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; - ret = TIMEOUT_ERROR; - } else { - if (status != IOCB_SUCCESS) + if (status != IOCB_SUCCESS) { + if (status == IOCB_TIMEDOUT) { + iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; + ret = TIMEOUT_ERROR; + } else ret = FAILED; - lpfc_release_scsi_buf(phba, lpfc_cmd); - } - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, - "0713 SCSI layer issued device reset (%d, %d) " - "return x%x status x%x result x%x\n", - cmnd->device->id, cmnd->device->lun, ret, - iocbqrsp->iocb.ulpStatus, + 
lpfc_cmd->status = IOSTAT_DRIVER_REJECT; + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n", + lpfc_taskmgmt_name(task_mgmt_cmd), + tgt_id, lun_id, iocbqrsp->iocb.ulpStatus, iocbqrsp->iocb.un.ulpWord[4]); + } else + ret = SUCCESS; + lpfc_sli_release_iocbq(phba, iocbqrsp); - cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun, - LPFC_CTX_TGT); + + if (ret != TIMEOUT_ERROR) + lpfc_release_scsi_buf(phba, lpfc_cmd); + + return ret; +} + +/** + * lpfc_chk_tgt_mapped - Wait for the scsi target to become mapped + * @vport: The virtual port to check on + * @cmnd: Pointer to scsi_cmnd data structure. + * + * This routine delays until the scsi target (aka rport) for the + * command exists (is present and logged in) or we declare it non-existent. + * + * Return code : + * 0x2003 - Error + * 0x2002 - Success + **/ +static int +lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd) +{ + struct lpfc_rport_data *rdata = cmnd->device->hostdata; + struct lpfc_nodelist *pnode = rdata->pnode; + unsigned long later; + + /* + * If target is not in a MAPPED state, delay until + * target is rediscovered or devloss timeout expires. + */ + later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; + while (time_after(later, jiffies)) { + if (!pnode || !NLP_CHK_NODE_ACT(pnode)) + return FAILED; + if (pnode->nlp_state == NLP_STE_MAPPED_NODE) + return SUCCESS; + schedule_timeout_uninterruptible(msecs_to_jiffies(500)); + rdata = cmnd->device->hostdata; + if (!rdata) + return FAILED; + pnode = rdata->pnode; + } + if (!pnode || !NLP_CHK_NODE_ACT(pnode) || + (pnode->nlp_state != NLP_STE_MAPPED_NODE)) + return FAILED; + return SUCCESS; +} + +/** + * lpfc_reset_flush_io_context - Flush and abort leftover i/o after a reset + * @vport: The virtual port (scsi_host) for the flush context + * @tgt_id: If aborting by Target context - specifies the target id + * @lun_id: If aborting by Lun context - specifies the lun id + * @context: specifies the context level to flush at. + * + * After a reset condition via TMF, we need to flush orphaned i/o + * contexts from the adapter. This routine aborts any contexts + * outstanding, then waits for their completions. The wait is + * bounded by devloss_tmo though. + * + * Return code : + * 0x2003 - Error + * 0x2002 - Success + **/ +static int +lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id, + uint64_t lun_id, lpfc_ctx_cmd context) +{ + struct lpfc_hba *phba = vport->phba; + unsigned long later; + int cnt; + + cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); + if (cnt) + lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], + tgt_id, lun_id, context); + later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; + while (time_after(later, jiffies) && cnt) { + schedule_timeout_uninterruptible(msecs_to_jiffies(20)); + cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); + } + if (cnt) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "0724 I/O flush failure for context %s : cnt x%x\n", + ((context == LPFC_CTX_LUN) ? "LUN" : + ((context == LPFC_CTX_TGT) ? "TGT" : + ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))), + cnt); + return FAILED; + } + return SUCCESS; +} + +/** + * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point + * @cmnd: Pointer to scsi_cmnd data structure.
+ * + * This routine does a device reset by sending a LUN_RESET task management + * command. + * + * Return code : + * 0x2003 - Error + * 0x2002 - Success + **/ +static int +lpfc_device_reset_handler(struct scsi_cmnd *cmnd) +{ + struct Scsi_Host *shost = cmnd->device->host; + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_rport_data *rdata = cmnd->device->hostdata; + struct lpfc_nodelist *pnode = rdata->pnode; + unsigned tgt_id = cmnd->device->id; + unsigned int lun_id = cmnd->device->lun; + struct lpfc_scsi_event_header scsi_event; + int status; + + lpfc_block_error_handler(cmnd); + + status = lpfc_chk_tgt_mapped(vport, cmnd); + if (status == FAILED) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "0721 Device Reset rport failure: rdata x%p\n", rdata); + return FAILED; + } + + scsi_event.event_type = FC_REG_SCSI_EVENT; + scsi_event.subcategory = LPFC_EVENT_LUNRESET; + scsi_event.lun = lun_id; + memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); + memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); + + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); + + status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id, + FCP_LUN_RESET); + + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "0713 SCSI layer issued Device Reset (%d, %d) " + "return x%x\n", tgt_id, lun_id, status); + + /* + * We have to clean up i/o as : they may be orphaned by the TMF; + * or if the TMF failed, they may be in an indeterminate state. + * So, continue on. + * We will report success if all the i/o aborts successfully. + */ + status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, + LPFC_CTX_LUN); + return status; +} + +/** + * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point + * @cmnd: Pointer to scsi_cmnd data structure. + * + * This routine does a target reset by sending a TARGET_RESET task management + * command. + * + * Return code : + * 0x2003 - Error + * 0x2002 - Success + **/ +static int +lpfc_target_reset_handler(struct scsi_cmnd *cmnd) +{ + struct Scsi_Host *shost = cmnd->device->host; + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_rport_data *rdata = cmnd->device->hostdata; + struct lpfc_nodelist *pnode = rdata->pnode; + unsigned tgt_id = cmnd->device->id; + unsigned int lun_id = cmnd->device->lun; + struct lpfc_scsi_event_header scsi_event; + int status; + + lpfc_block_error_handler(cmnd); + + status = lpfc_chk_tgt_mapped(vport, cmnd); + if (status == FAILED) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "0722 Target Reset rport failure: rdata x%p\n", rdata); + return FAILED; + } + + scsi_event.event_type = FC_REG_SCSI_EVENT; + scsi_event.subcategory = LPFC_EVENT_TGTRESET; + scsi_event.lun = 0; + memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); + memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); + + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); + + status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id, + FCP_TARGET_RESET); + + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "0723 SCSI layer issued Target Reset (%d, %d) " + "return x%x\n", tgt_id, lun_id, status); + + /* + * We have to clean up i/o as : they may be orphaned by the TMF; + * or if the TMF failed, they may be in an indeterminate state. + * So, continue on. + * We will report success if all the i/o aborts successfully. 
+ */ + status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, + LPFC_CTX_TGT); + return status; } /** * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point * @cmnd: Pointer to scsi_cmnd data structure. * - * This routine does target reset to all target on @cmnd->device->host. + * This routine does target reset to all targets on @cmnd->device->host. + * This emulates Parallel SCSI Bus Reset Semantics. * - * Return Code: - * 0x2003 - Error - * 0x2002 - Success + * Return code : + * 0x2003 - Error + * 0x2002 - Success **/ static int lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) { struct Scsi_Host *shost = cmnd->device->host; struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; - struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp = NULL; - int match; - int ret = SUCCESS, status = SUCCESS, i; - int cnt; - struct lpfc_scsi_buf * lpfc_cmd; - unsigned long later; struct lpfc_scsi_event_header scsi_event; + int match; + int ret = SUCCESS, status, i; scsi_event.event_type = FC_REG_SCSI_EVENT; scsi_event.subcategory = LPFC_EVENT_BUSRESET; @@ -3385,13 +3390,11 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name)); memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name)); - fc_host_post_vendor_event(shost, - fc_get_event_number(), - sizeof(scsi_event), - (char *)&scsi_event, - LPFC_NL_VENDOR_ID); + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); lpfc_block_error_handler(cmnd); + /* * Since the driver manages a single bus device, reset all * targets known to the driver. Should any target reset @@ -3414,16 +3417,11 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) spin_unlock_irq(shost->host_lock); if (!match) continue; - lpfc_cmd = lpfc_get_scsi_buf(phba); - if (lpfc_cmd) { - lpfc_cmd->timeout = 60; - status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i, - cmnd->device->lun, - ndlp->rport->dd_data); - if (status != TIMEOUT_ERROR) - lpfc_release_scsi_buf(phba, lpfc_cmd); - } - if (!lpfc_cmd || status != SUCCESS) { + + status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data, + i, 0, FCP_TARGET_RESET); + + if (status != SUCCESS) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, "0700 Bus Reset on target %d failed\n", i); @@ -3431,25 +3429,16 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) } } /* - * All outstanding txcmplq I/Os should have been aborted by - * the targets. Unfortunately, some targets do not abide by - * this forcing the driver to double check. + * We have to clean up i/o as : they may be orphaned by the TMFs + * above; or if any of the TMFs failed, they may be in an + * indeterminate state. + * We will report success if all the i/o aborts successfully. 
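All three handlers above now share one shape: wait for the target to be mapped, send the TMF, then unconditionally flush leftover i/o and report success only if the flush succeeds. A compilable outline with stub helpers standing in for the real routines:

#include <stdio.h>

#define SUCCESS 0x2002
#define FAILED  0x2003

/* Stubs standing in for lpfc_chk_tgt_mapped, lpfc_send_taskmgmt and
 * lpfc_reset_flush_io_context. */
static int chk_tgt_mapped(void)           { return SUCCESS; }
static int send_taskmgmt(const char *tmf) { printf("TMF %s\n", tmf); return SUCCESS; }
static int flush_io_context(void)         { return SUCCESS; }

/* The shared shape of the device- and target-reset handlers. */
static int reset_handler(const char *tmf)
{
        if (chk_tgt_mapped() == FAILED)
                return FAILED;
        send_taskmgmt(tmf);        /* result is logged, not returned */
        return flush_io_context(); /* success only if the flush succeeds */
}

int main(void)
{
        return reset_handler("LUN_RESET") == SUCCESS ? 0 : 1;
}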
*/ - cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST); - if (cnt) - lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], - 0, 0, LPFC_CTX_HOST); - later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; - while (time_after(later, jiffies) && cnt) { - schedule_timeout_uninterruptible(msecs_to_jiffies(20)); - cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST); - } - if (cnt) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, - "0715 Bus Reset I/O flush failure: " - "cnt x%x left x%x\n", cnt, i); + + status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST); + if (status != SUCCESS) ret = FAILED; - } + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, "0714 SCSI layer issued Bus Reset Data: x%x\n", ret); return ret; @@ -3582,7 +3571,8 @@ struct scsi_host_template lpfc_template = { .info = lpfc_info, .queuecommand = lpfc_queuecommand, .eh_abort_handler = lpfc_abort_handler, - .eh_device_reset_handler= lpfc_device_reset_handler, + .eh_device_reset_handler = lpfc_device_reset_handler, + .eh_target_reset_handler = lpfc_target_reset_handler, .eh_bus_reset_handler = lpfc_bus_reset_handler, .slave_alloc = lpfc_slave_alloc, .slave_configure = lpfc_slave_configure, @@ -3602,7 +3592,8 @@ struct scsi_host_template lpfc_vport_template = { .info = lpfc_info, .queuecommand = lpfc_queuecommand, .eh_abort_handler = lpfc_abort_handler, - .eh_device_reset_handler= lpfc_device_reset_handler, + .eh_device_reset_handler = lpfc_device_reset_handler, + .eh_target_reset_handler = lpfc_target_reset_handler, .eh_bus_reset_handler = lpfc_bus_reset_handler, .slave_alloc = lpfc_slave_alloc, .slave_configure = lpfc_slave_configure, diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index ff04daf..acc43b0 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -4139,8 +4139,11 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba, return -EIO; } data_length = mqe->un.mb_words[5]; - if (data_length > DMP_FCOEPARAM_RGN_SIZE) + if (data_length > DMP_FCOEPARAM_RGN_SIZE) { + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); return -EIO; + } lpfc_parse_fcoe_conf(phba, mp->virt, data_length); lpfc_mbuf_free(phba, mp->virt, mp->phys); @@ -4211,27 +4214,6 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, return -EIO; } - lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, - "(%d):0380 Mailbox cmd x%x Status x%x " - "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " - "x%x x%x x%x x%x x%x x%x x%x x%x x%x " - "CQ: x%x x%x x%x x%x\n", - mboxq->vport ? mboxq->vport->vpi : 0, - bf_get(lpfc_mqe_command, mqe), - bf_get(lpfc_mqe_status, mqe), - mqe->un.mb_words[0], mqe->un.mb_words[1], - mqe->un.mb_words[2], mqe->un.mb_words[3], - mqe->un.mb_words[4], mqe->un.mb_words[5], - mqe->un.mb_words[6], mqe->un.mb_words[7], - mqe->un.mb_words[8], mqe->un.mb_words[9], - mqe->un.mb_words[10], mqe->un.mb_words[11], - mqe->un.mb_words[12], mqe->un.mb_words[13], - mqe->un.mb_words[14], mqe->un.mb_words[15], - mqe->un.mb_words[16], mqe->un.mb_words[50], - mboxq->mcqe.word0, - mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, - mboxq->mcqe.trailer); - /* * The available vpd length cannot be bigger than the * DMA buffer passed to the port. 
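The lpfc_sli4_read_fcoe_params hunk above is a classic early-return leak fix: once the DMA buffer is allocated, every exit path must release it. The shape of the fix in plain C; malloc/free stand in for lpfc_mbuf_free and the numeric error values are illustrative:

#include <stdlib.h>

struct buf { char data[64]; };

/* Once the buffer exists, every early return must free it. */
static int parse_params(size_t data_len, size_t region_size)
{
        struct buf *mp = malloc(sizeof(*mp));
        if (!mp)
                return -12;        /* stands in for -ENOMEM */
        if (data_len > region_size) {
                free(mp);          /* the cleanup the patch adds */
                return -5;         /* stands in for -EIO */
        }
        /* ... parse the contents ... */
        free(mp);
        return 0;
}

int main(void)
{
        return parse_params(10, 4) == -5 ? 0 : 1;
}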
Catch the less than @@ -4337,21 +4319,18 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) goto out_free_vpd; mqe = &mboxq->u.mqe; - if ((bf_get(lpfc_mbx_rd_rev_sli_lvl, - &mqe->un.read_rev) != LPFC_SLI_REV4) || - (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev) == 0)) { + phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); + if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) + phba->hba_flag |= HBA_FCOE_SUPPORT; + if (phba->sli_rev != LPFC_SLI_REV4 || + !(phba->hba_flag & HBA_FCOE_SUPPORT)) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "0376 READ_REV Error. SLI Level %d " "FCoE enabled %d\n", - bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev), - bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)); + phba->sli_rev, phba->hba_flag & HBA_FCOE_SUPPORT); rc = -EIO; goto out_free_vpd; } - /* Single threaded at this point, no need for lock */ - spin_lock_irq(&phba->hbalock); - phba->hba_flag |= HBA_FCOE_SUPPORT; - spin_unlock_irq(&phba->hbalock); /* * Evaluate the read rev and vpd data. Populate the driver * state with the results. If this routine fails, the failure @@ -4365,8 +4344,32 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) rc = 0; } - /* By now, we should determine the SLI revision, hard code for now */ - phba->sli_rev = LPFC_SLI_REV4; + /* Save information as VPD data */ + phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; + phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; + phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; + phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, + &mqe->un.read_rev); + phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, + &mqe->un.read_rev); + phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, + &mqe->un.read_rev); + phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, + &mqe->un.read_rev); + phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; + memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); + phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; + memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); + phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; + memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "(%d):0380 READ_REV Status x%x " + "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + bf_get(lpfc_mqe_status, mqe), + phba->vpd.rev.opFwName, + phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, + phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); /* * Discover the port's supported feature set and match it against the @@ -4491,8 +4494,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) rc = -ENODEV; goto out_free_vpd; } - /* Temporary initialization of lpfc_fip_flag to non-fip */ - bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0); + if (phba->cfg_enable_fip) + bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 1); + else + bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0); /* Set up all the queues to the device */ rc = lpfc_sli4_queue_setup(phba); @@ -5030,6 +5035,92 @@ out_not_finished: } /** + * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command + * @phba: Pointer to HBA context object. + * + * The function blocks the posting of SLI4 asynchronous mailbox commands from + * the driver internal pending mailbox queue. It will then try to wait out the + * possible outstanding mailbox command before return. 
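lpfc_sli4_async_mbox_block and the matching unblock routine (next hunks) let the MBX_POLL path quiesce asynchronous posting, issue one synchronous mailbox command, and resume. A sketch of that bracket with stub functions; the names are illustrative:

#include <stdio.h>

static int async_blocked;

static int  mbox_block(void)     { async_blocked = 1; return 0; /* or fail */ }
static void mbox_unblock(void)   { async_blocked = 0; }
static int  post_sync_mbox(void) { return 0; }

/* The bracket the new MBX_POLL path uses. */
static int issue_sync_ahead_of_async(void)
{
        int rc = mbox_block();
        if (rc)
                return rc;     /* could not quiesce async posting */
        rc = post_sync_mbox();
        mbox_unblock();        /* always resume, even on failure */
        return rc;
}

int main(void)
{
        return issue_sync_ahead_of_async();
}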
+ * + * Returns: + * 0 - the outstanding mailbox command completed; otherwise, the wait for + * the outstanding mailbox command timed out. + **/ +static int +lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + uint8_t actcmd = MBX_HEARTBEAT; + int rc = 0; + unsigned long timeout; + + /* Mark the asynchronous mailbox command posting as blocked */ + spin_lock_irq(&phba->hbalock); + psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; + if (phba->sli.mbox_active) + actcmd = phba->sli.mbox_active->u.mb.mbxCommand; + spin_unlock_irq(&phba->hbalock); + /* Determine how long we might wait for the active mailbox + * command to be gracefully completed by firmware. + */ + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) + + jiffies; + /* Wait for the outstanding mailbox command to complete */ + while (phba->sli.mbox_active) { + /* Check active mailbox complete status every 2ms */ + msleep(2); + if (time_after(jiffies, timeout)) { + /* Timeout, mark the outstanding cmd as not complete */ + rc = 1; + break; + } + } + + /* Cannot cleanly block async mailbox command posting, fail it */ + if (rc) { + spin_lock_irq(&phba->hbalock); + psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; + spin_unlock_irq(&phba->hbalock); + } + return rc; +} + +/** + * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command + * @phba: Pointer to HBA context object. + * + * The function unblocks and resumes posting of SLI4 asynchronous mailbox + * commands from the driver internal pending mailbox queue. It makes sure + * that there is no outstanding mailbox command before resuming posting + * asynchronous mailbox commands. If, for any reason, there is an outstanding + * mailbox command, it will try to wait it out before resuming asynchronous + * mailbox command posting. + **/ +static void +lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + + spin_lock_irq(&phba->hbalock); + if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { + /* Asynchronous mailbox posting is not blocked, do nothing */ + spin_unlock_irq(&phba->hbalock); + return; + } + + /* An outstanding synchronous mailbox command is guaranteed to be done, + * either successfully or by timeout; after a timeout the outstanding + * command is always removed, so just unblock posting of async mailbox + * commands and resume. + */ + psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; + spin_unlock_irq(&phba->hbalock); + + /* wake up worker thread to post asynchronous mailbox command */ + lpfc_worker_wake_up(phba); +} + +/** + * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox + * @phba: Pointer to HBA context object. + * @mboxq: Pointer to mailbox object. @@ -5204,14 +5295,35 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, psli->sli_flag, flag); return rc; } else if (flag == MBX_POLL) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "(%d):2542 Mailbox command x%x (x%x) " - "cannot issue Data: x%x x%x\n", + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "(%d):2542 Try to issue mailbox command " + "x%x (x%x) synchronously ahead of async " + "mailbox command queue: x%x x%x\n", mboxq->vport ? 
mboxq->vport->vpi : 0, mboxq->u.mb.mbxCommand, lpfc_sli4_mbox_opcode_get(phba, mboxq), psli->sli_flag, flag); - return -EIO; + /* Try to block the asynchronous mailbox posting */ + rc = lpfc_sli4_async_mbox_block(phba); + if (!rc) { + /* Successfully blocked, now issue sync mbox cmd */ + rc = lpfc_sli4_post_sync_mbox(phba, mboxq); + if (rc != MBX_SUCCESS) + lpfc_printf_log(phba, KERN_ERR, + LOG_MBOX | LOG_SLI, + "(%d):2597 Mailbox command " + "x%x (x%x) cannot issue " + "Data: x%x x%x\n", + mboxq->vport ? + mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, + mboxq), + psli->sli_flag, flag); + /* Unblock the async mailbox posting afterward */ + lpfc_sli4_async_mbox_unblock(phba); + } + return rc; } /* Now, interrupt mode asynchrous mailbox command */ @@ -5749,18 +5861,13 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags); /* The fcp commands will set command type */ - if ((!(iocbq->iocb_flag & LPFC_IO_FCP)) && (!fip)) - command_type = ELS_COMMAND_NON_FIP; - else if (!(iocbq->iocb_flag & LPFC_IO_FCP)) - command_type = ELS_COMMAND_FIP; - else if (iocbq->iocb_flag & LPFC_IO_FCP) + if (iocbq->iocb_flag & LPFC_IO_FCP) command_type = FCP_COMMAND; - else { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2019 Invalid cmd 0x%x\n", - iocbq->iocb.ulpCommand); - return IOCB_ERROR; - } + else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS)) + command_type = ELS_COMMAND_FIP; + else + command_type = ELS_COMMAND_NON_FIP; + /* Some of the fields are in the right position already */ memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); abort_tag = (uint32_t) iocbq->iotag; @@ -5814,11 +5921,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(lpfc_wqe_gen_context, &wqe->generic, iocbq->iocb.ulpContext); - if (iocbq->vport->fc_myDID != 0) { - bf_set(els_req64_sid, &wqe->els_req, - iocbq->vport->fc_myDID); - bf_set(els_req64_sp, &wqe->els_req, 1); - } bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct); bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); /* CCP CCPE PV PRI in word10 were set in the memcpy */ @@ -5877,14 +5979,19 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, * is set and we are sending our 2nd or greater command on * this exchange. */ + /* Always open the exchange */ + bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); - /* ALLOW read & write to fall through to ICMD64 */ + wqe->words[10] &= 0xffff0000; /* zero out ebde count */ + bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); + break; case CMD_FCP_ICMND64_CR: /* Always open the exchange */ bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); + wqe->words[4] = 0; wqe->words[10] &= 0xffff0000; /* zero out ebde count */ - bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); + bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); break; case CMD_GEN_REQUEST64_CR: /* word3 command length is described as byte offset to the @@ -7247,6 +7354,32 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, } /** + * lpfc_chk_iocb_flg - Test IOCB flag with lock held. + * @phba: Pointer to HBA context object. + * @piocbq: Pointer to command iocb. + * @flag: Flag to test. + * + * This routine grabs the hbalock and then tests the iocb_flag to + * see if the passed in flag is set. + * Returns: + * 1 if flag is set. + * 0 if flag is not set. 
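lpfc_chk_iocb_flg exists so the wait_event_timeout() condition added later in this hunk samples iocb_flag under the hbalock, rather than racing an unlocked read against the completion path that updates the same word. A userspace analogue of the locked test using POSIX threads:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned flags;

/* Sample a flag word only under the lock, as lpfc_chk_iocb_flg does,
 * so the waiter never races the completer's read-modify-write. */
static bool chk_flag(unsigned flag)
{
        bool set;

        pthread_mutex_lock(&lock);
        set = (flags & flag) != 0;
        pthread_mutex_unlock(&lock);
        return set;
}

int main(void)
{
        flags = 0x4; /* pretend the completion side set the wake bit */
        return chk_flag(0x4) ? 0 : 1;
}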
+ **/ +static int +lpfc_chk_iocb_flg(struct lpfc_hba *phba, + struct lpfc_iocbq *piocbq, uint32_t flag) +{ + unsigned long iflags; + int ret; + + spin_lock_irqsave(&phba->hbalock, iflags); + ret = piocbq->iocb_flag & flag; + spin_unlock_irqrestore(&phba->hbalock, iflags); + return ret; + +} + +/** * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands * @phba: Pointer to HBA context object.. * @pring: Pointer to sli ring. @@ -7313,7 +7446,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, if (retval == IOCB_SUCCESS) { timeout_req = timeout * HZ; timeleft = wait_event_timeout(done_q, - piocb->iocb_flag & LPFC_IO_WAKE, + lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), timeout_req); if (piocb->iocb_flag & LPFC_IO_WAKE) { @@ -7498,20 +7631,16 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba) if ((HS_FFER1 & phba->work_hs) && ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | HS_FFER6 | HS_FFER7) & phba->work_hs)) { - spin_lock_irq(&phba->hbalock); phba->hba_flag |= DEFER_ERATT; - spin_unlock_irq(&phba->hbalock); /* Clear all interrupt enable conditions */ writel(0, phba->HCregaddr); readl(phba->HCregaddr); } /* Set the driver HA work bitmap */ - spin_lock_irq(&phba->hbalock); phba->work_ha |= HA_ERATT; /* Indicate polling handles this ERATT */ phba->hba_flag |= HBA_ERATT_HANDLED; - spin_unlock_irq(&phba->hbalock); return 1; } return 0; @@ -7557,12 +7686,10 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) return 0; phba->work_status[0] = uerr_sta_lo; phba->work_status[1] = uerr_sta_hi; - spin_lock_irq(&phba->hbalock); /* Set the driver HA work bitmap */ phba->work_ha |= HA_ERATT; /* Indicate polling handles this ERATT */ phba->hba_flag |= HBA_ERATT_HANDLED; - spin_unlock_irq(&phba->hbalock); return 1; } } @@ -9245,6 +9372,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, kfree(dmabuf); goto out_fail; } + memset(dmabuf->virt, 0, PAGE_SIZE); dmabuf->buffer_tag = x; list_add_tail(&dmabuf->list, &queue->page_list); /* initialize queue's entry array */ @@ -9667,7 +9795,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, /* link the wq onto the parent cq child list */ list_add_tail(&wq->list, &cq->child_list); out: - if (rc == MBX_TIMEOUT) + if (rc != MBX_TIMEOUT) mempool_free(mbox, phba->mbox_mem_pool); return status; } @@ -11020,10 +11148,7 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) rpi_page->start_rpi); hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); - if (!phba->sli4_hba.intr_enable) - rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); - else - rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); @@ -11363,6 +11488,7 @@ lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); + bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1); bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, LPFC_FCF_FPMA | LPFC_FCF_SPMA); diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 7d37eb7..3c53316 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h 
@@ -56,6 +56,7 @@ struct lpfc_iocbq { #define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ #define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ #define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ +#define LPFC_FIP_ELS 0x40 uint8_t abort_count; uint8_t rsvd2; diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 5196b466..3b276b4 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -229,7 +229,7 @@ struct lpfc_bmbx { #define LPFC_EQE_DEF_COUNT 1024 #define LPFC_CQE_DEF_COUNT 256 -#define LPFC_WQE_DEF_COUNT 64 +#define LPFC_WQE_DEF_COUNT 256 #define LPFC_MQE_DEF_COUNT 16 #define LPFC_RQE_DEF_COUNT 512 diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 6b8a148..41094e0 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.2" +#define LPFC_DRIVER_VERSION "8.3.3" #define LPFC_DRIVER_NAME "lpfc" #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index a6313ee..e0b4992 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -695,8 +695,6 @@ lpfc_vport_delete(struct fc_vport *fc_vport) } vport->unreg_vpi_cmpl = VPORT_INVAL; timeout = msecs_to_jiffies(phba->fc_ratov * 2000); - if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) - goto skip_logo; if (!lpfc_issue_els_npiv_logo(vport, ndlp)) while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout) timeout = schedule_timeout(timeout); diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c index 3b7240e..e3c482a 100644 --- a/drivers/scsi/ncr53c8xx.c +++ b/drivers/scsi/ncr53c8xx.c @@ -5444,7 +5444,7 @@ static void ncr_getsync(struct ncb *np, u_char sfac, u_char *fakp, u_char *scntl ** input speed faster than the period. 
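This ncr53c8xx change and the nsp_cs.c timeout fixes just below both hinge on decrement placement: with --div > 0 the loop can no longer exit with div at -1 (a negative table index), and with --time_out != 0 the counter ends at exactly 0 on timeout, so the later time_out checks are meaningful. A small demonstration:

#include <stdio.h>

int main(void)
{
        int t = 3;

        while (t-- != 0)
                ; /* post-decrement: exits with t == -1 */
        printf("post: %d\n", t);

        t = 3;
        while (--t != 0)
                ; /* pre-decrement: exits with t == 0 */
        printf("pre: %d\n", t);
        return 0;
}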
*/ kpc = per * clk; - while (--div >= 0) + while (--div > 0) if (kpc >= (div_10M[div] << 2)) break; /* diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index 11a61ea..70b60ad 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c @@ -530,7 +530,7 @@ static int nsp_negate_signal(struct scsi_cmnd *SCpnt, unsigned char mask, if (reg == 0xff) { break; } - } while ((time_out-- != 0) && (reg & mask) != 0); + } while ((--time_out != 0) && (reg & mask) != 0); if (time_out == 0) { nsp_msg(KERN_DEBUG, " %s signal off timeut", str); @@ -801,7 +801,7 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt) data->FifoCount = ocount; - if (time_out == 0) { + if (time_out < 0) { nsp_msg(KERN_DEBUG, "pio read timeout resid=%d this_residual=%d buffers_residual=%d", scsi_get_resid(SCpnt), SCpnt->SCp.this_residual, SCpnt->SCp.buffers_residual); @@ -897,7 +897,7 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt) data->FifoCount = ocount; - if (time_out == 0) { + if (time_out < 0) { nsp_msg(KERN_DEBUG, "pio write timeout resid=0x%x", scsi_get_resid(SCpnt)); } diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index c8d0a17..245e7af 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -37,6 +37,7 @@ qla2100_intr_handler(int irq, void *dev_id) uint16_t hccr; uint16_t mb[4]; struct rsp_que *rsp; + unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { @@ -49,7 +50,7 @@ qla2100_intr_handler(int irq, void *dev_id) reg = &ha->iobase->isp; status = 0; - spin_lock(&ha->hardware_lock); + spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { hccr = RD_REG_WORD(®->hccr); @@ -101,7 +102,7 @@ qla2100_intr_handler(int irq, void *dev_id) RD_REG_WORD(®->hccr); } } - spin_unlock(&ha->hardware_lock); + spin_unlock_irqrestore(&ha->hardware_lock, flags); if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && (status & MBX_INTERRUPT) && ha->flags.mbox_int) { @@ -133,6 +134,7 @@ qla2300_intr_handler(int irq, void *dev_id) uint16_t mb[4]; struct rsp_que *rsp; struct qla_hw_data *ha; + unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { @@ -145,7 +147,7 @@ qla2300_intr_handler(int irq, void *dev_id) reg = &ha->iobase->isp; status = 0; - spin_lock(&ha->hardware_lock); + spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { stat = RD_REG_DWORD(®->u.isp2300.host_status); @@ -216,7 +218,7 @@ qla2300_intr_handler(int irq, void *dev_id) WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); RD_REG_WORD_RELAXED(®->hccr); } - spin_unlock(&ha->hardware_lock); + spin_unlock_irqrestore(&ha->hardware_lock, flags); if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && (status & MBX_INTERRUPT) && ha->flags.mbox_int) { @@ -1626,6 +1628,7 @@ qla24xx_intr_handler(int irq, void *dev_id) uint32_t hccr; uint16_t mb[4]; struct rsp_que *rsp; + unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { @@ -1638,7 +1641,7 @@ qla24xx_intr_handler(int irq, void *dev_id) reg = &ha->iobase->isp24; status = 0; - spin_lock(&ha->hardware_lock); + spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { stat = RD_REG_DWORD(®->host_status); @@ -1688,7 +1691,7 @@ qla24xx_intr_handler(int irq, void *dev_id) WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); RD_REG_DWORD_RELAXED(®->hccr); } - spin_unlock(&ha->hardware_lock); + spin_unlock_irqrestore(&ha->hardware_lock, flags); if (test_bit(MBX_INTR_WAIT, 
&ha->mbx_cmd_flags) && (status & MBX_INTERRUPT) && ha->flags.mbox_int) { diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 6260505..010e69b 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -945,7 +945,9 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx " "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt, - vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id), + (unsigned long long)vid.port_name, + (unsigned long long)vid.node_name, + le16_to_cpu(entry->vf_id), entry->q_qos, entry->f_qos)); if (i < QLA_PRECONFIG_VPORTS) { @@ -954,7 +956,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) qla_printk(KERN_INFO, ha, "NPIV-Config: Failed to create vport [%02x]: " "wwpn=%llx wwnn=%llx.\n", cnt, - vid.port_name, vid.node_name); + (unsigned long long)vid.port_name, + (unsigned long long)vid.node_name); } } done: diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index b134813..8821df9 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -225,6 +225,7 @@ static struct { {"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, + {"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index a152f89..3f64d93 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -35,6 +35,7 @@ #include <linux/netlink.h> #include <net/netlink.h> #include <scsi/scsi_netlink_fc.h> +#include <scsi/scsi_bsg_fc.h> #include "scsi_priv.h" #include "scsi_transport_fc_internal.h" @@ -43,6 +44,10 @@ static void fc_vport_sched_delete(struct work_struct *work); static int fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev, struct fc_vport_identifiers *ids, struct fc_vport **vport); +static int fc_bsg_hostadd(struct Scsi_Host *, struct fc_host_attrs *); +static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *); +static void fc_bsg_remove(struct request_queue *); +static void fc_bsg_goose_queue(struct fc_rport *); /* * Redefine so that we can have same named attributes in the @@ -411,13 +416,26 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev, return -ENOMEM; } + fc_bsg_hostadd(shost, fc_host); + /* ignore any bsg add error - we just can't do sgio */ + + return 0; +} + +static int fc_host_remove(struct transport_container *tc, struct device *dev, + struct device *cdev) +{ + struct Scsi_Host *shost = dev_to_shost(dev); + struct fc_host_attrs *fc_host = shost_to_fc_host(shost); + + fc_bsg_remove(fc_host->rqst_q); return 0; } static DECLARE_TRANSPORT_CLASS(fc_host_class, "fc_host", fc_host_setup, - NULL, + fc_host_remove, NULL); /* @@ -2375,6 +2393,7 @@ fc_rport_final_delete(struct work_struct *work) scsi_flush_work(shost); fc_terminate_rport_io(rport); + /* * Cancel any outstanding timers. 
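The qla2xxx hunks above convert the hardware-lock acquisitions in the interrupt handlers from spin_lock() to spin_lock_irqsave(); presumably the motivation is that these handlers can now be entered with interrupts enabled, so they must save and restore the interrupt state themselves. A minimal kernel-style fragment (not a complete loadable module) showing the pattern:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(hw_lock);

static irqreturn_t demo_intr(int irq, void *dev_id)
{
        unsigned long flags;

        /* Save and restore interrupt state instead of assuming
         * interrupts are already disabled on entry. */
        spin_lock_irqsave(&hw_lock, flags);
        /* ... read status registers, complete commands ... */
        spin_unlock_irqrestore(&hw_lock, flags);
        return IRQ_HANDLED;
}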
These should really exist * only when rmmod'ing the LLDD and we're asking for @@ -2407,6 +2426,8 @@ fc_rport_final_delete(struct work_struct *work) (i->f->dev_loss_tmo_callbk)) i->f->dev_loss_tmo_callbk(rport); + fc_bsg_remove(rport->rqst_q); + transport_remove_device(dev); device_del(dev); transport_destroy_device(dev); @@ -2494,6 +2515,9 @@ fc_rport_create(struct Scsi_Host *shost, int channel, transport_add_device(dev); transport_configure_device(dev); + fc_bsg_rportadd(shost, rport); + /* ignore any bsg add error - we just can't do sgio */ + if (rport->roles & FC_PORT_ROLE_FCP_TARGET) { /* initiate a scan of the target */ rport->flags |= FC_RPORT_SCAN_PENDING; @@ -2658,6 +2682,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel, spin_unlock_irqrestore(shost->host_lock, flags); + fc_bsg_goose_queue(rport); + return rport; } } @@ -3343,6 +3369,592 @@ fc_vport_sched_delete(struct work_struct *work) } +/* + * BSG support + */ + + +/** + * fc_destroy_bsgjob - routine to teardown/delete a fc bsg job + * @job: fc_bsg_job that is to be torn down + */ +static void +fc_destroy_bsgjob(struct fc_bsg_job *job) +{ + unsigned long flags; + + spin_lock_irqsave(&job->job_lock, flags); + if (job->ref_cnt) { + spin_unlock_irqrestore(&job->job_lock, flags); + return; + } + spin_unlock_irqrestore(&job->job_lock, flags); + + put_device(job->dev); /* release reference for the request */ + + kfree(job->request_payload.sg_list); + kfree(job->reply_payload.sg_list); + kfree(job); +} + + +/** + * fc_bsg_jobdone - completion routine for bsg requests that the LLD has + * completed + * @job: fc_bsg_job that is complete + */ +static void +fc_bsg_jobdone(struct fc_bsg_job *job) +{ + struct request *req = job->req; + struct request *rsp = req->next_rq; + unsigned long flags; + int err; + + spin_lock_irqsave(&job->job_lock, flags); + job->state_flags |= FC_RQST_STATE_DONE; + job->ref_cnt--; + spin_unlock_irqrestore(&job->job_lock, flags); + + err = job->req->errors = job->reply->result; + if (err < 0) + /* we're only returning the result field in the reply */ + job->req->sense_len = sizeof(uint32_t); + else + job->req->sense_len = job->reply_len; + + /* we assume all request payload was transferred, residual == 0 */ + req->resid_len = 0; + + if (rsp) { + WARN_ON(job->reply->reply_payload_rcv_len > rsp->resid_len); + + /* set reply (bidi) residual */ + rsp->resid_len -= min(job->reply->reply_payload_rcv_len, + rsp->resid_len); + } + + blk_end_request_all(req, err); + + fc_destroy_bsgjob(job); +} + + +/** + * fc_bsg_job_timeout - handler for when a bsg request timesout + * @req: request that timed out + */ +static enum blk_eh_timer_return +fc_bsg_job_timeout(struct request *req) +{ + struct fc_bsg_job *job = (void *) req->special; + struct Scsi_Host *shost = job->shost; + struct fc_internal *i = to_fc_internal(shost->transportt); + unsigned long flags; + int err = 0, done = 0; + + if (job->rport && job->rport->port_state == FC_PORTSTATE_BLOCKED) + return BLK_EH_RESET_TIMER; + + spin_lock_irqsave(&job->job_lock, flags); + if (job->state_flags & FC_RQST_STATE_DONE) + done = 1; + else + job->ref_cnt++; + spin_unlock_irqrestore(&job->job_lock, flags); + + if (!done && i->f->bsg_timeout) { + /* call LLDD to abort the i/o as it has timed out */ + err = i->f->bsg_timeout(job); + if (err) + printk(KERN_ERR "ERROR: FC BSG request timeout - LLD " + "abort failed with status %d\n", err); + } + + if (!done) { + spin_lock_irqsave(&job->job_lock, flags); + job->ref_cnt--; + spin_unlock_irqrestore(&job->job_lock, flags); + 
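+		/*
+		 * The reference taken for the abort attempt was dropped
+		 * just above; fc_destroy_bsgjob() frees the sg lists and
+		 * the job itself only if ref_cnt has now reached zero.
+		 */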
fc_destroy_bsgjob(job); + } + + /* the blk_end_sync_io() doesn't check the error */ + return BLK_EH_HANDLED; +} + + + +static int +fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req) +{ + size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments); + + BUG_ON(!req->nr_phys_segments); + + buf->sg_list = kzalloc(sz, GFP_KERNEL); + if (!buf->sg_list) + return -ENOMEM; + sg_init_table(buf->sg_list, req->nr_phys_segments); + buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list); + buf->payload_len = blk_rq_bytes(req); + return 0; +} + + +/** + * fc_req_to_bsgjob - Allocate/create the fc_bsg_job structure for the + * bsg request + * @shost: SCSI Host corresponding to the bsg object + * @rport: (optional) FC Remote Port corresponding to the bsg object + * @req: BSG request that needs a job structure + */ +static int +fc_req_to_bsgjob(struct Scsi_Host *shost, struct fc_rport *rport, + struct request *req) +{ + struct fc_internal *i = to_fc_internal(shost->transportt); + struct request *rsp = req->next_rq; + struct fc_bsg_job *job; + int ret; + + BUG_ON(req->special); + + job = kzalloc(sizeof(struct fc_bsg_job) + i->f->dd_bsg_size, + GFP_KERNEL); + if (!job) + return -ENOMEM; + + /* + * Note: this is a bit silly. + * The request gets formatted as a SGIO v4 ioctl request, which + * then gets reformatted as a blk request, which then gets + * reformatted as a fc bsg request. And on completion, we have + * to wrap return results such that SGIO v4 thinks it was a scsi + * status. I hope this was all worth it. + */ + + req->special = job; + job->shost = shost; + job->rport = rport; + job->req = req; + if (i->f->dd_bsg_size) + job->dd_data = (void *)&job[1]; + spin_lock_init(&job->job_lock); + job->request = (struct fc_bsg_request *)req->cmd; + job->request_len = req->cmd_len; + job->reply = req->sense; + job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer + * allocated */ + if (req->bio) { + ret = fc_bsg_map_buffer(&job->request_payload, req); + if (ret) + goto failjob_rls_job; + } + if (rsp && rsp->bio) { + ret = fc_bsg_map_buffer(&job->reply_payload, rsp); + if (ret) + goto failjob_rls_rqst_payload; + } + job->job_done = fc_bsg_jobdone; + if (rport) + job->dev = &rport->dev; + else + job->dev = &shost->shost_gendev; + get_device(job->dev); /* take a reference for the request */ + + job->ref_cnt = 1; + + return 0; + + +failjob_rls_rqst_payload: + kfree(job->request_payload.sg_list); +failjob_rls_job: + kfree(job); + return -ENOMEM; +} + + +enum fc_dispatch_result { + FC_DISPATCH_BREAK, /* on return, q is locked, break from q loop */ + FC_DISPATCH_LOCKED, /* on return, q is locked, continue on */ + FC_DISPATCH_UNLOCKED, /* on return, q is unlocked, continue on */ +}; + + +/** + * fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD + * @shost: scsi host rport attached to + * @job: bsg job to be processed + */ +static enum fc_dispatch_result +fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost, + struct fc_bsg_job *job) +{ + struct fc_internal *i = to_fc_internal(shost->transportt); + int cmdlen = sizeof(uint32_t); /* start with length of msgcode */ + int ret; + + /* Validate the host command */ + switch (job->request->msgcode) { + case FC_BSG_HST_ADD_RPORT: + cmdlen += sizeof(struct fc_bsg_host_add_rport); + break; + + case FC_BSG_HST_DEL_RPORT: + cmdlen += sizeof(struct fc_bsg_host_del_rport); + break; + + case FC_BSG_HST_ELS_NOLOGIN: + cmdlen += sizeof(struct fc_bsg_host_els); + /* there better be a xmt and rcv payloads */ + if 
((!job->request_payload.payload_len) || + (!job->reply_payload.payload_len)) { + ret = -EINVAL; + goto fail_host_msg; + } + break; + + case FC_BSG_HST_CT: + cmdlen += sizeof(struct fc_bsg_host_ct); + /* there better be xmt and rcv payloads */ + if ((!job->request_payload.payload_len) || + (!job->reply_payload.payload_len)) { + ret = -EINVAL; + goto fail_host_msg; + } + break; + + case FC_BSG_HST_VENDOR: + cmdlen += sizeof(struct fc_bsg_host_vendor); + if ((shost->hostt->vendor_id == 0L) || + (job->request->rqst_data.h_vendor.vendor_id != + shost->hostt->vendor_id)) { + ret = -ESRCH; + goto fail_host_msg; + } + break; + + default: + ret = -EBADR; + goto fail_host_msg; + } + + /* check if we really have all the request data needed */ + if (job->request_len < cmdlen) { + ret = -ENOMSG; + goto fail_host_msg; + } + + ret = i->f->bsg_request(job); + if (!ret) + return FC_DISPATCH_UNLOCKED; + +fail_host_msg: + /* return the errno failure code as the only status */ + BUG_ON(job->reply_len < sizeof(uint32_t)); + job->reply->result = ret; + job->reply_len = sizeof(uint32_t); + fc_bsg_jobdone(job); + return FC_DISPATCH_UNLOCKED; +} + + +/* + * fc_bsg_goose_queue - restart rport queue in case it was stopped + * @rport: rport to be restarted + */ +static void +fc_bsg_goose_queue(struct fc_rport *rport) +{ + int flagset; + + if (!rport->rqst_q) + return; + + get_device(&rport->dev); + + spin_lock(rport->rqst_q->queue_lock); + flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) && + !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); + if (flagset) + queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q); + __blk_run_queue(rport->rqst_q); + if (flagset) + queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); + spin_unlock(rport->rqst_q->queue_lock); + + put_device(&rport->dev); +} + + +/** + * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD + * @shost: scsi host rport attached to + * @rport: rport request destined to + * @job: bsg job to be processed + */ +static enum fc_dispatch_result +fc_bsg_rport_dispatch(struct request_queue *q, struct Scsi_Host *shost, + struct fc_rport *rport, struct fc_bsg_job *job) +{ + struct fc_internal *i = to_fc_internal(shost->transportt); + int cmdlen = sizeof(uint32_t); /* start with length of msgcode */ + int ret; + + /* Validate the rport command */ + switch (job->request->msgcode) { + case FC_BSG_RPT_ELS: + cmdlen += sizeof(struct fc_bsg_rport_els); + goto check_bidi; + + case FC_BSG_RPT_CT: + cmdlen += sizeof(struct fc_bsg_rport_ct); +check_bidi: + /* there better be xmt and rcv payloads */ + if ((!job->request_payload.payload_len) || + (!job->reply_payload.payload_len)) { + ret = -EINVAL; + goto fail_rport_msg; + } + break; + default: + ret = -EBADR; + goto fail_rport_msg; + } + + /* check if we really have all the request data needed */ + if (job->request_len < cmdlen) { + ret = -ENOMSG; + goto fail_rport_msg; + } + + ret = i->f->bsg_request(job); + if (!ret) + return FC_DISPATCH_UNLOCKED; + +fail_rport_msg: + /* return the errno failure code as the only status */ + BUG_ON(job->reply_len < sizeof(uint32_t)); + job->reply->result = ret; + job->reply_len = sizeof(uint32_t); + fc_bsg_jobdone(job); + return FC_DISPATCH_UNLOCKED; +} + + +/** + * fc_bsg_request_handler - generic handler for bsg requests + * @q: request queue to manage + * @shost: Scsi_Host related to the bsg object + * @rport: FC remote port related to the bsg object (optional) + * @dev: device structure for bsg object + */ +static void 
+fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost, + struct fc_rport *rport, struct device *dev) +{ + struct request *req; + struct fc_bsg_job *job; + enum fc_dispatch_result ret; + + if (!get_device(dev)) + return; + + while (!blk_queue_plugged(q)) { + if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED)) + break; + + req = blk_fetch_request(q); + if (!req) + break; + + if (rport && (rport->port_state != FC_PORTSTATE_ONLINE)) { + req->errors = -ENXIO; + spin_unlock_irq(q->queue_lock); + blk_end_request(req, -ENXIO, blk_rq_bytes(req)); + spin_lock_irq(q->queue_lock); + continue; + } + + spin_unlock_irq(q->queue_lock); + + ret = fc_req_to_bsgjob(shost, rport, req); + if (ret) { + req->errors = ret; + blk_end_request(req, ret, blk_rq_bytes(req)); + spin_lock_irq(q->queue_lock); + continue; + } + + job = req->special; + + /* check if we have the msgcode value at least */ + if (job->request_len < sizeof(uint32_t)) { + BUG_ON(job->reply_len < sizeof(uint32_t)); + job->reply->result = -ENOMSG; + job->reply_len = sizeof(uint32_t); + fc_bsg_jobdone(job); + spin_lock_irq(q->queue_lock); + continue; + } + + /* the dispatch routines will unlock the queue_lock */ + if (rport) + ret = fc_bsg_rport_dispatch(q, shost, rport, job); + else + ret = fc_bsg_host_dispatch(q, shost, job); + + /* did dispatcher hit state that can't process any more */ + if (ret == FC_DISPATCH_BREAK) + break; + + /* did dispatcher had released the lock */ + if (ret == FC_DISPATCH_UNLOCKED) + spin_lock_irq(q->queue_lock); + } + + spin_unlock_irq(q->queue_lock); + put_device(dev); + spin_lock_irq(q->queue_lock); +} + + +/** + * fc_bsg_host_handler - handler for bsg requests for a fc host + * @q: fc host request queue + */ +static void +fc_bsg_host_handler(struct request_queue *q) +{ + struct Scsi_Host *shost = q->queuedata; + + fc_bsg_request_handler(q, shost, NULL, &shost->shost_gendev); +} + + +/** + * fc_bsg_rport_handler - handler for bsg requests for a fc rport + * @q: rport request queue + */ +static void +fc_bsg_rport_handler(struct request_queue *q) +{ + struct fc_rport *rport = q->queuedata; + struct Scsi_Host *shost = rport_to_shost(rport); + + fc_bsg_request_handler(q, shost, rport, &rport->dev); +} + + +/** + * fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests + * @shost: shost for fc_host + * @fc_host: fc_host adding the structures to + */ +static int +fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host) +{ + struct device *dev = &shost->shost_gendev; + struct fc_internal *i = to_fc_internal(shost->transportt); + struct request_queue *q; + int err; + char bsg_name[BUS_ID_SIZE]; /*20*/ + + fc_host->rqst_q = NULL; + + if (!i->f->bsg_request) + return -ENOTSUPP; + + snprintf(bsg_name, sizeof(bsg_name), + "fc_host%d", shost->host_no); + + q = __scsi_alloc_queue(shost, fc_bsg_host_handler); + if (!q) { + printk(KERN_ERR "fc_host%d: bsg interface failed to " + "initialize - no request queue\n", + shost->host_no); + return -ENOMEM; + } + + q->queuedata = shost; + queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); + blk_queue_rq_timed_out(q, fc_bsg_job_timeout); + blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT); + + err = bsg_register_queue(q, dev, bsg_name, NULL); + if (err) { + printk(KERN_ERR "fc_host%d: bsg interface failed to " + "initialize - register queue\n", + shost->host_no); + blk_cleanup_queue(q); + return err; + } + + fc_host->rqst_q = q; + return 0; +} + + +/** + * fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests + * @shost: 
shost that rport is attached to + * @rport: rport that the bsg hooks are being attached to + */ +static int +fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport) +{ + struct device *dev = &rport->dev; + struct fc_internal *i = to_fc_internal(shost->transportt); + struct request_queue *q; + int err; + + rport->rqst_q = NULL; + + if (!i->f->bsg_request) + return -ENOTSUPP; + + q = __scsi_alloc_queue(shost, fc_bsg_rport_handler); + if (!q) { + printk(KERN_ERR "%s: bsg interface failed to " + "initialize - no request queue\n", + dev->kobj.name); + return -ENOMEM; + } + + q->queuedata = rport; + queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); + blk_queue_rq_timed_out(q, fc_bsg_job_timeout); + blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); + + err = bsg_register_queue(q, dev, NULL, NULL); + if (err) { + printk(KERN_ERR "%s: bsg interface failed to " + "initialize - register queue\n", + dev->kobj.name); + blk_cleanup_queue(q); + return err; + } + + rport->rqst_q = q; + return 0; +} + + +/** + * fc_bsg_remove - Deletes the bsg hooks on fchosts/rports + * @q: the request_queue that is to be torn down. + */ +static void +fc_bsg_remove(struct request_queue *q) +{ + if (q) { + bsg_unregister_queue(q); + blk_cleanup_queue(q); + } +} + + /* Original Author: Martin Hicks */ MODULE_AUTHOR("James Smart"); MODULE_DESCRIPTION("FC Transport Attributes"); diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c index f49f55c..654a34f 100644 --- a/drivers/scsi/scsi_transport_spi.c +++ b/drivers/scsi/scsi_transport_spi.c @@ -234,8 +234,10 @@ static int spi_setup_transport_attrs(struct transport_container *tc, spi_width(starget) = 0; /* narrow */ spi_max_width(starget) = 1; spi_iu(starget) = 0; /* no IU */ + spi_max_iu(starget) = 1; spi_dt(starget) = 0; /* ST */ spi_qas(starget) = 0; + spi_max_qas(starget) = 1; spi_wr_flow(starget) = 0; spi_rd_strm(starget) = 0; spi_rti(starget) = 0; @@ -360,9 +362,9 @@ static DEVICE_ATTR(field, S_IRUGO, \ /* The Parallel SCSI Tranport Attributes: */ spi_transport_max_attr(offset, "%d\n"); spi_transport_max_attr(width, "%d\n"); -spi_transport_rd_attr(iu, "%d\n"); +spi_transport_max_attr(iu, "%d\n"); spi_transport_rd_attr(dt, "%d\n"); -spi_transport_rd_attr(qas, "%d\n"); +spi_transport_max_attr(qas, "%d\n"); spi_transport_rd_attr(wr_flow, "%d\n"); spi_transport_rd_attr(rd_strm, "%d\n"); spi_transport_rd_attr(rti, "%d\n"); @@ -874,13 +876,13 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) /* try QAS requests; this should be harmless to set if the * target supports it */ - if (scsi_device_qas(sdev)) { + if (scsi_device_qas(sdev) && spi_max_qas(starget)) { DV_SET(qas, 1); } else { DV_SET(qas, 0); } - if (scsi_device_ius(sdev) && min_period < 9) { + if (scsi_device_ius(sdev) && spi_max_iu(starget) && min_period < 9) { /* This u320 (or u640). 
Set IU transfers */ DV_SET(iu, 1); /* Then set the optional parameters */ @@ -1412,12 +1414,18 @@ static mode_t target_attribute_is_visible(struct kobject *kobj, else if (attr == &dev_attr_iu.attr && spi_support_ius(starget)) return TARGET_ATTRIBUTE_HELPER(iu); + else if (attr == &dev_attr_max_iu.attr && + spi_support_ius(starget)) + return TARGET_ATTRIBUTE_HELPER(iu); else if (attr == &dev_attr_dt.attr && spi_support_dt(starget)) return TARGET_ATTRIBUTE_HELPER(dt); else if (attr == &dev_attr_qas.attr && spi_support_qas(starget)) return TARGET_ATTRIBUTE_HELPER(qas); + else if (attr == &dev_attr_max_qas.attr && + spi_support_qas(starget)) + return TARGET_ATTRIBUTE_HELPER(qas); else if (attr == &dev_attr_wr_flow.attr && spi_support_ius(starget)) return TARGET_ATTRIBUTE_HELPER(wr_flow); @@ -1447,8 +1455,10 @@ static struct attribute *target_attributes[] = { &dev_attr_width.attr, &dev_attr_max_width.attr, &dev_attr_iu.attr, + &dev_attr_max_iu.attr, &dev_attr_dt.attr, &dev_attr_qas.attr, + &dev_attr_max_qas.attr, &dev_attr_wr_flow.attr, &dev_attr_rd_strm.attr, &dev_attr_rti.attr, diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c index 12e443c..f5b3fdb 100644 --- a/drivers/spi/atmel_spi.c +++ b/drivers/spi/atmel_spi.c @@ -530,9 +530,6 @@ atmel_spi_interrupt(int irq, void *dev_id) return ret; } -/* the spi->mode bits understood by this driver: */ -#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH) - static int atmel_spi_setup(struct spi_device *spi) { struct atmel_spi *as; @@ -555,8 +552,6 @@ static int atmel_spi_setup(struct spi_device *spi) return -EINVAL; } - if (bits == 0) - bits = 8; if (bits < 8 || bits > 16) { dev_dbg(&spi->dev, "setup: invalid bits_per_word %u (8 to 16)\n", @@ -564,12 +559,6 @@ static int atmel_spi_setup(struct spi_device *spi) return -EINVAL; } - if (spi->mode & ~MODEBITS) { - dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n", - spi->mode & ~MODEBITS); - return -EINVAL; - } - /* see notes above re chipselect */ if (!atmel_spi_is_v2() && spi->chip_select == 0 @@ -775,6 +764,9 @@ static int __init atmel_spi_probe(struct platform_device *pdev) if (!master) goto out_free; + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + master->bus_num = pdev->id; master->num_chipselect = 4; master->setup = atmel_spi_setup; diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c index b02f25c..76cbc1a 100644 --- a/drivers/spi/au1550_spi.c +++ b/drivers/spi/au1550_spi.c @@ -284,27 +284,16 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t) return 0; } -/* the spi->mode bits understood by this driver: */ -#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST) - static int au1550_spi_setup(struct spi_device *spi) { struct au1550_spi *hw = spi_master_get_devdata(spi->master); - if (spi->bits_per_word == 0) - spi->bits_per_word = 8; if (spi->bits_per_word < 4 || spi->bits_per_word > 24) { dev_err(&spi->dev, "setup: invalid bits_per_word=%d\n", spi->bits_per_word); return -EINVAL; } - if (spi->mode & ~MODEBITS) { - dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n", - spi->mode & ~MODEBITS); - return -EINVAL; - } - if (spi->max_speed_hz == 0) spi->max_speed_hz = hw->freq_max; if (spi->max_speed_hz > hw->freq_max @@ -781,6 +770,9 @@ static int __init au1550_spi_probe(struct platform_device *pdev) goto err_nomem; } + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST; + hw = 
spi_master_get_devdata(master); hw->master = spi_master_get(master); diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c index 68c77a9..1b74d5c 100644 --- a/drivers/spi/mpc52xx_psc_spi.c +++ b/drivers/spi/mpc52xx_psc_spi.c @@ -13,6 +13,7 @@ #include <linux/module.h> #include <linux/init.h> +#include <linux/types.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/of_platform.h> @@ -30,8 +31,7 @@ struct mpc52xx_psc_spi { /* fsl_spi_platform data */ - void (*activate_cs)(u8, u8); - void (*deactivate_cs)(u8, u8); + void (*cs_control)(struct spi_device *spi, bool on); u32 sysclk; /* driver internal data */ @@ -111,18 +111,16 @@ static void mpc52xx_psc_spi_activate_cs(struct spi_device *spi) out_be16((u16 __iomem *)&psc->ccr, ccr); mps->bits_per_word = cs->bits_per_word; - if (mps->activate_cs) - mps->activate_cs(spi->chip_select, - (spi->mode & SPI_CS_HIGH) ? 1 : 0); + if (mps->cs_control) + mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 1 : 0); } static void mpc52xx_psc_spi_deactivate_cs(struct spi_device *spi) { struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); - if (mps->deactivate_cs) - mps->deactivate_cs(spi->chip_select, - (spi->mode & SPI_CS_HIGH) ? 1 : 0); + if (mps->cs_control) + mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 0 : 1); } #define MPC52xx_PSC_BUFSIZE (MPC52xx_PSC_RFNUM_MASK + 1) @@ -261,9 +259,6 @@ static void mpc52xx_psc_spi_work(struct work_struct *work) spin_unlock_irq(&mps->lock); } -/* the spi->mode bits understood by this driver: */ -#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST) - static int mpc52xx_psc_spi_setup(struct spi_device *spi) { struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); @@ -273,12 +268,6 @@ static int mpc52xx_psc_spi_setup(struct spi_device *spi) if (spi->bits_per_word%8) return -EINVAL; - if (spi->mode & ~MODEBITS) { - dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n", - spi->mode & ~MODEBITS); - return -EINVAL; - } - if (!cs) { cs = kzalloc(sizeof *cs, GFP_KERNEL); if (!cs) @@ -385,18 +374,19 @@ static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, dev_set_drvdata(dev, master); mps = spi_master_get_devdata(master); + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST; + mps->irq = irq; if (pdata == NULL) { dev_warn(dev, "probe called without platform data, no " - "(de)activate_cs function will be called\n"); - mps->activate_cs = NULL; - mps->deactivate_cs = NULL; + "cs_control function will be called\n"); + mps->cs_control = NULL; mps->sysclk = 0; master->bus_num = bus_num; master->num_chipselect = 255; } else { - mps->activate_cs = pdata->activate_cs; - mps->deactivate_cs = pdata->deactivate_cs; + mps->cs_control = pdata->cs_control; mps->sysclk = pdata->sysclk; master->bus_num = pdata->bus_num; master->num_chipselect = pdata->max_chipselect; diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c index d6d0c5d..eee4b6e 100644 --- a/drivers/spi/omap2_mcspi.c +++ b/drivers/spi/omap2_mcspi.c @@ -603,9 +603,6 @@ static int omap2_mcspi_request_dma(struct spi_device *spi) return 0; } -/* the spi->mode bits understood by this driver: */ -#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH) - static int omap2_mcspi_setup(struct spi_device *spi) { int ret; @@ -613,15 +610,7 @@ static int omap2_mcspi_setup(struct spi_device *spi) struct omap2_mcspi_dma *mcspi_dma; struct omap2_mcspi_cs *cs = spi->controller_state; - if (spi->mode & ~MODEBITS) 
{ - dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n", - spi->mode & ~MODEBITS); - return -EINVAL; - } - - if (spi->bits_per_word == 0) - spi->bits_per_word = 8; - else if (spi->bits_per_word < 4 || spi->bits_per_word > 32) { + if (spi->bits_per_word < 4 || spi->bits_per_word > 32) { dev_dbg(&spi->dev, "setup: unsupported %d bit words\n", spi->bits_per_word); return -EINVAL; @@ -984,6 +973,9 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev) return -ENOMEM; } + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + if (pdev->id != -1) master->bus_num = pdev->id; diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c index fe8b9ac..aa90ddb 100644 --- a/drivers/spi/omap_uwire.c +++ b/drivers/spi/omap_uwire.c @@ -339,8 +339,6 @@ static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t) bits = spi->bits_per_word; if (t != NULL && t->bits_per_word) bits = t->bits_per_word; - if (!bits) - bits = 8; if (bits > 16) { pr_debug("%s: wordsize %d?\n", dev_name(&spi->dev), bits); @@ -449,19 +447,10 @@ done: return status; } -/* the spi->mode bits understood by this driver: */ -#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH) - static int uwire_setup(struct spi_device *spi) { struct uwire_state *ust = spi->controller_state; - if (spi->mode & ~MODEBITS) { - dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n", - spi->mode & ~MODEBITS); - return -EINVAL; - } - if (ust == NULL) { ust = kzalloc(sizeof(*ust), GFP_KERNEL); if (ust == NULL) @@ -522,6 +511,9 @@ static int __init uwire_probe(struct platform_device *pdev) uwire_write_reg(UWIRE_SR3, 1); + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + master->bus_num = 2; /* "official" */ master->num_chipselect = 4; master->setup = uwire_setup; diff --git a/drivers/spi/orion_spi.c b/drivers/spi/orion_spi.c index c8b0bab..3aea50d 100644 --- a/drivers/spi/orion_spi.c +++ b/drivers/spi/orion_spi.c @@ -358,20 +358,11 @@ static int orion_spi_setup(struct spi_device *spi) orion_spi = spi_master_get_devdata(spi->master); - if (spi->mode) { - dev_err(&spi->dev, "setup: unsupported mode bits %x\n", - spi->mode); - return -EINVAL; - } - /* Fix ac timing if required. 
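 * (When the board's spi_info sets enable_clock_fix, the code below
 * sets bit 14 in the interface config register.)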
*/ if (orion_spi->spi_info->enable_clock_fix) orion_spi_setbits(orion_spi, ORION_SPI_IF_CONFIG_REG, (1 << 14)); - if (spi->bits_per_word == 0) - spi->bits_per_word = 8; - if ((spi->max_speed_hz == 0) || (spi->max_speed_hz > orion_spi->max_speed)) spi->max_speed_hz = orion_spi->max_speed; @@ -476,6 +467,9 @@ static int __init orion_spi_probe(struct platform_device *pdev) if (pdev->id != -1) master->bus_num = pdev->id; + /* we support only mode 0, and no options */ + master->mode_bits = 0; + master->setup = orion_spi_setup; master->transfer = orion_spi_transfer; master->num_chipselect = ORION_NUM_CHIPSELECTS; diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c index 3f3c08c..d949dbf 100644 --- a/drivers/spi/pxa2xx_spi.c +++ b/drivers/spi/pxa2xx_spi.c @@ -1185,9 +1185,6 @@ static int transfer(struct spi_device *spi, struct spi_message *msg) return 0; } -/* the spi->mode bits understood by this driver: */ -#define MODEBITS (SPI_CPOL | SPI_CPHA) - static int setup_cs(struct spi_device *spi, struct chip_data *chip, struct pxa2xx_spi_chip *chip_info) { @@ -1236,9 +1233,6 @@ static int setup(struct spi_device *spi) uint tx_thres = TX_THRESH_DFLT; uint rx_thres = RX_THRESH_DFLT; - if (!spi->bits_per_word) - spi->bits_per_word = 8; - if (drv_data->ssp_type != PXA25x_SSP && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) { dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d " @@ -1255,12 +1249,6 @@ static int setup(struct spi_device *spi) return -EINVAL; } - if (spi->mode & ~MODEBITS) { - dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n", - spi->mode & ~MODEBITS); - return -EINVAL; - } - /* Only alloc on first setup */ chip = spi_get_ctldata(spi); if (!chip) { @@ -1328,18 +1316,14 @@ static int setup(struct spi_device *spi) /* NOTE: PXA25x_SSP _could_ use external clocking ... */ if (drv_data->ssp_type != PXA25x_SSP) - dev_dbg(&spi->dev, "%d bits/word, %ld Hz, mode %d, %s\n", - spi->bits_per_word, + dev_dbg(&spi->dev, "%ld Hz actual, %s\n", clk_get_rate(ssp->clk) / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)), - spi->mode & 0x3, chip->enable_dma ? "DMA" : "PIO"); else - dev_dbg(&spi->dev, "%d bits/word, %ld Hz, mode %d, %s\n", - spi->bits_per_word, + dev_dbg(&spi->dev, "%ld Hz actual, %s\n", clk_get_rate(ssp->clk) / 2 / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)), - spi->mode & 0x3, chip->enable_dma ? "DMA" : "PIO"); if (spi->bits_per_word <= 8) { @@ -1500,6 +1484,9 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev) drv_data->pdev = pdev; drv_data->ssp = ssp; + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + master->bus_num = pdev->id; master->num_chipselect = platform_info->num_chipselect; master->dma_alignment = DMA_ALIGNMENT; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 8eba98c..70845ccd 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -265,7 +265,7 @@ int spi_add_device(struct spi_device *spi) * normally rely on the device being setup. Devices * using SPI_CS_HIGH can't coexist well otherwise... */ - status = spi->master->setup(spi); + status = spi_setup(spi); if (status < 0) { dev_err(dev, "can't %s %s, status %d\n", "setup", dev_name(&spi->dev), status); @@ -583,6 +583,70 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master); /*-------------------------------------------------------------------------*/ +/* Core methods for SPI master protocol drivers. Some of the + * other core methods are currently defined as inline functions. 
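+ * With spi_setup() below, mode validation is centralized: a controller
+ * driver now advertises its capabilities once at probe time, e.g.
+ * (illustrative, mirroring the driver conversions in this patch):
+ *
+ *	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ *
+ * and spi_setup() rejects any spi->mode bits outside that set with
+ * -EINVAL, instead of every driver open-coding a MODEBITS check.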
+ */ + +/** + * spi_setup - setup SPI mode and clock rate + * @spi: the device whose settings are being modified + * Context: can sleep, and no requests are queued to the device + * + * SPI protocol drivers may need to update the transfer mode if the + * device doesn't work with its default. They may likewise need + * to update clock rates or word sizes from initial values. This function + * changes those settings, and must be called from a context that can sleep. + * Except for SPI_CS_HIGH, which takes effect immediately, the changes take + * effect the next time the device is selected and data is transferred to + * or from it. When this function returns, the spi device is deselected. + * + * Note that this call will fail if the protocol driver specifies an option + * that the underlying controller or its driver does not support. For + * example, not all hardware supports wire transfers using nine bit words, + * LSB-first wire encoding, or active-high chipselects. + */ +int spi_setup(struct spi_device *spi) +{ + unsigned bad_bits; + int status; + + /* help drivers fail *cleanly* when they need options + * that aren't supported with their current master + */ + bad_bits = spi->mode & ~spi->master->mode_bits; + if (bad_bits) { + dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n", + bad_bits); + return -EINVAL; + } + + if (!spi->bits_per_word) + spi->bits_per_word = 8; + + status = spi->master->setup(spi); + + dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s" + "%u bits/w, %u Hz max --> %d\n", + (int) (spi->mode & (SPI_CPOL | SPI_CPHA)), + (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", + (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", + (spi->mode & SPI_3WIRE) ? "3wire, " : "", + (spi->mode & SPI_LOOP) ? "loopback, " : "", + spi->bits_per_word, spi->max_speed_hz, + status); + + return status; +} +EXPORT_SYMBOL_GPL(spi_setup); + + +/*-------------------------------------------------------------------------*/ + +/* Utility methods for SPI master protocol drivers, layered on + * top of the core. Some other utility methods are defined as + * inline functions. 
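+ * A typical protocol-driver sequence (a minimal sketch; "cmd" and
+ * "rxbuf" are placeholder buffers, not from this patch):
+ *
+ *	spi->mode = SPI_MODE_3;
+ *	spi->bits_per_word = 0;		(spi_setup() defaults 0 to 8)
+ *	status = spi_setup(spi);
+ *	if (!status)
+ *		status = spi_write_then_read(spi, cmd, 1, rxbuf, 4);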
+ */ + static void spi_complete(void *arg) { complete(arg); @@ -636,8 +700,8 @@ static u8 *buf; * @spi: device with which data will be exchanged * @txbuf: data to be written (need not be dma-safe) * @n_tx: size of txbuf, in bytes - * @rxbuf: buffer into which data will be read - * @n_rx: size of rxbuf, in bytes (need not be dma-safe) + * @rxbuf: buffer into which data will be read (need not be dma-safe) + * @n_rx: size of rxbuf, in bytes * Context: can sleep * * This performs a half duplex MicroWire style transaction with the diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c index 011c5bd..73e24ef 100644 --- a/drivers/spi/spi_bfin5xx.c +++ b/drivers/spi/spi_bfin5xx.c @@ -169,7 +169,7 @@ static int bfin_spi_flush(struct driver_data *drv_data) unsigned long limit = loops_per_jiffy << 1; /* wait for stop and clear stat */ - while (!(read_STAT(drv_data) & BIT_STAT_SPIF) && limit--) + while (!(read_STAT(drv_data) & BIT_STAT_SPIF) && --limit) cpu_relax(); write_STAT(drv_data, BIT_STAT_CLR); @@ -1010,16 +1010,6 @@ static int bfin_spi_setup(struct spi_device *spi) struct driver_data *drv_data = spi_master_get_devdata(spi->master); int ret; - /* Abort device setup if requested features are not supported */ - if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) { - dev_err(&spi->dev, "requested mode not fully supported\n"); - return -EINVAL; - } - - /* Zero (the default) here means 8 bits */ - if (!spi->bits_per_word) - spi->bits_per_word = 8; - if (spi->bits_per_word != 8 && spi->bits_per_word != 16) return -EINVAL; @@ -1287,6 +1277,9 @@ static int __init bfin_spi_probe(struct platform_device *pdev) drv_data->pdev = pdev; drv_data->pin_req = platform_info->pin_req; + /* the spi->mode bits supported by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; + master->bus_num = pdev->id; master->num_chipselect = platform_info->num_chipselect; master->cleanup = bfin_spi_cleanup; diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c index 85e61f4..2a5abc0 100644 --- a/drivers/spi/spi_bitbang.c +++ b/drivers/spi/spi_bitbang.c @@ -188,12 +188,6 @@ int spi_bitbang_setup(struct spi_device *spi) bitbang = spi_master_get_devdata(spi->master); - /* Bitbangers can support SPI_CS_HIGH, SPI_3WIRE, and so on; - * add those to master->flags, and provide the other support. 
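 * (This removed check is not lost: spi_bitbang_start() now seeds
 * master->mode_bits with SPI_CPOL | SPI_CPHA | bitbang->flags when the
 * master driver left it unset, and spi_setup() does the rejection.)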
- */ - if ((spi->mode & ~(SPI_CPOL|SPI_CPHA|bitbang->flags)) != 0) - return -EINVAL; - if (!cs) { cs = kzalloc(sizeof *cs, GFP_KERNEL); if (!cs) @@ -201,9 +195,6 @@ int spi_bitbang_setup(struct spi_device *spi) spi->controller_state = cs; } - if (!spi->bits_per_word) - spi->bits_per_word = 8; - /* per-word shift register access, in hardware or bitbanging */ cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)]; if (!cs->txrx_word) @@ -213,9 +204,7 @@ int spi_bitbang_setup(struct spi_device *spi) if (retval < 0) return retval; - dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n", - __func__, spi->mode & (SPI_CPOL | SPI_CPHA), - spi->bits_per_word, 2 * cs->nsecs); + dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs); /* NOTE we _need_ to call chipselect() early, ideally with adapter * setup, unless the hardware defaults cooperate to avoid confusion @@ -457,6 +446,9 @@ int spi_bitbang_start(struct spi_bitbang *bitbang) spin_lock_init(&bitbang->lock); INIT_LIST_HEAD(&bitbang->queue); + if (!bitbang->master->mode_bits) + bitbang->master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags; + if (!bitbang->master->transfer) bitbang->master->transfer = spi_bitbang_transfer; if (!bitbang->txrx_bufs) { diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c index 0671aee..c195e45 100644 --- a/drivers/spi/spi_imx.c +++ b/drivers/spi/spi_imx.c @@ -1171,9 +1171,6 @@ msg_rejected: return -EINVAL; } -/* the spi->mode bits understood by this driver: */ -#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH) - /* On first setup bad values must free chip_data memory since will cause spi_new_device to fail. Bad value setup from protocol driver are simply not applied and notified to the calling driver. */ @@ -1186,12 +1183,6 @@ static int setup(struct spi_device *spi) u32 tmp; int status = 0; - if (spi->mode & ~MODEBITS) { - dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n", - spi->mode & ~MODEBITS); - return -EINVAL; - } - /* Get controller data */ chip_info = spi->controller_data; @@ -1286,10 +1277,7 @@ static int setup(struct spi_device *spi) /* SPI word width */ tmp = spi->bits_per_word; - if (tmp == 0) { - tmp = 8; - spi->bits_per_word = 8; - } else if (tmp > 16) { + if (tmp > 16) { status = -EINVAL; dev_err(&spi->dev, "setup - " @@ -1481,6 +1469,9 @@ static int __init spi_imx_probe(struct platform_device *pdev) drv_data->master_info = platform_info; drv_data->pdev = pdev; + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + master->bus_num = pdev->id; master->num_chipselect = platform_info->num_chipselect; master->dma_alignment = DMA_ALIGNMENT; diff --git a/drivers/spi/spi_mpc83xx.c b/drivers/spi/spi_mpc83xx.c index a32ccb4..ce61be9 100644 --- a/drivers/spi/spi_mpc83xx.c +++ b/drivers/spi/spi_mpc83xx.c @@ -419,10 +419,6 @@ static void mpc83xx_spi_work(struct work_struct *work) spin_unlock_irq(&mpc83xx_spi->lock); } -/* the spi->mode bits understood by this driver: */ -#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \ - | SPI_LSB_FIRST | SPI_LOOP) - static int mpc83xx_spi_setup(struct spi_device *spi) { struct mpc83xx_spi *mpc83xx_spi; @@ -430,12 +426,6 @@ static int mpc83xx_spi_setup(struct spi_device *spi) u32 hw_mode; struct spi_mpc83xx_cs *cs = spi->controller_state; - if (spi->mode & ~MODEBITS) { - dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n", - spi->mode & ~MODEBITS); - return -EINVAL; - } - if (!spi->max_speed_hz) return -EINVAL; @@ -447,9 +437,6 @@ static int mpc83xx_spi_setup(struct 
spi_device *spi) } mpc83xx_spi = spi_master_get_devdata(spi->master); - if (!spi->bits_per_word) - spi->bits_per_word = 8; - hw_mode = cs->hw_mode; /* Save orginal settings */ cs->hw_mode = mpc83xx_spi_read_reg(&mpc83xx_spi->base->mode); /* mask out bits we are going to set */ @@ -471,9 +458,6 @@ static int mpc83xx_spi_setup(struct spi_device *spi) return retval; } - dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u Hz\n", - __func__, spi->mode & (SPI_CPOL | SPI_CPHA), - spi->bits_per_word, spi->max_speed_hz); #if 0 /* Don't think this is needed */ /* NOTE we _need_ to call chipselect() early, ideally with adapter * setup, unless the hardware defaults cooperate to avoid confusion @@ -568,6 +552,10 @@ mpc83xx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq) dev_set_drvdata(dev, master); + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH + | SPI_LSB_FIRST | SPI_LOOP; + master->setup = mpc83xx_spi_setup; master->transfer = mpc83xx_spi_transfer; master->cleanup = mpc83xx_spi_cleanup; diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c index b3ebc1d..e0d44af 100644 --- a/drivers/spi/spi_s3c24xx.c +++ b/drivers/spi/spi_s3c24xx.c @@ -146,32 +146,16 @@ static int s3c24xx_spi_setupxfer(struct spi_device *spi, return 0; } -/* the spi->mode bits understood by this driver: */ -#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH) - static int s3c24xx_spi_setup(struct spi_device *spi) { int ret; - if (!spi->bits_per_word) - spi->bits_per_word = 8; - - if (spi->mode & ~MODEBITS) { - dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n", - spi->mode & ~MODEBITS); - return -EINVAL; - } - ret = s3c24xx_spi_setupxfer(spi, NULL); if (ret < 0) { dev_err(&spi->dev, "setupxfer returned %d\n", ret); return ret; } - dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", - __func__, spi->mode, spi->bits_per_word, - spi->max_speed_hz); - return 0; } @@ -290,6 +274,9 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev) /* setup the master state. */ + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + master->num_chipselect = hw->pdata->num_cs; master->bus_num = pdata->bus_num; diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi_txx9.c index 29cbb06..96057de 100644 --- a/drivers/spi/spi_txx9.c +++ b/drivers/spi/spi_txx9.c @@ -110,23 +110,17 @@ static void txx9spi_cs_func(struct spi_device *spi, struct txx9spi *c, ndelay(cs_delay); /* CS Setup Time / CS Recovery Time */ } -/* the spi->mode bits understood by this driver: */ -#define MODEBITS (SPI_CS_HIGH|SPI_CPOL|SPI_CPHA) - static int txx9spi_setup(struct spi_device *spi) { struct txx9spi *c = spi_master_get_devdata(spi->master); u8 bits_per_word; - if (spi->mode & ~MODEBITS) - return -EINVAL; - if (!spi->max_speed_hz || spi->max_speed_hz > c->max_speed_hz || spi->max_speed_hz < c->min_speed_hz) return -EINVAL; - bits_per_word = spi->bits_per_word ? 
: 8; + bits_per_word = spi->bits_per_word; if (bits_per_word != 8 && bits_per_word != 16) return -EINVAL; @@ -414,6 +408,9 @@ static int __init txx9spi_probe(struct platform_device *dev) (unsigned long long)res->start, irq, (c->baseclk + 500000) / 1000000); + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA; + master->bus_num = dev->id; master->setup = txx9spi_setup; master->transfer = txx9spi_transfer; diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c index 494d3f7..46b8c5c 100644 --- a/drivers/spi/xilinx_spi.c +++ b/drivers/spi/xilinx_spi.c @@ -158,9 +158,6 @@ static int xilinx_spi_setup_transfer(struct spi_device *spi, return 0; } -/* the spi->mode bits understood by this driver: */ -#define MODEBITS (SPI_CPOL | SPI_CPHA) - static int xilinx_spi_setup(struct spi_device *spi) { struct spi_bitbang *bitbang; @@ -170,22 +167,10 @@ static int xilinx_spi_setup(struct spi_device *spi) xspi = spi_master_get_devdata(spi->master); bitbang = &xspi->bitbang; - if (!spi->bits_per_word) - spi->bits_per_word = 8; - - if (spi->mode & ~MODEBITS) { - dev_err(&spi->dev, "%s, unsupported mode bits %x\n", - __func__, spi->mode & ~MODEBITS); - return -EINVAL; - } - retval = xilinx_spi_setup_transfer(spi, NULL); if (retval < 0) return retval; - dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n", - __func__, spi->mode & MODEBITS, spi->bits_per_word, 0); - return 0; } @@ -333,6 +318,9 @@ static int __init xilinx_spi_of_probe(struct of_device *ofdev, goto put_master; } + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA; + xspi = spi_master_get_devdata(master); xspi->bitbang.master = spi_master_get(master); xspi->bitbang.chipselect = xilinx_spi_chipselect; diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index d0fcf36..9256578 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -117,5 +117,7 @@ source "drivers/staging/serqt_usb/Kconfig" source "drivers/gpu/drm/radeon/Kconfig" +source "drivers/staging/octeon/Kconfig" + endif # !STAGING_EXCLUDE_BUILD endif # STAGING diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 47dfd5b4..6da9c74 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -40,3 +40,4 @@ obj-$(CONFIG_PLAN9AUTH) += p9auth/ obj-$(CONFIG_HECI) += heci/ obj-$(CONFIG_LINE6_USB) += line6/ obj-$(CONFIG_USB_SERIAL_QUATECH_ESU100) += serqt_usb/ +obj-$(CONFIG_OCTEON_ETHERNET) += octeon/ diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig new file mode 100644 index 0000000..536e238 --- /dev/null +++ b/drivers/staging/octeon/Kconfig @@ -0,0 +1,12 @@ +config OCTEON_ETHERNET + tristate "Cavium Networks Octeon Ethernet support" + depends on CPU_CAVIUM_OCTEON + select MII + help + This driver supports the builtin ethernet ports on Cavium + Networks' products in the Octeon family. This driver supports the + CN3XXX and CN5XXX Octeon processors. + + To compile this driver as a module, choose M here. The module + will be called octeon-ethernet. + diff --git a/drivers/staging/octeon/Makefile b/drivers/staging/octeon/Makefile new file mode 100644 index 0000000..3c839e3 --- /dev/null +++ b/drivers/staging/octeon/Makefile @@ -0,0 +1,30 @@ +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. 
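+# (kbuild note: obj-$(CONFIG_OCTEON_ETHERNET) below names the module,
+# and the octeon-ethernet-objs lists are the objects linked into the
+# single octeon-ethernet.ko that the Kconfig help text refers to.)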
+# +# Copyright (C) 2005-2009 Cavium Networks +# + +# +# Makefile for Cavium OCTEON on-board ethernet driver +# + +obj-${CONFIG_OCTEON_ETHERNET} := octeon-ethernet.o + +octeon-ethernet-objs := ethernet.o +octeon-ethernet-objs += ethernet-common.o +octeon-ethernet-objs += ethernet-mdio.o +octeon-ethernet-objs += ethernet-mem.o +octeon-ethernet-objs += ethernet-proc.o +octeon-ethernet-objs += ethernet-rgmii.o +octeon-ethernet-objs += ethernet-rx.o +octeon-ethernet-objs += ethernet-sgmii.o +octeon-ethernet-objs += ethernet-spi.o +octeon-ethernet-objs += ethernet-tx.o +octeon-ethernet-objs += ethernet-xaui.o +octeon-ethernet-objs += cvmx-pko.o cvmx-spi.o cvmx-cmd-queue.o \ + cvmx-helper-board.o cvmx-helper.o cvmx-helper-xaui.o \ + cvmx-helper-rgmii.o cvmx-helper-sgmii.o cvmx-helper-npi.o \ + cvmx-helper-loop.o cvmx-helper-spi.o cvmx-helper-util.o \ + cvmx-interrupt-decodes.o cvmx-interrupt-rsl.o + diff --git a/drivers/staging/octeon/cvmx-address.h b/drivers/staging/octeon/cvmx-address.h new file mode 100644 index 0000000..3c74d82 --- /dev/null +++ b/drivers/staging/octeon/cvmx-address.h @@ -0,0 +1,274 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2009 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/** + * Typedefs and defines for working with Octeon physical addresses. + * + */ +#ifndef __CVMX_ADDRESS_H__ +#define __CVMX_ADDRESS_H__ + +#if 0 +typedef enum { + CVMX_MIPS_SPACE_XKSEG = 3LL, + CVMX_MIPS_SPACE_XKPHYS = 2LL, + CVMX_MIPS_SPACE_XSSEG = 1LL, + CVMX_MIPS_SPACE_XUSEG = 0LL +} cvmx_mips_space_t; +#endif + +typedef enum { + CVMX_MIPS_XKSEG_SPACE_KSEG0 = 0LL, + CVMX_MIPS_XKSEG_SPACE_KSEG1 = 1LL, + CVMX_MIPS_XKSEG_SPACE_SSEG = 2LL, + CVMX_MIPS_XKSEG_SPACE_KSEG3 = 3LL +} cvmx_mips_xkseg_space_t; + +/* decodes <14:13> of a kseg3 window address */ +typedef enum { + CVMX_ADD_WIN_SCR = 0L, + /* see cvmx_add_win_dma_dec_t for further decode */ + CVMX_ADD_WIN_DMA = 1L, + CVMX_ADD_WIN_UNUSED = 2L, + CVMX_ADD_WIN_UNUSED2 = 3L +} cvmx_add_win_dec_t; + +/* decode within DMA space */ +typedef enum { + /* + * Add store data to the write buffer entry, allocating it if + * necessary. 
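+	 * (These CVMX_ADD_WIN_DMA_* selectors presumably travel in the
+	 * 3-bit "type" field of the sdma address layout defined further
+	 * below; that mapping is inferred, not stated here.)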
+ */ + CVMX_ADD_WIN_DMA_ADD = 0L, + /* send out the write buffer entry to DRAM */ + CVMX_ADD_WIN_DMA_SENDMEM = 1L, + /* store data must be normal DRAM memory space address in this case */ + /* send out the write buffer entry as an IOBDMA command */ + CVMX_ADD_WIN_DMA_SENDDMA = 2L, + /* see CVMX_ADD_WIN_DMA_SEND_DEC for data contents */ + /* send out the write buffer entry as an IO write */ + CVMX_ADD_WIN_DMA_SENDIO = 3L, + /* store data must be normal IO space address in this case */ + /* send out a single-tick command on the NCB bus */ + CVMX_ADD_WIN_DMA_SENDSINGLE = 4L, + /* no write buffer data needed/used */ +} cvmx_add_win_dma_dec_t; + +/* + * Physical Address Decode + * + * Octeon-I HW never interprets this X (<39:36> reserved + * for future expansion), software should set to 0. + * + * - 0x0 XXX0 0000 0000 to DRAM Cached + * - 0x0 XXX0 0FFF FFFF + * + * - 0x0 XXX0 1000 0000 to Boot Bus Uncached (Converted to 0x1 00X0 1000 0000 + * - 0x0 XXX0 1FFF FFFF + EJTAG to 0x1 00X0 1FFF FFFF) + * + * - 0x0 XXX0 2000 0000 to DRAM Cached + * - 0x0 XXXF FFFF FFFF + * + * - 0x1 00X0 0000 0000 to Boot Bus Uncached + * - 0x1 00XF FFFF FFFF + * + * - 0x1 01X0 0000 0000 to Other NCB Uncached + * - 0x1 FFXF FFFF FFFF devices + * + * Decode of all Octeon addresses + */ +typedef union { + + uint64_t u64; + /* mapped or unmapped virtual address */ + struct { + uint64_t R:2; + uint64_t offset:62; + } sva; + + /* mapped USEG virtual addresses (typically) */ + struct { + uint64_t zeroes:33; + uint64_t offset:31; + } suseg; + + /* mapped or unmapped virtual address */ + struct { + uint64_t ones:33; + uint64_t sp:2; + uint64_t offset:29; + } sxkseg; + + /* + * physical address accessed through xkphys unmapped virtual + * address. + */ + struct { + uint64_t R:2; /* CVMX_MIPS_SPACE_XKPHYS in this case */ + uint64_t cca:3; /* ignored by octeon */ + uint64_t mbz:10; + uint64_t pa:49; /* physical address */ + } sxkphys; + + /* physical address */ + struct { + uint64_t mbz:15; + /* if set, the address is uncached and resides on MCB bus */ + uint64_t is_io:1; + /* + * the hardware ignores this field when is_io==0, else + * device ID. + */ + uint64_t did:8; + /* the hardware ignores <39:36> in Octeon I */ + uint64_t unaddr:4; + uint64_t offset:36; + } sphys; + + /* physical mem address */ + struct { + /* techically, <47:40> are dont-cares */ + uint64_t zeroes:24; + /* the hardware ignores <39:36> in Octeon I */ + uint64_t unaddr:4; + uint64_t offset:36; + } smem; + + /* physical IO address */ + struct { + uint64_t mem_region:2; + uint64_t mbz:13; + /* 1 in this case */ + uint64_t is_io:1; + /* + * The hardware ignores this field when is_io==0, else + * device ID. 
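+		 * (In this layout the device ID occupies bits <47:40>,
+		 * which matches CVMX_ADDR_DID() below shifting the
+		 * DID-space value left by 40.)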
+ */ + uint64_t did:8; + /* the hardware ignores <39:36> in Octeon I */ + uint64_t unaddr:4; + uint64_t offset:36; + } sio; + + /* + * Scratchpad virtual address - accessed through a window at + * the end of kseg3 + */ + struct { + uint64_t ones:49; + /* CVMX_ADD_WIN_SCR (0) in this case */ + cvmx_add_win_dec_t csrdec:2; + uint64_t addr:13; + } sscr; + + /* there should only be stores to IOBDMA space, no loads */ + /* + * IOBDMA virtual address - accessed through a window at the + * end of kseg3 + */ + struct { + uint64_t ones:49; + uint64_t csrdec:2; /* CVMX_ADD_WIN_DMA (1) in this case */ + uint64_t unused2:3; + uint64_t type:3; + uint64_t addr:7; + } sdma; + + struct { + uint64_t didspace:24; + uint64_t unused:40; + } sfilldidspace; + +} cvmx_addr_t; + +/* These macros for used by 32 bit applications */ + +#define CVMX_MIPS32_SPACE_KSEG0 1l +#define CVMX_ADD_SEG32(segment, add) \ + (((int32_t)segment << 31) | (int32_t)(add)) + +/* + * Currently all IOs are performed using XKPHYS addressing. Linux uses + * the CvmMemCtl register to enable XKPHYS addressing to IO space from + * user mode. Future OSes may need to change the upper bits of IO + * addresses. The following define controls the upper two bits for all + * IO addresses generated by the simple executive library. + */ +#define CVMX_IO_SEG CVMX_MIPS_SPACE_XKPHYS + +/* These macros simplify the process of creating common IO addresses */ +#define CVMX_ADD_SEG(segment, add) ((((uint64_t)segment) << 62) | (add)) +#ifndef CVMX_ADD_IO_SEG +#define CVMX_ADD_IO_SEG(add) CVMX_ADD_SEG(CVMX_IO_SEG, (add)) +#endif +#define CVMX_ADDR_DIDSPACE(did) (((CVMX_IO_SEG) << 22) | ((1ULL) << 8) | (did)) +#define CVMX_ADDR_DID(did) (CVMX_ADDR_DIDSPACE(did) << 40) +#define CVMX_FULL_DID(did, subdid) (((did) << 3) | (subdid)) + + /* from include/ncb_rsl_id.v */ +#define CVMX_OCT_DID_MIS 0ULL /* misc stuff */ +#define CVMX_OCT_DID_GMX0 1ULL +#define CVMX_OCT_DID_GMX1 2ULL +#define CVMX_OCT_DID_PCI 3ULL +#define CVMX_OCT_DID_KEY 4ULL +#define CVMX_OCT_DID_FPA 5ULL +#define CVMX_OCT_DID_DFA 6ULL +#define CVMX_OCT_DID_ZIP 7ULL +#define CVMX_OCT_DID_RNG 8ULL +#define CVMX_OCT_DID_IPD 9ULL +#define CVMX_OCT_DID_PKT 10ULL +#define CVMX_OCT_DID_TIM 11ULL +#define CVMX_OCT_DID_TAG 12ULL + /* the rest are not on the IO bus */ +#define CVMX_OCT_DID_L2C 16ULL +#define CVMX_OCT_DID_LMC 17ULL +#define CVMX_OCT_DID_SPX0 18ULL +#define CVMX_OCT_DID_SPX1 19ULL +#define CVMX_OCT_DID_PIP 20ULL +#define CVMX_OCT_DID_ASX0 22ULL +#define CVMX_OCT_DID_ASX1 23ULL +#define CVMX_OCT_DID_IOB 30ULL + +#define CVMX_OCT_DID_PKT_SEND CVMX_FULL_DID(CVMX_OCT_DID_PKT, 2ULL) +#define CVMX_OCT_DID_TAG_SWTAG CVMX_FULL_DID(CVMX_OCT_DID_TAG, 0ULL) +#define CVMX_OCT_DID_TAG_TAG1 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 1ULL) +#define CVMX_OCT_DID_TAG_TAG2 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 2ULL) +#define CVMX_OCT_DID_TAG_TAG3 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 3ULL) +#define CVMX_OCT_DID_TAG_NULL_RD CVMX_FULL_DID(CVMX_OCT_DID_TAG, 4ULL) +#define CVMX_OCT_DID_TAG_CSR CVMX_FULL_DID(CVMX_OCT_DID_TAG, 7ULL) +#define CVMX_OCT_DID_FAU_FAI CVMX_FULL_DID(CVMX_OCT_DID_IOB, 0ULL) +#define CVMX_OCT_DID_TIM_CSR CVMX_FULL_DID(CVMX_OCT_DID_TIM, 0ULL) +#define CVMX_OCT_DID_KEY_RW CVMX_FULL_DID(CVMX_OCT_DID_KEY, 0ULL) +#define CVMX_OCT_DID_PCI_6 CVMX_FULL_DID(CVMX_OCT_DID_PCI, 6ULL) +#define CVMX_OCT_DID_MIS_BOO CVMX_FULL_DID(CVMX_OCT_DID_MIS, 0ULL) +#define CVMX_OCT_DID_PCI_RML CVMX_FULL_DID(CVMX_OCT_DID_PCI, 0ULL) +#define CVMX_OCT_DID_IPD_CSR CVMX_FULL_DID(CVMX_OCT_DID_IPD, 7ULL) +#define CVMX_OCT_DID_DFA_CSR 
CVMX_FULL_DID(CVMX_OCT_DID_DFA, 7ULL) +#define CVMX_OCT_DID_MIS_CSR CVMX_FULL_DID(CVMX_OCT_DID_MIS, 7ULL) +#define CVMX_OCT_DID_ZIP_CSR CVMX_FULL_DID(CVMX_OCT_DID_ZIP, 0ULL) + +#endif /* __CVMX_ADDRESS_H__ */ diff --git a/drivers/staging/octeon/cvmx-asxx-defs.h b/drivers/staging/octeon/cvmx-asxx-defs.h new file mode 100644 index 0000000..91415a8 --- /dev/null +++ b/drivers/staging/octeon/cvmx-asxx-defs.h @@ -0,0 +1,475 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_ASXX_DEFS_H__ +#define __CVMX_ASXX_DEFS_H__ + +#define CVMX_ASXX_GMII_RX_CLK_SET(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000180ull + (((block_id) & 0) * 0x8000000ull)) +#define CVMX_ASXX_GMII_RX_DAT_SET(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000188ull + (((block_id) & 0) * 0x8000000ull)) +#define CVMX_ASXX_INT_EN(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000018ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_ASXX_INT_REG(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000010ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_ASXX_MII_RX_DAT_SET(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000190ull + (((block_id) & 0) * 0x8000000ull)) +#define CVMX_ASXX_PRT_LOOP(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000040ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_ASXX_RLD_BYPASS(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000248ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_ASXX_RLD_BYPASS_SETTING(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000250ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_ASXX_RLD_COMP(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000220ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_ASXX_RLD_DATA_DRV(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000218ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_ASXX_RLD_FCRAM_MODE(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000210ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_ASXX_RLD_NCTL_STRONG(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000230ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_ASXX_RLD_NCTL_WEAK(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000240ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_ASXX_RLD_PCTL_STRONG(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000228ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_ASXX_RLD_PCTL_WEAK(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000238ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_ASXX_RLD_SETTING(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000258ull + 
(((block_id) & 1) * 0x8000000ull)) +#define CVMX_ASXX_RX_CLK_SETX(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000020ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_ASXX_RX_PRT_EN(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000000ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_ASXX_RX_WOL(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000100ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_ASXX_RX_WOL_MSK(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000108ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_ASXX_RX_WOL_POWOK(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000118ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_ASXX_RX_WOL_SIG(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000110ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_ASXX_TX_CLK_SETX(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000048ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_ASXX_TX_COMP_BYP(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000068ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_ASXX_TX_HI_WATERX(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000080ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_ASXX_TX_PRT_EN(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000008ull + (((block_id) & 1) * 0x8000000ull)) + +union cvmx_asxx_gmii_rx_clk_set { + uint64_t u64; + struct cvmx_asxx_gmii_rx_clk_set_s { + uint64_t reserved_5_63:59; + uint64_t setting:5; + } s; + struct cvmx_asxx_gmii_rx_clk_set_s cn30xx; + struct cvmx_asxx_gmii_rx_clk_set_s cn31xx; + struct cvmx_asxx_gmii_rx_clk_set_s cn50xx; +}; + +union cvmx_asxx_gmii_rx_dat_set { + uint64_t u64; + struct cvmx_asxx_gmii_rx_dat_set_s { + uint64_t reserved_5_63:59; + uint64_t setting:5; + } s; + struct cvmx_asxx_gmii_rx_dat_set_s cn30xx; + struct cvmx_asxx_gmii_rx_dat_set_s cn31xx; + struct cvmx_asxx_gmii_rx_dat_set_s cn50xx; +}; + +union cvmx_asxx_int_en { + uint64_t u64; + struct cvmx_asxx_int_en_s { + uint64_t reserved_12_63:52; + uint64_t txpsh:4; + uint64_t txpop:4; + uint64_t ovrflw:4; + } s; + struct cvmx_asxx_int_en_cn30xx { + uint64_t reserved_11_63:53; + uint64_t txpsh:3; + uint64_t reserved_7_7:1; + uint64_t txpop:3; + uint64_t reserved_3_3:1; + uint64_t ovrflw:3; + } cn30xx; + struct cvmx_asxx_int_en_cn30xx cn31xx; + struct cvmx_asxx_int_en_s cn38xx; + struct cvmx_asxx_int_en_s cn38xxp2; + struct cvmx_asxx_int_en_cn30xx cn50xx; + struct cvmx_asxx_int_en_s cn58xx; + struct cvmx_asxx_int_en_s cn58xxp1; +}; + +union cvmx_asxx_int_reg { + uint64_t u64; + struct cvmx_asxx_int_reg_s { + uint64_t reserved_12_63:52; + uint64_t txpsh:4; + uint64_t txpop:4; + uint64_t ovrflw:4; + } s; + struct cvmx_asxx_int_reg_cn30xx { + uint64_t reserved_11_63:53; + uint64_t txpsh:3; + uint64_t reserved_7_7:1; + uint64_t txpop:3; + uint64_t reserved_3_3:1; + uint64_t ovrflw:3; + } cn30xx; + struct cvmx_asxx_int_reg_cn30xx cn31xx; + struct cvmx_asxx_int_reg_s cn38xx; + struct cvmx_asxx_int_reg_s cn38xxp2; + struct cvmx_asxx_int_reg_cn30xx cn50xx; + struct cvmx_asxx_int_reg_s cn58xx; + struct cvmx_asxx_int_reg_s cn58xxp1; +}; + +union cvmx_asxx_mii_rx_dat_set { + uint64_t u64; + struct cvmx_asxx_mii_rx_dat_set_s { + uint64_t reserved_5_63:59; + uint64_t setting:5; + } s; + struct cvmx_asxx_mii_rx_dat_set_s cn30xx; + struct cvmx_asxx_mii_rx_dat_set_s cn50xx; +}; + +union cvmx_asxx_prt_loop { + uint64_t u64; + struct cvmx_asxx_prt_loop_s { + uint64_t reserved_8_63:56; + uint64_t ext_loop:4; + uint64_t int_loop:4; + } s; + struct cvmx_asxx_prt_loop_cn30xx { + uint64_t 
reserved_7_63:57; + uint64_t ext_loop:3; + uint64_t reserved_3_3:1; + uint64_t int_loop:3; + } cn30xx; + struct cvmx_asxx_prt_loop_cn30xx cn31xx; + struct cvmx_asxx_prt_loop_s cn38xx; + struct cvmx_asxx_prt_loop_s cn38xxp2; + struct cvmx_asxx_prt_loop_cn30xx cn50xx; + struct cvmx_asxx_prt_loop_s cn58xx; + struct cvmx_asxx_prt_loop_s cn58xxp1; +}; + +union cvmx_asxx_rld_bypass { + uint64_t u64; + struct cvmx_asxx_rld_bypass_s { + uint64_t reserved_1_63:63; + uint64_t bypass:1; + } s; + struct cvmx_asxx_rld_bypass_s cn38xx; + struct cvmx_asxx_rld_bypass_s cn38xxp2; + struct cvmx_asxx_rld_bypass_s cn58xx; + struct cvmx_asxx_rld_bypass_s cn58xxp1; +}; + +union cvmx_asxx_rld_bypass_setting { + uint64_t u64; + struct cvmx_asxx_rld_bypass_setting_s { + uint64_t reserved_5_63:59; + uint64_t setting:5; + } s; + struct cvmx_asxx_rld_bypass_setting_s cn38xx; + struct cvmx_asxx_rld_bypass_setting_s cn38xxp2; + struct cvmx_asxx_rld_bypass_setting_s cn58xx; + struct cvmx_asxx_rld_bypass_setting_s cn58xxp1; +}; + +union cvmx_asxx_rld_comp { + uint64_t u64; + struct cvmx_asxx_rld_comp_s { + uint64_t reserved_9_63:55; + uint64_t pctl:5; + uint64_t nctl:4; + } s; + struct cvmx_asxx_rld_comp_cn38xx { + uint64_t reserved_8_63:56; + uint64_t pctl:4; + uint64_t nctl:4; + } cn38xx; + struct cvmx_asxx_rld_comp_cn38xx cn38xxp2; + struct cvmx_asxx_rld_comp_s cn58xx; + struct cvmx_asxx_rld_comp_s cn58xxp1; +}; + +union cvmx_asxx_rld_data_drv { + uint64_t u64; + struct cvmx_asxx_rld_data_drv_s { + uint64_t reserved_8_63:56; + uint64_t pctl:4; + uint64_t nctl:4; + } s; + struct cvmx_asxx_rld_data_drv_s cn38xx; + struct cvmx_asxx_rld_data_drv_s cn38xxp2; + struct cvmx_asxx_rld_data_drv_s cn58xx; + struct cvmx_asxx_rld_data_drv_s cn58xxp1; +}; + +union cvmx_asxx_rld_fcram_mode { + uint64_t u64; + struct cvmx_asxx_rld_fcram_mode_s { + uint64_t reserved_1_63:63; + uint64_t mode:1; + } s; + struct cvmx_asxx_rld_fcram_mode_s cn38xx; + struct cvmx_asxx_rld_fcram_mode_s cn38xxp2; +}; + +union cvmx_asxx_rld_nctl_strong { + uint64_t u64; + struct cvmx_asxx_rld_nctl_strong_s { + uint64_t reserved_5_63:59; + uint64_t nctl:5; + } s; + struct cvmx_asxx_rld_nctl_strong_s cn38xx; + struct cvmx_asxx_rld_nctl_strong_s cn38xxp2; + struct cvmx_asxx_rld_nctl_strong_s cn58xx; + struct cvmx_asxx_rld_nctl_strong_s cn58xxp1; +}; + +union cvmx_asxx_rld_nctl_weak { + uint64_t u64; + struct cvmx_asxx_rld_nctl_weak_s { + uint64_t reserved_5_63:59; + uint64_t nctl:5; + } s; + struct cvmx_asxx_rld_nctl_weak_s cn38xx; + struct cvmx_asxx_rld_nctl_weak_s cn38xxp2; + struct cvmx_asxx_rld_nctl_weak_s cn58xx; + struct cvmx_asxx_rld_nctl_weak_s cn58xxp1; +}; + +union cvmx_asxx_rld_pctl_strong { + uint64_t u64; + struct cvmx_asxx_rld_pctl_strong_s { + uint64_t reserved_5_63:59; + uint64_t pctl:5; + } s; + struct cvmx_asxx_rld_pctl_strong_s cn38xx; + struct cvmx_asxx_rld_pctl_strong_s cn38xxp2; + struct cvmx_asxx_rld_pctl_strong_s cn58xx; + struct cvmx_asxx_rld_pctl_strong_s cn58xxp1; +}; + +union cvmx_asxx_rld_pctl_weak { + uint64_t u64; + struct cvmx_asxx_rld_pctl_weak_s { + uint64_t reserved_5_63:59; + uint64_t pctl:5; + } s; + struct cvmx_asxx_rld_pctl_weak_s cn38xx; + struct cvmx_asxx_rld_pctl_weak_s cn38xxp2; + struct cvmx_asxx_rld_pctl_weak_s cn58xx; + struct cvmx_asxx_rld_pctl_weak_s cn58xxp1; +}; + +union cvmx_asxx_rld_setting { + uint64_t u64; + struct cvmx_asxx_rld_setting_s { + uint64_t reserved_13_63:51; + uint64_t dfaset:5; + uint64_t dfalag:1; + uint64_t dfalead:1; + uint64_t dfalock:1; + uint64_t setting:5; + } s; + struct 
cvmx_asxx_rld_setting_cn38xx { + uint64_t reserved_5_63:59; + uint64_t setting:5; + } cn38xx; + struct cvmx_asxx_rld_setting_cn38xx cn38xxp2; + struct cvmx_asxx_rld_setting_s cn58xx; + struct cvmx_asxx_rld_setting_s cn58xxp1; +}; + +union cvmx_asxx_rx_clk_setx { + uint64_t u64; + struct cvmx_asxx_rx_clk_setx_s { + uint64_t reserved_5_63:59; + uint64_t setting:5; + } s; + struct cvmx_asxx_rx_clk_setx_s cn30xx; + struct cvmx_asxx_rx_clk_setx_s cn31xx; + struct cvmx_asxx_rx_clk_setx_s cn38xx; + struct cvmx_asxx_rx_clk_setx_s cn38xxp2; + struct cvmx_asxx_rx_clk_setx_s cn50xx; + struct cvmx_asxx_rx_clk_setx_s cn58xx; + struct cvmx_asxx_rx_clk_setx_s cn58xxp1; +}; + +union cvmx_asxx_rx_prt_en { + uint64_t u64; + struct cvmx_asxx_rx_prt_en_s { + uint64_t reserved_4_63:60; + uint64_t prt_en:4; + } s; + struct cvmx_asxx_rx_prt_en_cn30xx { + uint64_t reserved_3_63:61; + uint64_t prt_en:3; + } cn30xx; + struct cvmx_asxx_rx_prt_en_cn30xx cn31xx; + struct cvmx_asxx_rx_prt_en_s cn38xx; + struct cvmx_asxx_rx_prt_en_s cn38xxp2; + struct cvmx_asxx_rx_prt_en_cn30xx cn50xx; + struct cvmx_asxx_rx_prt_en_s cn58xx; + struct cvmx_asxx_rx_prt_en_s cn58xxp1; +}; + +union cvmx_asxx_rx_wol { + uint64_t u64; + struct cvmx_asxx_rx_wol_s { + uint64_t reserved_2_63:62; + uint64_t status:1; + uint64_t enable:1; + } s; + struct cvmx_asxx_rx_wol_s cn38xx; + struct cvmx_asxx_rx_wol_s cn38xxp2; +}; + +union cvmx_asxx_rx_wol_msk { + uint64_t u64; + struct cvmx_asxx_rx_wol_msk_s { + uint64_t msk:64; + } s; + struct cvmx_asxx_rx_wol_msk_s cn38xx; + struct cvmx_asxx_rx_wol_msk_s cn38xxp2; +}; + +union cvmx_asxx_rx_wol_powok { + uint64_t u64; + struct cvmx_asxx_rx_wol_powok_s { + uint64_t reserved_1_63:63; + uint64_t powerok:1; + } s; + struct cvmx_asxx_rx_wol_powok_s cn38xx; + struct cvmx_asxx_rx_wol_powok_s cn38xxp2; +}; + +union cvmx_asxx_rx_wol_sig { + uint64_t u64; + struct cvmx_asxx_rx_wol_sig_s { + uint64_t reserved_32_63:32; + uint64_t sig:32; + } s; + struct cvmx_asxx_rx_wol_sig_s cn38xx; + struct cvmx_asxx_rx_wol_sig_s cn38xxp2; +}; + +union cvmx_asxx_tx_clk_setx { + uint64_t u64; + struct cvmx_asxx_tx_clk_setx_s { + uint64_t reserved_5_63:59; + uint64_t setting:5; + } s; + struct cvmx_asxx_tx_clk_setx_s cn30xx; + struct cvmx_asxx_tx_clk_setx_s cn31xx; + struct cvmx_asxx_tx_clk_setx_s cn38xx; + struct cvmx_asxx_tx_clk_setx_s cn38xxp2; + struct cvmx_asxx_tx_clk_setx_s cn50xx; + struct cvmx_asxx_tx_clk_setx_s cn58xx; + struct cvmx_asxx_tx_clk_setx_s cn58xxp1; +}; + +union cvmx_asxx_tx_comp_byp { + uint64_t u64; + struct cvmx_asxx_tx_comp_byp_s { + uint64_t reserved_0_63:64; + } s; + struct cvmx_asxx_tx_comp_byp_cn30xx { + uint64_t reserved_9_63:55; + uint64_t bypass:1; + uint64_t pctl:4; + uint64_t nctl:4; + } cn30xx; + struct cvmx_asxx_tx_comp_byp_cn30xx cn31xx; + struct cvmx_asxx_tx_comp_byp_cn38xx { + uint64_t reserved_8_63:56; + uint64_t pctl:4; + uint64_t nctl:4; + } cn38xx; + struct cvmx_asxx_tx_comp_byp_cn38xx cn38xxp2; + struct cvmx_asxx_tx_comp_byp_cn50xx { + uint64_t reserved_17_63:47; + uint64_t bypass:1; + uint64_t reserved_13_15:3; + uint64_t pctl:5; + uint64_t reserved_5_7:3; + uint64_t nctl:5; + } cn50xx; + struct cvmx_asxx_tx_comp_byp_cn58xx { + uint64_t reserved_13_63:51; + uint64_t pctl:5; + uint64_t reserved_5_7:3; + uint64_t nctl:5; + } cn58xx; + struct cvmx_asxx_tx_comp_byp_cn58xx cn58xxp1; +}; + +union cvmx_asxx_tx_hi_waterx { + uint64_t u64; + struct cvmx_asxx_tx_hi_waterx_s { + uint64_t reserved_4_63:60; + uint64_t mark:4; + } s; + struct cvmx_asxx_tx_hi_waterx_cn30xx { + uint64_t reserved_3_63:61; 
+ uint64_t mark:3; + } cn30xx; + struct cvmx_asxx_tx_hi_waterx_cn30xx cn31xx; + struct cvmx_asxx_tx_hi_waterx_s cn38xx; + struct cvmx_asxx_tx_hi_waterx_s cn38xxp2; + struct cvmx_asxx_tx_hi_waterx_cn30xx cn50xx; + struct cvmx_asxx_tx_hi_waterx_s cn58xx; + struct cvmx_asxx_tx_hi_waterx_s cn58xxp1; +}; + +union cvmx_asxx_tx_prt_en { + uint64_t u64; + struct cvmx_asxx_tx_prt_en_s { + uint64_t reserved_4_63:60; + uint64_t prt_en:4; + } s; + struct cvmx_asxx_tx_prt_en_cn30xx { + uint64_t reserved_3_63:61; + uint64_t prt_en:3; + } cn30xx; + struct cvmx_asxx_tx_prt_en_cn30xx cn31xx; + struct cvmx_asxx_tx_prt_en_s cn38xx; + struct cvmx_asxx_tx_prt_en_s cn38xxp2; + struct cvmx_asxx_tx_prt_en_cn30xx cn50xx; + struct cvmx_asxx_tx_prt_en_s cn58xx; + struct cvmx_asxx_tx_prt_en_s cn58xxp1; +}; + +#endif diff --git a/drivers/staging/octeon/cvmx-cmd-queue.c b/drivers/staging/octeon/cvmx-cmd-queue.c new file mode 100644 index 0000000..976227b --- /dev/null +++ b/drivers/staging/octeon/cvmx-cmd-queue.c @@ -0,0 +1,306 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/* + * Support functions for managing command queues used for + * various hardware blocks. + */ + +#include <linux/kernel.h> + +#include <asm/octeon/octeon.h> + +#include "cvmx-config.h" +#include "cvmx-fpa.h" +#include "cvmx-cmd-queue.h" + +#include <asm/octeon/cvmx-npei-defs.h> +#include <asm/octeon/cvmx-pexp-defs.h> +#include "cvmx-pko-defs.h" + +/** + * This application uses this pointer to access the global queue + * state. It points to a bootmem named block. + */ +__cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr; + +/** + * Initialize the Global queue state pointer. 
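+ * The state lives in the bootmem named block "cvmx_cmd_queues" shared
+ * by every application on the chip, so this is safe to call repeatedly;
+ * once the pointer is set, later calls return immediately.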
+ * + * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code + */ +static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void) +{ + char *alloc_name = "cvmx_cmd_queues"; +#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32 + extern uint64_t octeon_reserve32_memory; +#endif + + if (likely(__cvmx_cmd_queue_state_ptr)) + return CVMX_CMD_QUEUE_SUCCESS; + +#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32 + if (octeon_reserve32_memory) + __cvmx_cmd_queue_state_ptr = + cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr), + octeon_reserve32_memory, + octeon_reserve32_memory + + (CONFIG_CAVIUM_RESERVE32 << + 20) - 1, 128, alloc_name); + else +#endif + __cvmx_cmd_queue_state_ptr = + cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr), + 128, + alloc_name); + if (__cvmx_cmd_queue_state_ptr) + memset(__cvmx_cmd_queue_state_ptr, 0, + sizeof(*__cvmx_cmd_queue_state_ptr)); + else { + struct cvmx_bootmem_named_block_desc *block_desc = + cvmx_bootmem_find_named_block(alloc_name); + if (block_desc) + __cvmx_cmd_queue_state_ptr = + cvmx_phys_to_ptr(block_desc->base_addr); + else { + cvmx_dprintf + ("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n", + alloc_name); + return CVMX_CMD_QUEUE_NO_MEMORY; + } + } + return CVMX_CMD_QUEUE_SUCCESS; +} + +/** + * Initialize a command queue for use. The initial FPA buffer is + * allocated and the hardware unit is configured to point to the + * new command queue. + * + * @queue_id: Hardware command queue to initialize. + * @max_depth: Maximum outstanding commands that can be queued. + * @fpa_pool: FPA pool the command queues should come from. + * @pool_size: Size of each buffer in the FPA pool (bytes) + * + * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code + */ +cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id, + int max_depth, int fpa_pool, + int pool_size) +{ + __cvmx_cmd_queue_state_t *qstate; + cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr(); + if (result != CVMX_CMD_QUEUE_SUCCESS) + return result; + + qstate = __cvmx_cmd_queue_get_state(queue_id); + if (qstate == NULL) + return CVMX_CMD_QUEUE_INVALID_PARAM; + + /* + * We artificially limit max_depth to 1<<20 words. It is an + * arbitrary limit. 
+	 */
+	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) {
+		if ((max_depth < 0) || (max_depth > 1 << 20))
+			return CVMX_CMD_QUEUE_INVALID_PARAM;
+	} else if (max_depth != 0)
+		return CVMX_CMD_QUEUE_INVALID_PARAM;
+
+	if ((fpa_pool < 0) || (fpa_pool > 7))
+		return CVMX_CMD_QUEUE_INVALID_PARAM;
+	if ((pool_size < 128) || (pool_size > 65536))
+		return CVMX_CMD_QUEUE_INVALID_PARAM;
+
+	/* See if someone else has already initialized the queue */
+	if (qstate->base_ptr_div128) {
+		if (max_depth != (int)qstate->max_depth) {
+			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
+				"Queue already initialized with different "
+				"max_depth (%d).\n",
+				(int)qstate->max_depth);
+			return CVMX_CMD_QUEUE_INVALID_PARAM;
+		}
+		if (fpa_pool != qstate->fpa_pool) {
+			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
+				"Queue already initialized with different "
+				"FPA pool (%u).\n",
+				qstate->fpa_pool);
+			return CVMX_CMD_QUEUE_INVALID_PARAM;
+		}
+		if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
+			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
+				"Queue already initialized with different "
+				"FPA pool size (%u).\n",
+				(qstate->pool_size_m1 + 1) << 3);
+			return CVMX_CMD_QUEUE_INVALID_PARAM;
+		}
+		CVMX_SYNCWS;
+		return CVMX_CMD_QUEUE_ALREADY_SETUP;
+	} else {
+		union cvmx_fpa_ctl_status status;
+		void *buffer;
+
+		status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
+		if (!status.s.enb) {
+			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
+				     "FPA is not enabled.\n");
+			return CVMX_CMD_QUEUE_NO_MEMORY;
+		}
+		buffer = cvmx_fpa_alloc(fpa_pool);
+		if (buffer == NULL) {
+			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
+				     "Unable to allocate initial buffer.\n");
+			return CVMX_CMD_QUEUE_NO_MEMORY;
+		}
+
+		memset(qstate, 0, sizeof(*qstate));
+		qstate->max_depth = max_depth;
+		qstate->fpa_pool = fpa_pool;
+		qstate->pool_size_m1 = (pool_size >> 3) - 1;
+		qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128;
+		/*
+		 * We zeroed the now_serving field so we need to also
+		 * zero the ticket.
+		 */
+		__cvmx_cmd_queue_state_ptr->
+		    ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
+		CVMX_SYNCWS;
+		return CVMX_CMD_QUEUE_SUCCESS;
+	}
+}
+
+/**
+ * Shut down a queue and free its command buffers to the FPA. The
+ * hardware connected to the queue must be stopped before this
+ * function is called.
+ *
+ * @queue_id: Queue to shutdown
+ *
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
+{
+	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+	if (qptr == NULL) {
+		cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to "
+			     "get queue information.\n");
+		return CVMX_CMD_QUEUE_INVALID_PARAM;
+	}
+
+	if (cvmx_cmd_queue_length(queue_id) > 0) {
+		cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still "
+			     "has data in it.\n");
+		return CVMX_CMD_QUEUE_FULL;
+	}
+
+	__cvmx_cmd_queue_lock(queue_id, qptr);
+	if (qptr->base_ptr_div128) {
+		cvmx_fpa_free(cvmx_phys_to_ptr
+			      ((uint64_t) qptr->base_ptr_div128 << 7),
+			      qptr->fpa_pool, 0);
+		qptr->base_ptr_div128 = 0;
+	}
+	__cvmx_cmd_queue_unlock(qptr);
+
+	return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+/**
+ * Return the number of command words pending in the queue. This
+ * function may be relatively slow for some hardware units.
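+ * (For the PKO queues, for instance, the implementation below performs
+ * a CSR write to CVMX_PKO_REG_READ_IDX followed by a debug-register
+ * read, which is why it can be slow.)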
+ *
+ * @queue_id: Hardware command queue to query
+ *
+ * Returns Number of outstanding commands
+ */
+int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
+{
+	if (CVMX_ENABLE_PARAMETER_CHECKING) {
+		if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
+			return CVMX_CMD_QUEUE_INVALID_PARAM;
+	}
+
+	/*
+	 * The cast is here so gcc will check that all values in the
+	 * cvmx_cmd_queue_id_t enumeration are here.
+	 */
+	switch ((cvmx_cmd_queue_id_t) (queue_id & 0xff0000)) {
+	case CVMX_CMD_QUEUE_PKO_BASE:
+		/*
+		 * FIXME: Need atomic lock on
+		 * CVMX_PKO_REG_READ_IDX. Right now we are normally
+		 * called with the queue lock, so that is a SLIGHT
+		 * amount of protection.
+		 */
+		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
+		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+			union cvmx_pko_mem_debug9 debug9;
+			debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
+			return debug9.cn38xx.doorbell;
+		} else {
+			union cvmx_pko_mem_debug8 debug8;
+			debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
+			return debug8.cn58xx.doorbell;
+		}
+	case CVMX_CMD_QUEUE_ZIP:
+	case CVMX_CMD_QUEUE_DFA:
+	case CVMX_CMD_QUEUE_RAID:
+		/* FIXME: Implement other lengths */
+		return 0;
+	case CVMX_CMD_QUEUE_DMA_BASE:
+		{
+			union cvmx_npei_dmax_counts dmax_counts;
+			dmax_counts.u64 =
+			    cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS
+					  (queue_id & 0x7));
+			return dmax_counts.s.dbell;
+		}
+	case CVMX_CMD_QUEUE_END:
+		return CVMX_CMD_QUEUE_INVALID_PARAM;
+	}
+	return CVMX_CMD_QUEUE_INVALID_PARAM;
+}
+
+/**
+ * Return the command buffer to be written to. The purpose of this
+ * function is to allow CVMX routines access to the low level buffer
+ * for initial hardware setup. User applications should not call this
+ * function directly.
+ *
+ * @queue_id: Command queue to query
+ *
+ * Returns Command buffer or NULL on failure
+ */
+void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
+{
+	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+	if (qptr && qptr->base_ptr_div128)
+		return cvmx_phys_to_ptr((uint64_t) qptr->base_ptr_div128 << 7);
+	else
+		return NULL;
+}
diff --git a/drivers/staging/octeon/cvmx-cmd-queue.h b/drivers/staging/octeon/cvmx-cmd-queue.h
new file mode 100644
index 0000000..f0cb20f
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-cmd-queue.h
@@ -0,0 +1,617 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * Support functions for managing command queues used for
+ * various hardware blocks.
+ *
+ * The common command queue infrastructure abstracts out the
+ * software necessary for adding to Octeon's chained queue
+ * structures. These structures are used for commands to the
+ * PKO, ZIP, DFA, RAID, and DMA engine blocks. Although each
+ * hardware unit takes commands and CSRs of different types,
+ * they all use basic linked command buffers to store the
+ * pending request. In general, users of the CVMX API don't
+ * call cvmx-cmd-queue functions directly. Instead the hardware
+ * unit specific wrapper should be used. The wrappers perform
+ * unit specific validation and CSR writes to submit the
+ * commands.
+ *
+ * Even though most software will never directly interact with
+ * cvmx-cmd-queue, knowledge of its internal workings can help
+ * in diagnosing performance problems and help with debugging.
+ *
+ * Command queue pointers are stored in a global named block
+ * called "cvmx_cmd_queues". Except for the PKO queues, each
+ * hardware queue is stored in its own cache line to reduce SMP
+ * contention on spin locks. The PKO queues are stored such that
+ * every 16th queue is next to each other in memory. This scheme
+ * allows for queues being in separate cache lines when there
+ * are a low number of queues per port. With 16 queues per port,
+ * the first queue for each port is in the same cache area. The
+ * second queues for each port are in another area, etc. This
+ * allows software to implement very efficient lockless PKO with
+ * 16 queues per port using a minimum of cache lines per core.
+ * All queues for a given core will be isolated in the same
+ * cache area.
+ *
+ * In addition to the memory pointer layout, cvmx-cmd-queue
+ * provides an optimized fair ll/sc locking mechanism for the
+ * queues. The lock uses a "ticket / now serving" model to
+ * maintain fair order on contended locks. In addition, it uses
+ * predicted locking time to limit cache contention. When a core
+ * knows it must wait in line for a lock, it spins on the
+ * internal cycle counter to completely eliminate any causes of
+ * bus traffic.
+ *
+ */
+
+#ifndef __CVMX_CMD_QUEUE_H__
+#define __CVMX_CMD_QUEUE_H__
+
+#include <linux/prefetch.h>
+
+#include "cvmx-fpa.h"
+/**
+ * By default we disable the max depth support. Most programs
+ * don't use it and it slows down the command queue processing
+ * significantly.
+ */
+#ifndef CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH
+#define CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH 0
+#endif
+
+/**
+ * Enumeration representing all hardware blocks that use command
+ * queues. Each hardware block has up to 65536 sub identifiers for
+ * multiple command queues. Not all chips support all hardware
+ * units.
+ */
+typedef enum {
+	CVMX_CMD_QUEUE_PKO_BASE = 0x00000,
+
+#define CVMX_CMD_QUEUE_PKO(queue) \
+	((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_PKO_BASE + (0xffff&(queue))))
+
+	CVMX_CMD_QUEUE_ZIP = 0x10000,
+	CVMX_CMD_QUEUE_DFA = 0x20000,
+	CVMX_CMD_QUEUE_RAID = 0x30000,
+	CVMX_CMD_QUEUE_DMA_BASE = 0x40000,
+
+#define CVMX_CMD_QUEUE_DMA(queue) \
+	((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_DMA_BASE + (0xffff&(queue))))
+
+	CVMX_CMD_QUEUE_END = 0x50000,
+} cvmx_cmd_queue_id_t;
+
+/**
+ * Command write operations can fail if the command queue needs
+ * a new buffer and the associated FPA pool is empty. It can also
+ * fail if the number of queued command words reaches the maximum
+ * set at initialization.
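+ *
+ * As a minimal error-handling sketch (the queue and FPA pool chosen
+ * here are illustrative, not mandated by this header):
+ *
+ *	cvmx_cmd_queue_result_t r;
+ *	r = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO(0), 0,
+ *				      CVMX_FPA_OUTPUT_BUFFER_POOL,
+ *				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE);
+ *	if (r != CVMX_CMD_QUEUE_SUCCESS && r != CVMX_CMD_QUEUE_ALREADY_SETUP)
+ *		return -1;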
+ */
+typedef enum {
+	CVMX_CMD_QUEUE_SUCCESS = 0,
+	CVMX_CMD_QUEUE_NO_MEMORY = -1,
+	CVMX_CMD_QUEUE_FULL = -2,
+	CVMX_CMD_QUEUE_INVALID_PARAM = -3,
+	CVMX_CMD_QUEUE_ALREADY_SETUP = -4,
+} cvmx_cmd_queue_result_t;
+
+typedef struct {
+	/* You have the lock when this is your ticket */
+	uint8_t now_serving;
+	uint64_t unused1:24;
+	/* Maximum outstanding command words */
+	uint32_t max_depth;
+	/* FPA pool buffers come from */
+	uint64_t fpa_pool:3;
+	/* Top of command buffer pointer shifted 7 */
+	uint64_t base_ptr_div128:29;
+	uint64_t unused2:6;
+	/* FPA buffer size in 64bit words minus 1 */
+	uint64_t pool_size_m1:13;
+	/* Number of commands already used in buffer */
+	uint64_t index:13;
+} __cvmx_cmd_queue_state_t;
+
+/**
+ * This structure contains the global state of all command queues.
+ * It is stored in a bootmem named block and shared by all
+ * applications running on Octeon. Tickets are stored in a different
+ * cache line than the queue information to reduce the contention on
+ * the ll/sc used to get a ticket. If they shared a line, the update
+ * of queue state would cause the ll/sc to fail quite often.
+ */
+typedef struct {
+	uint64_t ticket[(CVMX_CMD_QUEUE_END >> 16) * 256];
+	__cvmx_cmd_queue_state_t state[(CVMX_CMD_QUEUE_END >> 16) * 256];
+} __cvmx_cmd_queue_all_state_t;
+
+/**
+ * Initialize a command queue for use. The initial FPA buffer is
+ * allocated and the hardware unit is configured to point to the
+ * new command queue.
+ *
+ * @queue_id:  Hardware command queue to initialize.
+ * @max_depth: Maximum outstanding commands that can be queued.
+ * @fpa_pool:  FPA pool the command queues should come from.
+ * @pool_size: Size of each buffer in the FPA pool (bytes)
+ *
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
+						  int max_depth, int fpa_pool,
+						  int pool_size);
+
+/**
+ * Shut down a queue and free its command buffers to the FPA. The
+ * hardware connected to the queue must be stopped before this
+ * function is called.
+ *
+ * @queue_id: Queue to shutdown
+ *
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id);
+
+/**
+ * Return the number of command words pending in the queue. This
+ * function may be relatively slow for some hardware units.
+ *
+ * @queue_id: Hardware command queue to query
+ *
+ * Returns Number of outstanding commands
+ */
+int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id);
+
+/**
+ * Return the command buffer to be written to. The purpose of this
+ * function is to allow CVMX routines access to the low level buffer
+ * for initial hardware setup. User applications should not call this
+ * function directly.
+ *
+ * @queue_id: Command queue to query
+ *
+ * Returns Command buffer or NULL on failure
+ */
+void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id);
+
+/**
+ * Get the index into the state arrays for the supplied queue id.
+ *
+ * @queue_id: Queue ID to get an index for
+ *
+ * Returns Index into the state arrays
+ */
+static inline int __cvmx_cmd_queue_get_index(cvmx_cmd_queue_id_t queue_id)
+{
+	/*
+	 * Warning: This code currently only works with devices that
+	 * have 256 queues or less. Devices with more than 16 queues
+	 * are laid out in memory to allow cores quick access to
+	 * every 16th queue. This reduces cache thrashing when you are
+	 * running 16 queues per port to support lockless operation.
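+	 *
+	 * As a worked example (illustrative only): PKO queue id 0x012
+	 * decodes below to unit 0, queue 1, core 2, and therefore maps
+	 * to index 0 * 256 + 2 * 16 + 1 = 33.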
+	 */
+	int unit = queue_id >> 16;
+	int q = (queue_id >> 4) & 0xf;
+	int core = queue_id & 0xf;
+	return unit * 256 + core * 16 + q;
+}
+
+/**
+ * Lock the supplied queue so nobody else is updating it at the same
+ * time as us.
+ *
+ * @queue_id: Queue ID to lock
+ * @qptr: Pointer to the queue's global state
+ */
+static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id,
+					 __cvmx_cmd_queue_state_t *qptr)
+{
+	extern __cvmx_cmd_queue_all_state_t
+	    *__cvmx_cmd_queue_state_ptr;
+	int tmp;
+	int my_ticket;
+	prefetch(qptr);
+	asm volatile (
+		".set push\n"
+		".set noreorder\n"
+		"1:\n"
+		/* Atomic add one to ticket_ptr */
+		"ll	%[my_ticket], %[ticket_ptr]\n"
+		/* and store the original value */
+		"li	%[ticket], 1\n"
+		/* in my_ticket */
+		"baddu	%[ticket], %[my_ticket]\n"
+		"sc	%[ticket], %[ticket_ptr]\n"
+		"beqz	%[ticket], 1b\n"
+		" nop\n"
+		/* Load the current now_serving ticket */
+		"lbu	%[ticket], %[now_serving]\n"
+		"2:\n"
+		/* Jump out if now_serving == my_ticket */
+		"beq	%[ticket], %[my_ticket], 4f\n"
+		/* Find out how many tickets are in front of me */
+		" subu	%[ticket], %[my_ticket], %[ticket]\n"
+		/* Use tickets in front of me minus one to delay */
+		"subu	%[ticket], 1\n"
+		/* Delay will be ((tickets in front)-1)*32 loops */
+		"cins	%[ticket], %[ticket], 5, 7\n"
+		"3:\n"
+		/* Loop here until our ticket might be up */
+		"bnez	%[ticket], 3b\n"
+		" subu	%[ticket], 1\n"
+		/* Jump back up to check out ticket again */
+		"b	2b\n"
+		/* Load the current now_serving ticket */
+		" lbu	%[ticket], %[now_serving]\n"
+		"4:\n"
+		".set pop\n" :
+		[ticket_ptr] "=m"(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
+		[now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp),
+		[my_ticket] "=r"(my_ticket)
+	    );
+}
+
+/**
+ * Unlock the queue, flushing all writes.
+ *
+ * @qptr: Queue to unlock
+ */
+static inline void __cvmx_cmd_queue_unlock(__cvmx_cmd_queue_state_t *qptr)
+{
+	qptr->now_serving++;
+	CVMX_SYNCWS;
+}
+
+/**
+ * Get the queue state structure for the given queue id
+ *
+ * @queue_id: Queue id to get
+ *
+ * Returns Queue structure or NULL on failure
+ */
+static inline __cvmx_cmd_queue_state_t
+    *__cvmx_cmd_queue_get_state(cvmx_cmd_queue_id_t queue_id)
+{
+	extern __cvmx_cmd_queue_all_state_t
+	    *__cvmx_cmd_queue_state_ptr;
+	return &__cvmx_cmd_queue_state_ptr->
+	    state[__cvmx_cmd_queue_get_index(queue_id)];
+}
+
+/**
+ * Write an arbitrary number of command words to a command queue.
+ * This is a generic function; the fixed number of command word
+ * functions yield higher performance.
+ *
+ * @queue_id: Hardware command queue to write to
+ * @use_locking:
+ *	Use internal locking to ensure exclusive access for queue
+ *	updates. If you don't use this locking you must ensure
+ *	exclusivity some other way. Locking is strongly recommended.
+ * @cmd_count: Number of command words to write
+ * @cmds: Array of commands to write
+ *
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write(cvmx_cmd_queue_id_t
+							   queue_id,
+							   int use_locking,
+							   int cmd_count,
+							   uint64_t *cmds)
+{
+	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+
+	/* Make sure nobody else is updating the same queue */
+	if (likely(use_locking))
+		__cvmx_cmd_queue_lock(queue_id, qptr);
+
+	/*
+	 * If a max queue length was specified then make sure we don't
+	 * exceed it. If any part of the command would be below the
+	 * limit we allow it.
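+	 * (Note that the check below compares the length measured
+	 * before this write, so a burst may briefly overshoot
+	 * max_depth by up to cmd_count - 1 words.)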
+	 */
+	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
+		if (unlikely
+		    (cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
+			if (likely(use_locking))
+				__cvmx_cmd_queue_unlock(qptr);
+			return CVMX_CMD_QUEUE_FULL;
+		}
+	}
+
+	/*
+	 * Normally there is plenty of room in the current buffer for
+	 * the command.
+	 */
+	if (likely(qptr->index + cmd_count < qptr->pool_size_m1)) {
+		uint64_t *ptr =
+		    (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
+						  base_ptr_div128 << 7);
+		ptr += qptr->index;
+		qptr->index += cmd_count;
+		while (cmd_count--)
+			*ptr++ = *cmds++;
+	} else {
+		uint64_t *ptr;
+		int count;
+		/*
+		 * We need a new command buffer. Fail if there isn't
+		 * one available.
+		 */
+		uint64_t *new_buffer =
+		    (uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
+		if (unlikely(new_buffer == NULL)) {
+			if (likely(use_locking))
+				__cvmx_cmd_queue_unlock(qptr);
+			return CVMX_CMD_QUEUE_NO_MEMORY;
+		}
+		ptr =
+		    (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
+						  base_ptr_div128 << 7);
+		/*
+		 * Figure out how many command words will fit in this
+		 * buffer. One location will be needed for the next
+		 * buffer pointer.
+		 */
+		count = qptr->pool_size_m1 - qptr->index;
+		ptr += qptr->index;
+		cmd_count -= count;
+		while (count--)
+			*ptr++ = *cmds++;
+		*ptr = cvmx_ptr_to_phys(new_buffer);
+		/*
+		 * The current buffer is full and has a link to the
+		 * next buffer. Time to write the rest of the commands
+		 * into the new buffer.
+		 */
+		qptr->base_ptr_div128 = *ptr >> 7;
+		qptr->index = cmd_count;
+		ptr = new_buffer;
+		while (cmd_count--)
+			*ptr++ = *cmds++;
+	}
+
+	/* All updates are complete. Release the lock and return */
+	if (likely(use_locking))
+		__cvmx_cmd_queue_unlock(qptr);
+	return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+/**
+ * Simple function to write two command words to a command
+ * queue.
+ *
+ * @queue_id: Hardware command queue to write to
+ * @use_locking:
+ *	Use internal locking to ensure exclusive access for queue
+ *	updates. If you don't use this locking you must ensure
+ *	exclusivity some other way. Locking is strongly recommended.
+ * @cmd1: Command
+ * @cmd2: Command
+ *
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write2(cvmx_cmd_queue_id_t
+							    queue_id,
+							    int use_locking,
+							    uint64_t cmd1,
+							    uint64_t cmd2)
+{
+	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+
+	/* Make sure nobody else is updating the same queue */
+	if (likely(use_locking))
+		__cvmx_cmd_queue_lock(queue_id, qptr);
+
+	/*
+	 * If a max queue length was specified then make sure we don't
+	 * exceed it. If any part of the command would be below the
+	 * limit we allow it.
+	 */
+	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
+		if (unlikely
+		    (cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
+			if (likely(use_locking))
+				__cvmx_cmd_queue_unlock(qptr);
+			return CVMX_CMD_QUEUE_FULL;
+		}
+	}
+
+	/*
+	 * Normally there is plenty of room in the current buffer for
+	 * the command.
+	 */
+	if (likely(qptr->index + 2 < qptr->pool_size_m1)) {
+		uint64_t *ptr =
+		    (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
+						  base_ptr_div128 << 7);
+		ptr += qptr->index;
+		qptr->index += 2;
+		ptr[0] = cmd1;
+		ptr[1] = cmd2;
+	} else {
+		uint64_t *ptr;
+		/*
+		 * Figure out how many command words will fit in this
+		 * buffer. One location will be needed for the next
+		 * buffer pointer.
+		 */
+		int count = qptr->pool_size_m1 - qptr->index;
+		/*
+		 * We need a new command buffer. Fail if there isn't
+		 * one available.
+		 */
+		uint64_t *new_buffer =
+		    (uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
+		if (unlikely(new_buffer == NULL)) {
+			if (likely(use_locking))
+				__cvmx_cmd_queue_unlock(qptr);
+			return CVMX_CMD_QUEUE_NO_MEMORY;
+		}
+		count--;
+		ptr =
+		    (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
+						  base_ptr_div128 << 7);
+		ptr += qptr->index;
+		*ptr++ = cmd1;
+		if (likely(count))
+			*ptr++ = cmd2;
+		*ptr = cvmx_ptr_to_phys(new_buffer);
+		/*
+		 * The current buffer is full and has a link to the
+		 * next buffer. Time to write the rest of the commands
+		 * into the new buffer.
+		 */
+		qptr->base_ptr_div128 = *ptr >> 7;
+		qptr->index = 0;
+		if (unlikely(count == 0)) {
+			qptr->index = 1;
+			new_buffer[0] = cmd2;
+		}
+	}
+
+	/* All updates are complete. Release the lock and return */
+	if (likely(use_locking))
+		__cvmx_cmd_queue_unlock(qptr);
+	return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+/**
+ * Simple function to write three command words to a command
+ * queue.
+ *
+ * @queue_id: Hardware command queue to write to
+ * @use_locking:
+ *	Use internal locking to ensure exclusive access for queue
+ *	updates. If you don't use this locking you must ensure
+ *	exclusivity some other way. Locking is strongly recommended.
+ * @cmd1: Command
+ * @cmd2: Command
+ * @cmd3: Command
+ *
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write3(cvmx_cmd_queue_id_t
+							    queue_id,
+							    int use_locking,
+							    uint64_t cmd1,
+							    uint64_t cmd2,
+							    uint64_t cmd3)
+{
+	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+
+	/* Make sure nobody else is updating the same queue */
+	if (likely(use_locking))
+		__cvmx_cmd_queue_lock(queue_id, qptr);
+
+	/*
+	 * If a max queue length was specified then make sure we don't
+	 * exceed it. If any part of the command would be below the
+	 * limit we allow it.
+	 */
+	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
+		if (unlikely
+		    (cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
+			if (likely(use_locking))
+				__cvmx_cmd_queue_unlock(qptr);
+			return CVMX_CMD_QUEUE_FULL;
+		}
+	}
+
+	/*
+	 * Normally there is plenty of room in the current buffer for
+	 * the command.
+	 */
+	if (likely(qptr->index + 3 < qptr->pool_size_m1)) {
+		uint64_t *ptr =
+		    (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
+						  base_ptr_div128 << 7);
+		ptr += qptr->index;
+		qptr->index += 3;
+		ptr[0] = cmd1;
+		ptr[1] = cmd2;
+		ptr[2] = cmd3;
+	} else {
+		uint64_t *ptr;
+		/*
+		 * Figure out how many command words will fit in this
+		 * buffer. One location will be needed for the next
+		 * buffer pointer
+		 */
+		int count = qptr->pool_size_m1 - qptr->index;
+		/*
+		 * We need a new command buffer. Fail if there isn't
+		 * one available
+		 */
+		uint64_t *new_buffer =
+		    (uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
+		if (unlikely(new_buffer == NULL)) {
+			if (likely(use_locking))
+				__cvmx_cmd_queue_unlock(qptr);
+			return CVMX_CMD_QUEUE_NO_MEMORY;
+		}
+		count--;
+		ptr =
+		    (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
+						  base_ptr_div128 << 7);
+		ptr += qptr->index;
+		*ptr++ = cmd1;
+		if (count) {
+			*ptr++ = cmd2;
+			if (count > 1)
+				*ptr++ = cmd3;
+		}
+		*ptr = cvmx_ptr_to_phys(new_buffer);
+		/*
+		 * The current buffer is full and has a link to the
+		 * next buffer. Time to write the rest of the commands
+		 * into the new buffer.
+		 */
+		qptr->base_ptr_div128 = *ptr >> 7;
+		qptr->index = 0;
+		ptr = new_buffer;
+		if (count == 0) {
+			*ptr++ = cmd2;
+			qptr->index++;
+		}
+		if (count < 2) {
+			*ptr++ = cmd3;
+			qptr->index++;
+		}
+	}
+
+	/* All updates are complete. Release the lock and return */
+	if (likely(use_locking))
+		__cvmx_cmd_queue_unlock(qptr);
+	return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+#endif /* __CVMX_CMD_QUEUE_H__ */
diff --git a/drivers/staging/octeon/cvmx-config.h b/drivers/staging/octeon/cvmx-config.h
new file mode 100644
index 0000000..078a520
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-config.h
@@ -0,0 +1,169 @@
+#ifndef __CVMX_CONFIG_H__
+#define __CVMX_CONFIG_H__
+
+/************************* Config Specific Defines ************************/
+#define CVMX_LLM_NUM_PORTS 1
+#define CVMX_NULL_POINTER_PROTECT 1
+#define CVMX_ENABLE_DEBUG_PRINTS 1
+/* PKO queues per port for interface 0 (ports 0-15) */
+#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 1
+/* PKO queues per port for interface 1 (ports 16-31) */
+#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 1
+/* Limit on the number of PKO ports enabled for interface 0 */
+#define CVMX_PKO_MAX_PORTS_INTERFACE0 CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0
+/* Limit on the number of PKO ports enabled for interface 1 */
+#define CVMX_PKO_MAX_PORTS_INTERFACE1 CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1
+/* PKO queues per port for PCI (ports 32-35) */
+#define CVMX_PKO_QUEUES_PER_PORT_PCI 1
+/* PKO queues per port for Loop devices (ports 36-39) */
+#define CVMX_PKO_QUEUES_PER_PORT_LOOP 1
+
+/************************* FPA allocation *********************************/
+/* Pool sizes in bytes, must be multiple of a cache line */
+#define CVMX_FPA_POOL_0_SIZE (16 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_1_SIZE (1 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_2_SIZE (8 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_3_SIZE (0 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_4_SIZE (0 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_5_SIZE (0 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_6_SIZE (0 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_7_SIZE (0 * CVMX_CACHE_LINE_SIZE)
+
+/* Pools in use */
+/* Packet buffers */
+#define CVMX_FPA_PACKET_POOL (0)
+#define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE
+/* Work queue entries */
+#define CVMX_FPA_WQE_POOL (1)
+#define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE
+/* PKO queue command buffers */
+#define CVMX_FPA_OUTPUT_BUFFER_POOL (2)
+#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE CVMX_FPA_POOL_2_SIZE
+
+/************************* FAU allocation ********************************/
+/* The fetch and add registers are allocated here. They are arranged
+ * in order of descending size so that all alignment constraints are
+ * automatically met. The enums are linked so that the following enum
+ * continues allocating where the previous one left off, so the
+ * numbering within each enum always starts with zero. The macros
+ * take care of the address increment size, so the values entered
+ * always increase by 1. FAU registers are accessed with byte
+ * addresses.
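+ *
+ * For example (CVMX_FAU_REG_MY_COUNTER is hypothetical, not a register
+ * this file allocates), adding one 64-bit register would look like the
+ * lines below, and the later enums shift up automatically:
+ *
+ *	CVMX_FAU_REG_64_START	= 0,
+ *	CVMX_FAU_REG_MY_COUNTER	= CVMX_FAU_REG_64_ADDR(0),
+ *	CVMX_FAU_REG_64_END	= CVMX_FAU_REG_64_ADDR(1),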
+ */ + +#define CVMX_FAU_REG_64_ADDR(x) ((x << 3) + CVMX_FAU_REG_64_START) +typedef enum { + CVMX_FAU_REG_64_START = 0, + CVMX_FAU_REG_64_END = CVMX_FAU_REG_64_ADDR(0), +} cvmx_fau_reg_64_t; + +#define CVMX_FAU_REG_32_ADDR(x) ((x << 2) + CVMX_FAU_REG_32_START) +typedef enum { + CVMX_FAU_REG_32_START = CVMX_FAU_REG_64_END, + CVMX_FAU_REG_32_END = CVMX_FAU_REG_32_ADDR(0), +} cvmx_fau_reg_32_t; + +#define CVMX_FAU_REG_16_ADDR(x) ((x << 1) + CVMX_FAU_REG_16_START) +typedef enum { + CVMX_FAU_REG_16_START = CVMX_FAU_REG_32_END, + CVMX_FAU_REG_16_END = CVMX_FAU_REG_16_ADDR(0), +} cvmx_fau_reg_16_t; + +#define CVMX_FAU_REG_8_ADDR(x) ((x) + CVMX_FAU_REG_8_START) +typedef enum { + CVMX_FAU_REG_8_START = CVMX_FAU_REG_16_END, + CVMX_FAU_REG_8_END = CVMX_FAU_REG_8_ADDR(0), +} cvmx_fau_reg_8_t; + +/* + * The name CVMX_FAU_REG_AVAIL_BASE is provided to indicate the first + * available FAU address that is not allocated in cvmx-config.h. This + * is 64 bit aligned. + */ +#define CVMX_FAU_REG_AVAIL_BASE ((CVMX_FAU_REG_8_END + 0x7) & (~0x7ULL)) +#define CVMX_FAU_REG_END (2048) + +/********************** scratch memory allocation *************************/ +/* Scratchpad memory allocation. Note that these are byte memory + * addresses. Some uses of scratchpad (IOBDMA for example) require + * the use of 8-byte aligned addresses, so proper alignment needs to + * be taken into account. + */ +/* Generic scratch iobdma area */ +#define CVMX_SCR_SCRATCH (0) +/* First location available after cvmx-config.h allocated region. */ +#define CVMX_SCR_REG_AVAIL_BASE (8) + +/* + * CVMX_HELPER_FIRST_MBUFF_SKIP is the number of bytes to reserve + * before the beginning of the packet. If necessary, override the + * default here. See the IPD section of the hardware manual for MBUFF + * SKIP details. + */ +#define CVMX_HELPER_FIRST_MBUFF_SKIP 184 + +/* + * CVMX_HELPER_NOT_FIRST_MBUFF_SKIP is the number of bytes to reserve + * in each chained packet element. If necessary, override the default + * here. + */ +#define CVMX_HELPER_NOT_FIRST_MBUFF_SKIP 0 + +/* + * CVMX_HELPER_ENABLE_BACK_PRESSURE controls whether back pressure is + * enabled for all input ports. This controls if IPD sends + * backpressure to all ports if Octeon's FPA pools don't have enough + * packet or work queue entries. Even when this is off, it is still + * possible to get backpressure from individual hardware ports. When + * configuring backpressure, also check + * CVMX_HELPER_DISABLE_*_BACKPRESSURE below. If necessary, override + * the default here. + */ +#define CVMX_HELPER_ENABLE_BACK_PRESSURE 1 + +/* + * CVMX_HELPER_ENABLE_IPD controls if the IPD is enabled in the helper + * function. Once it is enabled the hardware starts accepting + * packets. You might want to skip the IPD enable if configuration + * changes are need from the default helper setup. If necessary, + * override the default here. + */ +#define CVMX_HELPER_ENABLE_IPD 0 + +/* + * CVMX_HELPER_INPUT_TAG_TYPE selects the type of tag that the IPD assigns + * to incoming packets. 
+ */ +#define CVMX_HELPER_INPUT_TAG_TYPE CVMX_POW_TAG_TYPE_ORDERED + +#define CVMX_ENABLE_PARAMETER_CHECKING 0 + +/* + * The following select which fields are used by the PIP to generate + * the tag on INPUT + * 0: don't include + * 1: include + */ +#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_IP 0 +#define CVMX_HELPER_INPUT_TAG_IPV6_DST_IP 0 +#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT 0 +#define CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT 0 +#define CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER 0 +#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_IP 0 +#define CVMX_HELPER_INPUT_TAG_IPV4_DST_IP 0 +#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT 0 +#define CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT 0 +#define CVMX_HELPER_INPUT_TAG_IPV4_PROTOCOL 0 +#define CVMX_HELPER_INPUT_TAG_INPUT_PORT 1 + +/* Select skip mode for input ports */ +#define CVMX_HELPER_INPUT_PORT_SKIP_MODE CVMX_PIP_PORT_CFG_MODE_SKIPL2 + +/* + * Force backpressure to be disabled. This overrides all other + * backpressure configuration. + */ +#define CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE 0 + +#endif /* __CVMX_CONFIG_H__ */ + diff --git a/drivers/staging/octeon/cvmx-dbg-defs.h b/drivers/staging/octeon/cvmx-dbg-defs.h new file mode 100644 index 0000000..abbf42d --- /dev/null +++ b/drivers/staging/octeon/cvmx-dbg-defs.h @@ -0,0 +1,72 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. 
+ * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_DBG_DEFS_H__ +#define __CVMX_DBG_DEFS_H__ + +#define CVMX_DBG_DATA \ + CVMX_ADD_IO_SEG(0x00011F00000001E8ull) + +union cvmx_dbg_data { + uint64_t u64; + struct cvmx_dbg_data_s { + uint64_t reserved_23_63:41; + uint64_t c_mul:5; + uint64_t dsel_ext:1; + uint64_t data:17; + } s; + struct cvmx_dbg_data_cn30xx { + uint64_t reserved_31_63:33; + uint64_t pll_mul:3; + uint64_t reserved_23_27:5; + uint64_t c_mul:5; + uint64_t dsel_ext:1; + uint64_t data:17; + } cn30xx; + struct cvmx_dbg_data_cn30xx cn31xx; + struct cvmx_dbg_data_cn38xx { + uint64_t reserved_29_63:35; + uint64_t d_mul:4; + uint64_t dclk_mul2:1; + uint64_t cclk_div2:1; + uint64_t c_mul:5; + uint64_t dsel_ext:1; + uint64_t data:17; + } cn38xx; + struct cvmx_dbg_data_cn38xx cn38xxp2; + struct cvmx_dbg_data_cn30xx cn50xx; + struct cvmx_dbg_data_cn58xx { + uint64_t reserved_29_63:35; + uint64_t rem:6; + uint64_t c_mul:5; + uint64_t dsel_ext:1; + uint64_t data:17; + } cn58xx; + struct cvmx_dbg_data_cn58xx cn58xxp1; +}; + +#endif diff --git a/drivers/staging/octeon/cvmx-fau.h b/drivers/staging/octeon/cvmx-fau.h new file mode 100644 index 0000000..29bdce6 --- /dev/null +++ b/drivers/staging/octeon/cvmx-fau.h @@ -0,0 +1,597 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/* + * Interface to the hardware Fetch and Add Unit. + */ + +#ifndef __CVMX_FAU_H__ +#define __CVMX_FAU_H__ + +/* + * Octeon Fetch and Add Unit (FAU) + */ + +#define CVMX_FAU_LOAD_IO_ADDRESS cvmx_build_io_address(0x1e, 0) +#define CVMX_FAU_BITS_SCRADDR 63, 56 +#define CVMX_FAU_BITS_LEN 55, 48 +#define CVMX_FAU_BITS_INEVAL 35, 14 +#define CVMX_FAU_BITS_TAGWAIT 13, 13 +#define CVMX_FAU_BITS_NOADD 13, 13 +#define CVMX_FAU_BITS_SIZE 12, 11 +#define CVMX_FAU_BITS_REGISTER 10, 0 + +typedef enum { + CVMX_FAU_OP_SIZE_8 = 0, + CVMX_FAU_OP_SIZE_16 = 1, + CVMX_FAU_OP_SIZE_32 = 2, + CVMX_FAU_OP_SIZE_64 = 3 +} cvmx_fau_op_size_t; + +/** + * Tagwait return definition. If a timeout occurs, the error + * bit will be set. Otherwise the value of the register before + * the update will be returned. + */ +typedef struct { + uint64_t error:1; + int64_t value:63; +} cvmx_fau_tagwait64_t; + +/** + * Tagwait return definition. If a timeout occurs, the error + * bit will be set. Otherwise the value of the register before + * the update will be returned. 
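+ *
+ * A usage sketch (the register argument here is a placeholder, not an
+ * allocation made by this header):
+ *
+ *	cvmx_fau_tagwait32_t r =
+ *		cvmx_fau_tagwait_fetch_and_add32(CVMX_FAU_REG_32_START, 1);
+ *	if (!r.error)
+ *		old_value = r.value;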
+ */ +typedef struct { + uint64_t error:1; + int32_t value:31; +} cvmx_fau_tagwait32_t; + +/** + * Tagwait return definition. If a timeout occurs, the error + * bit will be set. Otherwise the value of the register before + * the update will be returned. + */ +typedef struct { + uint64_t error:1; + int16_t value:15; +} cvmx_fau_tagwait16_t; + +/** + * Tagwait return definition. If a timeout occurs, the error + * bit will be set. Otherwise the value of the register before + * the update will be returned. + */ +typedef struct { + uint64_t error:1; + int8_t value:7; +} cvmx_fau_tagwait8_t; + +/** + * Asynchronous tagwait return definition. If a timeout occurs, + * the error bit will be set. Otherwise the value of the + * register before the update will be returned. + */ +typedef union { + uint64_t u64; + struct { + uint64_t invalid:1; + uint64_t data:63; /* unpredictable if invalid is set */ + } s; +} cvmx_fau_async_tagwait_result_t; + +/** + * Builds a store I/O address for writing to the FAU + * + * @noadd: 0 = Store value is atomically added to the current value + * 1 = Store value is atomically written over the current value + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * - Step by 2 for 16 bit access. + * - Step by 4 for 32 bit access. + * - Step by 8 for 64 bit access. + * Returns Address to store for atomic update + */ +static inline uint64_t __cvmx_fau_store_address(uint64_t noadd, uint64_t reg) +{ + return CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) | + cvmx_build_bits(CVMX_FAU_BITS_NOADD, noadd) | + cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg); +} + +/** + * Builds a I/O address for accessing the FAU + * + * @tagwait: Should the atomic add wait for the current tag switch + * operation to complete. + * - 0 = Don't wait + * - 1 = Wait for tag switch to complete + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * - Step by 2 for 16 bit access. + * - Step by 4 for 32 bit access. + * - Step by 8 for 64 bit access. + * @value: Signed value to add. + * Note: When performing 32 and 64 bit access, only the low + * 22 bits are available. + * Returns Address to read from for atomic update + */ +static inline uint64_t __cvmx_fau_atomic_address(uint64_t tagwait, uint64_t reg, + int64_t value) +{ + return CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) | + cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) | + cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) | + cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg); +} + +/** + * Perform an atomic 64 bit add + * + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * - Step by 8 for 64 bit access. + * @value: Signed value to add. + * Note: Only the low 22 bits are available. + * Returns Value of the register before the update + */ +static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg, + int64_t value) +{ + return cvmx_read64_int64(__cvmx_fau_atomic_address(0, reg, value)); +} + +/** + * Perform an atomic 32 bit add + * + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * - Step by 4 for 32 bit access. + * @value: Signed value to add. + * Note: Only the low 22 bits are available. + * Returns Value of the register before the update + */ +static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg, + int32_t value) +{ + return cvmx_read64_int32(__cvmx_fau_atomic_address(0, reg, value)); +} + +/** + * Perform an atomic 16 bit add + * + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * - Step by 2 for 16 bit access. + * @value: Signed value to add. 
+ * Returns Value of the register before the update + */ +static inline int16_t cvmx_fau_fetch_and_add16(cvmx_fau_reg_16_t reg, + int16_t value) +{ + return cvmx_read64_int16(__cvmx_fau_atomic_address(0, reg, value)); +} + +/** + * Perform an atomic 8 bit add + * + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * @value: Signed value to add. + * Returns Value of the register before the update + */ +static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value) +{ + return cvmx_read64_int8(__cvmx_fau_atomic_address(0, reg, value)); +} + +/** + * Perform an atomic 64 bit add after the current tag switch + * completes + * + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * - Step by 8 for 64 bit access. + * @value: Signed value to add. + * Note: Only the low 22 bits are available. + * Returns If a timeout occurs, the error bit will be set. Otherwise + * the value of the register before the update will be + * returned + */ +static inline cvmx_fau_tagwait64_t +cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value) +{ + union { + uint64_t i64; + cvmx_fau_tagwait64_t t; + } result; + result.i64 = + cvmx_read64_int64(__cvmx_fau_atomic_address(1, reg, value)); + return result.t; +} + +/** + * Perform an atomic 32 bit add after the current tag switch + * completes + * + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * - Step by 4 for 32 bit access. + * @value: Signed value to add. + * Note: Only the low 22 bits are available. + * Returns If a timeout occurs, the error bit will be set. Otherwise + * the value of the register before the update will be + * returned + */ +static inline cvmx_fau_tagwait32_t +cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value) +{ + union { + uint64_t i32; + cvmx_fau_tagwait32_t t; + } result; + result.i32 = + cvmx_read64_int32(__cvmx_fau_atomic_address(1, reg, value)); + return result.t; +} + +/** + * Perform an atomic 16 bit add after the current tag switch + * completes + * + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * - Step by 2 for 16 bit access. + * @value: Signed value to add. + * Returns If a timeout occurs, the error bit will be set. Otherwise + * the value of the register before the update will be + * returned + */ +static inline cvmx_fau_tagwait16_t +cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value) +{ + union { + uint64_t i16; + cvmx_fau_tagwait16_t t; + } result; + result.i16 = + cvmx_read64_int16(__cvmx_fau_atomic_address(1, reg, value)); + return result.t; +} + +/** + * Perform an atomic 8 bit add after the current tag switch + * completes + * + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * @value: Signed value to add. + * Returns If a timeout occurs, the error bit will be set. Otherwise + * the value of the register before the update will be + * returned + */ +static inline cvmx_fau_tagwait8_t +cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value) +{ + union { + uint64_t i8; + cvmx_fau_tagwait8_t t; + } result; + result.i8 = cvmx_read64_int8(__cvmx_fau_atomic_address(1, reg, value)); + return result.t; +} + +/** + * Builds I/O data for async operations + * + * @scraddr: Scratch pad byte addres to write to. Must be 8 byte aligned + * @value: Signed value to add. + * Note: When performing 32 and 64 bit access, only the low + * 22 bits are available. + * @tagwait: Should the atomic add wait for the current tag switch + * operation to complete. 
+ * - 0 = Don't wait + * - 1 = Wait for tag switch to complete + * @size: The size of the operation: + * - CVMX_FAU_OP_SIZE_8 (0) = 8 bits + * - CVMX_FAU_OP_SIZE_16 (1) = 16 bits + * - CVMX_FAU_OP_SIZE_32 (2) = 32 bits + * - CVMX_FAU_OP_SIZE_64 (3) = 64 bits + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * - Step by 2 for 16 bit access. + * - Step by 4 for 32 bit access. + * - Step by 8 for 64 bit access. + * Returns Data to write using cvmx_send_single + */ +static inline uint64_t __cvmx_fau_iobdma_data(uint64_t scraddr, int64_t value, + uint64_t tagwait, + cvmx_fau_op_size_t size, + uint64_t reg) +{ + return CVMX_FAU_LOAD_IO_ADDRESS | + cvmx_build_bits(CVMX_FAU_BITS_SCRADDR, scraddr >> 3) | + cvmx_build_bits(CVMX_FAU_BITS_LEN, 1) | + cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) | + cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) | + cvmx_build_bits(CVMX_FAU_BITS_SIZE, size) | + cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg); +} + +/** + * Perform an async atomic 64 bit add. The old value is + * placed in the scratch memory at byte address scraddr. + * + * @scraddr: Scratch memory byte address to put response in. + * Must be 8 byte aligned. + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * - Step by 8 for 64 bit access. + * @value: Signed value to add. + * Note: Only the low 22 bits are available. + * Returns Placed in the scratch pad register + */ +static inline void cvmx_fau_async_fetch_and_add64(uint64_t scraddr, + cvmx_fau_reg_64_t reg, + int64_t value) +{ + cvmx_send_single(__cvmx_fau_iobdma_data + (scraddr, value, 0, CVMX_FAU_OP_SIZE_64, reg)); +} + +/** + * Perform an async atomic 32 bit add. The old value is + * placed in the scratch memory at byte address scraddr. + * + * @scraddr: Scratch memory byte address to put response in. + * Must be 8 byte aligned. + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * - Step by 4 for 32 bit access. + * @value: Signed value to add. + * Note: Only the low 22 bits are available. + * Returns Placed in the scratch pad register + */ +static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr, + cvmx_fau_reg_32_t reg, + int32_t value) +{ + cvmx_send_single(__cvmx_fau_iobdma_data + (scraddr, value, 0, CVMX_FAU_OP_SIZE_32, reg)); +} + +/** + * Perform an async atomic 16 bit add. The old value is + * placed in the scratch memory at byte address scraddr. + * + * @scraddr: Scratch memory byte address to put response in. + * Must be 8 byte aligned. + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * - Step by 2 for 16 bit access. + * @value: Signed value to add. + * Returns Placed in the scratch pad register + */ +static inline void cvmx_fau_async_fetch_and_add16(uint64_t scraddr, + cvmx_fau_reg_16_t reg, + int16_t value) +{ + cvmx_send_single(__cvmx_fau_iobdma_data + (scraddr, value, 0, CVMX_FAU_OP_SIZE_16, reg)); +} + +/** + * Perform an async atomic 8 bit add. The old value is + * placed in the scratch memory at byte address scraddr. + * + * @scraddr: Scratch memory byte address to put response in. + * Must be 8 byte aligned. + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * @value: Signed value to add. + * Returns Placed in the scratch pad register + */ +static inline void cvmx_fau_async_fetch_and_add8(uint64_t scraddr, + cvmx_fau_reg_8_t reg, + int8_t value) +{ + cvmx_send_single(__cvmx_fau_iobdma_data + (scraddr, value, 0, CVMX_FAU_OP_SIZE_8, reg)); +} + +/** + * Perform an async atomic 64 bit add after the current tag + * switch completes. 
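+ * The add is deferred until the pending tag switch finishes; the old
+ * value is then written to the scratchpad. Callers typically read it
+ * back (e.g. with cvmx_scratch_read64() from cvmx-scratch.h) and test
+ * bit 63 before trusting the result (see
+ * cvmx_fau_async_tagwait_result_t above).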
+ * + * @scraddr: Scratch memory byte address to put response in. Must be + * 8 byte aligned. If a timeout occurs, the error bit (63) + * will be set. Otherwise the value of the register before + * the update will be returned + * + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * - Step by 8 for 64 bit access. + * @value: Signed value to add. + * Note: Only the low 22 bits are available. + * Returns Placed in the scratch pad register + */ +static inline void cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr, + cvmx_fau_reg_64_t reg, + int64_t value) +{ + cvmx_send_single(__cvmx_fau_iobdma_data + (scraddr, value, 1, CVMX_FAU_OP_SIZE_64, reg)); +} + +/** + * Perform an async atomic 32 bit add after the current tag + * switch completes. + * + * @scraddr: Scratch memory byte address to put response in. Must be + * 8 byte aligned. If a timeout occurs, the error bit (63) + * will be set. Otherwise the value of the register before + * the update will be returned + * + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * - Step by 4 for 32 bit access. + * @value: Signed value to add. + * Note: Only the low 22 bits are available. + * Returns Placed in the scratch pad register + */ +static inline void cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr, + cvmx_fau_reg_32_t reg, + int32_t value) +{ + cvmx_send_single(__cvmx_fau_iobdma_data + (scraddr, value, 1, CVMX_FAU_OP_SIZE_32, reg)); +} + +/** + * Perform an async atomic 16 bit add after the current tag + * switch completes. + * + * @scraddr: Scratch memory byte address to put response in. Must be + * 8 byte aligned. If a timeout occurs, the error bit (63) + * will be set. Otherwise the value of the register before + * the update will be returned + * + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * - Step by 2 for 16 bit access. + * @value: Signed value to add. + * + * Returns Placed in the scratch pad register + */ +static inline void cvmx_fau_async_tagwait_fetch_and_add16(uint64_t scraddr, + cvmx_fau_reg_16_t reg, + int16_t value) +{ + cvmx_send_single(__cvmx_fau_iobdma_data + (scraddr, value, 1, CVMX_FAU_OP_SIZE_16, reg)); +} + +/** + * Perform an async atomic 8 bit add after the current tag + * switch completes. + * + * @scraddr: Scratch memory byte address to put response in. Must be + * 8 byte aligned. If a timeout occurs, the error bit (63) + * will be set. Otherwise the value of the register before + * the update will be returned + * + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * @value: Signed value to add. + * + * Returns Placed in the scratch pad register + */ +static inline void cvmx_fau_async_tagwait_fetch_and_add8(uint64_t scraddr, + cvmx_fau_reg_8_t reg, + int8_t value) +{ + cvmx_send_single(__cvmx_fau_iobdma_data + (scraddr, value, 1, CVMX_FAU_OP_SIZE_8, reg)); +} + +/** + * Perform an atomic 64 bit add + * + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * - Step by 8 for 64 bit access. + * @value: Signed value to add. + */ +static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value) +{ + cvmx_write64_int64(__cvmx_fau_store_address(0, reg), value); +} + +/** + * Perform an atomic 32 bit add + * + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * - Step by 4 for 32 bit access. + * @value: Signed value to add. 
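+ *          Note: this is a fire-and-forget store; use
+ *          cvmx_fau_fetch_and_add32() when the value before the update
+ *          is needed.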
+ */ +static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value) +{ + cvmx_write64_int32(__cvmx_fau_store_address(0, reg), value); +} + +/** + * Perform an atomic 16 bit add + * + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * - Step by 2 for 16 bit access. + * @value: Signed value to add. + */ +static inline void cvmx_fau_atomic_add16(cvmx_fau_reg_16_t reg, int16_t value) +{ + cvmx_write64_int16(__cvmx_fau_store_address(0, reg), value); +} + +/** + * Perform an atomic 8 bit add + * + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * @value: Signed value to add. + */ +static inline void cvmx_fau_atomic_add8(cvmx_fau_reg_8_t reg, int8_t value) +{ + cvmx_write64_int8(__cvmx_fau_store_address(0, reg), value); +} + +/** + * Perform an atomic 64 bit write + * + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * - Step by 8 for 64 bit access. + * @value: Signed value to write. + */ +static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value) +{ + cvmx_write64_int64(__cvmx_fau_store_address(1, reg), value); +} + +/** + * Perform an atomic 32 bit write + * + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * - Step by 4 for 32 bit access. + * @value: Signed value to write. + */ +static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value) +{ + cvmx_write64_int32(__cvmx_fau_store_address(1, reg), value); +} + +/** + * Perform an atomic 16 bit write + * + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * - Step by 2 for 16 bit access. + * @value: Signed value to write. + */ +static inline void cvmx_fau_atomic_write16(cvmx_fau_reg_16_t reg, int16_t value) +{ + cvmx_write64_int16(__cvmx_fau_store_address(1, reg), value); +} + +/** + * Perform an atomic 8 bit write + * + * @reg: FAU atomic register to access. 0 <= reg < 2048. + * @value: Signed value to write. + */ +static inline void cvmx_fau_atomic_write8(cvmx_fau_reg_8_t reg, int8_t value) +{ + cvmx_write64_int8(__cvmx_fau_store_address(1, reg), value); +} + +#endif /* __CVMX_FAU_H__ */ diff --git a/drivers/staging/octeon/cvmx-fpa-defs.h b/drivers/staging/octeon/cvmx-fpa-defs.h new file mode 100644 index 0000000..bf5546b --- /dev/null +++ b/drivers/staging/octeon/cvmx-fpa-defs.h @@ -0,0 +1,403 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. 
+ * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_FPA_DEFS_H__ +#define __CVMX_FPA_DEFS_H__ + +#define CVMX_FPA_BIST_STATUS \ + CVMX_ADD_IO_SEG(0x00011800280000E8ull) +#define CVMX_FPA_CTL_STATUS \ + CVMX_ADD_IO_SEG(0x0001180028000050ull) +#define CVMX_FPA_FPF0_MARKS \ + CVMX_ADD_IO_SEG(0x0001180028000000ull) +#define CVMX_FPA_FPF0_SIZE \ + CVMX_ADD_IO_SEG(0x0001180028000058ull) +#define CVMX_FPA_FPF1_MARKS \ + CVMX_ADD_IO_SEG(0x0001180028000008ull) +#define CVMX_FPA_FPF2_MARKS \ + CVMX_ADD_IO_SEG(0x0001180028000010ull) +#define CVMX_FPA_FPF3_MARKS \ + CVMX_ADD_IO_SEG(0x0001180028000018ull) +#define CVMX_FPA_FPF4_MARKS \ + CVMX_ADD_IO_SEG(0x0001180028000020ull) +#define CVMX_FPA_FPF5_MARKS \ + CVMX_ADD_IO_SEG(0x0001180028000028ull) +#define CVMX_FPA_FPF6_MARKS \ + CVMX_ADD_IO_SEG(0x0001180028000030ull) +#define CVMX_FPA_FPF7_MARKS \ + CVMX_ADD_IO_SEG(0x0001180028000038ull) +#define CVMX_FPA_FPFX_MARKS(offset) \ + CVMX_ADD_IO_SEG(0x0001180028000008ull + (((offset) & 7) * 8) - 8 * 1) +#define CVMX_FPA_FPFX_SIZE(offset) \ + CVMX_ADD_IO_SEG(0x0001180028000060ull + (((offset) & 7) * 8) - 8 * 1) +#define CVMX_FPA_INT_ENB \ + CVMX_ADD_IO_SEG(0x0001180028000048ull) +#define CVMX_FPA_INT_SUM \ + CVMX_ADD_IO_SEG(0x0001180028000040ull) +#define CVMX_FPA_QUE0_PAGE_INDEX \ + CVMX_ADD_IO_SEG(0x00011800280000F0ull) +#define CVMX_FPA_QUE1_PAGE_INDEX \ + CVMX_ADD_IO_SEG(0x00011800280000F8ull) +#define CVMX_FPA_QUE2_PAGE_INDEX \ + CVMX_ADD_IO_SEG(0x0001180028000100ull) +#define CVMX_FPA_QUE3_PAGE_INDEX \ + CVMX_ADD_IO_SEG(0x0001180028000108ull) +#define CVMX_FPA_QUE4_PAGE_INDEX \ + CVMX_ADD_IO_SEG(0x0001180028000110ull) +#define CVMX_FPA_QUE5_PAGE_INDEX \ + CVMX_ADD_IO_SEG(0x0001180028000118ull) +#define CVMX_FPA_QUE6_PAGE_INDEX \ + CVMX_ADD_IO_SEG(0x0001180028000120ull) +#define CVMX_FPA_QUE7_PAGE_INDEX \ + CVMX_ADD_IO_SEG(0x0001180028000128ull) +#define CVMX_FPA_QUEX_AVAILABLE(offset) \ + CVMX_ADD_IO_SEG(0x0001180028000098ull + (((offset) & 7) * 8)) +#define CVMX_FPA_QUEX_PAGE_INDEX(offset) \ + CVMX_ADD_IO_SEG(0x00011800280000F0ull + (((offset) & 7) * 8)) +#define CVMX_FPA_QUE_ACT \ + CVMX_ADD_IO_SEG(0x0001180028000138ull) +#define CVMX_FPA_QUE_EXP \ + CVMX_ADD_IO_SEG(0x0001180028000130ull) +#define CVMX_FPA_WART_CTL \ + CVMX_ADD_IO_SEG(0x00011800280000D8ull) +#define CVMX_FPA_WART_STATUS \ + CVMX_ADD_IO_SEG(0x00011800280000E0ull) + +union cvmx_fpa_bist_status { + uint64_t u64; + struct cvmx_fpa_bist_status_s { + uint64_t reserved_5_63:59; + uint64_t frd:1; + uint64_t fpf0:1; + uint64_t fpf1:1; + uint64_t ffr:1; + uint64_t fdr:1; + } s; + struct cvmx_fpa_bist_status_s cn30xx; + struct cvmx_fpa_bist_status_s cn31xx; + struct cvmx_fpa_bist_status_s cn38xx; + struct cvmx_fpa_bist_status_s cn38xxp2; + struct cvmx_fpa_bist_status_s cn50xx; + struct cvmx_fpa_bist_status_s cn52xx; + struct cvmx_fpa_bist_status_s cn52xxp1; + struct cvmx_fpa_bist_status_s cn56xx; + struct cvmx_fpa_bist_status_s cn56xxp1; + struct cvmx_fpa_bist_status_s cn58xx; + struct cvmx_fpa_bist_status_s cn58xxp1; +}; + +union cvmx_fpa_ctl_status { + uint64_t u64; + struct cvmx_fpa_ctl_status_s { + uint64_t reserved_18_63:46; + uint64_t reset:1; + uint64_t use_ldt:1; + uint64_t use_stt:1; + uint64_t enb:1; + uint64_t mem1_err:7; + uint64_t mem0_err:7; + } s; + struct cvmx_fpa_ctl_status_s cn30xx; + struct cvmx_fpa_ctl_status_s cn31xx; + struct cvmx_fpa_ctl_status_s cn38xx; + struct cvmx_fpa_ctl_status_s cn38xxp2; + struct cvmx_fpa_ctl_status_s cn50xx; 
+ struct cvmx_fpa_ctl_status_s cn52xx; + struct cvmx_fpa_ctl_status_s cn52xxp1; + struct cvmx_fpa_ctl_status_s cn56xx; + struct cvmx_fpa_ctl_status_s cn56xxp1; + struct cvmx_fpa_ctl_status_s cn58xx; + struct cvmx_fpa_ctl_status_s cn58xxp1; +}; + +union cvmx_fpa_fpfx_marks { + uint64_t u64; + struct cvmx_fpa_fpfx_marks_s { + uint64_t reserved_22_63:42; + uint64_t fpf_wr:11; + uint64_t fpf_rd:11; + } s; + struct cvmx_fpa_fpfx_marks_s cn38xx; + struct cvmx_fpa_fpfx_marks_s cn38xxp2; + struct cvmx_fpa_fpfx_marks_s cn56xx; + struct cvmx_fpa_fpfx_marks_s cn56xxp1; + struct cvmx_fpa_fpfx_marks_s cn58xx; + struct cvmx_fpa_fpfx_marks_s cn58xxp1; +}; + +union cvmx_fpa_fpfx_size { + uint64_t u64; + struct cvmx_fpa_fpfx_size_s { + uint64_t reserved_11_63:53; + uint64_t fpf_siz:11; + } s; + struct cvmx_fpa_fpfx_size_s cn38xx; + struct cvmx_fpa_fpfx_size_s cn38xxp2; + struct cvmx_fpa_fpfx_size_s cn56xx; + struct cvmx_fpa_fpfx_size_s cn56xxp1; + struct cvmx_fpa_fpfx_size_s cn58xx; + struct cvmx_fpa_fpfx_size_s cn58xxp1; +}; + +union cvmx_fpa_fpf0_marks { + uint64_t u64; + struct cvmx_fpa_fpf0_marks_s { + uint64_t reserved_24_63:40; + uint64_t fpf_wr:12; + uint64_t fpf_rd:12; + } s; + struct cvmx_fpa_fpf0_marks_s cn38xx; + struct cvmx_fpa_fpf0_marks_s cn38xxp2; + struct cvmx_fpa_fpf0_marks_s cn56xx; + struct cvmx_fpa_fpf0_marks_s cn56xxp1; + struct cvmx_fpa_fpf0_marks_s cn58xx; + struct cvmx_fpa_fpf0_marks_s cn58xxp1; +}; + +union cvmx_fpa_fpf0_size { + uint64_t u64; + struct cvmx_fpa_fpf0_size_s { + uint64_t reserved_12_63:52; + uint64_t fpf_siz:12; + } s; + struct cvmx_fpa_fpf0_size_s cn38xx; + struct cvmx_fpa_fpf0_size_s cn38xxp2; + struct cvmx_fpa_fpf0_size_s cn56xx; + struct cvmx_fpa_fpf0_size_s cn56xxp1; + struct cvmx_fpa_fpf0_size_s cn58xx; + struct cvmx_fpa_fpf0_size_s cn58xxp1; +}; + +union cvmx_fpa_int_enb { + uint64_t u64; + struct cvmx_fpa_int_enb_s { + uint64_t reserved_28_63:36; + uint64_t q7_perr:1; + uint64_t q7_coff:1; + uint64_t q7_und:1; + uint64_t q6_perr:1; + uint64_t q6_coff:1; + uint64_t q6_und:1; + uint64_t q5_perr:1; + uint64_t q5_coff:1; + uint64_t q5_und:1; + uint64_t q4_perr:1; + uint64_t q4_coff:1; + uint64_t q4_und:1; + uint64_t q3_perr:1; + uint64_t q3_coff:1; + uint64_t q3_und:1; + uint64_t q2_perr:1; + uint64_t q2_coff:1; + uint64_t q2_und:1; + uint64_t q1_perr:1; + uint64_t q1_coff:1; + uint64_t q1_und:1; + uint64_t q0_perr:1; + uint64_t q0_coff:1; + uint64_t q0_und:1; + uint64_t fed1_dbe:1; + uint64_t fed1_sbe:1; + uint64_t fed0_dbe:1; + uint64_t fed0_sbe:1; + } s; + struct cvmx_fpa_int_enb_s cn30xx; + struct cvmx_fpa_int_enb_s cn31xx; + struct cvmx_fpa_int_enb_s cn38xx; + struct cvmx_fpa_int_enb_s cn38xxp2; + struct cvmx_fpa_int_enb_s cn50xx; + struct cvmx_fpa_int_enb_s cn52xx; + struct cvmx_fpa_int_enb_s cn52xxp1; + struct cvmx_fpa_int_enb_s cn56xx; + struct cvmx_fpa_int_enb_s cn56xxp1; + struct cvmx_fpa_int_enb_s cn58xx; + struct cvmx_fpa_int_enb_s cn58xxp1; +}; + +union cvmx_fpa_int_sum { + uint64_t u64; + struct cvmx_fpa_int_sum_s { + uint64_t reserved_28_63:36; + uint64_t q7_perr:1; + uint64_t q7_coff:1; + uint64_t q7_und:1; + uint64_t q6_perr:1; + uint64_t q6_coff:1; + uint64_t q6_und:1; + uint64_t q5_perr:1; + uint64_t q5_coff:1; + uint64_t q5_und:1; + uint64_t q4_perr:1; + uint64_t q4_coff:1; + uint64_t q4_und:1; + uint64_t q3_perr:1; + uint64_t q3_coff:1; + uint64_t q3_und:1; + uint64_t q2_perr:1; + uint64_t q2_coff:1; + uint64_t q2_und:1; + uint64_t q1_perr:1; + uint64_t q1_coff:1; + uint64_t q1_und:1; + uint64_t q0_perr:1; + uint64_t q0_coff:1; + uint64_t 
q0_und:1; + uint64_t fed1_dbe:1; + uint64_t fed1_sbe:1; + uint64_t fed0_dbe:1; + uint64_t fed0_sbe:1; + } s; + struct cvmx_fpa_int_sum_s cn30xx; + struct cvmx_fpa_int_sum_s cn31xx; + struct cvmx_fpa_int_sum_s cn38xx; + struct cvmx_fpa_int_sum_s cn38xxp2; + struct cvmx_fpa_int_sum_s cn50xx; + struct cvmx_fpa_int_sum_s cn52xx; + struct cvmx_fpa_int_sum_s cn52xxp1; + struct cvmx_fpa_int_sum_s cn56xx; + struct cvmx_fpa_int_sum_s cn56xxp1; + struct cvmx_fpa_int_sum_s cn58xx; + struct cvmx_fpa_int_sum_s cn58xxp1; +}; + +union cvmx_fpa_quex_available { + uint64_t u64; + struct cvmx_fpa_quex_available_s { + uint64_t reserved_29_63:35; + uint64_t que_siz:29; + } s; + struct cvmx_fpa_quex_available_s cn30xx; + struct cvmx_fpa_quex_available_s cn31xx; + struct cvmx_fpa_quex_available_s cn38xx; + struct cvmx_fpa_quex_available_s cn38xxp2; + struct cvmx_fpa_quex_available_s cn50xx; + struct cvmx_fpa_quex_available_s cn52xx; + struct cvmx_fpa_quex_available_s cn52xxp1; + struct cvmx_fpa_quex_available_s cn56xx; + struct cvmx_fpa_quex_available_s cn56xxp1; + struct cvmx_fpa_quex_available_s cn58xx; + struct cvmx_fpa_quex_available_s cn58xxp1; +}; + +union cvmx_fpa_quex_page_index { + uint64_t u64; + struct cvmx_fpa_quex_page_index_s { + uint64_t reserved_25_63:39; + uint64_t pg_num:25; + } s; + struct cvmx_fpa_quex_page_index_s cn30xx; + struct cvmx_fpa_quex_page_index_s cn31xx; + struct cvmx_fpa_quex_page_index_s cn38xx; + struct cvmx_fpa_quex_page_index_s cn38xxp2; + struct cvmx_fpa_quex_page_index_s cn50xx; + struct cvmx_fpa_quex_page_index_s cn52xx; + struct cvmx_fpa_quex_page_index_s cn52xxp1; + struct cvmx_fpa_quex_page_index_s cn56xx; + struct cvmx_fpa_quex_page_index_s cn56xxp1; + struct cvmx_fpa_quex_page_index_s cn58xx; + struct cvmx_fpa_quex_page_index_s cn58xxp1; +}; + +union cvmx_fpa_que_act { + uint64_t u64; + struct cvmx_fpa_que_act_s { + uint64_t reserved_29_63:35; + uint64_t act_que:3; + uint64_t act_indx:26; + } s; + struct cvmx_fpa_que_act_s cn30xx; + struct cvmx_fpa_que_act_s cn31xx; + struct cvmx_fpa_que_act_s cn38xx; + struct cvmx_fpa_que_act_s cn38xxp2; + struct cvmx_fpa_que_act_s cn50xx; + struct cvmx_fpa_que_act_s cn52xx; + struct cvmx_fpa_que_act_s cn52xxp1; + struct cvmx_fpa_que_act_s cn56xx; + struct cvmx_fpa_que_act_s cn56xxp1; + struct cvmx_fpa_que_act_s cn58xx; + struct cvmx_fpa_que_act_s cn58xxp1; +}; + +union cvmx_fpa_que_exp { + uint64_t u64; + struct cvmx_fpa_que_exp_s { + uint64_t reserved_29_63:35; + uint64_t exp_que:3; + uint64_t exp_indx:26; + } s; + struct cvmx_fpa_que_exp_s cn30xx; + struct cvmx_fpa_que_exp_s cn31xx; + struct cvmx_fpa_que_exp_s cn38xx; + struct cvmx_fpa_que_exp_s cn38xxp2; + struct cvmx_fpa_que_exp_s cn50xx; + struct cvmx_fpa_que_exp_s cn52xx; + struct cvmx_fpa_que_exp_s cn52xxp1; + struct cvmx_fpa_que_exp_s cn56xx; + struct cvmx_fpa_que_exp_s cn56xxp1; + struct cvmx_fpa_que_exp_s cn58xx; + struct cvmx_fpa_que_exp_s cn58xxp1; +}; + +union cvmx_fpa_wart_ctl { + uint64_t u64; + struct cvmx_fpa_wart_ctl_s { + uint64_t reserved_16_63:48; + uint64_t ctl:16; + } s; + struct cvmx_fpa_wart_ctl_s cn30xx; + struct cvmx_fpa_wart_ctl_s cn31xx; + struct cvmx_fpa_wart_ctl_s cn38xx; + struct cvmx_fpa_wart_ctl_s cn38xxp2; + struct cvmx_fpa_wart_ctl_s cn50xx; + struct cvmx_fpa_wart_ctl_s cn52xx; + struct cvmx_fpa_wart_ctl_s cn52xxp1; + struct cvmx_fpa_wart_ctl_s cn56xx; + struct cvmx_fpa_wart_ctl_s cn56xxp1; + struct cvmx_fpa_wart_ctl_s cn58xx; + struct cvmx_fpa_wart_ctl_s cn58xxp1; +}; + +union cvmx_fpa_wart_status { + uint64_t u64; + struct 
cvmx_fpa_wart_status_s {
+		uint64_t reserved_32_63:32;
+		uint64_t status:32;
+	} s;
+	struct cvmx_fpa_wart_status_s cn30xx;
+	struct cvmx_fpa_wart_status_s cn31xx;
+	struct cvmx_fpa_wart_status_s cn38xx;
+	struct cvmx_fpa_wart_status_s cn38xxp2;
+	struct cvmx_fpa_wart_status_s cn50xx;
+	struct cvmx_fpa_wart_status_s cn52xx;
+	struct cvmx_fpa_wart_status_s cn52xxp1;
+	struct cvmx_fpa_wart_status_s cn56xx;
+	struct cvmx_fpa_wart_status_s cn56xxp1;
+	struct cvmx_fpa_wart_status_s cn58xx;
+	struct cvmx_fpa_wart_status_s cn58xxp1;
+};
+
+#endif
diff --git a/drivers/staging/octeon/cvmx-fpa.c b/drivers/staging/octeon/cvmx-fpa.c
new file mode 100644
index 0000000..55d9147
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-fpa.c
@@ -0,0 +1,183 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Support library for the hardware Free Pool Allocator.
+ *
+ *
+ */
+
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-fpa.h"
+#include "cvmx-ipd.h"
+
+/**
+ * Current state of all the pools. Use access functions
+ * instead of using it directly.
+ */
+CVMX_SHARED cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];
+
+/**
+ * Set up an FPA pool to control a new block of memory. The
+ * buffer pointer must be a physical address.
+ *
+ * @pool:   Pool to initialize
+ *          0 <= pool < 8
+ * @name:   Constant character string to name this pool.
+ *          String is not copied.
+ * @buffer: Pointer to the block of memory to use. This must be
+ *          accessible by all processors and external hardware.
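+ *          It must also be aligned to CVMX_FPA_ALIGNMENT (128 bytes);
+ *          cvmx_fpa_setup_pool() rejects unaligned buffers.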
+ * @block_size: Size for each block controlled by the FPA
+ * @num_blocks: Number of blocks
+ *
+ * Returns 0 on Success,
+ *         -1 on failure
+ */
+int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
+			uint64_t block_size, uint64_t num_blocks)
+{
+	char *ptr;
+	if (!buffer) {
+		cvmx_dprintf
+		    ("ERROR: cvmx_fpa_setup_pool: NULL buffer pointer!\n");
+		return -1;
+	}
+	if (pool >= CVMX_FPA_NUM_POOLS) {
+		cvmx_dprintf("ERROR: cvmx_fpa_setup_pool: Illegal pool!\n");
+		return -1;
+	}
+
+	if (block_size < CVMX_FPA_MIN_BLOCK_SIZE) {
+		cvmx_dprintf
+		    ("ERROR: cvmx_fpa_setup_pool: Block size too small.\n");
+		return -1;
+	}
+
+	if (((unsigned long)buffer & (CVMX_FPA_ALIGNMENT - 1)) != 0) {
+		cvmx_dprintf
+		    ("ERROR: cvmx_fpa_setup_pool: Buffer not aligned properly.\n");
+		return -1;
+	}
+
+	cvmx_fpa_pool_info[pool].name = name;
+	cvmx_fpa_pool_info[pool].size = block_size;
+	cvmx_fpa_pool_info[pool].starting_element_count = num_blocks;
+	cvmx_fpa_pool_info[pool].base = buffer;
+
+	ptr = (char *)buffer;
+	while (num_blocks--) {
+		cvmx_fpa_free(ptr, pool, 0);
+		ptr += block_size;
+	}
+	return 0;
+}
+
+/**
+ * Shut down a memory pool and validate that it had all of
+ * the buffers originally placed in it.
+ *
+ * @pool:   Pool to shut down
+ * Returns Zero on success
+ *         - Positive is count of missing buffers
+ *         - Negative is too many buffers or corrupted pointers
+ */
+uint64_t cvmx_fpa_shutdown_pool(uint64_t pool)
+{
+	uint64_t errors = 0;
+	uint64_t count = 0;
+	uint64_t base = cvmx_ptr_to_phys(cvmx_fpa_pool_info[pool].base);
+	uint64_t finish =
+	    base +
+	    cvmx_fpa_pool_info[pool].size *
+	    cvmx_fpa_pool_info[pool].starting_element_count;
+	void *ptr;
+	uint64_t address;
+
+	do {
+		ptr = cvmx_fpa_alloc(pool);
+		if (ptr)
+			address = cvmx_ptr_to_phys(ptr);
+		else
+			address = 0;
+		if (address) {
+			if ((address >= base) && (address < finish) &&
+			    (((address -
+			       base) % cvmx_fpa_pool_info[pool].size) == 0)) {
+				count++;
+			} else {
+				cvmx_dprintf
+				    ("ERROR: cvmx_fpa_shutdown_pool: Illegal address 0x%llx in pool %s(%d)\n",
+				     (unsigned long long)address,
+				     cvmx_fpa_pool_info[pool].name, (int)pool);
+				errors++;
+			}
+		}
+	} while (address);
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+	if (pool == 0)
+		cvmx_ipd_free_ptr();
+#endif
+
+	if (errors) {
+		cvmx_dprintf
+		    ("ERROR: cvmx_fpa_shutdown_pool: Pool %s(%d) started at 0x%llx, ended at 0x%llx, with a step of 0x%llx\n",
+		     cvmx_fpa_pool_info[pool].name, (int)pool,
+		     (unsigned long long)base, (unsigned long long)finish,
+		     (unsigned long long)cvmx_fpa_pool_info[pool].size);
+		return -errors;
+	} else
+		return 0;
+}
+
+uint64_t cvmx_fpa_get_block_size(uint64_t pool)
+{
+	switch (pool) {
+	case 0:
+		return CVMX_FPA_POOL_0_SIZE;
+	case 1:
+		return CVMX_FPA_POOL_1_SIZE;
+	case 2:
+		return CVMX_FPA_POOL_2_SIZE;
+	case 3:
+		return CVMX_FPA_POOL_3_SIZE;
+	case 4:
+		return CVMX_FPA_POOL_4_SIZE;
+	case 5:
+		return CVMX_FPA_POOL_5_SIZE;
+	case 6:
+		return CVMX_FPA_POOL_6_SIZE;
+	case 7:
+		return CVMX_FPA_POOL_7_SIZE;
+	default:
+		return 0;
+	}
+}
diff --git a/drivers/staging/octeon/cvmx-fpa.h b/drivers/staging/octeon/cvmx-fpa.h
new file mode 100644
index 0000000..1d7788f
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-fpa.h
@@ -0,0 +1,299 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the
GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/** + * @file + * + * Interface to the hardware Free Pool Allocator. + * + * + */ + +#ifndef __CVMX_FPA_H__ +#define __CVMX_FPA_H__ + +#include "cvmx-address.h" +#include "cvmx-fpa-defs.h" + +#define CVMX_FPA_NUM_POOLS 8 +#define CVMX_FPA_MIN_BLOCK_SIZE 128 +#define CVMX_FPA_ALIGNMENT 128 + +/** + * Structure describing the data format used for stores to the FPA. + */ +typedef union { + uint64_t u64; + struct { + /* + * the (64-bit word) location in scratchpad to write + * to (if len != 0) + */ + uint64_t scraddr:8; + /* the number of words in the response (0 => no response) */ + uint64_t len:8; + /* the ID of the device on the non-coherent bus */ + uint64_t did:8; + /* + * the address that will appear in the first tick on + * the NCB bus. + */ + uint64_t addr:40; + } s; +} cvmx_fpa_iobdma_data_t; + +/** + * Structure describing the current state of a FPA pool. + */ +typedef struct { + /* Name it was created under */ + const char *name; + /* Size of each block */ + uint64_t size; + /* The base memory address of whole block */ + void *base; + /* The number of elements in the pool at creation */ + uint64_t starting_element_count; +} cvmx_fpa_pool_info_t; + +/** + * Current state of all the pools. Use access functions + * instead of using it directly. + */ +extern cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS]; + +/* CSR typedefs have been moved to cvmx-csr-*.h */ + +/** + * Return the name of the pool + * + * @pool: Pool to get the name of + * Returns The name + */ +static inline const char *cvmx_fpa_get_name(uint64_t pool) +{ + return cvmx_fpa_pool_info[pool].name; +} + +/** + * Return the base of the pool + * + * @pool: Pool to get the base of + * Returns The base + */ +static inline void *cvmx_fpa_get_base(uint64_t pool) +{ + return cvmx_fpa_pool_info[pool].base; +} + +/** + * Check if a pointer belongs to an FPA pool. Return non-zero + * if the supplied pointer is inside the memory controlled by + * an FPA pool. + * + * @pool: Pool to check + * @ptr: Pointer to check + * Returns Non-zero if pointer is in the pool. Zero if not + */ +static inline int cvmx_fpa_is_member(uint64_t pool, void *ptr) +{ + return ((ptr >= cvmx_fpa_pool_info[pool].base) && + ((char *)ptr < + ((char *)(cvmx_fpa_pool_info[pool].base)) + + cvmx_fpa_pool_info[pool].size * + cvmx_fpa_pool_info[pool].starting_element_count)); +} + +/** + * Enable the FPA for use. Must be performed after any CSR + * configuration but before any other FPA functions. 
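+ * In practice that means: program the pool CSRs, call cvmx_fpa_enable()
+ * once, and only then populate the pools with cvmx_fpa_setup_pool(),
+ * since that routine frees buffers into the (now enabled) unit.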
+ */ +static inline void cvmx_fpa_enable(void) +{ + union cvmx_fpa_ctl_status status; + + status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS); + if (status.s.enb) { + cvmx_dprintf + ("Warning: Enabling FPA when FPA already enabled.\n"); + } + + /* + * Do runtime check as we allow pass1 compiled code to run on + * pass2 chips. + */ + if (cvmx_octeon_is_pass1()) { + union cvmx_fpa_fpfx_marks marks; + int i; + for (i = 1; i < 8; i++) { + marks.u64 = + cvmx_read_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull); + marks.s.fpf_wr = 0xe0; + cvmx_write_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull, + marks.u64); + } + + /* Enforce a 10 cycle delay between config and enable */ + cvmx_wait(10); + } + + /* FIXME: CVMX_FPA_CTL_STATUS read is unmodelled */ + status.u64 = 0; + status.s.enb = 1; + cvmx_write_csr(CVMX_FPA_CTL_STATUS, status.u64); +} + +/** + * Get a new block from the FPA + * + * @pool: Pool to get the block from + * Returns Pointer to the block or NULL on failure + */ +static inline void *cvmx_fpa_alloc(uint64_t pool) +{ + uint64_t address = + cvmx_read_csr(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool))); + if (address) + return cvmx_phys_to_ptr(address); + else + return NULL; +} + +/** + * Asynchronously get a new block from the FPA + * + * @scr_addr: Local scratch address to put response in. This is a byte address, + * but must be 8 byte aligned. + * @pool: Pool to get the block from + */ +static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool) +{ + cvmx_fpa_iobdma_data_t data; + + /* + * Hardware only uses 64 bit alligned locations, so convert + * from byte address to 64-bit index + */ + data.s.scraddr = scr_addr >> 3; + data.s.len = 1; + data.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool); + data.s.addr = 0; + cvmx_send_single(data.u64); +} + +/** + * Free a block allocated with a FPA pool. Does NOT provide memory + * ordering in cases where the memory block was modified by the core. + * + * @ptr: Block to free + * @pool: Pool to put it in + * @num_cache_lines: + * Cache lines to invalidate + */ +static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool, + uint64_t num_cache_lines) +{ + cvmx_addr_t newptr; + newptr.u64 = cvmx_ptr_to_phys(ptr); + newptr.sfilldidspace.didspace = + CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool)); + /* Prevent GCC from reordering around free */ + barrier(); + /* value written is number of cache lines not written back */ + cvmx_write_io(newptr.u64, num_cache_lines); +} + +/** + * Free a block allocated with a FPA pool. Provides required memory + * ordering in cases where memory block was modified by core. + * + * @ptr: Block to free + * @pool: Pool to put it in + * @num_cache_lines: + * Cache lines to invalidate + */ +static inline void cvmx_fpa_free(void *ptr, uint64_t pool, + uint64_t num_cache_lines) +{ + cvmx_addr_t newptr; + newptr.u64 = cvmx_ptr_to_phys(ptr); + newptr.sfilldidspace.didspace = + CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool)); + /* + * Make sure that any previous writes to memory go out before + * we free this buffer. This also serves as a barrier to + * prevent GCC from reordering operations to after the + * free. + */ + CVMX_SYNCWS; + /* value written is number of cache lines not written back */ + cvmx_write_io(newptr.u64, num_cache_lines); +} + +/** + * Setup a FPA pool to control a new block of memory. + * This can only be called once per pool. Make sure proper + * locking enforces this. + * + * @pool: Pool to initialize + * 0 <= pool < 8 + * @name: Constant character string to name this pool. 
+ *          String is not copied.
+ * @buffer: Pointer to the block of memory to use. This must be
+ *          accessible by all processors and external hardware.
+ * @block_size: Size for each block controlled by the FPA
+ * @num_blocks: Number of blocks
+ *
+ * Returns 0 on Success,
+ *         -1 on failure
+ */
+extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
+			       uint64_t block_size, uint64_t num_blocks);
+
+/**
+ * Shut down a memory pool and validate that it had all of
+ * the buffers originally placed in it. This should only be
+ * called by one processor after all hardware has finished
+ * using the pool.
+ *
+ * @pool:   Pool to shut down
+ * Returns Zero on success
+ *         - Positive is count of missing buffers
+ *         - Negative is too many buffers or corrupted pointers
+ */
+extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);
+
+/**
+ * Get the size of blocks controlled by the pool
+ * This is resolved to a constant at compile time.
+ *
+ * @pool:   Pool to access
+ * Returns Size of the block in bytes
+ */
+uint64_t cvmx_fpa_get_block_size(uint64_t pool);
+
+#endif /* __CVMX_FPA_H__ */
diff --git a/drivers/staging/octeon/cvmx-gmxx-defs.h b/drivers/staging/octeon/cvmx-gmxx-defs.h
new file mode 100644
index 0000000..946a43a
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-gmxx-defs.h
@@ -0,0 +1,2529 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_GMXX_DEFS_H__ +#define __CVMX_GMXX_DEFS_H__ + +#define CVMX_GMXX_BAD_REG(block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000518ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_BIST(block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000400ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_CLK_EN(block_id) \ + CVMX_ADD_IO_SEG(0x00011800080007F0ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_HG2_CONTROL(block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000550ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_INF_MODE(block_id) \ + CVMX_ADD_IO_SEG(0x00011800080007F8ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_NXA_ADR(block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000510ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_PRTX_CBFC_CTL(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000580ull + (((offset) & 0) * 8) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_PRTX_CFG(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000010ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_ADR_CAM0(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000180ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_ADR_CAM1(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000188ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_ADR_CAM2(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000190ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_ADR_CAM3(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000198ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_ADR_CAM4(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800080001A0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_ADR_CAM5(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800080001A8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_ADR_CAM_EN(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000108ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_ADR_CTL(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000100ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_DECISION(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000040ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_FRM_CHK(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000020ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_FRM_CTL(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000018ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_FRM_MAX(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000030ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_FRM_MIN(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000028ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_IFG(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000058ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_INT_EN(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000008ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_INT_REG(offset, block_id) 
\ + CVMX_ADD_IO_SEG(0x0001180008000000ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_JABBER(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000038ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_PAUSE_DROP_TIME(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000068ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_RX_INBND(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000060ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_STATS_CTL(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000050ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_STATS_OCTS(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000088ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_STATS_OCTS_CTL(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000098ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_STATS_OCTS_DMAC(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800080000A8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_STATS_OCTS_DRP(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800080000B8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_STATS_PKTS(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000080ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_STATS_PKTS_BAD(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800080000C0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_STATS_PKTS_CTL(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000090ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_STATS_PKTS_DMAC(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800080000A0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_STATS_PKTS_DRP(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800080000B0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RXX_UDD_SKP(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000048ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RX_BP_DROPX(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000420ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RX_BP_OFFX(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000460ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RX_BP_ONX(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000440ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RX_HG2_STATUS(block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000548ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RX_PASS_EN(block_id) \ + CVMX_ADD_IO_SEG(0x00011800080005F8ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RX_PASS_MAPX(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000600ull + (((offset) & 15) * 8) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RX_PRTS(block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000410ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RX_PRT_INFO(block_id) \ + CVMX_ADD_IO_SEG(0x00011800080004E8ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RX_TX_STATUS(block_id) \ + CVMX_ADD_IO_SEG(0x00011800080007E8ull + (((block_id) & 0) * 0x8000000ull)) +#define 
CVMX_GMXX_RX_XAUI_BAD_COL(block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000538ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_RX_XAUI_CTL(block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000530ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_SMACX(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000230ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_STAT_BP(block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000520ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_APPEND(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000218ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_BURST(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000228ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_CBFC_XOFF(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800080005A0ull + (((offset) & 0) * 8) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_CBFC_XON(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800080005C0ull + (((offset) & 0) * 8) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_CLK(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000208ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_CTL(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000270ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_MIN_PKT(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000240ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000248ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_PAUSE_PKT_TIME(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000238ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_PAUSE_TOGO(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000258ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_PAUSE_ZERO(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000260ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_SGMII_CTL(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000300ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_SLOT(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000220ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_SOFT_PAUSE(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000250ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_STAT0(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000280ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_STAT1(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000288ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_STAT2(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000290ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_STAT3(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000298ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_STAT4(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800080002A0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_STAT5(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800080002A8ull + (((offset) & 3) * 2048) + 
(((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_STAT6(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800080002B0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_STAT7(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800080002B8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_STAT8(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800080002C0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_STAT9(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800080002C8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_STATS_CTL(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000268ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TXX_THRESH(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000210ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TX_BP(block_id) \ + CVMX_ADD_IO_SEG(0x00011800080004D0ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TX_CLK_MSKX(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000780ull + (((offset) & 1) * 8) + (((block_id) & 0) * 0x0ull)) +#define CVMX_GMXX_TX_COL_ATTEMPT(block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000498ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TX_CORRUPT(block_id) \ + CVMX_ADD_IO_SEG(0x00011800080004D8ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TX_HG2_REG1(block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000558ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TX_HG2_REG2(block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000560ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TX_IFG(block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000488ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TX_INT_EN(block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000508ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TX_INT_REG(block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000500ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TX_JAM(block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000490ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TX_LFSR(block_id) \ + CVMX_ADD_IO_SEG(0x00011800080004F8ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TX_OVR_BP(block_id) \ + CVMX_ADD_IO_SEG(0x00011800080004C8ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TX_PAUSE_PKT_DMAC(block_id) \ + CVMX_ADD_IO_SEG(0x00011800080004A0ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TX_PAUSE_PKT_TYPE(block_id) \ + CVMX_ADD_IO_SEG(0x00011800080004A8ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TX_PRTS(block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000480ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TX_SPI_CTL(block_id) \ + CVMX_ADD_IO_SEG(0x00011800080004C0ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TX_SPI_DRAIN(block_id) \ + CVMX_ADD_IO_SEG(0x00011800080004E0ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TX_SPI_MAX(block_id) \ + CVMX_ADD_IO_SEG(0x00011800080004B0ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TX_SPI_ROUNDX(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000680ull + (((offset) & 31) * 8) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TX_SPI_THRESH(block_id) \ + CVMX_ADD_IO_SEG(0x00011800080004B8ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_GMXX_TX_XAUI_CTL(block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000528ull + (((block_id) & 1) * 0x8000000ull)) +#define 
CVMX_GMXX_XAUI_EXT_LOOPBACK(block_id) \ + CVMX_ADD_IO_SEG(0x0001180008000540ull + (((block_id) & 1) * 0x8000000ull)) + +union cvmx_gmxx_bad_reg { + uint64_t u64; + struct cvmx_gmxx_bad_reg_s { + uint64_t reserved_31_63:33; + uint64_t inb_nxa:4; + uint64_t statovr:1; + uint64_t loststat:4; + uint64_t reserved_18_21:4; + uint64_t out_ovr:16; + uint64_t ncb_ovr:1; + uint64_t out_col:1; + } s; + struct cvmx_gmxx_bad_reg_cn30xx { + uint64_t reserved_31_63:33; + uint64_t inb_nxa:4; + uint64_t statovr:1; + uint64_t reserved_25_25:1; + uint64_t loststat:3; + uint64_t reserved_5_21:17; + uint64_t out_ovr:3; + uint64_t reserved_0_1:2; + } cn30xx; + struct cvmx_gmxx_bad_reg_cn30xx cn31xx; + struct cvmx_gmxx_bad_reg_s cn38xx; + struct cvmx_gmxx_bad_reg_s cn38xxp2; + struct cvmx_gmxx_bad_reg_cn30xx cn50xx; + struct cvmx_gmxx_bad_reg_cn52xx { + uint64_t reserved_31_63:33; + uint64_t inb_nxa:4; + uint64_t statovr:1; + uint64_t loststat:4; + uint64_t reserved_6_21:16; + uint64_t out_ovr:4; + uint64_t reserved_0_1:2; + } cn52xx; + struct cvmx_gmxx_bad_reg_cn52xx cn52xxp1; + struct cvmx_gmxx_bad_reg_cn52xx cn56xx; + struct cvmx_gmxx_bad_reg_cn52xx cn56xxp1; + struct cvmx_gmxx_bad_reg_s cn58xx; + struct cvmx_gmxx_bad_reg_s cn58xxp1; +}; + +union cvmx_gmxx_bist { + uint64_t u64; + struct cvmx_gmxx_bist_s { + uint64_t reserved_17_63:47; + uint64_t status:17; + } s; + struct cvmx_gmxx_bist_cn30xx { + uint64_t reserved_10_63:54; + uint64_t status:10; + } cn30xx; + struct cvmx_gmxx_bist_cn30xx cn31xx; + struct cvmx_gmxx_bist_cn30xx cn38xx; + struct cvmx_gmxx_bist_cn30xx cn38xxp2; + struct cvmx_gmxx_bist_cn50xx { + uint64_t reserved_12_63:52; + uint64_t status:12; + } cn50xx; + struct cvmx_gmxx_bist_cn52xx { + uint64_t reserved_16_63:48; + uint64_t status:16; + } cn52xx; + struct cvmx_gmxx_bist_cn52xx cn52xxp1; + struct cvmx_gmxx_bist_cn52xx cn56xx; + struct cvmx_gmxx_bist_cn52xx cn56xxp1; + struct cvmx_gmxx_bist_s cn58xx; + struct cvmx_gmxx_bist_s cn58xxp1; +}; + +union cvmx_gmxx_clk_en { + uint64_t u64; + struct cvmx_gmxx_clk_en_s { + uint64_t reserved_1_63:63; + uint64_t clk_en:1; + } s; + struct cvmx_gmxx_clk_en_s cn52xx; + struct cvmx_gmxx_clk_en_s cn52xxp1; + struct cvmx_gmxx_clk_en_s cn56xx; + struct cvmx_gmxx_clk_en_s cn56xxp1; +}; + +union cvmx_gmxx_hg2_control { + uint64_t u64; + struct cvmx_gmxx_hg2_control_s { + uint64_t reserved_19_63:45; + uint64_t hg2tx_en:1; + uint64_t hg2rx_en:1; + uint64_t phys_en:1; + uint64_t logl_en:16; + } s; + struct cvmx_gmxx_hg2_control_s cn52xx; + struct cvmx_gmxx_hg2_control_s cn52xxp1; + struct cvmx_gmxx_hg2_control_s cn56xx; +}; + +union cvmx_gmxx_inf_mode { + uint64_t u64; + struct cvmx_gmxx_inf_mode_s { + uint64_t reserved_10_63:54; + uint64_t speed:2; + uint64_t reserved_6_7:2; + uint64_t mode:2; + uint64_t reserved_3_3:1; + uint64_t p0mii:1; + uint64_t en:1; + uint64_t type:1; + } s; + struct cvmx_gmxx_inf_mode_cn30xx { + uint64_t reserved_3_63:61; + uint64_t p0mii:1; + uint64_t en:1; + uint64_t type:1; + } cn30xx; + struct cvmx_gmxx_inf_mode_cn31xx { + uint64_t reserved_2_63:62; + uint64_t en:1; + uint64_t type:1; + } cn31xx; + struct cvmx_gmxx_inf_mode_cn31xx cn38xx; + struct cvmx_gmxx_inf_mode_cn31xx cn38xxp2; + struct cvmx_gmxx_inf_mode_cn30xx cn50xx; + struct cvmx_gmxx_inf_mode_cn52xx { + uint64_t reserved_10_63:54; + uint64_t speed:2; + uint64_t reserved_6_7:2; + uint64_t mode:2; + uint64_t reserved_2_3:2; + uint64_t en:1; + uint64_t type:1; + } cn52xx; + struct cvmx_gmxx_inf_mode_cn52xx cn52xxp1; + struct cvmx_gmxx_inf_mode_cn52xx cn56xx; + struct 
cvmx_gmxx_inf_mode_cn52xx cn56xxp1; + struct cvmx_gmxx_inf_mode_cn31xx cn58xx; + struct cvmx_gmxx_inf_mode_cn31xx cn58xxp1; +}; + +union cvmx_gmxx_nxa_adr { + uint64_t u64; + struct cvmx_gmxx_nxa_adr_s { + uint64_t reserved_6_63:58; + uint64_t prt:6; + } s; + struct cvmx_gmxx_nxa_adr_s cn30xx; + struct cvmx_gmxx_nxa_adr_s cn31xx; + struct cvmx_gmxx_nxa_adr_s cn38xx; + struct cvmx_gmxx_nxa_adr_s cn38xxp2; + struct cvmx_gmxx_nxa_adr_s cn50xx; + struct cvmx_gmxx_nxa_adr_s cn52xx; + struct cvmx_gmxx_nxa_adr_s cn52xxp1; + struct cvmx_gmxx_nxa_adr_s cn56xx; + struct cvmx_gmxx_nxa_adr_s cn56xxp1; + struct cvmx_gmxx_nxa_adr_s cn58xx; + struct cvmx_gmxx_nxa_adr_s cn58xxp1; +}; + +union cvmx_gmxx_prtx_cbfc_ctl { + uint64_t u64; + struct cvmx_gmxx_prtx_cbfc_ctl_s { + uint64_t phys_en:16; + uint64_t logl_en:16; + uint64_t phys_bp:16; + uint64_t reserved_4_15:12; + uint64_t bck_en:1; + uint64_t drp_en:1; + uint64_t tx_en:1; + uint64_t rx_en:1; + } s; + struct cvmx_gmxx_prtx_cbfc_ctl_s cn52xx; + struct cvmx_gmxx_prtx_cbfc_ctl_s cn56xx; +}; + +union cvmx_gmxx_prtx_cfg { + uint64_t u64; + struct cvmx_gmxx_prtx_cfg_s { + uint64_t reserved_14_63:50; + uint64_t tx_idle:1; + uint64_t rx_idle:1; + uint64_t reserved_9_11:3; + uint64_t speed_msb:1; + uint64_t reserved_4_7:4; + uint64_t slottime:1; + uint64_t duplex:1; + uint64_t speed:1; + uint64_t en:1; + } s; + struct cvmx_gmxx_prtx_cfg_cn30xx { + uint64_t reserved_4_63:60; + uint64_t slottime:1; + uint64_t duplex:1; + uint64_t speed:1; + uint64_t en:1; + } cn30xx; + struct cvmx_gmxx_prtx_cfg_cn30xx cn31xx; + struct cvmx_gmxx_prtx_cfg_cn30xx cn38xx; + struct cvmx_gmxx_prtx_cfg_cn30xx cn38xxp2; + struct cvmx_gmxx_prtx_cfg_cn30xx cn50xx; + struct cvmx_gmxx_prtx_cfg_s cn52xx; + struct cvmx_gmxx_prtx_cfg_s cn52xxp1; + struct cvmx_gmxx_prtx_cfg_s cn56xx; + struct cvmx_gmxx_prtx_cfg_s cn56xxp1; + struct cvmx_gmxx_prtx_cfg_cn30xx cn58xx; + struct cvmx_gmxx_prtx_cfg_cn30xx cn58xxp1; +}; + +union cvmx_gmxx_rxx_adr_cam0 { + uint64_t u64; + struct cvmx_gmxx_rxx_adr_cam0_s { + uint64_t adr:64; + } s; + struct cvmx_gmxx_rxx_adr_cam0_s cn30xx; + struct cvmx_gmxx_rxx_adr_cam0_s cn31xx; + struct cvmx_gmxx_rxx_adr_cam0_s cn38xx; + struct cvmx_gmxx_rxx_adr_cam0_s cn38xxp2; + struct cvmx_gmxx_rxx_adr_cam0_s cn50xx; + struct cvmx_gmxx_rxx_adr_cam0_s cn52xx; + struct cvmx_gmxx_rxx_adr_cam0_s cn52xxp1; + struct cvmx_gmxx_rxx_adr_cam0_s cn56xx; + struct cvmx_gmxx_rxx_adr_cam0_s cn56xxp1; + struct cvmx_gmxx_rxx_adr_cam0_s cn58xx; + struct cvmx_gmxx_rxx_adr_cam0_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_adr_cam1 { + uint64_t u64; + struct cvmx_gmxx_rxx_adr_cam1_s { + uint64_t adr:64; + } s; + struct cvmx_gmxx_rxx_adr_cam1_s cn30xx; + struct cvmx_gmxx_rxx_adr_cam1_s cn31xx; + struct cvmx_gmxx_rxx_adr_cam1_s cn38xx; + struct cvmx_gmxx_rxx_adr_cam1_s cn38xxp2; + struct cvmx_gmxx_rxx_adr_cam1_s cn50xx; + struct cvmx_gmxx_rxx_adr_cam1_s cn52xx; + struct cvmx_gmxx_rxx_adr_cam1_s cn52xxp1; + struct cvmx_gmxx_rxx_adr_cam1_s cn56xx; + struct cvmx_gmxx_rxx_adr_cam1_s cn56xxp1; + struct cvmx_gmxx_rxx_adr_cam1_s cn58xx; + struct cvmx_gmxx_rxx_adr_cam1_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_adr_cam2 { + uint64_t u64; + struct cvmx_gmxx_rxx_adr_cam2_s { + uint64_t adr:64; + } s; + struct cvmx_gmxx_rxx_adr_cam2_s cn30xx; + struct cvmx_gmxx_rxx_adr_cam2_s cn31xx; + struct cvmx_gmxx_rxx_adr_cam2_s cn38xx; + struct cvmx_gmxx_rxx_adr_cam2_s cn38xxp2; + struct cvmx_gmxx_rxx_adr_cam2_s cn50xx; + struct cvmx_gmxx_rxx_adr_cam2_s cn52xx; + struct cvmx_gmxx_rxx_adr_cam2_s cn52xxp1; + struct 
cvmx_gmxx_rxx_adr_cam2_s cn56xx; + struct cvmx_gmxx_rxx_adr_cam2_s cn56xxp1; + struct cvmx_gmxx_rxx_adr_cam2_s cn58xx; + struct cvmx_gmxx_rxx_adr_cam2_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_adr_cam3 { + uint64_t u64; + struct cvmx_gmxx_rxx_adr_cam3_s { + uint64_t adr:64; + } s; + struct cvmx_gmxx_rxx_adr_cam3_s cn30xx; + struct cvmx_gmxx_rxx_adr_cam3_s cn31xx; + struct cvmx_gmxx_rxx_adr_cam3_s cn38xx; + struct cvmx_gmxx_rxx_adr_cam3_s cn38xxp2; + struct cvmx_gmxx_rxx_adr_cam3_s cn50xx; + struct cvmx_gmxx_rxx_adr_cam3_s cn52xx; + struct cvmx_gmxx_rxx_adr_cam3_s cn52xxp1; + struct cvmx_gmxx_rxx_adr_cam3_s cn56xx; + struct cvmx_gmxx_rxx_adr_cam3_s cn56xxp1; + struct cvmx_gmxx_rxx_adr_cam3_s cn58xx; + struct cvmx_gmxx_rxx_adr_cam3_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_adr_cam4 { + uint64_t u64; + struct cvmx_gmxx_rxx_adr_cam4_s { + uint64_t adr:64; + } s; + struct cvmx_gmxx_rxx_adr_cam4_s cn30xx; + struct cvmx_gmxx_rxx_adr_cam4_s cn31xx; + struct cvmx_gmxx_rxx_adr_cam4_s cn38xx; + struct cvmx_gmxx_rxx_adr_cam4_s cn38xxp2; + struct cvmx_gmxx_rxx_adr_cam4_s cn50xx; + struct cvmx_gmxx_rxx_adr_cam4_s cn52xx; + struct cvmx_gmxx_rxx_adr_cam4_s cn52xxp1; + struct cvmx_gmxx_rxx_adr_cam4_s cn56xx; + struct cvmx_gmxx_rxx_adr_cam4_s cn56xxp1; + struct cvmx_gmxx_rxx_adr_cam4_s cn58xx; + struct cvmx_gmxx_rxx_adr_cam4_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_adr_cam5 { + uint64_t u64; + struct cvmx_gmxx_rxx_adr_cam5_s { + uint64_t adr:64; + } s; + struct cvmx_gmxx_rxx_adr_cam5_s cn30xx; + struct cvmx_gmxx_rxx_adr_cam5_s cn31xx; + struct cvmx_gmxx_rxx_adr_cam5_s cn38xx; + struct cvmx_gmxx_rxx_adr_cam5_s cn38xxp2; + struct cvmx_gmxx_rxx_adr_cam5_s cn50xx; + struct cvmx_gmxx_rxx_adr_cam5_s cn52xx; + struct cvmx_gmxx_rxx_adr_cam5_s cn52xxp1; + struct cvmx_gmxx_rxx_adr_cam5_s cn56xx; + struct cvmx_gmxx_rxx_adr_cam5_s cn56xxp1; + struct cvmx_gmxx_rxx_adr_cam5_s cn58xx; + struct cvmx_gmxx_rxx_adr_cam5_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_adr_cam_en { + uint64_t u64; + struct cvmx_gmxx_rxx_adr_cam_en_s { + uint64_t reserved_8_63:56; + uint64_t en:8; + } s; + struct cvmx_gmxx_rxx_adr_cam_en_s cn30xx; + struct cvmx_gmxx_rxx_adr_cam_en_s cn31xx; + struct cvmx_gmxx_rxx_adr_cam_en_s cn38xx; + struct cvmx_gmxx_rxx_adr_cam_en_s cn38xxp2; + struct cvmx_gmxx_rxx_adr_cam_en_s cn50xx; + struct cvmx_gmxx_rxx_adr_cam_en_s cn52xx; + struct cvmx_gmxx_rxx_adr_cam_en_s cn52xxp1; + struct cvmx_gmxx_rxx_adr_cam_en_s cn56xx; + struct cvmx_gmxx_rxx_adr_cam_en_s cn56xxp1; + struct cvmx_gmxx_rxx_adr_cam_en_s cn58xx; + struct cvmx_gmxx_rxx_adr_cam_en_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_adr_ctl { + uint64_t u64; + struct cvmx_gmxx_rxx_adr_ctl_s { + uint64_t reserved_4_63:60; + uint64_t cam_mode:1; + uint64_t mcst:2; + uint64_t bcst:1; + } s; + struct cvmx_gmxx_rxx_adr_ctl_s cn30xx; + struct cvmx_gmxx_rxx_adr_ctl_s cn31xx; + struct cvmx_gmxx_rxx_adr_ctl_s cn38xx; + struct cvmx_gmxx_rxx_adr_ctl_s cn38xxp2; + struct cvmx_gmxx_rxx_adr_ctl_s cn50xx; + struct cvmx_gmxx_rxx_adr_ctl_s cn52xx; + struct cvmx_gmxx_rxx_adr_ctl_s cn52xxp1; + struct cvmx_gmxx_rxx_adr_ctl_s cn56xx; + struct cvmx_gmxx_rxx_adr_ctl_s cn56xxp1; + struct cvmx_gmxx_rxx_adr_ctl_s cn58xx; + struct cvmx_gmxx_rxx_adr_ctl_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_decision { + uint64_t u64; + struct cvmx_gmxx_rxx_decision_s { + uint64_t reserved_5_63:59; + uint64_t cnt:5; + } s; + struct cvmx_gmxx_rxx_decision_s cn30xx; + struct cvmx_gmxx_rxx_decision_s cn31xx; + struct cvmx_gmxx_rxx_decision_s cn38xx; + struct cvmx_gmxx_rxx_decision_s cn38xxp2; + struct cvmx_gmxx_rxx_decision_s 
cn50xx; + struct cvmx_gmxx_rxx_decision_s cn52xx; + struct cvmx_gmxx_rxx_decision_s cn52xxp1; + struct cvmx_gmxx_rxx_decision_s cn56xx; + struct cvmx_gmxx_rxx_decision_s cn56xxp1; + struct cvmx_gmxx_rxx_decision_s cn58xx; + struct cvmx_gmxx_rxx_decision_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_frm_chk { + uint64_t u64; + struct cvmx_gmxx_rxx_frm_chk_s { + uint64_t reserved_10_63:54; + uint64_t niberr:1; + uint64_t skperr:1; + uint64_t rcverr:1; + uint64_t lenerr:1; + uint64_t alnerr:1; + uint64_t fcserr:1; + uint64_t jabber:1; + uint64_t maxerr:1; + uint64_t carext:1; + uint64_t minerr:1; + } s; + struct cvmx_gmxx_rxx_frm_chk_s cn30xx; + struct cvmx_gmxx_rxx_frm_chk_s cn31xx; + struct cvmx_gmxx_rxx_frm_chk_s cn38xx; + struct cvmx_gmxx_rxx_frm_chk_s cn38xxp2; + struct cvmx_gmxx_rxx_frm_chk_cn50xx { + uint64_t reserved_10_63:54; + uint64_t niberr:1; + uint64_t skperr:1; + uint64_t rcverr:1; + uint64_t reserved_6_6:1; + uint64_t alnerr:1; + uint64_t fcserr:1; + uint64_t jabber:1; + uint64_t reserved_2_2:1; + uint64_t carext:1; + uint64_t reserved_0_0:1; + } cn50xx; + struct cvmx_gmxx_rxx_frm_chk_cn52xx { + uint64_t reserved_9_63:55; + uint64_t skperr:1; + uint64_t rcverr:1; + uint64_t reserved_5_6:2; + uint64_t fcserr:1; + uint64_t jabber:1; + uint64_t reserved_2_2:1; + uint64_t carext:1; + uint64_t reserved_0_0:1; + } cn52xx; + struct cvmx_gmxx_rxx_frm_chk_cn52xx cn52xxp1; + struct cvmx_gmxx_rxx_frm_chk_cn52xx cn56xx; + struct cvmx_gmxx_rxx_frm_chk_cn52xx cn56xxp1; + struct cvmx_gmxx_rxx_frm_chk_s cn58xx; + struct cvmx_gmxx_rxx_frm_chk_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_frm_ctl { + uint64_t u64; + struct cvmx_gmxx_rxx_frm_ctl_s { + uint64_t reserved_11_63:53; + uint64_t null_dis:1; + uint64_t pre_align:1; + uint64_t pad_len:1; + uint64_t vlan_len:1; + uint64_t pre_free:1; + uint64_t ctl_smac:1; + uint64_t ctl_mcst:1; + uint64_t ctl_bck:1; + uint64_t ctl_drp:1; + uint64_t pre_strp:1; + uint64_t pre_chk:1; + } s; + struct cvmx_gmxx_rxx_frm_ctl_cn30xx { + uint64_t reserved_9_63:55; + uint64_t pad_len:1; + uint64_t vlan_len:1; + uint64_t pre_free:1; + uint64_t ctl_smac:1; + uint64_t ctl_mcst:1; + uint64_t ctl_bck:1; + uint64_t ctl_drp:1; + uint64_t pre_strp:1; + uint64_t pre_chk:1; + } cn30xx; + struct cvmx_gmxx_rxx_frm_ctl_cn31xx { + uint64_t reserved_8_63:56; + uint64_t vlan_len:1; + uint64_t pre_free:1; + uint64_t ctl_smac:1; + uint64_t ctl_mcst:1; + uint64_t ctl_bck:1; + uint64_t ctl_drp:1; + uint64_t pre_strp:1; + uint64_t pre_chk:1; + } cn31xx; + struct cvmx_gmxx_rxx_frm_ctl_cn30xx cn38xx; + struct cvmx_gmxx_rxx_frm_ctl_cn31xx cn38xxp2; + struct cvmx_gmxx_rxx_frm_ctl_cn50xx { + uint64_t reserved_11_63:53; + uint64_t null_dis:1; + uint64_t pre_align:1; + uint64_t reserved_7_8:2; + uint64_t pre_free:1; + uint64_t ctl_smac:1; + uint64_t ctl_mcst:1; + uint64_t ctl_bck:1; + uint64_t ctl_drp:1; + uint64_t pre_strp:1; + uint64_t pre_chk:1; + } cn50xx; + struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn52xx; + struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn52xxp1; + struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn56xx; + struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1 { + uint64_t reserved_10_63:54; + uint64_t pre_align:1; + uint64_t reserved_7_8:2; + uint64_t pre_free:1; + uint64_t ctl_smac:1; + uint64_t ctl_mcst:1; + uint64_t ctl_bck:1; + uint64_t ctl_drp:1; + uint64_t pre_strp:1; + uint64_t pre_chk:1; + } cn56xxp1; + struct cvmx_gmxx_rxx_frm_ctl_s cn58xx; + struct cvmx_gmxx_rxx_frm_ctl_cn30xx cn58xxp1; +}; + +union cvmx_gmxx_rxx_frm_max { + uint64_t u64; + struct cvmx_gmxx_rxx_frm_max_s { + uint64_t reserved_16_63:48; + uint64_t 
len:16; + } s; + struct cvmx_gmxx_rxx_frm_max_s cn30xx; + struct cvmx_gmxx_rxx_frm_max_s cn31xx; + struct cvmx_gmxx_rxx_frm_max_s cn38xx; + struct cvmx_gmxx_rxx_frm_max_s cn38xxp2; + struct cvmx_gmxx_rxx_frm_max_s cn58xx; + struct cvmx_gmxx_rxx_frm_max_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_frm_min { + uint64_t u64; + struct cvmx_gmxx_rxx_frm_min_s { + uint64_t reserved_16_63:48; + uint64_t len:16; + } s; + struct cvmx_gmxx_rxx_frm_min_s cn30xx; + struct cvmx_gmxx_rxx_frm_min_s cn31xx; + struct cvmx_gmxx_rxx_frm_min_s cn38xx; + struct cvmx_gmxx_rxx_frm_min_s cn38xxp2; + struct cvmx_gmxx_rxx_frm_min_s cn58xx; + struct cvmx_gmxx_rxx_frm_min_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_ifg { + uint64_t u64; + struct cvmx_gmxx_rxx_ifg_s { + uint64_t reserved_4_63:60; + uint64_t ifg:4; + } s; + struct cvmx_gmxx_rxx_ifg_s cn30xx; + struct cvmx_gmxx_rxx_ifg_s cn31xx; + struct cvmx_gmxx_rxx_ifg_s cn38xx; + struct cvmx_gmxx_rxx_ifg_s cn38xxp2; + struct cvmx_gmxx_rxx_ifg_s cn50xx; + struct cvmx_gmxx_rxx_ifg_s cn52xx; + struct cvmx_gmxx_rxx_ifg_s cn52xxp1; + struct cvmx_gmxx_rxx_ifg_s cn56xx; + struct cvmx_gmxx_rxx_ifg_s cn56xxp1; + struct cvmx_gmxx_rxx_ifg_s cn58xx; + struct cvmx_gmxx_rxx_ifg_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_int_en { + uint64_t u64; + struct cvmx_gmxx_rxx_int_en_s { + uint64_t reserved_29_63:35; + uint64_t hg2cc:1; + uint64_t hg2fld:1; + uint64_t undat:1; + uint64_t uneop:1; + uint64_t unsop:1; + uint64_t bad_term:1; + uint64_t bad_seq:1; + uint64_t rem_fault:1; + uint64_t loc_fault:1; + uint64_t pause_drp:1; + uint64_t phy_dupx:1; + uint64_t phy_spd:1; + uint64_t phy_link:1; + uint64_t ifgerr:1; + uint64_t coldet:1; + uint64_t falerr:1; + uint64_t rsverr:1; + uint64_t pcterr:1; + uint64_t ovrerr:1; + uint64_t niberr:1; + uint64_t skperr:1; + uint64_t rcverr:1; + uint64_t lenerr:1; + uint64_t alnerr:1; + uint64_t fcserr:1; + uint64_t jabber:1; + uint64_t maxerr:1; + uint64_t carext:1; + uint64_t minerr:1; + } s; + struct cvmx_gmxx_rxx_int_en_cn30xx { + uint64_t reserved_19_63:45; + uint64_t phy_dupx:1; + uint64_t phy_spd:1; + uint64_t phy_link:1; + uint64_t ifgerr:1; + uint64_t coldet:1; + uint64_t falerr:1; + uint64_t rsverr:1; + uint64_t pcterr:1; + uint64_t ovrerr:1; + uint64_t niberr:1; + uint64_t skperr:1; + uint64_t rcverr:1; + uint64_t lenerr:1; + uint64_t alnerr:1; + uint64_t fcserr:1; + uint64_t jabber:1; + uint64_t maxerr:1; + uint64_t carext:1; + uint64_t minerr:1; + } cn30xx; + struct cvmx_gmxx_rxx_int_en_cn30xx cn31xx; + struct cvmx_gmxx_rxx_int_en_cn30xx cn38xx; + struct cvmx_gmxx_rxx_int_en_cn30xx cn38xxp2; + struct cvmx_gmxx_rxx_int_en_cn50xx { + uint64_t reserved_20_63:44; + uint64_t pause_drp:1; + uint64_t phy_dupx:1; + uint64_t phy_spd:1; + uint64_t phy_link:1; + uint64_t ifgerr:1; + uint64_t coldet:1; + uint64_t falerr:1; + uint64_t rsverr:1; + uint64_t pcterr:1; + uint64_t ovrerr:1; + uint64_t niberr:1; + uint64_t skperr:1; + uint64_t rcverr:1; + uint64_t reserved_6_6:1; + uint64_t alnerr:1; + uint64_t fcserr:1; + uint64_t jabber:1; + uint64_t reserved_2_2:1; + uint64_t carext:1; + uint64_t reserved_0_0:1; + } cn50xx; + struct cvmx_gmxx_rxx_int_en_cn52xx { + uint64_t reserved_29_63:35; + uint64_t hg2cc:1; + uint64_t hg2fld:1; + uint64_t undat:1; + uint64_t uneop:1; + uint64_t unsop:1; + uint64_t bad_term:1; + uint64_t bad_seq:1; + uint64_t rem_fault:1; + uint64_t loc_fault:1; + uint64_t pause_drp:1; + uint64_t reserved_16_18:3; + uint64_t ifgerr:1; + uint64_t coldet:1; + uint64_t falerr:1; + uint64_t rsverr:1; + uint64_t pcterr:1; + uint64_t ovrerr:1; + uint64_t 
reserved_9_9:1; + uint64_t skperr:1; + uint64_t rcverr:1; + uint64_t reserved_5_6:2; + uint64_t fcserr:1; + uint64_t jabber:1; + uint64_t reserved_2_2:1; + uint64_t carext:1; + uint64_t reserved_0_0:1; + } cn52xx; + struct cvmx_gmxx_rxx_int_en_cn52xx cn52xxp1; + struct cvmx_gmxx_rxx_int_en_cn52xx cn56xx; + struct cvmx_gmxx_rxx_int_en_cn56xxp1 { + uint64_t reserved_27_63:37; + uint64_t undat:1; + uint64_t uneop:1; + uint64_t unsop:1; + uint64_t bad_term:1; + uint64_t bad_seq:1; + uint64_t rem_fault:1; + uint64_t loc_fault:1; + uint64_t pause_drp:1; + uint64_t reserved_16_18:3; + uint64_t ifgerr:1; + uint64_t coldet:1; + uint64_t falerr:1; + uint64_t rsverr:1; + uint64_t pcterr:1; + uint64_t ovrerr:1; + uint64_t reserved_9_9:1; + uint64_t skperr:1; + uint64_t rcverr:1; + uint64_t reserved_5_6:2; + uint64_t fcserr:1; + uint64_t jabber:1; + uint64_t reserved_2_2:1; + uint64_t carext:1; + uint64_t reserved_0_0:1; + } cn56xxp1; + struct cvmx_gmxx_rxx_int_en_cn58xx { + uint64_t reserved_20_63:44; + uint64_t pause_drp:1; + uint64_t phy_dupx:1; + uint64_t phy_spd:1; + uint64_t phy_link:1; + uint64_t ifgerr:1; + uint64_t coldet:1; + uint64_t falerr:1; + uint64_t rsverr:1; + uint64_t pcterr:1; + uint64_t ovrerr:1; + uint64_t niberr:1; + uint64_t skperr:1; + uint64_t rcverr:1; + uint64_t lenerr:1; + uint64_t alnerr:1; + uint64_t fcserr:1; + uint64_t jabber:1; + uint64_t maxerr:1; + uint64_t carext:1; + uint64_t minerr:1; + } cn58xx; + struct cvmx_gmxx_rxx_int_en_cn58xx cn58xxp1; +}; + +union cvmx_gmxx_rxx_int_reg { + uint64_t u64; + struct cvmx_gmxx_rxx_int_reg_s { + uint64_t reserved_29_63:35; + uint64_t hg2cc:1; + uint64_t hg2fld:1; + uint64_t undat:1; + uint64_t uneop:1; + uint64_t unsop:1; + uint64_t bad_term:1; + uint64_t bad_seq:1; + uint64_t rem_fault:1; + uint64_t loc_fault:1; + uint64_t pause_drp:1; + uint64_t phy_dupx:1; + uint64_t phy_spd:1; + uint64_t phy_link:1; + uint64_t ifgerr:1; + uint64_t coldet:1; + uint64_t falerr:1; + uint64_t rsverr:1; + uint64_t pcterr:1; + uint64_t ovrerr:1; + uint64_t niberr:1; + uint64_t skperr:1; + uint64_t rcverr:1; + uint64_t lenerr:1; + uint64_t alnerr:1; + uint64_t fcserr:1; + uint64_t jabber:1; + uint64_t maxerr:1; + uint64_t carext:1; + uint64_t minerr:1; + } s; + struct cvmx_gmxx_rxx_int_reg_cn30xx { + uint64_t reserved_19_63:45; + uint64_t phy_dupx:1; + uint64_t phy_spd:1; + uint64_t phy_link:1; + uint64_t ifgerr:1; + uint64_t coldet:1; + uint64_t falerr:1; + uint64_t rsverr:1; + uint64_t pcterr:1; + uint64_t ovrerr:1; + uint64_t niberr:1; + uint64_t skperr:1; + uint64_t rcverr:1; + uint64_t lenerr:1; + uint64_t alnerr:1; + uint64_t fcserr:1; + uint64_t jabber:1; + uint64_t maxerr:1; + uint64_t carext:1; + uint64_t minerr:1; + } cn30xx; + struct cvmx_gmxx_rxx_int_reg_cn30xx cn31xx; + struct cvmx_gmxx_rxx_int_reg_cn30xx cn38xx; + struct cvmx_gmxx_rxx_int_reg_cn30xx cn38xxp2; + struct cvmx_gmxx_rxx_int_reg_cn50xx { + uint64_t reserved_20_63:44; + uint64_t pause_drp:1; + uint64_t phy_dupx:1; + uint64_t phy_spd:1; + uint64_t phy_link:1; + uint64_t ifgerr:1; + uint64_t coldet:1; + uint64_t falerr:1; + uint64_t rsverr:1; + uint64_t pcterr:1; + uint64_t ovrerr:1; + uint64_t niberr:1; + uint64_t skperr:1; + uint64_t rcverr:1; + uint64_t reserved_6_6:1; + uint64_t alnerr:1; + uint64_t fcserr:1; + uint64_t jabber:1; + uint64_t reserved_2_2:1; + uint64_t carext:1; + uint64_t reserved_0_0:1; + } cn50xx; + struct cvmx_gmxx_rxx_int_reg_cn52xx { + uint64_t reserved_29_63:35; + uint64_t hg2cc:1; + uint64_t hg2fld:1; + uint64_t undat:1; + uint64_t uneop:1; + uint64_t 
unsop:1; + uint64_t bad_term:1; + uint64_t bad_seq:1; + uint64_t rem_fault:1; + uint64_t loc_fault:1; + uint64_t pause_drp:1; + uint64_t reserved_16_18:3; + uint64_t ifgerr:1; + uint64_t coldet:1; + uint64_t falerr:1; + uint64_t rsverr:1; + uint64_t pcterr:1; + uint64_t ovrerr:1; + uint64_t reserved_9_9:1; + uint64_t skperr:1; + uint64_t rcverr:1; + uint64_t reserved_5_6:2; + uint64_t fcserr:1; + uint64_t jabber:1; + uint64_t reserved_2_2:1; + uint64_t carext:1; + uint64_t reserved_0_0:1; + } cn52xx; + struct cvmx_gmxx_rxx_int_reg_cn52xx cn52xxp1; + struct cvmx_gmxx_rxx_int_reg_cn52xx cn56xx; + struct cvmx_gmxx_rxx_int_reg_cn56xxp1 { + uint64_t reserved_27_63:37; + uint64_t undat:1; + uint64_t uneop:1; + uint64_t unsop:1; + uint64_t bad_term:1; + uint64_t bad_seq:1; + uint64_t rem_fault:1; + uint64_t loc_fault:1; + uint64_t pause_drp:1; + uint64_t reserved_16_18:3; + uint64_t ifgerr:1; + uint64_t coldet:1; + uint64_t falerr:1; + uint64_t rsverr:1; + uint64_t pcterr:1; + uint64_t ovrerr:1; + uint64_t reserved_9_9:1; + uint64_t skperr:1; + uint64_t rcverr:1; + uint64_t reserved_5_6:2; + uint64_t fcserr:1; + uint64_t jabber:1; + uint64_t reserved_2_2:1; + uint64_t carext:1; + uint64_t reserved_0_0:1; + } cn56xxp1; + struct cvmx_gmxx_rxx_int_reg_cn58xx { + uint64_t reserved_20_63:44; + uint64_t pause_drp:1; + uint64_t phy_dupx:1; + uint64_t phy_spd:1; + uint64_t phy_link:1; + uint64_t ifgerr:1; + uint64_t coldet:1; + uint64_t falerr:1; + uint64_t rsverr:1; + uint64_t pcterr:1; + uint64_t ovrerr:1; + uint64_t niberr:1; + uint64_t skperr:1; + uint64_t rcverr:1; + uint64_t lenerr:1; + uint64_t alnerr:1; + uint64_t fcserr:1; + uint64_t jabber:1; + uint64_t maxerr:1; + uint64_t carext:1; + uint64_t minerr:1; + } cn58xx; + struct cvmx_gmxx_rxx_int_reg_cn58xx cn58xxp1; +}; + +union cvmx_gmxx_rxx_jabber { + uint64_t u64; + struct cvmx_gmxx_rxx_jabber_s { + uint64_t reserved_16_63:48; + uint64_t cnt:16; + } s; + struct cvmx_gmxx_rxx_jabber_s cn30xx; + struct cvmx_gmxx_rxx_jabber_s cn31xx; + struct cvmx_gmxx_rxx_jabber_s cn38xx; + struct cvmx_gmxx_rxx_jabber_s cn38xxp2; + struct cvmx_gmxx_rxx_jabber_s cn50xx; + struct cvmx_gmxx_rxx_jabber_s cn52xx; + struct cvmx_gmxx_rxx_jabber_s cn52xxp1; + struct cvmx_gmxx_rxx_jabber_s cn56xx; + struct cvmx_gmxx_rxx_jabber_s cn56xxp1; + struct cvmx_gmxx_rxx_jabber_s cn58xx; + struct cvmx_gmxx_rxx_jabber_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_pause_drop_time { + uint64_t u64; + struct cvmx_gmxx_rxx_pause_drop_time_s { + uint64_t reserved_16_63:48; + uint64_t status:16; + } s; + struct cvmx_gmxx_rxx_pause_drop_time_s cn50xx; + struct cvmx_gmxx_rxx_pause_drop_time_s cn52xx; + struct cvmx_gmxx_rxx_pause_drop_time_s cn52xxp1; + struct cvmx_gmxx_rxx_pause_drop_time_s cn56xx; + struct cvmx_gmxx_rxx_pause_drop_time_s cn56xxp1; + struct cvmx_gmxx_rxx_pause_drop_time_s cn58xx; + struct cvmx_gmxx_rxx_pause_drop_time_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_rx_inbnd { + uint64_t u64; + struct cvmx_gmxx_rxx_rx_inbnd_s { + uint64_t reserved_4_63:60; + uint64_t duplex:1; + uint64_t speed:2; + uint64_t status:1; + } s; + struct cvmx_gmxx_rxx_rx_inbnd_s cn30xx; + struct cvmx_gmxx_rxx_rx_inbnd_s cn31xx; + struct cvmx_gmxx_rxx_rx_inbnd_s cn38xx; + struct cvmx_gmxx_rxx_rx_inbnd_s cn38xxp2; + struct cvmx_gmxx_rxx_rx_inbnd_s cn50xx; + struct cvmx_gmxx_rxx_rx_inbnd_s cn58xx; + struct cvmx_gmxx_rxx_rx_inbnd_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_stats_ctl { + uint64_t u64; + struct cvmx_gmxx_rxx_stats_ctl_s { + uint64_t reserved_1_63:63; + uint64_t rd_clr:1; + } s; + struct 
cvmx_gmxx_rxx_stats_ctl_s cn30xx; + struct cvmx_gmxx_rxx_stats_ctl_s cn31xx; + struct cvmx_gmxx_rxx_stats_ctl_s cn38xx; + struct cvmx_gmxx_rxx_stats_ctl_s cn38xxp2; + struct cvmx_gmxx_rxx_stats_ctl_s cn50xx; + struct cvmx_gmxx_rxx_stats_ctl_s cn52xx; + struct cvmx_gmxx_rxx_stats_ctl_s cn52xxp1; + struct cvmx_gmxx_rxx_stats_ctl_s cn56xx; + struct cvmx_gmxx_rxx_stats_ctl_s cn56xxp1; + struct cvmx_gmxx_rxx_stats_ctl_s cn58xx; + struct cvmx_gmxx_rxx_stats_ctl_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_stats_octs { + uint64_t u64; + struct cvmx_gmxx_rxx_stats_octs_s { + uint64_t reserved_48_63:16; + uint64_t cnt:48; + } s; + struct cvmx_gmxx_rxx_stats_octs_s cn30xx; + struct cvmx_gmxx_rxx_stats_octs_s cn31xx; + struct cvmx_gmxx_rxx_stats_octs_s cn38xx; + struct cvmx_gmxx_rxx_stats_octs_s cn38xxp2; + struct cvmx_gmxx_rxx_stats_octs_s cn50xx; + struct cvmx_gmxx_rxx_stats_octs_s cn52xx; + struct cvmx_gmxx_rxx_stats_octs_s cn52xxp1; + struct cvmx_gmxx_rxx_stats_octs_s cn56xx; + struct cvmx_gmxx_rxx_stats_octs_s cn56xxp1; + struct cvmx_gmxx_rxx_stats_octs_s cn58xx; + struct cvmx_gmxx_rxx_stats_octs_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_stats_octs_ctl { + uint64_t u64; + struct cvmx_gmxx_rxx_stats_octs_ctl_s { + uint64_t reserved_48_63:16; + uint64_t cnt:48; + } s; + struct cvmx_gmxx_rxx_stats_octs_ctl_s cn30xx; + struct cvmx_gmxx_rxx_stats_octs_ctl_s cn31xx; + struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xx; + struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xxp2; + struct cvmx_gmxx_rxx_stats_octs_ctl_s cn50xx; + struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xx; + struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xxp1; + struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xx; + struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xxp1; + struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xx; + struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_stats_octs_dmac { + uint64_t u64; + struct cvmx_gmxx_rxx_stats_octs_dmac_s { + uint64_t reserved_48_63:16; + uint64_t cnt:48; + } s; + struct cvmx_gmxx_rxx_stats_octs_dmac_s cn30xx; + struct cvmx_gmxx_rxx_stats_octs_dmac_s cn31xx; + struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xx; + struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xxp2; + struct cvmx_gmxx_rxx_stats_octs_dmac_s cn50xx; + struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xx; + struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xxp1; + struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xx; + struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xxp1; + struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xx; + struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_stats_octs_drp { + uint64_t u64; + struct cvmx_gmxx_rxx_stats_octs_drp_s { + uint64_t reserved_48_63:16; + uint64_t cnt:48; + } s; + struct cvmx_gmxx_rxx_stats_octs_drp_s cn30xx; + struct cvmx_gmxx_rxx_stats_octs_drp_s cn31xx; + struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xx; + struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xxp2; + struct cvmx_gmxx_rxx_stats_octs_drp_s cn50xx; + struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xx; + struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xxp1; + struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xx; + struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xxp1; + struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xx; + struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_stats_pkts { + uint64_t u64; + struct cvmx_gmxx_rxx_stats_pkts_s { + uint64_t reserved_32_63:32; + uint64_t cnt:32; + } s; + struct cvmx_gmxx_rxx_stats_pkts_s cn30xx; + struct cvmx_gmxx_rxx_stats_pkts_s cn31xx; + struct cvmx_gmxx_rxx_stats_pkts_s cn38xx; + struct cvmx_gmxx_rxx_stats_pkts_s cn38xxp2; + struct 
cvmx_gmxx_rxx_stats_pkts_s cn50xx; + struct cvmx_gmxx_rxx_stats_pkts_s cn52xx; + struct cvmx_gmxx_rxx_stats_pkts_s cn52xxp1; + struct cvmx_gmxx_rxx_stats_pkts_s cn56xx; + struct cvmx_gmxx_rxx_stats_pkts_s cn56xxp1; + struct cvmx_gmxx_rxx_stats_pkts_s cn58xx; + struct cvmx_gmxx_rxx_stats_pkts_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_stats_pkts_bad { + uint64_t u64; + struct cvmx_gmxx_rxx_stats_pkts_bad_s { + uint64_t reserved_32_63:32; + uint64_t cnt:32; + } s; + struct cvmx_gmxx_rxx_stats_pkts_bad_s cn30xx; + struct cvmx_gmxx_rxx_stats_pkts_bad_s cn31xx; + struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xx; + struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xxp2; + struct cvmx_gmxx_rxx_stats_pkts_bad_s cn50xx; + struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xx; + struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xxp1; + struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xx; + struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xxp1; + struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xx; + struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_stats_pkts_ctl { + uint64_t u64; + struct cvmx_gmxx_rxx_stats_pkts_ctl_s { + uint64_t reserved_32_63:32; + uint64_t cnt:32; + } s; + struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn30xx; + struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn31xx; + struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xx; + struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xxp2; + struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn50xx; + struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xx; + struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xxp1; + struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xx; + struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xxp1; + struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xx; + struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_stats_pkts_dmac { + uint64_t u64; + struct cvmx_gmxx_rxx_stats_pkts_dmac_s { + uint64_t reserved_32_63:32; + uint64_t cnt:32; + } s; + struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn30xx; + struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn31xx; + struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xx; + struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xxp2; + struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn50xx; + struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xx; + struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xxp1; + struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xx; + struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xxp1; + struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xx; + struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_stats_pkts_drp { + uint64_t u64; + struct cvmx_gmxx_rxx_stats_pkts_drp_s { + uint64_t reserved_32_63:32; + uint64_t cnt:32; + } s; + struct cvmx_gmxx_rxx_stats_pkts_drp_s cn30xx; + struct cvmx_gmxx_rxx_stats_pkts_drp_s cn31xx; + struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xx; + struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xxp2; + struct cvmx_gmxx_rxx_stats_pkts_drp_s cn50xx; + struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xx; + struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xxp1; + struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xx; + struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xxp1; + struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xx; + struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xxp1; +}; + +union cvmx_gmxx_rxx_udd_skp { + uint64_t u64; + struct cvmx_gmxx_rxx_udd_skp_s { + uint64_t reserved_9_63:55; + uint64_t fcssel:1; + uint64_t reserved_7_7:1; + uint64_t len:7; + } s; + struct cvmx_gmxx_rxx_udd_skp_s cn30xx; + struct cvmx_gmxx_rxx_udd_skp_s cn31xx; + struct cvmx_gmxx_rxx_udd_skp_s cn38xx; + struct cvmx_gmxx_rxx_udd_skp_s cn38xxp2; + struct cvmx_gmxx_rxx_udd_skp_s cn50xx; + struct cvmx_gmxx_rxx_udd_skp_s cn52xx; + struct 
cvmx_gmxx_rxx_udd_skp_s cn52xxp1; + struct cvmx_gmxx_rxx_udd_skp_s cn56xx; + struct cvmx_gmxx_rxx_udd_skp_s cn56xxp1; + struct cvmx_gmxx_rxx_udd_skp_s cn58xx; + struct cvmx_gmxx_rxx_udd_skp_s cn58xxp1; +}; + +union cvmx_gmxx_rx_bp_dropx { + uint64_t u64; + struct cvmx_gmxx_rx_bp_dropx_s { + uint64_t reserved_6_63:58; + uint64_t mark:6; + } s; + struct cvmx_gmxx_rx_bp_dropx_s cn30xx; + struct cvmx_gmxx_rx_bp_dropx_s cn31xx; + struct cvmx_gmxx_rx_bp_dropx_s cn38xx; + struct cvmx_gmxx_rx_bp_dropx_s cn38xxp2; + struct cvmx_gmxx_rx_bp_dropx_s cn50xx; + struct cvmx_gmxx_rx_bp_dropx_s cn52xx; + struct cvmx_gmxx_rx_bp_dropx_s cn52xxp1; + struct cvmx_gmxx_rx_bp_dropx_s cn56xx; + struct cvmx_gmxx_rx_bp_dropx_s cn56xxp1; + struct cvmx_gmxx_rx_bp_dropx_s cn58xx; + struct cvmx_gmxx_rx_bp_dropx_s cn58xxp1; +}; + +union cvmx_gmxx_rx_bp_offx { + uint64_t u64; + struct cvmx_gmxx_rx_bp_offx_s { + uint64_t reserved_6_63:58; + uint64_t mark:6; + } s; + struct cvmx_gmxx_rx_bp_offx_s cn30xx; + struct cvmx_gmxx_rx_bp_offx_s cn31xx; + struct cvmx_gmxx_rx_bp_offx_s cn38xx; + struct cvmx_gmxx_rx_bp_offx_s cn38xxp2; + struct cvmx_gmxx_rx_bp_offx_s cn50xx; + struct cvmx_gmxx_rx_bp_offx_s cn52xx; + struct cvmx_gmxx_rx_bp_offx_s cn52xxp1; + struct cvmx_gmxx_rx_bp_offx_s cn56xx; + struct cvmx_gmxx_rx_bp_offx_s cn56xxp1; + struct cvmx_gmxx_rx_bp_offx_s cn58xx; + struct cvmx_gmxx_rx_bp_offx_s cn58xxp1; +}; + +union cvmx_gmxx_rx_bp_onx { + uint64_t u64; + struct cvmx_gmxx_rx_bp_onx_s { + uint64_t reserved_9_63:55; + uint64_t mark:9; + } s; + struct cvmx_gmxx_rx_bp_onx_s cn30xx; + struct cvmx_gmxx_rx_bp_onx_s cn31xx; + struct cvmx_gmxx_rx_bp_onx_s cn38xx; + struct cvmx_gmxx_rx_bp_onx_s cn38xxp2; + struct cvmx_gmxx_rx_bp_onx_s cn50xx; + struct cvmx_gmxx_rx_bp_onx_s cn52xx; + struct cvmx_gmxx_rx_bp_onx_s cn52xxp1; + struct cvmx_gmxx_rx_bp_onx_s cn56xx; + struct cvmx_gmxx_rx_bp_onx_s cn56xxp1; + struct cvmx_gmxx_rx_bp_onx_s cn58xx; + struct cvmx_gmxx_rx_bp_onx_s cn58xxp1; +}; + +union cvmx_gmxx_rx_hg2_status { + uint64_t u64; + struct cvmx_gmxx_rx_hg2_status_s { + uint64_t reserved_48_63:16; + uint64_t phtim2go:16; + uint64_t xof:16; + uint64_t lgtim2go:16; + } s; + struct cvmx_gmxx_rx_hg2_status_s cn52xx; + struct cvmx_gmxx_rx_hg2_status_s cn52xxp1; + struct cvmx_gmxx_rx_hg2_status_s cn56xx; +}; + +union cvmx_gmxx_rx_pass_en { + uint64_t u64; + struct cvmx_gmxx_rx_pass_en_s { + uint64_t reserved_16_63:48; + uint64_t en:16; + } s; + struct cvmx_gmxx_rx_pass_en_s cn38xx; + struct cvmx_gmxx_rx_pass_en_s cn38xxp2; + struct cvmx_gmxx_rx_pass_en_s cn58xx; + struct cvmx_gmxx_rx_pass_en_s cn58xxp1; +}; + +union cvmx_gmxx_rx_pass_mapx { + uint64_t u64; + struct cvmx_gmxx_rx_pass_mapx_s { + uint64_t reserved_4_63:60; + uint64_t dprt:4; + } s; + struct cvmx_gmxx_rx_pass_mapx_s cn38xx; + struct cvmx_gmxx_rx_pass_mapx_s cn38xxp2; + struct cvmx_gmxx_rx_pass_mapx_s cn58xx; + struct cvmx_gmxx_rx_pass_mapx_s cn58xxp1; +}; + +union cvmx_gmxx_rx_prt_info { + uint64_t u64; + struct cvmx_gmxx_rx_prt_info_s { + uint64_t reserved_32_63:32; + uint64_t drop:16; + uint64_t commit:16; + } s; + struct cvmx_gmxx_rx_prt_info_cn30xx { + uint64_t reserved_19_63:45; + uint64_t drop:3; + uint64_t reserved_3_15:13; + uint64_t commit:3; + } cn30xx; + struct cvmx_gmxx_rx_prt_info_cn30xx cn31xx; + struct cvmx_gmxx_rx_prt_info_s cn38xx; + struct cvmx_gmxx_rx_prt_info_cn30xx cn50xx; + struct cvmx_gmxx_rx_prt_info_cn52xx { + uint64_t reserved_20_63:44; + uint64_t drop:4; + uint64_t reserved_4_15:12; + uint64_t commit:4; + } cn52xx; + struct 
cvmx_gmxx_rx_prt_info_cn52xx cn52xxp1; + struct cvmx_gmxx_rx_prt_info_cn52xx cn56xx; + struct cvmx_gmxx_rx_prt_info_cn52xx cn56xxp1; + struct cvmx_gmxx_rx_prt_info_s cn58xx; + struct cvmx_gmxx_rx_prt_info_s cn58xxp1; +}; + +union cvmx_gmxx_rx_prts { + uint64_t u64; + struct cvmx_gmxx_rx_prts_s { + uint64_t reserved_3_63:61; + uint64_t prts:3; + } s; + struct cvmx_gmxx_rx_prts_s cn30xx; + struct cvmx_gmxx_rx_prts_s cn31xx; + struct cvmx_gmxx_rx_prts_s cn38xx; + struct cvmx_gmxx_rx_prts_s cn38xxp2; + struct cvmx_gmxx_rx_prts_s cn50xx; + struct cvmx_gmxx_rx_prts_s cn52xx; + struct cvmx_gmxx_rx_prts_s cn52xxp1; + struct cvmx_gmxx_rx_prts_s cn56xx; + struct cvmx_gmxx_rx_prts_s cn56xxp1; + struct cvmx_gmxx_rx_prts_s cn58xx; + struct cvmx_gmxx_rx_prts_s cn58xxp1; +}; + +union cvmx_gmxx_rx_tx_status { + uint64_t u64; + struct cvmx_gmxx_rx_tx_status_s { + uint64_t reserved_7_63:57; + uint64_t tx:3; + uint64_t reserved_3_3:1; + uint64_t rx:3; + } s; + struct cvmx_gmxx_rx_tx_status_s cn30xx; + struct cvmx_gmxx_rx_tx_status_s cn31xx; + struct cvmx_gmxx_rx_tx_status_s cn50xx; +}; + +union cvmx_gmxx_rx_xaui_bad_col { + uint64_t u64; + struct cvmx_gmxx_rx_xaui_bad_col_s { + uint64_t reserved_40_63:24; + uint64_t val:1; + uint64_t state:3; + uint64_t lane_rxc:4; + uint64_t lane_rxd:32; + } s; + struct cvmx_gmxx_rx_xaui_bad_col_s cn52xx; + struct cvmx_gmxx_rx_xaui_bad_col_s cn52xxp1; + struct cvmx_gmxx_rx_xaui_bad_col_s cn56xx; + struct cvmx_gmxx_rx_xaui_bad_col_s cn56xxp1; +}; + +union cvmx_gmxx_rx_xaui_ctl { + uint64_t u64; + struct cvmx_gmxx_rx_xaui_ctl_s { + uint64_t reserved_2_63:62; + uint64_t status:2; + } s; + struct cvmx_gmxx_rx_xaui_ctl_s cn52xx; + struct cvmx_gmxx_rx_xaui_ctl_s cn52xxp1; + struct cvmx_gmxx_rx_xaui_ctl_s cn56xx; + struct cvmx_gmxx_rx_xaui_ctl_s cn56xxp1; +}; + +union cvmx_gmxx_smacx { + uint64_t u64; + struct cvmx_gmxx_smacx_s { + uint64_t reserved_48_63:16; + uint64_t smac:48; + } s; + struct cvmx_gmxx_smacx_s cn30xx; + struct cvmx_gmxx_smacx_s cn31xx; + struct cvmx_gmxx_smacx_s cn38xx; + struct cvmx_gmxx_smacx_s cn38xxp2; + struct cvmx_gmxx_smacx_s cn50xx; + struct cvmx_gmxx_smacx_s cn52xx; + struct cvmx_gmxx_smacx_s cn52xxp1; + struct cvmx_gmxx_smacx_s cn56xx; + struct cvmx_gmxx_smacx_s cn56xxp1; + struct cvmx_gmxx_smacx_s cn58xx; + struct cvmx_gmxx_smacx_s cn58xxp1; +}; + +union cvmx_gmxx_stat_bp { + uint64_t u64; + struct cvmx_gmxx_stat_bp_s { + uint64_t reserved_17_63:47; + uint64_t bp:1; + uint64_t cnt:16; + } s; + struct cvmx_gmxx_stat_bp_s cn30xx; + struct cvmx_gmxx_stat_bp_s cn31xx; + struct cvmx_gmxx_stat_bp_s cn38xx; + struct cvmx_gmxx_stat_bp_s cn38xxp2; + struct cvmx_gmxx_stat_bp_s cn50xx; + struct cvmx_gmxx_stat_bp_s cn52xx; + struct cvmx_gmxx_stat_bp_s cn52xxp1; + struct cvmx_gmxx_stat_bp_s cn56xx; + struct cvmx_gmxx_stat_bp_s cn56xxp1; + struct cvmx_gmxx_stat_bp_s cn58xx; + struct cvmx_gmxx_stat_bp_s cn58xxp1; +}; + +union cvmx_gmxx_txx_append { + uint64_t u64; + struct cvmx_gmxx_txx_append_s { + uint64_t reserved_4_63:60; + uint64_t force_fcs:1; + uint64_t fcs:1; + uint64_t pad:1; + uint64_t preamble:1; + } s; + struct cvmx_gmxx_txx_append_s cn30xx; + struct cvmx_gmxx_txx_append_s cn31xx; + struct cvmx_gmxx_txx_append_s cn38xx; + struct cvmx_gmxx_txx_append_s cn38xxp2; + struct cvmx_gmxx_txx_append_s cn50xx; + struct cvmx_gmxx_txx_append_s cn52xx; + struct cvmx_gmxx_txx_append_s cn52xxp1; + struct cvmx_gmxx_txx_append_s cn56xx; + struct cvmx_gmxx_txx_append_s cn56xxp1; + struct cvmx_gmxx_txx_append_s cn58xx; + struct cvmx_gmxx_txx_append_s cn58xxp1; +}; + +union 
cvmx_gmxx_txx_burst { + uint64_t u64; + struct cvmx_gmxx_txx_burst_s { + uint64_t reserved_16_63:48; + uint64_t burst:16; + } s; + struct cvmx_gmxx_txx_burst_s cn30xx; + struct cvmx_gmxx_txx_burst_s cn31xx; + struct cvmx_gmxx_txx_burst_s cn38xx; + struct cvmx_gmxx_txx_burst_s cn38xxp2; + struct cvmx_gmxx_txx_burst_s cn50xx; + struct cvmx_gmxx_txx_burst_s cn52xx; + struct cvmx_gmxx_txx_burst_s cn52xxp1; + struct cvmx_gmxx_txx_burst_s cn56xx; + struct cvmx_gmxx_txx_burst_s cn56xxp1; + struct cvmx_gmxx_txx_burst_s cn58xx; + struct cvmx_gmxx_txx_burst_s cn58xxp1; +}; + +union cvmx_gmxx_txx_cbfc_xoff { + uint64_t u64; + struct cvmx_gmxx_txx_cbfc_xoff_s { + uint64_t reserved_16_63:48; + uint64_t xoff:16; + } s; + struct cvmx_gmxx_txx_cbfc_xoff_s cn52xx; + struct cvmx_gmxx_txx_cbfc_xoff_s cn56xx; +}; + +union cvmx_gmxx_txx_cbfc_xon { + uint64_t u64; + struct cvmx_gmxx_txx_cbfc_xon_s { + uint64_t reserved_16_63:48; + uint64_t xon:16; + } s; + struct cvmx_gmxx_txx_cbfc_xon_s cn52xx; + struct cvmx_gmxx_txx_cbfc_xon_s cn56xx; +}; + +union cvmx_gmxx_txx_clk { + uint64_t u64; + struct cvmx_gmxx_txx_clk_s { + uint64_t reserved_6_63:58; + uint64_t clk_cnt:6; + } s; + struct cvmx_gmxx_txx_clk_s cn30xx; + struct cvmx_gmxx_txx_clk_s cn31xx; + struct cvmx_gmxx_txx_clk_s cn38xx; + struct cvmx_gmxx_txx_clk_s cn38xxp2; + struct cvmx_gmxx_txx_clk_s cn50xx; + struct cvmx_gmxx_txx_clk_s cn58xx; + struct cvmx_gmxx_txx_clk_s cn58xxp1; +}; + +union cvmx_gmxx_txx_ctl { + uint64_t u64; + struct cvmx_gmxx_txx_ctl_s { + uint64_t reserved_2_63:62; + uint64_t xsdef_en:1; + uint64_t xscol_en:1; + } s; + struct cvmx_gmxx_txx_ctl_s cn30xx; + struct cvmx_gmxx_txx_ctl_s cn31xx; + struct cvmx_gmxx_txx_ctl_s cn38xx; + struct cvmx_gmxx_txx_ctl_s cn38xxp2; + struct cvmx_gmxx_txx_ctl_s cn50xx; + struct cvmx_gmxx_txx_ctl_s cn52xx; + struct cvmx_gmxx_txx_ctl_s cn52xxp1; + struct cvmx_gmxx_txx_ctl_s cn56xx; + struct cvmx_gmxx_txx_ctl_s cn56xxp1; + struct cvmx_gmxx_txx_ctl_s cn58xx; + struct cvmx_gmxx_txx_ctl_s cn58xxp1; +}; + +union cvmx_gmxx_txx_min_pkt { + uint64_t u64; + struct cvmx_gmxx_txx_min_pkt_s { + uint64_t reserved_8_63:56; + uint64_t min_size:8; + } s; + struct cvmx_gmxx_txx_min_pkt_s cn30xx; + struct cvmx_gmxx_txx_min_pkt_s cn31xx; + struct cvmx_gmxx_txx_min_pkt_s cn38xx; + struct cvmx_gmxx_txx_min_pkt_s cn38xxp2; + struct cvmx_gmxx_txx_min_pkt_s cn50xx; + struct cvmx_gmxx_txx_min_pkt_s cn52xx; + struct cvmx_gmxx_txx_min_pkt_s cn52xxp1; + struct cvmx_gmxx_txx_min_pkt_s cn56xx; + struct cvmx_gmxx_txx_min_pkt_s cn56xxp1; + struct cvmx_gmxx_txx_min_pkt_s cn58xx; + struct cvmx_gmxx_txx_min_pkt_s cn58xxp1; +}; + +union cvmx_gmxx_txx_pause_pkt_interval { + uint64_t u64; + struct cvmx_gmxx_txx_pause_pkt_interval_s { + uint64_t reserved_16_63:48; + uint64_t interval:16; + } s; + struct cvmx_gmxx_txx_pause_pkt_interval_s cn30xx; + struct cvmx_gmxx_txx_pause_pkt_interval_s cn31xx; + struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xx; + struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xxp2; + struct cvmx_gmxx_txx_pause_pkt_interval_s cn50xx; + struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xx; + struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xxp1; + struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xx; + struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xxp1; + struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xx; + struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xxp1; +}; + +union cvmx_gmxx_txx_pause_pkt_time { + uint64_t u64; + struct cvmx_gmxx_txx_pause_pkt_time_s { + uint64_t reserved_16_63:48; + uint64_t time:16; + } s; + struct 
cvmx_gmxx_txx_pause_pkt_time_s cn30xx; + struct cvmx_gmxx_txx_pause_pkt_time_s cn31xx; + struct cvmx_gmxx_txx_pause_pkt_time_s cn38xx; + struct cvmx_gmxx_txx_pause_pkt_time_s cn38xxp2; + struct cvmx_gmxx_txx_pause_pkt_time_s cn50xx; + struct cvmx_gmxx_txx_pause_pkt_time_s cn52xx; + struct cvmx_gmxx_txx_pause_pkt_time_s cn52xxp1; + struct cvmx_gmxx_txx_pause_pkt_time_s cn56xx; + struct cvmx_gmxx_txx_pause_pkt_time_s cn56xxp1; + struct cvmx_gmxx_txx_pause_pkt_time_s cn58xx; + struct cvmx_gmxx_txx_pause_pkt_time_s cn58xxp1; +}; + +union cvmx_gmxx_txx_pause_togo { + uint64_t u64; + struct cvmx_gmxx_txx_pause_togo_s { + uint64_t reserved_32_63:32; + uint64_t msg_time:16; + uint64_t time:16; + } s; + struct cvmx_gmxx_txx_pause_togo_cn30xx { + uint64_t reserved_16_63:48; + uint64_t time:16; + } cn30xx; + struct cvmx_gmxx_txx_pause_togo_cn30xx cn31xx; + struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xx; + struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xxp2; + struct cvmx_gmxx_txx_pause_togo_cn30xx cn50xx; + struct cvmx_gmxx_txx_pause_togo_s cn52xx; + struct cvmx_gmxx_txx_pause_togo_s cn52xxp1; + struct cvmx_gmxx_txx_pause_togo_s cn56xx; + struct cvmx_gmxx_txx_pause_togo_cn30xx cn56xxp1; + struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xx; + struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xxp1; +}; + +union cvmx_gmxx_txx_pause_zero { + uint64_t u64; + struct cvmx_gmxx_txx_pause_zero_s { + uint64_t reserved_1_63:63; + uint64_t send:1; + } s; + struct cvmx_gmxx_txx_pause_zero_s cn30xx; + struct cvmx_gmxx_txx_pause_zero_s cn31xx; + struct cvmx_gmxx_txx_pause_zero_s cn38xx; + struct cvmx_gmxx_txx_pause_zero_s cn38xxp2; + struct cvmx_gmxx_txx_pause_zero_s cn50xx; + struct cvmx_gmxx_txx_pause_zero_s cn52xx; + struct cvmx_gmxx_txx_pause_zero_s cn52xxp1; + struct cvmx_gmxx_txx_pause_zero_s cn56xx; + struct cvmx_gmxx_txx_pause_zero_s cn56xxp1; + struct cvmx_gmxx_txx_pause_zero_s cn58xx; + struct cvmx_gmxx_txx_pause_zero_s cn58xxp1; +}; + +union cvmx_gmxx_txx_sgmii_ctl { + uint64_t u64; + struct cvmx_gmxx_txx_sgmii_ctl_s { + uint64_t reserved_1_63:63; + uint64_t align:1; + } s; + struct cvmx_gmxx_txx_sgmii_ctl_s cn52xx; + struct cvmx_gmxx_txx_sgmii_ctl_s cn52xxp1; + struct cvmx_gmxx_txx_sgmii_ctl_s cn56xx; + struct cvmx_gmxx_txx_sgmii_ctl_s cn56xxp1; +}; + +union cvmx_gmxx_txx_slot { + uint64_t u64; + struct cvmx_gmxx_txx_slot_s { + uint64_t reserved_10_63:54; + uint64_t slot:10; + } s; + struct cvmx_gmxx_txx_slot_s cn30xx; + struct cvmx_gmxx_txx_slot_s cn31xx; + struct cvmx_gmxx_txx_slot_s cn38xx; + struct cvmx_gmxx_txx_slot_s cn38xxp2; + struct cvmx_gmxx_txx_slot_s cn50xx; + struct cvmx_gmxx_txx_slot_s cn52xx; + struct cvmx_gmxx_txx_slot_s cn52xxp1; + struct cvmx_gmxx_txx_slot_s cn56xx; + struct cvmx_gmxx_txx_slot_s cn56xxp1; + struct cvmx_gmxx_txx_slot_s cn58xx; + struct cvmx_gmxx_txx_slot_s cn58xxp1; +}; + +union cvmx_gmxx_txx_soft_pause { + uint64_t u64; + struct cvmx_gmxx_txx_soft_pause_s { + uint64_t reserved_16_63:48; + uint64_t time:16; + } s; + struct cvmx_gmxx_txx_soft_pause_s cn30xx; + struct cvmx_gmxx_txx_soft_pause_s cn31xx; + struct cvmx_gmxx_txx_soft_pause_s cn38xx; + struct cvmx_gmxx_txx_soft_pause_s cn38xxp2; + struct cvmx_gmxx_txx_soft_pause_s cn50xx; + struct cvmx_gmxx_txx_soft_pause_s cn52xx; + struct cvmx_gmxx_txx_soft_pause_s cn52xxp1; + struct cvmx_gmxx_txx_soft_pause_s cn56xx; + struct cvmx_gmxx_txx_soft_pause_s cn56xxp1; + struct cvmx_gmxx_txx_soft_pause_s cn58xx; + struct cvmx_gmxx_txx_soft_pause_s cn58xxp1; +}; + +union cvmx_gmxx_txx_stat0 { + uint64_t u64; + struct cvmx_gmxx_txx_stat0_s { + 
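+		/*
+		 * Counter pair packed into one CSR; the matching
+		 * XSDEF/XSCOL interrupt bits in GMX_TX_INT_REG below
+		 * suggest these count excessive-deferral and
+		 * excessive-collision events.
+		 */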
uint64_t xsdef:32; + uint64_t xscol:32; + } s; + struct cvmx_gmxx_txx_stat0_s cn30xx; + struct cvmx_gmxx_txx_stat0_s cn31xx; + struct cvmx_gmxx_txx_stat0_s cn38xx; + struct cvmx_gmxx_txx_stat0_s cn38xxp2; + struct cvmx_gmxx_txx_stat0_s cn50xx; + struct cvmx_gmxx_txx_stat0_s cn52xx; + struct cvmx_gmxx_txx_stat0_s cn52xxp1; + struct cvmx_gmxx_txx_stat0_s cn56xx; + struct cvmx_gmxx_txx_stat0_s cn56xxp1; + struct cvmx_gmxx_txx_stat0_s cn58xx; + struct cvmx_gmxx_txx_stat0_s cn58xxp1; +}; + +union cvmx_gmxx_txx_stat1 { + uint64_t u64; + struct cvmx_gmxx_txx_stat1_s { + uint64_t scol:32; + uint64_t mcol:32; + } s; + struct cvmx_gmxx_txx_stat1_s cn30xx; + struct cvmx_gmxx_txx_stat1_s cn31xx; + struct cvmx_gmxx_txx_stat1_s cn38xx; + struct cvmx_gmxx_txx_stat1_s cn38xxp2; + struct cvmx_gmxx_txx_stat1_s cn50xx; + struct cvmx_gmxx_txx_stat1_s cn52xx; + struct cvmx_gmxx_txx_stat1_s cn52xxp1; + struct cvmx_gmxx_txx_stat1_s cn56xx; + struct cvmx_gmxx_txx_stat1_s cn56xxp1; + struct cvmx_gmxx_txx_stat1_s cn58xx; + struct cvmx_gmxx_txx_stat1_s cn58xxp1; +}; + +union cvmx_gmxx_txx_stat2 { + uint64_t u64; + struct cvmx_gmxx_txx_stat2_s { + uint64_t reserved_48_63:16; + uint64_t octs:48; + } s; + struct cvmx_gmxx_txx_stat2_s cn30xx; + struct cvmx_gmxx_txx_stat2_s cn31xx; + struct cvmx_gmxx_txx_stat2_s cn38xx; + struct cvmx_gmxx_txx_stat2_s cn38xxp2; + struct cvmx_gmxx_txx_stat2_s cn50xx; + struct cvmx_gmxx_txx_stat2_s cn52xx; + struct cvmx_gmxx_txx_stat2_s cn52xxp1; + struct cvmx_gmxx_txx_stat2_s cn56xx; + struct cvmx_gmxx_txx_stat2_s cn56xxp1; + struct cvmx_gmxx_txx_stat2_s cn58xx; + struct cvmx_gmxx_txx_stat2_s cn58xxp1; +}; + +union cvmx_gmxx_txx_stat3 { + uint64_t u64; + struct cvmx_gmxx_txx_stat3_s { + uint64_t reserved_32_63:32; + uint64_t pkts:32; + } s; + struct cvmx_gmxx_txx_stat3_s cn30xx; + struct cvmx_gmxx_txx_stat3_s cn31xx; + struct cvmx_gmxx_txx_stat3_s cn38xx; + struct cvmx_gmxx_txx_stat3_s cn38xxp2; + struct cvmx_gmxx_txx_stat3_s cn50xx; + struct cvmx_gmxx_txx_stat3_s cn52xx; + struct cvmx_gmxx_txx_stat3_s cn52xxp1; + struct cvmx_gmxx_txx_stat3_s cn56xx; + struct cvmx_gmxx_txx_stat3_s cn56xxp1; + struct cvmx_gmxx_txx_stat3_s cn58xx; + struct cvmx_gmxx_txx_stat3_s cn58xxp1; +}; + +union cvmx_gmxx_txx_stat4 { + uint64_t u64; + struct cvmx_gmxx_txx_stat4_s { + uint64_t hist1:32; + uint64_t hist0:32; + } s; + struct cvmx_gmxx_txx_stat4_s cn30xx; + struct cvmx_gmxx_txx_stat4_s cn31xx; + struct cvmx_gmxx_txx_stat4_s cn38xx; + struct cvmx_gmxx_txx_stat4_s cn38xxp2; + struct cvmx_gmxx_txx_stat4_s cn50xx; + struct cvmx_gmxx_txx_stat4_s cn52xx; + struct cvmx_gmxx_txx_stat4_s cn52xxp1; + struct cvmx_gmxx_txx_stat4_s cn56xx; + struct cvmx_gmxx_txx_stat4_s cn56xxp1; + struct cvmx_gmxx_txx_stat4_s cn58xx; + struct cvmx_gmxx_txx_stat4_s cn58xxp1; +}; + +union cvmx_gmxx_txx_stat5 { + uint64_t u64; + struct cvmx_gmxx_txx_stat5_s { + uint64_t hist3:32; + uint64_t hist2:32; + } s; + struct cvmx_gmxx_txx_stat5_s cn30xx; + struct cvmx_gmxx_txx_stat5_s cn31xx; + struct cvmx_gmxx_txx_stat5_s cn38xx; + struct cvmx_gmxx_txx_stat5_s cn38xxp2; + struct cvmx_gmxx_txx_stat5_s cn50xx; + struct cvmx_gmxx_txx_stat5_s cn52xx; + struct cvmx_gmxx_txx_stat5_s cn52xxp1; + struct cvmx_gmxx_txx_stat5_s cn56xx; + struct cvmx_gmxx_txx_stat5_s cn56xxp1; + struct cvmx_gmxx_txx_stat5_s cn58xx; + struct cvmx_gmxx_txx_stat5_s cn58xxp1; +}; + +union cvmx_gmxx_txx_stat6 { + uint64_t u64; + struct cvmx_gmxx_txx_stat6_s { + uint64_t hist5:32; + uint64_t hist4:32; + } s; + struct cvmx_gmxx_txx_stat6_s cn30xx; + struct cvmx_gmxx_txx_stat6_s 
cn31xx; + struct cvmx_gmxx_txx_stat6_s cn38xx; + struct cvmx_gmxx_txx_stat6_s cn38xxp2; + struct cvmx_gmxx_txx_stat6_s cn50xx; + struct cvmx_gmxx_txx_stat6_s cn52xx; + struct cvmx_gmxx_txx_stat6_s cn52xxp1; + struct cvmx_gmxx_txx_stat6_s cn56xx; + struct cvmx_gmxx_txx_stat6_s cn56xxp1; + struct cvmx_gmxx_txx_stat6_s cn58xx; + struct cvmx_gmxx_txx_stat6_s cn58xxp1; +}; + +union cvmx_gmxx_txx_stat7 { + uint64_t u64; + struct cvmx_gmxx_txx_stat7_s { + uint64_t hist7:32; + uint64_t hist6:32; + } s; + struct cvmx_gmxx_txx_stat7_s cn30xx; + struct cvmx_gmxx_txx_stat7_s cn31xx; + struct cvmx_gmxx_txx_stat7_s cn38xx; + struct cvmx_gmxx_txx_stat7_s cn38xxp2; + struct cvmx_gmxx_txx_stat7_s cn50xx; + struct cvmx_gmxx_txx_stat7_s cn52xx; + struct cvmx_gmxx_txx_stat7_s cn52xxp1; + struct cvmx_gmxx_txx_stat7_s cn56xx; + struct cvmx_gmxx_txx_stat7_s cn56xxp1; + struct cvmx_gmxx_txx_stat7_s cn58xx; + struct cvmx_gmxx_txx_stat7_s cn58xxp1; +}; + +union cvmx_gmxx_txx_stat8 { + uint64_t u64; + struct cvmx_gmxx_txx_stat8_s { + uint64_t mcst:32; + uint64_t bcst:32; + } s; + struct cvmx_gmxx_txx_stat8_s cn30xx; + struct cvmx_gmxx_txx_stat8_s cn31xx; + struct cvmx_gmxx_txx_stat8_s cn38xx; + struct cvmx_gmxx_txx_stat8_s cn38xxp2; + struct cvmx_gmxx_txx_stat8_s cn50xx; + struct cvmx_gmxx_txx_stat8_s cn52xx; + struct cvmx_gmxx_txx_stat8_s cn52xxp1; + struct cvmx_gmxx_txx_stat8_s cn56xx; + struct cvmx_gmxx_txx_stat8_s cn56xxp1; + struct cvmx_gmxx_txx_stat8_s cn58xx; + struct cvmx_gmxx_txx_stat8_s cn58xxp1; +}; + +union cvmx_gmxx_txx_stat9 { + uint64_t u64; + struct cvmx_gmxx_txx_stat9_s { + uint64_t undflw:32; + uint64_t ctl:32; + } s; + struct cvmx_gmxx_txx_stat9_s cn30xx; + struct cvmx_gmxx_txx_stat9_s cn31xx; + struct cvmx_gmxx_txx_stat9_s cn38xx; + struct cvmx_gmxx_txx_stat9_s cn38xxp2; + struct cvmx_gmxx_txx_stat9_s cn50xx; + struct cvmx_gmxx_txx_stat9_s cn52xx; + struct cvmx_gmxx_txx_stat9_s cn52xxp1; + struct cvmx_gmxx_txx_stat9_s cn56xx; + struct cvmx_gmxx_txx_stat9_s cn56xxp1; + struct cvmx_gmxx_txx_stat9_s cn58xx; + struct cvmx_gmxx_txx_stat9_s cn58xxp1; +}; + +union cvmx_gmxx_txx_stats_ctl { + uint64_t u64; + struct cvmx_gmxx_txx_stats_ctl_s { + uint64_t reserved_1_63:63; + uint64_t rd_clr:1; + } s; + struct cvmx_gmxx_txx_stats_ctl_s cn30xx; + struct cvmx_gmxx_txx_stats_ctl_s cn31xx; + struct cvmx_gmxx_txx_stats_ctl_s cn38xx; + struct cvmx_gmxx_txx_stats_ctl_s cn38xxp2; + struct cvmx_gmxx_txx_stats_ctl_s cn50xx; + struct cvmx_gmxx_txx_stats_ctl_s cn52xx; + struct cvmx_gmxx_txx_stats_ctl_s cn52xxp1; + struct cvmx_gmxx_txx_stats_ctl_s cn56xx; + struct cvmx_gmxx_txx_stats_ctl_s cn56xxp1; + struct cvmx_gmxx_txx_stats_ctl_s cn58xx; + struct cvmx_gmxx_txx_stats_ctl_s cn58xxp1; +}; + +union cvmx_gmxx_txx_thresh { + uint64_t u64; + struct cvmx_gmxx_txx_thresh_s { + uint64_t reserved_9_63:55; + uint64_t cnt:9; + } s; + struct cvmx_gmxx_txx_thresh_cn30xx { + uint64_t reserved_7_63:57; + uint64_t cnt:7; + } cn30xx; + struct cvmx_gmxx_txx_thresh_cn30xx cn31xx; + struct cvmx_gmxx_txx_thresh_s cn38xx; + struct cvmx_gmxx_txx_thresh_s cn38xxp2; + struct cvmx_gmxx_txx_thresh_cn30xx cn50xx; + struct cvmx_gmxx_txx_thresh_s cn52xx; + struct cvmx_gmxx_txx_thresh_s cn52xxp1; + struct cvmx_gmxx_txx_thresh_s cn56xx; + struct cvmx_gmxx_txx_thresh_s cn56xxp1; + struct cvmx_gmxx_txx_thresh_s cn58xx; + struct cvmx_gmxx_txx_thresh_s cn58xxp1; +}; + +union cvmx_gmxx_tx_bp { + uint64_t u64; + struct cvmx_gmxx_tx_bp_s { + uint64_t reserved_4_63:60; + uint64_t bp:4; + } s; + struct cvmx_gmxx_tx_bp_cn30xx { + uint64_t reserved_3_63:61; + 
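+		/* 3 ports on cn30xx/cn31xx/cn50xx vs. 4 in the generic layout above */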
uint64_t bp:3; + } cn30xx; + struct cvmx_gmxx_tx_bp_cn30xx cn31xx; + struct cvmx_gmxx_tx_bp_s cn38xx; + struct cvmx_gmxx_tx_bp_s cn38xxp2; + struct cvmx_gmxx_tx_bp_cn30xx cn50xx; + struct cvmx_gmxx_tx_bp_s cn52xx; + struct cvmx_gmxx_tx_bp_s cn52xxp1; + struct cvmx_gmxx_tx_bp_s cn56xx; + struct cvmx_gmxx_tx_bp_s cn56xxp1; + struct cvmx_gmxx_tx_bp_s cn58xx; + struct cvmx_gmxx_tx_bp_s cn58xxp1; +}; + +union cvmx_gmxx_tx_clk_mskx { + uint64_t u64; + struct cvmx_gmxx_tx_clk_mskx_s { + uint64_t reserved_1_63:63; + uint64_t msk:1; + } s; + struct cvmx_gmxx_tx_clk_mskx_s cn30xx; + struct cvmx_gmxx_tx_clk_mskx_s cn50xx; +}; + +union cvmx_gmxx_tx_col_attempt { + uint64_t u64; + struct cvmx_gmxx_tx_col_attempt_s { + uint64_t reserved_5_63:59; + uint64_t limit:5; + } s; + struct cvmx_gmxx_tx_col_attempt_s cn30xx; + struct cvmx_gmxx_tx_col_attempt_s cn31xx; + struct cvmx_gmxx_tx_col_attempt_s cn38xx; + struct cvmx_gmxx_tx_col_attempt_s cn38xxp2; + struct cvmx_gmxx_tx_col_attempt_s cn50xx; + struct cvmx_gmxx_tx_col_attempt_s cn52xx; + struct cvmx_gmxx_tx_col_attempt_s cn52xxp1; + struct cvmx_gmxx_tx_col_attempt_s cn56xx; + struct cvmx_gmxx_tx_col_attempt_s cn56xxp1; + struct cvmx_gmxx_tx_col_attempt_s cn58xx; + struct cvmx_gmxx_tx_col_attempt_s cn58xxp1; +}; + +union cvmx_gmxx_tx_corrupt { + uint64_t u64; + struct cvmx_gmxx_tx_corrupt_s { + uint64_t reserved_4_63:60; + uint64_t corrupt:4; + } s; + struct cvmx_gmxx_tx_corrupt_cn30xx { + uint64_t reserved_3_63:61; + uint64_t corrupt:3; + } cn30xx; + struct cvmx_gmxx_tx_corrupt_cn30xx cn31xx; + struct cvmx_gmxx_tx_corrupt_s cn38xx; + struct cvmx_gmxx_tx_corrupt_s cn38xxp2; + struct cvmx_gmxx_tx_corrupt_cn30xx cn50xx; + struct cvmx_gmxx_tx_corrupt_s cn52xx; + struct cvmx_gmxx_tx_corrupt_s cn52xxp1; + struct cvmx_gmxx_tx_corrupt_s cn56xx; + struct cvmx_gmxx_tx_corrupt_s cn56xxp1; + struct cvmx_gmxx_tx_corrupt_s cn58xx; + struct cvmx_gmxx_tx_corrupt_s cn58xxp1; +}; + +union cvmx_gmxx_tx_hg2_reg1 { + uint64_t u64; + struct cvmx_gmxx_tx_hg2_reg1_s { + uint64_t reserved_16_63:48; + uint64_t tx_xof:16; + } s; + struct cvmx_gmxx_tx_hg2_reg1_s cn52xx; + struct cvmx_gmxx_tx_hg2_reg1_s cn52xxp1; + struct cvmx_gmxx_tx_hg2_reg1_s cn56xx; +}; + +union cvmx_gmxx_tx_hg2_reg2 { + uint64_t u64; + struct cvmx_gmxx_tx_hg2_reg2_s { + uint64_t reserved_16_63:48; + uint64_t tx_xon:16; + } s; + struct cvmx_gmxx_tx_hg2_reg2_s cn52xx; + struct cvmx_gmxx_tx_hg2_reg2_s cn52xxp1; + struct cvmx_gmxx_tx_hg2_reg2_s cn56xx; +}; + +union cvmx_gmxx_tx_ifg { + uint64_t u64; + struct cvmx_gmxx_tx_ifg_s { + uint64_t reserved_8_63:56; + uint64_t ifg2:4; + uint64_t ifg1:4; + } s; + struct cvmx_gmxx_tx_ifg_s cn30xx; + struct cvmx_gmxx_tx_ifg_s cn31xx; + struct cvmx_gmxx_tx_ifg_s cn38xx; + struct cvmx_gmxx_tx_ifg_s cn38xxp2; + struct cvmx_gmxx_tx_ifg_s cn50xx; + struct cvmx_gmxx_tx_ifg_s cn52xx; + struct cvmx_gmxx_tx_ifg_s cn52xxp1; + struct cvmx_gmxx_tx_ifg_s cn56xx; + struct cvmx_gmxx_tx_ifg_s cn56xxp1; + struct cvmx_gmxx_tx_ifg_s cn58xx; + struct cvmx_gmxx_tx_ifg_s cn58xxp1; +}; + +union cvmx_gmxx_tx_int_en { + uint64_t u64; + struct cvmx_gmxx_tx_int_en_s { + uint64_t reserved_20_63:44; + uint64_t late_col:4; + uint64_t xsdef:4; + uint64_t xscol:4; + uint64_t reserved_6_7:2; + uint64_t undflw:4; + uint64_t ncb_nxa:1; + uint64_t pko_nxa:1; + } s; + struct cvmx_gmxx_tx_int_en_cn30xx { + uint64_t reserved_19_63:45; + uint64_t late_col:3; + uint64_t reserved_15_15:1; + uint64_t xsdef:3; + uint64_t reserved_11_11:1; + uint64_t xscol:3; + uint64_t reserved_5_7:3; + uint64_t undflw:3; + uint64_t 
reserved_1_1:1; + uint64_t pko_nxa:1; + } cn30xx; + struct cvmx_gmxx_tx_int_en_cn31xx { + uint64_t reserved_15_63:49; + uint64_t xsdef:3; + uint64_t reserved_11_11:1; + uint64_t xscol:3; + uint64_t reserved_5_7:3; + uint64_t undflw:3; + uint64_t reserved_1_1:1; + uint64_t pko_nxa:1; + } cn31xx; + struct cvmx_gmxx_tx_int_en_s cn38xx; + struct cvmx_gmxx_tx_int_en_cn38xxp2 { + uint64_t reserved_16_63:48; + uint64_t xsdef:4; + uint64_t xscol:4; + uint64_t reserved_6_7:2; + uint64_t undflw:4; + uint64_t ncb_nxa:1; + uint64_t pko_nxa:1; + } cn38xxp2; + struct cvmx_gmxx_tx_int_en_cn30xx cn50xx; + struct cvmx_gmxx_tx_int_en_cn52xx { + uint64_t reserved_20_63:44; + uint64_t late_col:4; + uint64_t xsdef:4; + uint64_t xscol:4; + uint64_t reserved_6_7:2; + uint64_t undflw:4; + uint64_t reserved_1_1:1; + uint64_t pko_nxa:1; + } cn52xx; + struct cvmx_gmxx_tx_int_en_cn52xx cn52xxp1; + struct cvmx_gmxx_tx_int_en_cn52xx cn56xx; + struct cvmx_gmxx_tx_int_en_cn52xx cn56xxp1; + struct cvmx_gmxx_tx_int_en_s cn58xx; + struct cvmx_gmxx_tx_int_en_s cn58xxp1; +}; + +union cvmx_gmxx_tx_int_reg { + uint64_t u64; + struct cvmx_gmxx_tx_int_reg_s { + uint64_t reserved_20_63:44; + uint64_t late_col:4; + uint64_t xsdef:4; + uint64_t xscol:4; + uint64_t reserved_6_7:2; + uint64_t undflw:4; + uint64_t ncb_nxa:1; + uint64_t pko_nxa:1; + } s; + struct cvmx_gmxx_tx_int_reg_cn30xx { + uint64_t reserved_19_63:45; + uint64_t late_col:3; + uint64_t reserved_15_15:1; + uint64_t xsdef:3; + uint64_t reserved_11_11:1; + uint64_t xscol:3; + uint64_t reserved_5_7:3; + uint64_t undflw:3; + uint64_t reserved_1_1:1; + uint64_t pko_nxa:1; + } cn30xx; + struct cvmx_gmxx_tx_int_reg_cn31xx { + uint64_t reserved_15_63:49; + uint64_t xsdef:3; + uint64_t reserved_11_11:1; + uint64_t xscol:3; + uint64_t reserved_5_7:3; + uint64_t undflw:3; + uint64_t reserved_1_1:1; + uint64_t pko_nxa:1; + } cn31xx; + struct cvmx_gmxx_tx_int_reg_s cn38xx; + struct cvmx_gmxx_tx_int_reg_cn38xxp2 { + uint64_t reserved_16_63:48; + uint64_t xsdef:4; + uint64_t xscol:4; + uint64_t reserved_6_7:2; + uint64_t undflw:4; + uint64_t ncb_nxa:1; + uint64_t pko_nxa:1; + } cn38xxp2; + struct cvmx_gmxx_tx_int_reg_cn30xx cn50xx; + struct cvmx_gmxx_tx_int_reg_cn52xx { + uint64_t reserved_20_63:44; + uint64_t late_col:4; + uint64_t xsdef:4; + uint64_t xscol:4; + uint64_t reserved_6_7:2; + uint64_t undflw:4; + uint64_t reserved_1_1:1; + uint64_t pko_nxa:1; + } cn52xx; + struct cvmx_gmxx_tx_int_reg_cn52xx cn52xxp1; + struct cvmx_gmxx_tx_int_reg_cn52xx cn56xx; + struct cvmx_gmxx_tx_int_reg_cn52xx cn56xxp1; + struct cvmx_gmxx_tx_int_reg_s cn58xx; + struct cvmx_gmxx_tx_int_reg_s cn58xxp1; +}; + +union cvmx_gmxx_tx_jam { + uint64_t u64; + struct cvmx_gmxx_tx_jam_s { + uint64_t reserved_8_63:56; + uint64_t jam:8; + } s; + struct cvmx_gmxx_tx_jam_s cn30xx; + struct cvmx_gmxx_tx_jam_s cn31xx; + struct cvmx_gmxx_tx_jam_s cn38xx; + struct cvmx_gmxx_tx_jam_s cn38xxp2; + struct cvmx_gmxx_tx_jam_s cn50xx; + struct cvmx_gmxx_tx_jam_s cn52xx; + struct cvmx_gmxx_tx_jam_s cn52xxp1; + struct cvmx_gmxx_tx_jam_s cn56xx; + struct cvmx_gmxx_tx_jam_s cn56xxp1; + struct cvmx_gmxx_tx_jam_s cn58xx; + struct cvmx_gmxx_tx_jam_s cn58xxp1; +}; + +union cvmx_gmxx_tx_lfsr { + uint64_t u64; + struct cvmx_gmxx_tx_lfsr_s { + uint64_t reserved_16_63:48; + uint64_t lfsr:16; + } s; + struct cvmx_gmxx_tx_lfsr_s cn30xx; + struct cvmx_gmxx_tx_lfsr_s cn31xx; + struct cvmx_gmxx_tx_lfsr_s cn38xx; + struct cvmx_gmxx_tx_lfsr_s cn38xxp2; + struct cvmx_gmxx_tx_lfsr_s cn50xx; + struct cvmx_gmxx_tx_lfsr_s cn52xx; + struct 
cvmx_gmxx_tx_lfsr_s cn52xxp1; + struct cvmx_gmxx_tx_lfsr_s cn56xx; + struct cvmx_gmxx_tx_lfsr_s cn56xxp1; + struct cvmx_gmxx_tx_lfsr_s cn58xx; + struct cvmx_gmxx_tx_lfsr_s cn58xxp1; +}; + +union cvmx_gmxx_tx_ovr_bp { + uint64_t u64; + struct cvmx_gmxx_tx_ovr_bp_s { + uint64_t reserved_48_63:16; + uint64_t tx_prt_bp:16; + uint64_t reserved_12_31:20; + uint64_t en:4; + uint64_t bp:4; + uint64_t ign_full:4; + } s; + struct cvmx_gmxx_tx_ovr_bp_cn30xx { + uint64_t reserved_11_63:53; + uint64_t en:3; + uint64_t reserved_7_7:1; + uint64_t bp:3; + uint64_t reserved_3_3:1; + uint64_t ign_full:3; + } cn30xx; + struct cvmx_gmxx_tx_ovr_bp_cn30xx cn31xx; + struct cvmx_gmxx_tx_ovr_bp_cn38xx { + uint64_t reserved_12_63:52; + uint64_t en:4; + uint64_t bp:4; + uint64_t ign_full:4; + } cn38xx; + struct cvmx_gmxx_tx_ovr_bp_cn38xx cn38xxp2; + struct cvmx_gmxx_tx_ovr_bp_cn30xx cn50xx; + struct cvmx_gmxx_tx_ovr_bp_s cn52xx; + struct cvmx_gmxx_tx_ovr_bp_s cn52xxp1; + struct cvmx_gmxx_tx_ovr_bp_s cn56xx; + struct cvmx_gmxx_tx_ovr_bp_s cn56xxp1; + struct cvmx_gmxx_tx_ovr_bp_cn38xx cn58xx; + struct cvmx_gmxx_tx_ovr_bp_cn38xx cn58xxp1; +}; + +union cvmx_gmxx_tx_pause_pkt_dmac { + uint64_t u64; + struct cvmx_gmxx_tx_pause_pkt_dmac_s { + uint64_t reserved_48_63:16; + uint64_t dmac:48; + } s; + struct cvmx_gmxx_tx_pause_pkt_dmac_s cn30xx; + struct cvmx_gmxx_tx_pause_pkt_dmac_s cn31xx; + struct cvmx_gmxx_tx_pause_pkt_dmac_s cn38xx; + struct cvmx_gmxx_tx_pause_pkt_dmac_s cn38xxp2; + struct cvmx_gmxx_tx_pause_pkt_dmac_s cn50xx; + struct cvmx_gmxx_tx_pause_pkt_dmac_s cn52xx; + struct cvmx_gmxx_tx_pause_pkt_dmac_s cn52xxp1; + struct cvmx_gmxx_tx_pause_pkt_dmac_s cn56xx; + struct cvmx_gmxx_tx_pause_pkt_dmac_s cn56xxp1; + struct cvmx_gmxx_tx_pause_pkt_dmac_s cn58xx; + struct cvmx_gmxx_tx_pause_pkt_dmac_s cn58xxp1; +}; + +union cvmx_gmxx_tx_pause_pkt_type { + uint64_t u64; + struct cvmx_gmxx_tx_pause_pkt_type_s { + uint64_t reserved_16_63:48; + uint64_t type:16; + } s; + struct cvmx_gmxx_tx_pause_pkt_type_s cn30xx; + struct cvmx_gmxx_tx_pause_pkt_type_s cn31xx; + struct cvmx_gmxx_tx_pause_pkt_type_s cn38xx; + struct cvmx_gmxx_tx_pause_pkt_type_s cn38xxp2; + struct cvmx_gmxx_tx_pause_pkt_type_s cn50xx; + struct cvmx_gmxx_tx_pause_pkt_type_s cn52xx; + struct cvmx_gmxx_tx_pause_pkt_type_s cn52xxp1; + struct cvmx_gmxx_tx_pause_pkt_type_s cn56xx; + struct cvmx_gmxx_tx_pause_pkt_type_s cn56xxp1; + struct cvmx_gmxx_tx_pause_pkt_type_s cn58xx; + struct cvmx_gmxx_tx_pause_pkt_type_s cn58xxp1; +}; + +union cvmx_gmxx_tx_prts { + uint64_t u64; + struct cvmx_gmxx_tx_prts_s { + uint64_t reserved_5_63:59; + uint64_t prts:5; + } s; + struct cvmx_gmxx_tx_prts_s cn30xx; + struct cvmx_gmxx_tx_prts_s cn31xx; + struct cvmx_gmxx_tx_prts_s cn38xx; + struct cvmx_gmxx_tx_prts_s cn38xxp2; + struct cvmx_gmxx_tx_prts_s cn50xx; + struct cvmx_gmxx_tx_prts_s cn52xx; + struct cvmx_gmxx_tx_prts_s cn52xxp1; + struct cvmx_gmxx_tx_prts_s cn56xx; + struct cvmx_gmxx_tx_prts_s cn56xxp1; + struct cvmx_gmxx_tx_prts_s cn58xx; + struct cvmx_gmxx_tx_prts_s cn58xxp1; +}; + +union cvmx_gmxx_tx_spi_ctl { + uint64_t u64; + struct cvmx_gmxx_tx_spi_ctl_s { + uint64_t reserved_2_63:62; + uint64_t tpa_clr:1; + uint64_t cont_pkt:1; + } s; + struct cvmx_gmxx_tx_spi_ctl_s cn38xx; + struct cvmx_gmxx_tx_spi_ctl_s cn38xxp2; + struct cvmx_gmxx_tx_spi_ctl_s cn58xx; + struct cvmx_gmxx_tx_spi_ctl_s cn58xxp1; +}; + +union cvmx_gmxx_tx_spi_drain { + uint64_t u64; + struct cvmx_gmxx_tx_spi_drain_s { + uint64_t reserved_16_63:48; + uint64_t drain:16; + } s; + struct cvmx_gmxx_tx_spi_drain_s 
cn38xx; + struct cvmx_gmxx_tx_spi_drain_s cn58xx; + struct cvmx_gmxx_tx_spi_drain_s cn58xxp1; +}; + +union cvmx_gmxx_tx_spi_max { + uint64_t u64; + struct cvmx_gmxx_tx_spi_max_s { + uint64_t reserved_23_63:41; + uint64_t slice:7; + uint64_t max2:8; + uint64_t max1:8; + } s; + struct cvmx_gmxx_tx_spi_max_cn38xx { + uint64_t reserved_16_63:48; + uint64_t max2:8; + uint64_t max1:8; + } cn38xx; + struct cvmx_gmxx_tx_spi_max_cn38xx cn38xxp2; + struct cvmx_gmxx_tx_spi_max_s cn58xx; + struct cvmx_gmxx_tx_spi_max_s cn58xxp1; +}; + +union cvmx_gmxx_tx_spi_roundx { + uint64_t u64; + struct cvmx_gmxx_tx_spi_roundx_s { + uint64_t reserved_16_63:48; + uint64_t round:16; + } s; + struct cvmx_gmxx_tx_spi_roundx_s cn58xx; + struct cvmx_gmxx_tx_spi_roundx_s cn58xxp1; +}; + +union cvmx_gmxx_tx_spi_thresh { + uint64_t u64; + struct cvmx_gmxx_tx_spi_thresh_s { + uint64_t reserved_6_63:58; + uint64_t thresh:6; + } s; + struct cvmx_gmxx_tx_spi_thresh_s cn38xx; + struct cvmx_gmxx_tx_spi_thresh_s cn38xxp2; + struct cvmx_gmxx_tx_spi_thresh_s cn58xx; + struct cvmx_gmxx_tx_spi_thresh_s cn58xxp1; +}; + +union cvmx_gmxx_tx_xaui_ctl { + uint64_t u64; + struct cvmx_gmxx_tx_xaui_ctl_s { + uint64_t reserved_11_63:53; + uint64_t hg_pause_hgi:2; + uint64_t hg_en:1; + uint64_t reserved_7_7:1; + uint64_t ls_byp:1; + uint64_t ls:2; + uint64_t reserved_2_3:2; + uint64_t uni_en:1; + uint64_t dic_en:1; + } s; + struct cvmx_gmxx_tx_xaui_ctl_s cn52xx; + struct cvmx_gmxx_tx_xaui_ctl_s cn52xxp1; + struct cvmx_gmxx_tx_xaui_ctl_s cn56xx; + struct cvmx_gmxx_tx_xaui_ctl_s cn56xxp1; +}; + +union cvmx_gmxx_xaui_ext_loopback { + uint64_t u64; + struct cvmx_gmxx_xaui_ext_loopback_s { + uint64_t reserved_5_63:59; + uint64_t en:1; + uint64_t thresh:4; + } s; + struct cvmx_gmxx_xaui_ext_loopback_s cn52xx; + struct cvmx_gmxx_xaui_ext_loopback_s cn52xxp1; + struct cvmx_gmxx_xaui_ext_loopback_s cn56xx; + struct cvmx_gmxx_xaui_ext_loopback_s cn56xxp1; +}; + +#endif diff --git a/drivers/staging/octeon/cvmx-helper-board.c b/drivers/staging/octeon/cvmx-helper-board.c new file mode 100644 index 0000000..3085e38 --- /dev/null +++ b/drivers/staging/octeon/cvmx-helper-board.c @@ -0,0 +1,706 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/* + * + * Helper functions to abstract board specific data about + * network ports from the rest of the cvmx-helper files. 
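+ *
+ * Two usage conventions worth noting (illustrative sketches):
+ *
+ * - Each CSR in cvmx-gmxx-defs.h is a union of a raw u64 and
+ *   per-chip bit-field views, so a typical access is a
+ *   read-modify-write through the union, assuming the usual
+ *   cvmx_read_csr()/cvmx_write_csr() accessors and a
+ *   CVMX_GMXX_INF_MODE() address macro in the style of the other
+ *   macros in that header:
+ *
+ *       union cvmx_gmxx_inf_mode mode;
+ *       mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+ *       mode.s.en = 1;
+ *       cvmx_write_csr(CVMX_GMXX_INF_MODE(interface), mode.u64);
+ *
+ * - PHY addresses returned by cvmx_helper_board_get_mii_address()
+ *   may encode a bus number in bits <15:8>, which callers split as
+ *   cvmx_mdio_read(addr >> 8, addr & 0xff, reg).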
+ */ + +#include <asm/octeon/octeon.h> +#include <asm/octeon/cvmx-bootinfo.h> + +#include "cvmx-config.h" + +#include "cvmx-mdio.h" + +#include "cvmx-helper.h" +#include "cvmx-helper-util.h" +#include "cvmx-helper-board.h" + +#include "cvmx-gmxx-defs.h" +#include "cvmx-asxx-defs.h" + +/** + * cvmx_override_board_link_get(int ipd_port) is a function + * pointer. It is meant to allow customization of the process of + * talking to a PHY to determine link speed. It is called every + * time a PHY must be polled for link status. Users should set + * this pointer to a function before calling any cvmx-helper + * operations. + */ +cvmx_helper_link_info_t(*cvmx_override_board_link_get) (int ipd_port) = + NULL; + +/** + * Return the MII PHY address associated with the given IPD + * port. A result of -1 means there isn't an MII-capable PHY + * connected to this port. On chips supporting multiple MII + * busses the bus number is encoded in bits <15:8>. + * + * This function must be modified for every new Octeon board. + * Internally it uses switch statements based on the cvmx_sysinfo + * data to determine board types and revisions. It relies on the + * fact that every Octeon board receives a unique board type + * enumeration from the bootloader. + * + * @ipd_port: Octeon IPD port to get the MII address for. + * + * Returns MII PHY address and bus number or -1. + */ +int cvmx_helper_board_get_mii_address(int ipd_port) +{ + switch (cvmx_sysinfo_get()->board_type) { + case CVMX_BOARD_TYPE_SIM: + /* Simulator doesn't have MII */ + return -1; + case CVMX_BOARD_TYPE_EBT3000: + case CVMX_BOARD_TYPE_EBT5800: + case CVMX_BOARD_TYPE_THUNDER: + case CVMX_BOARD_TYPE_NICPRO2: + /* Interface 0 is SPI4, interface 1 is RGMII */ + if ((ipd_port >= 16) && (ipd_port < 20)) + return ipd_port - 16; + else + return -1; + case CVMX_BOARD_TYPE_KODAMA: + case CVMX_BOARD_TYPE_EBH3100: + case CVMX_BOARD_TYPE_HIKARI: + case CVMX_BOARD_TYPE_CN3010_EVB_HS5: + case CVMX_BOARD_TYPE_CN3005_EVB_HS5: + case CVMX_BOARD_TYPE_CN3020_EVB_HS5: + /* + * Port 0 is WAN connected to a PHY, Port 1 is GMII + * connected to a switch + */ + if (ipd_port == 0) + return 4; + else if (ipd_port == 1) + return 9; + else + return -1; + case CVMX_BOARD_TYPE_NAC38: + /* Board has 8 RGMII ports; PHYs are 0-7 */ + if ((ipd_port >= 0) && (ipd_port < 4)) + return ipd_port; + else if ((ipd_port >= 16) && (ipd_port < 20)) + return ipd_port - 16 + 4; + else + return -1; + case CVMX_BOARD_TYPE_EBH3000: + /* Board has dual SPI4 and no PHYs */ + return -1; + case CVMX_BOARD_TYPE_EBH5200: + case CVMX_BOARD_TYPE_EBH5201: + case CVMX_BOARD_TYPE_EBT5200: + /* + * Board has 4 SGMII ports. The PHYs start right after the MII + * ports: MII0 = 0, MII1 = 1, SGMII = 2-5. + */ + if ((ipd_port >= 0) && (ipd_port < 4)) + return ipd_port + 2; + else + return -1; + case CVMX_BOARD_TYPE_EBH5600: + case CVMX_BOARD_TYPE_EBH5601: + case CVMX_BOARD_TYPE_EBH5610: + /* + * Board has 8 SGMII ports. 4 connect out, 2 connect + * to a switch, and 2 loop to each other + */ + if ((ipd_port >= 0) && (ipd_port < 4)) + return ipd_port + 1; + else + return -1; + case CVMX_BOARD_TYPE_CUST_NB5: + if (ipd_port == 2) + return 4; + else + return -1; + case CVMX_BOARD_TYPE_NIC_XLE_4G: + /* Board has 4 SGMII ports connected to QLM3 (interface 1) */ + if ((ipd_port >= 16) && (ipd_port < 20)) + return ipd_port - 16 + 1; + else + return -1; + case CVMX_BOARD_TYPE_BBGW_REF: + /* + * No PHYs are connected to Octeon, everything is + * through the switch. + */ + return -1; + } + + /* Some unknown board. 
Somebody forgot to update this function... */ + cvmx_dprintf + ("cvmx_helper_board_get_mii_address: Unknown board type %d\n", + cvmx_sysinfo_get()->board_type); + return -1; +} + +/** + * This function is the board specific method of determining an + * ethernet port's link speed. Most Octeon boards have Marvell PHYs + * and are handled by the fall through case. This function must be + * updated for boards that don't have the normal Marvell PHYs. + * + * This function must be modified for every new Octeon board. + * Internally it uses switch statements based on the cvmx_sysinfo + * data to determine board types and revisions. It relies on the + * fact that every Octeon board receives a unique board type + * enumeration from the bootloader. + * + * @ipd_port: IPD input port associated with the port we want to get link + * status for. + * + * Returns The port's link status. If the link isn't fully resolved, this must + * return zero. + */ +cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port) +{ + cvmx_helper_link_info_t result; + int phy_addr; + int is_broadcom_phy = 0; + + /* Give the user a chance to override the processing of this function */ + if (cvmx_override_board_link_get) + return cvmx_override_board_link_get(ipd_port); + + /* Unless we fix it later, all links are defaulted to down */ + result.u64 = 0; + + /* + * This switch statement should handle all ports that either don't use + * Marvell PHYs, or don't support in-band status. + */ + switch (cvmx_sysinfo_get()->board_type) { + case CVMX_BOARD_TYPE_SIM: + /* The simulator gives you a simulated 1Gbps full duplex link */ + result.s.link_up = 1; + result.s.full_duplex = 1; + result.s.speed = 1000; + return result; + case CVMX_BOARD_TYPE_EBH3100: + case CVMX_BOARD_TYPE_CN3010_EVB_HS5: + case CVMX_BOARD_TYPE_CN3005_EVB_HS5: + case CVMX_BOARD_TYPE_CN3020_EVB_HS5: + /* Port 1 on these boards is always Gigabit */ + if (ipd_port == 1) { + result.s.link_up = 1; + result.s.full_duplex = 1; + result.s.speed = 1000; + return result; + } + /* Fall through to the generic code below */ + break; + case CVMX_BOARD_TYPE_CUST_NB5: + /* Port 1 on these boards is always Gigabit */ + if (ipd_port == 1) { + result.s.link_up = 1; + result.s.full_duplex = 1; + result.s.speed = 1000; + return result; + } else /* The other port uses a Broadcom PHY */ + is_broadcom_phy = 1; + break; + case CVMX_BOARD_TYPE_BBGW_REF: + if (ipd_port == 2) { + /* Port 2 is not hooked up */ + result.u64 = 0; + return result; + } else { + /* Ports 0 and 1 connect to the switch */ + result.s.link_up = 1; + result.s.full_duplex = 1; + result.s.speed = 1000; + return result; + } + break; + } + + phy_addr = cvmx_helper_board_get_mii_address(ipd_port); + if (phy_addr != -1) { + if (is_broadcom_phy) { + /* + * Below we are going to read SMI/MDIO + * register 0x19 which works on Broadcom + * parts + */ + int phy_status = + cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, + 0x19); + switch ((phy_status >> 8) & 0x7) { + case 0: + result.u64 = 0; + break; + case 1: + result.s.link_up = 1; + result.s.full_duplex = 0; + result.s.speed = 10; + break; + case 2: + result.s.link_up = 1; + result.s.full_duplex = 1; + result.s.speed = 10; + break; + case 3: + result.s.link_up = 1; + result.s.full_duplex = 0; + result.s.speed = 100; + break; + case 4: + result.s.link_up = 1; + result.s.full_duplex = 1; + result.s.speed = 100; + break; + case 5: + result.s.link_up = 1; + result.s.full_duplex = 1; + result.s.speed = 100; + break; + case 6: 
result.s.link_up = 1; + result.s.full_duplex = 0; + result.s.speed = 1000; + break; + case 7: + result.s.link_up = 1; + result.s.full_duplex = 1; + result.s.speed = 1000; + break; + } + } else { + /* + * This code assumes we are using a Marvell + * Gigabit PHY. All the speed information can + * be read from register 17 in one + * go. Somebody using a different PHY will + * need to handle it above in the board + * specific area. + */ + int phy_status = + cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 17); + + /* + * If the resolve bit 11 isn't set, see if + * autoneg is turned off (bit 12, reg 0). The + * resolve bit doesn't get set properly when + * autoneg is off, so force it. + */ + if ((phy_status & (1 << 11)) == 0) { + int auto_status = + cvmx_mdio_read(phy_addr >> 8, + phy_addr & 0xff, 0); + if ((auto_status & (1 << 12)) == 0) + phy_status |= 1 << 11; + } + + /* + * Only return a link if the PHY has finished + * auto negotiation and set the resolved bit + * (bit 11) + */ + if (phy_status & (1 << 11)) { + result.s.link_up = 1; + result.s.full_duplex = ((phy_status >> 13) & 1); + switch ((phy_status >> 14) & 3) { + case 0: /* 10 Mbps */ + result.s.speed = 10; + break; + case 1: /* 100 Mbps */ + result.s.speed = 100; + break; + case 2: /* 1 Gbps */ + result.s.speed = 1000; + break; + case 3: /* Illegal */ + result.u64 = 0; + break; + } + } + } + } else if (OCTEON_IS_MODEL(OCTEON_CN3XXX) + || OCTEON_IS_MODEL(OCTEON_CN58XX) + || OCTEON_IS_MODEL(OCTEON_CN50XX)) { + /* + * We don't have a PHY address, so attempt to use + * in-band status. It is really important that boards + * not supporting in-band status never get + * here. Reading broken in-band status tends to do bad + * things + */ + union cvmx_gmxx_rxx_rx_inbnd inband_status; + int interface = cvmx_helper_get_interface_num(ipd_port); + int index = cvmx_helper_get_interface_index_num(ipd_port); + inband_status.u64 = + cvmx_read_csr(CVMX_GMXX_RXX_RX_INBND(index, interface)); + + result.s.link_up = inband_status.s.status; + result.s.full_duplex = inband_status.s.duplex; + switch (inband_status.s.speed) { + case 0: /* 10 Mbps */ + result.s.speed = 10; + break; + case 1: /* 100 Mbps */ + result.s.speed = 100; + break; + case 2: /* 1 Gbps */ + result.s.speed = 1000; + break; + case 3: /* Illegal */ + result.u64 = 0; + break; + } + } else { + /* + * We don't have a PHY address and we don't have + * in-band status. There is no way to determine the + * link speed. Return down assuming this port isn't + * wired + */ + result.u64 = 0; + } + + /* If link is down, return all fields as zero. */ + if (!result.s.link_up) + result.u64 = 0; + + return result; +} + +/** + * This function is the board specific method of changing the PHY + * speed, duplex, and auto-negotiation. This programs the PHY and + * not Octeon. This can be used to force Octeon's links to + * specific settings. + * + * @phy_addr: The address of the PHY to program + * @link_flags: + * Flags to control auto-negotiation. Bit 0 is auto-negotiation + * enable/disable to maintain backward compatibility. + * @link_info: Link speed to program. If the speed is zero and auto-negotiation + * is enabled, all possible negotiation speeds are advertised. 
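+ *
+ * A minimal usage sketch (illustrative only, not part of the SDK; the
+ * bus/address encoding comes from cvmx_helper_board_get_mii_address()):
+ *
+ *	cvmx_helper_link_info_t li;
+ *	int addr = cvmx_helper_board_get_mii_address(ipd_port);
+ *
+ *	li.u64 = 0;
+ *	li.s.speed = 100;	/* force 100 Mbps... */
+ *	li.s.full_duplex = 1;	/* ...full duplex, auto-negotiation off */
+ *	if (addr != -1)
+ *		cvmx_helper_board_link_set_phy(addr,
+ *			set_phy_link_flags_flow_control_dont_touch, li);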
+ * + * Returns Zero on success, negative on failure + */ +int cvmx_helper_board_link_set_phy(int phy_addr, + cvmx_helper_board_set_phy_link_flags_types_t + link_flags, + cvmx_helper_link_info_t link_info) +{ + + /* Set the flow control settings based on link_flags */ + if ((link_flags & set_phy_link_flags_flow_control_mask) != + set_phy_link_flags_flow_control_dont_touch) { + cvmx_mdio_phy_reg_autoneg_adver_t reg_autoneg_adver; + reg_autoneg_adver.u16 = + cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, + CVMX_MDIO_PHY_REG_AUTONEG_ADVER); + reg_autoneg_adver.s.asymmetric_pause = + (link_flags & set_phy_link_flags_flow_control_mask) == + set_phy_link_flags_flow_control_enable; + reg_autoneg_adver.s.pause = + (link_flags & set_phy_link_flags_flow_control_mask) == + set_phy_link_flags_flow_control_enable; + cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff, + CVMX_MDIO_PHY_REG_AUTONEG_ADVER, + reg_autoneg_adver.u16); + } + + /* If speed isn't set and autoneg is on advertise all supported modes */ + if ((link_flags & set_phy_link_flags_autoneg) + && (link_info.s.speed == 0)) { + cvmx_mdio_phy_reg_control_t reg_control; + cvmx_mdio_phy_reg_status_t reg_status; + cvmx_mdio_phy_reg_autoneg_adver_t reg_autoneg_adver; + cvmx_mdio_phy_reg_extended_status_t reg_extended_status; + cvmx_mdio_phy_reg_control_1000_t reg_control_1000; + + reg_status.u16 = + cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, + CVMX_MDIO_PHY_REG_STATUS); + reg_autoneg_adver.u16 = + cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, + CVMX_MDIO_PHY_REG_AUTONEG_ADVER); + reg_autoneg_adver.s.advert_100base_t4 = + reg_status.s.capable_100base_t4; + reg_autoneg_adver.s.advert_10base_tx_full = + reg_status.s.capable_10_full; + reg_autoneg_adver.s.advert_10base_tx_half = + reg_status.s.capable_10_half; + reg_autoneg_adver.s.advert_100base_tx_full = + reg_status.s.capable_100base_x_full; + reg_autoneg_adver.s.advert_100base_tx_half = + reg_status.s.capable_100base_x_half; + cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff, + CVMX_MDIO_PHY_REG_AUTONEG_ADVER, + reg_autoneg_adver.u16); + if (reg_status.s.capable_extended_status) { + reg_extended_status.u16 = + cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, + CVMX_MDIO_PHY_REG_EXTENDED_STATUS); + reg_control_1000.u16 = + cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, + CVMX_MDIO_PHY_REG_CONTROL_1000); + reg_control_1000.s.advert_1000base_t_full = + reg_extended_status.s.capable_1000base_t_full; + reg_control_1000.s.advert_1000base_t_half = + reg_extended_status.s.capable_1000base_t_half; + cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff, + CVMX_MDIO_PHY_REG_CONTROL_1000, + reg_control_1000.u16); + } + reg_control.u16 = + cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, + CVMX_MDIO_PHY_REG_CONTROL); + reg_control.s.autoneg_enable = 1; + reg_control.s.restart_autoneg = 1; + cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff, + CVMX_MDIO_PHY_REG_CONTROL, reg_control.u16); + } else if ((link_flags & set_phy_link_flags_autoneg)) { + cvmx_mdio_phy_reg_control_t reg_control; + cvmx_mdio_phy_reg_status_t reg_status; + cvmx_mdio_phy_reg_autoneg_adver_t reg_autoneg_adver; + cvmx_mdio_phy_reg_extended_status_t reg_extended_status; + cvmx_mdio_phy_reg_control_1000_t reg_control_1000; + + reg_status.u16 = + cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, + CVMX_MDIO_PHY_REG_STATUS); + reg_autoneg_adver.u16 = + cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, + CVMX_MDIO_PHY_REG_AUTONEG_ADVER); + reg_autoneg_adver.s.advert_100base_t4 = 0; + reg_autoneg_adver.s.advert_10base_tx_full = 0; + 
reg_autoneg_adver.s.advert_10base_tx_half = 0; + reg_autoneg_adver.s.advert_100base_tx_full = 0; + reg_autoneg_adver.s.advert_100base_tx_half = 0; + if (reg_status.s.capable_extended_status) { + reg_extended_status.u16 = + cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, + CVMX_MDIO_PHY_REG_EXTENDED_STATUS); + reg_control_1000.u16 = + cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, + CVMX_MDIO_PHY_REG_CONTROL_1000); + reg_control_1000.s.advert_1000base_t_full = 0; + reg_control_1000.s.advert_1000base_t_half = 0; + } + switch (link_info.s.speed) { + case 10: + reg_autoneg_adver.s.advert_10base_tx_full = + link_info.s.full_duplex; + reg_autoneg_adver.s.advert_10base_tx_half = + !link_info.s.full_duplex; + break; + case 100: + reg_autoneg_adver.s.advert_100base_tx_full = + link_info.s.full_duplex; + reg_autoneg_adver.s.advert_100base_tx_half = + !link_info.s.full_duplex; + break; + case 1000: + reg_control_1000.s.advert_1000base_t_full = + link_info.s.full_duplex; + reg_control_1000.s.advert_1000base_t_half = + !link_info.s.full_duplex; + break; + } + cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff, + CVMX_MDIO_PHY_REG_AUTONEG_ADVER, + reg_autoneg_adver.u16); + if (reg_status.s.capable_extended_status) + cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff, + CVMX_MDIO_PHY_REG_CONTROL_1000, + reg_control_1000.u16); + reg_control.u16 = + cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, + CVMX_MDIO_PHY_REG_CONTROL); + reg_control.s.autoneg_enable = 1; + reg_control.s.restart_autoneg = 1; + cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff, + CVMX_MDIO_PHY_REG_CONTROL, reg_control.u16); + } else { + cvmx_mdio_phy_reg_control_t reg_control; + reg_control.u16 = + cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, + CVMX_MDIO_PHY_REG_CONTROL); + reg_control.s.autoneg_enable = 0; + reg_control.s.restart_autoneg = 1; + reg_control.s.duplex = link_info.s.full_duplex; + if (link_info.s.speed == 1000) { + reg_control.s.speed_msb = 1; + reg_control.s.speed_lsb = 0; + } else if (link_info.s.speed == 100) { + reg_control.s.speed_msb = 0; + reg_control.s.speed_lsb = 1; + } else if (link_info.s.speed == 10) { + reg_control.s.speed_msb = 0; + reg_control.s.speed_lsb = 0; + } + cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff, + CVMX_MDIO_PHY_REG_CONTROL, reg_control.u16); + } + return 0; +} + +/** + * This function is called by cvmx_helper_interface_probe() after it + * determines the number of ports Octeon can support on a specific + * interface. This function is the per board location to override + * this value. It is called with the number of ports Octeon might + * support and should return the number of actual ports on the + * board. + * + * This function must be modified for every new Octeon board. + * Internally it uses switch statements based on the cvmx_sysinfo + * data to determine board types and revisions. It relies on the + * fact that every Octeon board receives a unique board type + * enumeration from the bootloader. + * + * @interface: Interface to probe + * @supported_ports: + * Number of ports Octeon supports. + * + * Returns Number of ports the actual board supports. Many times this will + * simply be "supported_ports". 
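+ *
+ * For example, the switch statement below returns 2 for interface 0 on
+ * the CN3005 evaluation board, overriding whatever port count the chip
+ * itself reports.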
+ */ +int __cvmx_helper_board_interface_probe(int interface, int supported_ports) +{ + switch (cvmx_sysinfo_get()->board_type) { + case CVMX_BOARD_TYPE_CN3005_EVB_HS5: + if (interface == 0) + return 2; + break; + case CVMX_BOARD_TYPE_BBGW_REF: + if (interface == 0) + return 2; + break; + case CVMX_BOARD_TYPE_NIC_XLE_4G: + if (interface == 0) + return 0; + break; + /* The 2nd interface on the EBH5600 is connected to the Marvell switch, + which we don't support. Disable ports connected to it */ + case CVMX_BOARD_TYPE_EBH5600: + if (interface == 1) + return 0; + break; + } + return supported_ports; +} + +/** + * Enable packet input/output from the hardware. This function is + * called by cvmx_helper_packet_hardware_enable() to + * perform board specific initialization. For most boards + * nothing is needed. + * + * @interface: Interface to enable + * + * Returns Zero on success, negative on failure + */ +int __cvmx_helper_board_hardware_enable(int interface) +{ + if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5) { + if (interface == 0) { + /* Different config for switch port */ + cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(1, interface), 0); + cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(1, interface), 0); + /* + * Boards with gigabit WAN ports need a + * different setting that is compatible with + * 100 Mbit settings + */ + cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(0, interface), + 0xc); + cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(0, interface), + 0xc); + } + } else if (cvmx_sysinfo_get()->board_type == + CVMX_BOARD_TYPE_CN3010_EVB_HS5) { + /* + * Broadcom PHYs require different ASX + * clocks. Unfortunately many boards don't define a + * new board ID and simply mangle the + * CN3010_EVB_HS5 one + */ + if (interface == 0) { + /* + * Some boards use a hacked up bootloader that + * identifies them as CN3010_EVB_HS5 + * evaluation boards. This leads to all kinds + * of configuration problems. Detect one + * case, and print a warning, while trying to do + * the right thing. + */ + int phy_addr = cvmx_helper_board_get_mii_address(0); + if (phy_addr != -1) { + int phy_identifier = + cvmx_mdio_read(phy_addr >> 8, + phy_addr & 0xff, 0x2); + /* Is it a Broadcom PHY? 
*/ + if (phy_identifier == 0x0143) { + cvmx_dprintf("\n"); + cvmx_dprintf("ERROR:\n"); + cvmx_dprintf + ("ERROR: Board type is CVMX_BOARD_TYPE_CN3010_EVB_HS5, but Broadcom PHY found.\n"); + cvmx_dprintf + ("ERROR: The board type is mis-configured, and software malfunctions are likely.\n"); + cvmx_dprintf + ("ERROR: All boards require a unique board type to identify them.\n"); + cvmx_dprintf("ERROR:\n"); + cvmx_dprintf("\n"); + cvmx_wait(1000000000); + cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX + (0, interface), 5); + cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX + (0, interface), 5); + } + } + } + } + return 0; +} + +cvmx_helper_board_usb_clock_types_t __cvmx_helper_board_usb_get_clock_type(void) +{ + switch (cvmx_sysinfo_get()->board_type) { + case CVMX_BOARD_TYPE_BBGW_REF: + return USB_CLOCK_TYPE_CRYSTAL_12; + } + return USB_CLOCK_TYPE_REF_48; +} + +int __cvmx_helper_board_usb_get_num_ports(int supported_ports) +{ + switch (cvmx_sysinfo_get()->board_type) { + case CVMX_BOARD_TYPE_NIC_XLE_4G: + return 0; + } + + return supported_ports; +} diff --git a/drivers/staging/octeon/cvmx-helper-board.h b/drivers/staging/octeon/cvmx-helper-board.h new file mode 100644 index 0000000..dc20b01 --- /dev/null +++ b/drivers/staging/octeon/cvmx-helper-board.h @@ -0,0 +1,180 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/** + * + * Helper functions to abstract board specific data about + * network ports from the rest of the cvmx-helper files. + * + */ +#ifndef __CVMX_HELPER_BOARD_H__ +#define __CVMX_HELPER_BOARD_H__ + +#include "cvmx-helper.h" + +typedef enum { + USB_CLOCK_TYPE_REF_12, + USB_CLOCK_TYPE_REF_24, + USB_CLOCK_TYPE_REF_48, + USB_CLOCK_TYPE_CRYSTAL_12, +} cvmx_helper_board_usb_clock_types_t; + +typedef enum { + set_phy_link_flags_autoneg = 0x1, + set_phy_link_flags_flow_control_dont_touch = 0x0 << 1, + set_phy_link_flags_flow_control_enable = 0x1 << 1, + set_phy_link_flags_flow_control_disable = 0x2 << 1, + set_phy_link_flags_flow_control_mask = 0x3 << 1, /* Mask for 2 bit wide flow control field */ +} cvmx_helper_board_set_phy_link_flags_types_t; + +/** + * cvmx_override_board_link_get(int ipd_port) is a function + * pointer. It is meant to allow customization of the process of + * talking to a PHY to determine link speed. It is called every + * time a PHY must be polled for link status. Users should set + * this pointer to a function before calling any cvmx-helper + * operations. 
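+ *
+ * A hypothetical override that reports a fixed 1Gbps full-duplex link
+ * (sketch only, mirroring what the simulator board type returns; the
+ * function name is illustrative):
+ *
+ *	static cvmx_helper_link_info_t fixed_link_get(int ipd_port)
+ *	{
+ *		cvmx_helper_link_info_t r;
+ *		r.u64 = 0;
+ *		r.s.link_up = 1;
+ *		r.s.full_duplex = 1;
+ *		r.s.speed = 1000;
+ *		return r;
+ *	}
+ *
+ *	cvmx_override_board_link_get = fixed_link_get;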
+ */ +extern cvmx_helper_link_info_t(*cvmx_override_board_link_get) (int ipd_port); + +/** + * Return the MII PHY address associated with the given IPD + * port. A result of -1 means there isn't an MII-capable PHY + * connected to this port. On chips supporting multiple MII + * busses the bus number is encoded in bits <15:8>. + * + * This function must be modified for every new Octeon board. + * Internally it uses switch statements based on the cvmx_sysinfo + * data to determine board types and revisions. It relies on the + * fact that every Octeon board receives a unique board type + * enumeration from the bootloader. + * + * @ipd_port: Octeon IPD port to get the MII address for. + * + * Returns MII PHY address and bus number or -1. + */ +extern int cvmx_helper_board_get_mii_address(int ipd_port); + +/** + * This function is the board specific method of changing the PHY + * speed, duplex, and autonegotiation. This programs the PHY and + * not Octeon. This can be used to force Octeon's links to + * specific settings. + * + * @phy_addr: The address of the PHY to program + * @link_flags: + * Flags to control autonegotiation. Bit 0 is autonegotiation + * enable/disable to maintain backward compatibility. + * @link_info: Link speed to program. If the speed is zero and autonegotiation + * is enabled, all possible negotiation speeds are advertised. + * + * Returns Zero on success, negative on failure + */ +int cvmx_helper_board_link_set_phy(int phy_addr, + cvmx_helper_board_set_phy_link_flags_types_t + link_flags, + cvmx_helper_link_info_t link_info); + +/** + * This function is the board specific method of determining an + * ethernet port's link speed. Most Octeon boards have Marvell PHYs + * and are handled by the fall through case. This function must be + * updated for boards that don't have the normal Marvell PHYs. + * + * This function must be modified for every new Octeon board. + * Internally it uses switch statements based on the cvmx_sysinfo + * data to determine board types and revisions. It relies on the + * fact that every Octeon board receives a unique board type + * enumeration from the bootloader. + * + * @ipd_port: IPD input port associated with the port we want to get link + * status for. + * + * Returns The port's link status. If the link isn't fully resolved, this must + * return zero. + */ +extern cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port); + +/** + * This function is called by cvmx_helper_interface_probe() after it + * determines the number of ports Octeon can support on a specific + * interface. This function is the per board location to override + * this value. It is called with the number of ports Octeon might + * support and should return the number of actual ports on the + * board. + * + * This function must be modified for every new Octeon board. + * Internally it uses switch statements based on the cvmx_sysinfo + * data to determine board types and revisions. It relies on the + * fact that every Octeon board receives a unique board type + * enumeration from the bootloader. + * + * @interface: Interface to probe + * @supported_ports: + * Number of ports Octeon supports. + * + * Returns Number of ports the actual board supports. Many times this will + * simply be "supported_ports". + */ +extern int __cvmx_helper_board_interface_probe(int interface, + int supported_ports); + +/** + * Enable packet input/output from the hardware. This function is + * called by cvmx_helper_packet_hardware_enable() to + * perform board specific initialization. 
For most boards + * nothing is needed. + * + * @interface: Interface to enable + * + * Returns Zero on success, negative on failure + */ +extern int __cvmx_helper_board_hardware_enable(int interface); + +/** + * Gets the clock type used for the USB block based on board type. + * Used by the USB code for auto configuration of clock type. + * + * Returns USB clock type enumeration + */ +cvmx_helper_board_usb_clock_types_t +__cvmx_helper_board_usb_get_clock_type(void); + +/** + * Adjusts the number of available USB ports on Octeon based on board + * specifics. + * + * @supported_ports: expected number of ports based on chip type; + * + * + * Returns number of available usb ports, based on board specifics. + * Return value is supported_ports if function does not + * override. + */ +int __cvmx_helper_board_usb_get_num_ports(int supported_ports); + +#endif /* __CVMX_HELPER_BOARD_H__ */ diff --git a/drivers/staging/octeon/cvmx-helper-fpa.c b/drivers/staging/octeon/cvmx-helper-fpa.c new file mode 100644 index 0000000..c239e5f --- /dev/null +++ b/drivers/staging/octeon/cvmx-helper-fpa.c @@ -0,0 +1,243 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/** + * @file + * + * Helper functions for FPA setup. + * + */ +#include "executive-config.h" +#include "cvmx-config.h" +#include "cvmx.h" +#include "cvmx-bootmem.h" +#include "cvmx-fpa.h" +#include "cvmx-helper-fpa.h" + +/** + * Allocate memory for and initialize a single FPA pool. + * + * @pool: Pool to initialize + * @buffer_size: Size of buffers to allocate in bytes + * @buffers: Number of buffers to put in the pool. Zero is allowed + * @name: String name of the pool for debugging purposes + * Returns Zero on success, non-zero on failure + */ +static int __cvmx_helper_initialize_fpa_pool(int pool, uint64_t buffer_size, + uint64_t buffers, const char *name) +{ + uint64_t current_num; + void *memory; + uint64_t align = CVMX_CACHE_LINE_SIZE; + + /* + * Align the allocation so that power of 2 size buffers are + * naturally aligned. + */ + while (align < buffer_size) + align = align << 1; + + if (buffers == 0) + return 0; + + current_num = cvmx_read_csr(CVMX_FPA_QUEX_AVAILABLE(pool)); + if (current_num) { + cvmx_dprintf("Fpa pool %d(%s) already has %llu buffers. 
" + "Skipping setup.\n", + pool, name, (unsigned long long)current_num); + return 0; + } + + memory = cvmx_bootmem_alloc(buffer_size * buffers, align); + if (memory == NULL) { + cvmx_dprintf("Out of memory initializing fpa pool %d(%s).\n", + pool, name); + return -1; + } + cvmx_fpa_setup_pool(pool, name, memory, buffer_size, buffers); + return 0; +} + +/** + * Allocate memory and initialize the FPA pools using memory + * from cvmx-bootmem. Specifying zero for the number of + * buffers will cause that FPA pool to not be setup. This is + * useful if you aren't using some of the hardware and want + * to save memory. Use cvmx_helper_initialize_fpa instead of + * this function directly. + * + * @pip_pool: Should always be CVMX_FPA_PACKET_POOL + * @pip_size: Should always be CVMX_FPA_PACKET_POOL_SIZE + * @pip_buffers: + * Number of packet buffers. + * @wqe_pool: Should always be CVMX_FPA_WQE_POOL + * @wqe_size: Should always be CVMX_FPA_WQE_POOL_SIZE + * @wqe_entries: + * Number of work queue entries + * @pko_pool: Should always be CVMX_FPA_OUTPUT_BUFFER_POOL + * @pko_size: Should always be CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE + * @pko_buffers: + * PKO Command buffers. You should at minimum have two per + * each PKO queue. + * @tim_pool: Should always be CVMX_FPA_TIMER_POOL + * @tim_size: Should always be CVMX_FPA_TIMER_POOL_SIZE + * @tim_buffers: + * TIM ring buffer command queues. At least two per timer bucket + * is recommened. + * @dfa_pool: Should always be CVMX_FPA_DFA_POOL + * @dfa_size: Should always be CVMX_FPA_DFA_POOL_SIZE + * @dfa_buffers: + * DFA command buffer. A relatively small (32 for example) + * number should work. + * Returns Zero on success, non-zero if out of memory + */ +static int __cvmx_helper_initialize_fpa(int pip_pool, int pip_size, + int pip_buffers, int wqe_pool, + int wqe_size, int wqe_entries, + int pko_pool, int pko_size, + int pko_buffers, int tim_pool, + int tim_size, int tim_buffers, + int dfa_pool, int dfa_size, + int dfa_buffers) +{ + int status; + + cvmx_fpa_enable(); + + if ((pip_buffers > 0) && (pip_buffers <= 64)) + cvmx_dprintf + ("Warning: %d packet buffers may not be enough for hardware" + " prefetch. 65 or more is recommended.\n", pip_buffers); + + if (pip_pool >= 0) { + status = + __cvmx_helper_initialize_fpa_pool(pip_pool, pip_size, + pip_buffers, + "Packet Buffers"); + if (status) + return status; + } + + if (wqe_pool >= 0) { + status = + __cvmx_helper_initialize_fpa_pool(wqe_pool, wqe_size, + wqe_entries, + "Work Queue Entries"); + if (status) + return status; + } + + if (pko_pool >= 0) { + status = + __cvmx_helper_initialize_fpa_pool(pko_pool, pko_size, + pko_buffers, + "PKO Command Buffers"); + if (status) + return status; + } + + if (tim_pool >= 0) { + status = + __cvmx_helper_initialize_fpa_pool(tim_pool, tim_size, + tim_buffers, + "TIM Command Buffers"); + if (status) + return status; + } + + if (dfa_pool >= 0) { + status = + __cvmx_helper_initialize_fpa_pool(dfa_pool, dfa_size, + dfa_buffers, + "DFA Command Buffers"); + if (status) + return status; + } + + return 0; +} + +/** + * Allocate memory and initialize the FPA pools using memory + * from cvmx-bootmem. Sizes of each element in the pools is + * controlled by the cvmx-config.h header file. Specifying + * zero for any parameter will cause that FPA pool to not be + * setup. This is useful if you aren't using some of the + * hardware and want to save memory. 
+ * + * @packet_buffers: + * Number of packet buffers to allocate + * @work_queue_entries: + * Number of work queue entries + * @pko_buffers: + * PKO Command buffers. You should at minimum have two for + * each PKO queue. + * @tim_buffers: + * TIM ring buffer command queues. At least two per timer bucket + * are recommended. + * @dfa_buffers: + * DFA command buffer. A relatively small (32 for example) + * number should work. + * Returns Zero on success, non-zero if out of memory + */ +int cvmx_helper_initialize_fpa(int packet_buffers, int work_queue_entries, + int pko_buffers, int tim_buffers, + int dfa_buffers) +{ +#ifndef CVMX_FPA_PACKET_POOL +#define CVMX_FPA_PACKET_POOL -1 +#define CVMX_FPA_PACKET_POOL_SIZE 0 +#endif +#ifndef CVMX_FPA_WQE_POOL +#define CVMX_FPA_WQE_POOL -1 +#define CVMX_FPA_WQE_POOL_SIZE 0 +#endif +#ifndef CVMX_FPA_OUTPUT_BUFFER_POOL +#define CVMX_FPA_OUTPUT_BUFFER_POOL -1 +#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE 0 +#endif +#ifndef CVMX_FPA_TIMER_POOL +#define CVMX_FPA_TIMER_POOL -1 +#define CVMX_FPA_TIMER_POOL_SIZE 0 +#endif +#ifndef CVMX_FPA_DFA_POOL +#define CVMX_FPA_DFA_POOL -1 +#define CVMX_FPA_DFA_POOL_SIZE 0 +#endif + return __cvmx_helper_initialize_fpa(CVMX_FPA_PACKET_POOL, + CVMX_FPA_PACKET_POOL_SIZE, + packet_buffers, CVMX_FPA_WQE_POOL, + CVMX_FPA_WQE_POOL_SIZE, + work_queue_entries, + CVMX_FPA_OUTPUT_BUFFER_POOL, + CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, + pko_buffers, CVMX_FPA_TIMER_POOL, + CVMX_FPA_TIMER_POOL_SIZE, + tim_buffers, CVMX_FPA_DFA_POOL, + CVMX_FPA_DFA_POOL_SIZE, + dfa_buffers); +} diff --git a/drivers/staging/octeon/cvmx-helper-fpa.h b/drivers/staging/octeon/cvmx-helper-fpa.h new file mode 100644 index 0000000..5ff8c93 --- /dev/null +++ b/drivers/staging/octeon/cvmx-helper-fpa.h @@ -0,0 +1,64 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/** + * @file + * + * Helper functions for FPA setup. + * + */ +#ifndef __CVMX_HELPER_H_FPA__ +#define __CVMX_HELPER_H_FPA__ + +/** + * Allocate memory and initialize the FPA pools using memory + * from cvmx-bootmem. The size of each element in the pools is + * controlled by the cvmx-config.h header file. Specifying + * zero for any parameter will cause that FPA pool to not be + * set up. This is useful if you aren't using some of the + * hardware and want to save memory. 
+ * + * @packet_buffers: + * Number of packet buffers to allocate + * @work_queue_entries: + * Number of work queue entries + * @pko_buffers: + * PKO Command buffers. You should at minimum have two for + * each PKO queue. + * @tim_buffers: + * TIM ring buffer command queues. At least two per timer bucket + * are recommended. + * @dfa_buffers: + * DFA command buffer. A relatively small (32 for example) + * number should work. + * Returns Zero on success, non-zero if out of memory + */ +extern int cvmx_helper_initialize_fpa(int packet_buffers, + int work_queue_entries, int pko_buffers, + int tim_buffers, int dfa_buffers); + +#endif /* __CVMX_HELPER_H_FPA__ */ diff --git a/drivers/staging/octeon/cvmx-helper-loop.c b/drivers/staging/octeon/cvmx-helper-loop.c new file mode 100644 index 0000000..55a571a --- /dev/null +++ b/drivers/staging/octeon/cvmx-helper-loop.c @@ -0,0 +1,85 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/* + * Functions for LOOP initialization, configuration, + * and monitoring. + */ +#include <asm/octeon/octeon.h> + +#include "cvmx-config.h" + +#include "cvmx-helper.h" +#include "cvmx-pip-defs.h" + +/** + * Probe a LOOP interface and determine the number of ports + * connected to it. The LOOP interface should still be down + * after this call. + * + * @interface: Interface to probe + * + * Returns Number of ports on the interface. Zero to disable. + */ +int __cvmx_helper_loop_probe(int interface) +{ + union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs; + int num_ports = 4; + int port; + + /* We need to disable length checking so packets < 64 bytes and jumbo + frames don't get errors */ + for (port = 0; port < num_ports; port++) { + union cvmx_pip_prt_cfgx port_cfg; + int ipd_port = cvmx_helper_get_ipd_port(interface, port); + port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port)); + port_cfg.s.maxerr_en = 0; + port_cfg.s.minerr_en = 0; + cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_cfg.u64); + } + + /* Disable FCS stripping for loopback ports */ + ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS); + ipd_sub_port_fcs.s.port_bit2 = 0; + cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64); + return num_ports; +} + +/** + * Bringup and enable a LOOP interface. After this call packet + * I/O should be fully functional. This is called with IPD + * enabled but PKO disabled. 
+ * + * @interface: Interface to bring up + * + * Returns Zero on success, negative on failure + */ +int __cvmx_helper_loop_enable(int interface) +{ + /* Do nothing. */ + return 0; +} diff --git a/drivers/staging/octeon/cvmx-helper-loop.h b/drivers/staging/octeon/cvmx-helper-loop.h new file mode 100644 index 0000000..e646a6c --- /dev/null +++ b/drivers/staging/octeon/cvmx-helper-loop.h @@ -0,0 +1,59 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as published by + * the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, + * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or NONINFRINGEMENT. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/** + * @file + * + * Functions for LOOP initialization, configuration, + * and monitoring. + * + */ +#ifndef __CVMX_HELPER_LOOP_H__ +#define __CVMX_HELPER_LOOP_H__ + +/** + * Probe a LOOP interface and determine the number of ports + * connected to it. The LOOP interface should still be down after + * this call. + * + * @interface: Interface to probe + * + * Returns Number of ports on the interface. Zero to disable. + */ +extern int __cvmx_helper_loop_probe(int interface); + +/** + * Bringup and enable a LOOP interface. After this call packet + * I/O should be fully functional. This is called with IPD + * enabled but PKO disabled. + * + * @interface: Interface to bring up + * + * Returns Zero on success, negative on failure + */ +extern int __cvmx_helper_loop_enable(int interface); + +#endif diff --git a/drivers/staging/octeon/cvmx-helper-npi.c b/drivers/staging/octeon/cvmx-helper-npi.c new file mode 100644 index 0000000..7388a1e --- /dev/null +++ b/drivers/staging/octeon/cvmx-helper-npi.c @@ -0,0 +1,113 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. 
+ * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/* + * Functions for NPI initialization, configuration, + * and monitoring. + */ +#include <asm/octeon/octeon.h> + +#include "cvmx-config.h" + +#include "cvmx-helper.h" + +#include "cvmx-pip-defs.h" + +/** + * Probe a NPI interface and determine the number of ports + * connected to it. The NPI interface should still be down + * after this call. + * + * @interface: Interface to probe + * + * Returns Number of ports on the interface. Zero to disable. + */ +int __cvmx_helper_npi_probe(int interface) +{ +#if CVMX_PKO_QUEUES_PER_PORT_PCI > 0 + if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)) + return 4; + else if (OCTEON_IS_MODEL(OCTEON_CN56XX) + && !OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) + /* The packet engines didn't exist before pass 2 */ + return 4; + else if (OCTEON_IS_MODEL(OCTEON_CN52XX) + && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) + /* The packet engines didn't exist before pass 2 */ + return 4; +#if 0 + /* + * Technically CN30XX, CN31XX, and CN50XX contain packet + * engines, but nobody ever uses them. Since this is the case, + * we disable them here. + */ + else if (OCTEON_IS_MODEL(OCTEON_CN31XX) + || OCTEON_IS_MODEL(OCTEON_CN50XX)) + return 2; + else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) + return 1; +#endif +#endif + return 0; +} + +/** + * Bringup and enable a NPI interface. After this call packet + * I/O should be fully functional. This is called with IPD + * enabled but PKO disabled. + * + * @interface: Interface to bring up + * + * Returns Zero on success, negative on failure + */ +int __cvmx_helper_npi_enable(int interface) +{ + /* + * On CN50XX, CN52XX, and CN56XX we need to disable length + * checking so packet < 64 bytes and jumbo frames don't get + * errors. + */ + if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) && + !OCTEON_IS_MODEL(OCTEON_CN58XX)) { + int num_ports = cvmx_helper_ports_on_interface(interface); + int port; + for (port = 0; port < num_ports; port++) { + union cvmx_pip_prt_cfgx port_cfg; + int ipd_port = + cvmx_helper_get_ipd_port(interface, port); + port_cfg.u64 = + cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port)); + port_cfg.s.maxerr_en = 0; + port_cfg.s.minerr_en = 0; + cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), + port_cfg.u64); + } + } + + /* Enables are controlled by the remote host, so nothing to do here */ + return 0; +} diff --git a/drivers/staging/octeon/cvmx-helper-npi.h b/drivers/staging/octeon/cvmx-helper-npi.h new file mode 100644 index 0000000..908e7b0 --- /dev/null +++ b/drivers/staging/octeon/cvmx-helper-npi.h @@ -0,0 +1,60 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/** + * @file + * + * Functions for NPI initialization, configuration, + * and monitoring. + * + */ +#ifndef __CVMX_HELPER_NPI_H__ +#define __CVMX_HELPER_NPI_H__ + +/** + * Probe a NPI interface and determine the number of ports + * connected to it. The NPI interface should still be down after + * this call. + * + * @interface: Interface to probe + * + * Returns Number of ports on the interface. Zero to disable. + */ +extern int __cvmx_helper_npi_probe(int interface); + +/** + * Bringup and enable a NPI interface. After this call packet + * I/O should be fully functional. This is called with IPD + * enabled but PKO disabled. + * + * @interface: Interface to bring up + * + * Returns Zero on success, negative on failure + */ +extern int __cvmx_helper_npi_enable(int interface); + +#endif diff --git a/drivers/staging/octeon/cvmx-helper-rgmii.c b/drivers/staging/octeon/cvmx-helper-rgmii.c new file mode 100644 index 0000000..aa2d5d7 --- /dev/null +++ b/drivers/staging/octeon/cvmx-helper-rgmii.c @@ -0,0 +1,525 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/* + * Functions for RGMII/GMII/MII initialization, configuration, + * and monitoring. + */ +#include <asm/octeon/octeon.h> + +#include "cvmx-config.h" + + +#include "cvmx-mdio.h" +#include "cvmx-pko.h" +#include "cvmx-helper.h" +#include "cvmx-helper-board.h" + +#include <asm/octeon/cvmx-npi-defs.h> +#include "cvmx-gmxx-defs.h" +#include "cvmx-asxx-defs.h" +#include "cvmx-dbg-defs.h" + +void __cvmx_interrupt_gmxx_enable(int interface); +void __cvmx_interrupt_asxx_enable(int block); + +/** + * Probe RGMII ports and determine the number present + * + * @interface: Interface to probe + * + * Returns Number of RGMII/GMII/MII ports (0-4). 
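+ *
+ * For example, a CN38XX or CN58XX in RGMII mode reports 4 ports, while
+ * a CN30XX, CN31XX, or CN50XX reports 3 in RGMII mode but only 2 in
+ * GMII/MII mode (see the model checks below).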
+ */ +int __cvmx_helper_rgmii_probe(int interface) +{ + int num_ports = 0; + union cvmx_gmxx_inf_mode mode; + mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface)); + + if (mode.s.type) { + if (OCTEON_IS_MODEL(OCTEON_CN38XX) + || OCTEON_IS_MODEL(OCTEON_CN58XX)) { + cvmx_dprintf("ERROR: RGMII initialize called in " + "SPI interface\n"); + } else if (OCTEON_IS_MODEL(OCTEON_CN31XX) + || OCTEON_IS_MODEL(OCTEON_CN30XX) + || OCTEON_IS_MODEL(OCTEON_CN50XX)) { + /* + * On these chips "type" says we're in + * GMII/MII mode. This limits us to 2 ports + */ + num_ports = 2; + } else { + cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n", + __func__); + } + } else { + if (OCTEON_IS_MODEL(OCTEON_CN38XX) + || OCTEON_IS_MODEL(OCTEON_CN58XX)) { + num_ports = 4; + } else if (OCTEON_IS_MODEL(OCTEON_CN31XX) + || OCTEON_IS_MODEL(OCTEON_CN30XX) + || OCTEON_IS_MODEL(OCTEON_CN50XX)) { + num_ports = 3; + } else { + cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n", + __func__); + } + } + return num_ports; +} + +/** + * Put an RGMII interface in loopback mode. Internal packets sent + * out will be received back again on the same port. Externally + * received packets will echo back out. + * + * @port: IPD port number to loop. + */ +void cvmx_helper_rgmii_internal_loopback(int port) +{ + int interface = (port >> 4) & 1; + int index = port & 0xf; + uint64_t tmp; + + union cvmx_gmxx_prtx_cfg gmx_cfg; + gmx_cfg.u64 = 0; + gmx_cfg.s.duplex = 1; + gmx_cfg.s.slottime = 1; + gmx_cfg.s.speed = 1; + cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1); + cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200); + cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000); + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); + tmp = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface)); + cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), (1 << index) | tmp); + tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface)); + cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), (1 << index) | tmp); + tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)); + cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), (1 << index) | tmp); + gmx_cfg.s.en = 1; + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); +} + +/** + * Workaround ASX setup errata with CN38XX pass1 + * + * @interface: Interface to setup + * @port: Port to setup (0..3) + * @cpu_clock_hz: + * Chip frequency in Hertz + * + * Returns Zero on success, negative on failure + */ +static int __cvmx_helper_errata_asx_pass1(int interface, int port, + int cpu_clock_hz) +{ + /* Set hi water mark as per errata GMX-4 */ + if (cpu_clock_hz >= 325000000 && cpu_clock_hz < 375000000) + cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 12); + else if (cpu_clock_hz >= 375000000 && cpu_clock_hz < 437000000) + cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 11); + else if (cpu_clock_hz >= 437000000 && cpu_clock_hz < 550000000) + cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 10); + else if (cpu_clock_hz >= 550000000 && cpu_clock_hz < 687000000) + cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 9); + else + cvmx_dprintf("Illegal clock frequency (%d). " + "CVMX_ASXX_TX_HI_WATERX not set\n", cpu_clock_hz); + return 0; +} + +/** + * Configure all of the ASX, GMX, and PKO registers required + * to get RGMII to function on the supplied interface. 
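+ *
+ * A bring-up sketch (illustrative ordering only, assuming the usual
+ * cvmx-helper initialization has already recorded the port count):
+ *
+ *	if (__cvmx_helper_rgmii_probe(0) > 0)
+ *		__cvmx_helper_rgmii_enable(0);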
+ * + * @interface: PKO Interface to configure (0 or 1) + * + * Returns Zero on success + */ +int __cvmx_helper_rgmii_enable(int interface) +{ + int num_ports = cvmx_helper_ports_on_interface(interface); + int port; + struct cvmx_sysinfo *sys_info_ptr = cvmx_sysinfo_get(); + union cvmx_gmxx_inf_mode mode; + union cvmx_asxx_tx_prt_en asx_tx; + union cvmx_asxx_rx_prt_en asx_rx; + + mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface)); + + if (mode.s.en == 0) + return -1; + if ((OCTEON_IS_MODEL(OCTEON_CN38XX) || + OCTEON_IS_MODEL(OCTEON_CN58XX)) && mode.s.type == 1) + /* Ignore SPI interfaces */ + return -1; + + /* Configure the ASX registers needed to use the RGMII ports */ + asx_tx.u64 = 0; + asx_tx.s.prt_en = cvmx_build_mask(num_ports); + cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), asx_tx.u64); + + asx_rx.u64 = 0; + asx_rx.s.prt_en = cvmx_build_mask(num_ports); + cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), asx_rx.u64); + + /* Configure the GMX registers needed to use the RGMII ports */ + for (port = 0; port < num_ports; port++) { + /* Setting of CVMX_GMXX_TXX_THRESH has been moved to + __cvmx_helper_setup_gmx() */ + + if (cvmx_octeon_is_pass1()) + __cvmx_helper_errata_asx_pass1(interface, port, + sys_info_ptr-> + cpu_clock_hz); + else { + /* + * Configure more flexible RGMII preamble + * checking. Pass 1 doesn't support this + * feature. + */ + union cvmx_gmxx_rxx_frm_ctl frm_ctl; + frm_ctl.u64 = + cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL + (port, interface)); + /* New field, so must be compile time */ + frm_ctl.s.pre_free = 1; + cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(port, interface), + frm_ctl.u64); + } + + /* + * Each pause frame transmitted will ask for about 10M + * bit times before resume. If buffer space comes + * available before that time has expired, an XON + * pause frame (0 time) will be transmitted to restart + * the flow. + */ + cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_TIME(port, interface), + 20000); + cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL + (port, interface), 19000); + + if (OCTEON_IS_MODEL(OCTEON_CN50XX)) { + cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface), + 16); + cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface), + 16); + } else { + cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface), + 24); + cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface), + 24); + } + } + + __cvmx_helper_setup_gmx(interface, num_ports); + + /* enable the ports now */ + for (port = 0; port < num_ports; port++) { + union cvmx_gmxx_prtx_cfg gmx_cfg; + cvmx_helper_link_autoconf(cvmx_helper_get_ipd_port + (interface, port)); + gmx_cfg.u64 = + cvmx_read_csr(CVMX_GMXX_PRTX_CFG(port, interface)); + gmx_cfg.s.en = 1; + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(port, interface), + gmx_cfg.u64); + } + __cvmx_interrupt_asxx_enable(interface); + __cvmx_interrupt_gmxx_enable(interface); + + return 0; +} + +/** + * Return the link state of an IPD/PKO port as returned by + * auto negotiation. The result of this function may not match + * Octeon's link config if auto negotiation has changed since + * the last call to cvmx_helper_link_set(). 
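+ *
+ * A polling sketch (the port number 16 is hypothetical):
+ *
+ *	cvmx_helper_link_info_t li = __cvmx_helper_rgmii_link_get(16);
+ *	if (li.s.link_up)
+ *		cvmx_dprintf("%d Mbps, %s duplex\n", (int)li.s.speed,
+ *			     li.s.full_duplex ? "full" : "half");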
+ * + * @ipd_port: IPD/PKO port to query + * + * Returns Link state + */ +cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port) +{ + int interface = cvmx_helper_get_interface_num(ipd_port); + int index = cvmx_helper_get_interface_index_num(ipd_port); + union cvmx_asxx_prt_loop asxx_prt_loop; + + asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface)); + if (asxx_prt_loop.s.int_loop & (1 << index)) { + /* Force 1Gbps full duplex on internal loopback */ + cvmx_helper_link_info_t result; + result.u64 = 0; + result.s.full_duplex = 1; + result.s.link_up = 1; + result.s.speed = 1000; + return result; + } else + return __cvmx_helper_board_link_get(ipd_port); +} + +/** + * Configure an IPD/PKO port for the specified link state. This + * function does not influence auto negotiation at the PHY level. + * The passed link state must always match the link state returned + * by cvmx_helper_link_get(). It is normally best to use + * cvmx_helper_link_autoconf() instead. + * + * @ipd_port: IPD/PKO port to configure + * @link_info: The new link state + * + * Returns Zero on success, negative on failure + */ +int __cvmx_helper_rgmii_link_set(int ipd_port, + cvmx_helper_link_info_t link_info) +{ + int result = 0; + int interface = cvmx_helper_get_interface_num(ipd_port); + int index = cvmx_helper_get_interface_index_num(ipd_port); + union cvmx_gmxx_prtx_cfg original_gmx_cfg; + union cvmx_gmxx_prtx_cfg new_gmx_cfg; + union cvmx_pko_mem_queue_qos pko_mem_queue_qos; + union cvmx_pko_mem_queue_qos pko_mem_queue_qos_save[16]; + union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp; + union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp_save; + int i; + + /* Ignore speed sets in the simulator */ + if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM) + return 0; + + /* Read the current settings so we know the current enable state */ + original_gmx_cfg.u64 = + cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); + new_gmx_cfg = original_gmx_cfg; + + /* Disable the lowest level RX */ + cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), + cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) & + ~(1 << index)); + + /* Disable all queues so that TX should become idle */ + for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) { + int queue = cvmx_pko_get_base_queue(ipd_port) + i; + cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue); + pko_mem_queue_qos.u64 = cvmx_read_csr(CVMX_PKO_MEM_QUEUE_QOS); + pko_mem_queue_qos.s.pid = ipd_port; + pko_mem_queue_qos.s.qid = queue; + pko_mem_queue_qos_save[i] = pko_mem_queue_qos; + pko_mem_queue_qos.s.qos_mask = 0; + cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos.u64); + } + + /* Disable backpressure */ + gmx_tx_ovr_bp.u64 = cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface)); + gmx_tx_ovr_bp_save = gmx_tx_ovr_bp; + gmx_tx_ovr_bp.s.bp &= ~(1 << index); + gmx_tx_ovr_bp.s.en |= 1 << index; + cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp.u64); + cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface)); + + /* + * Poll the GMX state machine waiting for it to become + * idle. Preferably we should only change speed when it is + * idle. If it doesn't become idle we will still do the speed + * change, but there is a slight chance that GMX will + * lockup. 
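+	 *
+	 * (The CVMX_NPI_DBG_SELECT value written below appears to select
+	 * this port's GMX state machine signals on the debug bus; the two
+	 * waits then poll CVMX_DBG_DATA until those bits read idle.)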
+ */ + cvmx_write_csr(CVMX_NPI_DBG_SELECT, + interface * 0x800 + index * 0x100 + 0x880); + CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data, data & 7, + ==, 0, 10000); + CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data, data & 0xf, + ==, 0, 10000); + + /* Disable the port before we make any changes */ + new_gmx_cfg.s.en = 0; + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64); + cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); + + /* Set full/half duplex */ + if (cvmx_octeon_is_pass1()) + /* Half duplex is broken for 38XX Pass 1 */ + new_gmx_cfg.s.duplex = 1; + else if (!link_info.s.link_up) + /* Force full duplex on down links */ + new_gmx_cfg.s.duplex = 1; + else + new_gmx_cfg.s.duplex = link_info.s.full_duplex; + + /* Set the link speed. Anything unknown is set to 1Gbps */ + if (link_info.s.speed == 10) { + new_gmx_cfg.s.slottime = 0; + new_gmx_cfg.s.speed = 0; + } else if (link_info.s.speed == 100) { + new_gmx_cfg.s.slottime = 0; + new_gmx_cfg.s.speed = 0; + } else { + new_gmx_cfg.s.slottime = 1; + new_gmx_cfg.s.speed = 1; + } + + /* Adjust the clocks */ + if (link_info.s.speed == 10) { + cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 50); + cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40); + cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0); + } else if (link_info.s.speed == 100) { + cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 5); + cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40); + cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0); + } else { + cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1); + cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200); + cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000); + } + + if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) { + if ((link_info.s.speed == 10) || (link_info.s.speed == 100)) { + union cvmx_gmxx_inf_mode mode; + mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface)); + + /* + * Port .en .type .p0mii Configuration + * ---- --- ----- ------ ----------------------------------------- + * X 0 X X All links are disabled. + * 0 1 X 0 Port 0 is RGMII + * 0 1 X 1 Port 0 is MII + * 1 1 0 X Ports 1 and 2 are configured as RGMII ports. + * 1 1 1 X Port 1: GMII/MII; Port 2: disabled. GMII or + * MII port is selected by GMX_PRT1_CFG[SPEED]. + */ + + /* In MII mode, CLK_CNT = 1. */ + if (((index == 0) && (mode.s.p0mii == 1)) + || ((index != 0) && (mode.s.type == 1))) { + cvmx_write_csr(CVMX_GMXX_TXX_CLK + (index, interface), 1); + } + } + } + + /* Do a read to make sure all setup stuff is complete */ + cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); + + /* Save the new GMX setting without enabling the port */ + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64); + + /* Enable the lowest level RX */ + cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), + cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) | (1 << + index)); + + /* Re-enable the TX path */ + for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) { + int queue = cvmx_pko_get_base_queue(ipd_port) + i; + cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue); + cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS, + pko_mem_queue_qos_save[i].u64); + } + + /* Restore backpressure */ + cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp_save.u64); + + /* Restore the GMX enable state. 
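
CVMX_WAIT_FOR_FIELD64(), used for the idle polls above, is an SDK macro; the sketch below only conveys the idea of a bounded CSR poll and is not its real definition. example_wait_for_field and its callback are hypothetical, and udelay() assumes <linux/delay.h> is available:

/*
 * Poll read_fn() until it returns "want" or roughly timeout_us
 * microseconds pass.  Returns 0 on success, -1 on timeout.
 */
static int example_wait_for_field(uint64_t (*read_fn)(void),
                                  uint64_t want, unsigned int timeout_us)
{
        while (timeout_us--) {
                if (read_fn() == want)
                        return 0;
                udelay(1);
        }
        return -1;
}
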
Port config is complete */ + new_gmx_cfg.s.en = original_gmx_cfg.s.en; + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64); + + return result; +} + +/** + * Configure a port for internal and/or external loopback. Internal loopback + * causes packets sent by the port to be received by Octeon. External loopback + * causes packets received from the wire to sent out again. + * + * @ipd_port: IPD/PKO port to loopback. + * @enable_internal: + * Non zero if you want internal loopback + * @enable_external: + * Non zero if you want external loopback + * + * Returns Zero on success, negative on failure. + */ +int __cvmx_helper_rgmii_configure_loopback(int ipd_port, int enable_internal, + int enable_external) +{ + int interface = cvmx_helper_get_interface_num(ipd_port); + int index = cvmx_helper_get_interface_index_num(ipd_port); + int original_enable; + union cvmx_gmxx_prtx_cfg gmx_cfg; + union cvmx_asxx_prt_loop asxx_prt_loop; + + /* Read the current enable state and save it */ + gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); + original_enable = gmx_cfg.s.en; + /* Force port to be disabled */ + gmx_cfg.s.en = 0; + if (enable_internal) { + /* Force speed if we're doing internal loopback */ + gmx_cfg.s.duplex = 1; + gmx_cfg.s.slottime = 1; + gmx_cfg.s.speed = 1; + cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1); + cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200); + cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000); + } + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); + + /* Set the loopback bits */ + asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface)); + if (enable_internal) + asxx_prt_loop.s.int_loop |= 1 << index; + else + asxx_prt_loop.s.int_loop &= ~(1 << index); + if (enable_external) + asxx_prt_loop.s.ext_loop |= 1 << index; + else + asxx_prt_loop.s.ext_loop &= ~(1 << index); + cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), asxx_prt_loop.u64); + + /* Force enables in internal loopback */ + if (enable_internal) { + uint64_t tmp; + tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface)); + cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), + (1 << index) | tmp); + tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)); + cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), + (1 << index) | tmp); + original_enable = 1; + } + + /* Restore the enable state */ + gmx_cfg.s.en = original_enable; + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); + return 0; +} diff --git a/drivers/staging/octeon/cvmx-helper-rgmii.h b/drivers/staging/octeon/cvmx-helper-rgmii.h new file mode 100644 index 0000000..ea26526 --- /dev/null +++ b/drivers/staging/octeon/cvmx-helper-rgmii.h @@ -0,0 +1,110 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/** + * @file + * + * Functions for RGMII/GMII/MII initialization, configuration, + * and monitoring. + * + */ +#ifndef __CVMX_HELPER_RGMII_H__ +#define __CVMX_HELPER_RGMII_H__ + +/** + * Probe RGMII ports and determine the number present + * + * @interface: Interface to probe + * + * Returns Number of RGMII/GMII/MII ports (0-4). + */ +extern int __cvmx_helper_rgmii_probe(int interface); + +/** + * Put an RGMII interface in loopback mode. Internal packets sent + * out will be received back again on the same port. Externally + * received packets will echo back out. + * + * @port: IPD port number to loop. + */ +extern void cvmx_helper_rgmii_internal_loopback(int port); + +/** + * Configure all of the ASX, GMX, and PKO regsiters required + * to get RGMII to function on the supplied interface. + * + * @interface: PKO Interface to configure (0 or 1) + * + * Returns Zero on success + */ +extern int __cvmx_helper_rgmii_enable(int interface); + +/** + * Return the link state of an IPD/PKO port as returned by + * auto negotiation. The result of this function may not match + * Octeon's link config if auto negotiation has changed since + * the last call to cvmx_helper_link_set(). + * + * @ipd_port: IPD/PKO port to query + * + * Returns Link state + */ +extern cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port); + +/** + * Configure an IPD/PKO port for the specified link state. This + * function does not influence auto negotiation at the PHY level. + * The passed link state must always match the link state returned + * by cvmx_helper_link_get(). It is normally best to use + * cvmx_helper_link_autoconf() instead. + * + * @ipd_port: IPD/PKO port to configure + * @link_info: The new link state + * + * Returns Zero on success, negative on failure + */ +extern int __cvmx_helper_rgmii_link_set(int ipd_port, + cvmx_helper_link_info_t link_info); + +/** + * Configure a port for internal and/or external loopback. Internal loopback + * causes packets sent by the port to be received by Octeon. External loopback + * causes packets received from the wire to sent out again. + * + * @ipd_port: IPD/PKO port to loopback. + * @enable_internal: + * Non zero if you want internal loopback + * @enable_external: + * Non zero if you want external loopback + * + * Returns Zero on success, negative on failure. + */ +extern int __cvmx_helper_rgmii_configure_loopback(int ipd_port, + int enable_internal, + int enable_external); + +#endif diff --git a/drivers/staging/octeon/cvmx-helper-sgmii.c b/drivers/staging/octeon/cvmx-helper-sgmii.c new file mode 100644 index 0000000..6214e3b --- /dev/null +++ b/drivers/staging/octeon/cvmx-helper-sgmii.c @@ -0,0 +1,550 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. 
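
The RGMII helper declarations earlier in cvmx-helper-rgmii.h suggest the usual bring-up order; a hedged sketch of a caller (the wrapper name is hypothetical, and production code normally goes through the generic cvmx-helper layer instead of calling these directly):

static int example_bringup_rgmii(int interface)
{
        int ports = __cvmx_helper_rgmii_probe(interface);

        if (ports <= 0)
                return -1;      /* no RGMII/GMII/MII ports, leave it down */
        return __cvmx_helper_rgmii_enable(interface);
}
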
+ * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/* + * Functions for SGMII initialization, configuration, + * and monitoring. + */ + +#include <asm/octeon/octeon.h> + +#include "cvmx-config.h" + +#include "cvmx-mdio.h" +#include "cvmx-helper.h" +#include "cvmx-helper-board.h" + +#include "cvmx-gmxx-defs.h" +#include "cvmx-pcsx-defs.h" + +void __cvmx_interrupt_gmxx_enable(int interface); +void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block); +void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index); + +/** + * Perform initialization required only once for an SGMII port. + * + * @interface: Interface to init + * @index: Index of prot on the interface + * + * Returns Zero on success, negative on failure + */ +static int __cvmx_helper_sgmii_hardware_init_one_time(int interface, int index) +{ + const uint64_t clock_mhz = cvmx_sysinfo_get()->cpu_clock_hz / 1000000; + union cvmx_pcsx_miscx_ctl_reg pcs_misc_ctl_reg; + union cvmx_pcsx_linkx_timer_count_reg pcsx_linkx_timer_count_reg; + union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg; + + /* Disable GMX */ + gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); + gmxx_prtx_cfg.s.en = 0; + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64); + + /* + * Write PCS*_LINK*_TIMER_COUNT_REG[COUNT] with the + * appropriate value. 1000BASE-X specifies a 10ms + * interval. SGMII specifies a 1.6ms interval. + */ + pcs_misc_ctl_reg.u64 = + cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface)); + pcsx_linkx_timer_count_reg.u64 = + cvmx_read_csr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface)); + if (pcs_misc_ctl_reg.s.mode) { + /* 1000BASE-X */ + pcsx_linkx_timer_count_reg.s.count = + (10000ull * clock_mhz) >> 10; + } else { + /* SGMII */ + pcsx_linkx_timer_count_reg.s.count = + (1600ull * clock_mhz) >> 10; + } + cvmx_write_csr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface), + pcsx_linkx_timer_count_reg.u64); + + /* + * Write the advertisement register to be used as the + * tx_Config_Reg<D15:D0> of the autonegotiation. In + * 1000BASE-X mode, tx_Config_Reg<D15:D0> is PCS*_AN*_ADV_REG. + * In SGMII PHY mode, tx_Config_Reg<D15:D0> is + * PCS*_SGM*_AN_ADV_REG. In SGMII MAC mode, + * tx_Config_Reg<D15:D0> is the fixed value 0x4001, so this + * step can be skipped. 
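
For a concrete feel for the COUNT arithmetic above: the interval in microseconds times the clock rate in MHz gives core-clock cycles, and the >> 10 converts cycles into the register's 1024-cycle units. A sketch (helper name hypothetical; 500 MHz is only an assumed example clock):

static uint64_t example_link_timer_count(uint64_t clock_mhz,
                                         int is_1000base_x)
{
        /* 10 ms for 1000BASE-X, 1.6 ms for SGMII, in microseconds. */
        const uint64_t interval_us = is_1000base_x ? 10000ull : 1600ull;

        /* us * MHz = cycles; the register counts 1024-cycle ticks,
         * so at 500 MHz: (10000 * 500) >> 10 == 4882 ticks. */
        return (interval_us * clock_mhz) >> 10;
}
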
+ */ + if (pcs_misc_ctl_reg.s.mode) { + /* 1000BASE-X */ + union cvmx_pcsx_anx_adv_reg pcsx_anx_adv_reg; + pcsx_anx_adv_reg.u64 = + cvmx_read_csr(CVMX_PCSX_ANX_ADV_REG(index, interface)); + pcsx_anx_adv_reg.s.rem_flt = 0; + pcsx_anx_adv_reg.s.pause = 3; + pcsx_anx_adv_reg.s.hfd = 1; + pcsx_anx_adv_reg.s.fd = 1; + cvmx_write_csr(CVMX_PCSX_ANX_ADV_REG(index, interface), + pcsx_anx_adv_reg.u64); + } else { + union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg; + pcsx_miscx_ctl_reg.u64 = + cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface)); + if (pcsx_miscx_ctl_reg.s.mac_phy) { + /* PHY Mode */ + union cvmx_pcsx_sgmx_an_adv_reg pcsx_sgmx_an_adv_reg; + pcsx_sgmx_an_adv_reg.u64 = + cvmx_read_csr(CVMX_PCSX_SGMX_AN_ADV_REG + (index, interface)); + pcsx_sgmx_an_adv_reg.s.link = 1; + pcsx_sgmx_an_adv_reg.s.dup = 1; + pcsx_sgmx_an_adv_reg.s.speed = 2; + cvmx_write_csr(CVMX_PCSX_SGMX_AN_ADV_REG + (index, interface), + pcsx_sgmx_an_adv_reg.u64); + } else { + /* MAC Mode - Nothing to do */ + } + } + return 0; +} + +/** + * Initialize the SERTES link for the first time or after a loss + * of link. + * + * @interface: Interface to init + * @index: Index of prot on the interface + * + * Returns Zero on success, negative on failure + */ +static int __cvmx_helper_sgmii_hardware_init_link(int interface, int index) +{ + union cvmx_pcsx_mrx_control_reg control_reg; + + /* + * Take PCS through a reset sequence. + * PCS*_MR*_CONTROL_REG[PWR_DN] should be cleared to zero. + * Write PCS*_MR*_CONTROL_REG[RESET]=1 (while not changing the + * value of the other PCS*_MR*_CONTROL_REG bits). Read + * PCS*_MR*_CONTROL_REG[RESET] until it changes value to + * zero. + */ + control_reg.u64 = + cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface)); + if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) { + control_reg.s.reset = 1; + cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface), + control_reg.u64); + if (CVMX_WAIT_FOR_FIELD64 + (CVMX_PCSX_MRX_CONTROL_REG(index, interface), + union cvmx_pcsx_mrx_control_reg, reset, ==, 0, 10000)) { + cvmx_dprintf("SGMII%d: Timeout waiting for port %d " + "to finish reset\n", + interface, index); + return -1; + } + } + + /* + * Write PCS*_MR*_CONTROL_REG[RST_AN]=1 to ensure a fresh + * sgmii negotiation starts. + */ + control_reg.s.rst_an = 1; + control_reg.s.an_en = 1; + control_reg.s.pwr_dn = 0; + cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface), + control_reg.u64); + + /* + * Wait for PCS*_MR*_STATUS_REG[AN_CPT] to be set, indicating + * that sgmii autonegotiation is complete. In MAC mode this + * isn't an ethernet link, but a link between Octeon and the + * PHY. + */ + if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) && + CVMX_WAIT_FOR_FIELD64(CVMX_PCSX_MRX_STATUS_REG(index, interface), + union cvmx_pcsx_mrx_status_reg, an_cpt, ==, 1, + 10000)) { + /* cvmx_dprintf("SGMII%d: Port %d link timeout\n", interface, index); */ + return -1; + } + return 0; +} + +/** + * Configure an SGMII link to the specified speed after the SERTES + * link is up. + * + * @interface: Interface to init + * @index: Index of prot on the interface + * @link_info: Link state to configure + * + * Returns Zero on success, negative on failure + */ +static int __cvmx_helper_sgmii_hardware_init_link_speed(int interface, + int index, + cvmx_helper_link_info_t + link_info) +{ + int is_enabled; + union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg; + union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg; + + /* Disable GMX before we make any changes. 
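
This disable, reprogram, restore sequence recurs throughout these helpers; distilled into a sketch (example_with_gmx_disabled and its callback are hypothetical):

static void example_with_gmx_disabled(int index, int interface,
                                      void (*reprogram)(void))
{
        union cvmx_gmxx_prtx_cfg cfg;
        int was_enabled;

        /* Remember the enable state, then force the port off. */
        cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
        was_enabled = cfg.s.en;
        cfg.s.en = 0;
        cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), cfg.u64);

        reprogram();    /* safe to change speed/duplex here */

        cfg.s.en = was_enabled;
        cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), cfg.u64);
}
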
Remember the enable state */ + gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); + is_enabled = gmxx_prtx_cfg.s.en; + gmxx_prtx_cfg.s.en = 0; + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64); + + /* Wait for GMX to be idle */ + if (CVMX_WAIT_FOR_FIELD64 + (CVMX_GMXX_PRTX_CFG(index, interface), union cvmx_gmxx_prtx_cfg, + rx_idle, ==, 1, 10000) + || CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface), + union cvmx_gmxx_prtx_cfg, tx_idle, ==, 1, + 10000)) { + cvmx_dprintf + ("SGMII%d: Timeout waiting for port %d to be idle\n", + interface, index); + return -1; + } + + /* Read GMX CFG again to make sure the disable completed */ + gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); + + /* + * Get the misc control for PCS. We will need to set the + * duplication amount. + */ + pcsx_miscx_ctl_reg.u64 = + cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface)); + + /* + * Use GMXENO to force the link down if the status we get says + * it should be down. + */ + pcsx_miscx_ctl_reg.s.gmxeno = !link_info.s.link_up; + + /* Only change the duplex setting if the link is up */ + if (link_info.s.link_up) + gmxx_prtx_cfg.s.duplex = link_info.s.full_duplex; + + /* Do speed based setting for GMX */ + switch (link_info.s.speed) { + case 10: + gmxx_prtx_cfg.s.speed = 0; + gmxx_prtx_cfg.s.speed_msb = 1; + gmxx_prtx_cfg.s.slottime = 0; + /* Setting from GMX-603 */ + pcsx_miscx_ctl_reg.s.samp_pt = 25; + cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 64); + cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0); + break; + case 100: + gmxx_prtx_cfg.s.speed = 0; + gmxx_prtx_cfg.s.speed_msb = 0; + gmxx_prtx_cfg.s.slottime = 0; + pcsx_miscx_ctl_reg.s.samp_pt = 0x5; + cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 64); + cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0); + break; + case 1000: + gmxx_prtx_cfg.s.speed = 1; + gmxx_prtx_cfg.s.speed_msb = 0; + gmxx_prtx_cfg.s.slottime = 1; + pcsx_miscx_ctl_reg.s.samp_pt = 1; + cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 512); + cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 8192); + break; + default: + break; + } + + /* Write the new misc control for PCS */ + cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface), + pcsx_miscx_ctl_reg.u64); + + /* Write the new GMX settings with the port still disabled */ + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64); + + /* Read GMX CFG again to make sure the config completed */ + gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); + + /* Restore the enabled / disabled state */ + gmxx_prtx_cfg.s.en = is_enabled; + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64); + + return 0; +} + +/** + * Bring up the SGMII interface to be ready for packet I/O but + * leave I/O disabled using the GMX override. This function + * follows the bringup documented in 10.6.3 of the manual. 
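
The 10/100/1000 switch above is effectively a small per-speed register table; restated as data, with the values copied from the cases above (the struct and array names are hypothetical):

struct example_sgmii_speed_cfg {
        int mbps;
        int speed, speed_msb, slottime; /* GMXX_PRTX_CFG fields */
        int samp_pt;                    /* PCSX_MISCX_CTL_REG field */
        int tx_slot, tx_burst;          /* GMXX_TXX_SLOT/BURST */
};

static const struct example_sgmii_speed_cfg example_sgmii_speeds[] = {
        {   10, 0, 1, 0, 25,  64,    0 },
        {  100, 0, 0, 0,  5,  64,    0 },
        { 1000, 1, 0, 1,  1, 512, 8192 },
};
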
+ * + * @interface: Interface to bringup + * @num_ports: Number of ports on the interface + * + * Returns Zero on success, negative on failure + */ +static int __cvmx_helper_sgmii_hardware_init(int interface, int num_ports) +{ + int index; + + __cvmx_helper_setup_gmx(interface, num_ports); + + for (index = 0; index < num_ports; index++) { + int ipd_port = cvmx_helper_get_ipd_port(interface, index); + __cvmx_helper_sgmii_hardware_init_one_time(interface, index); + __cvmx_helper_sgmii_link_set(ipd_port, + __cvmx_helper_sgmii_link_get + (ipd_port)); + + } + + return 0; +} + +/** + * Probe a SGMII interface and determine the number of ports + * connected to it. The SGMII interface should still be down after + * this call. + * + * @interface: Interface to probe + * + * Returns Number of ports on the interface. Zero to disable. + */ +int __cvmx_helper_sgmii_probe(int interface) +{ + union cvmx_gmxx_inf_mode mode; + + /* + * Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the + * interface needs to be enabled before IPD otherwise per port + * backpressure may not work properly + */ + mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface)); + mode.s.en = 1; + cvmx_write_csr(CVMX_GMXX_INF_MODE(interface), mode.u64); + return 4; +} + +/** + * Bringup and enable a SGMII interface. After this call packet + * I/O should be fully functional. This is called with IPD + * enabled but PKO disabled. + * + * @interface: Interface to bring up + * + * Returns Zero on success, negative on failure + */ +int __cvmx_helper_sgmii_enable(int interface) +{ + int num_ports = cvmx_helper_ports_on_interface(interface); + int index; + + __cvmx_helper_sgmii_hardware_init(interface, num_ports); + + for (index = 0; index < num_ports; index++) { + union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg; + gmxx_prtx_cfg.u64 = + cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); + gmxx_prtx_cfg.s.en = 1; + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), + gmxx_prtx_cfg.u64); + __cvmx_interrupt_pcsx_intx_en_reg_enable(index, interface); + } + __cvmx_interrupt_pcsxx_int_en_reg_enable(interface); + __cvmx_interrupt_gmxx_enable(interface); + return 0; +} + +/** + * Return the link state of an IPD/PKO port as returned by + * auto negotiation. The result of this function may not match + * Octeon's link config if auto negotiation has changed since + * the last call to cvmx_helper_link_set(). 
+ * + * @ipd_port: IPD/PKO port to query + * + * Returns Link state + */ +cvmx_helper_link_info_t __cvmx_helper_sgmii_link_get(int ipd_port) +{ + cvmx_helper_link_info_t result; + union cvmx_pcsx_miscx_ctl_reg pcs_misc_ctl_reg; + int interface = cvmx_helper_get_interface_num(ipd_port); + int index = cvmx_helper_get_interface_index_num(ipd_port); + union cvmx_pcsx_mrx_control_reg pcsx_mrx_control_reg; + + result.u64 = 0; + + if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM) { + /* The simulator gives you a simulated 1Gbps full duplex link */ + result.s.link_up = 1; + result.s.full_duplex = 1; + result.s.speed = 1000; + return result; + } + + pcsx_mrx_control_reg.u64 = + cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface)); + if (pcsx_mrx_control_reg.s.loopbck1) { + /* Force 1Gbps full duplex link for internal loopback */ + result.s.link_up = 1; + result.s.full_duplex = 1; + result.s.speed = 1000; + return result; + } + + pcs_misc_ctl_reg.u64 = + cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface)); + if (pcs_misc_ctl_reg.s.mode) { + /* 1000BASE-X */ + /* FIXME */ + } else { + union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg; + pcsx_miscx_ctl_reg.u64 = + cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface)); + if (pcsx_miscx_ctl_reg.s.mac_phy) { + /* PHY Mode */ + union cvmx_pcsx_mrx_status_reg pcsx_mrx_status_reg; + union cvmx_pcsx_anx_results_reg pcsx_anx_results_reg; + + /* + * Don't bother continuing if the SERTES low + * level link is down + */ + pcsx_mrx_status_reg.u64 = + cvmx_read_csr(CVMX_PCSX_MRX_STATUS_REG + (index, interface)); + if (pcsx_mrx_status_reg.s.lnk_st == 0) { + if (__cvmx_helper_sgmii_hardware_init_link + (interface, index) != 0) + return result; + } + + /* Read the autoneg results */ + pcsx_anx_results_reg.u64 = + cvmx_read_csr(CVMX_PCSX_ANX_RESULTS_REG + (index, interface)); + if (pcsx_anx_results_reg.s.an_cpt) { + /* + * Auto negotiation is complete. Set + * status accordingly. + */ + result.s.full_duplex = + pcsx_anx_results_reg.s.dup; + result.s.link_up = + pcsx_anx_results_reg.s.link_ok; + switch (pcsx_anx_results_reg.s.spd) { + case 0: + result.s.speed = 10; + break; + case 1: + result.s.speed = 100; + break; + case 2: + result.s.speed = 1000; + break; + default: + result.s.speed = 0; + result.s.link_up = 0; + break; + } + } else { + /* + * Auto negotiation isn't + * complete. Return link down. + */ + result.s.speed = 0; + result.s.link_up = 0; + } + } else { /* MAC Mode */ + + result = __cvmx_helper_board_link_get(ipd_port); + } + } + return result; +} + +/** + * Configure an IPD/PKO port for the specified link state. This + * function does not influence auto negotiation at the PHY level. + * The passed link state must always match the link state returned + * by cvmx_helper_link_get(). It is normally best to use + * cvmx_helper_link_autoconf() instead. + * + * @ipd_port: IPD/PKO port to configure + * @link_info: The new link state + * + * Returns Zero on success, negative on failure + */ +int __cvmx_helper_sgmii_link_set(int ipd_port, + cvmx_helper_link_info_t link_info) +{ + int interface = cvmx_helper_get_interface_num(ipd_port); + int index = cvmx_helper_get_interface_index_num(ipd_port); + __cvmx_helper_sgmii_hardware_init_link(interface, index); + return __cvmx_helper_sgmii_hardware_init_link_speed(interface, index, + link_info); +} + +/** + * Configure a port for internal and/or external loopback. Internal + * loopback causes packets sent by the port to be received by + * Octeon. 
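
The spd decode above maps a two-bit auto-negotiation result code to Mbps; the same mapping as a standalone helper (name hypothetical):

/* Anything outside 0..2 is treated as link-down, as above. */
static int example_anx_spd_to_mbps(int spd)
{
        switch (spd) {
        case 0:
                return 10;
        case 1:
                return 100;
        case 2:
                return 1000;
        default:
                return 0;
        }
}
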
External loopback causes packets received from the wire to + * sent out again. + * + * @ipd_port: IPD/PKO port to loopback. + * @enable_internal: + * Non zero if you want internal loopback + * @enable_external: + * Non zero if you want external loopback + * + * Returns Zero on success, negative on failure. + */ +int __cvmx_helper_sgmii_configure_loopback(int ipd_port, int enable_internal, + int enable_external) +{ + int interface = cvmx_helper_get_interface_num(ipd_port); + int index = cvmx_helper_get_interface_index_num(ipd_port); + union cvmx_pcsx_mrx_control_reg pcsx_mrx_control_reg; + union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg; + + pcsx_mrx_control_reg.u64 = + cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface)); + pcsx_mrx_control_reg.s.loopbck1 = enable_internal; + cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface), + pcsx_mrx_control_reg.u64); + + pcsx_miscx_ctl_reg.u64 = + cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface)); + pcsx_miscx_ctl_reg.s.loopbck2 = enable_external; + cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface), + pcsx_miscx_ctl_reg.u64); + + __cvmx_helper_sgmii_hardware_init_link(interface, index); + return 0; +} diff --git a/drivers/staging/octeon/cvmx-helper-sgmii.h b/drivers/staging/octeon/cvmx-helper-sgmii.h new file mode 100644 index 0000000..19b48d6 --- /dev/null +++ b/drivers/staging/octeon/cvmx-helper-sgmii.h @@ -0,0 +1,104 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/** + * @file + * + * Functions for SGMII initialization, configuration, + * and monitoring. + * + */ +#ifndef __CVMX_HELPER_SGMII_H__ +#define __CVMX_HELPER_SGMII_H__ + +/** + * Probe a SGMII interface and determine the number of ports + * connected to it. The SGMII interface should still be down after + * this call. + * + * @interface: Interface to probe + * + * Returns Number of ports on the interface. Zero to disable. + */ +extern int __cvmx_helper_sgmii_probe(int interface); + +/** + * Bringup and enable a SGMII interface. After this call packet + * I/O should be fully functional. This is called with IPD + * enabled but PKO disabled. + * + * @interface: Interface to bring up + * + * Returns Zero on success, negative on failure + */ +extern int __cvmx_helper_sgmii_enable(int interface); + +/** + * Return the link state of an IPD/PKO port as returned by + * auto negotiation. 
The result of this function may not match + * Octeon's link config if auto negotiation has changed since + * the last call to cvmx_helper_link_set(). + * + * @ipd_port: IPD/PKO port to query + * + * Returns Link state + */ +extern cvmx_helper_link_info_t __cvmx_helper_sgmii_link_get(int ipd_port); + +/** + * Configure an IPD/PKO port for the specified link state. This + * function does not influence auto negotiation at the PHY level. + * The passed link state must always match the link state returned + * by cvmx_helper_link_get(). It is normally best to use + * cvmx_helper_link_autoconf() instead. + * + * @ipd_port: IPD/PKO port to configure + * @link_info: The new link state + * + * Returns Zero on success, negative on failure + */ +extern int __cvmx_helper_sgmii_link_set(int ipd_port, + cvmx_helper_link_info_t link_info); + +/** + * Configure a port for internal and/or external loopback. Internal loopback + * causes packets sent by the port to be received by Octeon. External loopback + * causes packets received from the wire to sent out again. + * + * @ipd_port: IPD/PKO port to loopback. + * @enable_internal: + * Non zero if you want internal loopback + * @enable_external: + * Non zero if you want external loopback + * + * Returns Zero on success, negative on failure. + */ +extern int __cvmx_helper_sgmii_configure_loopback(int ipd_port, + int enable_internal, + int enable_external); + +#endif diff --git a/drivers/staging/octeon/cvmx-helper-spi.c b/drivers/staging/octeon/cvmx-helper-spi.c new file mode 100644 index 0000000..8ba6c83 --- /dev/null +++ b/drivers/staging/octeon/cvmx-helper-spi.c @@ -0,0 +1,195 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +void __cvmx_interrupt_gmxx_enable(int interface); +void __cvmx_interrupt_spxx_int_msk_enable(int index); +void __cvmx_interrupt_stxx_int_msk_enable(int index); + +/* + * Functions for SPI initialization, configuration, + * and monitoring. + */ +#include <asm/octeon/octeon.h> + +#include "cvmx-config.h" +#include "cvmx-spi.h" +#include "cvmx-helper.h" + +#include "cvmx-pip-defs.h" +#include "cvmx-pko-defs.h" + +/* + * CVMX_HELPER_SPI_TIMEOUT is used to determine how long the SPI + * initialization routines wait for SPI training. You can override the + * value using executive-config.h if necessary. 
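
As a hedged illustration of that override hook: defining the macro before this block is processed suppresses the default. The value's units are whatever cvmx_spi_start_interface() expects, and 20 is an arbitrary example:

/* In executive-config.h, before this header is seen: */
#define CVMX_HELPER_SPI_TIMEOUT 20
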
+ */ +#ifndef CVMX_HELPER_SPI_TIMEOUT +#define CVMX_HELPER_SPI_TIMEOUT 10 +#endif + +/** + * Probe a SPI interface and determine the number of ports + * connected to it. The SPI interface should still be down after + * this call. + * + * @interface: Interface to probe + * + * Returns Number of ports on the interface. Zero to disable. + */ +int __cvmx_helper_spi_probe(int interface) +{ + int num_ports = 0; + + if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) && + cvmx_spi4000_is_present(interface)) { + num_ports = 10; + } else { + union cvmx_pko_reg_crc_enable enable; + num_ports = 16; + /* + * Unlike the SPI4000, most SPI devices don't + * automatically put on the L2 CRC. For everything + * except for the SPI4000 have PKO append the L2 CRC + * to the packet. + */ + enable.u64 = cvmx_read_csr(CVMX_PKO_REG_CRC_ENABLE); + enable.s.enable |= 0xffff << (interface * 16); + cvmx_write_csr(CVMX_PKO_REG_CRC_ENABLE, enable.u64); + } + __cvmx_helper_setup_gmx(interface, num_ports); + return num_ports; +} + +/** + * Bringup and enable a SPI interface. After this call packet I/O + * should be fully functional. This is called with IPD enabled but + * PKO disabled. + * + * @interface: Interface to bring up + * + * Returns Zero on success, negative on failure + */ +int __cvmx_helper_spi_enable(int interface) +{ + /* + * Normally the ethernet L2 CRC is checked and stripped in the + * GMX block. When you are using SPI, this isn' the case and + * IPD needs to check the L2 CRC. + */ + int num_ports = cvmx_helper_ports_on_interface(interface); + int ipd_port; + for (ipd_port = interface * 16; ipd_port < interface * 16 + num_ports; + ipd_port++) { + union cvmx_pip_prt_cfgx port_config; + port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port)); + port_config.s.crc_en = 1; + cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64); + } + + if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) { + cvmx_spi_start_interface(interface, CVMX_SPI_MODE_DUPLEX, + CVMX_HELPER_SPI_TIMEOUT, num_ports); + if (cvmx_spi4000_is_present(interface)) + cvmx_spi4000_initialize(interface); + } + __cvmx_interrupt_spxx_int_msk_enable(interface); + __cvmx_interrupt_stxx_int_msk_enable(interface); + __cvmx_interrupt_gmxx_enable(interface); + return 0; +} + +/** + * Return the link state of an IPD/PKO port as returned by + * auto negotiation. The result of this function may not match + * Octeon's link config if auto negotiation has changed since + * the last call to cvmx_helper_link_set(). 
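
The CRC-enable write above sets one bit per PKO port of the interface; the mask arithmetic in isolation (function name hypothetical):

/* interface 0 -> bits 0..15, interface 1 -> bits 16..31, e.g.
 * example_spi_crc_mask(1) == 0xffff0000. */
static uint64_t example_spi_crc_mask(int interface)
{
        return 0xffffull << (interface * 16);
}
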
+ * + * @ipd_port: IPD/PKO port to query + * + * Returns Link state + */ +cvmx_helper_link_info_t __cvmx_helper_spi_link_get(int ipd_port) +{ + cvmx_helper_link_info_t result; + int interface = cvmx_helper_get_interface_num(ipd_port); + int index = cvmx_helper_get_interface_index_num(ipd_port); + result.u64 = 0; + + if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM) { + /* The simulator gives you a simulated full duplex link */ + result.s.link_up = 1; + result.s.full_duplex = 1; + result.s.speed = 10000; + } else if (cvmx_spi4000_is_present(interface)) { + union cvmx_gmxx_rxx_rx_inbnd inband = + cvmx_spi4000_check_speed(interface, index); + result.s.link_up = inband.s.status; + result.s.full_duplex = inband.s.duplex; + switch (inband.s.speed) { + case 0: /* 10 Mbps */ + result.s.speed = 10; + break; + case 1: /* 100 Mbps */ + result.s.speed = 100; + break; + case 2: /* 1 Gbps */ + result.s.speed = 1000; + break; + case 3: /* Illegal */ + result.s.speed = 0; + result.s.link_up = 0; + break; + } + } else { + /* For generic SPI we can't determine the link, just return some + sane results */ + result.s.link_up = 1; + result.s.full_duplex = 1; + result.s.speed = 10000; + } + return result; +} + +/** + * Configure an IPD/PKO port for the specified link state. This + * function does not influence auto negotiation at the PHY level. + * The passed link state must always match the link state returned + * by cvmx_helper_link_get(). It is normally best to use + * cvmx_helper_link_autoconf() instead. + * + * @ipd_port: IPD/PKO port to configure + * @link_info: The new link state + * + * Returns Zero on success, negative on failure + */ +int __cvmx_helper_spi_link_set(int ipd_port, cvmx_helper_link_info_t link_info) +{ + /* Nothing to do. If we have a SPI4000 then the setup was already performed + by cvmx_spi4000_check_speed(). If not then there isn't any link + info */ + return 0; +} diff --git a/drivers/staging/octeon/cvmx-helper-spi.h b/drivers/staging/octeon/cvmx-helper-spi.h new file mode 100644 index 0000000..69bac03 --- /dev/null +++ b/drivers/staging/octeon/cvmx-helper-spi.h @@ -0,0 +1,84 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/* + * Functions for SPI initialization, configuration, + * and monitoring. + */ +#ifndef __CVMX_HELPER_SPI_H__ +#define __CVMX_HELPER_SPI_H__ + +/** + * Probe a SPI interface and determine the number of ports + * connected to it. 
The SPI interface should still be down after + * this call. + * + * @interface: Interface to probe + * + * Returns Number of ports on the interface. Zero to disable. + */ +extern int __cvmx_helper_spi_probe(int interface); + +/** + * Bringup and enable a SPI interface. After this call packet I/O + * should be fully functional. This is called with IPD enabled but + * PKO disabled. + * + * @interface: Interface to bring up + * + * Returns Zero on success, negative on failure + */ +extern int __cvmx_helper_spi_enable(int interface); + +/** + * Return the link state of an IPD/PKO port as returned by + * auto negotiation. The result of this function may not match + * Octeon's link config if auto negotiation has changed since + * the last call to cvmx_helper_link_set(). + * + * @ipd_port: IPD/PKO port to query + * + * Returns Link state + */ +extern cvmx_helper_link_info_t __cvmx_helper_spi_link_get(int ipd_port); + +/** + * Configure an IPD/PKO port for the specified link state. This + * function does not influence auto negotiation at the PHY level. + * The passed link state must always match the link state returned + * by cvmx_helper_link_get(). It is normally best to use + * cvmx_helper_link_autoconf() instead. + * + * @ipd_port: IPD/PKO port to configure + * @link_info: The new link state + * + * Returns Zero on success, negative on failure + */ +extern int __cvmx_helper_spi_link_set(int ipd_port, + cvmx_helper_link_info_t link_info); + +#endif diff --git a/drivers/staging/octeon/cvmx-helper-util.c b/drivers/staging/octeon/cvmx-helper-util.c new file mode 100644 index 0000000..41ef8a4 --- /dev/null +++ b/drivers/staging/octeon/cvmx-helper-util.c @@ -0,0 +1,433 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/* + * Small helper utilities. 
+ */ +#include <linux/kernel.h> + +#include <asm/octeon/octeon.h> + +#include "cvmx-config.h" + +#include "cvmx-fpa.h" +#include "cvmx-pip.h" +#include "cvmx-pko.h" +#include "cvmx-ipd.h" +#include "cvmx-spi.h" + +#include "cvmx-helper.h" +#include "cvmx-helper-util.h" + +#include <asm/octeon/cvmx-ipd-defs.h> + +/** + * Convert a interface mode into a human readable string + * + * @mode: Mode to convert + * + * Returns String + */ +const char *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t + mode) +{ + switch (mode) { + case CVMX_HELPER_INTERFACE_MODE_DISABLED: + return "DISABLED"; + case CVMX_HELPER_INTERFACE_MODE_RGMII: + return "RGMII"; + case CVMX_HELPER_INTERFACE_MODE_GMII: + return "GMII"; + case CVMX_HELPER_INTERFACE_MODE_SPI: + return "SPI"; + case CVMX_HELPER_INTERFACE_MODE_PCIE: + return "PCIE"; + case CVMX_HELPER_INTERFACE_MODE_XAUI: + return "XAUI"; + case CVMX_HELPER_INTERFACE_MODE_SGMII: + return "SGMII"; + case CVMX_HELPER_INTERFACE_MODE_PICMG: + return "PICMG"; + case CVMX_HELPER_INTERFACE_MODE_NPI: + return "NPI"; + case CVMX_HELPER_INTERFACE_MODE_LOOP: + return "LOOP"; + } + return "UNKNOWN"; +} + +/** + * Debug routine to dump the packet structure to the console + * + * @work: Work queue entry containing the packet to dump + * Returns + */ +int cvmx_helper_dump_packet(cvmx_wqe_t *work) +{ + uint64_t count; + uint64_t remaining_bytes; + union cvmx_buf_ptr buffer_ptr; + uint64_t start_of_buffer; + uint8_t *data_address; + uint8_t *end_of_data; + + cvmx_dprintf("Packet Length: %u\n", work->len); + cvmx_dprintf(" Input Port: %u\n", work->ipprt); + cvmx_dprintf(" QoS: %u\n", work->qos); + cvmx_dprintf(" Buffers: %u\n", work->word2.s.bufs); + + if (work->word2.s.bufs == 0) { + union cvmx_ipd_wqe_fpa_queue wqe_pool; + wqe_pool.u64 = cvmx_read_csr(CVMX_IPD_WQE_FPA_QUEUE); + buffer_ptr.u64 = 0; + buffer_ptr.s.pool = wqe_pool.s.wqe_pool; + buffer_ptr.s.size = 128; + buffer_ptr.s.addr = cvmx_ptr_to_phys(work->packet_data); + if (likely(!work->word2.s.not_IP)) { + union cvmx_pip_ip_offset pip_ip_offset; + pip_ip_offset.u64 = cvmx_read_csr(CVMX_PIP_IP_OFFSET); + buffer_ptr.s.addr += + (pip_ip_offset.s.offset << 3) - + work->word2.s.ip_offset; + buffer_ptr.s.addr += (work->word2.s.is_v6 ^ 1) << 2; + } else { + /* + * WARNING: This code assumes that the packet + * is not RAW. If it was, we would use + * PIP_GBL_CFG[RAW_SHF] instead of + * PIP_GBL_CFG[NIP_SHF]. 
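
The start_of_buffer computation above works in 128-byte cache lines; isolated as a sketch (function name hypothetical):

/*
 * buffer_ptr.s.back counts 128-byte cache lines between the start of
 * the buffer and the data address: align the address down to a line
 * (>> 7), step back "back" lines, then convert back to bytes (<< 7).
 */
static uint64_t example_start_of_buffer(uint64_t addr, unsigned int back)
{
        return ((addr >> 7) - back) << 7;
}
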
+ */ + union cvmx_pip_gbl_cfg pip_gbl_cfg; + pip_gbl_cfg.u64 = cvmx_read_csr(CVMX_PIP_GBL_CFG); + buffer_ptr.s.addr += pip_gbl_cfg.s.nip_shf; + } + } else + buffer_ptr = work->packet_ptr; + remaining_bytes = work->len; + + while (remaining_bytes) { + start_of_buffer = + ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7; + cvmx_dprintf(" Buffer Start:%llx\n", + (unsigned long long)start_of_buffer); + cvmx_dprintf(" Buffer I : %u\n", buffer_ptr.s.i); + cvmx_dprintf(" Buffer Back: %u\n", buffer_ptr.s.back); + cvmx_dprintf(" Buffer Pool: %u\n", buffer_ptr.s.pool); + cvmx_dprintf(" Buffer Data: %llx\n", + (unsigned long long)buffer_ptr.s.addr); + cvmx_dprintf(" Buffer Size: %u\n", buffer_ptr.s.size); + + cvmx_dprintf("\t\t"); + data_address = (uint8_t *) cvmx_phys_to_ptr(buffer_ptr.s.addr); + end_of_data = data_address + buffer_ptr.s.size; + count = 0; + while (data_address < end_of_data) { + if (remaining_bytes == 0) + break; + else + remaining_bytes--; + cvmx_dprintf("%02x", (unsigned int)*data_address); + data_address++; + if (remaining_bytes && (count == 7)) { + cvmx_dprintf("\n\t\t"); + count = 0; + } else + count++; + } + cvmx_dprintf("\n"); + + if (remaining_bytes) + buffer_ptr = *(union cvmx_buf_ptr *) + cvmx_phys_to_ptr(buffer_ptr.s.addr - 8); + } + return 0; +} + +/** + * Setup Random Early Drop on a specific input queue + * + * @queue: Input queue to setup RED on (0-7) + * @pass_thresh: + * Packets will begin slowly dropping when there are less than + * this many packet buffers free in FPA 0. + * @drop_thresh: + * All incomming packets will be dropped when there are less + * than this many free packet buffers in FPA 0. + * Returns Zero on success. Negative on failure + */ +int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh) +{ + union cvmx_ipd_qosx_red_marks red_marks; + union cvmx_ipd_red_quex_param red_param; + + /* Set RED to begin dropping packets when there are pass_thresh buffers + left. It will linearly drop more packets until reaching drop_thresh + buffers */ + red_marks.u64 = 0; + red_marks.s.drop = drop_thresh; + red_marks.s.pass = pass_thresh; + cvmx_write_csr(CVMX_IPD_QOSX_RED_MARKS(queue), red_marks.u64); + + /* Use the actual queue 0 counter, not the average */ + red_param.u64 = 0; + red_param.s.prb_con = + (255ul << 24) / (red_marks.s.pass - red_marks.s.drop); + red_param.s.avg_con = 1; + red_param.s.new_con = 255; + red_param.s.use_pcnt = 1; + cvmx_write_csr(CVMX_IPD_RED_QUEX_PARAM(queue), red_param.u64); + return 0; +} + +/** + * Setup Random Early Drop to automatically begin dropping packets. + * + * @pass_thresh: + * Packets will begin slowly dropping when there are less than + * this many packet buffers free in FPA 0. + * @drop_thresh: + * All incomming packets will be dropped when there are less + * than this many free packet buffers in FPA 0. + * Returns Zero on success. Negative on failure + */ +int cvmx_helper_setup_red(int pass_thresh, int drop_thresh) +{ + union cvmx_ipd_portx_bp_page_cnt page_cnt; + union cvmx_ipd_bp_prt_red_end ipd_bp_prt_red_end; + union cvmx_ipd_red_port_enable red_port_enable; + int queue; + int interface; + int port; + + /* Disable backpressure based on queued buffers. 
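
For a concrete feel for the prb_con arithmetic above (illustrative thresholds, not recommendations): with pass_thresh = 1000 and drop_thresh = 500 free buffers,

/* (255 << 24) / (1000 - 500) == 8556380: the drop probability ramps
 * linearly from 0 to its maximum across a 500-buffer window. */
static uint64_t example_red_prb_con(int pass_thresh, int drop_thresh)
{
        return (255ull << 24) / (pass_thresh - drop_thresh);
}
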
It needs SW support */ + page_cnt.u64 = 0; + page_cnt.s.bp_enb = 0; + page_cnt.s.page_cnt = 100; + for (interface = 0; interface < 2; interface++) { + for (port = cvmx_helper_get_first_ipd_port(interface); + port < cvmx_helper_get_last_ipd_port(interface); port++) + cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port), + page_cnt.u64); + } + + for (queue = 0; queue < 8; queue++) + cvmx_helper_setup_red_queue(queue, pass_thresh, drop_thresh); + + /* Shutoff the dropping based on the per port page count. SW isn't + decrementing it right now */ + ipd_bp_prt_red_end.u64 = 0; + ipd_bp_prt_red_end.s.prt_enb = 0; + cvmx_write_csr(CVMX_IPD_BP_PRT_RED_END, ipd_bp_prt_red_end.u64); + + red_port_enable.u64 = 0; + red_port_enable.s.prt_enb = 0xfffffffffull; + red_port_enable.s.avg_dly = 10000; + red_port_enable.s.prb_dly = 10000; + cvmx_write_csr(CVMX_IPD_RED_PORT_ENABLE, red_port_enable.u64); + + return 0; +} + +/** + * Setup the common GMX settings that determine the number of + * ports. These setting apply to almost all configurations of all + * chips. + * + * @interface: Interface to configure + * @num_ports: Number of ports on the interface + * + * Returns Zero on success, negative on failure + */ +int __cvmx_helper_setup_gmx(int interface, int num_ports) +{ + union cvmx_gmxx_tx_prts gmx_tx_prts; + union cvmx_gmxx_rx_prts gmx_rx_prts; + union cvmx_pko_reg_gmx_port_mode pko_mode; + union cvmx_gmxx_txx_thresh gmx_tx_thresh; + int index; + + /* Tell GMX the number of TX ports on this interface */ + gmx_tx_prts.u64 = cvmx_read_csr(CVMX_GMXX_TX_PRTS(interface)); + gmx_tx_prts.s.prts = num_ports; + cvmx_write_csr(CVMX_GMXX_TX_PRTS(interface), gmx_tx_prts.u64); + + /* Tell GMX the number of RX ports on this interface. This only + ** applies to *GMII and XAUI ports */ + if (cvmx_helper_interface_get_mode(interface) == + CVMX_HELPER_INTERFACE_MODE_RGMII + || cvmx_helper_interface_get_mode(interface) == + CVMX_HELPER_INTERFACE_MODE_SGMII + || cvmx_helper_interface_get_mode(interface) == + CVMX_HELPER_INTERFACE_MODE_GMII + || cvmx_helper_interface_get_mode(interface) == + CVMX_HELPER_INTERFACE_MODE_XAUI) { + if (num_ports > 4) { + cvmx_dprintf("__cvmx_helper_setup_gmx: Illegal " + "num_ports\n"); + return -1; + } + + gmx_rx_prts.u64 = cvmx_read_csr(CVMX_GMXX_RX_PRTS(interface)); + gmx_rx_prts.s.prts = num_ports; + cvmx_write_csr(CVMX_GMXX_RX_PRTS(interface), gmx_rx_prts.u64); + } + + /* Skip setting CVMX_PKO_REG_GMX_PORT_MODE on 30XX, 31XX, and 50XX */ + if (!OCTEON_IS_MODEL(OCTEON_CN30XX) && !OCTEON_IS_MODEL(OCTEON_CN31XX) + && !OCTEON_IS_MODEL(OCTEON_CN50XX)) { + /* Tell PKO the number of ports on this interface */ + pko_mode.u64 = cvmx_read_csr(CVMX_PKO_REG_GMX_PORT_MODE); + if (interface == 0) { + if (num_ports == 1) + pko_mode.s.mode0 = 4; + else if (num_ports == 2) + pko_mode.s.mode0 = 3; + else if (num_ports <= 4) + pko_mode.s.mode0 = 2; + else if (num_ports <= 8) + pko_mode.s.mode0 = 1; + else + pko_mode.s.mode0 = 0; + } else { + if (num_ports == 1) + pko_mode.s.mode1 = 4; + else if (num_ports == 2) + pko_mode.s.mode1 = 3; + else if (num_ports <= 4) + pko_mode.s.mode1 = 2; + else if (num_ports <= 8) + pko_mode.s.mode1 = 1; + else + pko_mode.s.mode1 = 0; + } + cvmx_write_csr(CVMX_PKO_REG_GMX_PORT_MODE, pko_mode.u64); + } + + /* + * Set GMX to buffer as much data as possible before starting + * transmit. This reduces the chances that we have a TX under + * run due to memory contention. Any packet that fits entirely + * in the GMX FIFO can never have an under run regardless of + * memory load. 
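
The mode0/mode1 ladder above compresses the port count into a 3-bit PKO mode; the same encoding as a helper (name hypothetical):

static int example_pko_gmx_mode(int num_ports)
{
        /* 1 port -> 4, 2 -> 3, 3-4 -> 2, 5-8 -> 1, more -> 0,
         * exactly as the if/else ladder above. */
        if (num_ports == 1)
                return 4;
        if (num_ports == 2)
                return 3;
        if (num_ports <= 4)
                return 2;
        if (num_ports <= 8)
                return 1;
        return 0;
}
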
+ */ + gmx_tx_thresh.u64 = cvmx_read_csr(CVMX_GMXX_TXX_THRESH(0, interface)); + if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX) + || OCTEON_IS_MODEL(OCTEON_CN50XX)) { + /* These chips have a fixed max threshold of 0x40 */ + gmx_tx_thresh.s.cnt = 0x40; + } else { + /* Choose the max value for the number of ports */ + if (num_ports <= 1) + gmx_tx_thresh.s.cnt = 0x100 / 1; + else if (num_ports == 2) + gmx_tx_thresh.s.cnt = 0x100 / 2; + else + gmx_tx_thresh.s.cnt = 0x100 / 4; + } + /* + * SPI and XAUI can have lots of ports but the GMX hardware + * only ever has a max of 4. + */ + if (num_ports > 4) + num_ports = 4; + for (index = 0; index < num_ports; index++) + cvmx_write_csr(CVMX_GMXX_TXX_THRESH(index, interface), + gmx_tx_thresh.u64); + + return 0; +} + +/** + * Returns the IPD/PKO port number for a port on teh given + * interface. + * + * @interface: Interface to use + * @port: Port on the interface + * + * Returns IPD/PKO port number + */ +int cvmx_helper_get_ipd_port(int interface, int port) +{ + switch (interface) { + case 0: + return port; + case 1: + return port + 16; + case 2: + return port + 32; + case 3: + return port + 36; + } + return -1; +} + +/** + * Returns the interface number for an IPD/PKO port number. + * + * @ipd_port: IPD/PKO port number + * + * Returns Interface number + */ +int cvmx_helper_get_interface_num(int ipd_port) +{ + if (ipd_port < 16) + return 0; + else if (ipd_port < 32) + return 1; + else if (ipd_port < 36) + return 2; + else if (ipd_port < 40) + return 3; + else + cvmx_dprintf("cvmx_helper_get_interface_num: Illegal IPD " + "port number\n"); + + return -1; +} + +/** + * Returns the interface index number for an IPD/PKO port + * number. + * + * @ipd_port: IPD/PKO port number + * + * Returns Interface index number + */ +int cvmx_helper_get_interface_index_num(int ipd_port) +{ + if (ipd_port < 32) + return ipd_port & 15; + else if (ipd_port < 36) + return ipd_port & 3; + else if (ipd_port < 40) + return ipd_port & 3; + else + cvmx_dprintf("cvmx_helper_get_interface_index_num: " + "Illegal IPD port number\n"); + + return -1; +} diff --git a/drivers/staging/octeon/cvmx-helper-util.h b/drivers/staging/octeon/cvmx-helper-util.h new file mode 100644 index 0000000..6a6e52f --- /dev/null +++ b/drivers/staging/octeon/cvmx-helper-util.h @@ -0,0 +1,215 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/* + * + * Small helper utilities. 
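
The three port-number mappings above are mutual inverses; a few worked values, written as userspace-style sanity checks purely for illustration (assert() would not appear in kernel code):

#include <assert.h>

static void example_port_mapping_checks(void)
{
        assert(cvmx_helper_get_ipd_port(1, 2) == 18);         /* 16 + 2 */
        assert(cvmx_helper_get_interface_num(18) == 1);       /* 16 <= 18 < 32 */
        assert(cvmx_helper_get_interface_index_num(18) == 2); /* 18 & 15 */
        assert(cvmx_helper_get_ipd_port(3, 1) == 37);         /* 36 + 1 */
}
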
+ * + */ + +#ifndef __CVMX_HELPER_UTIL_H__ +#define __CVMX_HELPER_UTIL_H__ + +/** + * Convert a interface mode into a human readable string + * + * @mode: Mode to convert + * + * Returns String + */ +extern const char + *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t mode); + +/** + * Debug routine to dump the packet structure to the console + * + * @work: Work queue entry containing the packet to dump + * Returns + */ +extern int cvmx_helper_dump_packet(cvmx_wqe_t *work); + +/** + * Setup Random Early Drop on a specific input queue + * + * @queue: Input queue to setup RED on (0-7) + * @pass_thresh: + * Packets will begin slowly dropping when there are less than + * this many packet buffers free in FPA 0. + * @drop_thresh: + * All incomming packets will be dropped when there are less + * than this many free packet buffers in FPA 0. + * Returns Zero on success. Negative on failure + */ +extern int cvmx_helper_setup_red_queue(int queue, int pass_thresh, + int drop_thresh); + +/** + * Setup Random Early Drop to automatically begin dropping packets. + * + * @pass_thresh: + * Packets will begin slowly dropping when there are less than + * this many packet buffers free in FPA 0. + * @drop_thresh: + * All incomming packets will be dropped when there are less + * than this many free packet buffers in FPA 0. + * Returns Zero on success. Negative on failure + */ +extern int cvmx_helper_setup_red(int pass_thresh, int drop_thresh); + +/** + * Get the version of the CVMX libraries. + * + * Returns Version string. Note this buffer is allocated statically + * and will be shared by all callers. + */ +extern const char *cvmx_helper_get_version(void); + +/** + * Setup the common GMX settings that determine the number of + * ports. These setting apply to almost all configurations of all + * chips. + * + * @interface: Interface to configure + * @num_ports: Number of ports on the interface + * + * Returns Zero on success, negative on failure + */ +extern int __cvmx_helper_setup_gmx(int interface, int num_ports); + +/** + * Returns the IPD/PKO port number for a port on the given + * interface. + * + * @interface: Interface to use + * @port: Port on the interface + * + * Returns IPD/PKO port number + */ +extern int cvmx_helper_get_ipd_port(int interface, int port); + +/** + * Returns the IPD/PKO port number for the first port on the given + * interface. + * + * @interface: Interface to use + * + * Returns IPD/PKO port number + */ +static inline int cvmx_helper_get_first_ipd_port(int interface) +{ + return cvmx_helper_get_ipd_port(interface, 0); +} + +/** + * Returns the IPD/PKO port number for the last port on the given + * interface. + * + * @interface: Interface to use + * + * Returns IPD/PKO port number + */ +static inline int cvmx_helper_get_last_ipd_port(int interface) +{ + extern int cvmx_helper_ports_on_interface(int interface); + + return cvmx_helper_get_first_ipd_port(interface) + + cvmx_helper_ports_on_interface(interface) - 1; +} + +/** + * Free the packet buffers contained in a work queue entry. + * The work queue entry is not freed. 
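
The two inline helpers above bound a per-interface port loop; a usage sketch (the wrapper and callback are hypothetical):

static void example_for_each_ipd_port(int interface,
                                      void (*fn)(int ipd_port))
{
        int port;

        for (port = cvmx_helper_get_first_ipd_port(interface);
             port <= cvmx_helper_get_last_ipd_port(interface); port++)
                fn(port);
}
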
+ *
+ * @work:   Work queue entry with packet to free
+ */
+static inline void cvmx_helper_free_packet_data(cvmx_wqe_t *work)
+{
+	uint64_t number_buffers;
+	union cvmx_buf_ptr buffer_ptr;
+	union cvmx_buf_ptr next_buffer_ptr;
+	uint64_t start_of_buffer;
+
+	number_buffers = work->word2.s.bufs;
+	if (number_buffers == 0)
+		return;
+	buffer_ptr = work->packet_ptr;
+
+	/*
+	 * Since the number of buffers is not zero, we know this is
+	 * not a dynamic short packet. We need to check if it is a
+	 * packet received with IPD_CTL_STATUS[NO_WPTR]. If this is
+	 * true, we need to free all buffers except for the first
+	 * one. The caller doesn't expect their WQE pointer to be
+	 * freed
+	 */
+	start_of_buffer = ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
+	if (cvmx_ptr_to_phys(work) == start_of_buffer) {
+		next_buffer_ptr =
+		    *(union cvmx_buf_ptr *) cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
+		buffer_ptr = next_buffer_ptr;
+		number_buffers--;
+	}
+
+	while (number_buffers--) {
+		/*
+		 * Remember the back pointer is in cache lines, not
+		 * 64bit words
+		 */
+		start_of_buffer =
+		    ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
+		/*
+		 * Read pointer to next buffer before we free the
+		 * current buffer.
+		 */
+		next_buffer_ptr =
+		    *(union cvmx_buf_ptr *) cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
+		cvmx_fpa_free(cvmx_phys_to_ptr(start_of_buffer),
+			      buffer_ptr.s.pool, 0);
+		buffer_ptr = next_buffer_ptr;
+	}
+}
+
+/**
+ * Returns the interface number for an IPD/PKO port number.
+ *
+ * @ipd_port: IPD/PKO port number
+ *
+ * Returns Interface number
+ */
+extern int cvmx_helper_get_interface_num(int ipd_port);
+
+/**
+ * Returns the interface index number for an IPD/PKO port
+ * number.
+ *
+ * @ipd_port: IPD/PKO port number
+ *
+ * Returns Interface index number
+ */
+extern int cvmx_helper_get_interface_index_num(int ipd_port);
+
+#endif /* __CVMX_HELPER_UTIL_H__ */
diff --git a/drivers/staging/octeon/cvmx-helper-xaui.c b/drivers/staging/octeon/cvmx-helper-xaui.c
new file mode 100644
index 0000000..a11e676
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-xaui.c
@@ -0,0 +1,348 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Functions for XAUI initialization, configuration,
+ * and monitoring.
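+ *
+ * The enable path below follows a numbered bring-up sequence:
+ * disable GMX and the GMX/PCSX interrupts, reset and align the
+ * PCSX lanes, configure GMX for a single 10 Gbps port, clear any
+ * stale error state, and only then re-enable packet reception.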
+ * + */ + +#include <asm/octeon/octeon.h> + +#include "cvmx-config.h" + +#include "cvmx-helper.h" + +#include "cvmx-pko-defs.h" +#include "cvmx-gmxx-defs.h" +#include "cvmx-pcsxx-defs.h" + +void __cvmx_interrupt_gmxx_enable(int interface); +void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block); +void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index); +/** + * Probe a XAUI interface and determine the number of ports + * connected to it. The XAUI interface should still be down + * after this call. + * + * @interface: Interface to probe + * + * Returns Number of ports on the interface. Zero to disable. + */ +int __cvmx_helper_xaui_probe(int interface) +{ + int i; + union cvmx_gmxx_hg2_control gmx_hg2_control; + union cvmx_gmxx_inf_mode mode; + + /* + * Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the + * interface needs to be enabled before IPD otherwise per port + * backpressure may not work properly. + */ + mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface)); + mode.s.en = 1; + cvmx_write_csr(CVMX_GMXX_INF_MODE(interface), mode.u64); + + __cvmx_helper_setup_gmx(interface, 1); + + /* + * Setup PKO to support 16 ports for HiGig2 virtual + * ports. We're pointing all of the PKO packet ports for this + * interface to the XAUI. This allows us to use HiGig2 + * backpressure per port. + */ + for (i = 0; i < 16; i++) { + union cvmx_pko_mem_port_ptrs pko_mem_port_ptrs; + pko_mem_port_ptrs.u64 = 0; + /* + * We set each PKO port to have equal priority in a + * round robin fashion. + */ + pko_mem_port_ptrs.s.static_p = 0; + pko_mem_port_ptrs.s.qos_mask = 0xff; + /* All PKO ports map to the same XAUI hardware port */ + pko_mem_port_ptrs.s.eid = interface * 4; + pko_mem_port_ptrs.s.pid = interface * 16 + i; + cvmx_write_csr(CVMX_PKO_MEM_PORT_PTRS, pko_mem_port_ptrs.u64); + } + + /* If HiGig2 is enabled return 16 ports, otherwise return 1 port */ + gmx_hg2_control.u64 = cvmx_read_csr(CVMX_GMXX_HG2_CONTROL(interface)); + if (gmx_hg2_control.s.hg2tx_en) + return 16; + else + return 1; +} + +/** + * Bringup and enable a XAUI interface. After this call packet + * I/O should be fully functional. This is called with IPD + * enabled but PKO disabled. + * + * @interface: Interface to bring up + * + * Returns Zero on success, negative on failure + */ +int __cvmx_helper_xaui_enable(int interface) +{ + union cvmx_gmxx_prtx_cfg gmx_cfg; + union cvmx_pcsxx_control1_reg xauiCtl; + union cvmx_pcsxx_misc_ctl_reg xauiMiscCtl; + union cvmx_gmxx_tx_xaui_ctl gmxXauiTxCtl; + union cvmx_gmxx_rxx_int_en gmx_rx_int_en; + union cvmx_gmxx_tx_int_en gmx_tx_int_en; + union cvmx_pcsxx_int_en_reg pcsx_int_en_reg; + + /* (1) Interface has already been enabled. */ + + /* (2) Disable GMX. */ + xauiMiscCtl.u64 = cvmx_read_csr(CVMX_PCSXX_MISC_CTL_REG(interface)); + xauiMiscCtl.s.gmxeno = 1; + cvmx_write_csr(CVMX_PCSXX_MISC_CTL_REG(interface), xauiMiscCtl.u64); + + /* (3) Disable GMX and PCSX interrupts. */ + gmx_rx_int_en.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_EN(0, interface)); + cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0, interface), 0x0); + gmx_tx_int_en.u64 = cvmx_read_csr(CVMX_GMXX_TX_INT_EN(interface)); + cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), 0x0); + pcsx_int_en_reg.u64 = cvmx_read_csr(CVMX_PCSXX_INT_EN_REG(interface)); + cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), 0x0); + + /* (4) Bring up the PCSX and GMX reconciliation layer. */ + /* (4)a Set polarity and lane swapping. 
*/
+	/* (4)b */
+	gmxXauiTxCtl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
+	/* Enable better IFG packing, which improves performance */
+	gmxXauiTxCtl.s.dic_en = 1;
+	gmxXauiTxCtl.s.uni_en = 0;
+	cvmx_write_csr(CVMX_GMXX_TX_XAUI_CTL(interface), gmxXauiTxCtl.u64);
+
+	/* (4)c Apply reset sequence */
+	xauiCtl.u64 = cvmx_read_csr(CVMX_PCSXX_CONTROL1_REG(interface));
+	xauiCtl.s.lo_pwr = 0;
+	xauiCtl.s.reset = 1;
+	cvmx_write_csr(CVMX_PCSXX_CONTROL1_REG(interface), xauiCtl.u64);
+
+	/* Wait for PCS to come out of reset */
+	if (CVMX_WAIT_FOR_FIELD64
+	    (CVMX_PCSXX_CONTROL1_REG(interface), union cvmx_pcsxx_control1_reg,
+	     reset, ==, 0, 10000))
+		return -1;
+	/* Wait for PCS to be aligned */
+	if (CVMX_WAIT_FOR_FIELD64
+	    (CVMX_PCSXX_10GBX_STATUS_REG(interface),
+	     union cvmx_pcsxx_10gbx_status_reg, alignd, ==, 1, 10000))
+		return -1;
+	/* Wait for RX to be ready */
+	if (CVMX_WAIT_FOR_FIELD64
+	    (CVMX_GMXX_RX_XAUI_CTL(interface), union cvmx_gmxx_rx_xaui_ctl,
+	     status, ==, 0, 10000))
+		return -1;
+
+	/* (6) Configure GMX */
+	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
+	gmx_cfg.s.en = 0;
+	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
+
+	/* Wait for GMX RX to be idle */
+	if (CVMX_WAIT_FOR_FIELD64
+	    (CVMX_GMXX_PRTX_CFG(0, interface), union cvmx_gmxx_prtx_cfg,
+	     rx_idle, ==, 1, 10000))
+		return -1;
+	/* Wait for GMX TX to be idle */
+	if (CVMX_WAIT_FOR_FIELD64
+	    (CVMX_GMXX_PRTX_CFG(0, interface), union cvmx_gmxx_prtx_cfg,
+	     tx_idle, ==, 1, 10000))
+		return -1;
+
+	/* GMX configure */
+	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
+	gmx_cfg.s.speed = 1;
+	gmx_cfg.s.speed_msb = 0;
+	gmx_cfg.s.slottime = 1;
+	cvmx_write_csr(CVMX_GMXX_TX_PRTS(interface), 1);
+	cvmx_write_csr(CVMX_GMXX_TXX_SLOT(0, interface), 512);
+	cvmx_write_csr(CVMX_GMXX_TXX_BURST(0, interface), 8192);
+	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
+
+	/* (7) Clear out any error state */
+	cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(0, interface),
+		       cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(0, interface)));
+	cvmx_write_csr(CVMX_GMXX_TX_INT_REG(interface),
+		       cvmx_read_csr(CVMX_GMXX_TX_INT_REG(interface)));
+	cvmx_write_csr(CVMX_PCSXX_INT_REG(interface),
+		       cvmx_read_csr(CVMX_PCSXX_INT_REG(interface)));
+
+	/* Wait for receive link */
+	if (CVMX_WAIT_FOR_FIELD64
+	    (CVMX_PCSXX_STATUS1_REG(interface), union cvmx_pcsxx_status1_reg,
+	     rcv_lnk, ==, 1, 10000))
+		return -1;
+	if (CVMX_WAIT_FOR_FIELD64
+	    (CVMX_PCSXX_STATUS2_REG(interface), union cvmx_pcsxx_status2_reg,
+	     xmtflt, ==, 0, 10000))
+		return -1;
+	if (CVMX_WAIT_FOR_FIELD64
+	    (CVMX_PCSXX_STATUS2_REG(interface), union cvmx_pcsxx_status2_reg,
+	     rcvflt, ==, 0, 10000))
+		return -1;
+
+	cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0, interface), gmx_rx_int_en.u64);
+	cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), gmx_tx_int_en.u64);
+	cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), pcsx_int_en_reg.u64);
+
+	cvmx_helper_link_autoconf(cvmx_helper_get_ipd_port(interface, 0));
+
+	/* (8) Enable packet reception */
+	xauiMiscCtl.s.gmxeno = 0;
+	cvmx_write_csr(CVMX_PCSXX_MISC_CTL_REG(interface), xauiMiscCtl.u64);
+
+	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
+	gmx_cfg.s.en = 1;
+	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
+
+	__cvmx_interrupt_pcsx_intx_en_reg_enable(0, interface);
+	__cvmx_interrupt_pcsx_intx_en_reg_enable(1, interface);
+	__cvmx_interrupt_pcsx_intx_en_reg_enable(2, interface);
+	__cvmx_interrupt_pcsx_intx_en_reg_enable(3, interface);
+	__cvmx_interrupt_pcsxx_int_en_reg_enable(interface);
+	__cvmx_interrupt_gmxx_enable(interface);
+
+	return 0;
+}
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_xaui_link_get(int ipd_port)
+{
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	union cvmx_gmxx_tx_xaui_ctl gmxx_tx_xaui_ctl;
+	union cvmx_gmxx_rx_xaui_ctl gmxx_rx_xaui_ctl;
+	union cvmx_pcsxx_status1_reg pcsxx_status1_reg;
+	cvmx_helper_link_info_t result;
+
+	gmxx_tx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
+	gmxx_rx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RX_XAUI_CTL(interface));
+	pcsxx_status1_reg.u64 =
+	    cvmx_read_csr(CVMX_PCSXX_STATUS1_REG(interface));
+	result.u64 = 0;
+
+	/* Only return a link if both RX and TX are happy */
+	if ((gmxx_tx_xaui_ctl.s.ls == 0) && (gmxx_rx_xaui_ctl.s.status == 0) &&
+	    (pcsxx_status1_reg.s.rcv_lnk == 1)) {
+		result.s.link_up = 1;
+		result.s.full_duplex = 1;
+		result.s.speed = 10000;
+	} else {
+		/* Disable GMX and PCSX interrupts. */
+		cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0, interface), 0x0);
+		cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
+		cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);
+	}
+	return result;
+}
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @ipd_port:  IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_xaui_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
+{
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	union cvmx_gmxx_tx_xaui_ctl gmxx_tx_xaui_ctl;
+	union cvmx_gmxx_rx_xaui_ctl gmxx_rx_xaui_ctl;
+
+	gmxx_tx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
+	gmxx_rx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RX_XAUI_CTL(interface));
+
+	/* If the link shouldn't be up, then just return */
+	if (!link_info.s.link_up)
+		return 0;
+
+	/* Do nothing if both RX and TX are happy */
+	if ((gmxx_tx_xaui_ctl.s.ls == 0) && (gmxx_rx_xaui_ctl.s.status == 0))
+		return 0;
+
+	/* Bring the link up */
+	return __cvmx_helper_xaui_enable(interface);
+}
+
+/**
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ *
+ * @ipd_port: IPD/PKO port to loopback.
+ * @enable_internal:
+ *                 Non zero if you want internal loopback
+ * @enable_external:
+ *                 Non zero if you want external loopback
+ *
+ * Returns Zero on success, negative on failure.
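+ *
+ * For example, a board-level self test might loop the first port of
+ * an XAUI interface back internally (variable names illustrative):
+ *
+ *   int ipd_port = cvmx_helper_get_ipd_port(interface, 0);
+ *   __cvmx_helper_xaui_configure_loopback(ipd_port, 1, 0);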
+ */ +extern int __cvmx_helper_xaui_configure_loopback(int ipd_port, + int enable_internal, + int enable_external) +{ + int interface = cvmx_helper_get_interface_num(ipd_port); + union cvmx_pcsxx_control1_reg pcsxx_control1_reg; + union cvmx_gmxx_xaui_ext_loopback gmxx_xaui_ext_loopback; + + /* Set the internal loop */ + pcsxx_control1_reg.u64 = + cvmx_read_csr(CVMX_PCSXX_CONTROL1_REG(interface)); + pcsxx_control1_reg.s.loopbck1 = enable_internal; + cvmx_write_csr(CVMX_PCSXX_CONTROL1_REG(interface), + pcsxx_control1_reg.u64); + + /* Set the external loop */ + gmxx_xaui_ext_loopback.u64 = + cvmx_read_csr(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface)); + gmxx_xaui_ext_loopback.s.en = enable_external; + cvmx_write_csr(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface), + gmxx_xaui_ext_loopback.u64); + + /* Take the link through a reset */ + return __cvmx_helper_xaui_enable(interface); +} diff --git a/drivers/staging/octeon/cvmx-helper-xaui.h b/drivers/staging/octeon/cvmx-helper-xaui.h new file mode 100644 index 0000000..4b4db2f --- /dev/null +++ b/drivers/staging/octeon/cvmx-helper-xaui.h @@ -0,0 +1,103 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/** + * @file + * + * Functions for XAUI initialization, configuration, + * and monitoring. + * + */ +#ifndef __CVMX_HELPER_XAUI_H__ +#define __CVMX_HELPER_XAUI_H__ + +/** + * Probe a XAUI interface and determine the number of ports + * connected to it. The XAUI interface should still be down + * after this call. + * + * @interface: Interface to probe + * + * Returns Number of ports on the interface. Zero to disable. + */ +extern int __cvmx_helper_xaui_probe(int interface); + +/** + * Bringup and enable a XAUI interface. After this call packet + * I/O should be fully functional. This is called with IPD + * enabled but PKO disabled. + * + * @interface: Interface to bring up + * + * Returns Zero on success, negative on failure + */ +extern int __cvmx_helper_xaui_enable(int interface); + +/** + * Return the link state of an IPD/PKO port as returned by + * auto negotiation. The result of this function may not match + * Octeon's link config if auto negotiation has changed since + * the last call to cvmx_helper_link_set(). + * + * @ipd_port: IPD/PKO port to query + * + * Returns Link state + */ +extern cvmx_helper_link_info_t __cvmx_helper_xaui_link_get(int ipd_port); + +/** + * Configure an IPD/PKO port for the specified link state. 
This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @ipd_port:  IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int __cvmx_helper_xaui_link_set(int ipd_port,
+				       cvmx_helper_link_info_t link_info);
+
+/**
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ *
+ * @ipd_port: IPD/PKO port to loopback.
+ * @enable_internal:
+ *                 Non zero if you want internal loopback
+ * @enable_external:
+ *                 Non zero if you want external loopback
+ *
+ * Returns Zero on success, negative on failure.
+ */
+extern int __cvmx_helper_xaui_configure_loopback(int ipd_port,
+						 int enable_internal,
+						 int enable_external);
+#endif
diff --git a/drivers/staging/octeon/cvmx-helper.c b/drivers/staging/octeon/cvmx-helper.c
new file mode 100644
index 0000000..5915066
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper.c
@@ -0,0 +1,1058 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * Helper functions for common, but complicated tasks.
+ *
+ */
+#include <asm/octeon/octeon.h>
+
+#include "cvmx-config.h"
+
+#include "cvmx-fpa.h"
+#include "cvmx-pip.h"
+#include "cvmx-pko.h"
+#include "cvmx-ipd.h"
+#include "cvmx-spi.h"
+#include "cvmx-helper.h"
+#include "cvmx-helper-board.h"
+
+#include "cvmx-pip-defs.h"
+#include "cvmx-smix-defs.h"
+#include "cvmx-asxx-defs.h"
+
+/**
+ * cvmx_override_pko_queue_priority(int ipd_port, uint64_t
+ * priorities[16]) is a function pointer. It is meant to allow
+ * customization of the PKO queue priorities based on the port
+ * number. Users should set this pointer to a function before
+ * calling any cvmx-helper operations.
+ */
+void (*cvmx_override_pko_queue_priority) (int pko_port,
+					  uint64_t priorities[16]);
+
+/**
+ * cvmx_override_ipd_port_setup(int ipd_port) is a function
+ * pointer. It is meant to allow customization of the IPD port
+ * setup before packet input/output comes online. It is called
+ * after cvmx-helper does the default IPD configuration, but
+ * before IPD is enabled.
Users should set this pointer to a
+ * function before calling any cvmx-helper operations.
+ */
+void (*cvmx_override_ipd_port_setup) (int ipd_port);
+
+/* Port count per interface */
+static int interface_port_count[4] = { 0, 0, 0, 0 };
+
+/* Last configured link info, indexed by IPD/PKO port */
+static cvmx_helper_link_info_t
+    port_link_info[CVMX_PIP_NUM_INPUT_PORTS];
+
+/**
+ * Return the number of interfaces the chip has. Each interface
+ * may have multiple ports. Most chips support two interfaces,
+ * but the CNX0XX and CNX1XX are exceptions. These only support
+ * one interface.
+ *
+ * Returns Number of interfaces on chip
+ */
+int cvmx_helper_get_number_of_interfaces(void)
+{
+	if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
+		return 4;
+	else
+		return 3;
+}
+
+/**
+ * Return the number of ports on an interface. Depending on the
+ * chip and configuration, this can be 1-16. A value of 0
+ * specifies that the interface doesn't exist or isn't usable.
+ *
+ * @interface: Interface to get the port count for
+ *
+ * Returns Number of ports on interface. Can be Zero.
+ */
+int cvmx_helper_ports_on_interface(int interface)
+{
+	return interface_port_count[interface];
+}
+
+/**
+ * Get the operating mode of an interface. Depending on the Octeon
+ * chip and configuration, this function returns an enumeration
+ * of the type of packet I/O supported by an interface.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Mode of the interface. Unknown or unsupported interfaces return
+ *         DISABLED.
+ */
+cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface)
+{
+	union cvmx_gmxx_inf_mode mode;
+	if (interface == 2)
+		return CVMX_HELPER_INTERFACE_MODE_NPI;
+
+	if (interface == 3) {
+		if (OCTEON_IS_MODEL(OCTEON_CN56XX)
+		    || OCTEON_IS_MODEL(OCTEON_CN52XX))
+			return CVMX_HELPER_INTERFACE_MODE_LOOP;
+		else
+			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+	}
+
+	if (interface == 0
+	    && cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5
+	    && cvmx_sysinfo_get()->board_rev_major == 1) {
+		/*
+		 * Lie about interface type of CN3005 board. This
+		 * board has a switch on port 1 like the other
+		 * evaluation boards, but it is connected over RGMII
+		 * instead of GMII. Report GMII mode so that the
+		 * speed is forced to 1 Gbit full duplex. Other than
+		 * some initial configuration (which does not use the
+		 * output of this function) there is no difference in
+		 * setup between GMII and RGMII modes.
+ */ + return CVMX_HELPER_INTERFACE_MODE_GMII; + } + + /* Interface 1 is always disabled on CN31XX and CN30XX */ + if ((interface == 1) + && (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN30XX) + || OCTEON_IS_MODEL(OCTEON_CN50XX) + || OCTEON_IS_MODEL(OCTEON_CN52XX))) + return CVMX_HELPER_INTERFACE_MODE_DISABLED; + + mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface)); + + if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) { + switch (mode.cn56xx.mode) { + case 0: + return CVMX_HELPER_INTERFACE_MODE_DISABLED; + case 1: + return CVMX_HELPER_INTERFACE_MODE_XAUI; + case 2: + return CVMX_HELPER_INTERFACE_MODE_SGMII; + case 3: + return CVMX_HELPER_INTERFACE_MODE_PICMG; + default: + return CVMX_HELPER_INTERFACE_MODE_DISABLED; + } + } else { + if (!mode.s.en) + return CVMX_HELPER_INTERFACE_MODE_DISABLED; + + if (mode.s.type) { + if (OCTEON_IS_MODEL(OCTEON_CN38XX) + || OCTEON_IS_MODEL(OCTEON_CN58XX)) + return CVMX_HELPER_INTERFACE_MODE_SPI; + else + return CVMX_HELPER_INTERFACE_MODE_GMII; + } else + return CVMX_HELPER_INTERFACE_MODE_RGMII; + } +} + +/** + * Configure the IPD/PIP tagging and QoS options for a specific + * port. This function determines the POW work queue entry + * contents for a port. The setup performed here is controlled by + * the defines in executive-config.h. + * + * @ipd_port: Port to configure. This follows the IPD numbering, not the + * per interface numbering + * + * Returns Zero on success, negative on failure + */ +static int __cvmx_helper_port_setup_ipd(int ipd_port) +{ + union cvmx_pip_prt_cfgx port_config; + union cvmx_pip_prt_tagx tag_config; + + port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port)); + tag_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(ipd_port)); + + /* Have each port go to a different POW queue */ + port_config.s.qos = ipd_port & 0x7; + + /* Process the headers and place the IP header in the work queue */ + port_config.s.mode = CVMX_HELPER_INPUT_PORT_SKIP_MODE; + + tag_config.s.ip6_src_flag = CVMX_HELPER_INPUT_TAG_IPV6_SRC_IP; + tag_config.s.ip6_dst_flag = CVMX_HELPER_INPUT_TAG_IPV6_DST_IP; + tag_config.s.ip6_sprt_flag = CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT; + tag_config.s.ip6_dprt_flag = CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT; + tag_config.s.ip6_nxth_flag = CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER; + tag_config.s.ip4_src_flag = CVMX_HELPER_INPUT_TAG_IPV4_SRC_IP; + tag_config.s.ip4_dst_flag = CVMX_HELPER_INPUT_TAG_IPV4_DST_IP; + tag_config.s.ip4_sprt_flag = CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT; + tag_config.s.ip4_dprt_flag = CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT; + tag_config.s.ip4_pctl_flag = CVMX_HELPER_INPUT_TAG_IPV4_PROTOCOL; + tag_config.s.inc_prt_flag = CVMX_HELPER_INPUT_TAG_INPUT_PORT; + tag_config.s.tcp6_tag_type = CVMX_HELPER_INPUT_TAG_TYPE; + tag_config.s.tcp4_tag_type = CVMX_HELPER_INPUT_TAG_TYPE; + tag_config.s.ip6_tag_type = CVMX_HELPER_INPUT_TAG_TYPE; + tag_config.s.ip4_tag_type = CVMX_HELPER_INPUT_TAG_TYPE; + tag_config.s.non_tag_type = CVMX_HELPER_INPUT_TAG_TYPE; + /* Put all packets in group 0. Other groups can be used by the app */ + tag_config.s.grp = 0; + + cvmx_pip_config_port(ipd_port, port_config, tag_config); + + /* Give the user a chance to override our setting for each port */ + if (cvmx_override_ipd_port_setup) + cvmx_override_ipd_port_setup(ipd_port); + + return 0; +} + +/** + * This function probes an interface to determine the actual + * number of hardware ports connected to it. It doesn't setup the + * ports or enable them. 
The main goal here is to set the global + * interface_port_count[interface] correctly. Hardware setup of the + * ports will be performed later. + * + * @interface: Interface to probe + * + * Returns Zero on success, negative on failure + */ +int cvmx_helper_interface_probe(int interface) +{ + /* At this stage in the game we don't want packets to be moving yet. + The following probe calls should perform hardware setup + needed to determine port counts. Receive must still be disabled */ + switch (cvmx_helper_interface_get_mode(interface)) { + /* These types don't support ports to IPD/PKO */ + case CVMX_HELPER_INTERFACE_MODE_DISABLED: + case CVMX_HELPER_INTERFACE_MODE_PCIE: + interface_port_count[interface] = 0; + break; + /* XAUI is a single high speed port */ + case CVMX_HELPER_INTERFACE_MODE_XAUI: + interface_port_count[interface] = + __cvmx_helper_xaui_probe(interface); + break; + /* + * RGMII/GMII/MII are all treated about the same. Most + * functions refer to these ports as RGMII. + */ + case CVMX_HELPER_INTERFACE_MODE_RGMII: + case CVMX_HELPER_INTERFACE_MODE_GMII: + interface_port_count[interface] = + __cvmx_helper_rgmii_probe(interface); + break; + /* + * SPI4 can have 1-16 ports depending on the device at + * the other end. + */ + case CVMX_HELPER_INTERFACE_MODE_SPI: + interface_port_count[interface] = + __cvmx_helper_spi_probe(interface); + break; + /* + * SGMII can have 1-4 ports depending on how many are + * hooked up. + */ + case CVMX_HELPER_INTERFACE_MODE_SGMII: + case CVMX_HELPER_INTERFACE_MODE_PICMG: + interface_port_count[interface] = + __cvmx_helper_sgmii_probe(interface); + break; + /* PCI target Network Packet Interface */ + case CVMX_HELPER_INTERFACE_MODE_NPI: + interface_port_count[interface] = + __cvmx_helper_npi_probe(interface); + break; + /* + * Special loopback only ports. These are not the same + * as other ports in loopback mode. + */ + case CVMX_HELPER_INTERFACE_MODE_LOOP: + interface_port_count[interface] = + __cvmx_helper_loop_probe(interface); + break; + } + + interface_port_count[interface] = + __cvmx_helper_board_interface_probe(interface, + interface_port_count + [interface]); + + /* Make sure all global variables propagate to other cores */ + CVMX_SYNCWS; + + return 0; +} + +/** + * Setup the IPD/PIP for the ports on an interface. Packet + * classification and tagging are set for every port on the + * interface. The number of ports on the interface must already + * have been probed. + * + * @interface: Interface to setup IPD/PIP for + * + * Returns Zero on success, negative on failure + */ +static int __cvmx_helper_interface_setup_ipd(int interface) +{ + int ipd_port = cvmx_helper_get_ipd_port(interface, 0); + int num_ports = interface_port_count[interface]; + + while (num_ports--) { + __cvmx_helper_port_setup_ipd(ipd_port); + ipd_port++; + } + return 0; +} + +/** + * Setup global setting for IPD/PIP not related to a specific + * interface or port. This must be called before IPD is enabled. + * + * Returns Zero on success, negative on failure. 
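+ *
+ * The divisions in the cvmx_ipd_config() call below convert byte
+ * counts into the units the IPD expects: buffer and skip sizes in
+ * 8-byte words, and the two "back" arguments (with +8 added for the
+ * next-buffer pointer) in 128-byte cache lines.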
+ */ +static int __cvmx_helper_global_setup_ipd(void) +{ + /* Setup the global packet input options */ + cvmx_ipd_config(CVMX_FPA_PACKET_POOL_SIZE / 8, + CVMX_HELPER_FIRST_MBUFF_SKIP / 8, + CVMX_HELPER_NOT_FIRST_MBUFF_SKIP / 8, + /* The +8 is to account for the next ptr */ + (CVMX_HELPER_FIRST_MBUFF_SKIP + 8) / 128, + /* The +8 is to account for the next ptr */ + (CVMX_HELPER_NOT_FIRST_MBUFF_SKIP + 8) / 128, + CVMX_FPA_WQE_POOL, + CVMX_IPD_OPC_MODE_STT, + CVMX_HELPER_ENABLE_BACK_PRESSURE); + return 0; +} + +/** + * Setup the PKO for the ports on an interface. The number of + * queues per port and the priority of each PKO output queue + * is set here. PKO must be disabled when this function is called. + * + * @interface: Interface to setup PKO for + * + * Returns Zero on success, negative on failure + */ +static int __cvmx_helper_interface_setup_pko(int interface) +{ + /* + * Each packet output queue has an associated priority. The + * higher the priority, the more often it can send a packet. A + * priority of 8 means it can send in all 8 rounds of + * contention. We're going to make each queue one less than + * the last. The vector of priorities has been extended to + * support CN5xxx CPUs, where up to 16 queues can be + * associated to a port. To keep backward compatibility we + * don't change the initial 8 priorities and replicate them in + * the second half. With per-core PKO queues (PKO lockless + * operation) all queues have the same priority. + */ + uint64_t priorities[16] = + { 8, 7, 6, 5, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1 }; + + /* + * Setup the IPD/PIP and PKO for the ports discovered + * above. Here packet classification, tagging and output + * priorities are set. + */ + int ipd_port = cvmx_helper_get_ipd_port(interface, 0); + int num_ports = interface_port_count[interface]; + while (num_ports--) { + /* + * Give the user a chance to override the per queue + * priorities. + */ + if (cvmx_override_pko_queue_priority) + cvmx_override_pko_queue_priority(ipd_port, priorities); + + cvmx_pko_config_port(ipd_port, + cvmx_pko_get_base_queue_per_core(ipd_port, + 0), + cvmx_pko_get_num_queues(ipd_port), + priorities); + ipd_port++; + } + return 0; +} + +/** + * Setup global setting for PKO not related to a specific + * interface or port. This must be called before PKO is enabled. + * + * Returns Zero on success, negative on failure. + */ +static int __cvmx_helper_global_setup_pko(void) +{ + /* + * Disable tagwait FAU timeout. This needs to be done before + * anyone might start packet output using tags. + */ + union cvmx_iob_fau_timeout fau_to; + fau_to.u64 = 0; + fau_to.s.tout_val = 0xfff; + fau_to.s.tout_enb = 0; + cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_to.u64); + return 0; +} + +/** + * Setup global backpressure setting. 
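+ * Whether anything is done here is decided at compile time by
+ * CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE; when it is set, pause
+ * frame generation is overridden for every GMX-style interface
+ * found below.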
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_global_setup_backpressure(void)
+{
+#if CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE
+	/* Disable backpressure if configured to do so */
+	/* Disable backpressure (pause frame) generation */
+	int num_interfaces = cvmx_helper_get_number_of_interfaces();
+	int interface;
+	for (interface = 0; interface < num_interfaces; interface++) {
+		switch (cvmx_helper_interface_get_mode(interface)) {
+		case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+		case CVMX_HELPER_INTERFACE_MODE_PCIE:
+		case CVMX_HELPER_INTERFACE_MODE_NPI:
+		case CVMX_HELPER_INTERFACE_MODE_LOOP:
+		case CVMX_HELPER_INTERFACE_MODE_XAUI:
+			break;
+		case CVMX_HELPER_INTERFACE_MODE_RGMII:
+		case CVMX_HELPER_INTERFACE_MODE_GMII:
+		case CVMX_HELPER_INTERFACE_MODE_SPI:
+		case CVMX_HELPER_INTERFACE_MODE_SGMII:
+		case CVMX_HELPER_INTERFACE_MODE_PICMG:
+			cvmx_gmx_set_backpressure_override(interface, 0xf);
+			break;
+		}
+	}
+#endif
+
+	return 0;
+}
+
+/**
+ * Enable packet input/output from the hardware. This function is
+ * called after all internal setup is complete and IPD is enabled.
+ * After this function completes, packets will be accepted from the
+ * hardware ports. PKO should still be disabled to make sure packets
+ * aren't sent out of partially setup hardware.
+ *
+ * @interface: Interface to enable
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_packet_hardware_enable(int interface)
+{
+	int result = 0;
+	switch (cvmx_helper_interface_get_mode(interface)) {
+	/* These types don't support ports to IPD/PKO */
+	case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+	case CVMX_HELPER_INTERFACE_MODE_PCIE:
+		/* Nothing to do */
+		break;
+	/* XAUI is a single high speed port */
+	case CVMX_HELPER_INTERFACE_MODE_XAUI:
+		result = __cvmx_helper_xaui_enable(interface);
+		break;
+	/*
+	 * RGMII/GMII/MII are all treated about the same. Most
+	 * functions refer to these ports as RGMII
+	 */
+	case CVMX_HELPER_INTERFACE_MODE_RGMII:
+	case CVMX_HELPER_INTERFACE_MODE_GMII:
+		result = __cvmx_helper_rgmii_enable(interface);
+		break;
+	/*
+	 * SPI4 can have 1-16 ports depending on the device at
+	 * the other end
+	 */
+	case CVMX_HELPER_INTERFACE_MODE_SPI:
+		result = __cvmx_helper_spi_enable(interface);
+		break;
+	/*
+	 * SGMII can have 1-4 ports depending on how many are
+	 * hooked up
+	 */
+	case CVMX_HELPER_INTERFACE_MODE_SGMII:
+	case CVMX_HELPER_INTERFACE_MODE_PICMG:
+		result = __cvmx_helper_sgmii_enable(interface);
+		break;
+	/* PCI target Network Packet Interface */
+	case CVMX_HELPER_INTERFACE_MODE_NPI:
+		result = __cvmx_helper_npi_enable(interface);
+		break;
+	/*
+	 * Special loopback only ports.
These are not the same + * as other ports in loopback mode + */ + case CVMX_HELPER_INTERFACE_MODE_LOOP: + result = __cvmx_helper_loop_enable(interface); + break; + } + result |= __cvmx_helper_board_hardware_enable(interface); + return result; +} + +/** + * Function to adjust internal IPD pointer alignments + * + * Returns 0 on success + * !0 on failure + */ +int __cvmx_helper_errata_fix_ipd_ptr_alignment(void) +{ +#define FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES \ + (CVMX_FPA_PACKET_POOL_SIZE-8-CVMX_HELPER_FIRST_MBUFF_SKIP) +#define FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES \ + (CVMX_FPA_PACKET_POOL_SIZE-8-CVMX_HELPER_NOT_FIRST_MBUFF_SKIP) +#define FIX_IPD_OUTPORT 0 + /* Ports 0-15 are interface 0, 16-31 are interface 1 */ +#define INTERFACE(port) (port >> 4) +#define INDEX(port) (port & 0xf) + uint64_t *p64; + cvmx_pko_command_word0_t pko_command; + union cvmx_buf_ptr g_buffer, pkt_buffer; + cvmx_wqe_t *work; + int size, num_segs = 0, wqe_pcnt, pkt_pcnt; + union cvmx_gmxx_prtx_cfg gmx_cfg; + int retry_cnt; + int retry_loop_cnt; + int mtu; + int i; + cvmx_helper_link_info_t link_info; + + /* Save values for restore at end */ + uint64_t prtx_cfg = + cvmx_read_csr(CVMX_GMXX_PRTX_CFG + (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT))); + uint64_t tx_ptr_en = + cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT))); + uint64_t rx_ptr_en = + cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT))); + uint64_t rxx_jabber = + cvmx_read_csr(CVMX_GMXX_RXX_JABBER + (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT))); + uint64_t frame_max = + cvmx_read_csr(CVMX_GMXX_RXX_FRM_MAX + (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT))); + + /* Configure port to gig FDX as required for loopback mode */ + cvmx_helper_rgmii_internal_loopback(FIX_IPD_OUTPORT); + + /* + * Disable reception on all ports so if traffic is present it + * will not interfere. 
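	 * The loop below then sends carefully sized gather packets to
	 * this port through internal loopback until the IPD WQE and
	 * packet pointer counts read from CVMX_IPD_PTR_COUNT come back
	 * into alignment; the saved CSR values are restored on exit.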
+	 */
+	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)), 0);
+
+	cvmx_wait(100000000ull);
+
+	for (retry_loop_cnt = 0; retry_loop_cnt < 10; retry_loop_cnt++) {
+		retry_cnt = 100000;
+		wqe_pcnt = cvmx_read_csr(CVMX_IPD_PTR_COUNT);
+		pkt_pcnt = (wqe_pcnt >> 7) & 0x7f;
+		wqe_pcnt &= 0x7f;
+
+		num_segs = (2 + pkt_pcnt - wqe_pcnt) & 3;
+
+		if (num_segs == 0)
+			goto fix_ipd_exit;
+
+		num_segs += 1;
+
+		size =
+		    FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES +
+		    ((num_segs - 1) * FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES) -
+		    (FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES / 2);
+
+		cvmx_write_csr(CVMX_ASXX_PRT_LOOP(INTERFACE(FIX_IPD_OUTPORT)),
+			       1 << INDEX(FIX_IPD_OUTPORT));
+		CVMX_SYNC;
+
+		g_buffer.u64 = 0;
+		g_buffer.s.addr =
+		    cvmx_ptr_to_phys(cvmx_fpa_alloc(CVMX_FPA_WQE_POOL));
+		if (g_buffer.s.addr == 0) {
+			cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
+				     "buffer allocation failure.\n");
+			goto fix_ipd_exit;
+		}
+
+		g_buffer.s.pool = CVMX_FPA_WQE_POOL;
+		g_buffer.s.size = num_segs;
+
+		pkt_buffer.u64 = 0;
+		pkt_buffer.s.addr =
+		    cvmx_ptr_to_phys(cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL));
+		if (pkt_buffer.s.addr == 0) {
+			cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
+				     "buffer allocation failure.\n");
+			goto fix_ipd_exit;
+		}
+		pkt_buffer.s.i = 1;
+		pkt_buffer.s.pool = CVMX_FPA_PACKET_POOL;
+		pkt_buffer.s.size = FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES;
+
+		p64 = (uint64_t *) cvmx_phys_to_ptr(pkt_buffer.s.addr);
+		p64[0] = 0xffffffffffff0000ull;
+		p64[1] = 0x08004510ull;
+		p64[2] = ((uint64_t) (size - 14) << 48) | 0x5ae740004000ull;
+		p64[3] = 0x3a5fc0a81073c0a8ull;
+
+		for (i = 0; i < num_segs; i++) {
+			if (i > 0)
+				pkt_buffer.s.size =
+				    FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES;
+
+			if (i == (num_segs - 1))
+				pkt_buffer.s.i = 0;
+
+			*(uint64_t *) cvmx_phys_to_ptr(g_buffer.s.addr +
+						       8 * i) = pkt_buffer.u64;
+		}
+
+		/* Build the PKO command */
+		pko_command.u64 = 0;
+		pko_command.s.segs = num_segs;
+		pko_command.s.total_bytes = size;
+		pko_command.s.dontfree = 0;
+		pko_command.s.gather = 1;
+
+		gmx_cfg.u64 =
+		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG
+				  (INDEX(FIX_IPD_OUTPORT),
+				   INTERFACE(FIX_IPD_OUTPORT)));
+		gmx_cfg.s.en = 1;
+		cvmx_write_csr(CVMX_GMXX_PRTX_CFG
+			       (INDEX(FIX_IPD_OUTPORT),
+				INTERFACE(FIX_IPD_OUTPORT)), gmx_cfg.u64);
+		cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
+			       1 << INDEX(FIX_IPD_OUTPORT));
+		cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
+			       1 << INDEX(FIX_IPD_OUTPORT));
+
+		mtu =
+		    cvmx_read_csr(CVMX_GMXX_RXX_JABBER
+				  (INDEX(FIX_IPD_OUTPORT),
+				   INTERFACE(FIX_IPD_OUTPORT)));
+		cvmx_write_csr(CVMX_GMXX_RXX_JABBER
+			       (INDEX(FIX_IPD_OUTPORT),
+				INTERFACE(FIX_IPD_OUTPORT)), 65392 - 14 - 4);
+		cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX
+			       (INDEX(FIX_IPD_OUTPORT),
+				INTERFACE(FIX_IPD_OUTPORT)), 65392 - 14 - 4);
+
+		cvmx_pko_send_packet_prepare(FIX_IPD_OUTPORT,
+					     cvmx_pko_get_base_queue
+					     (FIX_IPD_OUTPORT),
+					     CVMX_PKO_LOCK_CMD_QUEUE);
+		cvmx_pko_send_packet_finish(FIX_IPD_OUTPORT,
+					    cvmx_pko_get_base_queue
+					    (FIX_IPD_OUTPORT), pko_command,
+					    g_buffer, CVMX_PKO_LOCK_CMD_QUEUE);
+
+		CVMX_SYNC;
+
+		do {
+			work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
+			retry_cnt--;
+		} while ((work == NULL) && (retry_cnt > 0));
+
+		if (!retry_cnt)
+			cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
+				     "get_work() timeout occurred.\n");
+
+		/* Free packet */
+		if (work)
+			cvmx_helper_free_packet_data(work);
+	}
+
+fix_ipd_exit:
+
+	/* Return CSR configs to saved values */
+	cvmx_write_csr(CVMX_GMXX_PRTX_CFG
+		       (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
+		       prtx_cfg);
+	
cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)), + tx_ptr_en); + cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)), + rx_ptr_en); + cvmx_write_csr(CVMX_GMXX_RXX_JABBER + (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)), + rxx_jabber); + cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX + (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)), + frame_max); + cvmx_write_csr(CVMX_ASXX_PRT_LOOP(INTERFACE(FIX_IPD_OUTPORT)), 0); + /* Set link to down so autonegotiation will set it up again */ + link_info.u64 = 0; + cvmx_helper_link_set(FIX_IPD_OUTPORT, link_info); + + /* + * Bring the link back up as autonegotiation is not done in + * user applications. + */ + cvmx_helper_link_autoconf(FIX_IPD_OUTPORT); + + CVMX_SYNC; + if (num_segs) + cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT failed.\n"); + + return !!num_segs; + +} + +/** + * Called after all internal packet IO paths are setup. This + * function enables IPD/PIP and begins packet input and output. + * + * Returns Zero on success, negative on failure + */ +int cvmx_helper_ipd_and_packet_input_enable(void) +{ + int num_interfaces; + int interface; + + /* Enable IPD */ + cvmx_ipd_enable(); + + /* + * Time to enable hardware ports packet input and output. Note + * that at this point IPD/PIP must be fully functional and PKO + * must be disabled + */ + num_interfaces = cvmx_helper_get_number_of_interfaces(); + for (interface = 0; interface < num_interfaces; interface++) { + if (cvmx_helper_ports_on_interface(interface) > 0) + __cvmx_helper_packet_hardware_enable(interface); + } + + /* Finally enable PKO now that the entire path is up and running */ + cvmx_pko_enable(); + + if ((OCTEON_IS_MODEL(OCTEON_CN31XX_PASS1) + || OCTEON_IS_MODEL(OCTEON_CN30XX_PASS1)) + && (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM)) + __cvmx_helper_errata_fix_ipd_ptr_alignment(); + return 0; +} + +/** + * Initialize the PIP, IPD, and PKO hardware to support + * simple priority based queues for the ethernet ports. Each + * port is configured with a number of priority queues based + * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower + * priority than the previous. + * + * Returns Zero on success, non-zero on failure + */ +int cvmx_helper_initialize_packet_io_global(void) +{ + int result = 0; + int interface; + union cvmx_l2c_cfg l2c_cfg; + union cvmx_smix_en smix_en; + const int num_interfaces = cvmx_helper_get_number_of_interfaces(); + + /* + * CN52XX pass 1: Due to a bug in 2nd order CDR, it needs to + * be disabled. + */ + if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0)) + __cvmx_helper_errata_qlm_disable_2nd_order_cdr(1); + + /* + * Tell L2 to give the IOB statically higher priority compared + * to the cores. This avoids conditions where IO blocks might + * be starved under very high L2 loads. 
+ */ + l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG); + l2c_cfg.s.lrf_arb_mode = 0; + l2c_cfg.s.rfb_arb_mode = 0; + cvmx_write_csr(CVMX_L2C_CFG, l2c_cfg.u64); + + /* Make sure SMI/MDIO is enabled so we can query PHYs */ + smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(0)); + if (!smix_en.s.en) { + smix_en.s.en = 1; + cvmx_write_csr(CVMX_SMIX_EN(0), smix_en.u64); + } + + /* Newer chips actually have two SMI/MDIO interfaces */ + if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) && + !OCTEON_IS_MODEL(OCTEON_CN58XX) && + !OCTEON_IS_MODEL(OCTEON_CN50XX)) { + smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(1)); + if (!smix_en.s.en) { + smix_en.s.en = 1; + cvmx_write_csr(CVMX_SMIX_EN(1), smix_en.u64); + } + } + + cvmx_pko_initialize_global(); + for (interface = 0; interface < num_interfaces; interface++) { + result |= cvmx_helper_interface_probe(interface); + if (cvmx_helper_ports_on_interface(interface) > 0) + cvmx_dprintf("Interface %d has %d ports (%s)\n", + interface, + cvmx_helper_ports_on_interface(interface), + cvmx_helper_interface_mode_to_string + (cvmx_helper_interface_get_mode + (interface))); + result |= __cvmx_helper_interface_setup_ipd(interface); + result |= __cvmx_helper_interface_setup_pko(interface); + } + + result |= __cvmx_helper_global_setup_ipd(); + result |= __cvmx_helper_global_setup_pko(); + + /* Enable any flow control and backpressure */ + result |= __cvmx_helper_global_setup_backpressure(); + +#if CVMX_HELPER_ENABLE_IPD + result |= cvmx_helper_ipd_and_packet_input_enable(); +#endif + return result; +} + +/** + * Does core local initialization for packet io + * + * Returns Zero on success, non-zero on failure + */ +int cvmx_helper_initialize_packet_io_local(void) +{ + return cvmx_pko_initialize_local(); +} + +/** + * Auto configure an IPD/PKO port link state and speed. This + * function basically does the equivalent of: + * cvmx_helper_link_set(ipd_port, cvmx_helper_link_get(ipd_port)); + * + * @ipd_port: IPD/PKO port to auto configure + * + * Returns Link state after configure + */ +cvmx_helper_link_info_t cvmx_helper_link_autoconf(int ipd_port) +{ + cvmx_helper_link_info_t link_info; + int interface = cvmx_helper_get_interface_num(ipd_port); + int index = cvmx_helper_get_interface_index_num(ipd_port); + + if (index >= cvmx_helper_ports_on_interface(interface)) { + link_info.u64 = 0; + return link_info; + } + + link_info = cvmx_helper_link_get(ipd_port); + if (link_info.u64 == port_link_info[ipd_port].u64) + return link_info; + + /* If we fail to set the link speed, port_link_info will not change */ + cvmx_helper_link_set(ipd_port, link_info); + + /* + * port_link_info should be the current value, which will be + * different than expect if cvmx_helper_link_set() failed. + */ + return port_link_info[ipd_port]; +} + +/** + * Return the link state of an IPD/PKO port as returned by + * auto negotiation. The result of this function may not match + * Octeon's link config if auto negotiation has changed since + * the last call to cvmx_helper_link_set(). 
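+ * A typical caller simply feeds the result back into the hardware
+ * configuration; this is what cvmx_helper_link_autoconf() does:
+ *
+ *   cvmx_helper_link_set(ipd_port, cvmx_helper_link_get(ipd_port));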
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port)
+{
+	cvmx_helper_link_info_t result;
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	int index = cvmx_helper_get_interface_index_num(ipd_port);
+
+	/* The default result will be a down link unless the code below
+	   changes it */
+	result.u64 = 0;
+
+	if (index >= cvmx_helper_ports_on_interface(interface))
+		return result;
+
+	switch (cvmx_helper_interface_get_mode(interface)) {
+	case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+	case CVMX_HELPER_INTERFACE_MODE_PCIE:
+		/* Network links are not supported */
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_XAUI:
+		result = __cvmx_helper_xaui_link_get(ipd_port);
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_GMII:
+		if (index == 0)
+			result = __cvmx_helper_rgmii_link_get(ipd_port);
+		else {
+			result.s.full_duplex = 1;
+			result.s.link_up = 1;
+			result.s.speed = 1000;
+		}
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_RGMII:
+		result = __cvmx_helper_rgmii_link_get(ipd_port);
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_SPI:
+		result = __cvmx_helper_spi_link_get(ipd_port);
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_SGMII:
+	case CVMX_HELPER_INTERFACE_MODE_PICMG:
+		result = __cvmx_helper_sgmii_link_get(ipd_port);
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_NPI:
+	case CVMX_HELPER_INTERFACE_MODE_LOOP:
+		/* Network links are not supported */
+		break;
+	}
+	return result;
+}
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @ipd_port:  IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+int cvmx_helper_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
+{
+	int result = -1;
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+	int index = cvmx_helper_get_interface_index_num(ipd_port);
+
+	if (index >= cvmx_helper_ports_on_interface(interface))
+		return -1;
+
+	switch (cvmx_helper_interface_get_mode(interface)) {
+	case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+	case CVMX_HELPER_INTERFACE_MODE_PCIE:
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_XAUI:
+		result = __cvmx_helper_xaui_link_set(ipd_port, link_info);
+		break;
+	/*
+	 * RGMII/GMII/MII are all treated about the same. Most
+	 * functions refer to these ports as RGMII.
+	 */
+	case CVMX_HELPER_INTERFACE_MODE_RGMII:
+	case CVMX_HELPER_INTERFACE_MODE_GMII:
+		result = __cvmx_helper_rgmii_link_set(ipd_port, link_info);
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_SPI:
+		result = __cvmx_helper_spi_link_set(ipd_port, link_info);
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_SGMII:
+	case CVMX_HELPER_INTERFACE_MODE_PICMG:
+		result = __cvmx_helper_sgmii_link_set(ipd_port, link_info);
+		break;
+	case CVMX_HELPER_INTERFACE_MODE_NPI:
+	case CVMX_HELPER_INTERFACE_MODE_LOOP:
+		break;
+	}
+	/* Set the port_link_info here so that the link status is updated
+	   no matter how cvmx_helper_link_set is called. We don't change
+	   the value if link_set failed */
+	if (result == 0)
+		port_link_info[ipd_port].u64 = link_info.u64;
+	return result;
+}
+
+/**
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ * + * @ipd_port: IPD/PKO port to loopback. + * @enable_internal: + * Non zero if you want internal loopback + * @enable_external: + * Non zero if you want external loopback + * + * Returns Zero on success, negative on failure. + */ +int cvmx_helper_configure_loopback(int ipd_port, int enable_internal, + int enable_external) +{ + int result = -1; + int interface = cvmx_helper_get_interface_num(ipd_port); + int index = cvmx_helper_get_interface_index_num(ipd_port); + + if (index >= cvmx_helper_ports_on_interface(interface)) + return -1; + + switch (cvmx_helper_interface_get_mode(interface)) { + case CVMX_HELPER_INTERFACE_MODE_DISABLED: + case CVMX_HELPER_INTERFACE_MODE_PCIE: + case CVMX_HELPER_INTERFACE_MODE_SPI: + case CVMX_HELPER_INTERFACE_MODE_NPI: + case CVMX_HELPER_INTERFACE_MODE_LOOP: + break; + case CVMX_HELPER_INTERFACE_MODE_XAUI: + result = + __cvmx_helper_xaui_configure_loopback(ipd_port, + enable_internal, + enable_external); + break; + case CVMX_HELPER_INTERFACE_MODE_RGMII: + case CVMX_HELPER_INTERFACE_MODE_GMII: + result = + __cvmx_helper_rgmii_configure_loopback(ipd_port, + enable_internal, + enable_external); + break; + case CVMX_HELPER_INTERFACE_MODE_SGMII: + case CVMX_HELPER_INTERFACE_MODE_PICMG: + result = + __cvmx_helper_sgmii_configure_loopback(ipd_port, + enable_internal, + enable_external); + break; + } + return result; +} diff --git a/drivers/staging/octeon/cvmx-helper.h b/drivers/staging/octeon/cvmx-helper.h new file mode 100644 index 0000000..51916f3 --- /dev/null +++ b/drivers/staging/octeon/cvmx-helper.h @@ -0,0 +1,227 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/* + * + * Helper functions for common, but complicated tasks. + * + */ + +#ifndef __CVMX_HELPER_H__ +#define __CVMX_HELPER_H__ + +#include "cvmx-config.h" +#include "cvmx-fpa.h" +#include "cvmx-wqe.h" + +typedef enum { + CVMX_HELPER_INTERFACE_MODE_DISABLED, + CVMX_HELPER_INTERFACE_MODE_RGMII, + CVMX_HELPER_INTERFACE_MODE_GMII, + CVMX_HELPER_INTERFACE_MODE_SPI, + CVMX_HELPER_INTERFACE_MODE_PCIE, + CVMX_HELPER_INTERFACE_MODE_XAUI, + CVMX_HELPER_INTERFACE_MODE_SGMII, + CVMX_HELPER_INTERFACE_MODE_PICMG, + CVMX_HELPER_INTERFACE_MODE_NPI, + CVMX_HELPER_INTERFACE_MODE_LOOP, +} cvmx_helper_interface_mode_t; + +typedef union { + uint64_t u64; + struct { + uint64_t reserved_20_63:44; + uint64_t link_up:1; /**< Is the physical link up? 
*/ + uint64_t full_duplex:1; /**< 1 if the link is full duplex */ + uint64_t speed:18; /**< Speed of the link in Mbps */ + } s; +} cvmx_helper_link_info_t; + +#include "cvmx-helper-fpa.h" + +#include <asm/octeon/cvmx-helper-errata.h> +#include "cvmx-helper-loop.h" +#include "cvmx-helper-npi.h" +#include "cvmx-helper-rgmii.h" +#include "cvmx-helper-sgmii.h" +#include "cvmx-helper-spi.h" +#include "cvmx-helper-util.h" +#include "cvmx-helper-xaui.h" + +/** + * cvmx_override_pko_queue_priority(int ipd_port, uint64_t + * priorities[16]) is a function pointer. It is meant to allow + * customization of the PKO queue priorities based on the port + * number. Users should set this pointer to a function before + * calling any cvmx-helper operations. + */ +extern void (*cvmx_override_pko_queue_priority) (int pko_port, + uint64_t priorities[16]); + +/** + * cvmx_override_ipd_port_setup(int ipd_port) is a function + * pointer. It is meant to allow customization of the IPD port + * setup before packet input/output comes online. It is called + * after cvmx-helper does the default IPD configuration, but + * before IPD is enabled. Users should set this pointer to a + * function before calling any cvmx-helper operations. + */ +extern void (*cvmx_override_ipd_port_setup) (int ipd_port); + +/** + * This function enables the IPD and also enables the packet interfaces. + * The packet interfaces (RGMII and SPI) must be enabled after the + * IPD. This should be called by the user program after any additional + * IPD configuration changes are made if CVMX_HELPER_ENABLE_IPD + * is not set in the executive-config.h file. + * + * Returns 0 on success + * -1 on failure + */ +extern int cvmx_helper_ipd_and_packet_input_enable(void); + +/** + * Initialize the PIP, IPD, and PKO hardware to support + * simple priority based queues for the ethernet ports. Each + * port is configured with a number of priority queues based + * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower + * priority than the previous. + * + * Returns Zero on success, non-zero on failure + */ +extern int cvmx_helper_initialize_packet_io_global(void); + +/** + * Does core local initialization for packet io + * + * Returns Zero on success, non-zero on failure + */ +extern int cvmx_helper_initialize_packet_io_local(void); + +/** + * Returns the number of ports on the given interface. + * The interface must be initialized before the port count + * can be returned. + * + * @interface: Which interface to return port count for. + * + * Returns Port count for interface + * -1 for uninitialized interface + */ +extern int cvmx_helper_ports_on_interface(int interface); + +/** + * Return the number of interfaces the chip has. Each interface + * may have multiple ports. Most chips support two interfaces, + * but the CNX0XX and CNX1XX are exceptions. These only support + * one interface. + * + * Returns Number of interfaces on chip + */ +extern int cvmx_helper_get_number_of_interfaces(void); + +/** + * Get the operating mode of an interface. Depending on the Octeon + * chip and configuration, this function returns an enumeration + * of the type of packet I/O supported by an interface. + * + * @interface: Interface to probe + * + * Returns Mode of the interface. Unknown or unsupported interfaces return + * DISABLED. + */ +extern cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int + interface); + +/** + * Auto configure an IPD/PKO port link state and speed. 
This
+ * function basically does the equivalent of:
+ * cvmx_helper_link_set(ipd_port, cvmx_helper_link_get(ipd_port));
+ *
+ * @ipd_port: IPD/PKO port to auto configure
+ *
+ * Returns Link state after configure
+ */
+extern cvmx_helper_link_info_t cvmx_helper_link_autoconf(int ipd_port);
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+extern cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port);
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @ipd_port:  IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int cvmx_helper_link_set(int ipd_port,
+				cvmx_helper_link_info_t link_info);
+
+/**
+ * This function probes an interface to determine the actual
+ * number of hardware ports connected to it. It doesn't setup the
+ * ports or enable them. The main goal here is to set the global
+ * interface_port_count[interface] correctly. Hardware setup of the
+ * ports will be performed later.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int cvmx_helper_interface_probe(int interface);
+
+/**
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ *
+ * @ipd_port: IPD/PKO port to loopback.
+ * @enable_internal:
+ *                 Non zero if you want internal loopback
+ * @enable_external:
+ *                 Non zero if you want external loopback
+ *
+ * Returns Zero on success, negative on failure.
+ */
+extern int cvmx_helper_configure_loopback(int ipd_port, int enable_internal,
+					  int enable_external);
+
+#endif /* __CVMX_HELPER_H__ */
diff --git a/drivers/staging/octeon/cvmx-interrupt-decodes.c b/drivers/staging/octeon/cvmx-interrupt-decodes.c
new file mode 100644
index 0000000..a3337e3
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-interrupt-decodes.c
@@ -0,0 +1,371 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2009 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * Automatically generated functions useful for enabling
+ * and decoding RSL_INT_BLOCKS interrupts.
+ *
+ */
+
+#include <asm/octeon/octeon.h>
+
+#include "cvmx-gmxx-defs.h"
+#include "cvmx-pcsx-defs.h"
+#include "cvmx-pcsxx-defs.h"
+#include "cvmx-spxx-defs.h"
+#include "cvmx-stxx-defs.h"
+
+#ifndef PRINT_ERROR
+#define PRINT_ERROR(format, ...)
+#endif
+
+
+/**
+ * __cvmx_interrupt_gmxx_rxx_int_en_enable enables all interrupt bits in cvmx_gmxx_rxx_int_en_t
+ */
+void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
+{
+ union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
+ cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, block),
+ cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, block)));
+ gmx_rx_int_en.u64 = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+ /* Skipping gmx_rx_int_en.s.reserved_29_63 */
+ gmx_rx_int_en.s.hg2cc = 1;
+ gmx_rx_int_en.s.hg2fld = 1;
+ gmx_rx_int_en.s.undat = 1;
+ gmx_rx_int_en.s.uneop = 1;
+ gmx_rx_int_en.s.unsop = 1;
+ gmx_rx_int_en.s.bad_term = 1;
+ gmx_rx_int_en.s.bad_seq = 1;
+ gmx_rx_int_en.s.rem_fault = 1;
+ gmx_rx_int_en.s.loc_fault = 1;
+ gmx_rx_int_en.s.pause_drp = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_16_18 */
+ /*gmx_rx_int_en.s.ifgerr = 1; */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+ gmx_rx_int_en.s.ovrerr = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_9_9 */
+ gmx_rx_int_en.s.skperr = 1;
+ gmx_rx_int_en.s.rcverr = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_5_6 */
+ /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+ gmx_rx_int_en.s.jabber = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_2_2 */
+ gmx_rx_int_en.s.carext = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_0_0 */
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
+ /* Skipping gmx_rx_int_en.s.reserved_19_63 */
+ /*gmx_rx_int_en.s.phy_dupx = 1; */
+ /*gmx_rx_int_en.s.phy_spd = 1; */
+ /*gmx_rx_int_en.s.phy_link = 1; */
+ /*gmx_rx_int_en.s.ifgerr = 1; */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+ gmx_rx_int_en.s.ovrerr = 1;
+ gmx_rx_int_en.s.niberr = 1;
+ gmx_rx_int_en.s.skperr = 1;
+ gmx_rx_int_en.s.rcverr = 1;
+ /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
+ gmx_rx_int_en.s.alnerr = 1;
+ /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+ gmx_rx_int_en.s.jabber = 1;
+ gmx_rx_int_en.s.maxerr = 1;
+ gmx_rx_int_en.s.carext = 1;
+ gmx_rx_int_en.s.minerr = 1;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ /* Skipping gmx_rx_int_en.s.reserved_20_63 */
+ gmx_rx_int_en.s.pause_drp = 1;
+ /*gmx_rx_int_en.s.phy_dupx = 1; */
+ /*gmx_rx_int_en.s.phy_spd = 1; */
+ /*gmx_rx_int_en.s.phy_link = 1; */
+ /*gmx_rx_int_en.s.ifgerr = 1; */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+ gmx_rx_int_en.s.ovrerr = 1;
+ gmx_rx_int_en.s.niberr = 1;
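+ /* Unlike CN30XX, the min/max frame-length error bits are
+ reserved on CN50XX, hence the skipped positions below. */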
+ gmx_rx_int_en.s.skperr = 1;
+ gmx_rx_int_en.s.rcverr = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_6_6 */
+ gmx_rx_int_en.s.alnerr = 1;
+ /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+ gmx_rx_int_en.s.jabber = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_2_2 */
+ gmx_rx_int_en.s.carext = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_0_0 */
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+ /* Skipping gmx_rx_int_en.s.reserved_19_63 */
+ /*gmx_rx_int_en.s.phy_dupx = 1; */
+ /*gmx_rx_int_en.s.phy_spd = 1; */
+ /*gmx_rx_int_en.s.phy_link = 1; */
+ /*gmx_rx_int_en.s.ifgerr = 1; */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+ gmx_rx_int_en.s.ovrerr = 1;
+ gmx_rx_int_en.s.niberr = 1;
+ gmx_rx_int_en.s.skperr = 1;
+ gmx_rx_int_en.s.rcverr = 1;
+ /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
+ gmx_rx_int_en.s.alnerr = 1;
+ /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+ gmx_rx_int_en.s.jabber = 1;
+ gmx_rx_int_en.s.maxerr = 1;
+ gmx_rx_int_en.s.carext = 1;
+ gmx_rx_int_en.s.minerr = 1;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN31XX)) {
+ /* Skipping gmx_rx_int_en.s.reserved_19_63 */
+ /*gmx_rx_int_en.s.phy_dupx = 1; */
+ /*gmx_rx_int_en.s.phy_spd = 1; */
+ /*gmx_rx_int_en.s.phy_link = 1; */
+ /*gmx_rx_int_en.s.ifgerr = 1; */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+ gmx_rx_int_en.s.ovrerr = 1;
+ gmx_rx_int_en.s.niberr = 1;
+ gmx_rx_int_en.s.skperr = 1;
+ gmx_rx_int_en.s.rcverr = 1;
+ /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
+ gmx_rx_int_en.s.alnerr = 1;
+ /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+ gmx_rx_int_en.s.jabber = 1;
+ gmx_rx_int_en.s.maxerr = 1;
+ gmx_rx_int_en.s.carext = 1;
+ gmx_rx_int_en.s.minerr = 1;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ /* Skipping gmx_rx_int_en.s.reserved_20_63 */
+ gmx_rx_int_en.s.pause_drp = 1;
+ /*gmx_rx_int_en.s.phy_dupx = 1; */
+ /*gmx_rx_int_en.s.phy_spd = 1; */
+ /*gmx_rx_int_en.s.phy_link = 1; */
+ /*gmx_rx_int_en.s.ifgerr = 1; */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+ gmx_rx_int_en.s.ovrerr = 1;
+ gmx_rx_int_en.s.niberr = 1;
+ gmx_rx_int_en.s.skperr = 1;
+ gmx_rx_int_en.s.rcverr = 1;
+ /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
+ gmx_rx_int_en.s.alnerr = 1;
+ /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+ gmx_rx_int_en.s.jabber = 1;
+ gmx_rx_int_en.s.maxerr = 1;
+ gmx_rx_int_en.s.carext = 1;
+ gmx_rx_int_en.s.minerr = 1;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ /* Skipping gmx_rx_int_en.s.reserved_29_63 */
+ gmx_rx_int_en.s.hg2cc = 1;
+ gmx_rx_int_en.s.hg2fld = 1;
+ gmx_rx_int_en.s.undat = 1;
+ gmx_rx_int_en.s.uneop = 1;
+ gmx_rx_int_en.s.unsop = 1;
+ gmx_rx_int_en.s.bad_term = 1;
+ gmx_rx_int_en.s.bad_seq = 0;
+ gmx_rx_int_en.s.rem_fault = 1;
+ gmx_rx_int_en.s.loc_fault = 0;
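+ /* Note: unlike CN56XX above, bad_seq and loc_fault are
+ left masked on CN52XX. */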
+ gmx_rx_int_en.s.pause_drp = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_16_18 */
+ /*gmx_rx_int_en.s.ifgerr = 1; */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+ gmx_rx_int_en.s.ovrerr = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_9_9 */
+ gmx_rx_int_en.s.skperr = 1;
+ gmx_rx_int_en.s.rcverr = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_5_6 */
+ /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+ gmx_rx_int_en.s.jabber = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_2_2 */
+ gmx_rx_int_en.s.carext = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_0_0 */
+ }
+ cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, block), gmx_rx_int_en.u64);
+}
+/**
+ * __cvmx_interrupt_pcsx_intx_en_reg_enable enables all interrupt bits in cvmx_pcsx_intx_en_reg_t
+ */
+void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block)
+{
+ union cvmx_pcsx_intx_en_reg pcs_int_en_reg;
+ cvmx_write_csr(CVMX_PCSX_INTX_REG(index, block),
+ cvmx_read_csr(CVMX_PCSX_INTX_REG(index, block)));
+ pcs_int_en_reg.u64 = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+ /* Skipping pcs_int_en_reg.s.reserved_12_63 */
+ /*pcs_int_en_reg.s.dup = 1; // This happens during normal operation */
+ pcs_int_en_reg.s.sync_bad_en = 1;
+ pcs_int_en_reg.s.an_bad_en = 1;
+ pcs_int_en_reg.s.rxlock_en = 1;
+ pcs_int_en_reg.s.rxbad_en = 1;
+ /*pcs_int_en_reg.s.rxerr_en = 1; // This happens during normal operation */
+ pcs_int_en_reg.s.txbad_en = 1;
+ pcs_int_en_reg.s.txfifo_en = 1;
+ pcs_int_en_reg.s.txfifu_en = 1;
+ pcs_int_en_reg.s.an_err_en = 1;
+ /*pcs_int_en_reg.s.xmit_en = 1; // This happens during normal operation */
+ /*pcs_int_en_reg.s.lnkspd_en = 1; // This happens during normal operation */
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ /* Skipping pcs_int_en_reg.s.reserved_12_63 */
+ /*pcs_int_en_reg.s.dup = 1; // This happens during normal operation */
+ pcs_int_en_reg.s.sync_bad_en = 1;
+ pcs_int_en_reg.s.an_bad_en = 1;
+ pcs_int_en_reg.s.rxlock_en = 1;
+ pcs_int_en_reg.s.rxbad_en = 1;
+ /*pcs_int_en_reg.s.rxerr_en = 1; // This happens during normal operation */
+ pcs_int_en_reg.s.txbad_en = 1;
+ pcs_int_en_reg.s.txfifo_en = 1;
+ pcs_int_en_reg.s.txfifu_en = 1;
+ pcs_int_en_reg.s.an_err_en = 1;
+ /*pcs_int_en_reg.s.xmit_en = 1; // This happens during normal operation */
+ /*pcs_int_en_reg.s.lnkspd_en = 1; // This happens during normal operation */
+ }
+ cvmx_write_csr(CVMX_PCSX_INTX_EN_REG(index, block), pcs_int_en_reg.u64);
+}
+/**
+ * __cvmx_interrupt_pcsxx_int_en_reg_enable enables all interrupt bits in cvmx_pcsxx_int_en_reg_t
+ */
+void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index)
+{
+ union cvmx_pcsxx_int_en_reg pcsx_int_en_reg;
+ cvmx_write_csr(CVMX_PCSXX_INT_REG(index),
+ cvmx_read_csr(CVMX_PCSXX_INT_REG(index)));
+ pcsx_int_en_reg.u64 = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+ /* Skipping pcsx_int_en_reg.s.reserved_6_63 */
+ pcsx_int_en_reg.s.algnlos_en = 1;
+ pcsx_int_en_reg.s.synlos_en = 1;
+ pcsx_int_en_reg.s.bitlckls_en = 1;
+ pcsx_int_en_reg.s.rxsynbad_en = 1;
+ pcsx_int_en_reg.s.rxbad_en = 1;
+ pcsx_int_en_reg.s.txflt_en = 1;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ /* Skipping pcsx_int_en_reg.s.reserved_6_63 */
+ pcsx_int_en_reg.s.algnlos_en = 1;
+ pcsx_int_en_reg.s.synlos_en = 1;
+ pcsx_int_en_reg.s.bitlckls_en = 0; /* Happens if XAUI module is not installed */
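+ /* Masking bit lock loss on CN52XX avoids spurious interrupts
+ when no XAUI module is present. */
+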
pcsx_int_en_reg.s.rxsynbad_en = 1; + pcsx_int_en_reg.s.rxbad_en = 1; + pcsx_int_en_reg.s.txflt_en = 1; + } + cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(index), pcsx_int_en_reg.u64); +} + +/** + * __cvmx_interrupt_spxx_int_msk_enable enables all interrupt bits in cvmx_spxx_int_msk_t + */ +void __cvmx_interrupt_spxx_int_msk_enable(int index) +{ + union cvmx_spxx_int_msk spx_int_msk; + cvmx_write_csr(CVMX_SPXX_INT_REG(index), + cvmx_read_csr(CVMX_SPXX_INT_REG(index))); + spx_int_msk.u64 = 0; + if (OCTEON_IS_MODEL(OCTEON_CN38XX)) { + /* Skipping spx_int_msk.s.reserved_12_63 */ + spx_int_msk.s.calerr = 1; + spx_int_msk.s.syncerr = 1; + spx_int_msk.s.diperr = 1; + spx_int_msk.s.tpaovr = 1; + spx_int_msk.s.rsverr = 1; + spx_int_msk.s.drwnng = 1; + spx_int_msk.s.clserr = 1; + spx_int_msk.s.spiovr = 1; + /* Skipping spx_int_msk.s.reserved_2_3 */ + spx_int_msk.s.abnorm = 1; + spx_int_msk.s.prtnxa = 1; + } + if (OCTEON_IS_MODEL(OCTEON_CN58XX)) { + /* Skipping spx_int_msk.s.reserved_12_63 */ + spx_int_msk.s.calerr = 1; + spx_int_msk.s.syncerr = 1; + spx_int_msk.s.diperr = 1; + spx_int_msk.s.tpaovr = 1; + spx_int_msk.s.rsverr = 1; + spx_int_msk.s.drwnng = 1; + spx_int_msk.s.clserr = 1; + spx_int_msk.s.spiovr = 1; + /* Skipping spx_int_msk.s.reserved_2_3 */ + spx_int_msk.s.abnorm = 1; + spx_int_msk.s.prtnxa = 1; + } + cvmx_write_csr(CVMX_SPXX_INT_MSK(index), spx_int_msk.u64); +} +/** + * __cvmx_interrupt_stxx_int_msk_enable enables all interrupt bits in cvmx_stxx_int_msk_t + */ +void __cvmx_interrupt_stxx_int_msk_enable(int index) +{ + union cvmx_stxx_int_msk stx_int_msk; + cvmx_write_csr(CVMX_STXX_INT_REG(index), + cvmx_read_csr(CVMX_STXX_INT_REG(index))); + stx_int_msk.u64 = 0; + if (OCTEON_IS_MODEL(OCTEON_CN38XX)) { + /* Skipping stx_int_msk.s.reserved_8_63 */ + stx_int_msk.s.frmerr = 1; + stx_int_msk.s.unxfrm = 1; + stx_int_msk.s.nosync = 1; + stx_int_msk.s.diperr = 1; + stx_int_msk.s.datovr = 1; + stx_int_msk.s.ovrbst = 1; + stx_int_msk.s.calpar1 = 1; + stx_int_msk.s.calpar0 = 1; + } + if (OCTEON_IS_MODEL(OCTEON_CN58XX)) { + /* Skipping stx_int_msk.s.reserved_8_63 */ + stx_int_msk.s.frmerr = 1; + stx_int_msk.s.unxfrm = 1; + stx_int_msk.s.nosync = 1; + stx_int_msk.s.diperr = 1; + stx_int_msk.s.datovr = 1; + stx_int_msk.s.ovrbst = 1; + stx_int_msk.s.calpar1 = 1; + stx_int_msk.s.calpar0 = 1; + } + cvmx_write_csr(CVMX_STXX_INT_MSK(index), stx_int_msk.u64); +} diff --git a/drivers/staging/octeon/cvmx-interrupt-rsl.c b/drivers/staging/octeon/cvmx-interrupt-rsl.c new file mode 100644 index 0000000..df50048 --- /dev/null +++ b/drivers/staging/octeon/cvmx-interrupt-rsl.c @@ -0,0 +1,140 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Utility functions to decode Octeon's RSL_INT_BLOCKS
+ * interrupts into error messages.
+ */
+
+#include <asm/octeon/octeon.h>
+
+#include "cvmx-asxx-defs.h"
+#include "cvmx-gmxx-defs.h"
+
+#ifndef PRINT_ERROR
+#define PRINT_ERROR(format, ...)
+#endif
+
+void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block);
+
+/**
+ * Enable ASX error interrupts that exist on CN3XXX, CN50XX, and
+ * CN58XX.
+ *
+ * @block: Interface to enable (0-1)
+ */
+void __cvmx_interrupt_asxx_enable(int block)
+{
+ int mask;
+ union cvmx_asxx_int_en csr;
+ /*
+ * CN38XX and CN58XX have two interfaces with 4 ports per
+ * interface. All other chips have a max of 3 ports on
+ * interface 0.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
+ mask = 0xf; /* Set enables for 4 ports */
+ else
+ mask = 0x7; /* Set enables for 3 ports */
+
+ /* Enable interface interrupts */
+ csr.u64 = cvmx_read_csr(CVMX_ASXX_INT_EN(block));
+ csr.s.txpsh = mask;
+ csr.s.txpop = mask;
+ csr.s.ovrflw = mask;
+ cvmx_write_csr(CVMX_ASXX_INT_EN(block), csr.u64);
+}
+/**
+ * Enable GMX error reporting for the supplied interface
+ *
+ * @interface: Interface to enable
+ */
+void __cvmx_interrupt_gmxx_enable(int interface)
+{
+ union cvmx_gmxx_inf_mode mode;
+ union cvmx_gmxx_tx_int_en gmx_tx_int_en;
+ int num_ports;
+ int index;
+
+ mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ if (mode.s.en) {
+ switch (mode.cn56xx.mode) {
+ case 1: /* XAUI */
+ num_ports = 1;
+ break;
+ case 2: /* SGMII */
+ case 3: /* PICMG */
+ num_ports = 4;
+ break;
+ default: /* Disabled */
+ num_ports = 0;
+ break;
+ }
+ } else
+ num_ports = 0;
+ } else {
+ if (mode.s.en) {
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ /*
+ * SPI on CN38XX and CN58XX reports all
+ * errors through port 0. RGMII needs
+ * to check all 4 ports
+ */
+ if (mode.s.type)
+ num_ports = 1;
+ else
+ num_ports = 4;
+ } else {
+ /*
+ * CN30XX, CN31XX, and CN50XX have two
+ * or three ports.
GMII and MII have two,
+ * RGMII has three
+ */
+ if (mode.s.type)
+ num_ports = 2;
+ else
+ num_ports = 3;
+ }
+ } else
+ num_ports = 0;
+ }
+
+ gmx_tx_int_en.u64 = 0;
+ if (num_ports) {
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX))
+ gmx_tx_int_en.s.ncb_nxa = 1;
+ gmx_tx_int_en.s.pko_nxa = 1;
+ }
+ gmx_tx_int_en.s.undflw = (1 << num_ports) - 1;
+ cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), gmx_tx_int_en.u64);
+ for (index = 0; index < num_ports; index++)
+ __cvmx_interrupt_gmxx_rxx_int_en_enable(index, interface);
+}
diff --git a/drivers/staging/octeon/cvmx-ipd.h b/drivers/staging/octeon/cvmx-ipd.h
new file mode 100644
index 0000000..115a552
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-ipd.h
@@ -0,0 +1,338 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ *
+ * Interface to the hardware Input Packet Data unit.
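+ *
+ * The helpers below program the buffer skip sizes, the packet buffer
+ * size, the work-queue-entry FPA pool and the caching mode, and
+ * provide enable, disable and cleanup entry points.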
+ */
+
+#ifndef __CVMX_IPD_H__
+#define __CVMX_IPD_H__
+
+#include <asm/octeon/octeon-feature.h>
+
+#include <asm/octeon/cvmx-ipd-defs.h>
+
+enum cvmx_ipd_mode {
+ CVMX_IPD_OPC_MODE_STT = 0LL, /* All blocks DRAM, not cached in L2 */
+ CVMX_IPD_OPC_MODE_STF = 1LL, /* All blocks into L2 */
+ CVMX_IPD_OPC_MODE_STF1_STT = 2LL, /* 1st block L2, rest DRAM */
+ CVMX_IPD_OPC_MODE_STF2_STT = 3LL /* 1st, 2nd blocks L2, rest DRAM */
+};
+
+#ifndef CVMX_ENABLE_LEN_M8_FIX
+#define CVMX_ENABLE_LEN_M8_FIX 0
+#endif
+
+/* CSR typedefs have been moved to cvmx-csr-*.h */
+typedef union cvmx_ipd_1st_mbuff_skip cvmx_ipd_mbuff_first_skip_t;
+typedef union cvmx_ipd_1st_next_ptr_back cvmx_ipd_first_next_ptr_back_t;
+
+typedef cvmx_ipd_mbuff_first_skip_t cvmx_ipd_mbuff_not_first_skip_t;
+typedef cvmx_ipd_first_next_ptr_back_t cvmx_ipd_second_next_ptr_back_t;
+
+/**
+ * Configure IPD
+ *
+ * @mbuff_size: Packet buffer size in 8 byte words
+ * @first_mbuff_skip:
+ * Number of 8 byte words to skip in the first buffer
+ * @not_first_mbuff_skip:
+ * Number of 8 byte words to skip in each following buffer
+ * @first_back: Must be same as first_mbuff_skip / 128
+ * @second_back:
+ * Must be same as not_first_mbuff_skip / 128
+ * @wqe_fpa_pool:
+ * FPA pool to get work entries from
+ * @cache_mode: Packet data caching mode, one of enum cvmx_ipd_mode
+ * @back_pres_enable_flag:
+ * Enable or disable port back pressure
+ */
+static inline void cvmx_ipd_config(uint64_t mbuff_size,
+ uint64_t first_mbuff_skip,
+ uint64_t not_first_mbuff_skip,
+ uint64_t first_back,
+ uint64_t second_back,
+ uint64_t wqe_fpa_pool,
+ enum cvmx_ipd_mode cache_mode,
+ uint64_t back_pres_enable_flag)
+{
+ cvmx_ipd_mbuff_first_skip_t first_skip;
+ cvmx_ipd_mbuff_not_first_skip_t not_first_skip;
+ union cvmx_ipd_packet_mbuff_size size;
+ cvmx_ipd_first_next_ptr_back_t first_back_struct;
+ cvmx_ipd_second_next_ptr_back_t second_back_struct;
+ union cvmx_ipd_wqe_fpa_queue wqe_pool;
+ union cvmx_ipd_ctl_status ipd_ctl_reg;
+
+ first_skip.u64 = 0;
+ first_skip.s.skip_sz = first_mbuff_skip;
+ cvmx_write_csr(CVMX_IPD_1ST_MBUFF_SKIP, first_skip.u64);
+
+ not_first_skip.u64 = 0;
+ not_first_skip.s.skip_sz = not_first_mbuff_skip;
+ cvmx_write_csr(CVMX_IPD_NOT_1ST_MBUFF_SKIP, not_first_skip.u64);
+
+ size.u64 = 0;
+ size.s.mb_size = mbuff_size;
+ cvmx_write_csr(CVMX_IPD_PACKET_MBUFF_SIZE, size.u64);
+
+ first_back_struct.u64 = 0;
+ first_back_struct.s.back = first_back;
+ cvmx_write_csr(CVMX_IPD_1st_NEXT_PTR_BACK, first_back_struct.u64);
+
+ second_back_struct.u64 = 0;
+ second_back_struct.s.back = second_back;
+ cvmx_write_csr(CVMX_IPD_2nd_NEXT_PTR_BACK, second_back_struct.u64);
+
+ wqe_pool.u64 = 0;
+ wqe_pool.s.wqe_pool = wqe_fpa_pool;
+ cvmx_write_csr(CVMX_IPD_WQE_FPA_QUEUE, wqe_pool.u64);
+
+ ipd_ctl_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
+ ipd_ctl_reg.s.opc_mode = cache_mode;
+ ipd_ctl_reg.s.pbp_en = back_pres_enable_flag;
+ cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_reg.u64);
+
+ /* Note: the example RED code that used to be here has been moved to
+ cvmx_helper_setup_red */
+}
+
+/**
+ * Enable IPD
+ */
+static inline void cvmx_ipd_enable(void)
+{
+ union cvmx_ipd_ctl_status ipd_reg;
+ ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
+ if (ipd_reg.s.ipd_en) {
+ cvmx_dprintf
+ ("Warning: Enabling IPD when IPD already enabled.\n");
+ }
+ ipd_reg.s.ipd_en = 1;
+#if CVMX_ENABLE_LEN_M8_FIX
+ if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
+ ipd_reg.s.len_m8 = TRUE;
+#endif
+ cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
+}
+
+/**
+ * Disable IPD
+ */
+static inline void cvmx_ipd_disable(void)
+{
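+ /* Clearing ipd_en stops the IPD from accepting new packets;
+ buffers it has already prefetched are reclaimed separately
+ by cvmx_ipd_free_ptr() below. */
+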
union cvmx_ipd_ctl_status ipd_reg; + ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS); + ipd_reg.s.ipd_en = 0; + cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64); +} + +/** + * Supportive function for cvmx_fpa_shutdown_pool. + */ +static inline void cvmx_ipd_free_ptr(void) +{ + /* Only CN38XXp{1,2} cannot read pointer out of the IPD */ + if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1) + && !OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) { + int no_wptr = 0; + union cvmx_ipd_ptr_count ipd_ptr_count; + ipd_ptr_count.u64 = cvmx_read_csr(CVMX_IPD_PTR_COUNT); + + /* Handle Work Queue Entry in cn56xx and cn52xx */ + if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) { + union cvmx_ipd_ctl_status ipd_ctl_status; + ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS); + if (ipd_ctl_status.s.no_wptr) + no_wptr = 1; + } + + /* Free the prefetched WQE */ + if (ipd_ptr_count.s.wqev_cnt) { + union cvmx_ipd_wqe_ptr_valid ipd_wqe_ptr_valid; + ipd_wqe_ptr_valid.u64 = + cvmx_read_csr(CVMX_IPD_WQE_PTR_VALID); + if (no_wptr) + cvmx_fpa_free(cvmx_phys_to_ptr + ((uint64_t) ipd_wqe_ptr_valid.s. + ptr << 7), CVMX_FPA_PACKET_POOL, + 0); + else + cvmx_fpa_free(cvmx_phys_to_ptr + ((uint64_t) ipd_wqe_ptr_valid.s. + ptr << 7), CVMX_FPA_WQE_POOL, 0); + } + + /* Free all WQE in the fifo */ + if (ipd_ptr_count.s.wqe_pcnt) { + int i; + union cvmx_ipd_pwp_ptr_fifo_ctl ipd_pwp_ptr_fifo_ctl; + ipd_pwp_ptr_fifo_ctl.u64 = + cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL); + for (i = 0; i < ipd_ptr_count.s.wqe_pcnt; i++) { + ipd_pwp_ptr_fifo_ctl.s.cena = 0; + ipd_pwp_ptr_fifo_ctl.s.raddr = + ipd_pwp_ptr_fifo_ctl.s.max_cnts + + (ipd_pwp_ptr_fifo_ctl.s.wraddr + + i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts; + cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, + ipd_pwp_ptr_fifo_ctl.u64); + ipd_pwp_ptr_fifo_ctl.u64 = + cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL); + if (no_wptr) + cvmx_fpa_free(cvmx_phys_to_ptr + ((uint64_t) + ipd_pwp_ptr_fifo_ctl.s. + ptr << 7), + CVMX_FPA_PACKET_POOL, 0); + else + cvmx_fpa_free(cvmx_phys_to_ptr + ((uint64_t) + ipd_pwp_ptr_fifo_ctl.s. + ptr << 7), + CVMX_FPA_WQE_POOL, 0); + } + ipd_pwp_ptr_fifo_ctl.s.cena = 1; + cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, + ipd_pwp_ptr_fifo_ctl.u64); + } + + /* Free the prefetched packet */ + if (ipd_ptr_count.s.pktv_cnt) { + union cvmx_ipd_pkt_ptr_valid ipd_pkt_ptr_valid; + ipd_pkt_ptr_valid.u64 = + cvmx_read_csr(CVMX_IPD_PKT_PTR_VALID); + cvmx_fpa_free(cvmx_phys_to_ptr + (ipd_pkt_ptr_valid.s.ptr << 7), + CVMX_FPA_PACKET_POOL, 0); + } + + /* Free the per port prefetched packets */ + if (1) { + int i; + union cvmx_ipd_prc_port_ptr_fifo_ctl + ipd_prc_port_ptr_fifo_ctl; + ipd_prc_port_ptr_fifo_ctl.u64 = + cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL); + + for (i = 0; i < ipd_prc_port_ptr_fifo_ctl.s.max_pkt; + i++) { + ipd_prc_port_ptr_fifo_ctl.s.cena = 0; + ipd_prc_port_ptr_fifo_ctl.s.raddr = + i % ipd_prc_port_ptr_fifo_ctl.s.max_pkt; + cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL, + ipd_prc_port_ptr_fifo_ctl.u64); + ipd_prc_port_ptr_fifo_ctl.u64 = + cvmx_read_csr + (CVMX_IPD_PRC_PORT_PTR_FIFO_CTL); + cvmx_fpa_free(cvmx_phys_to_ptr + ((uint64_t) + ipd_prc_port_ptr_fifo_ctl.s. 
+ ptr << 7), CVMX_FPA_PACKET_POOL, + 0); + } + ipd_prc_port_ptr_fifo_ctl.s.cena = 1; + cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL, + ipd_prc_port_ptr_fifo_ctl.u64); + } + + /* Free all packets in the holding fifo */ + if (ipd_ptr_count.s.pfif_cnt) { + int i; + union cvmx_ipd_prc_hold_ptr_fifo_ctl + ipd_prc_hold_ptr_fifo_ctl; + + ipd_prc_hold_ptr_fifo_ctl.u64 = + cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL); + + for (i = 0; i < ipd_ptr_count.s.pfif_cnt; i++) { + ipd_prc_hold_ptr_fifo_ctl.s.cena = 0; + ipd_prc_hold_ptr_fifo_ctl.s.raddr = + (ipd_prc_hold_ptr_fifo_ctl.s.praddr + + i) % ipd_prc_hold_ptr_fifo_ctl.s.max_pkt; + cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL, + ipd_prc_hold_ptr_fifo_ctl.u64); + ipd_prc_hold_ptr_fifo_ctl.u64 = + cvmx_read_csr + (CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL); + cvmx_fpa_free(cvmx_phys_to_ptr + ((uint64_t) + ipd_prc_hold_ptr_fifo_ctl.s. + ptr << 7), CVMX_FPA_PACKET_POOL, + 0); + } + ipd_prc_hold_ptr_fifo_ctl.s.cena = 1; + cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL, + ipd_prc_hold_ptr_fifo_ctl.u64); + } + + /* Free all packets in the fifo */ + if (ipd_ptr_count.s.pkt_pcnt) { + int i; + union cvmx_ipd_pwp_ptr_fifo_ctl ipd_pwp_ptr_fifo_ctl; + ipd_pwp_ptr_fifo_ctl.u64 = + cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL); + + for (i = 0; i < ipd_ptr_count.s.pkt_pcnt; i++) { + ipd_pwp_ptr_fifo_ctl.s.cena = 0; + ipd_pwp_ptr_fifo_ctl.s.raddr = + (ipd_pwp_ptr_fifo_ctl.s.praddr + + i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts; + cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, + ipd_pwp_ptr_fifo_ctl.u64); + ipd_pwp_ptr_fifo_ctl.u64 = + cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL); + cvmx_fpa_free(cvmx_phys_to_ptr + ((uint64_t) ipd_pwp_ptr_fifo_ctl. + s.ptr << 7), + CVMX_FPA_PACKET_POOL, 0); + } + ipd_pwp_ptr_fifo_ctl.s.cena = 1; + cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL, + ipd_pwp_ptr_fifo_ctl.u64); + } + + /* Reset the IPD to get all buffers out of it */ + { + union cvmx_ipd_ctl_status ipd_ctl_status; + ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS); + ipd_ctl_status.s.reset = 1; + cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64); + } + + /* Reset the PIP */ + { + union cvmx_pip_sft_rst pip_sft_rst; + pip_sft_rst.u64 = cvmx_read_csr(CVMX_PIP_SFT_RST); + pip_sft_rst.s.rst = 1; + cvmx_write_csr(CVMX_PIP_SFT_RST, pip_sft_rst.u64); + } + } +} + +#endif /* __CVMX_IPD_H__ */ diff --git a/drivers/staging/octeon/cvmx-mdio.h b/drivers/staging/octeon/cvmx-mdio.h new file mode 100644 index 0000000..c987a75 --- /dev/null +++ b/drivers/staging/octeon/cvmx-mdio.h @@ -0,0 +1,506 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. 
+ * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/* + * + * Interface to the SMI/MDIO hardware, including support for both IEEE 802.3 + * clause 22 and clause 45 operations. + * + */ + +#ifndef __CVMX_MIO_H__ +#define __CVMX_MIO_H__ + +#include "cvmx-smix-defs.h" + +/** + * PHY register 0 from the 802.3 spec + */ +#define CVMX_MDIO_PHY_REG_CONTROL 0 +typedef union { + uint16_t u16; + struct { + uint16_t reset:1; + uint16_t loopback:1; + uint16_t speed_lsb:1; + uint16_t autoneg_enable:1; + uint16_t power_down:1; + uint16_t isolate:1; + uint16_t restart_autoneg:1; + uint16_t duplex:1; + uint16_t collision_test:1; + uint16_t speed_msb:1; + uint16_t unidirectional_enable:1; + uint16_t reserved_0_4:5; + } s; +} cvmx_mdio_phy_reg_control_t; + +/** + * PHY register 1 from the 802.3 spec + */ +#define CVMX_MDIO_PHY_REG_STATUS 1 +typedef union { + uint16_t u16; + struct { + uint16_t capable_100base_t4:1; + uint16_t capable_100base_x_full:1; + uint16_t capable_100base_x_half:1; + uint16_t capable_10_full:1; + uint16_t capable_10_half:1; + uint16_t capable_100base_t2_full:1; + uint16_t capable_100base_t2_half:1; + uint16_t capable_extended_status:1; + uint16_t capable_unidirectional:1; + uint16_t capable_mf_preamble_suppression:1; + uint16_t autoneg_complete:1; + uint16_t remote_fault:1; + uint16_t capable_autoneg:1; + uint16_t link_status:1; + uint16_t jabber_detect:1; + uint16_t capable_extended_registers:1; + + } s; +} cvmx_mdio_phy_reg_status_t; + +/** + * PHY register 2 from the 802.3 spec + */ +#define CVMX_MDIO_PHY_REG_ID1 2 +typedef union { + uint16_t u16; + struct { + uint16_t oui_bits_3_18; + } s; +} cvmx_mdio_phy_reg_id1_t; + +/** + * PHY register 3 from the 802.3 spec + */ +#define CVMX_MDIO_PHY_REG_ID2 3 +typedef union { + uint16_t u16; + struct { + uint16_t oui_bits_19_24:6; + uint16_t model:6; + uint16_t revision:4; + } s; +} cvmx_mdio_phy_reg_id2_t; + +/** + * PHY register 4 from the 802.3 spec + */ +#define CVMX_MDIO_PHY_REG_AUTONEG_ADVER 4 +typedef union { + uint16_t u16; + struct { + uint16_t next_page:1; + uint16_t reserved_14:1; + uint16_t remote_fault:1; + uint16_t reserved_12:1; + uint16_t asymmetric_pause:1; + uint16_t pause:1; + uint16_t advert_100base_t4:1; + uint16_t advert_100base_tx_full:1; + uint16_t advert_100base_tx_half:1; + uint16_t advert_10base_tx_full:1; + uint16_t advert_10base_tx_half:1; + uint16_t selector:5; + } s; +} cvmx_mdio_phy_reg_autoneg_adver_t; + +/** + * PHY register 5 from the 802.3 spec + */ +#define CVMX_MDIO_PHY_REG_LINK_PARTNER_ABILITY 5 +typedef union { + uint16_t u16; + struct { + uint16_t next_page:1; + uint16_t ack:1; + uint16_t remote_fault:1; + uint16_t reserved_12:1; + uint16_t asymmetric_pause:1; + uint16_t pause:1; + uint16_t advert_100base_t4:1; + uint16_t advert_100base_tx_full:1; + uint16_t advert_100base_tx_half:1; + uint16_t advert_10base_tx_full:1; + uint16_t advert_10base_tx_half:1; + uint16_t selector:5; + } s; +} cvmx_mdio_phy_reg_link_partner_ability_t; + +/** + * PHY register 6 from the 802.3 spec + */ +#define CVMX_MDIO_PHY_REG_AUTONEG_EXPANSION 6 +typedef union { + uint16_t u16; + struct { + uint16_t reserved_5_15:11; + uint16_t parallel_detection_fault:1; + uint16_t link_partner_next_page_capable:1; + uint16_t local_next_page_capable:1; + uint16_t page_received:1; + uint16_t link_partner_autoneg_capable:1; + + } s; +} cvmx_mdio_phy_reg_autoneg_expansion_t; + +/** + * 
PHY register 9 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_CONTROL_1000 9
+typedef union {
+ uint16_t u16;
+ struct {
+ uint16_t test_mode:3;
+ uint16_t manual_master_slave:1;
+ uint16_t master:1;
+ uint16_t port_type:1;
+ uint16_t advert_1000base_t_full:1;
+ uint16_t advert_1000base_t_half:1;
+ uint16_t reserved_0_7:8;
+ } s;
+} cvmx_mdio_phy_reg_control_1000_t;
+
+/**
+ * PHY register 10 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_STATUS_1000 10
+typedef union {
+ uint16_t u16;
+ struct {
+ uint16_t master_slave_fault:1;
+ uint16_t is_master:1;
+ uint16_t local_receiver_ok:1;
+ uint16_t remote_receiver_ok:1;
+ uint16_t remote_capable_1000base_t_full:1;
+ uint16_t remote_capable_1000base_t_half:1;
+ uint16_t reserved_8_9:2;
+ uint16_t idle_error_count:8;
+ } s;
+} cvmx_mdio_phy_reg_status_1000_t;
+
+/**
+ * PHY register 15 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_EXTENDED_STATUS 15
+typedef union {
+ uint16_t u16;
+ struct {
+ uint16_t capable_1000base_x_full:1;
+ uint16_t capable_1000base_x_half:1;
+ uint16_t capable_1000base_t_full:1;
+ uint16_t capable_1000base_t_half:1;
+ uint16_t reserved_0_11:12;
+ } s;
+} cvmx_mdio_phy_reg_extended_status_t;
+
+/**
+ * PHY register 13 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_MMD_CONTROL 13
+typedef union {
+ uint16_t u16;
+ struct {
+ uint16_t function:2;
+ uint16_t reserved_5_13:9;
+ uint16_t devad:5;
+ } s;
+} cvmx_mdio_phy_reg_mmd_control_t;
+
+/**
+ * PHY register 14 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_MMD_ADDRESS_DATA 14
+typedef union {
+ uint16_t u16;
+ struct {
+ uint16_t address_data:16;
+ } s;
+} cvmx_mdio_phy_reg_mmd_address_data_t;
+
+/* Operating request encodings. */
+#define MDIO_CLAUSE_22_WRITE 0
+#define MDIO_CLAUSE_22_READ 1
+
+#define MDIO_CLAUSE_45_ADDRESS 0
+#define MDIO_CLAUSE_45_WRITE 1
+#define MDIO_CLAUSE_45_READ_INC 2
+#define MDIO_CLAUSE_45_READ 3
+
+/* MMD identifiers, mostly for accessing devices within XENPAK modules. */
+#define CVMX_MMD_DEVICE_PMA_PMD 1
+#define CVMX_MMD_DEVICE_WIS 2
+#define CVMX_MMD_DEVICE_PCS 3
+#define CVMX_MMD_DEVICE_PHY_XS 4
+#define CVMX_MMD_DEVICE_DTS_XS 5
+#define CVMX_MMD_DEVICE_TC 6
+#define CVMX_MMD_DEVICE_CL22_EXT 29
+#define CVMX_MMD_DEVICE_VENDOR_1 30
+#define CVMX_MMD_DEVICE_VENDOR_2 31
+
+/* Helper function to put MDIO interface into clause 45 mode */
+static inline void __cvmx_mdio_set_clause45_mode(int bus_id)
+{
+ union cvmx_smix_clk smi_clk;
+ /* Put bus into clause 45 mode */
+ smi_clk.u64 = cvmx_read_csr(CVMX_SMIX_CLK(bus_id));
+ smi_clk.s.mode = 1;
+ smi_clk.s.preamble = 1;
+ cvmx_write_csr(CVMX_SMIX_CLK(bus_id), smi_clk.u64);
+}
+
+/* Helper function to put MDIO interface into clause 22 mode */
+static inline void __cvmx_mdio_set_clause22_mode(int bus_id)
+{
+ union cvmx_smix_clk smi_clk;
+ /* Put bus into clause 22 mode */
+ smi_clk.u64 = cvmx_read_csr(CVMX_SMIX_CLK(bus_id));
+ smi_clk.s.mode = 0;
+ cvmx_write_csr(CVMX_SMIX_CLK(bus_id), smi_clk.u64);
+}
+
+/**
+ * Perform an MII read. This function is used to read PHY
+ * registers controlling auto negotiation.
+ *
+ * @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
+ * support multiple busses.
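+ * The access is a single clause 22 READ cycle; the call polls the
+ * SMI logic until the result is no longer marked pending.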
+ * @phy_id: The MII phy id + * @location: Register location to read + * + * Returns Result from the read or -1 on failure + */ +static inline int cvmx_mdio_read(int bus_id, int phy_id, int location) +{ + union cvmx_smix_cmd smi_cmd; + union cvmx_smix_rd_dat smi_rd; + int timeout = 1000; + + if (octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45)) + __cvmx_mdio_set_clause22_mode(bus_id); + + smi_cmd.u64 = 0; + smi_cmd.s.phy_op = MDIO_CLAUSE_22_READ; + smi_cmd.s.phy_adr = phy_id; + smi_cmd.s.reg_adr = location; + cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64); + + do { + cvmx_wait(1000); + smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(bus_id)); + } while (smi_rd.s.pending && timeout--); + + if (smi_rd.s.val) + return smi_rd.s.dat; + else + return -1; +} + +/** + * Perform an MII write. This function is used to write PHY + * registers controlling auto negotiation. + * + * @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX) + * support multiple busses. + * @phy_id: The MII phy id + * @location: Register location to write + * @val: Value to write + * + * Returns -1 on error + * 0 on success + */ +static inline int cvmx_mdio_write(int bus_id, int phy_id, int location, int val) +{ + union cvmx_smix_cmd smi_cmd; + union cvmx_smix_wr_dat smi_wr; + int timeout = 1000; + + if (octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45)) + __cvmx_mdio_set_clause22_mode(bus_id); + + smi_wr.u64 = 0; + smi_wr.s.dat = val; + cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64); + + smi_cmd.u64 = 0; + smi_cmd.s.phy_op = MDIO_CLAUSE_22_WRITE; + smi_cmd.s.phy_adr = phy_id; + smi_cmd.s.reg_adr = location; + cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64); + + do { + cvmx_wait(1000); + smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id)); + } while (smi_wr.s.pending && --timeout); + if (timeout <= 0) + return -1; + + return 0; +} + +/** + * Perform an IEEE 802.3 clause 45 MII read. This function is used to + * read PHY registers controlling auto negotiation. + * + * @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX) + * support multiple busses. 
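+ * Clause 45 access is a two-step sequence: an ADDRESS cycle first
+ * latches the register location, then a READ cycle returns the data.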
+ * @phy_id: The MII phy id
+ * @device: MDIO Manageable Device (MMD) id
+ * @location: Register location to read
+ *
+ * Returns Result from the read or -1 on failure
+ */
+
+static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
+ int location)
+{
+ union cvmx_smix_cmd smi_cmd;
+ union cvmx_smix_rd_dat smi_rd;
+ union cvmx_smix_wr_dat smi_wr;
+ int timeout = 1000;
+
+ if (!octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
+ return -1;
+
+ __cvmx_mdio_set_clause45_mode(bus_id);
+
+ smi_wr.u64 = 0;
+ smi_wr.s.dat = location;
+ cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = device;
+ cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
+
+ do {
+ cvmx_wait(1000);
+ smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
+ } while (smi_wr.s.pending && --timeout);
+ if (timeout <= 0) {
+ cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
+ "device %2d register %2d TIME OUT(address)\n",
+ bus_id, phy_id, device, location);
+ return -1;
+ }
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = MDIO_CLAUSE_45_READ;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = device;
+ cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
+
+ do {
+ cvmx_wait(1000);
+ smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(bus_id));
+ } while (smi_rd.s.pending && timeout--);
+
+ if (timeout <= 0) {
+ cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
+ "device %2d register %2d TIME OUT(data)\n",
+ bus_id, phy_id, device, location);
+ return -1;
+ }
+
+ if (smi_rd.s.val)
+ return smi_rd.s.dat;
+ else {
+ cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
+ "device %2d register %2d INVALID READ\n",
+ bus_id, phy_id, device, location);
+ return -1;
+ }
+}
+
+/**
+ * Perform an IEEE 802.3 clause 45 MII write. This function is used to
+ * write PHY registers controlling auto negotiation.
+ *
+ * @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
+ * support multiple busses.
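+ * As with the clause 45 read, an ADDRESS cycle is issued first,
+ * followed by a WRITE cycle that carries the data; both cycles are
+ * polled for completion.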
+ * @phy_id: The MII phy id
+ * @device: MDIO Manageable Device (MMD) id
+ * @location: Register location to write
+ * @val: Value to write
+ *
+ * Returns -1 on error
+ * 0 on success
+ */
+static inline int cvmx_mdio_45_write(int bus_id, int phy_id, int device,
+ int location, int val)
+{
+ union cvmx_smix_cmd smi_cmd;
+ union cvmx_smix_wr_dat smi_wr;
+ int timeout = 1000;
+
+ if (!octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
+ return -1;
+
+ __cvmx_mdio_set_clause45_mode(bus_id);
+
+ smi_wr.u64 = 0;
+ smi_wr.s.dat = location;
+ cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = device;
+ cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
+
+ do {
+ cvmx_wait(1000);
+ smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
+ } while (smi_wr.s.pending && --timeout);
+ if (timeout <= 0)
+ return -1;
+
+ smi_wr.u64 = 0;
+ smi_wr.s.dat = val;
+ cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = MDIO_CLAUSE_45_WRITE;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = device;
+ cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
+
+ do {
+ cvmx_wait(1000);
+ smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
+ } while (smi_wr.s.pending && --timeout);
+ if (timeout <= 0)
+ return -1;
+
+ return 0;
+}
+
+#endif
diff --git a/drivers/staging/octeon/cvmx-packet.h b/drivers/staging/octeon/cvmx-packet.h
new file mode 100644
index 0000000..62ffe78a
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-packet.h
@@ -0,0 +1,65 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ *
+ * Packet buffer defines.
+ */
+
+#ifndef __CVMX_PACKET_H__
+#define __CVMX_PACKET_H__
+
+/**
+ * This structure defines a buffer pointer on Octeon
+ */
+union cvmx_buf_ptr {
+ void *ptr;
+ uint64_t u64;
+ struct {
+ /*
+ * if set, invert the "free" pick of the overall
+ * packet. HW always sets this bit to 0 on inbound
+ * packet
+ */
+ uint64_t i:1;
+ /*
+ * Indicates the amount to back up to get to the
+ * buffer start in cache lines. In most cases this is
+ * less than one complete cache line, so the value is
+ * zero.
+ */ + uint64_t back:4; + /* The pool that the buffer came from / goes to */ + uint64_t pool:3; + /* The size of the segment pointed to by addr (in bytes) */ + uint64_t size:16; + /* Pointer to the first byte of the data, NOT buffer */ + uint64_t addr:40; + } s; +}; + +#endif /* __CVMX_PACKET_H__ */ diff --git a/drivers/staging/octeon/cvmx-pcsx-defs.h b/drivers/staging/octeon/cvmx-pcsx-defs.h new file mode 100644 index 0000000..d45952d --- /dev/null +++ b/drivers/staging/octeon/cvmx-pcsx-defs.h @@ -0,0 +1,370 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_PCSX_DEFS_H__ +#define __CVMX_PCSX_DEFS_H__ + +#define CVMX_PCSX_ANX_ADV_REG(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0001010ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSX_ANX_EXT_ST_REG(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0001028ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSX_ANX_LP_ABIL_REG(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0001018ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSX_ANX_RESULTS_REG(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0001020ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSX_INTX_EN_REG(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0001088ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSX_INTX_REG(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0001080ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSX_LINKX_TIMER_COUNT_REG(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0001040ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSX_LOG_ANLX_REG(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0001090ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSX_MISCX_CTL_REG(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0001078ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSX_MRX_CONTROL_REG(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0001000ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSX_MRX_STATUS_REG(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0001008ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSX_RXX_STATES_REG(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0001058ull 
+ (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSX_RXX_SYNC_REG(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0001050ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSX_SGMX_AN_ADV_REG(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0001068ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSX_SGMX_LP_ADV_REG(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0001070ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSX_TXX_STATES_REG(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0001060ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSX_TX_RXX_POLARITY_REG(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0001048ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull)) + +union cvmx_pcsx_anx_adv_reg { + uint64_t u64; + struct cvmx_pcsx_anx_adv_reg_s { + uint64_t reserved_16_63:48; + uint64_t np:1; + uint64_t reserved_14_14:1; + uint64_t rem_flt:2; + uint64_t reserved_9_11:3; + uint64_t pause:2; + uint64_t hfd:1; + uint64_t fd:1; + uint64_t reserved_0_4:5; + } s; + struct cvmx_pcsx_anx_adv_reg_s cn52xx; + struct cvmx_pcsx_anx_adv_reg_s cn52xxp1; + struct cvmx_pcsx_anx_adv_reg_s cn56xx; + struct cvmx_pcsx_anx_adv_reg_s cn56xxp1; +}; + +union cvmx_pcsx_anx_ext_st_reg { + uint64_t u64; + struct cvmx_pcsx_anx_ext_st_reg_s { + uint64_t reserved_16_63:48; + uint64_t thou_xfd:1; + uint64_t thou_xhd:1; + uint64_t thou_tfd:1; + uint64_t thou_thd:1; + uint64_t reserved_0_11:12; + } s; + struct cvmx_pcsx_anx_ext_st_reg_s cn52xx; + struct cvmx_pcsx_anx_ext_st_reg_s cn52xxp1; + struct cvmx_pcsx_anx_ext_st_reg_s cn56xx; + struct cvmx_pcsx_anx_ext_st_reg_s cn56xxp1; +}; + +union cvmx_pcsx_anx_lp_abil_reg { + uint64_t u64; + struct cvmx_pcsx_anx_lp_abil_reg_s { + uint64_t reserved_16_63:48; + uint64_t np:1; + uint64_t ack:1; + uint64_t rem_flt:2; + uint64_t reserved_9_11:3; + uint64_t pause:2; + uint64_t hfd:1; + uint64_t fd:1; + uint64_t reserved_0_4:5; + } s; + struct cvmx_pcsx_anx_lp_abil_reg_s cn52xx; + struct cvmx_pcsx_anx_lp_abil_reg_s cn52xxp1; + struct cvmx_pcsx_anx_lp_abil_reg_s cn56xx; + struct cvmx_pcsx_anx_lp_abil_reg_s cn56xxp1; +}; + +union cvmx_pcsx_anx_results_reg { + uint64_t u64; + struct cvmx_pcsx_anx_results_reg_s { + uint64_t reserved_7_63:57; + uint64_t pause:2; + uint64_t spd:2; + uint64_t an_cpt:1; + uint64_t dup:1; + uint64_t link_ok:1; + } s; + struct cvmx_pcsx_anx_results_reg_s cn52xx; + struct cvmx_pcsx_anx_results_reg_s cn52xxp1; + struct cvmx_pcsx_anx_results_reg_s cn56xx; + struct cvmx_pcsx_anx_results_reg_s cn56xxp1; +}; + +union cvmx_pcsx_intx_en_reg { + uint64_t u64; + struct cvmx_pcsx_intx_en_reg_s { + uint64_t reserved_12_63:52; + uint64_t dup:1; + uint64_t sync_bad_en:1; + uint64_t an_bad_en:1; + uint64_t rxlock_en:1; + uint64_t rxbad_en:1; + uint64_t rxerr_en:1; + uint64_t txbad_en:1; + uint64_t txfifo_en:1; + uint64_t txfifu_en:1; + uint64_t an_err_en:1; + uint64_t xmit_en:1; + uint64_t lnkspd_en:1; + } s; + struct cvmx_pcsx_intx_en_reg_s cn52xx; + struct cvmx_pcsx_intx_en_reg_s cn52xxp1; + struct cvmx_pcsx_intx_en_reg_s cn56xx; + struct cvmx_pcsx_intx_en_reg_s cn56xxp1; +}; + +union cvmx_pcsx_intx_reg { + uint64_t u64; + struct cvmx_pcsx_intx_reg_s { + uint64_t reserved_12_63:52; + uint64_t dup:1; + uint64_t sync_bad:1; + uint64_t an_bad:1; + uint64_t rxlock:1; + uint64_t rxbad:1; + uint64_t rxerr:1; + uint64_t txbad:1; + uint64_t txfifo:1; + uint64_t txfifu:1; + uint64_t an_err:1; + uint64_t 
xmit:1; + uint64_t lnkspd:1; + } s; + struct cvmx_pcsx_intx_reg_s cn52xx; + struct cvmx_pcsx_intx_reg_s cn52xxp1; + struct cvmx_pcsx_intx_reg_s cn56xx; + struct cvmx_pcsx_intx_reg_s cn56xxp1; +}; + +union cvmx_pcsx_linkx_timer_count_reg { + uint64_t u64; + struct cvmx_pcsx_linkx_timer_count_reg_s { + uint64_t reserved_16_63:48; + uint64_t count:16; + } s; + struct cvmx_pcsx_linkx_timer_count_reg_s cn52xx; + struct cvmx_pcsx_linkx_timer_count_reg_s cn52xxp1; + struct cvmx_pcsx_linkx_timer_count_reg_s cn56xx; + struct cvmx_pcsx_linkx_timer_count_reg_s cn56xxp1; +}; + +union cvmx_pcsx_log_anlx_reg { + uint64_t u64; + struct cvmx_pcsx_log_anlx_reg_s { + uint64_t reserved_4_63:60; + uint64_t lafifovfl:1; + uint64_t la_en:1; + uint64_t pkt_sz:2; + } s; + struct cvmx_pcsx_log_anlx_reg_s cn52xx; + struct cvmx_pcsx_log_anlx_reg_s cn52xxp1; + struct cvmx_pcsx_log_anlx_reg_s cn56xx; + struct cvmx_pcsx_log_anlx_reg_s cn56xxp1; +}; + +union cvmx_pcsx_miscx_ctl_reg { + uint64_t u64; + struct cvmx_pcsx_miscx_ctl_reg_s { + uint64_t reserved_13_63:51; + uint64_t sgmii:1; + uint64_t gmxeno:1; + uint64_t loopbck2:1; + uint64_t mac_phy:1; + uint64_t mode:1; + uint64_t an_ovrd:1; + uint64_t samp_pt:7; + } s; + struct cvmx_pcsx_miscx_ctl_reg_s cn52xx; + struct cvmx_pcsx_miscx_ctl_reg_s cn52xxp1; + struct cvmx_pcsx_miscx_ctl_reg_s cn56xx; + struct cvmx_pcsx_miscx_ctl_reg_s cn56xxp1; +}; + +union cvmx_pcsx_mrx_control_reg { + uint64_t u64; + struct cvmx_pcsx_mrx_control_reg_s { + uint64_t reserved_16_63:48; + uint64_t reset:1; + uint64_t loopbck1:1; + uint64_t spdlsb:1; + uint64_t an_en:1; + uint64_t pwr_dn:1; + uint64_t reserved_10_10:1; + uint64_t rst_an:1; + uint64_t dup:1; + uint64_t coltst:1; + uint64_t spdmsb:1; + uint64_t uni:1; + uint64_t reserved_0_4:5; + } s; + struct cvmx_pcsx_mrx_control_reg_s cn52xx; + struct cvmx_pcsx_mrx_control_reg_s cn52xxp1; + struct cvmx_pcsx_mrx_control_reg_s cn56xx; + struct cvmx_pcsx_mrx_control_reg_s cn56xxp1; +}; + +union cvmx_pcsx_mrx_status_reg { + uint64_t u64; + struct cvmx_pcsx_mrx_status_reg_s { + uint64_t reserved_16_63:48; + uint64_t hun_t4:1; + uint64_t hun_xfd:1; + uint64_t hun_xhd:1; + uint64_t ten_fd:1; + uint64_t ten_hd:1; + uint64_t hun_t2fd:1; + uint64_t hun_t2hd:1; + uint64_t ext_st:1; + uint64_t reserved_7_7:1; + uint64_t prb_sup:1; + uint64_t an_cpt:1; + uint64_t rm_flt:1; + uint64_t an_abil:1; + uint64_t lnk_st:1; + uint64_t reserved_1_1:1; + uint64_t extnd:1; + } s; + struct cvmx_pcsx_mrx_status_reg_s cn52xx; + struct cvmx_pcsx_mrx_status_reg_s cn52xxp1; + struct cvmx_pcsx_mrx_status_reg_s cn56xx; + struct cvmx_pcsx_mrx_status_reg_s cn56xxp1; +}; + +union cvmx_pcsx_rxx_states_reg { + uint64_t u64; + struct cvmx_pcsx_rxx_states_reg_s { + uint64_t reserved_16_63:48; + uint64_t rx_bad:1; + uint64_t rx_st:5; + uint64_t sync_bad:1; + uint64_t sync:4; + uint64_t an_bad:1; + uint64_t an_st:4; + } s; + struct cvmx_pcsx_rxx_states_reg_s cn52xx; + struct cvmx_pcsx_rxx_states_reg_s cn52xxp1; + struct cvmx_pcsx_rxx_states_reg_s cn56xx; + struct cvmx_pcsx_rxx_states_reg_s cn56xxp1; +}; + +union cvmx_pcsx_rxx_sync_reg { + uint64_t u64; + struct cvmx_pcsx_rxx_sync_reg_s { + uint64_t reserved_2_63:62; + uint64_t sync:1; + uint64_t bit_lock:1; + } s; + struct cvmx_pcsx_rxx_sync_reg_s cn52xx; + struct cvmx_pcsx_rxx_sync_reg_s cn52xxp1; + struct cvmx_pcsx_rxx_sync_reg_s cn56xx; + struct cvmx_pcsx_rxx_sync_reg_s cn56xxp1; +}; + +union cvmx_pcsx_sgmx_an_adv_reg { + uint64_t u64; + struct cvmx_pcsx_sgmx_an_adv_reg_s { + uint64_t reserved_16_63:48; + uint64_t link:1; + 
uint64_t ack:1; + uint64_t reserved_13_13:1; + uint64_t dup:1; + uint64_t speed:2; + uint64_t reserved_1_9:9; + uint64_t one:1; + } s; + struct cvmx_pcsx_sgmx_an_adv_reg_s cn52xx; + struct cvmx_pcsx_sgmx_an_adv_reg_s cn52xxp1; + struct cvmx_pcsx_sgmx_an_adv_reg_s cn56xx; + struct cvmx_pcsx_sgmx_an_adv_reg_s cn56xxp1; +}; + +union cvmx_pcsx_sgmx_lp_adv_reg { + uint64_t u64; + struct cvmx_pcsx_sgmx_lp_adv_reg_s { + uint64_t reserved_16_63:48; + uint64_t link:1; + uint64_t reserved_13_14:2; + uint64_t dup:1; + uint64_t speed:2; + uint64_t reserved_1_9:9; + uint64_t one:1; + } s; + struct cvmx_pcsx_sgmx_lp_adv_reg_s cn52xx; + struct cvmx_pcsx_sgmx_lp_adv_reg_s cn52xxp1; + struct cvmx_pcsx_sgmx_lp_adv_reg_s cn56xx; + struct cvmx_pcsx_sgmx_lp_adv_reg_s cn56xxp1; +}; + +union cvmx_pcsx_txx_states_reg { + uint64_t u64; + struct cvmx_pcsx_txx_states_reg_s { + uint64_t reserved_7_63:57; + uint64_t xmit:2; + uint64_t tx_bad:1; + uint64_t ord_st:4; + } s; + struct cvmx_pcsx_txx_states_reg_s cn52xx; + struct cvmx_pcsx_txx_states_reg_s cn52xxp1; + struct cvmx_pcsx_txx_states_reg_s cn56xx; + struct cvmx_pcsx_txx_states_reg_s cn56xxp1; +}; + +union cvmx_pcsx_tx_rxx_polarity_reg { + uint64_t u64; + struct cvmx_pcsx_tx_rxx_polarity_reg_s { + uint64_t reserved_4_63:60; + uint64_t rxovrd:1; + uint64_t autorxpl:1; + uint64_t rxplrt:1; + uint64_t txplrt:1; + } s; + struct cvmx_pcsx_tx_rxx_polarity_reg_s cn52xx; + struct cvmx_pcsx_tx_rxx_polarity_reg_s cn52xxp1; + struct cvmx_pcsx_tx_rxx_polarity_reg_s cn56xx; + struct cvmx_pcsx_tx_rxx_polarity_reg_s cn56xxp1; +}; + +#endif diff --git a/drivers/staging/octeon/cvmx-pcsxx-defs.h b/drivers/staging/octeon/cvmx-pcsxx-defs.h new file mode 100644 index 0000000..55d120f --- /dev/null +++ b/drivers/staging/octeon/cvmx-pcsxx-defs.h @@ -0,0 +1,316 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. 
+ * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_PCSXX_DEFS_H__ +#define __CVMX_PCSXX_DEFS_H__ + +#define CVMX_PCSXX_10GBX_STATUS_REG(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000828ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSXX_BIST_STATUS_REG(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000870ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSXX_BIT_LOCK_STATUS_REG(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000850ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSXX_CONTROL1_REG(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000800ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSXX_CONTROL2_REG(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000818ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSXX_INT_EN_REG(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000860ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSXX_INT_REG(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000858ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSXX_LOG_ANL_REG(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000868ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSXX_MISC_CTL_REG(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000848ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSXX_RX_SYNC_STATES_REG(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000838ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSXX_SPD_ABIL_REG(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000810ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSXX_STATUS1_REG(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000808ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSXX_STATUS2_REG(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000820ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSXX_TX_RX_POLARITY_REG(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000840ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PCSXX_TX_RX_STATES_REG(block_id) \ + CVMX_ADD_IO_SEG(0x00011800B0000830ull + (((block_id) & 1) * 0x8000000ull)) + +union cvmx_pcsxx_10gbx_status_reg { + uint64_t u64; + struct cvmx_pcsxx_10gbx_status_reg_s { + uint64_t reserved_13_63:51; + uint64_t alignd:1; + uint64_t pattst:1; + uint64_t reserved_4_10:7; + uint64_t l3sync:1; + uint64_t l2sync:1; + uint64_t l1sync:1; + uint64_t l0sync:1; + } s; + struct cvmx_pcsxx_10gbx_status_reg_s cn52xx; + struct cvmx_pcsxx_10gbx_status_reg_s cn52xxp1; + struct cvmx_pcsxx_10gbx_status_reg_s cn56xx; + struct cvmx_pcsxx_10gbx_status_reg_s cn56xxp1; +}; + +union cvmx_pcsxx_bist_status_reg { + uint64_t u64; + struct cvmx_pcsxx_bist_status_reg_s { + uint64_t reserved_1_63:63; + uint64_t bist_status:1; + } s; + struct cvmx_pcsxx_bist_status_reg_s cn52xx; + struct cvmx_pcsxx_bist_status_reg_s cn52xxp1; + struct cvmx_pcsxx_bist_status_reg_s cn56xx; + struct cvmx_pcsxx_bist_status_reg_s cn56xxp1; +}; + +union cvmx_pcsxx_bit_lock_status_reg { + uint64_t u64; + struct cvmx_pcsxx_bit_lock_status_reg_s { + uint64_t reserved_4_63:60; + uint64_t bitlck3:1; + uint64_t bitlck2:1; + uint64_t bitlck1:1; + uint64_t bitlck0:1; + } s; + struct cvmx_pcsxx_bit_lock_status_reg_s cn52xx; + struct cvmx_pcsxx_bit_lock_status_reg_s cn52xxp1; + struct cvmx_pcsxx_bit_lock_status_reg_s cn56xx; + struct cvmx_pcsxx_bit_lock_status_reg_s cn56xxp1; +}; + +union cvmx_pcsxx_control1_reg { + uint64_t u64; + struct cvmx_pcsxx_control1_reg_s { + uint64_t reserved_16_63:48; + uint64_t reset:1; + uint64_t loopbck1:1; + uint64_t spdsel1:1; + uint64_t reserved_12_12:1; + uint64_t lo_pwr:1; + uint64_t 
reserved_7_10:4; + uint64_t spdsel0:1; + uint64_t spd:4; + uint64_t reserved_0_1:2; + } s; + struct cvmx_pcsxx_control1_reg_s cn52xx; + struct cvmx_pcsxx_control1_reg_s cn52xxp1; + struct cvmx_pcsxx_control1_reg_s cn56xx; + struct cvmx_pcsxx_control1_reg_s cn56xxp1; +}; + +union cvmx_pcsxx_control2_reg { + uint64_t u64; + struct cvmx_pcsxx_control2_reg_s { + uint64_t reserved_2_63:62; + uint64_t type:2; + } s; + struct cvmx_pcsxx_control2_reg_s cn52xx; + struct cvmx_pcsxx_control2_reg_s cn52xxp1; + struct cvmx_pcsxx_control2_reg_s cn56xx; + struct cvmx_pcsxx_control2_reg_s cn56xxp1; +}; + +union cvmx_pcsxx_int_en_reg { + uint64_t u64; + struct cvmx_pcsxx_int_en_reg_s { + uint64_t reserved_6_63:58; + uint64_t algnlos_en:1; + uint64_t synlos_en:1; + uint64_t bitlckls_en:1; + uint64_t rxsynbad_en:1; + uint64_t rxbad_en:1; + uint64_t txflt_en:1; + } s; + struct cvmx_pcsxx_int_en_reg_s cn52xx; + struct cvmx_pcsxx_int_en_reg_s cn52xxp1; + struct cvmx_pcsxx_int_en_reg_s cn56xx; + struct cvmx_pcsxx_int_en_reg_s cn56xxp1; +}; + +union cvmx_pcsxx_int_reg { + uint64_t u64; + struct cvmx_pcsxx_int_reg_s { + uint64_t reserved_6_63:58; + uint64_t algnlos:1; + uint64_t synlos:1; + uint64_t bitlckls:1; + uint64_t rxsynbad:1; + uint64_t rxbad:1; + uint64_t txflt:1; + } s; + struct cvmx_pcsxx_int_reg_s cn52xx; + struct cvmx_pcsxx_int_reg_s cn52xxp1; + struct cvmx_pcsxx_int_reg_s cn56xx; + struct cvmx_pcsxx_int_reg_s cn56xxp1; +}; + +union cvmx_pcsxx_log_anl_reg { + uint64_t u64; + struct cvmx_pcsxx_log_anl_reg_s { + uint64_t reserved_7_63:57; + uint64_t enc_mode:1; + uint64_t drop_ln:2; + uint64_t lafifovfl:1; + uint64_t la_en:1; + uint64_t pkt_sz:2; + } s; + struct cvmx_pcsxx_log_anl_reg_s cn52xx; + struct cvmx_pcsxx_log_anl_reg_s cn52xxp1; + struct cvmx_pcsxx_log_anl_reg_s cn56xx; + struct cvmx_pcsxx_log_anl_reg_s cn56xxp1; +}; + +union cvmx_pcsxx_misc_ctl_reg { + uint64_t u64; + struct cvmx_pcsxx_misc_ctl_reg_s { + uint64_t reserved_4_63:60; + uint64_t tx_swap:1; + uint64_t rx_swap:1; + uint64_t xaui:1; + uint64_t gmxeno:1; + } s; + struct cvmx_pcsxx_misc_ctl_reg_s cn52xx; + struct cvmx_pcsxx_misc_ctl_reg_s cn52xxp1; + struct cvmx_pcsxx_misc_ctl_reg_s cn56xx; + struct cvmx_pcsxx_misc_ctl_reg_s cn56xxp1; +}; + +union cvmx_pcsxx_rx_sync_states_reg { + uint64_t u64; + struct cvmx_pcsxx_rx_sync_states_reg_s { + uint64_t reserved_16_63:48; + uint64_t sync3st:4; + uint64_t sync2st:4; + uint64_t sync1st:4; + uint64_t sync0st:4; + } s; + struct cvmx_pcsxx_rx_sync_states_reg_s cn52xx; + struct cvmx_pcsxx_rx_sync_states_reg_s cn52xxp1; + struct cvmx_pcsxx_rx_sync_states_reg_s cn56xx; + struct cvmx_pcsxx_rx_sync_states_reg_s cn56xxp1; +}; + +union cvmx_pcsxx_spd_abil_reg { + uint64_t u64; + struct cvmx_pcsxx_spd_abil_reg_s { + uint64_t reserved_2_63:62; + uint64_t tenpasst:1; + uint64_t tengb:1; + } s; + struct cvmx_pcsxx_spd_abil_reg_s cn52xx; + struct cvmx_pcsxx_spd_abil_reg_s cn52xxp1; + struct cvmx_pcsxx_spd_abil_reg_s cn56xx; + struct cvmx_pcsxx_spd_abil_reg_s cn56xxp1; +}; + +union cvmx_pcsxx_status1_reg { + uint64_t u64; + struct cvmx_pcsxx_status1_reg_s { + uint64_t reserved_8_63:56; + uint64_t flt:1; + uint64_t reserved_3_6:4; + uint64_t rcv_lnk:1; + uint64_t lpable:1; + uint64_t reserved_0_0:1; + } s; + struct cvmx_pcsxx_status1_reg_s cn52xx; + struct cvmx_pcsxx_status1_reg_s cn52xxp1; + struct cvmx_pcsxx_status1_reg_s cn56xx; + struct cvmx_pcsxx_status1_reg_s cn56xxp1; +}; + +union cvmx_pcsxx_status2_reg { + uint64_t u64; + struct cvmx_pcsxx_status2_reg_s { + uint64_t reserved_16_63:48; + uint64_t dev:2; 
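+ /* + * Every CSR in this file is read and written through the same union + * idiom: load the raw 64-bit value into .u64, then decode it via the + * bitfield view. A hedged usage sketch (cvmx_read_csr() as used + * elsewhere in this SDK; pr_info() only for illustration), checking + * receive link state on XAUI interface 0: + * + * union cvmx_pcsxx_status1_reg status1; + * status1.u64 = cvmx_read_csr(CVMX_PCSXX_STATUS1_REG(0)); + * if (!status1.s.rcv_lnk) + * pr_info("pcsxx0: receive link down\n"); + */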
+ uint64_t reserved_12_13:2; + uint64_t xmtflt:1; + uint64_t rcvflt:1; + uint64_t reserved_3_9:7; + uint64_t tengb_w:1; + uint64_t tengb_x:1; + uint64_t tengb_r:1; + } s; + struct cvmx_pcsxx_status2_reg_s cn52xx; + struct cvmx_pcsxx_status2_reg_s cn52xxp1; + struct cvmx_pcsxx_status2_reg_s cn56xx; + struct cvmx_pcsxx_status2_reg_s cn56xxp1; +}; + +union cvmx_pcsxx_tx_rx_polarity_reg { + uint64_t u64; + struct cvmx_pcsxx_tx_rx_polarity_reg_s { + uint64_t reserved_10_63:54; + uint64_t xor_rxplrt:4; + uint64_t xor_txplrt:4; + uint64_t rxplrt:1; + uint64_t txplrt:1; + } s; + struct cvmx_pcsxx_tx_rx_polarity_reg_s cn52xx; + struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 { + uint64_t reserved_2_63:62; + uint64_t rxplrt:1; + uint64_t txplrt:1; + } cn52xxp1; + struct cvmx_pcsxx_tx_rx_polarity_reg_s cn56xx; + struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 cn56xxp1; +}; + +union cvmx_pcsxx_tx_rx_states_reg { + uint64_t u64; + struct cvmx_pcsxx_tx_rx_states_reg_s { + uint64_t reserved_14_63:50; + uint64_t term_err:1; + uint64_t syn3bad:1; + uint64_t syn2bad:1; + uint64_t syn1bad:1; + uint64_t syn0bad:1; + uint64_t rxbad:1; + uint64_t algn_st:3; + uint64_t rx_st:2; + uint64_t tx_st:3; + } s; + struct cvmx_pcsxx_tx_rx_states_reg_s cn52xx; + struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 { + uint64_t reserved_13_63:51; + uint64_t syn3bad:1; + uint64_t syn2bad:1; + uint64_t syn1bad:1; + uint64_t syn0bad:1; + uint64_t rxbad:1; + uint64_t algn_st:3; + uint64_t rx_st:2; + uint64_t tx_st:3; + } cn52xxp1; + struct cvmx_pcsxx_tx_rx_states_reg_s cn56xx; + struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 cn56xxp1; +}; + +#endif diff --git a/drivers/staging/octeon/cvmx-pip-defs.h b/drivers/staging/octeon/cvmx-pip-defs.h new file mode 100644 index 0000000..5a36910 --- /dev/null +++ b/drivers/staging/octeon/cvmx-pip-defs.h @@ -0,0 +1,1267 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_PIP_DEFS_H__ +#define __CVMX_PIP_DEFS_H__ + +/* + * Enumeration representing the amount of packet processing + * and validation performed by the input hardware. + */ +enum cvmx_pip_port_parse_mode { + /* + * Packet input doesn't perform any processing of the input + * packet. + */ + CVMX_PIP_PORT_CFG_MODE_NONE = 0ull, + /* + * Full packet processing is performed with pointer starting + * at the L2 (ethernet MAC) header. + */ + CVMX_PIP_PORT_CFG_MODE_SKIPL2 = 1ull, + /* + * Input packets are assumed to be IP. 
Results from non IP + * packets is undefined. Pointers reference the beginning of + * the IP header. + */ + CVMX_PIP_PORT_CFG_MODE_SKIPIP = 2ull +}; + +#define CVMX_PIP_BCK_PRS \ + CVMX_ADD_IO_SEG(0x00011800A0000038ull) +#define CVMX_PIP_BIST_STATUS \ + CVMX_ADD_IO_SEG(0x00011800A0000000ull) +#define CVMX_PIP_CRC_CTLX(offset) \ + CVMX_ADD_IO_SEG(0x00011800A0000040ull + (((offset) & 1) * 8)) +#define CVMX_PIP_CRC_IVX(offset) \ + CVMX_ADD_IO_SEG(0x00011800A0000050ull + (((offset) & 1) * 8)) +#define CVMX_PIP_DEC_IPSECX(offset) \ + CVMX_ADD_IO_SEG(0x00011800A0000080ull + (((offset) & 3) * 8)) +#define CVMX_PIP_DSA_SRC_GRP \ + CVMX_ADD_IO_SEG(0x00011800A0000190ull) +#define CVMX_PIP_DSA_VID_GRP \ + CVMX_ADD_IO_SEG(0x00011800A0000198ull) +#define CVMX_PIP_FRM_LEN_CHKX(offset) \ + CVMX_ADD_IO_SEG(0x00011800A0000180ull + (((offset) & 1) * 8)) +#define CVMX_PIP_GBL_CFG \ + CVMX_ADD_IO_SEG(0x00011800A0000028ull) +#define CVMX_PIP_GBL_CTL \ + CVMX_ADD_IO_SEG(0x00011800A0000020ull) +#define CVMX_PIP_HG_PRI_QOS \ + CVMX_ADD_IO_SEG(0x00011800A00001A0ull) +#define CVMX_PIP_INT_EN \ + CVMX_ADD_IO_SEG(0x00011800A0000010ull) +#define CVMX_PIP_INT_REG \ + CVMX_ADD_IO_SEG(0x00011800A0000008ull) +#define CVMX_PIP_IP_OFFSET \ + CVMX_ADD_IO_SEG(0x00011800A0000060ull) +#define CVMX_PIP_PRT_CFGX(offset) \ + CVMX_ADD_IO_SEG(0x00011800A0000200ull + (((offset) & 63) * 8)) +#define CVMX_PIP_PRT_TAGX(offset) \ + CVMX_ADD_IO_SEG(0x00011800A0000400ull + (((offset) & 63) * 8)) +#define CVMX_PIP_QOS_DIFFX(offset) \ + CVMX_ADD_IO_SEG(0x00011800A0000600ull + (((offset) & 63) * 8)) +#define CVMX_PIP_QOS_VLANX(offset) \ + CVMX_ADD_IO_SEG(0x00011800A00000C0ull + (((offset) & 7) * 8)) +#define CVMX_PIP_QOS_WATCHX(offset) \ + CVMX_ADD_IO_SEG(0x00011800A0000100ull + (((offset) & 7) * 8)) +#define CVMX_PIP_RAW_WORD \ + CVMX_ADD_IO_SEG(0x00011800A00000B0ull) +#define CVMX_PIP_SFT_RST \ + CVMX_ADD_IO_SEG(0x00011800A0000030ull) +#define CVMX_PIP_STAT0_PRTX(offset) \ + CVMX_ADD_IO_SEG(0x00011800A0000800ull + (((offset) & 63) * 80)) +#define CVMX_PIP_STAT1_PRTX(offset) \ + CVMX_ADD_IO_SEG(0x00011800A0000808ull + (((offset) & 63) * 80)) +#define CVMX_PIP_STAT2_PRTX(offset) \ + CVMX_ADD_IO_SEG(0x00011800A0000810ull + (((offset) & 63) * 80)) +#define CVMX_PIP_STAT3_PRTX(offset) \ + CVMX_ADD_IO_SEG(0x00011800A0000818ull + (((offset) & 63) * 80)) +#define CVMX_PIP_STAT4_PRTX(offset) \ + CVMX_ADD_IO_SEG(0x00011800A0000820ull + (((offset) & 63) * 80)) +#define CVMX_PIP_STAT5_PRTX(offset) \ + CVMX_ADD_IO_SEG(0x00011800A0000828ull + (((offset) & 63) * 80)) +#define CVMX_PIP_STAT6_PRTX(offset) \ + CVMX_ADD_IO_SEG(0x00011800A0000830ull + (((offset) & 63) * 80)) +#define CVMX_PIP_STAT7_PRTX(offset) \ + CVMX_ADD_IO_SEG(0x00011800A0000838ull + (((offset) & 63) * 80)) +#define CVMX_PIP_STAT8_PRTX(offset) \ + CVMX_ADD_IO_SEG(0x00011800A0000840ull + (((offset) & 63) * 80)) +#define CVMX_PIP_STAT9_PRTX(offset) \ + CVMX_ADD_IO_SEG(0x00011800A0000848ull + (((offset) & 63) * 80)) +#define CVMX_PIP_STAT_CTL \ + CVMX_ADD_IO_SEG(0x00011800A0000018ull) +#define CVMX_PIP_STAT_INB_ERRSX(offset) \ + CVMX_ADD_IO_SEG(0x00011800A0001A10ull + (((offset) & 63) * 32)) +#define CVMX_PIP_STAT_INB_OCTSX(offset) \ + CVMX_ADD_IO_SEG(0x00011800A0001A08ull + (((offset) & 63) * 32)) +#define CVMX_PIP_STAT_INB_PKTSX(offset) \ + CVMX_ADD_IO_SEG(0x00011800A0001A00ull + (((offset) & 63) * 32)) +#define CVMX_PIP_TAG_INCX(offset) \ + CVMX_ADD_IO_SEG(0x00011800A0001800ull + (((offset) & 63) * 8)) +#define CVMX_PIP_TAG_MASK \ + CVMX_ADD_IO_SEG(0x00011800A0000070ull) +#define 
CVMX_PIP_TAG_SECRET \ + CVMX_ADD_IO_SEG(0x00011800A0000068ull) +#define CVMX_PIP_TODO_ENTRY \ + CVMX_ADD_IO_SEG(0x00011800A0000078ull) + +union cvmx_pip_bck_prs { + uint64_t u64; + struct cvmx_pip_bck_prs_s { + uint64_t bckprs:1; + uint64_t reserved_13_62:50; + uint64_t hiwater:5; + uint64_t reserved_5_7:3; + uint64_t lowater:5; + } s; + struct cvmx_pip_bck_prs_s cn38xx; + struct cvmx_pip_bck_prs_s cn38xxp2; + struct cvmx_pip_bck_prs_s cn56xx; + struct cvmx_pip_bck_prs_s cn56xxp1; + struct cvmx_pip_bck_prs_s cn58xx; + struct cvmx_pip_bck_prs_s cn58xxp1; +}; + +union cvmx_pip_bist_status { + uint64_t u64; + struct cvmx_pip_bist_status_s { + uint64_t reserved_18_63:46; + uint64_t bist:18; + } s; + struct cvmx_pip_bist_status_s cn30xx; + struct cvmx_pip_bist_status_s cn31xx; + struct cvmx_pip_bist_status_s cn38xx; + struct cvmx_pip_bist_status_s cn38xxp2; + struct cvmx_pip_bist_status_cn50xx { + uint64_t reserved_17_63:47; + uint64_t bist:17; + } cn50xx; + struct cvmx_pip_bist_status_s cn52xx; + struct cvmx_pip_bist_status_s cn52xxp1; + struct cvmx_pip_bist_status_s cn56xx; + struct cvmx_pip_bist_status_s cn56xxp1; + struct cvmx_pip_bist_status_s cn58xx; + struct cvmx_pip_bist_status_s cn58xxp1; +}; + +union cvmx_pip_crc_ctlx { + uint64_t u64; + struct cvmx_pip_crc_ctlx_s { + uint64_t reserved_2_63:62; + uint64_t invres:1; + uint64_t reflect:1; + } s; + struct cvmx_pip_crc_ctlx_s cn38xx; + struct cvmx_pip_crc_ctlx_s cn38xxp2; + struct cvmx_pip_crc_ctlx_s cn58xx; + struct cvmx_pip_crc_ctlx_s cn58xxp1; +}; + +union cvmx_pip_crc_ivx { + uint64_t u64; + struct cvmx_pip_crc_ivx_s { + uint64_t reserved_32_63:32; + uint64_t iv:32; + } s; + struct cvmx_pip_crc_ivx_s cn38xx; + struct cvmx_pip_crc_ivx_s cn38xxp2; + struct cvmx_pip_crc_ivx_s cn58xx; + struct cvmx_pip_crc_ivx_s cn58xxp1; +}; + +union cvmx_pip_dec_ipsecx { + uint64_t u64; + struct cvmx_pip_dec_ipsecx_s { + uint64_t reserved_18_63:46; + uint64_t tcp:1; + uint64_t udp:1; + uint64_t dprt:16; + } s; + struct cvmx_pip_dec_ipsecx_s cn30xx; + struct cvmx_pip_dec_ipsecx_s cn31xx; + struct cvmx_pip_dec_ipsecx_s cn38xx; + struct cvmx_pip_dec_ipsecx_s cn38xxp2; + struct cvmx_pip_dec_ipsecx_s cn50xx; + struct cvmx_pip_dec_ipsecx_s cn52xx; + struct cvmx_pip_dec_ipsecx_s cn52xxp1; + struct cvmx_pip_dec_ipsecx_s cn56xx; + struct cvmx_pip_dec_ipsecx_s cn56xxp1; + struct cvmx_pip_dec_ipsecx_s cn58xx; + struct cvmx_pip_dec_ipsecx_s cn58xxp1; +}; + +union cvmx_pip_dsa_src_grp { + uint64_t u64; + struct cvmx_pip_dsa_src_grp_s { + uint64_t map15:4; + uint64_t map14:4; + uint64_t map13:4; + uint64_t map12:4; + uint64_t map11:4; + uint64_t map10:4; + uint64_t map9:4; + uint64_t map8:4; + uint64_t map7:4; + uint64_t map6:4; + uint64_t map5:4; + uint64_t map4:4; + uint64_t map3:4; + uint64_t map2:4; + uint64_t map1:4; + uint64_t map0:4; + } s; + struct cvmx_pip_dsa_src_grp_s cn52xx; + struct cvmx_pip_dsa_src_grp_s cn52xxp1; + struct cvmx_pip_dsa_src_grp_s cn56xx; +}; + +union cvmx_pip_dsa_vid_grp { + uint64_t u64; + struct cvmx_pip_dsa_vid_grp_s { + uint64_t map15:4; + uint64_t map14:4; + uint64_t map13:4; + uint64_t map12:4; + uint64_t map11:4; + uint64_t map10:4; + uint64_t map9:4; + uint64_t map8:4; + uint64_t map7:4; + uint64_t map6:4; + uint64_t map5:4; + uint64_t map4:4; + uint64_t map3:4; + uint64_t map2:4; + uint64_t map1:4; + uint64_t map0:4; + } s; + struct cvmx_pip_dsa_vid_grp_s cn52xx; + struct cvmx_pip_dsa_vid_grp_s cn52xxp1; + struct cvmx_pip_dsa_vid_grp_s cn56xx; +}; + +union cvmx_pip_frm_len_chkx { + uint64_t u64; + struct cvmx_pip_frm_len_chkx_s 
{ + uint64_t reserved_32_63:32; + uint64_t maxlen:16; + uint64_t minlen:16; + } s; + struct cvmx_pip_frm_len_chkx_s cn50xx; + struct cvmx_pip_frm_len_chkx_s cn52xx; + struct cvmx_pip_frm_len_chkx_s cn52xxp1; + struct cvmx_pip_frm_len_chkx_s cn56xx; + struct cvmx_pip_frm_len_chkx_s cn56xxp1; +}; + +union cvmx_pip_gbl_cfg { + uint64_t u64; + struct cvmx_pip_gbl_cfg_s { + uint64_t reserved_19_63:45; + uint64_t tag_syn:1; + uint64_t ip6_udp:1; + uint64_t max_l2:1; + uint64_t reserved_11_15:5; + uint64_t raw_shf:3; + uint64_t reserved_3_7:5; + uint64_t nip_shf:3; + } s; + struct cvmx_pip_gbl_cfg_s cn30xx; + struct cvmx_pip_gbl_cfg_s cn31xx; + struct cvmx_pip_gbl_cfg_s cn38xx; + struct cvmx_pip_gbl_cfg_s cn38xxp2; + struct cvmx_pip_gbl_cfg_s cn50xx; + struct cvmx_pip_gbl_cfg_s cn52xx; + struct cvmx_pip_gbl_cfg_s cn52xxp1; + struct cvmx_pip_gbl_cfg_s cn56xx; + struct cvmx_pip_gbl_cfg_s cn56xxp1; + struct cvmx_pip_gbl_cfg_s cn58xx; + struct cvmx_pip_gbl_cfg_s cn58xxp1; +}; + +union cvmx_pip_gbl_ctl { + uint64_t u64; + struct cvmx_pip_gbl_ctl_s { + uint64_t reserved_27_63:37; + uint64_t dsa_grp_tvid:1; + uint64_t dsa_grp_scmd:1; + uint64_t dsa_grp_sid:1; + uint64_t reserved_21_23:3; + uint64_t ring_en:1; + uint64_t reserved_17_19:3; + uint64_t ignrs:1; + uint64_t vs_wqe:1; + uint64_t vs_qos:1; + uint64_t l2_mal:1; + uint64_t tcp_flag:1; + uint64_t l4_len:1; + uint64_t l4_chk:1; + uint64_t l4_prt:1; + uint64_t l4_mal:1; + uint64_t reserved_6_7:2; + uint64_t ip6_eext:2; + uint64_t ip4_opts:1; + uint64_t ip_hop:1; + uint64_t ip_mal:1; + uint64_t ip_chk:1; + } s; + struct cvmx_pip_gbl_ctl_cn30xx { + uint64_t reserved_17_63:47; + uint64_t ignrs:1; + uint64_t vs_wqe:1; + uint64_t vs_qos:1; + uint64_t l2_mal:1; + uint64_t tcp_flag:1; + uint64_t l4_len:1; + uint64_t l4_chk:1; + uint64_t l4_prt:1; + uint64_t l4_mal:1; + uint64_t reserved_6_7:2; + uint64_t ip6_eext:2; + uint64_t ip4_opts:1; + uint64_t ip_hop:1; + uint64_t ip_mal:1; + uint64_t ip_chk:1; + } cn30xx; + struct cvmx_pip_gbl_ctl_cn30xx cn31xx; + struct cvmx_pip_gbl_ctl_cn30xx cn38xx; + struct cvmx_pip_gbl_ctl_cn30xx cn38xxp2; + struct cvmx_pip_gbl_ctl_cn30xx cn50xx; + struct cvmx_pip_gbl_ctl_s cn52xx; + struct cvmx_pip_gbl_ctl_s cn52xxp1; + struct cvmx_pip_gbl_ctl_s cn56xx; + struct cvmx_pip_gbl_ctl_cn56xxp1 { + uint64_t reserved_21_63:43; + uint64_t ring_en:1; + uint64_t reserved_17_19:3; + uint64_t ignrs:1; + uint64_t vs_wqe:1; + uint64_t vs_qos:1; + uint64_t l2_mal:1; + uint64_t tcp_flag:1; + uint64_t l4_len:1; + uint64_t l4_chk:1; + uint64_t l4_prt:1; + uint64_t l4_mal:1; + uint64_t reserved_6_7:2; + uint64_t ip6_eext:2; + uint64_t ip4_opts:1; + uint64_t ip_hop:1; + uint64_t ip_mal:1; + uint64_t ip_chk:1; + } cn56xxp1; + struct cvmx_pip_gbl_ctl_cn30xx cn58xx; + struct cvmx_pip_gbl_ctl_cn30xx cn58xxp1; +}; + +union cvmx_pip_hg_pri_qos { + uint64_t u64; + struct cvmx_pip_hg_pri_qos_s { + uint64_t reserved_11_63:53; + uint64_t qos:3; + uint64_t reserved_6_7:2; + uint64_t pri:6; + } s; + struct cvmx_pip_hg_pri_qos_s cn52xx; + struct cvmx_pip_hg_pri_qos_s cn52xxp1; + struct cvmx_pip_hg_pri_qos_s cn56xx; +}; + +union cvmx_pip_int_en { + uint64_t u64; + struct cvmx_pip_int_en_s { + uint64_t reserved_13_63:51; + uint64_t punyerr:1; + uint64_t lenerr:1; + uint64_t maxerr:1; + uint64_t minerr:1; + uint64_t beperr:1; + uint64_t feperr:1; + uint64_t todoovr:1; + uint64_t skprunt:1; + uint64_t badtag:1; + uint64_t prtnxa:1; + uint64_t bckprs:1; + uint64_t crcerr:1; + uint64_t pktdrp:1; + } s; + struct cvmx_pip_int_en_cn30xx { + uint64_t reserved_9_63:55; + 
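/* + * A hedged sketch of enabling a subset of these error interrupts + * through the common "s" view (cvmx_read_csr()/cvmx_write_csr() as + * used elsewhere in this SDK): + * + * union cvmx_pip_int_en en; + * en.u64 = cvmx_read_csr(CVMX_PIP_INT_EN); + * en.s.crcerr = 1; + * en.s.pktdrp = 1; + * cvmx_write_csr(CVMX_PIP_INT_EN, en.u64); + */ +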
uint64_t beperr:1; + uint64_t feperr:1; + uint64_t todoovr:1; + uint64_t skprunt:1; + uint64_t badtag:1; + uint64_t prtnxa:1; + uint64_t bckprs:1; + uint64_t crcerr:1; + uint64_t pktdrp:1; + } cn30xx; + struct cvmx_pip_int_en_cn30xx cn31xx; + struct cvmx_pip_int_en_cn30xx cn38xx; + struct cvmx_pip_int_en_cn30xx cn38xxp2; + struct cvmx_pip_int_en_cn50xx { + uint64_t reserved_12_63:52; + uint64_t lenerr:1; + uint64_t maxerr:1; + uint64_t minerr:1; + uint64_t beperr:1; + uint64_t feperr:1; + uint64_t todoovr:1; + uint64_t skprunt:1; + uint64_t badtag:1; + uint64_t prtnxa:1; + uint64_t bckprs:1; + uint64_t reserved_1_1:1; + uint64_t pktdrp:1; + } cn50xx; + struct cvmx_pip_int_en_cn52xx { + uint64_t reserved_13_63:51; + uint64_t punyerr:1; + uint64_t lenerr:1; + uint64_t maxerr:1; + uint64_t minerr:1; + uint64_t beperr:1; + uint64_t feperr:1; + uint64_t todoovr:1; + uint64_t skprunt:1; + uint64_t badtag:1; + uint64_t prtnxa:1; + uint64_t bckprs:1; + uint64_t reserved_1_1:1; + uint64_t pktdrp:1; + } cn52xx; + struct cvmx_pip_int_en_cn52xx cn52xxp1; + struct cvmx_pip_int_en_s cn56xx; + struct cvmx_pip_int_en_cn56xxp1 { + uint64_t reserved_12_63:52; + uint64_t lenerr:1; + uint64_t maxerr:1; + uint64_t minerr:1; + uint64_t beperr:1; + uint64_t feperr:1; + uint64_t todoovr:1; + uint64_t skprunt:1; + uint64_t badtag:1; + uint64_t prtnxa:1; + uint64_t bckprs:1; + uint64_t crcerr:1; + uint64_t pktdrp:1; + } cn56xxp1; + struct cvmx_pip_int_en_cn58xx { + uint64_t reserved_13_63:51; + uint64_t punyerr:1; + uint64_t reserved_9_11:3; + uint64_t beperr:1; + uint64_t feperr:1; + uint64_t todoovr:1; + uint64_t skprunt:1; + uint64_t badtag:1; + uint64_t prtnxa:1; + uint64_t bckprs:1; + uint64_t crcerr:1; + uint64_t pktdrp:1; + } cn58xx; + struct cvmx_pip_int_en_cn30xx cn58xxp1; +}; + +union cvmx_pip_int_reg { + uint64_t u64; + struct cvmx_pip_int_reg_s { + uint64_t reserved_13_63:51; + uint64_t punyerr:1; + uint64_t lenerr:1; + uint64_t maxerr:1; + uint64_t minerr:1; + uint64_t beperr:1; + uint64_t feperr:1; + uint64_t todoovr:1; + uint64_t skprunt:1; + uint64_t badtag:1; + uint64_t prtnxa:1; + uint64_t bckprs:1; + uint64_t crcerr:1; + uint64_t pktdrp:1; + } s; + struct cvmx_pip_int_reg_cn30xx { + uint64_t reserved_9_63:55; + uint64_t beperr:1; + uint64_t feperr:1; + uint64_t todoovr:1; + uint64_t skprunt:1; + uint64_t badtag:1; + uint64_t prtnxa:1; + uint64_t bckprs:1; + uint64_t crcerr:1; + uint64_t pktdrp:1; + } cn30xx; + struct cvmx_pip_int_reg_cn30xx cn31xx; + struct cvmx_pip_int_reg_cn30xx cn38xx; + struct cvmx_pip_int_reg_cn30xx cn38xxp2; + struct cvmx_pip_int_reg_cn50xx { + uint64_t reserved_12_63:52; + uint64_t lenerr:1; + uint64_t maxerr:1; + uint64_t minerr:1; + uint64_t beperr:1; + uint64_t feperr:1; + uint64_t todoovr:1; + uint64_t skprunt:1; + uint64_t badtag:1; + uint64_t prtnxa:1; + uint64_t bckprs:1; + uint64_t reserved_1_1:1; + uint64_t pktdrp:1; + } cn50xx; + struct cvmx_pip_int_reg_cn52xx { + uint64_t reserved_13_63:51; + uint64_t punyerr:1; + uint64_t lenerr:1; + uint64_t maxerr:1; + uint64_t minerr:1; + uint64_t beperr:1; + uint64_t feperr:1; + uint64_t todoovr:1; + uint64_t skprunt:1; + uint64_t badtag:1; + uint64_t prtnxa:1; + uint64_t bckprs:1; + uint64_t reserved_1_1:1; + uint64_t pktdrp:1; + } cn52xx; + struct cvmx_pip_int_reg_cn52xx cn52xxp1; + struct cvmx_pip_int_reg_s cn56xx; + struct cvmx_pip_int_reg_cn56xxp1 { + uint64_t reserved_12_63:52; + uint64_t lenerr:1; + uint64_t maxerr:1; + uint64_t minerr:1; + uint64_t beperr:1; + uint64_t feperr:1; + uint64_t todoovr:1; + uint64_t 
skprunt:1; + uint64_t badtag:1; + uint64_t prtnxa:1; + uint64_t bckprs:1; + uint64_t crcerr:1; + uint64_t pktdrp:1; + } cn56xxp1; + struct cvmx_pip_int_reg_cn58xx { + uint64_t reserved_13_63:51; + uint64_t punyerr:1; + uint64_t reserved_9_11:3; + uint64_t beperr:1; + uint64_t feperr:1; + uint64_t todoovr:1; + uint64_t skprunt:1; + uint64_t badtag:1; + uint64_t prtnxa:1; + uint64_t bckprs:1; + uint64_t crcerr:1; + uint64_t pktdrp:1; + } cn58xx; + struct cvmx_pip_int_reg_cn30xx cn58xxp1; +}; + +union cvmx_pip_ip_offset { + uint64_t u64; + struct cvmx_pip_ip_offset_s { + uint64_t reserved_3_63:61; + uint64_t offset:3; + } s; + struct cvmx_pip_ip_offset_s cn30xx; + struct cvmx_pip_ip_offset_s cn31xx; + struct cvmx_pip_ip_offset_s cn38xx; + struct cvmx_pip_ip_offset_s cn38xxp2; + struct cvmx_pip_ip_offset_s cn50xx; + struct cvmx_pip_ip_offset_s cn52xx; + struct cvmx_pip_ip_offset_s cn52xxp1; + struct cvmx_pip_ip_offset_s cn56xx; + struct cvmx_pip_ip_offset_s cn56xxp1; + struct cvmx_pip_ip_offset_s cn58xx; + struct cvmx_pip_ip_offset_s cn58xxp1; +}; + +union cvmx_pip_prt_cfgx { + uint64_t u64; + struct cvmx_pip_prt_cfgx_s { + uint64_t reserved_53_63:11; + uint64_t pad_len:1; + uint64_t vlan_len:1; + uint64_t lenerr_en:1; + uint64_t maxerr_en:1; + uint64_t minerr_en:1; + uint64_t grp_wat_47:4; + uint64_t qos_wat_47:4; + uint64_t reserved_37_39:3; + uint64_t rawdrp:1; + uint64_t tag_inc:2; + uint64_t dyn_rs:1; + uint64_t inst_hdr:1; + uint64_t grp_wat:4; + uint64_t hg_qos:1; + uint64_t qos:3; + uint64_t qos_wat:4; + uint64_t qos_vsel:1; + uint64_t qos_vod:1; + uint64_t qos_diff:1; + uint64_t qos_vlan:1; + uint64_t reserved_13_15:3; + uint64_t crc_en:1; + uint64_t higig_en:1; + uint64_t dsa_en:1; + uint64_t mode:2; + uint64_t reserved_7_7:1; + uint64_t skip:7; + } s; + struct cvmx_pip_prt_cfgx_cn30xx { + uint64_t reserved_37_63:27; + uint64_t rawdrp:1; + uint64_t tag_inc:2; + uint64_t dyn_rs:1; + uint64_t inst_hdr:1; + uint64_t grp_wat:4; + uint64_t reserved_27_27:1; + uint64_t qos:3; + uint64_t qos_wat:4; + uint64_t reserved_18_19:2; + uint64_t qos_diff:1; + uint64_t qos_vlan:1; + uint64_t reserved_10_15:6; + uint64_t mode:2; + uint64_t reserved_7_7:1; + uint64_t skip:7; + } cn30xx; + struct cvmx_pip_prt_cfgx_cn30xx cn31xx; + struct cvmx_pip_prt_cfgx_cn38xx { + uint64_t reserved_37_63:27; + uint64_t rawdrp:1; + uint64_t tag_inc:2; + uint64_t dyn_rs:1; + uint64_t inst_hdr:1; + uint64_t grp_wat:4; + uint64_t reserved_27_27:1; + uint64_t qos:3; + uint64_t qos_wat:4; + uint64_t reserved_18_19:2; + uint64_t qos_diff:1; + uint64_t qos_vlan:1; + uint64_t reserved_13_15:3; + uint64_t crc_en:1; + uint64_t reserved_10_11:2; + uint64_t mode:2; + uint64_t reserved_7_7:1; + uint64_t skip:7; + } cn38xx; + struct cvmx_pip_prt_cfgx_cn38xx cn38xxp2; + struct cvmx_pip_prt_cfgx_cn50xx { + uint64_t reserved_53_63:11; + uint64_t pad_len:1; + uint64_t vlan_len:1; + uint64_t lenerr_en:1; + uint64_t maxerr_en:1; + uint64_t minerr_en:1; + uint64_t grp_wat_47:4; + uint64_t qos_wat_47:4; + uint64_t reserved_37_39:3; + uint64_t rawdrp:1; + uint64_t tag_inc:2; + uint64_t dyn_rs:1; + uint64_t inst_hdr:1; + uint64_t grp_wat:4; + uint64_t reserved_27_27:1; + uint64_t qos:3; + uint64_t qos_wat:4; + uint64_t reserved_19_19:1; + uint64_t qos_vod:1; + uint64_t qos_diff:1; + uint64_t qos_vlan:1; + uint64_t reserved_13_15:3; + uint64_t crc_en:1; + uint64_t reserved_10_11:2; + uint64_t mode:2; + uint64_t reserved_7_7:1; + uint64_t skip:7; + } cn50xx; + struct cvmx_pip_prt_cfgx_s cn52xx; + struct cvmx_pip_prt_cfgx_s cn52xxp1; + struct 
cvmx_pip_prt_cfgx_s cn56xx; + struct cvmx_pip_prt_cfgx_cn50xx cn56xxp1; + struct cvmx_pip_prt_cfgx_cn58xx { + uint64_t reserved_37_63:27; + uint64_t rawdrp:1; + uint64_t tag_inc:2; + uint64_t dyn_rs:1; + uint64_t inst_hdr:1; + uint64_t grp_wat:4; + uint64_t reserved_27_27:1; + uint64_t qos:3; + uint64_t qos_wat:4; + uint64_t reserved_19_19:1; + uint64_t qos_vod:1; + uint64_t qos_diff:1; + uint64_t qos_vlan:1; + uint64_t reserved_13_15:3; + uint64_t crc_en:1; + uint64_t reserved_10_11:2; + uint64_t mode:2; + uint64_t reserved_7_7:1; + uint64_t skip:7; + } cn58xx; + struct cvmx_pip_prt_cfgx_cn58xx cn58xxp1; +}; + +union cvmx_pip_prt_tagx { + uint64_t u64; + struct cvmx_pip_prt_tagx_s { + uint64_t reserved_40_63:24; + uint64_t grptagbase:4; + uint64_t grptagmask:4; + uint64_t grptag:1; + uint64_t grptag_mskip:1; + uint64_t tag_mode:2; + uint64_t inc_vs:2; + uint64_t inc_vlan:1; + uint64_t inc_prt_flag:1; + uint64_t ip6_dprt_flag:1; + uint64_t ip4_dprt_flag:1; + uint64_t ip6_sprt_flag:1; + uint64_t ip4_sprt_flag:1; + uint64_t ip6_nxth_flag:1; + uint64_t ip4_pctl_flag:1; + uint64_t ip6_dst_flag:1; + uint64_t ip4_dst_flag:1; + uint64_t ip6_src_flag:1; + uint64_t ip4_src_flag:1; + uint64_t tcp6_tag_type:2; + uint64_t tcp4_tag_type:2; + uint64_t ip6_tag_type:2; + uint64_t ip4_tag_type:2; + uint64_t non_tag_type:2; + uint64_t grp:4; + } s; + struct cvmx_pip_prt_tagx_cn30xx { + uint64_t reserved_40_63:24; + uint64_t grptagbase:4; + uint64_t grptagmask:4; + uint64_t grptag:1; + uint64_t reserved_30_30:1; + uint64_t tag_mode:2; + uint64_t inc_vs:2; + uint64_t inc_vlan:1; + uint64_t inc_prt_flag:1; + uint64_t ip6_dprt_flag:1; + uint64_t ip4_dprt_flag:1; + uint64_t ip6_sprt_flag:1; + uint64_t ip4_sprt_flag:1; + uint64_t ip6_nxth_flag:1; + uint64_t ip4_pctl_flag:1; + uint64_t ip6_dst_flag:1; + uint64_t ip4_dst_flag:1; + uint64_t ip6_src_flag:1; + uint64_t ip4_src_flag:1; + uint64_t tcp6_tag_type:2; + uint64_t tcp4_tag_type:2; + uint64_t ip6_tag_type:2; + uint64_t ip4_tag_type:2; + uint64_t non_tag_type:2; + uint64_t grp:4; + } cn30xx; + struct cvmx_pip_prt_tagx_cn30xx cn31xx; + struct cvmx_pip_prt_tagx_cn30xx cn38xx; + struct cvmx_pip_prt_tagx_cn30xx cn38xxp2; + struct cvmx_pip_prt_tagx_s cn50xx; + struct cvmx_pip_prt_tagx_s cn52xx; + struct cvmx_pip_prt_tagx_s cn52xxp1; + struct cvmx_pip_prt_tagx_s cn56xx; + struct cvmx_pip_prt_tagx_s cn56xxp1; + struct cvmx_pip_prt_tagx_cn30xx cn58xx; + struct cvmx_pip_prt_tagx_cn30xx cn58xxp1; +}; + +union cvmx_pip_qos_diffx { + uint64_t u64; + struct cvmx_pip_qos_diffx_s { + uint64_t reserved_3_63:61; + uint64_t qos:3; + } s; + struct cvmx_pip_qos_diffx_s cn30xx; + struct cvmx_pip_qos_diffx_s cn31xx; + struct cvmx_pip_qos_diffx_s cn38xx; + struct cvmx_pip_qos_diffx_s cn38xxp2; + struct cvmx_pip_qos_diffx_s cn50xx; + struct cvmx_pip_qos_diffx_s cn52xx; + struct cvmx_pip_qos_diffx_s cn52xxp1; + struct cvmx_pip_qos_diffx_s cn56xx; + struct cvmx_pip_qos_diffx_s cn56xxp1; + struct cvmx_pip_qos_diffx_s cn58xx; + struct cvmx_pip_qos_diffx_s cn58xxp1; +}; + +union cvmx_pip_qos_vlanx { + uint64_t u64; + struct cvmx_pip_qos_vlanx_s { + uint64_t reserved_7_63:57; + uint64_t qos1:3; + uint64_t reserved_3_3:1; + uint64_t qos:3; + } s; + struct cvmx_pip_qos_vlanx_cn30xx { + uint64_t reserved_3_63:61; + uint64_t qos:3; + } cn30xx; + struct cvmx_pip_qos_vlanx_cn30xx cn31xx; + struct cvmx_pip_qos_vlanx_cn30xx cn38xx; + struct cvmx_pip_qos_vlanx_cn30xx cn38xxp2; + struct cvmx_pip_qos_vlanx_cn30xx cn50xx; + struct cvmx_pip_qos_vlanx_s cn52xx; + struct cvmx_pip_qos_vlanx_s cn52xxp1; + 
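/* + * These per-priority CSRs back the cvmx_pip_config_vlan_qos() helper + * declared in cvmx-pip.h; a hedged one-line sketch mapping VLAN + * priority 5 to QoS queue 2: + * + * cvmx_pip_config_vlan_qos(5, 2); + */ +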
struct cvmx_pip_qos_vlanx_s cn56xx; + struct cvmx_pip_qos_vlanx_cn30xx cn56xxp1; + struct cvmx_pip_qos_vlanx_cn30xx cn58xx; + struct cvmx_pip_qos_vlanx_cn30xx cn58xxp1; +}; + +union cvmx_pip_qos_watchx { + uint64_t u64; + struct cvmx_pip_qos_watchx_s { + uint64_t reserved_48_63:16; + uint64_t mask:16; + uint64_t reserved_28_31:4; + uint64_t grp:4; + uint64_t reserved_23_23:1; + uint64_t qos:3; + uint64_t reserved_19_19:1; + uint64_t match_type:3; + uint64_t match_value:16; + } s; + struct cvmx_pip_qos_watchx_cn30xx { + uint64_t reserved_48_63:16; + uint64_t mask:16; + uint64_t reserved_28_31:4; + uint64_t grp:4; + uint64_t reserved_23_23:1; + uint64_t qos:3; + uint64_t reserved_18_19:2; + uint64_t match_type:2; + uint64_t match_value:16; + } cn30xx; + struct cvmx_pip_qos_watchx_cn30xx cn31xx; + struct cvmx_pip_qos_watchx_cn30xx cn38xx; + struct cvmx_pip_qos_watchx_cn30xx cn38xxp2; + struct cvmx_pip_qos_watchx_s cn50xx; + struct cvmx_pip_qos_watchx_s cn52xx; + struct cvmx_pip_qos_watchx_s cn52xxp1; + struct cvmx_pip_qos_watchx_s cn56xx; + struct cvmx_pip_qos_watchx_s cn56xxp1; + struct cvmx_pip_qos_watchx_cn30xx cn58xx; + struct cvmx_pip_qos_watchx_cn30xx cn58xxp1; +}; + +union cvmx_pip_raw_word { + uint64_t u64; + struct cvmx_pip_raw_word_s { + uint64_t reserved_56_63:8; + uint64_t word:56; + } s; + struct cvmx_pip_raw_word_s cn30xx; + struct cvmx_pip_raw_word_s cn31xx; + struct cvmx_pip_raw_word_s cn38xx; + struct cvmx_pip_raw_word_s cn38xxp2; + struct cvmx_pip_raw_word_s cn50xx; + struct cvmx_pip_raw_word_s cn52xx; + struct cvmx_pip_raw_word_s cn52xxp1; + struct cvmx_pip_raw_word_s cn56xx; + struct cvmx_pip_raw_word_s cn56xxp1; + struct cvmx_pip_raw_word_s cn58xx; + struct cvmx_pip_raw_word_s cn58xxp1; +}; + +union cvmx_pip_sft_rst { + uint64_t u64; + struct cvmx_pip_sft_rst_s { + uint64_t reserved_1_63:63; + uint64_t rst:1; + } s; + struct cvmx_pip_sft_rst_s cn30xx; + struct cvmx_pip_sft_rst_s cn31xx; + struct cvmx_pip_sft_rst_s cn38xx; + struct cvmx_pip_sft_rst_s cn50xx; + struct cvmx_pip_sft_rst_s cn52xx; + struct cvmx_pip_sft_rst_s cn52xxp1; + struct cvmx_pip_sft_rst_s cn56xx; + struct cvmx_pip_sft_rst_s cn56xxp1; + struct cvmx_pip_sft_rst_s cn58xx; + struct cvmx_pip_sft_rst_s cn58xxp1; +}; + +union cvmx_pip_stat0_prtx { + uint64_t u64; + struct cvmx_pip_stat0_prtx_s { + uint64_t drp_pkts:32; + uint64_t drp_octs:32; + } s; + struct cvmx_pip_stat0_prtx_s cn30xx; + struct cvmx_pip_stat0_prtx_s cn31xx; + struct cvmx_pip_stat0_prtx_s cn38xx; + struct cvmx_pip_stat0_prtx_s cn38xxp2; + struct cvmx_pip_stat0_prtx_s cn50xx; + struct cvmx_pip_stat0_prtx_s cn52xx; + struct cvmx_pip_stat0_prtx_s cn52xxp1; + struct cvmx_pip_stat0_prtx_s cn56xx; + struct cvmx_pip_stat0_prtx_s cn56xxp1; + struct cvmx_pip_stat0_prtx_s cn58xx; + struct cvmx_pip_stat0_prtx_s cn58xxp1; +}; + +union cvmx_pip_stat1_prtx { + uint64_t u64; + struct cvmx_pip_stat1_prtx_s { + uint64_t reserved_48_63:16; + uint64_t octs:48; + } s; + struct cvmx_pip_stat1_prtx_s cn30xx; + struct cvmx_pip_stat1_prtx_s cn31xx; + struct cvmx_pip_stat1_prtx_s cn38xx; + struct cvmx_pip_stat1_prtx_s cn38xxp2; + struct cvmx_pip_stat1_prtx_s cn50xx; + struct cvmx_pip_stat1_prtx_s cn52xx; + struct cvmx_pip_stat1_prtx_s cn52xxp1; + struct cvmx_pip_stat1_prtx_s cn56xx; + struct cvmx_pip_stat1_prtx_s cn56xxp1; + struct cvmx_pip_stat1_prtx_s cn58xx; + struct cvmx_pip_stat1_prtx_s cn58xxp1; +}; + +union cvmx_pip_stat2_prtx { + uint64_t u64; + struct cvmx_pip_stat2_prtx_s { + uint64_t pkts:32; + uint64_t raw:32; + } s; + struct cvmx_pip_stat2_prtx_s cn30xx; 
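+ /* + * Most of the PIP_STATn_PRT CSRs pack two 32-bit counters into one + * 64-bit word (STAT1 instead holds a single 48-bit octet count), and + * PIP_STAT_CTL[RDCLR] controls whether reads also clear them (see + * cvmx_pip_get_port_status() in cvmx-pip.h). A hedged sketch reading + * this register for port 0, with pr_info() only for illustration: + * + * union cvmx_pip_stat2_prtx stat2; + * stat2.u64 = cvmx_read_csr(CVMX_PIP_STAT2_PRTX(0)); + * pr_info("pip0: %u pkts, %u raw\n", + * (unsigned)stat2.s.pkts, (unsigned)stat2.s.raw); + */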
+ struct cvmx_pip_stat2_prtx_s cn31xx; + struct cvmx_pip_stat2_prtx_s cn38xx; + struct cvmx_pip_stat2_prtx_s cn38xxp2; + struct cvmx_pip_stat2_prtx_s cn50xx; + struct cvmx_pip_stat2_prtx_s cn52xx; + struct cvmx_pip_stat2_prtx_s cn52xxp1; + struct cvmx_pip_stat2_prtx_s cn56xx; + struct cvmx_pip_stat2_prtx_s cn56xxp1; + struct cvmx_pip_stat2_prtx_s cn58xx; + struct cvmx_pip_stat2_prtx_s cn58xxp1; +}; + +union cvmx_pip_stat3_prtx { + uint64_t u64; + struct cvmx_pip_stat3_prtx_s { + uint64_t bcst:32; + uint64_t mcst:32; + } s; + struct cvmx_pip_stat3_prtx_s cn30xx; + struct cvmx_pip_stat3_prtx_s cn31xx; + struct cvmx_pip_stat3_prtx_s cn38xx; + struct cvmx_pip_stat3_prtx_s cn38xxp2; + struct cvmx_pip_stat3_prtx_s cn50xx; + struct cvmx_pip_stat3_prtx_s cn52xx; + struct cvmx_pip_stat3_prtx_s cn52xxp1; + struct cvmx_pip_stat3_prtx_s cn56xx; + struct cvmx_pip_stat3_prtx_s cn56xxp1; + struct cvmx_pip_stat3_prtx_s cn58xx; + struct cvmx_pip_stat3_prtx_s cn58xxp1; +}; + +union cvmx_pip_stat4_prtx { + uint64_t u64; + struct cvmx_pip_stat4_prtx_s { + uint64_t h65to127:32; + uint64_t h64:32; + } s; + struct cvmx_pip_stat4_prtx_s cn30xx; + struct cvmx_pip_stat4_prtx_s cn31xx; + struct cvmx_pip_stat4_prtx_s cn38xx; + struct cvmx_pip_stat4_prtx_s cn38xxp2; + struct cvmx_pip_stat4_prtx_s cn50xx; + struct cvmx_pip_stat4_prtx_s cn52xx; + struct cvmx_pip_stat4_prtx_s cn52xxp1; + struct cvmx_pip_stat4_prtx_s cn56xx; + struct cvmx_pip_stat4_prtx_s cn56xxp1; + struct cvmx_pip_stat4_prtx_s cn58xx; + struct cvmx_pip_stat4_prtx_s cn58xxp1; +}; + +union cvmx_pip_stat5_prtx { + uint64_t u64; + struct cvmx_pip_stat5_prtx_s { + uint64_t h256to511:32; + uint64_t h128to255:32; + } s; + struct cvmx_pip_stat5_prtx_s cn30xx; + struct cvmx_pip_stat5_prtx_s cn31xx; + struct cvmx_pip_stat5_prtx_s cn38xx; + struct cvmx_pip_stat5_prtx_s cn38xxp2; + struct cvmx_pip_stat5_prtx_s cn50xx; + struct cvmx_pip_stat5_prtx_s cn52xx; + struct cvmx_pip_stat5_prtx_s cn52xxp1; + struct cvmx_pip_stat5_prtx_s cn56xx; + struct cvmx_pip_stat5_prtx_s cn56xxp1; + struct cvmx_pip_stat5_prtx_s cn58xx; + struct cvmx_pip_stat5_prtx_s cn58xxp1; +}; + +union cvmx_pip_stat6_prtx { + uint64_t u64; + struct cvmx_pip_stat6_prtx_s { + uint64_t h1024to1518:32; + uint64_t h512to1023:32; + } s; + struct cvmx_pip_stat6_prtx_s cn30xx; + struct cvmx_pip_stat6_prtx_s cn31xx; + struct cvmx_pip_stat6_prtx_s cn38xx; + struct cvmx_pip_stat6_prtx_s cn38xxp2; + struct cvmx_pip_stat6_prtx_s cn50xx; + struct cvmx_pip_stat6_prtx_s cn52xx; + struct cvmx_pip_stat6_prtx_s cn52xxp1; + struct cvmx_pip_stat6_prtx_s cn56xx; + struct cvmx_pip_stat6_prtx_s cn56xxp1; + struct cvmx_pip_stat6_prtx_s cn58xx; + struct cvmx_pip_stat6_prtx_s cn58xxp1; +}; + +union cvmx_pip_stat7_prtx { + uint64_t u64; + struct cvmx_pip_stat7_prtx_s { + uint64_t fcs:32; + uint64_t h1519:32; + } s; + struct cvmx_pip_stat7_prtx_s cn30xx; + struct cvmx_pip_stat7_prtx_s cn31xx; + struct cvmx_pip_stat7_prtx_s cn38xx; + struct cvmx_pip_stat7_prtx_s cn38xxp2; + struct cvmx_pip_stat7_prtx_s cn50xx; + struct cvmx_pip_stat7_prtx_s cn52xx; + struct cvmx_pip_stat7_prtx_s cn52xxp1; + struct cvmx_pip_stat7_prtx_s cn56xx; + struct cvmx_pip_stat7_prtx_s cn56xxp1; + struct cvmx_pip_stat7_prtx_s cn58xx; + struct cvmx_pip_stat7_prtx_s cn58xxp1; +}; + +union cvmx_pip_stat8_prtx { + uint64_t u64; + struct cvmx_pip_stat8_prtx_s { + uint64_t frag:32; + uint64_t undersz:32; + } s; + struct cvmx_pip_stat8_prtx_s cn30xx; + struct cvmx_pip_stat8_prtx_s cn31xx; + struct cvmx_pip_stat8_prtx_s cn38xx; + struct cvmx_pip_stat8_prtx_s 
cn38xxp2; + struct cvmx_pip_stat8_prtx_s cn50xx; + struct cvmx_pip_stat8_prtx_s cn52xx; + struct cvmx_pip_stat8_prtx_s cn52xxp1; + struct cvmx_pip_stat8_prtx_s cn56xx; + struct cvmx_pip_stat8_prtx_s cn56xxp1; + struct cvmx_pip_stat8_prtx_s cn58xx; + struct cvmx_pip_stat8_prtx_s cn58xxp1; +}; + +union cvmx_pip_stat9_prtx { + uint64_t u64; + struct cvmx_pip_stat9_prtx_s { + uint64_t jabber:32; + uint64_t oversz:32; + } s; + struct cvmx_pip_stat9_prtx_s cn30xx; + struct cvmx_pip_stat9_prtx_s cn31xx; + struct cvmx_pip_stat9_prtx_s cn38xx; + struct cvmx_pip_stat9_prtx_s cn38xxp2; + struct cvmx_pip_stat9_prtx_s cn50xx; + struct cvmx_pip_stat9_prtx_s cn52xx; + struct cvmx_pip_stat9_prtx_s cn52xxp1; + struct cvmx_pip_stat9_prtx_s cn56xx; + struct cvmx_pip_stat9_prtx_s cn56xxp1; + struct cvmx_pip_stat9_prtx_s cn58xx; + struct cvmx_pip_stat9_prtx_s cn58xxp1; +}; + +union cvmx_pip_stat_ctl { + uint64_t u64; + struct cvmx_pip_stat_ctl_s { + uint64_t reserved_1_63:63; + uint64_t rdclr:1; + } s; + struct cvmx_pip_stat_ctl_s cn30xx; + struct cvmx_pip_stat_ctl_s cn31xx; + struct cvmx_pip_stat_ctl_s cn38xx; + struct cvmx_pip_stat_ctl_s cn38xxp2; + struct cvmx_pip_stat_ctl_s cn50xx; + struct cvmx_pip_stat_ctl_s cn52xx; + struct cvmx_pip_stat_ctl_s cn52xxp1; + struct cvmx_pip_stat_ctl_s cn56xx; + struct cvmx_pip_stat_ctl_s cn56xxp1; + struct cvmx_pip_stat_ctl_s cn58xx; + struct cvmx_pip_stat_ctl_s cn58xxp1; +}; + +union cvmx_pip_stat_inb_errsx { + uint64_t u64; + struct cvmx_pip_stat_inb_errsx_s { + uint64_t reserved_16_63:48; + uint64_t errs:16; + } s; + struct cvmx_pip_stat_inb_errsx_s cn30xx; + struct cvmx_pip_stat_inb_errsx_s cn31xx; + struct cvmx_pip_stat_inb_errsx_s cn38xx; + struct cvmx_pip_stat_inb_errsx_s cn38xxp2; + struct cvmx_pip_stat_inb_errsx_s cn50xx; + struct cvmx_pip_stat_inb_errsx_s cn52xx; + struct cvmx_pip_stat_inb_errsx_s cn52xxp1; + struct cvmx_pip_stat_inb_errsx_s cn56xx; + struct cvmx_pip_stat_inb_errsx_s cn56xxp1; + struct cvmx_pip_stat_inb_errsx_s cn58xx; + struct cvmx_pip_stat_inb_errsx_s cn58xxp1; +}; + +union cvmx_pip_stat_inb_octsx { + uint64_t u64; + struct cvmx_pip_stat_inb_octsx_s { + uint64_t reserved_48_63:16; + uint64_t octs:48; + } s; + struct cvmx_pip_stat_inb_octsx_s cn30xx; + struct cvmx_pip_stat_inb_octsx_s cn31xx; + struct cvmx_pip_stat_inb_octsx_s cn38xx; + struct cvmx_pip_stat_inb_octsx_s cn38xxp2; + struct cvmx_pip_stat_inb_octsx_s cn50xx; + struct cvmx_pip_stat_inb_octsx_s cn52xx; + struct cvmx_pip_stat_inb_octsx_s cn52xxp1; + struct cvmx_pip_stat_inb_octsx_s cn56xx; + struct cvmx_pip_stat_inb_octsx_s cn56xxp1; + struct cvmx_pip_stat_inb_octsx_s cn58xx; + struct cvmx_pip_stat_inb_octsx_s cn58xxp1; +}; + +union cvmx_pip_stat_inb_pktsx { + uint64_t u64; + struct cvmx_pip_stat_inb_pktsx_s { + uint64_t reserved_32_63:32; + uint64_t pkts:32; + } s; + struct cvmx_pip_stat_inb_pktsx_s cn30xx; + struct cvmx_pip_stat_inb_pktsx_s cn31xx; + struct cvmx_pip_stat_inb_pktsx_s cn38xx; + struct cvmx_pip_stat_inb_pktsx_s cn38xxp2; + struct cvmx_pip_stat_inb_pktsx_s cn50xx; + struct cvmx_pip_stat_inb_pktsx_s cn52xx; + struct cvmx_pip_stat_inb_pktsx_s cn52xxp1; + struct cvmx_pip_stat_inb_pktsx_s cn56xx; + struct cvmx_pip_stat_inb_pktsx_s cn56xxp1; + struct cvmx_pip_stat_inb_pktsx_s cn58xx; + struct cvmx_pip_stat_inb_pktsx_s cn58xxp1; +}; + +union cvmx_pip_tag_incx { + uint64_t u64; + struct cvmx_pip_tag_incx_s { + uint64_t reserved_8_63:56; + uint64_t en:8; + } s; + struct cvmx_pip_tag_incx_s cn30xx; + struct cvmx_pip_tag_incx_s cn31xx; + struct cvmx_pip_tag_incx_s cn38xx; + struct 
cvmx_pip_tag_incx_s cn38xxp2; + struct cvmx_pip_tag_incx_s cn50xx; + struct cvmx_pip_tag_incx_s cn52xx; + struct cvmx_pip_tag_incx_s cn52xxp1; + struct cvmx_pip_tag_incx_s cn56xx; + struct cvmx_pip_tag_incx_s cn56xxp1; + struct cvmx_pip_tag_incx_s cn58xx; + struct cvmx_pip_tag_incx_s cn58xxp1; +}; + +union cvmx_pip_tag_mask { + uint64_t u64; + struct cvmx_pip_tag_mask_s { + uint64_t reserved_16_63:48; + uint64_t mask:16; + } s; + struct cvmx_pip_tag_mask_s cn30xx; + struct cvmx_pip_tag_mask_s cn31xx; + struct cvmx_pip_tag_mask_s cn38xx; + struct cvmx_pip_tag_mask_s cn38xxp2; + struct cvmx_pip_tag_mask_s cn50xx; + struct cvmx_pip_tag_mask_s cn52xx; + struct cvmx_pip_tag_mask_s cn52xxp1; + struct cvmx_pip_tag_mask_s cn56xx; + struct cvmx_pip_tag_mask_s cn56xxp1; + struct cvmx_pip_tag_mask_s cn58xx; + struct cvmx_pip_tag_mask_s cn58xxp1; +}; + +union cvmx_pip_tag_secret { + uint64_t u64; + struct cvmx_pip_tag_secret_s { + uint64_t reserved_32_63:32; + uint64_t dst:16; + uint64_t src:16; + } s; + struct cvmx_pip_tag_secret_s cn30xx; + struct cvmx_pip_tag_secret_s cn31xx; + struct cvmx_pip_tag_secret_s cn38xx; + struct cvmx_pip_tag_secret_s cn38xxp2; + struct cvmx_pip_tag_secret_s cn50xx; + struct cvmx_pip_tag_secret_s cn52xx; + struct cvmx_pip_tag_secret_s cn52xxp1; + struct cvmx_pip_tag_secret_s cn56xx; + struct cvmx_pip_tag_secret_s cn56xxp1; + struct cvmx_pip_tag_secret_s cn58xx; + struct cvmx_pip_tag_secret_s cn58xxp1; +}; + +union cvmx_pip_todo_entry { + uint64_t u64; + struct cvmx_pip_todo_entry_s { + uint64_t val:1; + uint64_t reserved_62_62:1; + uint64_t entry:62; + } s; + struct cvmx_pip_todo_entry_s cn30xx; + struct cvmx_pip_todo_entry_s cn31xx; + struct cvmx_pip_todo_entry_s cn38xx; + struct cvmx_pip_todo_entry_s cn38xxp2; + struct cvmx_pip_todo_entry_s cn50xx; + struct cvmx_pip_todo_entry_s cn52xx; + struct cvmx_pip_todo_entry_s cn52xxp1; + struct cvmx_pip_todo_entry_s cn56xx; + struct cvmx_pip_todo_entry_s cn56xxp1; + struct cvmx_pip_todo_entry_s cn58xx; + struct cvmx_pip_todo_entry_s cn58xxp1; +}; + +#endif diff --git a/drivers/staging/octeon/cvmx-pip.h b/drivers/staging/octeon/cvmx-pip.h new file mode 100644 index 0000000..78dbce8 --- /dev/null +++ b/drivers/staging/octeon/cvmx-pip.h @@ -0,0 +1,524 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/* + * Interface to the hardware Packet Input Processing unit. 
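+ * + * As a hedged orientation sketch (not from the original sources; + * "port" stands for any valid input port number), bringing up a port + * typically means writing its PIP_PRT_CFG/PIP_PRT_TAG pair through + * cvmx_pip_config_port() declared later in this file: + * + * union cvmx_pip_prt_cfgx cfg; + * union cvmx_pip_prt_tagx tag; + * cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(port)); + * cfg.s.mode = CVMX_PIP_PORT_CFG_MODE_SKIPL2; + * tag.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(port)); + * cvmx_pip_config_port(port, cfg, tag);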
+ * + */ + +#ifndef __CVMX_PIP_H__ +#define __CVMX_PIP_H__ + +#include "cvmx-wqe.h" +#include "cvmx-fpa.h" +#include "cvmx-pip-defs.h" + +#define CVMX_PIP_NUM_INPUT_PORTS 40 +#define CVMX_PIP_NUM_WATCHERS 4 + +/* + * Encodes the different error and exception codes + */ +typedef enum { + CVMX_PIP_L4_NO_ERR = 0ull, + /* + * 1 = TCP (UDP) packet not long enough to cover TCP (UDP) + * header + */ + CVMX_PIP_L4_MAL_ERR = 1ull, + /* 2 = TCP/UDP checksum failure */ + CVMX_PIP_CHK_ERR = 2ull, + /* + * 3 = TCP/UDP length check (TCP/UDP length does not match IP + * length). + */ + CVMX_PIP_L4_LENGTH_ERR = 3ull, + /* 4 = illegal TCP/UDP port (either source or dest port is zero) */ + CVMX_PIP_BAD_PRT_ERR = 4ull, + /* 8 = TCP flags = FIN only */ + CVMX_PIP_TCP_FLG8_ERR = 8ull, + /* 9 = TCP flags = 0 */ + CVMX_PIP_TCP_FLG9_ERR = 9ull, + /* 10 = TCP flags = FIN+RST+* */ + CVMX_PIP_TCP_FLG10_ERR = 10ull, + /* 11 = TCP flags = SYN+URG+* */ + CVMX_PIP_TCP_FLG11_ERR = 11ull, + /* 12 = TCP flags = SYN+RST+* */ + CVMX_PIP_TCP_FLG12_ERR = 12ull, + /* 13 = TCP flags = SYN+FIN+* */ + CVMX_PIP_TCP_FLG13_ERR = 13ull +} cvmx_pip_l4_err_t; + +typedef enum { + + CVMX_PIP_IP_NO_ERR = 0ull, + /* 1 = not IPv4 or IPv6 */ + CVMX_PIP_NOT_IP = 1ull, + /* 2 = IPv4 header checksum violation */ + CVMX_PIP_IPV4_HDR_CHK = 2ull, + /* 3 = malformed (packet not long enough to cover IP hdr) */ + CVMX_PIP_IP_MAL_HDR = 3ull, + /* 4 = malformed (packet not long enough to cover len in IP hdr) */ + CVMX_PIP_IP_MAL_PKT = 4ull, + /* 5 = TTL / hop count equal zero */ + CVMX_PIP_TTL_HOP = 5ull, + /* 6 = IPv4 options / IPv6 early extension headers */ + CVMX_PIP_OPTS = 6ull +} cvmx_pip_ip_exc_t; + +/** + * NOTES + * late collision (data received before collision) + * late collisions cannot be detected by the receiver + * they would appear as JAM bits which would appear as bad FCS + * or carrier extend error which is CVMX_PIP_EXTEND_ERR + */ +typedef enum { + /* No error */ + CVMX_PIP_RX_NO_ERR = 0ull, + /* RGM+SPI 1 = partially received packet (buffering/bandwidth + * not adequate) */ + CVMX_PIP_PARTIAL_ERR = 1ull, + /* RGM+SPI 2 = receive packet too large and truncated */ + CVMX_PIP_JABBER_ERR = 2ull, + /* + * RGM 3 = max frame error (pkt len > max frame len) (with FCS + * error) + */ + CVMX_PIP_OVER_FCS_ERR = 3ull, + /* RGM+SPI 4 = max frame error (pkt len > max frame len) */ + CVMX_PIP_OVER_ERR = 4ull, + /* + * RGM 5 = nibble error (data not byte multiple - 100M and 10M + * only) + */ + CVMX_PIP_ALIGN_ERR = 5ull, + /* + * RGM 6 = min frame error (pkt len < min frame len) (with FCS + * error) + */ + CVMX_PIP_UNDER_FCS_ERR = 6ull, + /* RGM 7 = FCS error */ + CVMX_PIP_GMX_FCS_ERR = 7ull, + /* RGM+SPI 8 = min frame error (pkt len < min frame len) */ + CVMX_PIP_UNDER_ERR = 8ull, + /* RGM 9 = Frame carrier extend error */ + CVMX_PIP_EXTEND_ERR = 9ull, + /* + * RGM 10 = length mismatch (len did not match len in L2 + * length/type) + */ + CVMX_PIP_LENGTH_ERR = 10ull, + /* RGM 11 = Frame error (some or all data bits marked err) */ + CVMX_PIP_DAT_ERR = 11ull, + /* SPI 11 = DIP4 error */ + CVMX_PIP_DIP_ERR = 11ull, + /* + * RGM 12 = packet was not large enough to pass the skipper - + * no inspection could occur. + */ + CVMX_PIP_SKIP_ERR = 12ull, + /* + * RGM 13 = studder error (data not repeated - 100M and 10M + * only) + */ + CVMX_PIP_NIBBLE_ERR = 13ull, + /* RGM+SPI 16 = FCS error */ + CVMX_PIP_PIP_FCS = 16L, + /* + * RGM+SPI+PCI 17 = packet was not large enough to pass the + * skipper - no inspection could occur. 
+ */ + CVMX_PIP_PIP_SKIP_ERR = 17L, + /* + * RGM+SPI+PCI 18 = malformed l2 (packet not long enough to + * cover L2 hdr). + */ + CVMX_PIP_PIP_L2_MAL_HDR = 18L + /* + * NOTES: xx = late collision (data received before collision) + * late collisions cannot be detected by the receiver + * they would appear as JAM bits which would appear as + * bad FCS or carrier extend error which is + * CVMX_PIP_EXTEND_ERR + */ +} cvmx_pip_rcv_err_t; + +/** + * This defines the err_code field errors in the work Q entry + */ +typedef union { + cvmx_pip_l4_err_t l4_err; + cvmx_pip_ip_exc_t ip_exc; + cvmx_pip_rcv_err_t rcv_err; +} cvmx_pip_err_t; + +/** + * Status statistics for a port + */ +typedef struct { + /* Inbound octets marked to be dropped by the IPD */ + uint32_t dropped_octets; + /* Inbound packets marked to be dropped by the IPD */ + uint32_t dropped_packets; + /* RAW PCI Packets received by PIP per port */ + uint32_t pci_raw_packets; + /* Number of octets processed by PIP */ + uint32_t octets; + /* Number of packets processed by PIP */ + uint32_t packets; + /* + * Number of identified L2 multicast packets. Does not + * include broadcast packets. Only includes packets whose + * parse mode is SKIP_TO_L2 + */ + uint32_t multicast_packets; + /* + * Number of identified L2 broadcast packets. Does not + * include multicast packets. Only includes packets whose + * parse mode is SKIP_TO_L2 + */ + uint32_t broadcast_packets; + /* Number of 64B packets */ + uint32_t len_64_packets; + /* Number of 65-127B packets */ + uint32_t len_65_127_packets; + /* Number of 128-255B packets */ + uint32_t len_128_255_packets; + /* Number of 256-511B packets */ + uint32_t len_256_511_packets; + /* Number of 512-1023B packets */ + uint32_t len_512_1023_packets; + /* Number of 1024-1518B packets */ + uint32_t len_1024_1518_packets; + /* Number of 1519-max packets */ + uint32_t len_1519_max_packets; + /* Number of packets with FCS or Align opcode errors */ + uint32_t fcs_align_err_packets; + /* Number of packets with length < min */ + uint32_t runt_packets; + /* Number of packets with length < min and FCS error */ + uint32_t runt_crc_packets; + /* Number of packets with length > max */ + uint32_t oversize_packets; + /* Number of packets with length > max and FCS error */ + uint32_t oversize_crc_packets; + /* Number of packets without GMX/SPX/PCI errors received by PIP */ + uint32_t inb_packets; + /* + * Total number of octets from all packets received by PIP, + * including CRC + */ + uint64_t inb_octets; + /* Number of packets with GMX/SPX/PCI errors received by PIP */ + uint16_t inb_errors; +} cvmx_pip_port_status_t; + +/** + * Definition of the PIP custom header that can be prepended + * to a packet by external hardware. + */ +typedef union { + uint64_t u64; + struct { + /* + * Documented as R - Set if the Packet is RAWFULL. If + * set, this header must be the full 8 bytes.
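+ * + * As a hedged sketch (field names from this union; the 8-byte skip + * and L2 parse mode are illustrative choices, not mandated by the + * sources), software could build the header as: + * + * cvmx_pip_pkt_inst_hdr_t hdr; + * hdr.u64 = 0; + * hdr.s.rawfull = 1; + * hdr.s.parse_mode = CVMX_PIP_PORT_CFG_MODE_SKIPL2; + * hdr.s.skip_len = 8;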
+ */ + uint64_t rawfull:1; + /* Must be zero */ + uint64_t reserved0:5; + /* PIP parse mode for this packet */ + uint64_t parse_mode:2; + /* Must be zero */ + uint64_t reserved1:1; + /* + * Skip amount, including this header, to the + * beginning of the packet + */ + uint64_t skip_len:7; + /* Must be zero */ + uint64_t reserved2:6; + /* POW input queue for this packet */ + uint64_t qos:3; + /* POW input group for this packet */ + uint64_t grp:4; + /* + * Flag to store this packet in the work queue entry, + * if possible + */ + uint64_t rs:1; + /* POW input tag type */ + uint64_t tag_type:2; + /* POW input tag */ + uint64_t tag:32; + } s; +} cvmx_pip_pkt_inst_hdr_t; + +/* CSR typedefs have been moved to cvmx-csr-*.h */ + +/** + * Configure an ethernet input port + * + * @port_num: Port number to configure + * @port_cfg: Port hardware configuration + * @port_tag_cfg: + * Port POW tagging configuration + */ +static inline void cvmx_pip_config_port(uint64_t port_num, + union cvmx_pip_prt_cfgx port_cfg, + union cvmx_pip_prt_tagx port_tag_cfg) +{ + cvmx_write_csr(CVMX_PIP_PRT_CFGX(port_num), port_cfg.u64); + cvmx_write_csr(CVMX_PIP_PRT_TAGX(port_num), port_tag_cfg.u64); +} +#if 0 +/** + * @deprecated This function is a thin wrapper around the Pass1 version + * of the CVMX_PIP_QOS_WATCHX CSR; Pass2 has added a field for + * setting the group that is incompatible with this function; + * the preferred upgrade path is to use the CSR directly. + * + * Configure the global QoS packet watchers. Each watcher is + * capable of matching a field in a packet to determine the + * QoS queue for scheduling. + * + * @watcher: Watcher number to configure (0 - 3). + * @match_type: Watcher match type + * @match_value: + * Value the watcher will match against + * @qos: QoS queue for packets matching this watcher + */ +static inline void cvmx_pip_config_watcher(uint64_t watcher, + cvmx_pip_qos_watch_types match_type, + uint64_t match_value, uint64_t qos) +{ + cvmx_pip_port_watcher_cfg_t watcher_config; + + watcher_config.u64 = 0; + watcher_config.s.match_type = match_type; + watcher_config.s.match_value = match_value; + watcher_config.s.qos = qos; + + cvmx_write_csr(CVMX_PIP_QOS_WATCHX(watcher), watcher_config.u64); +} +#endif +/** + * Configure the VLAN priority to QoS queue mapping. + * + * @vlan_priority: + * VLAN priority (0-7) + * @qos: QoS queue for packets with this VLAN priority + */ +static inline void cvmx_pip_config_vlan_qos(uint64_t vlan_priority, + uint64_t qos) +{ + union cvmx_pip_qos_vlanx pip_qos_vlanx; + pip_qos_vlanx.u64 = 0; + pip_qos_vlanx.s.qos = qos; + cvmx_write_csr(CVMX_PIP_QOS_VLANX(vlan_priority), pip_qos_vlanx.u64); +} + +/** + * Configure the Diffserv to QoS queue mapping. + * + * @diffserv: Diffserv field value (0-63) + * @qos: QoS queue for packets with this diffserv value + */ +static inline void cvmx_pip_config_diffserv_qos(uint64_t diffserv, uint64_t qos) +{ + union cvmx_pip_qos_diffx pip_qos_diffx; + pip_qos_diffx.u64 = 0; + pip_qos_diffx.s.qos = qos; + cvmx_write_csr(CVMX_PIP_QOS_DIFFX(diffserv), pip_qos_diffx.u64); +} + +/** + * Get the status counters for a port. + * + * @port_num: Port number to get statistics for. + * @clear: Set to 1 to clear the counters after they are read + * @status: Where to put the results.
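+ * + * A hedged usage sketch, reading and clearing port 0's counters + * (pr_info() only for illustration): + * + * cvmx_pip_port_status_t stats; + * cvmx_pip_get_port_status(0, 1, &stats); + * pr_info("pip0: %u packets, %u dropped\n", + * stats.packets, stats.dropped_packets);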
+ */ +static inline void cvmx_pip_get_port_status(uint64_t port_num, uint64_t clear, + cvmx_pip_port_status_t *status) +{ + union cvmx_pip_stat_ctl pip_stat_ctl; + union cvmx_pip_stat0_prtx stat0; + union cvmx_pip_stat1_prtx stat1; + union cvmx_pip_stat2_prtx stat2; + union cvmx_pip_stat3_prtx stat3; + union cvmx_pip_stat4_prtx stat4; + union cvmx_pip_stat5_prtx stat5; + union cvmx_pip_stat6_prtx stat6; + union cvmx_pip_stat7_prtx stat7; + union cvmx_pip_stat8_prtx stat8; + union cvmx_pip_stat9_prtx stat9; + union cvmx_pip_stat_inb_pktsx pip_stat_inb_pktsx; + union cvmx_pip_stat_inb_octsx pip_stat_inb_octsx; + union cvmx_pip_stat_inb_errsx pip_stat_inb_errsx; + + pip_stat_ctl.u64 = 0; + pip_stat_ctl.s.rdclr = clear; + cvmx_write_csr(CVMX_PIP_STAT_CTL, pip_stat_ctl.u64); + + stat0.u64 = cvmx_read_csr(CVMX_PIP_STAT0_PRTX(port_num)); + stat1.u64 = cvmx_read_csr(CVMX_PIP_STAT1_PRTX(port_num)); + stat2.u64 = cvmx_read_csr(CVMX_PIP_STAT2_PRTX(port_num)); + stat3.u64 = cvmx_read_csr(CVMX_PIP_STAT3_PRTX(port_num)); + stat4.u64 = cvmx_read_csr(CVMX_PIP_STAT4_PRTX(port_num)); + stat5.u64 = cvmx_read_csr(CVMX_PIP_STAT5_PRTX(port_num)); + stat6.u64 = cvmx_read_csr(CVMX_PIP_STAT6_PRTX(port_num)); + stat7.u64 = cvmx_read_csr(CVMX_PIP_STAT7_PRTX(port_num)); + stat8.u64 = cvmx_read_csr(CVMX_PIP_STAT8_PRTX(port_num)); + stat9.u64 = cvmx_read_csr(CVMX_PIP_STAT9_PRTX(port_num)); + pip_stat_inb_pktsx.u64 = + cvmx_read_csr(CVMX_PIP_STAT_INB_PKTSX(port_num)); + pip_stat_inb_octsx.u64 = + cvmx_read_csr(CVMX_PIP_STAT_INB_OCTSX(port_num)); + pip_stat_inb_errsx.u64 = + cvmx_read_csr(CVMX_PIP_STAT_INB_ERRSX(port_num)); + + status->dropped_octets = stat0.s.drp_octs; + status->dropped_packets = stat0.s.drp_pkts; + status->octets = stat1.s.octs; + status->pci_raw_packets = stat2.s.raw; + status->packets = stat2.s.pkts; + status->multicast_packets = stat3.s.mcst; + status->broadcast_packets = stat3.s.bcst; + status->len_64_packets = stat4.s.h64; + status->len_65_127_packets = stat4.s.h65to127; + status->len_128_255_packets = stat5.s.h128to255; + status->len_256_511_packets = stat5.s.h256to511; + status->len_512_1023_packets = stat6.s.h512to1023; + status->len_1024_1518_packets = stat6.s.h1024to1518; + status->len_1519_max_packets = stat7.s.h1519; + status->fcs_align_err_packets = stat7.s.fcs; + status->runt_packets = stat8.s.undersz; + status->runt_crc_packets = stat8.s.frag; + status->oversize_packets = stat9.s.oversz; + status->oversize_crc_packets = stat9.s.jabber; + status->inb_packets = pip_stat_inb_pktsx.s.pkts; + status->inb_octets = pip_stat_inb_octsx.s.octs; + status->inb_errors = pip_stat_inb_errsx.s.errs; + + if (cvmx_octeon_is_pass1()) { + /* + * Kludge to fix Octeon Pass 1 errata - Drop counts + * don't work. 
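+ * The code below recomputes them instead: dropped packets as + * inb_packets minus processed packets, and dropped octets as + * inb_octets minus processed octets, where the inb_packets * 4 term + * appears to strip the 4 CRC bytes per packet that inb_octets counts + * but octets does not.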
+ */ + if (status->inb_packets > status->packets) + status->dropped_packets = + status->inb_packets - status->packets; + else + status->dropped_packets = 0; + if (status->inb_octets - status->inb_packets * 4 > + status->octets) + status->dropped_octets = + status->inb_octets - status->inb_packets * 4 - + status->octets; + else + status->dropped_octets = 0; + } +} + +/** + * Configure the hardware CRC engine + * + * @interface: Interface to configure (0 or 1) + * @invert_result: + * Invert the result of the CRC + * @reflect: Reflect + * @initialization_vector: + * CRC initialization vector + */ +static inline void cvmx_pip_config_crc(uint64_t interface, + uint64_t invert_result, uint64_t reflect, + uint32_t initialization_vector) +{ + if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)) { + union cvmx_pip_crc_ctlx config; + union cvmx_pip_crc_ivx pip_crc_ivx; + + config.u64 = 0; + config.s.invres = invert_result; + config.s.reflect = reflect; + cvmx_write_csr(CVMX_PIP_CRC_CTLX(interface), config.u64); + + pip_crc_ivx.u64 = 0; + pip_crc_ivx.s.iv = initialization_vector; + cvmx_write_csr(CVMX_PIP_CRC_IVX(interface), pip_crc_ivx.u64); + } +} + +/** + * Clear all bits in a tag mask. This should be called on + * startup before any calls to cvmx_pip_tag_mask_set. Each bit + * set in the final mask represents a byte used in the packet for + * tag generation. + * + * @mask_index: Which tag mask to clear (0..3) + */ +static inline void cvmx_pip_tag_mask_clear(uint64_t mask_index) +{ + uint64_t index; + union cvmx_pip_tag_incx pip_tag_incx; + pip_tag_incx.u64 = 0; + pip_tag_incx.s.en = 0; + for (index = mask_index * 16; index < (mask_index + 1) * 16; index++) + cvmx_write_csr(CVMX_PIP_TAG_INCX(index), pip_tag_incx.u64); +} + +/** + * Sets a range of bits in the tag mask. The tag mask is used + * when the cvmx_pip_port_tag_cfg_t tag_mode is non-zero. + * There are four separate masks that can be configured. + * + * @mask_index: Which tag mask to modify (0..3) + * @offset: Offset into the bitmask to set bits at. Use the offsetof() + * macro to determine the offsets into packet headers. + * For example, offsetof(struct ethhdr, h_proto) returns the offset + * of the ethernet protocol field. The bitmask selects which + * bytes to include in the tag, with bit offset X selecting + * byte at offset X from the beginning of the packet data. + * @len: Number of bytes to include. Usually this is the sizeof() + * of the field. + */ +static inline void cvmx_pip_tag_mask_set(uint64_t mask_index, uint64_t offset, + uint64_t len) +{ + while (len--) { + union cvmx_pip_tag_incx pip_tag_incx; + uint64_t index = mask_index * 16 + offset / 8; + pip_tag_incx.u64 = cvmx_read_csr(CVMX_PIP_TAG_INCX(index)); + pip_tag_incx.s.en |= 0x80 >> (offset & 0x7); + cvmx_write_csr(CVMX_PIP_TAG_INCX(index), pip_tag_incx.u64); + offset++; + } +} + +#endif /* __CVMX_PIP_H__ */ diff --git a/drivers/staging/octeon/cvmx-pko-defs.h b/drivers/staging/octeon/cvmx-pko-defs.h new file mode 100644 index 0000000..50e779c --- /dev/null +++ b/drivers/staging/octeon/cvmx-pko-defs.h @@ -0,0 +1,1133 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation.
+ * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_PKO_DEFS_H__ +#define __CVMX_PKO_DEFS_H__ + +#define CVMX_PKO_MEM_COUNT0 \ + CVMX_ADD_IO_SEG(0x0001180050001080ull) +#define CVMX_PKO_MEM_COUNT1 \ + CVMX_ADD_IO_SEG(0x0001180050001088ull) +#define CVMX_PKO_MEM_DEBUG0 \ + CVMX_ADD_IO_SEG(0x0001180050001100ull) +#define CVMX_PKO_MEM_DEBUG1 \ + CVMX_ADD_IO_SEG(0x0001180050001108ull) +#define CVMX_PKO_MEM_DEBUG10 \ + CVMX_ADD_IO_SEG(0x0001180050001150ull) +#define CVMX_PKO_MEM_DEBUG11 \ + CVMX_ADD_IO_SEG(0x0001180050001158ull) +#define CVMX_PKO_MEM_DEBUG12 \ + CVMX_ADD_IO_SEG(0x0001180050001160ull) +#define CVMX_PKO_MEM_DEBUG13 \ + CVMX_ADD_IO_SEG(0x0001180050001168ull) +#define CVMX_PKO_MEM_DEBUG14 \ + CVMX_ADD_IO_SEG(0x0001180050001170ull) +#define CVMX_PKO_MEM_DEBUG2 \ + CVMX_ADD_IO_SEG(0x0001180050001110ull) +#define CVMX_PKO_MEM_DEBUG3 \ + CVMX_ADD_IO_SEG(0x0001180050001118ull) +#define CVMX_PKO_MEM_DEBUG4 \ + CVMX_ADD_IO_SEG(0x0001180050001120ull) +#define CVMX_PKO_MEM_DEBUG5 \ + CVMX_ADD_IO_SEG(0x0001180050001128ull) +#define CVMX_PKO_MEM_DEBUG6 \ + CVMX_ADD_IO_SEG(0x0001180050001130ull) +#define CVMX_PKO_MEM_DEBUG7 \ + CVMX_ADD_IO_SEG(0x0001180050001138ull) +#define CVMX_PKO_MEM_DEBUG8 \ + CVMX_ADD_IO_SEG(0x0001180050001140ull) +#define CVMX_PKO_MEM_DEBUG9 \ + CVMX_ADD_IO_SEG(0x0001180050001148ull) +#define CVMX_PKO_MEM_PORT_PTRS \ + CVMX_ADD_IO_SEG(0x0001180050001010ull) +#define CVMX_PKO_MEM_PORT_QOS \ + CVMX_ADD_IO_SEG(0x0001180050001018ull) +#define CVMX_PKO_MEM_PORT_RATE0 \ + CVMX_ADD_IO_SEG(0x0001180050001020ull) +#define CVMX_PKO_MEM_PORT_RATE1 \ + CVMX_ADD_IO_SEG(0x0001180050001028ull) +#define CVMX_PKO_MEM_QUEUE_PTRS \ + CVMX_ADD_IO_SEG(0x0001180050001000ull) +#define CVMX_PKO_MEM_QUEUE_QOS \ + CVMX_ADD_IO_SEG(0x0001180050001008ull) +#define CVMX_PKO_REG_BIST_RESULT \ + CVMX_ADD_IO_SEG(0x0001180050000080ull) +#define CVMX_PKO_REG_CMD_BUF \ + CVMX_ADD_IO_SEG(0x0001180050000010ull) +#define CVMX_PKO_REG_CRC_CTLX(offset) \ + CVMX_ADD_IO_SEG(0x0001180050000028ull + (((offset) & 1) * 8)) +#define CVMX_PKO_REG_CRC_ENABLE \ + CVMX_ADD_IO_SEG(0x0001180050000020ull) +#define CVMX_PKO_REG_CRC_IVX(offset) \ + CVMX_ADD_IO_SEG(0x0001180050000038ull + (((offset) & 1) * 8)) +#define CVMX_PKO_REG_DEBUG0 \ + CVMX_ADD_IO_SEG(0x0001180050000098ull) +#define CVMX_PKO_REG_DEBUG1 \ + CVMX_ADD_IO_SEG(0x00011800500000A0ull) +#define CVMX_PKO_REG_DEBUG2 \ + CVMX_ADD_IO_SEG(0x00011800500000A8ull) +#define CVMX_PKO_REG_DEBUG3 \ + CVMX_ADD_IO_SEG(0x00011800500000B0ull) +#define CVMX_PKO_REG_ENGINE_INFLIGHT \ + CVMX_ADD_IO_SEG(0x0001180050000050ull) +#define CVMX_PKO_REG_ENGINE_THRESH \ + CVMX_ADD_IO_SEG(0x0001180050000058ull) +#define CVMX_PKO_REG_ERROR \ + CVMX_ADD_IO_SEG(0x0001180050000088ull) +#define CVMX_PKO_REG_FLAGS \ + CVMX_ADD_IO_SEG(0x0001180050000000ull) +#define CVMX_PKO_REG_GMX_PORT_MODE 
\ + CVMX_ADD_IO_SEG(0x0001180050000018ull) +#define CVMX_PKO_REG_INT_MASK \ + CVMX_ADD_IO_SEG(0x0001180050000090ull) +#define CVMX_PKO_REG_QUEUE_MODE \ + CVMX_ADD_IO_SEG(0x0001180050000048ull) +#define CVMX_PKO_REG_QUEUE_PTRS1 \ + CVMX_ADD_IO_SEG(0x0001180050000100ull) +#define CVMX_PKO_REG_READ_IDX \ + CVMX_ADD_IO_SEG(0x0001180050000008ull) + +union cvmx_pko_mem_count0 { + uint64_t u64; + struct cvmx_pko_mem_count0_s { + uint64_t reserved_32_63:32; + uint64_t count:32; + } s; + struct cvmx_pko_mem_count0_s cn30xx; + struct cvmx_pko_mem_count0_s cn31xx; + struct cvmx_pko_mem_count0_s cn38xx; + struct cvmx_pko_mem_count0_s cn38xxp2; + struct cvmx_pko_mem_count0_s cn50xx; + struct cvmx_pko_mem_count0_s cn52xx; + struct cvmx_pko_mem_count0_s cn52xxp1; + struct cvmx_pko_mem_count0_s cn56xx; + struct cvmx_pko_mem_count0_s cn56xxp1; + struct cvmx_pko_mem_count0_s cn58xx; + struct cvmx_pko_mem_count0_s cn58xxp1; +}; + +union cvmx_pko_mem_count1 { + uint64_t u64; + struct cvmx_pko_mem_count1_s { + uint64_t reserved_48_63:16; + uint64_t count:48; + } s; + struct cvmx_pko_mem_count1_s cn30xx; + struct cvmx_pko_mem_count1_s cn31xx; + struct cvmx_pko_mem_count1_s cn38xx; + struct cvmx_pko_mem_count1_s cn38xxp2; + struct cvmx_pko_mem_count1_s cn50xx; + struct cvmx_pko_mem_count1_s cn52xx; + struct cvmx_pko_mem_count1_s cn52xxp1; + struct cvmx_pko_mem_count1_s cn56xx; + struct cvmx_pko_mem_count1_s cn56xxp1; + struct cvmx_pko_mem_count1_s cn58xx; + struct cvmx_pko_mem_count1_s cn58xxp1; +}; + +union cvmx_pko_mem_debug0 { + uint64_t u64; + struct cvmx_pko_mem_debug0_s { + uint64_t fau:28; + uint64_t cmd:14; + uint64_t segs:6; + uint64_t size:16; + } s; + struct cvmx_pko_mem_debug0_s cn30xx; + struct cvmx_pko_mem_debug0_s cn31xx; + struct cvmx_pko_mem_debug0_s cn38xx; + struct cvmx_pko_mem_debug0_s cn38xxp2; + struct cvmx_pko_mem_debug0_s cn50xx; + struct cvmx_pko_mem_debug0_s cn52xx; + struct cvmx_pko_mem_debug0_s cn52xxp1; + struct cvmx_pko_mem_debug0_s cn56xx; + struct cvmx_pko_mem_debug0_s cn56xxp1; + struct cvmx_pko_mem_debug0_s cn58xx; + struct cvmx_pko_mem_debug0_s cn58xxp1; +}; + +union cvmx_pko_mem_debug1 { + uint64_t u64; + struct cvmx_pko_mem_debug1_s { + uint64_t i:1; + uint64_t back:4; + uint64_t pool:3; + uint64_t size:16; + uint64_t ptr:40; + } s; + struct cvmx_pko_mem_debug1_s cn30xx; + struct cvmx_pko_mem_debug1_s cn31xx; + struct cvmx_pko_mem_debug1_s cn38xx; + struct cvmx_pko_mem_debug1_s cn38xxp2; + struct cvmx_pko_mem_debug1_s cn50xx; + struct cvmx_pko_mem_debug1_s cn52xx; + struct cvmx_pko_mem_debug1_s cn52xxp1; + struct cvmx_pko_mem_debug1_s cn56xx; + struct cvmx_pko_mem_debug1_s cn56xxp1; + struct cvmx_pko_mem_debug1_s cn58xx; + struct cvmx_pko_mem_debug1_s cn58xxp1; +}; + +union cvmx_pko_mem_debug10 { + uint64_t u64; + struct cvmx_pko_mem_debug10_s { + uint64_t reserved_0_63:64; + } s; + struct cvmx_pko_mem_debug10_cn30xx { + uint64_t fau:28; + uint64_t cmd:14; + uint64_t segs:6; + uint64_t size:16; + } cn30xx; + struct cvmx_pko_mem_debug10_cn30xx cn31xx; + struct cvmx_pko_mem_debug10_cn30xx cn38xx; + struct cvmx_pko_mem_debug10_cn30xx cn38xxp2; + struct cvmx_pko_mem_debug10_cn50xx { + uint64_t reserved_49_63:15; + uint64_t ptrs1:17; + uint64_t reserved_17_31:15; + uint64_t ptrs2:17; + } cn50xx; + struct cvmx_pko_mem_debug10_cn50xx cn52xx; + struct cvmx_pko_mem_debug10_cn50xx cn52xxp1; + struct cvmx_pko_mem_debug10_cn50xx cn56xx; + struct cvmx_pko_mem_debug10_cn50xx cn56xxp1; + struct cvmx_pko_mem_debug10_cn50xx cn58xx; + struct cvmx_pko_mem_debug10_cn50xx cn58xxp1; +}; + +union 
cvmx_pko_mem_debug11 { + uint64_t u64; + struct cvmx_pko_mem_debug11_s { + uint64_t i:1; + uint64_t back:4; + uint64_t pool:3; + uint64_t size:16; + uint64_t reserved_0_39:40; + } s; + struct cvmx_pko_mem_debug11_cn30xx { + uint64_t i:1; + uint64_t back:4; + uint64_t pool:3; + uint64_t size:16; + uint64_t ptr:40; + } cn30xx; + struct cvmx_pko_mem_debug11_cn30xx cn31xx; + struct cvmx_pko_mem_debug11_cn30xx cn38xx; + struct cvmx_pko_mem_debug11_cn30xx cn38xxp2; + struct cvmx_pko_mem_debug11_cn50xx { + uint64_t reserved_23_63:41; + uint64_t maj:1; + uint64_t uid:3; + uint64_t sop:1; + uint64_t len:1; + uint64_t chk:1; + uint64_t cnt:13; + uint64_t mod:3; + } cn50xx; + struct cvmx_pko_mem_debug11_cn50xx cn52xx; + struct cvmx_pko_mem_debug11_cn50xx cn52xxp1; + struct cvmx_pko_mem_debug11_cn50xx cn56xx; + struct cvmx_pko_mem_debug11_cn50xx cn56xxp1; + struct cvmx_pko_mem_debug11_cn50xx cn58xx; + struct cvmx_pko_mem_debug11_cn50xx cn58xxp1; +}; + +union cvmx_pko_mem_debug12 { + uint64_t u64; + struct cvmx_pko_mem_debug12_s { + uint64_t reserved_0_63:64; + } s; + struct cvmx_pko_mem_debug12_cn30xx { + uint64_t data:64; + } cn30xx; + struct cvmx_pko_mem_debug12_cn30xx cn31xx; + struct cvmx_pko_mem_debug12_cn30xx cn38xx; + struct cvmx_pko_mem_debug12_cn30xx cn38xxp2; + struct cvmx_pko_mem_debug12_cn50xx { + uint64_t fau:28; + uint64_t cmd:14; + uint64_t segs:6; + uint64_t size:16; + } cn50xx; + struct cvmx_pko_mem_debug12_cn50xx cn52xx; + struct cvmx_pko_mem_debug12_cn50xx cn52xxp1; + struct cvmx_pko_mem_debug12_cn50xx cn56xx; + struct cvmx_pko_mem_debug12_cn50xx cn56xxp1; + struct cvmx_pko_mem_debug12_cn50xx cn58xx; + struct cvmx_pko_mem_debug12_cn50xx cn58xxp1; +}; + +union cvmx_pko_mem_debug13 { + uint64_t u64; + struct cvmx_pko_mem_debug13_s { + uint64_t i:1; + uint64_t back:4; + uint64_t pool:3; + uint64_t reserved_0_55:56; + } s; + struct cvmx_pko_mem_debug13_cn30xx { + uint64_t reserved_51_63:13; + uint64_t widx:17; + uint64_t ridx2:17; + uint64_t widx2:17; + } cn30xx; + struct cvmx_pko_mem_debug13_cn30xx cn31xx; + struct cvmx_pko_mem_debug13_cn30xx cn38xx; + struct cvmx_pko_mem_debug13_cn30xx cn38xxp2; + struct cvmx_pko_mem_debug13_cn50xx { + uint64_t i:1; + uint64_t back:4; + uint64_t pool:3; + uint64_t size:16; + uint64_t ptr:40; + } cn50xx; + struct cvmx_pko_mem_debug13_cn50xx cn52xx; + struct cvmx_pko_mem_debug13_cn50xx cn52xxp1; + struct cvmx_pko_mem_debug13_cn50xx cn56xx; + struct cvmx_pko_mem_debug13_cn50xx cn56xxp1; + struct cvmx_pko_mem_debug13_cn50xx cn58xx; + struct cvmx_pko_mem_debug13_cn50xx cn58xxp1; +}; + +union cvmx_pko_mem_debug14 { + uint64_t u64; + struct cvmx_pko_mem_debug14_s { + uint64_t reserved_0_63:64; + } s; + struct cvmx_pko_mem_debug14_cn30xx { + uint64_t reserved_17_63:47; + uint64_t ridx:17; + } cn30xx; + struct cvmx_pko_mem_debug14_cn30xx cn31xx; + struct cvmx_pko_mem_debug14_cn30xx cn38xx; + struct cvmx_pko_mem_debug14_cn30xx cn38xxp2; + struct cvmx_pko_mem_debug14_cn52xx { + uint64_t data:64; + } cn52xx; + struct cvmx_pko_mem_debug14_cn52xx cn52xxp1; + struct cvmx_pko_mem_debug14_cn52xx cn56xx; + struct cvmx_pko_mem_debug14_cn52xx cn56xxp1; +}; + +union cvmx_pko_mem_debug2 { + uint64_t u64; + struct cvmx_pko_mem_debug2_s { + uint64_t i:1; + uint64_t back:4; + uint64_t pool:3; + uint64_t size:16; + uint64_t ptr:40; + } s; + struct cvmx_pko_mem_debug2_s cn30xx; + struct cvmx_pko_mem_debug2_s cn31xx; + struct cvmx_pko_mem_debug2_s cn38xx; + struct cvmx_pko_mem_debug2_s cn38xxp2; + struct cvmx_pko_mem_debug2_s cn50xx; + struct cvmx_pko_mem_debug2_s cn52xx; + 
struct cvmx_pko_mem_debug2_s cn52xxp1; + struct cvmx_pko_mem_debug2_s cn56xx; + struct cvmx_pko_mem_debug2_s cn56xxp1; + struct cvmx_pko_mem_debug2_s cn58xx; + struct cvmx_pko_mem_debug2_s cn58xxp1; +}; + +union cvmx_pko_mem_debug3 { + uint64_t u64; + struct cvmx_pko_mem_debug3_s { + uint64_t reserved_0_63:64; + } s; + struct cvmx_pko_mem_debug3_cn30xx { + uint64_t i:1; + uint64_t back:4; + uint64_t pool:3; + uint64_t size:16; + uint64_t ptr:40; + } cn30xx; + struct cvmx_pko_mem_debug3_cn30xx cn31xx; + struct cvmx_pko_mem_debug3_cn30xx cn38xx; + struct cvmx_pko_mem_debug3_cn30xx cn38xxp2; + struct cvmx_pko_mem_debug3_cn50xx { + uint64_t data:64; + } cn50xx; + struct cvmx_pko_mem_debug3_cn50xx cn52xx; + struct cvmx_pko_mem_debug3_cn50xx cn52xxp1; + struct cvmx_pko_mem_debug3_cn50xx cn56xx; + struct cvmx_pko_mem_debug3_cn50xx cn56xxp1; + struct cvmx_pko_mem_debug3_cn50xx cn58xx; + struct cvmx_pko_mem_debug3_cn50xx cn58xxp1; +}; + +union cvmx_pko_mem_debug4 { + uint64_t u64; + struct cvmx_pko_mem_debug4_s { + uint64_t reserved_0_63:64; + } s; + struct cvmx_pko_mem_debug4_cn30xx { + uint64_t data:64; + } cn30xx; + struct cvmx_pko_mem_debug4_cn30xx cn31xx; + struct cvmx_pko_mem_debug4_cn30xx cn38xx; + struct cvmx_pko_mem_debug4_cn30xx cn38xxp2; + struct cvmx_pko_mem_debug4_cn50xx { + uint64_t cmnd_segs:3; + uint64_t cmnd_siz:16; + uint64_t cmnd_off:6; + uint64_t uid:3; + uint64_t dread_sop:1; + uint64_t init_dwrite:1; + uint64_t chk_once:1; + uint64_t chk_mode:1; + uint64_t active:1; + uint64_t static_p:1; + uint64_t qos:3; + uint64_t qcb_ridx:5; + uint64_t qid_off_max:4; + uint64_t qid_off:4; + uint64_t qid_base:8; + uint64_t wait:1; + uint64_t minor:2; + uint64_t major:3; + } cn50xx; + struct cvmx_pko_mem_debug4_cn52xx { + uint64_t curr_siz:8; + uint64_t curr_off:16; + uint64_t cmnd_segs:6; + uint64_t cmnd_siz:16; + uint64_t cmnd_off:6; + uint64_t uid:2; + uint64_t dread_sop:1; + uint64_t init_dwrite:1; + uint64_t chk_once:1; + uint64_t chk_mode:1; + uint64_t wait:1; + uint64_t minor:2; + uint64_t major:3; + } cn52xx; + struct cvmx_pko_mem_debug4_cn52xx cn52xxp1; + struct cvmx_pko_mem_debug4_cn52xx cn56xx; + struct cvmx_pko_mem_debug4_cn52xx cn56xxp1; + struct cvmx_pko_mem_debug4_cn50xx cn58xx; + struct cvmx_pko_mem_debug4_cn50xx cn58xxp1; +}; + +union cvmx_pko_mem_debug5 { + uint64_t u64; + struct cvmx_pko_mem_debug5_s { + uint64_t reserved_0_63:64; + } s; + struct cvmx_pko_mem_debug5_cn30xx { + uint64_t dwri_mod:1; + uint64_t dwri_sop:1; + uint64_t dwri_len:1; + uint64_t dwri_cnt:13; + uint64_t cmnd_siz:16; + uint64_t uid:1; + uint64_t xfer_wor:1; + uint64_t xfer_dwr:1; + uint64_t cbuf_fre:1; + uint64_t reserved_27_27:1; + uint64_t chk_mode:1; + uint64_t active:1; + uint64_t qos:3; + uint64_t qcb_ridx:5; + uint64_t qid_off:3; + uint64_t qid_base:7; + uint64_t wait:1; + uint64_t minor:2; + uint64_t major:4; + } cn30xx; + struct cvmx_pko_mem_debug5_cn30xx cn31xx; + struct cvmx_pko_mem_debug5_cn30xx cn38xx; + struct cvmx_pko_mem_debug5_cn30xx cn38xxp2; + struct cvmx_pko_mem_debug5_cn50xx { + uint64_t curr_ptr:29; + uint64_t curr_siz:16; + uint64_t curr_off:16; + uint64_t cmnd_segs:3; + } cn50xx; + struct cvmx_pko_mem_debug5_cn52xx { + uint64_t reserved_54_63:10; + uint64_t nxt_inflt:6; + uint64_t curr_ptr:40; + uint64_t curr_siz:8; + } cn52xx; + struct cvmx_pko_mem_debug5_cn52xx cn52xxp1; + struct cvmx_pko_mem_debug5_cn52xx cn56xx; + struct cvmx_pko_mem_debug5_cn52xx cn56xxp1; + struct cvmx_pko_mem_debug5_cn50xx cn58xx; + struct cvmx_pko_mem_debug5_cn50xx cn58xxp1; +}; + +union 
cvmx_pko_mem_debug6 { + uint64_t u64; + struct cvmx_pko_mem_debug6_s { + uint64_t reserved_37_63:27; + uint64_t qid_offres:4; + uint64_t qid_offths:4; + uint64_t preempter:1; + uint64_t preemptee:1; + uint64_t preempted:1; + uint64_t active:1; + uint64_t statc:1; + uint64_t qos:3; + uint64_t qcb_ridx:5; + uint64_t qid_offmax:4; + uint64_t reserved_0_11:12; + } s; + struct cvmx_pko_mem_debug6_cn30xx { + uint64_t reserved_11_63:53; + uint64_t qid_offm:3; + uint64_t static_p:1; + uint64_t work_min:3; + uint64_t dwri_chk:1; + uint64_t dwri_uid:1; + uint64_t dwri_mod:2; + } cn30xx; + struct cvmx_pko_mem_debug6_cn30xx cn31xx; + struct cvmx_pko_mem_debug6_cn30xx cn38xx; + struct cvmx_pko_mem_debug6_cn30xx cn38xxp2; + struct cvmx_pko_mem_debug6_cn50xx { + uint64_t reserved_11_63:53; + uint64_t curr_ptr:11; + } cn50xx; + struct cvmx_pko_mem_debug6_cn52xx { + uint64_t reserved_37_63:27; + uint64_t qid_offres:4; + uint64_t qid_offths:4; + uint64_t preempter:1; + uint64_t preemptee:1; + uint64_t preempted:1; + uint64_t active:1; + uint64_t statc:1; + uint64_t qos:3; + uint64_t qcb_ridx:5; + uint64_t qid_offmax:4; + uint64_t qid_off:4; + uint64_t qid_base:8; + } cn52xx; + struct cvmx_pko_mem_debug6_cn52xx cn52xxp1; + struct cvmx_pko_mem_debug6_cn52xx cn56xx; + struct cvmx_pko_mem_debug6_cn52xx cn56xxp1; + struct cvmx_pko_mem_debug6_cn50xx cn58xx; + struct cvmx_pko_mem_debug6_cn50xx cn58xxp1; +}; + +union cvmx_pko_mem_debug7 { + uint64_t u64; + struct cvmx_pko_mem_debug7_s { + uint64_t qos:5; + uint64_t tail:1; + uint64_t reserved_0_57:58; + } s; + struct cvmx_pko_mem_debug7_cn30xx { + uint64_t reserved_58_63:6; + uint64_t dwb:9; + uint64_t start:33; + uint64_t size:16; + } cn30xx; + struct cvmx_pko_mem_debug7_cn30xx cn31xx; + struct cvmx_pko_mem_debug7_cn30xx cn38xx; + struct cvmx_pko_mem_debug7_cn30xx cn38xxp2; + struct cvmx_pko_mem_debug7_cn50xx { + uint64_t qos:5; + uint64_t tail:1; + uint64_t buf_siz:13; + uint64_t buf_ptr:33; + uint64_t qcb_widx:6; + uint64_t qcb_ridx:6; + } cn50xx; + struct cvmx_pko_mem_debug7_cn50xx cn52xx; + struct cvmx_pko_mem_debug7_cn50xx cn52xxp1; + struct cvmx_pko_mem_debug7_cn50xx cn56xx; + struct cvmx_pko_mem_debug7_cn50xx cn56xxp1; + struct cvmx_pko_mem_debug7_cn50xx cn58xx; + struct cvmx_pko_mem_debug7_cn50xx cn58xxp1; +}; + +union cvmx_pko_mem_debug8 { + uint64_t u64; + struct cvmx_pko_mem_debug8_s { + uint64_t reserved_59_63:5; + uint64_t tail:1; + uint64_t buf_siz:13; + uint64_t reserved_0_44:45; + } s; + struct cvmx_pko_mem_debug8_cn30xx { + uint64_t qos:5; + uint64_t tail:1; + uint64_t buf_siz:13; + uint64_t buf_ptr:33; + uint64_t qcb_widx:6; + uint64_t qcb_ridx:6; + } cn30xx; + struct cvmx_pko_mem_debug8_cn30xx cn31xx; + struct cvmx_pko_mem_debug8_cn30xx cn38xx; + struct cvmx_pko_mem_debug8_cn30xx cn38xxp2; + struct cvmx_pko_mem_debug8_cn50xx { + uint64_t reserved_28_63:36; + uint64_t doorbell:20; + uint64_t reserved_6_7:2; + uint64_t static_p:1; + uint64_t s_tail:1; + uint64_t static_q:1; + uint64_t qos:3; + } cn50xx; + struct cvmx_pko_mem_debug8_cn52xx { + uint64_t reserved_29_63:35; + uint64_t preempter:1; + uint64_t doorbell:20; + uint64_t reserved_7_7:1; + uint64_t preemptee:1; + uint64_t static_p:1; + uint64_t s_tail:1; + uint64_t static_q:1; + uint64_t qos:3; + } cn52xx; + struct cvmx_pko_mem_debug8_cn52xx cn52xxp1; + struct cvmx_pko_mem_debug8_cn52xx cn56xx; + struct cvmx_pko_mem_debug8_cn52xx cn56xxp1; + struct cvmx_pko_mem_debug8_cn50xx cn58xx; + struct cvmx_pko_mem_debug8_cn50xx cn58xxp1; +}; + +union cvmx_pko_mem_debug9 { + uint64_t u64; + struct 
cvmx_pko_mem_debug9_s { + uint64_t reserved_49_63:15; + uint64_t ptrs0:17; + uint64_t reserved_0_31:32; + } s; + struct cvmx_pko_mem_debug9_cn30xx { + uint64_t reserved_28_63:36; + uint64_t doorbell:20; + uint64_t reserved_5_7:3; + uint64_t s_tail:1; + uint64_t static_q:1; + uint64_t qos:3; + } cn30xx; + struct cvmx_pko_mem_debug9_cn30xx cn31xx; + struct cvmx_pko_mem_debug9_cn38xx { + uint64_t reserved_28_63:36; + uint64_t doorbell:20; + uint64_t reserved_6_7:2; + uint64_t static_p:1; + uint64_t s_tail:1; + uint64_t static_q:1; + uint64_t qos:3; + } cn38xx; + struct cvmx_pko_mem_debug9_cn38xx cn38xxp2; + struct cvmx_pko_mem_debug9_cn50xx { + uint64_t reserved_49_63:15; + uint64_t ptrs0:17; + uint64_t reserved_17_31:15; + uint64_t ptrs3:17; + } cn50xx; + struct cvmx_pko_mem_debug9_cn50xx cn52xx; + struct cvmx_pko_mem_debug9_cn50xx cn52xxp1; + struct cvmx_pko_mem_debug9_cn50xx cn56xx; + struct cvmx_pko_mem_debug9_cn50xx cn56xxp1; + struct cvmx_pko_mem_debug9_cn50xx cn58xx; + struct cvmx_pko_mem_debug9_cn50xx cn58xxp1; +}; + +union cvmx_pko_mem_port_ptrs { + uint64_t u64; + struct cvmx_pko_mem_port_ptrs_s { + uint64_t reserved_62_63:2; + uint64_t static_p:1; + uint64_t qos_mask:8; + uint64_t reserved_16_52:37; + uint64_t bp_port:6; + uint64_t eid:4; + uint64_t pid:6; + } s; + struct cvmx_pko_mem_port_ptrs_s cn52xx; + struct cvmx_pko_mem_port_ptrs_s cn52xxp1; + struct cvmx_pko_mem_port_ptrs_s cn56xx; + struct cvmx_pko_mem_port_ptrs_s cn56xxp1; +}; + +union cvmx_pko_mem_port_qos { + uint64_t u64; + struct cvmx_pko_mem_port_qos_s { + uint64_t reserved_61_63:3; + uint64_t qos_mask:8; + uint64_t reserved_10_52:43; + uint64_t eid:4; + uint64_t pid:6; + } s; + struct cvmx_pko_mem_port_qos_s cn52xx; + struct cvmx_pko_mem_port_qos_s cn52xxp1; + struct cvmx_pko_mem_port_qos_s cn56xx; + struct cvmx_pko_mem_port_qos_s cn56xxp1; +}; + +union cvmx_pko_mem_port_rate0 { + uint64_t u64; + struct cvmx_pko_mem_port_rate0_s { + uint64_t reserved_51_63:13; + uint64_t rate_word:19; + uint64_t rate_pkt:24; + uint64_t reserved_6_7:2; + uint64_t pid:6; + } s; + struct cvmx_pko_mem_port_rate0_s cn52xx; + struct cvmx_pko_mem_port_rate0_s cn52xxp1; + struct cvmx_pko_mem_port_rate0_s cn56xx; + struct cvmx_pko_mem_port_rate0_s cn56xxp1; +}; + +union cvmx_pko_mem_port_rate1 { + uint64_t u64; + struct cvmx_pko_mem_port_rate1_s { + uint64_t reserved_32_63:32; + uint64_t rate_lim:24; + uint64_t reserved_6_7:2; + uint64_t pid:6; + } s; + struct cvmx_pko_mem_port_rate1_s cn52xx; + struct cvmx_pko_mem_port_rate1_s cn52xxp1; + struct cvmx_pko_mem_port_rate1_s cn56xx; + struct cvmx_pko_mem_port_rate1_s cn56xxp1; +}; + +union cvmx_pko_mem_queue_ptrs { + uint64_t u64; + struct cvmx_pko_mem_queue_ptrs_s { + uint64_t s_tail:1; + uint64_t static_p:1; + uint64_t static_q:1; + uint64_t qos_mask:8; + uint64_t buf_ptr:36; + uint64_t tail:1; + uint64_t index:3; + uint64_t port:6; + uint64_t queue:7; + } s; + struct cvmx_pko_mem_queue_ptrs_s cn30xx; + struct cvmx_pko_mem_queue_ptrs_s cn31xx; + struct cvmx_pko_mem_queue_ptrs_s cn38xx; + struct cvmx_pko_mem_queue_ptrs_s cn38xxp2; + struct cvmx_pko_mem_queue_ptrs_s cn50xx; + struct cvmx_pko_mem_queue_ptrs_s cn52xx; + struct cvmx_pko_mem_queue_ptrs_s cn52xxp1; + struct cvmx_pko_mem_queue_ptrs_s cn56xx; + struct cvmx_pko_mem_queue_ptrs_s cn56xxp1; + struct cvmx_pko_mem_queue_ptrs_s cn58xx; + struct cvmx_pko_mem_queue_ptrs_s cn58xxp1; +}; + +union cvmx_pko_mem_queue_qos { + uint64_t u64; + struct cvmx_pko_mem_queue_qos_s { + uint64_t reserved_61_63:3; + uint64_t qos_mask:8; + uint64_t 
reserved_13_52:40; + uint64_t pid:6; + uint64_t qid:7; + } s; + struct cvmx_pko_mem_queue_qos_s cn30xx; + struct cvmx_pko_mem_queue_qos_s cn31xx; + struct cvmx_pko_mem_queue_qos_s cn38xx; + struct cvmx_pko_mem_queue_qos_s cn38xxp2; + struct cvmx_pko_mem_queue_qos_s cn50xx; + struct cvmx_pko_mem_queue_qos_s cn52xx; + struct cvmx_pko_mem_queue_qos_s cn52xxp1; + struct cvmx_pko_mem_queue_qos_s cn56xx; + struct cvmx_pko_mem_queue_qos_s cn56xxp1; + struct cvmx_pko_mem_queue_qos_s cn58xx; + struct cvmx_pko_mem_queue_qos_s cn58xxp1; +}; + +union cvmx_pko_reg_bist_result { + uint64_t u64; + struct cvmx_pko_reg_bist_result_s { + uint64_t reserved_0_63:64; + } s; + struct cvmx_pko_reg_bist_result_cn30xx { + uint64_t reserved_27_63:37; + uint64_t psb2:5; + uint64_t count:1; + uint64_t rif:1; + uint64_t wif:1; + uint64_t ncb:1; + uint64_t out:1; + uint64_t crc:1; + uint64_t chk:1; + uint64_t qsb:2; + uint64_t qcb:2; + uint64_t pdb:4; + uint64_t psb:7; + } cn30xx; + struct cvmx_pko_reg_bist_result_cn30xx cn31xx; + struct cvmx_pko_reg_bist_result_cn30xx cn38xx; + struct cvmx_pko_reg_bist_result_cn30xx cn38xxp2; + struct cvmx_pko_reg_bist_result_cn50xx { + uint64_t reserved_33_63:31; + uint64_t csr:1; + uint64_t iob:1; + uint64_t out_crc:1; + uint64_t out_ctl:3; + uint64_t out_sta:1; + uint64_t out_wif:1; + uint64_t prt_chk:3; + uint64_t prt_nxt:1; + uint64_t prt_psb:6; + uint64_t ncb_inb:2; + uint64_t prt_qcb:2; + uint64_t prt_qsb:3; + uint64_t dat_dat:4; + uint64_t dat_ptr:4; + } cn50xx; + struct cvmx_pko_reg_bist_result_cn52xx { + uint64_t reserved_35_63:29; + uint64_t csr:1; + uint64_t iob:1; + uint64_t out_dat:1; + uint64_t out_ctl:3; + uint64_t out_sta:1; + uint64_t out_wif:1; + uint64_t prt_chk:3; + uint64_t prt_nxt:1; + uint64_t prt_psb:8; + uint64_t ncb_inb:2; + uint64_t prt_qcb:2; + uint64_t prt_qsb:3; + uint64_t prt_ctl:2; + uint64_t dat_dat:2; + uint64_t dat_ptr:4; + } cn52xx; + struct cvmx_pko_reg_bist_result_cn52xx cn52xxp1; + struct cvmx_pko_reg_bist_result_cn52xx cn56xx; + struct cvmx_pko_reg_bist_result_cn52xx cn56xxp1; + struct cvmx_pko_reg_bist_result_cn50xx cn58xx; + struct cvmx_pko_reg_bist_result_cn50xx cn58xxp1; +}; + +union cvmx_pko_reg_cmd_buf { + uint64_t u64; + struct cvmx_pko_reg_cmd_buf_s { + uint64_t reserved_23_63:41; + uint64_t pool:3; + uint64_t reserved_13_19:7; + uint64_t size:13; + } s; + struct cvmx_pko_reg_cmd_buf_s cn30xx; + struct cvmx_pko_reg_cmd_buf_s cn31xx; + struct cvmx_pko_reg_cmd_buf_s cn38xx; + struct cvmx_pko_reg_cmd_buf_s cn38xxp2; + struct cvmx_pko_reg_cmd_buf_s cn50xx; + struct cvmx_pko_reg_cmd_buf_s cn52xx; + struct cvmx_pko_reg_cmd_buf_s cn52xxp1; + struct cvmx_pko_reg_cmd_buf_s cn56xx; + struct cvmx_pko_reg_cmd_buf_s cn56xxp1; + struct cvmx_pko_reg_cmd_buf_s cn58xx; + struct cvmx_pko_reg_cmd_buf_s cn58xxp1; +}; + +union cvmx_pko_reg_crc_ctlx { + uint64_t u64; + struct cvmx_pko_reg_crc_ctlx_s { + uint64_t reserved_2_63:62; + uint64_t invres:1; + uint64_t refin:1; + } s; + struct cvmx_pko_reg_crc_ctlx_s cn38xx; + struct cvmx_pko_reg_crc_ctlx_s cn38xxp2; + struct cvmx_pko_reg_crc_ctlx_s cn58xx; + struct cvmx_pko_reg_crc_ctlx_s cn58xxp1; +}; + +union cvmx_pko_reg_crc_enable { + uint64_t u64; + struct cvmx_pko_reg_crc_enable_s { + uint64_t reserved_32_63:32; + uint64_t enable:32; + } s; + struct cvmx_pko_reg_crc_enable_s cn38xx; + struct cvmx_pko_reg_crc_enable_s cn38xxp2; + struct cvmx_pko_reg_crc_enable_s cn58xx; + struct cvmx_pko_reg_crc_enable_s cn58xxp1; +}; + +union cvmx_pko_reg_crc_ivx { + uint64_t u64; + struct cvmx_pko_reg_crc_ivx_s { + uint64_t 
reserved_32_63:32; + uint64_t iv:32; + } s; + struct cvmx_pko_reg_crc_ivx_s cn38xx; + struct cvmx_pko_reg_crc_ivx_s cn38xxp2; + struct cvmx_pko_reg_crc_ivx_s cn58xx; + struct cvmx_pko_reg_crc_ivx_s cn58xxp1; +}; + +union cvmx_pko_reg_debug0 { + uint64_t u64; + struct cvmx_pko_reg_debug0_s { + uint64_t asserts:64; + } s; + struct cvmx_pko_reg_debug0_cn30xx { + uint64_t reserved_17_63:47; + uint64_t asserts:17; + } cn30xx; + struct cvmx_pko_reg_debug0_cn30xx cn31xx; + struct cvmx_pko_reg_debug0_cn30xx cn38xx; + struct cvmx_pko_reg_debug0_cn30xx cn38xxp2; + struct cvmx_pko_reg_debug0_s cn50xx; + struct cvmx_pko_reg_debug0_s cn52xx; + struct cvmx_pko_reg_debug0_s cn52xxp1; + struct cvmx_pko_reg_debug0_s cn56xx; + struct cvmx_pko_reg_debug0_s cn56xxp1; + struct cvmx_pko_reg_debug0_s cn58xx; + struct cvmx_pko_reg_debug0_s cn58xxp1; +}; + +union cvmx_pko_reg_debug1 { + uint64_t u64; + struct cvmx_pko_reg_debug1_s { + uint64_t asserts:64; + } s; + struct cvmx_pko_reg_debug1_s cn50xx; + struct cvmx_pko_reg_debug1_s cn52xx; + struct cvmx_pko_reg_debug1_s cn52xxp1; + struct cvmx_pko_reg_debug1_s cn56xx; + struct cvmx_pko_reg_debug1_s cn56xxp1; + struct cvmx_pko_reg_debug1_s cn58xx; + struct cvmx_pko_reg_debug1_s cn58xxp1; +}; + +union cvmx_pko_reg_debug2 { + uint64_t u64; + struct cvmx_pko_reg_debug2_s { + uint64_t asserts:64; + } s; + struct cvmx_pko_reg_debug2_s cn50xx; + struct cvmx_pko_reg_debug2_s cn52xx; + struct cvmx_pko_reg_debug2_s cn52xxp1; + struct cvmx_pko_reg_debug2_s cn56xx; + struct cvmx_pko_reg_debug2_s cn56xxp1; + struct cvmx_pko_reg_debug2_s cn58xx; + struct cvmx_pko_reg_debug2_s cn58xxp1; +}; + +union cvmx_pko_reg_debug3 { + uint64_t u64; + struct cvmx_pko_reg_debug3_s { + uint64_t asserts:64; + } s; + struct cvmx_pko_reg_debug3_s cn50xx; + struct cvmx_pko_reg_debug3_s cn52xx; + struct cvmx_pko_reg_debug3_s cn52xxp1; + struct cvmx_pko_reg_debug3_s cn56xx; + struct cvmx_pko_reg_debug3_s cn56xxp1; + struct cvmx_pko_reg_debug3_s cn58xx; + struct cvmx_pko_reg_debug3_s cn58xxp1; +}; + +union cvmx_pko_reg_engine_inflight { + uint64_t u64; + struct cvmx_pko_reg_engine_inflight_s { + uint64_t reserved_40_63:24; + uint64_t engine9:4; + uint64_t engine8:4; + uint64_t engine7:4; + uint64_t engine6:4; + uint64_t engine5:4; + uint64_t engine4:4; + uint64_t engine3:4; + uint64_t engine2:4; + uint64_t engine1:4; + uint64_t engine0:4; + } s; + struct cvmx_pko_reg_engine_inflight_s cn52xx; + struct cvmx_pko_reg_engine_inflight_s cn52xxp1; + struct cvmx_pko_reg_engine_inflight_s cn56xx; + struct cvmx_pko_reg_engine_inflight_s cn56xxp1; +}; + +union cvmx_pko_reg_engine_thresh { + uint64_t u64; + struct cvmx_pko_reg_engine_thresh_s { + uint64_t reserved_10_63:54; + uint64_t mask:10; + } s; + struct cvmx_pko_reg_engine_thresh_s cn52xx; + struct cvmx_pko_reg_engine_thresh_s cn52xxp1; + struct cvmx_pko_reg_engine_thresh_s cn56xx; + struct cvmx_pko_reg_engine_thresh_s cn56xxp1; +}; + +union cvmx_pko_reg_error { + uint64_t u64; + struct cvmx_pko_reg_error_s { + uint64_t reserved_3_63:61; + uint64_t currzero:1; + uint64_t doorbell:1; + uint64_t parity:1; + } s; + struct cvmx_pko_reg_error_cn30xx { + uint64_t reserved_2_63:62; + uint64_t doorbell:1; + uint64_t parity:1; + } cn30xx; + struct cvmx_pko_reg_error_cn30xx cn31xx; + struct cvmx_pko_reg_error_cn30xx cn38xx; + struct cvmx_pko_reg_error_cn30xx cn38xxp2; + struct cvmx_pko_reg_error_s cn50xx; + struct cvmx_pko_reg_error_s cn52xx; + struct cvmx_pko_reg_error_s cn52xxp1; + struct cvmx_pko_reg_error_s cn56xx; + struct cvmx_pko_reg_error_s cn56xxp1; + 
struct cvmx_pko_reg_error_s cn58xx; + struct cvmx_pko_reg_error_s cn58xxp1; +}; + +union cvmx_pko_reg_flags { + uint64_t u64; + struct cvmx_pko_reg_flags_s { + uint64_t reserved_4_63:60; + uint64_t reset:1; + uint64_t store_be:1; + uint64_t ena_dwb:1; + uint64_t ena_pko:1; + } s; + struct cvmx_pko_reg_flags_s cn30xx; + struct cvmx_pko_reg_flags_s cn31xx; + struct cvmx_pko_reg_flags_s cn38xx; + struct cvmx_pko_reg_flags_s cn38xxp2; + struct cvmx_pko_reg_flags_s cn50xx; + struct cvmx_pko_reg_flags_s cn52xx; + struct cvmx_pko_reg_flags_s cn52xxp1; + struct cvmx_pko_reg_flags_s cn56xx; + struct cvmx_pko_reg_flags_s cn56xxp1; + struct cvmx_pko_reg_flags_s cn58xx; + struct cvmx_pko_reg_flags_s cn58xxp1; +}; + +union cvmx_pko_reg_gmx_port_mode { + uint64_t u64; + struct cvmx_pko_reg_gmx_port_mode_s { + uint64_t reserved_6_63:58; + uint64_t mode1:3; + uint64_t mode0:3; + } s; + struct cvmx_pko_reg_gmx_port_mode_s cn30xx; + struct cvmx_pko_reg_gmx_port_mode_s cn31xx; + struct cvmx_pko_reg_gmx_port_mode_s cn38xx; + struct cvmx_pko_reg_gmx_port_mode_s cn38xxp2; + struct cvmx_pko_reg_gmx_port_mode_s cn50xx; + struct cvmx_pko_reg_gmx_port_mode_s cn52xx; + struct cvmx_pko_reg_gmx_port_mode_s cn52xxp1; + struct cvmx_pko_reg_gmx_port_mode_s cn56xx; + struct cvmx_pko_reg_gmx_port_mode_s cn56xxp1; + struct cvmx_pko_reg_gmx_port_mode_s cn58xx; + struct cvmx_pko_reg_gmx_port_mode_s cn58xxp1; +}; + +union cvmx_pko_reg_int_mask { + uint64_t u64; + struct cvmx_pko_reg_int_mask_s { + uint64_t reserved_3_63:61; + uint64_t currzero:1; + uint64_t doorbell:1; + uint64_t parity:1; + } s; + struct cvmx_pko_reg_int_mask_cn30xx { + uint64_t reserved_2_63:62; + uint64_t doorbell:1; + uint64_t parity:1; + } cn30xx; + struct cvmx_pko_reg_int_mask_cn30xx cn31xx; + struct cvmx_pko_reg_int_mask_cn30xx cn38xx; + struct cvmx_pko_reg_int_mask_cn30xx cn38xxp2; + struct cvmx_pko_reg_int_mask_s cn50xx; + struct cvmx_pko_reg_int_mask_s cn52xx; + struct cvmx_pko_reg_int_mask_s cn52xxp1; + struct cvmx_pko_reg_int_mask_s cn56xx; + struct cvmx_pko_reg_int_mask_s cn56xxp1; + struct cvmx_pko_reg_int_mask_s cn58xx; + struct cvmx_pko_reg_int_mask_s cn58xxp1; +}; + +union cvmx_pko_reg_queue_mode { + uint64_t u64; + struct cvmx_pko_reg_queue_mode_s { + uint64_t reserved_2_63:62; + uint64_t mode:2; + } s; + struct cvmx_pko_reg_queue_mode_s cn30xx; + struct cvmx_pko_reg_queue_mode_s cn31xx; + struct cvmx_pko_reg_queue_mode_s cn38xx; + struct cvmx_pko_reg_queue_mode_s cn38xxp2; + struct cvmx_pko_reg_queue_mode_s cn50xx; + struct cvmx_pko_reg_queue_mode_s cn52xx; + struct cvmx_pko_reg_queue_mode_s cn52xxp1; + struct cvmx_pko_reg_queue_mode_s cn56xx; + struct cvmx_pko_reg_queue_mode_s cn56xxp1; + struct cvmx_pko_reg_queue_mode_s cn58xx; + struct cvmx_pko_reg_queue_mode_s cn58xxp1; +}; + +union cvmx_pko_reg_queue_ptrs1 { + uint64_t u64; + struct cvmx_pko_reg_queue_ptrs1_s { + uint64_t reserved_2_63:62; + uint64_t idx3:1; + uint64_t qid7:1; + } s; + struct cvmx_pko_reg_queue_ptrs1_s cn50xx; + struct cvmx_pko_reg_queue_ptrs1_s cn52xx; + struct cvmx_pko_reg_queue_ptrs1_s cn52xxp1; + struct cvmx_pko_reg_queue_ptrs1_s cn56xx; + struct cvmx_pko_reg_queue_ptrs1_s cn56xxp1; + struct cvmx_pko_reg_queue_ptrs1_s cn58xx; + struct cvmx_pko_reg_queue_ptrs1_s cn58xxp1; +}; + +union cvmx_pko_reg_read_idx { + uint64_t u64; + struct cvmx_pko_reg_read_idx_s { + uint64_t reserved_16_63:48; + uint64_t inc:8; + uint64_t index:8; + } s; + struct cvmx_pko_reg_read_idx_s cn30xx; + struct cvmx_pko_reg_read_idx_s cn31xx; + struct cvmx_pko_reg_read_idx_s cn38xx; + struct 
cvmx_pko_reg_read_idx_s cn38xxp2;
+	struct cvmx_pko_reg_read_idx_s cn50xx;
+	struct cvmx_pko_reg_read_idx_s cn52xx;
+	struct cvmx_pko_reg_read_idx_s cn52xxp1;
+	struct cvmx_pko_reg_read_idx_s cn56xx;
+	struct cvmx_pko_reg_read_idx_s cn56xxp1;
+	struct cvmx_pko_reg_read_idx_s cn58xx;
+	struct cvmx_pko_reg_read_idx_s cn58xxp1;
+};
+
+#endif
diff --git a/drivers/staging/octeon/cvmx-pko.c b/drivers/staging/octeon/cvmx-pko.c
new file mode 100644
index 0000000..00db915
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-pko.c
@@ -0,0 +1,506 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Support library for the hardware Packet Output unit.
+ */
+
+#include <asm/octeon/octeon.h>
+
+#include "cvmx-config.h"
+#include "cvmx-pko.h"
+#include "cvmx-helper.h"
+
+/**
+ * Internal state of packet output
+ */
+
+/**
+ * Call before any other calls to initialize the packet
+ * output system. This does chip global config, and should only be
+ * done by one core.
+ */
+
+void cvmx_pko_initialize_global(void)
+{
+	int i;
+	uint64_t priority = 8;
+	union cvmx_pko_reg_cmd_buf config;
+
+	/*
+	 * Set the size of the PKO command buffers to an odd number of
+	 * 64bit words. This allows the normal two word send to stay
+	 * aligned and never span a command word buffer.
+	 */
+	config.u64 = 0;
+	config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
+	config.s.size = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE / 8 - 1;
+
+	cvmx_write_csr(CVMX_PKO_REG_CMD_BUF, config.u64);
+
+	for (i = 0; i < CVMX_PKO_MAX_OUTPUT_QUEUES; i++)
+		cvmx_pko_config_port(CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID, i, 1,
+				     &priority);
+
+	/*
+	 * If we aren't using all of the queues optimize PKO's
+	 * internal memory.
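+	 * For example, a CN38XX using at most 32 queues can run in
+	 * the reduced queue mode 2 written below, and one using at
+	 * most 64 queues in mode 1.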
+	 */
+	if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)
+	    || OCTEON_IS_MODEL(OCTEON_CN56XX)
+	    || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+		int num_interfaces = cvmx_helper_get_number_of_interfaces();
+		int last_port =
+		    cvmx_helper_get_last_ipd_port(num_interfaces - 1);
+		int max_queues =
+		    cvmx_pko_get_base_queue(last_port) +
+		    cvmx_pko_get_num_queues(last_port);
+		if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+			if (max_queues <= 32)
+				cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
+			else if (max_queues <= 64)
+				cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
+		} else {
+			if (max_queues <= 64)
+				cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
+			else if (max_queues <= 128)
+				cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
+		}
+	}
+}
+
+/**
+ * This function does per-core initialization required by the PKO routines.
+ * This must be called on all cores that will do packet output, and must
+ * be called after the FPA has been initialized and filled with pages.
+ *
+ * Returns 0 on success
+ *	   !0 on failure
+ */
+int cvmx_pko_initialize_local(void)
+{
+	/* Nothing to do */
+	return 0;
+}
+
+/**
+ * Enables the packet output hardware. It must already be
+ * configured.
+ */
+void cvmx_pko_enable(void)
+{
+	union cvmx_pko_reg_flags flags;
+
+	flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
+	if (flags.s.ena_pko)
+		cvmx_dprintf
+		    ("Warning: Enabling PKO when PKO already enabled.\n");
+
+	flags.s.ena_dwb = 1;
+	flags.s.ena_pko = 1;
+	/*
+	 * Always enable big endian for 3-word command. Does nothing
+	 * for 2-word.
+	 */
+	flags.s.store_be = 1;
+	cvmx_write_csr(CVMX_PKO_REG_FLAGS, flags.u64);
+}
+
+/**
+ * Disables the packet output. Does not affect any configuration.
+ */
+void cvmx_pko_disable(void)
+{
+	union cvmx_pko_reg_flags pko_reg_flags;
+	pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
+	pko_reg_flags.s.ena_pko = 0;
+	cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
+}
+
+
+/**
+ * Reset the packet output.
+ */
+static void __cvmx_pko_reset(void)
+{
+	union cvmx_pko_reg_flags pko_reg_flags;
+	pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
+	pko_reg_flags.s.reset = 1;
+	cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
+}
+
+/**
+ * Shutdown and free resources required by packet output.
+ */
+void cvmx_pko_shutdown(void)
+{
+	union cvmx_pko_mem_queue_ptrs config;
+	int queue;
+
+	cvmx_pko_disable();
+
+	for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++) {
+		config.u64 = 0;
+		config.s.tail = 1;
+		config.s.index = 0;
+		config.s.port = CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID;
+		config.s.queue = queue & 0x7f;
+		config.s.qos_mask = 0;
+		config.s.buf_ptr = 0;
+		if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+			union cvmx_pko_reg_queue_ptrs1 config1;
+			config1.u64 = 0;
+			config1.s.qid7 = queue >> 7;
+			cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
+		}
+		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
+		cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(queue));
+	}
+	__cvmx_pko_reset();
+}
+
+/**
+ * Configure an output port and the associated queues for use.
+ *
+ * @port:	Port to configure.
+ * @base_queue: First queue number to associate with this port.
+ * @num_queues: Number of queues to associate with this port
+ * @priority:	Array of priority levels for each queue. Values are
+ *		allowed to be 0-8. A value of 8 gets 8 times the traffic
+ *		of a value of 1. A value of 0 indicates that the queue
+ *		does not participate in any rounds. These priorities can
+ *		be changed on the fly while PKO is enabled. A priority
+ *		of 9 indicates that static priority should be used.
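+ *		(9 is the CVMX_PKO_QUEUE_STATIC_PRIORITY constant
+ *		defined in cvmx-pko.h.)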
+ *		If static priority is used, all queues with static
+ *		priority must be contiguous starting at the base_queue,
+ *		and lower numbered queues have higher priority than
+ *		higher numbered queues. There must be num_queues
+ *		elements in the array.
+ */
+cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue,
+				       uint64_t num_queues,
+				       const uint64_t priority[])
+{
+	cvmx_pko_status_t result_code;
+	uint64_t queue;
+	union cvmx_pko_mem_queue_ptrs config;
+	union cvmx_pko_reg_queue_ptrs1 config1;
+	int static_priority_base = -1;
+	int static_priority_end = -1;
+
+	if ((port >= CVMX_PKO_NUM_OUTPUT_PORTS)
+	    && (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)) {
+		cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid port %llu\n",
+			     (unsigned long long)port);
+		return CVMX_PKO_INVALID_PORT;
+	}
+
+	if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES) {
+		cvmx_dprintf
+		    ("ERROR: cvmx_pko_config_port: Invalid queue range %llu\n",
+		     (unsigned long long)(base_queue + num_queues));
+		return CVMX_PKO_INVALID_QUEUE;
+	}
+
+	if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
+		/*
+		 * Validate the static queue priority setup and set
+		 * static_priority_base and static_priority_end
+		 * accordingly.
+		 */
+		for (queue = 0; queue < num_queues; queue++) {
+			/* Find first queue of static priority */
+			if (static_priority_base == -1
+			    && priority[queue] ==
+			    CVMX_PKO_QUEUE_STATIC_PRIORITY)
+				static_priority_base = queue;
+			/* Find last queue of static priority */
+			if (static_priority_base != -1
+			    && static_priority_end == -1
+			    && priority[queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY
+			    && queue)
+				static_priority_end = queue - 1;
+			else if (static_priority_base != -1
+				 && static_priority_end == -1
+				 && queue == num_queues - 1)
+				/* all queues are static priority */
+				static_priority_end = queue;
+			/*
+			 * Check to make sure all static priority
+			 * queues are contiguous. Also catches some
+			 * cases of static priorities not starting at
+			 * queue 0.
+			 */
+			if (static_priority_end != -1
+			    && (int)queue > static_priority_end
+			    && priority[queue] ==
+			    CVMX_PKO_QUEUE_STATIC_PRIORITY) {
+				cvmx_dprintf("ERROR: cvmx_pko_config_port: "
+					     "Static priority queues aren't "
+					     "contiguous or don't start at "
+					     "base queue. q: %d, eq: %d\n",
+					     (int)queue, static_priority_end);
+				return CVMX_PKO_INVALID_PRIORITY;
+			}
+		}
+		if (static_priority_base > 0) {
+			cvmx_dprintf("ERROR: cvmx_pko_config_port: Static "
+				     "priority queues don't start at base "
+				     "queue. sq: %d\n",
+				     static_priority_base);
+			return CVMX_PKO_INVALID_PRIORITY;
+		}
+#if 0
+		cvmx_dprintf("Port %d: Static priority queue base: %d, "
+			     "end: %d\n", port,
+			     static_priority_base, static_priority_end);
+#endif
+	}
+	/*
+	 * At this point, static_priority_base and static_priority_end
+	 * are either both -1, or are valid start/end queue
+	 * numbers.
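+	 * For example, priorities {9, 9, 1, 2} yield base 0 and
+	 * end 1, while {1, 9, 9} is rejected above because the
+	 * static queues do not start at the base queue.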
+	 */
+
+	result_code = CVMX_PKO_SUCCESS;
+
+#ifdef PKO_DEBUG
+	cvmx_dprintf("num queues: %d (%lld,%lld)\n", num_queues,
+		     CVMX_PKO_QUEUES_PER_PORT_INTERFACE0,
+		     CVMX_PKO_QUEUES_PER_PORT_INTERFACE1);
+#endif
+
+	for (queue = 0; queue < num_queues; queue++) {
+		uint64_t *buf_ptr = NULL;
+
+		config1.u64 = 0;
+		config1.s.idx3 = queue >> 3;
+		config1.s.qid7 = (base_queue + queue) >> 7;
+
+		config.u64 = 0;
+		config.s.tail = queue == (num_queues - 1);
+		config.s.index = queue;
+		config.s.port = port;
+		config.s.queue = base_queue + queue;
+
+		if (!cvmx_octeon_is_pass1()) {
+			config.s.static_p = static_priority_base >= 0;
+			config.s.static_q = (int)queue <= static_priority_end;
+			config.s.s_tail = (int)queue == static_priority_end;
+		}
+		/*
+		 * Convert the priority into an enable bit field. Try
+		 * to space the bits out evenly so the packets don't
+		 * get grouped up.
+		 */
+		switch ((int)priority[queue]) {
+		case 0:
+			config.s.qos_mask = 0x00;
+			break;
+		case 1:
+			config.s.qos_mask = 0x01;
+			break;
+		case 2:
+			config.s.qos_mask = 0x11;
+			break;
+		case 3:
+			config.s.qos_mask = 0x49;
+			break;
+		case 4:
+			config.s.qos_mask = 0x55;
+			break;
+		case 5:
+			config.s.qos_mask = 0x57;
+			break;
+		case 6:
+			config.s.qos_mask = 0x77;
+			break;
+		case 7:
+			config.s.qos_mask = 0x7f;
+			break;
+		case 8:
+			config.s.qos_mask = 0xff;
+			break;
+		case CVMX_PKO_QUEUE_STATIC_PRIORITY:
+			/* Pass 1 will fall through to the error case */
+			if (!cvmx_octeon_is_pass1()) {
+				config.s.qos_mask = 0xff;
+				break;
+			}
+		default:
+			cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid "
+				     "priority %llu\n",
+				     (unsigned long long)priority[queue]);
+			config.s.qos_mask = 0xff;
+			result_code = CVMX_PKO_INVALID_PRIORITY;
+			break;
+		}
+
+		if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
+			cvmx_cmd_queue_result_t cmd_res =
+			    cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO
+						      (base_queue + queue),
+						      CVMX_PKO_MAX_QUEUE_DEPTH,
+						      CVMX_FPA_OUTPUT_BUFFER_POOL,
+						      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
+						      -
+						      CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST
+						      * 8);
+			if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
+				switch (cmd_res) {
+				case CVMX_CMD_QUEUE_NO_MEMORY:
+					cvmx_dprintf("ERROR: "
+						     "cvmx_pko_config_port: "
+						     "Unable to allocate "
+						     "output buffer.\n");
+					return CVMX_PKO_NO_MEMORY;
+				case CVMX_CMD_QUEUE_ALREADY_SETUP:
+					cvmx_dprintf
+					    ("ERROR: cvmx_pko_config_port: Port already setup.\n");
+					return CVMX_PKO_PORT_ALREADY_SETUP;
+				case CVMX_CMD_QUEUE_INVALID_PARAM:
+				default:
+					cvmx_dprintf
+					    ("ERROR: cvmx_pko_config_port: Command queue initialization failed.\n");
+					return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
+				}
+			}
+
+			buf_ptr =
+			    (uint64_t *)
+			    cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_PKO
+						  (base_queue + queue));
+			config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr);
+		} else
+			config.s.buf_ptr = 0;
+
+		CVMX_SYNCWS;
+
+		if (!OCTEON_IS_MODEL(OCTEON_CN3XXX))
+			cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
+		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
+	}
+
+	return result_code;
+}
+
+#ifdef PKO_DEBUG
+/**
+ * Show map of ports -> queues for different cores.
+ */
+void cvmx_pko_show_queue_map(void)
+{
+	int core, port;
+	int pko_output_ports = 36;
+
+	cvmx_dprintf("port");
+	for (port = 0; port < pko_output_ports; port++)
+		cvmx_dprintf("%3d ", port);
+	cvmx_dprintf("\n");
+
+	for (core = 0; core < CVMX_MAX_CORES; core++) {
+		cvmx_dprintf("\n%2d: ", core);
+		for (port = 0; port < pko_output_ports; port++) {
+			cvmx_dprintf("%3d ",
+				     cvmx_pko_get_base_queue_per_core(port,
+								      core));
+		}
+	}
+	cvmx_dprintf("\n");
+}
+#endif
+
+/**
+ * Rate limit a PKO port to a maximum packets/sec.
+ * This function is only supported on CN51XX and higher,
+ * excluding CN58XX.
+ *
+ * @port:      Port to rate limit
+ * @packets_s: Maximum packets/sec
+ * @burst:     Maximum number of packets to burst in a row before rate
+ *	       limiting cuts in.
+ *
+ * Returns Zero on success, negative on failure
+ */
+int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst)
+{
+	union cvmx_pko_mem_port_rate0 pko_mem_port_rate0;
+	union cvmx_pko_mem_port_rate1 pko_mem_port_rate1;
+
+	pko_mem_port_rate0.u64 = 0;
+	pko_mem_port_rate0.s.pid = port;
+	pko_mem_port_rate0.s.rate_pkt =
+	    cvmx_sysinfo_get()->cpu_clock_hz / packets_s / 16;
+	/* No cost per word since we are limited by packets/sec, not bits/sec */
+	pko_mem_port_rate0.s.rate_word = 0;
+
+	pko_mem_port_rate1.u64 = 0;
+	pko_mem_port_rate1.s.pid = port;
+	pko_mem_port_rate1.s.rate_lim =
+	    ((uint64_t) pko_mem_port_rate0.s.rate_pkt * burst) >> 8;
+
+	cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
+	cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
+	return 0;
+}
+
+/**
+ * Rate limit a PKO port to a maximum bits/sec. This function is only
+ * supported on CN51XX and higher, excluding CN58XX.
+ *
+ * @port:   Port to rate limit
+ * @bits_s: PKO rate limit in bits/sec
+ * @burst:  Maximum number of bits to burst before rate
+ *	    limiting cuts in.
+ *
+ * Returns Zero on success, negative on failure
+ */
+int cvmx_pko_rate_limit_bits(int port, uint64_t bits_s, int burst)
+{
+	union cvmx_pko_mem_port_rate0 pko_mem_port_rate0;
+	union cvmx_pko_mem_port_rate1 pko_mem_port_rate1;
+	uint64_t clock_rate = cvmx_sysinfo_get()->cpu_clock_hz;
+	uint64_t tokens_per_bit = clock_rate * 16 / bits_s;
+
+	pko_mem_port_rate0.u64 = 0;
+	pko_mem_port_rate0.s.pid = port;
+	/*
+	 * Each packet has 12 bytes of interframe gap, an 8 byte
+	 * preamble, and a 4 byte CRC. These are not included in the
+	 * per word count. Multiply by 8 to convert to bits and divide
+	 * by 256 for limit granularity.
+	 */
+	pko_mem_port_rate0.s.rate_pkt = (12 + 8 + 4) * 8 * tokens_per_bit / 256;
+	/* Each 8 byte word has 64 bits */
+	pko_mem_port_rate0.s.rate_word = 64 * tokens_per_bit;
+
+	pko_mem_port_rate1.u64 = 0;
+	pko_mem_port_rate1.s.pid = port;
+	pko_mem_port_rate1.s.rate_lim = tokens_per_bit * burst / 256;
+
+	cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
+	cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
+	return 0;
+}
diff --git a/drivers/staging/octeon/cvmx-pko.h b/drivers/staging/octeon/cvmx-pko.h
new file mode 100644
index 0000000..f068c19
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-pko.h
@@ -0,0 +1,610 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ *
+ * Interface to the hardware Packet Output unit.
+ *
+ * Starting with SDK 1.7.0, the PKO output functions now support
+ * two types of locking. CVMX_PKO_LOCK_ATOMIC_TAG continues to
+ * function similarly to previous SDKs by using POW atomic tags
+ * to preserve ordering and exclusivity. As a new option, you
+ * can now pass CVMX_PKO_LOCK_CMD_QUEUE which uses ll/sc
+ * memory based locking instead. This locking has the advantage
+ * of not affecting the tag state but doesn't preserve packet
+ * ordering. CVMX_PKO_LOCK_CMD_QUEUE is appropriate in most
+ * generic code while CVMX_PKO_LOCK_NONE should be used
+ * with hand tuned fast path code.
+ *
+ * Some other SDK differences are visible to the command
+ * queuing:
+ * - PKO indexes are no longer stored in the FAU. A large
+ *   percentage of the FAU register block used to be tied up
+ *   maintaining PKO queue pointers. These are now stored in a
+ *   global named block.
+ * - The PKO <b>use_locking</b> parameter can now have a global
+ *   effect. Since all applications use the same named block,
+ *   queue locking correctly applies across all operating
+ *   systems when using CVMX_PKO_LOCK_CMD_QUEUE.
+ * - PKO 3 word commands are now supported. Use
+ *   cvmx_pko_send_packet_finish3().
+ *
+ */
+
+#ifndef __CVMX_PKO_H__
+#define __CVMX_PKO_H__
+
+#include "cvmx-fpa.h"
+#include "cvmx-pow.h"
+#include "cvmx-cmd-queue.h"
+#include "cvmx-pko-defs.h"
+
+/* Adjust the command buffer size by 1 word so that in the case of using only
+ * two word PKO commands no command words straddle buffers. The useful values
+ * for this are 0 and 1. */
+#define CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST (1)
+
+#define CVMX_PKO_MAX_OUTPUT_QUEUES_STATIC 256
+#define CVMX_PKO_MAX_OUTPUT_QUEUES ((OCTEON_IS_MODEL(OCTEON_CN31XX) || \
+	OCTEON_IS_MODEL(OCTEON_CN3010) || OCTEON_IS_MODEL(OCTEON_CN3005) || \
+	OCTEON_IS_MODEL(OCTEON_CN50XX)) ? 32 : \
+		(OCTEON_IS_MODEL(OCTEON_CN58XX) || \
+		OCTEON_IS_MODEL(OCTEON_CN56XX)) ? 256 : 128)
+#define CVMX_PKO_NUM_OUTPUT_PORTS 40
+/* use this for queues that are not used */
+#define CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID 63
+#define CVMX_PKO_QUEUE_STATIC_PRIORITY 9
+#define CVMX_PKO_ILLEGAL_QUEUE 0xFFFF
+#define CVMX_PKO_MAX_QUEUE_DEPTH 0
+
+typedef enum {
+	CVMX_PKO_SUCCESS,
+	CVMX_PKO_INVALID_PORT,
+	CVMX_PKO_INVALID_QUEUE,
+	CVMX_PKO_INVALID_PRIORITY,
+	CVMX_PKO_NO_MEMORY,
+	CVMX_PKO_PORT_ALREADY_SETUP,
+	CVMX_PKO_CMD_QUEUE_INIT_ERROR
+} cvmx_pko_status_t;
+
+/**
+ * This enumeration represents the different locking modes supported by PKO.
+ */
+typedef enum {
+	/*
+	 * PKO doesn't do any locking. It is the responsibility of the
+	 * application to make sure that no other core is accessing
+	 * the same queue at the same time.
+	 */
+	CVMX_PKO_LOCK_NONE = 0,
+	/*
+	 * PKO performs an atomic tagswitch to ensure exclusive access
+	 * to the output queue. This will maintain packet ordering on
+	 * output.
+	 */
+	CVMX_PKO_LOCK_ATOMIC_TAG = 1,
+	/*
+	 * PKO uses the common command queue locks to ensure exclusive
+	 * access to the output queue. This is a memory based
+	 * ll/sc.
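+	 * (ll/sc = load-linked/store-conditional.)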
+	 * This is the most portable locking mechanism.
+	 */
+	CVMX_PKO_LOCK_CMD_QUEUE = 2,
+} cvmx_pko_lock_t;
+
+typedef struct {
+	uint32_t packets;
+	uint64_t octets;
+	uint64_t doorbell;
+} cvmx_pko_port_status_t;
+
+/**
+ * This structure defines the address to use on a packet enqueue
+ */
+typedef union {
+	uint64_t u64;
+	struct {
+		/* Must be CVMX_IO_SEG */
+		uint64_t mem_space:2;
+		/* Must be zero */
+		uint64_t reserved:13;
+		/* Must be one */
+		uint64_t is_io:1;
+		/* The ID of the device on the non-coherent bus */
+		uint64_t did:8;
+		/* Must be zero */
+		uint64_t reserved2:4;
+		/* Must be zero */
+		uint64_t reserved3:18;
+		/*
+		 * The hardware likes to have the output port in
+		 * addition to the output queue.
+		 */
+		uint64_t port:6;
+		/*
+		 * The output queue to send the packet to (0-127 are
+		 * legal)
+		 */
+		uint64_t queue:9;
+		/* Must be zero */
+		uint64_t reserved4:3;
+	} s;
+} cvmx_pko_doorbell_address_t;
+
+/**
+ * Structure of the first packet output command word.
+ */
+typedef union {
+	uint64_t u64;
+	struct {
+		/*
+		 * The size of the reg1 operation - could be 8, 16,
+		 * 32, or 64 bits.
+		 */
+		uint64_t size1:2;
+		/*
+		 * The size of the reg0 operation - could be 8, 16,
+		 * 32, or 64 bits.
+		 */
+		uint64_t size0:2;
+		/*
+		 * If set, subtract 1, if clear, subtract packet
+		 * size.
+		 */
+		uint64_t subone1:1;
+		/*
+		 * The register, subtract will be done if reg1 is
+		 * non-zero.
+		 */
+		uint64_t reg1:11;
+		/* If set, subtract 1, if clear, subtract packet size */
+		uint64_t subone0:1;
+		/* The register, subtract will be done if reg0 is non-zero */
+		uint64_t reg0:11;
+		/*
+		 * When set, interpret segment pointer and segment
+		 * bytes in little endian order.
+		 */
+		uint64_t le:1;
+		/*
+		 * When set, packet data is not allocated in the L2
+		 * cache by PKO.
+		 */
+		uint64_t n2:1;
+		/*
+		 * If set and rsp is set, word3 contains a pointer to
+		 * a work queue entry.
+		 */
+		uint64_t wqp:1;
+		/* If set, the hardware will send a response when done */
+		uint64_t rsp:1;
+		/*
+		 * If set, the supplied pkt_ptr is really a pointer to
+		 * a list of pkt_ptr's.
+		 */
+		uint64_t gather:1;
+		/*
+		 * If ipoffp1 is non-zero, (ipoffp1-1) is the number
+		 * of bytes to the IP header, and the hardware will
+		 * calculate and insert the UDP/TCP checksum.
+		 */
+		uint64_t ipoffp1:7;
+		/*
+		 * If set, ignore the I bit (force to zero) from all
+		 * pointer structures.
+		 */
+		uint64_t ignore_i:1;
+		/*
+		 * If clear, the hardware will attempt to free the
+		 * buffers containing the packet.
+		 */
+		uint64_t dontfree:1;
+		/*
+		 * The total number of segs in the packet, if gather
+		 * set, also gather list length.
+		 */
+		uint64_t segs:6;
+		/* Including L2, but no trailing CRC */
+		uint64_t total_bytes:16;
+	} s;
+} cvmx_pko_command_word0_t;
+
+/* CSR typedefs have been moved to cvmx-csr-*.h */
+
+/**
+ * Definition of internal state for Packet output processing
+ */
+typedef struct {
+	/* ptr to start of buffer, offset kept in FAU reg */
+	uint64_t *start_ptr;
+} cvmx_pko_state_elem_t;
+
+/**
+ * Call before any other calls to initialize the packet
+ * output system.
+ */
+extern void cvmx_pko_initialize_global(void);
+extern int cvmx_pko_initialize_local(void);
+
+/**
+ * Enables the packet output hardware. It must already be
+ * configured.
+ */
+extern void cvmx_pko_enable(void);
+
+/**
+ * Disables the packet output. Does not affect any configuration.
+ */
+extern void cvmx_pko_disable(void);
+
+/**
+ * Shutdown and free resources required by packet output.
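+ *
+ * This disables PKO, points every queue at the illegal PID, shuts
+ * down the per-queue command queues, and finally resets the unit.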
+ */
+
+extern void cvmx_pko_shutdown(void);
+
+/**
+ * Configure an output port and the associated queues for use.
+ *
+ * @port:	Port to configure.
+ * @base_queue: First queue number to associate with this port.
+ * @num_queues: Number of queues to associate with this port
+ * @priority:	Array of priority levels for each queue. Values are
+ *		allowed to be 0-8. A value of 8 gets 8 times the traffic
+ *		of a value of 1. There must be num_queues elements in
+ *		the array.
+ */
+extern cvmx_pko_status_t cvmx_pko_config_port(uint64_t port,
+					      uint64_t base_queue,
+					      uint64_t num_queues,
+					      const uint64_t priority[]);
+
+/**
+ * Ring the packet output doorbell. This tells the packet
+ * output hardware that "len" command words have been added
+ * to its pending list. This command includes the required
+ * CVMX_SYNCWS before the doorbell ring.
+ *
+ * @port:  Port the packet is for
+ * @queue: Queue the packet is for
+ * @len:   Length of the command in 64 bit words
+ */
+static inline void cvmx_pko_doorbell(uint64_t port, uint64_t queue,
+				     uint64_t len)
+{
+	cvmx_pko_doorbell_address_t ptr;
+
+	ptr.u64 = 0;
+	ptr.s.mem_space = CVMX_IO_SEG;
+	ptr.s.did = CVMX_OCT_DID_PKT_SEND;
+	ptr.s.is_io = 1;
+	ptr.s.port = port;
+	ptr.s.queue = queue;
+	/*
+	 * Need to make sure output queue data is in DRAM before
+	 * doorbell write.
+	 */
+	CVMX_SYNCWS;
+	cvmx_write_io(ptr.u64, len);
+}
+
+/**
+ * Prepare to send a packet. This may initiate a tag switch to
+ * get exclusive access to the output queue structure, and
+ * performs other prep work for the packet send operation.
+ *
+ * cvmx_pko_send_packet_finish() MUST be called after this function is called,
+ * and must be called with the same port/queue/use_locking arguments.
+ *
+ * The use_locking parameter allows the caller to use three
+ * possible locking modes.
+ * - CVMX_PKO_LOCK_NONE
+ *     - PKO doesn't do any locking. It is the responsibility
+ *       of the application to make sure that no other core
+ *       is accessing the same queue at the same time.
+ * - CVMX_PKO_LOCK_ATOMIC_TAG
+ *     - PKO performs an atomic tagswitch to ensure exclusive
+ *       access to the output queue. This will maintain
+ *       packet ordering on output.
+ * - CVMX_PKO_LOCK_CMD_QUEUE
+ *     - PKO uses the common command queue locks to ensure
+ *       exclusive access to the output queue. This is a
+ *       memory based ll/sc. This is the most portable
+ *       locking mechanism.
+ *
+ * NOTE: If atomic locking is used, the POW entry CANNOT be
+ * descheduled, as it does not contain a valid WQE pointer.
+ *
+ * @port:  Port to send it on
+ * @queue: Queue to use
+ * @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
+ *		 CVMX_PKO_LOCK_CMD_QUEUE
+ */
+
+static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
+						cvmx_pko_lock_t use_locking)
+{
+	if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG) {
+		/*
+		 * Must do a full switch here to handle all cases. We
+		 * use a fake WQE pointer, as the POW does not access
+		 * this memory. The WQE pointer and group are only
+		 * used if this work is descheduled, which is not
+		 * supported by the
+		 * cvmx_pko_send_packet_prepare/cvmx_pko_send_packet_finish
+		 * combination. Note that this is a special case in
+		 * which these fake values can be used - this is not a
+		 * general technique.
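+		 * The tag below encodes the target queue in the POW
+		 * subgroup bits, so sends to the same queue serialize
+		 * on the same atomic tag.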
+ */ + uint32_t tag = + CVMX_TAG_SW_BITS_INTERNAL << CVMX_TAG_SW_SHIFT | + CVMX_TAG_SUBGROUP_PKO << CVMX_TAG_SUBGROUP_SHIFT | + (CVMX_TAG_SUBGROUP_MASK & queue); + cvmx_pow_tag_sw_full((cvmx_wqe_t *) cvmx_phys_to_ptr(0x80), tag, + CVMX_POW_TAG_TYPE_ATOMIC, 0); + } +} + +/** + * Complete packet output. cvmx_pko_send_packet_prepare() must be + * called exactly once before this, and the same parameters must be + * passed to both cvmx_pko_send_packet_prepare() and + * cvmx_pko_send_packet_finish(). + * + * @port: Port to send it on + * @queue: Queue to use + * @pko_command: + * PKO HW command word + * @packet: Packet to send + * @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or + * CVMX_PKO_LOCK_CMD_QUEUE + * + * Returns CVMX_PKO_SUCCESS on success, or error code on + * failure of output + */ +static inline cvmx_pko_status_t cvmx_pko_send_packet_finish( + uint64_t port, + uint64_t queue, + cvmx_pko_command_word0_t pko_command, + union cvmx_buf_ptr packet, + cvmx_pko_lock_t use_locking) +{ + cvmx_cmd_queue_result_t result; + if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG) + cvmx_pow_tag_sw_wait(); + result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue), + (use_locking == CVMX_PKO_LOCK_CMD_QUEUE), + pko_command.u64, packet.u64); + if (likely(result == CVMX_CMD_QUEUE_SUCCESS)) { + cvmx_pko_doorbell(port, queue, 2); + return CVMX_PKO_SUCCESS; + } else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) + || (result == CVMX_CMD_QUEUE_FULL)) { + return CVMX_PKO_NO_MEMORY; + } else { + return CVMX_PKO_INVALID_QUEUE; + } +} + +/** + * Complete packet output. cvmx_pko_send_packet_prepare() must be + * called exactly once before this, and the same parameters must be + * passed to both cvmx_pko_send_packet_prepare() and + * cvmx_pko_send_packet_finish(). + * + * @port: Port to send it on + * @queue: Queue to use + * @pko_command: + * PKO HW command word + * @packet: Packet to send + * @addr: Physical address of a work queue entry or physical address + * to zero on completion. + * @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or + * CVMX_PKO_LOCK_CMD_QUEUE + * + * Returns CVMX_PKO_SUCCESS on success, or error code on + * failure of output + */ +static inline cvmx_pko_status_t cvmx_pko_send_packet_finish3( + uint64_t port, + uint64_t queue, + cvmx_pko_command_word0_t pko_command, + union cvmx_buf_ptr packet, + uint64_t addr, + cvmx_pko_lock_t use_locking) +{ + cvmx_cmd_queue_result_t result; + if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG) + cvmx_pow_tag_sw_wait(); + result = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue), + (use_locking == CVMX_PKO_LOCK_CMD_QUEUE), + pko_command.u64, packet.u64, addr); + if (likely(result == CVMX_CMD_QUEUE_SUCCESS)) { + cvmx_pko_doorbell(port, queue, 3); + return CVMX_PKO_SUCCESS; + } else if ((result == CVMX_CMD_QUEUE_NO_MEMORY) + || (result == CVMX_CMD_QUEUE_FULL)) { + return CVMX_PKO_NO_MEMORY; + } else { + return CVMX_PKO_INVALID_QUEUE; + } +} + +/** + * Return the pko output queue associated with a port and a specific core. + * In normal mode (PKO lockless operation is disabled), the value returned + * is the base queue.
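+ * + * As a worked instance of the mapping implemented below + * (illustrative only; the CVMX_PKO_QUEUES_PER_PORT_* constants are + * configuration values defined elsewhere): port 17 with core 2 + * falls in the second interface range, so its queue is + * CVMX_PKO_MAX_PORTS_INTERFACE0 * CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 + * + (17 - 16) * CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 + 2.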
+ * + * @port: Port number + * @core: Core to get queue for + * + * Returns Core-specific output queue + */ +static inline int cvmx_pko_get_base_queue_per_core(int port, int core) +{ +#ifndef CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0 +#define CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0 16 +#endif +#ifndef CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1 +#define CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1 16 +#endif + + if (port < CVMX_PKO_MAX_PORTS_INTERFACE0) + return port * CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 + core; + else if (port >= 16 && port < 16 + CVMX_PKO_MAX_PORTS_INTERFACE1) + return CVMX_PKO_MAX_PORTS_INTERFACE0 * + CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 + (port - + 16) * + CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 + core; + else if ((port >= 32) && (port < 36)) + return CVMX_PKO_MAX_PORTS_INTERFACE0 * + CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 + + CVMX_PKO_MAX_PORTS_INTERFACE1 * + CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 + (port - + 32) * + CVMX_PKO_QUEUES_PER_PORT_PCI; + else if ((port >= 36) && (port < 40)) + return CVMX_PKO_MAX_PORTS_INTERFACE0 * + CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 + + CVMX_PKO_MAX_PORTS_INTERFACE1 * + CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 + + 4 * CVMX_PKO_QUEUES_PER_PORT_PCI + (port - + 36) * + CVMX_PKO_QUEUES_PER_PORT_LOOP; + else + /* Given the limit on the number of ports we can map to + * CVMX_MAX_OUTPUT_QUEUES_STATIC queues (currently 256, + * divided among all cores), the remaining unmapped ports + * are assigned an illegal queue number */ + return CVMX_PKO_ILLEGAL_QUEUE; +} + +/** + * For a given port number, return the base pko output queue + * for the port. + * + * @port: Port number + * Returns Base output queue + */ +static inline int cvmx_pko_get_base_queue(int port) +{ + return cvmx_pko_get_base_queue_per_core(port, 0); +} + +/** + * For a given port number, return the number of pko output queues. + * + * @port: Port number + * Returns Number of output queues + */ +static inline int cvmx_pko_get_num_queues(int port) +{ + if (port < 16) + return CVMX_PKO_QUEUES_PER_PORT_INTERFACE0; + else if (port < 32) + return CVMX_PKO_QUEUES_PER_PORT_INTERFACE1; + else if (port < 36) + return CVMX_PKO_QUEUES_PER_PORT_PCI; + else if (port < 40) + return CVMX_PKO_QUEUES_PER_PORT_LOOP; + else + return 0; +} + +/** + * Get the status counters for a port. + * + * @port_num: Port number to get statistics for. + * @clear: Set to 1 to clear the counters after they are read + * @status: Where to put the results. 
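+ * + * A minimal usage sketch (illustrative; "port" is a caller-supplied + * port number): + * + * cvmx_pko_port_status_t status; + * cvmx_pko_get_port_status(port, 0, &status); + * + * This reads the packet, octet, and doorbell counters without + * clearing them; passing 1 for @clear resets the packet and octet + * counters as they are read.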
+ */ +static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear, + cvmx_pko_port_status_t *status) +{ + union cvmx_pko_reg_read_idx pko_reg_read_idx; + union cvmx_pko_mem_count0 pko_mem_count0; + union cvmx_pko_mem_count1 pko_mem_count1; + + pko_reg_read_idx.u64 = 0; + pko_reg_read_idx.s.index = port_num; + cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64); + + pko_mem_count0.u64 = cvmx_read_csr(CVMX_PKO_MEM_COUNT0); + status->packets = pko_mem_count0.s.count; + if (clear) { + pko_mem_count0.s.count = port_num; + cvmx_write_csr(CVMX_PKO_MEM_COUNT0, pko_mem_count0.u64); + } + + pko_mem_count1.u64 = cvmx_read_csr(CVMX_PKO_MEM_COUNT1); + status->octets = pko_mem_count1.s.count; + if (clear) { + pko_mem_count1.s.count = port_num; + cvmx_write_csr(CVMX_PKO_MEM_COUNT1, pko_mem_count1.u64); + } + + if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) { + union cvmx_pko_mem_debug9 debug9; + pko_reg_read_idx.s.index = cvmx_pko_get_base_queue(port_num); + cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64); + debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9); + status->doorbell = debug9.cn38xx.doorbell; + } else { + union cvmx_pko_mem_debug8 debug8; + pko_reg_read_idx.s.index = cvmx_pko_get_base_queue(port_num); + cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64); + debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8); + status->doorbell = debug8.cn58xx.doorbell; + } +} + +/** + * Rate limit a PKO port to a max packets/sec. This function is only + * supported on CN57XX, CN56XX, CN55XX, and CN54XX. + * + * @port: Port to rate limit + * @packets_s: Maximum packet/sec + * @burst: Maximum number of packets to burst in a row before rate + * limiting cuts in. + * + * Returns Zero on success, negative on failure + */ +extern int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst); + +/** + * Rate limit a PKO port to a max bits/sec. This function is only + * supported on CN57XX, CN56XX, CN55XX, and CN54XX. + * + * @port: Port to rate limit + * @bits_s: PKO rate limit in bits/sec + * @burst: Maximum number of bits to burst before rate + * limiting cuts in. + * + * Returns Zero on success, negative on failure + */ +extern int cvmx_pko_rate_limit_bits(int port, uint64_t bits_s, int burst); + +#endif /* __CVMX_PKO_H__ */ diff --git a/drivers/staging/octeon/cvmx-pow.h b/drivers/staging/octeon/cvmx-pow.h new file mode 100644 index 0000000..c5d66f2 --- /dev/null +++ b/drivers/staging/octeon/cvmx-pow.h @@ -0,0 +1,1982 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. 
+ * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/** + * Interface to the hardware Packet Order / Work unit. + * + * New, starting with SDK 1.7.0, cvmx-pow supports a number of + * extended consistency checks. The define + * CVMX_ENABLE_POW_CHECKS controls the runtime insertion of POW + * internal state checks to find common programming errors. If + * CVMX_ENABLE_POW_CHECKS is not defined, checks are enabled + * by default. For example, cvmx-pow will check for the following + * program errors or POW state inconsistencies. + * - Requesting a POW operation with an active tag switch in + * progress. + * - Waiting for a tag switch to complete for an excessively + * long period. This is normally a sign of an error in locking + * causing deadlock. + * - Illegal tag switches from NULL_NULL. + * - Illegal tag switches from NULL. + * - Illegal deschedule request. + * - WQE pointer not matching the one attached to the core by + * the POW. + * + */ + +#ifndef __CVMX_POW_H__ +#define __CVMX_POW_H__ + +#include <asm/octeon/cvmx-pow-defs.h> + +#include "cvmx-scratch.h" +#include "cvmx-wqe.h" + +/* Default to having all POW consistency checks turned on */ +#ifndef CVMX_ENABLE_POW_CHECKS +#define CVMX_ENABLE_POW_CHECKS 1 +#endif + +enum cvmx_pow_tag_type { + /* Tag ordering is maintained */ + CVMX_POW_TAG_TYPE_ORDERED = 0L, + /* Tag ordering is maintained, and at most one PP has the tag */ + CVMX_POW_TAG_TYPE_ATOMIC = 1L, + /* + * The work queue entry from the order - NEVER tag switch from + * NULL to NULL + */ + CVMX_POW_TAG_TYPE_NULL = 2L, + /* A tag switch to NULL, and there is no space reserved in POW + * - NEVER tag switch to NULL_NULL + * - NEVER tag switch from NULL_NULL + * - NULL_NULL is entered at the beginning of time and on a deschedule. + * - NULL_NULL can be exited by a new work request. A NULL_SWITCH + * load can also switch the state to NULL + */ + CVMX_POW_TAG_TYPE_NULL_NULL = 3L +}; + +/** + * Wait flag values for pow functions. + */ +typedef enum { + CVMX_POW_WAIT = 1, + CVMX_POW_NO_WAIT = 0, +} cvmx_pow_wait_t; + +/** + * POW tag operations. These are used in the data stored to the POW.
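+ * + * Each operation is carried in the "op" field of a cvmx_pow_tag_req_t + * (defined below) and is submitted to the POW with a single store or + * IOBDMA; see, for example, cvmx_pow_tag_sw_nocheck() later in this + * file for the exact sequence used for CVMX_POW_TAG_OP_SWTAG.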
+ */ +typedef enum { + /* + * switch the tag (only) for this PP + * - the previous tag should be non-NULL in this case + * - tag switch response required + * - fields used: op, type, tag + */ + CVMX_POW_TAG_OP_SWTAG = 0L, + /* + * switch the tag for this PP, with full information + * - this should be used when the previous tag is NULL + * - tag switch response required + * - fields used: address, op, grp, type, tag + */ + CVMX_POW_TAG_OP_SWTAG_FULL = 1L, + /* + * switch the tag (and/or group) for this PP and de-schedule + * - OK to keep the tag the same and only change the group + * - fields used: op, no_sched, grp, type, tag + */ + CVMX_POW_TAG_OP_SWTAG_DESCH = 2L, + /* + * just de-schedule + * - fields used: op, no_sched + */ + CVMX_POW_TAG_OP_DESCH = 3L, + /* + * create an entirely new work queue entry + * - fields used: address, op, qos, grp, type, tag + */ + CVMX_POW_TAG_OP_ADDWQ = 4L, + /* + * just update the work queue pointer and grp for this PP + * - fields used: address, op, grp + */ + CVMX_POW_TAG_OP_UPDATE_WQP_GRP = 5L, + /* + * set the no_sched bit on the de-schedule list + * + * - does nothing if the selected entry is not on the + * de-schedule list + * + * - does nothing if the stored work queue pointer does not + * match the address field + * + * - fields used: address, index, op + * + * Before issuing a *_NSCHED operation, SW must guarantee + * that all prior deschedules and set/clr NSCHED operations + * are complete and all prior switches are complete. The + * hardware provides the opsdone bit and swdone bit for SW + * polling. After issuing a *_NSCHED operation, SW must + * guarantee that the set/clr NSCHED is complete before any + * subsequent operations. + */ + CVMX_POW_TAG_OP_SET_NSCHED = 6L, + /* + * clears the no_sched bit on the de-schedule list + * + * - does nothing if the selected entry is not on the + * de-schedule list + * + * - does nothing if the stored work queue pointer does not + * match the address field + * + * - fields used: address, index, op + * + * Before issuing a *_NSCHED operation, SW must guarantee that + * all prior deschedules and set/clr NSCHED operations are + * complete and all prior switches are complete. The hardware + * provides the opsdone bit and swdone bit for SW + * polling. After issuing a *_NSCHED operation, SW must + * guarantee that the set/clr NSCHED is complete before any + * subsequent operations. + */ + CVMX_POW_TAG_OP_CLR_NSCHED = 7L, + /* do nothing */ + CVMX_POW_TAG_OP_NOP = 15L +} cvmx_pow_tag_op_t; + +/** + * This structure defines the store data on a store to POW + */ +typedef union { + uint64_t u64; + struct { + /* + * Don't reschedule this entry. no_sched is used for + * CVMX_POW_TAG_OP_SWTAG_DESCH and + * CVMX_POW_TAG_OP_DESCH + */ + uint64_t no_sched:1; + uint64_t unused:2; + /* Contains the index of the entry for a CVMX_POW_TAG_OP_*_NSCHED */ + uint64_t index:13; + /* The operation to perform */ + cvmx_pow_tag_op_t op:4; + uint64_t unused2:2; + /* + * The QOS level for the packet. qos is only used for + * CVMX_POW_TAG_OP_ADDWQ + */ + uint64_t qos:3; + /* + * The group that the work queue entry will be + * scheduled to. grp is used for CVMX_POW_TAG_OP_ADDWQ, + * CVMX_POW_TAG_OP_SWTAG_FULL, + * CVMX_POW_TAG_OP_SWTAG_DESCH, and + * CVMX_POW_TAG_OP_UPDATE_WQP_GRP + */ + uint64_t grp:4; + /* + * The type of the tag. type is used for everything + * except CVMX_POW_TAG_OP_DESCH, + * CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and + * CVMX_POW_TAG_OP_*_NSCHED + */ + uint64_t type:3; + /* + * The actual tag.
tag is used for everything except + * CVMX_POW_TAG_OP_DESCH, + * CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and + * CVMX_POW_TAG_OP_*_NSCHED + */ + uint64_t tag:32; + } s; +} cvmx_pow_tag_req_t; + +/** + * This structure describes the address to load stuff from POW + */ +typedef union { + uint64_t u64; + + /** + * Address for new work request loads (did<2:0> == 0) + */ + struct { + /* Mips64 address region. Should be CVMX_IO_SEG */ + uint64_t mem_region:2; + /* Must be zero */ + uint64_t reserved_49_61:13; + /* Must be one */ + uint64_t is_io:1; + /* the ID of POW -- did<2:0> == 0 in this case */ + uint64_t did:8; + /* Must be zero */ + uint64_t reserved_4_39:36; + /* + * If set, don't return load response until work is + * available. + */ + uint64_t wait:1; + /* Must be zero */ + uint64_t reserved_0_2:3; + } swork; + + /** + * Address for loads to get POW internal status + */ + struct { + /* Mips64 address region. Should be CVMX_IO_SEG */ + uint64_t mem_region:2; + /* Must be zero */ + uint64_t reserved_49_61:13; + /* Must be one */ + uint64_t is_io:1; + /* the ID of POW -- did<2:0> == 1 in this case */ + uint64_t did:8; + /* Must be zero */ + uint64_t reserved_10_39:30; + /* The core id to get status for */ + uint64_t coreid:4; + /* + * If set and get_cur is set, return reverse tag-list + * pointer rather than forward tag-list pointer. + */ + uint64_t get_rev:1; + /* + * If set, return current status rather than pending + * status. + */ + uint64_t get_cur:1; + /* + * If set, get the work-queue pointer rather than + * tag/type. + */ + uint64_t get_wqp:1; + /* Must be zero */ + uint64_t reserved_0_2:3; + } sstatus; + + /** + * Address for memory loads to get POW internal state + */ + struct { + /* Mips64 address region. Should be CVMX_IO_SEG */ + uint64_t mem_region:2; + /* Must be zero */ + uint64_t reserved_49_61:13; + /* Must be one */ + uint64_t is_io:1; + /* the ID of POW -- did<2:0> == 2 in this case */ + uint64_t did:8; + /* Must be zero */ + uint64_t reserved_16_39:24; + /* POW memory index */ + uint64_t index:11; + /* + * If set, return deschedule information rather than + * the standard response for work-queue index (invalid + * if the work-queue entry is not on the deschedule + * list). + */ + uint64_t get_des:1; + /* + * If set, get the work-queue pointer rather than + * tag/type (no effect when get_des set). + */ + uint64_t get_wqp:1; + /* Must be zero */ + uint64_t reserved_0_2:3; + } smemload; + + /** + * Address for index/pointer loads + */ + struct { + /* Mips64 address region. Should be CVMX_IO_SEG */ + uint64_t mem_region:2; + /* Must be zero */ + uint64_t reserved_49_61:13; + /* Must be one */ + uint64_t is_io:1; + /* the ID of POW -- did<2:0> == 3 in this case */ + uint64_t did:8; + /* Must be zero */ + uint64_t reserved_9_39:31; + /* + * when {get_rmt ==0 AND get_des_get_tail == 0}, this + * field selects one of eight POW internal-input + * queues (0-7), one per QOS level; values 8-15 are + * illegal in this case; when {get_rmt ==0 AND + * get_des_get_tail == 1}, this field selects one of + * 16 deschedule lists (per group); when get_rmt ==1, + * this field selects one of 16 memory-input queue + * lists. 
The two memory-input queue lists associated + * with each QOS level are: + * + * - qosgrp = 0, qosgrp = 8: QOS0 + * - qosgrp = 1, qosgrp = 9: QOS1 + * - qosgrp = 2, qosgrp = 10: QOS2 + * - qosgrp = 3, qosgrp = 11: QOS3 + * - qosgrp = 4, qosgrp = 12: QOS4 + * - qosgrp = 5, qosgrp = 13: QOS5 + * - qosgrp = 6, qosgrp = 14: QOS6 + * - qosgrp = 7, qosgrp = 15: QOS7 + */ + uint64_t qosgrp:4; + /* + * If set and get_rmt is clear, return deschedule list + * indexes rather than indexes for the specified qos + * level; if set and get_rmt is set, return the tail + * pointer rather than the head pointer for the + * specified qos level. + */ + uint64_t get_des_get_tail:1; + /* + * If set, return remote pointers rather than the + * local indexes for the specified qos level. + */ + uint64_t get_rmt:1; + /* Must be zero */ + uint64_t reserved_0_2:3; + } sindexload; + + /** + * Address for NULL_RD request (did<2:0> == 4). When this is read, + * HW attempts to change the state to NULL if it is NULL_NULL (the + * hardware cannot switch from NULL_NULL to NULL if a POW entry is + * not available - software may need to recover by finishing + * another piece of work before a POW entry can ever become + * available.) + */ + struct { + /* Mips64 address region. Should be CVMX_IO_SEG */ + uint64_t mem_region:2; + /* Must be zero */ + uint64_t reserved_49_61:13; + /* Must be one */ + uint64_t is_io:1; + /* the ID of POW -- did<2:0> == 4 in this case */ + uint64_t did:8; + /* Must be zero */ + uint64_t reserved_0_39:40; + } snull_rd; +} cvmx_pow_load_addr_t; + +/** + * This structure defines the response to a load/SENDSINGLE to POW + * (except CSR reads) + */ +typedef union { + uint64_t u64; + + /** + * Response to new work request loads + */ + struct { + /* + * Set when no new work queue entry was returned. + * If there was de-scheduled work, the HW will + * definitely return it. When this bit is set, it + * could either mean: + * + * - There was no work, or + * + * - There was no work that the HW could find. This + * case can happen, regardless of the wait bit value + * in the original request, when there is work in + * the IQs that is too deep down the list. + */ + uint64_t no_work:1; + /* Must be zero */ + uint64_t reserved_40_62:23; + /* 36 in O1 -- the work queue pointer */ + uint64_t addr:40; + } s_work; + + /** + * Result for a POW Status Load (when get_cur==0 and get_wqp==0) + */ + struct { + uint64_t reserved_62_63:2; + /* Set when there is a pending non-NULL SWTAG or + * SWTAG_FULL, and the POW entry has not left the list + * for the original tag. */ + uint64_t pend_switch:1; + /* Set when SWTAG_FULL and pend_switch is set. */ + uint64_t pend_switch_full:1; + /* + * Set when there is a pending NULL SWTAG, or an + * implicit switch to NULL. + */ + uint64_t pend_switch_null:1; + /* Set when there is a pending DESCHED or SWTAG_DESCHED. */ + uint64_t pend_desched:1; + /* + * Set when there is a pending SWTAG_DESCHED and + * pend_desched is set. + */ + uint64_t pend_desched_switch:1; + /* Set when nosched is desired and pend_desched is set. */ + uint64_t pend_nosched:1; + /* Set when there is a pending GET_WORK. */ + uint64_t pend_new_work:1; + /* + * When pend_new_work is set, this bit indicates that + * the wait bit was set. + */ + uint64_t pend_new_work_wait:1; + /* Set when there is a pending NULL_RD. */ + uint64_t pend_null_rd:1; + /* Set when there is a pending CLR_NSCHED. */ + uint64_t pend_nosched_clr:1; + uint64_t reserved_51:1; + /* This is the index when pend_nosched_clr is set.
*/ + uint64_t pend_index:11; + /* + * This is the new_grp when (pend_desched AND + * pend_desched_switch) is set. + */ + uint64_t pend_grp:4; + uint64_t reserved_34_35:2; + /* + * This is the tag type when pend_switch or + * (pend_desched AND pend_desched_switch) are set. + */ + uint64_t pend_type:2; + /* + * - this is the tag when pend_switch or (pend_desched + * AND pend_desched_switch) are set. + */ + uint64_t pend_tag:32; + } s_sstatus0; + + /** + * Result for a POW Status Load (when get_cur==0 and get_wqp==1) + */ + struct { + uint64_t reserved_62_63:2; + /* + * Set when there is a pending non-NULL SWTAG or + * SWTAG_FULL, and the POW entry has not left the list + * for the original tag. + */ + uint64_t pend_switch:1; + /* Set when SWTAG_FULL and pend_switch is set. */ + uint64_t pend_switch_full:1; + /* + * Set when there is a pending NULL SWTAG, or an + * implicit switch to NULL. + */ + uint64_t pend_switch_null:1; + /* + * Set when there is a pending DESCHED or + * SWTAG_DESCHED. + */ + uint64_t pend_desched:1; + /* + * Set when there is a pending SWTAG_DESCHED and + * pend_desched is set. + */ + uint64_t pend_desched_switch:1; + /* Set when nosched is desired and pend_desched is set. */ + uint64_t pend_nosched:1; + /* Set when there is a pending GET_WORK. */ + uint64_t pend_new_work:1; + /* + * When pend_new_work is set, this bit indicates that + * the wait bit was set. + */ + uint64_t pend_new_work_wait:1; + /* Set when there is a pending NULL_RD. */ + uint64_t pend_null_rd:1; + /* Set when there is a pending CLR_NSCHED. */ + uint64_t pend_nosched_clr:1; + uint64_t reserved_51:1; + /* This is the index when pend_nosched_clr is set. */ + uint64_t pend_index:11; + /* + * This is the new_grp when (pend_desched AND + * pend_desched_switch) is set. + */ + uint64_t pend_grp:4; + /* This is the wqp when pend_nosched_clr is set. */ + uint64_t pend_wqp:36; + } s_sstatus1; + + /** + * Result for a POW Status Load (when get_cur==1, get_wqp==0, and + * get_rev==0) + */ + struct { + uint64_t reserved_62_63:2; + /* + * Points to the next POW entry in the tag list when + * tail == 0 (and tag_type is not NULL or NULL_NULL). + */ + uint64_t link_index:11; + /* The POW entry attached to the core. */ + uint64_t index:11; + /* + * The group attached to the core (updated when new + * tag list entered on SWTAG_FULL). + */ + uint64_t grp:4; + /* + * Set when this POW entry is at the head of its tag + * list (also set when in the NULL or NULL_NULL + * state). + */ + uint64_t head:1; + /* + * Set when this POW entry is at the tail of its tag + * list (also set when in the NULL or NULL_NULL + * state). + */ + uint64_t tail:1; + /* + * The tag type attached to the core (updated when new + * tag list entered on SWTAG, SWTAG_FULL, or + * SWTAG_DESCHED). + */ + uint64_t tag_type:2; + /* + * The tag attached to the core (updated when new tag + * list entered on SWTAG, SWTAG_FULL, or + * SWTAG_DESCHED). + */ + uint64_t tag:32; + } s_sstatus2; + + /** + * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==1) + */ + struct { + uint64_t reserved_62_63:2; + /* + * Points to the prior POW entry in the tag list when + * head == 0 (and tag_type is not NULL or + * NULL_NULL). This field is unpredictable when the + * core's state is NULL or NULL_NULL. + */ + uint64_t revlink_index:11; + /* The POW entry attached to the core. */ + uint64_t index:11; + /* + * The group attached to the core (updated when new + * tag list entered on SWTAG_FULL). 
+ */ + uint64_t grp:4; + /* Set when this POW entry is at the head of its tag + * list (also set when in the NULL or NULL_NULL + * state). + */ + uint64_t head:1; + /* + * Set when this POW entry is at the tail of its tag + * list (also set when in the NULL or NULL_NULL + * state). + */ + uint64_t tail:1; + /* + * The tag type attached to the core (updated when new + * tag list entered on SWTAG, SWTAG_FULL, or + * SWTAG_DESCHED). + */ + uint64_t tag_type:2; + /* + * The tag attached to the core (updated when new tag + * list entered on SWTAG, SWTAG_FULL, or + * SWTAG_DESCHED). + */ + uint64_t tag:32; + } s_sstatus3; + + /** + * Result for a POW Status Load (when get_cur==1, get_wqp==1, and + * get_rev==0) + */ + struct { + uint64_t reserved_62_63:2; + /* + * Points to the next POW entry in the tag list when + * tail == 0 (and tag_type is not NULL or NULL_NULL). + */ + uint64_t link_index:11; + /* The POW entry attached to the core. */ + uint64_t index:11; + /* + * The group attached to the core (updated when new + * tag list entered on SWTAG_FULL). + */ + uint64_t grp:4; + /* + * The wqp attached to the core (updated when new tag + * list entered on SWTAG_FULL). + */ + uint64_t wqp:36; + } s_sstatus4; + + /** + * Result for a POW Status Load (when get_cur==1, get_wqp==1, and + * get_rev==1) + */ + struct { + uint64_t reserved_62_63:2; + /* + * Points to the prior POW entry in the tag list when + * head == 0 (and tag_type is not NULL or + * NULL_NULL). This field is unpredictable when the + * core's state is NULL or NULL_NULL. + */ + uint64_t revlink_index:11; + /* The POW entry attached to the core. */ + uint64_t index:11; + /* + * The group attached to the core (updated when new + * tag list entered on SWTAG_FULL). + */ + uint64_t grp:4; + /* + * The wqp attached to the core (updated when new tag + * list entered on SWTAG_FULL). + */ + uint64_t wqp:36; + } s_sstatus5; + + /** + * Result For POW Memory Load (get_des == 0 and get_wqp == 0) + */ + struct { + uint64_t reserved_51_63:13; + /* + * The next entry in the input, free, descheduled_head + * list (unpredictable if entry is the tail of the + * list). + */ + uint64_t next_index:11; + /* The group of the POW entry. */ + uint64_t grp:4; + uint64_t reserved_35:1; + /* + * Set when this POW entry is at the tail of its tag + * list (also set when in the NULL or NULL_NULL + * state). + */ + uint64_t tail:1; + /* The tag type of the POW entry. */ + uint64_t tag_type:2; + /* The tag of the POW entry. */ + uint64_t tag:32; + } s_smemload0; + + /** + * Result For POW Memory Load (get_des == 0 and get_wqp == 1) + */ + struct { + uint64_t reserved_51_63:13; + /* + * The next entry in the input, free, descheduled_head + * list (unpredictable if entry is the tail of the + * list). + */ + uint64_t next_index:11; + /* The group of the POW entry. */ + uint64_t grp:4; + /* The WQP held in the POW entry. */ + uint64_t wqp:36; + } s_smemload1; + + /** + * Result For POW Memory Load (get_des == 1) + */ + struct { + uint64_t reserved_51_63:13; + /* + * The next entry in the tag list connected to the + * descheduled head. + */ + uint64_t fwd_index:11; + /* The group of the POW entry. */ + uint64_t grp:4; + /* The nosched bit for the POW entry. */ + uint64_t nosched:1; + /* There is a pending tag switch */ + uint64_t pend_switch:1; + /* + * The next tag type for the new tag list when + * pend_switch is set. + */ + uint64_t pend_type:2; + /* + * The next tag for the new tag list when pend_switch + * is set. 
+ */ + uint64_t pend_tag:32; + } s_smemload2; + + /** + * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 0) + */ + struct { + uint64_t reserved_52_63:12; + /* + * set when there is one or more POW entries on the + * free list. + */ + uint64_t free_val:1; + /* + * set when there is exactly one POW entry on the free + * list. + */ + uint64_t free_one:1; + uint64_t reserved_49:1; + /* + * when free_val is set, indicates the first entry on + * the free list. + */ + uint64_t free_head:11; + uint64_t reserved_37:1; + /* + * when free_val is set, indicates the last entry on + * the free list. + */ + uint64_t free_tail:11; + /* + * set when there is one or more POW entries on the + * input Q list selected by qosgrp. + */ + uint64_t loc_val:1; + /* + * set when there is exactly one POW entry on the + * input Q list selected by qosgrp. + */ + uint64_t loc_one:1; + uint64_t reserved_23:1; + /* + * when loc_val is set, indicates the first entry on + * the input Q list selected by qosgrp. + */ + uint64_t loc_head:11; + uint64_t reserved_11:1; + /* + * when loc_val is set, indicates the last entry on + * the input Q list selected by qosgrp. + */ + uint64_t loc_tail:11; + } sindexload0; + + /** + * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 1) + */ + struct { + uint64_t reserved_52_63:12; + /* + * set when there is one or more POW entries on the + * nosched list. + */ + uint64_t nosched_val:1; + /* + * set when there is exactly one POW entry on the + * nosched list. + */ + uint64_t nosched_one:1; + uint64_t reserved_49:1; + /* + * when nosched_val is set, indicates the first entry + * on the nosched list. + */ + uint64_t nosched_head:11; + uint64_t reserved_37:1; + /* + * when nosched_val is set, indicates the last entry + * on the nosched list. + */ + uint64_t nosched_tail:11; + /* + * set when there is one or more descheduled heads on + * the descheduled list selected by qosgrp. + */ + uint64_t des_val:1; + /* + * set when there is exactly one descheduled head on + * the descheduled list selected by qosgrp. + */ + uint64_t des_one:1; + uint64_t reserved_23:1; + /* + * when des_val is set, indicates the first + * descheduled head on the descheduled list selected + * by qosgrp. + */ + uint64_t des_head:11; + uint64_t reserved_11:1; + /* + * when des_val is set, indicates the last descheduled + * head on the descheduled list selected by qosgrp. + */ + uint64_t des_tail:11; + } sindexload1; + + /** + * Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 0) + */ + struct { + uint64_t reserved_39_63:25; + /* + * Set when this DRAM list is the current head + * (i.e. is the next to be reloaded when the POW + * hardware reloads a POW entry from DRAM). The POW + * hardware alternates between the two DRAM lists + * associated with a QOS level when it reloads work + * from DRAM into the POW unit. + */ + uint64_t rmt_is_head:1; + /* + * Set when the DRAM portion of the input Q list + * selected by qosgrp contains one or more pieces of + * work. + */ + uint64_t rmt_val:1; + /* + * Set when the DRAM portion of the input Q list + * selected by qosgrp contains exactly one piece of + * work. + */ + uint64_t rmt_one:1; + /* + * When rmt_val is set, indicates the first piece of + * work on the DRAM input Q list selected by + * qosgrp. 
+ */ + uint64_t rmt_head:36; + } sindexload2; + + /** + * Result For POW Index/Pointer Load (get_rmt == + * 1/get_des_get_tail == 1) + */ + struct { + uint64_t reserved_39_63:25; + /* + * set when this DRAM list is the current head + * (i.e. is the next to be reloaded when the POW + * hardware reloads a POW entry from DRAM). The POW + * hardware alternates between the two DRAM lists + * associated with a QOS level when it reloads work + * from DRAM into the POW unit. + */ + uint64_t rmt_is_head:1; + /* + * set when the DRAM portion of the input Q list + * selected by qosgrp contains one or more pieces of + * work. + */ + uint64_t rmt_val:1; + /* + * set when the DRAM portion of the input Q list + * selected by qosgrp contains exactly one piece of + * work. + */ + uint64_t rmt_one:1; + /* + * when rmt_val is set, indicates the last piece of + * work on the DRAM input Q list selected by + * qosgrp. + */ + uint64_t rmt_tail:36; + } sindexload3; + + /** + * Response to NULL_RD request loads + */ + struct { + uint64_t unused:62; + /* of type enum cvmx_pow_tag_type. state is one of the + * following: + * + * - CVMX_POW_TAG_TYPE_ORDERED + * - CVMX_POW_TAG_TYPE_ATOMIC + * - CVMX_POW_TAG_TYPE_NULL + * - CVMX_POW_TAG_TYPE_NULL_NULL + */ + uint64_t state:2; + } s_null_rd; + +} cvmx_pow_tag_load_resp_t; + +/** + * This structure describes the address used for stores to the POW. + * The store address is meaningful on stores to the POW. The + * hardware assumes that an aligned 64-bit store was used for all + * these stores. Note the assumption that the work queue entry is + * aligned on an 8-byte boundary (since the low-order 3 address bits + * must be zero). Note that not all fields are used by all + * operations. + * + * NOTE: The following is the behavior of the pending switch bit at the PP + * for POW stores (i.e. when did<7:3> == 0xc) + * - did<2:0> == 0 => pending switch bit is set + * - did<2:0> == 1 => no effect on the pending switch bit + * - did<2:0> == 3 => pending switch bit is cleared + * - did<2:0> == 7 => no effect on the pending switch bit + * - did<2:0> == others => must not be used + * - No other loads/stores have an effect on the pending switch bit + * - The switch bus from POW can clear the pending switch bit + * + * NOTE: did<2:0> == 2 is used by the HW for a special single-cycle + * ADDWQ command that only contains the pointer. SW must never use + * did<2:0> == 2. + */ +typedef union { + /** + * Unsigned 64 bit integer representation of store address + */ + uint64_t u64; + + struct { + /* Memory region. Should be CVMX_IO_SEG in most cases */ + uint64_t mem_reg:2; + uint64_t reserved_49_61:13; /* Must be zero */ + uint64_t is_io:1; /* Must be one */ + /* Device ID of POW. Note that different sub-dids are used. */ + uint64_t did:8; + uint64_t reserved_36_39:4; /* Must be zero */ + /* Address field.
addr<2:0> must be zero */ + uint64_t addr:36; + } stag; +} cvmx_pow_tag_store_addr_t; + +/** + * decode of the store data when an IOBDMA SENDSINGLE is sent to POW + */ +typedef union { + uint64_t u64; + + struct { + /* + * the (64-bit word) location in scratchpad to write + * to (if len != 0) + */ + uint64_t scraddr:8; + /* the number of words in the response (0 => no response) */ + uint64_t len:8; + /* the ID of the device on the non-coherent bus */ + uint64_t did:8; + uint64_t unused:36; + /* if set, don't return load response until work is available */ + uint64_t wait:1; + uint64_t unused2:3; + } s; + +} cvmx_pow_iobdma_store_t; + +/* CSR typedefs have been moved to cvmx-csr-*.h */ + +/** + * Get the POW tag for this core. This returns the current + * tag type, tag, group, and POW entry index associated with + * this core. Index is only valid if the tag type isn't NULL_NULL. + * If a tag switch is pending this routine returns the tag before + * the tag switch, not after. + * + * Returns Current tag + */ +static inline cvmx_pow_tag_req_t cvmx_pow_get_current_tag(void) +{ + cvmx_pow_load_addr_t load_addr; + cvmx_pow_tag_load_resp_t load_resp; + cvmx_pow_tag_req_t result; + + load_addr.u64 = 0; + load_addr.sstatus.mem_region = CVMX_IO_SEG; + load_addr.sstatus.is_io = 1; + load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1; + load_addr.sstatus.coreid = cvmx_get_core_num(); + load_addr.sstatus.get_cur = 1; + load_resp.u64 = cvmx_read_csr(load_addr.u64); + result.u64 = 0; + result.s.grp = load_resp.s_sstatus2.grp; + result.s.index = load_resp.s_sstatus2.index; + result.s.type = load_resp.s_sstatus2.tag_type; + result.s.tag = load_resp.s_sstatus2.tag; + return result; +} + +/** + * Get the POW WQE for this core. This returns the work queue + * entry currently associated with this core. + * + * Returns WQE pointer + */ +static inline cvmx_wqe_t *cvmx_pow_get_current_wqp(void) +{ + cvmx_pow_load_addr_t load_addr; + cvmx_pow_tag_load_resp_t load_resp; + + load_addr.u64 = 0; + load_addr.sstatus.mem_region = CVMX_IO_SEG; + load_addr.sstatus.is_io = 1; + load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1; + load_addr.sstatus.coreid = cvmx_get_core_num(); + load_addr.sstatus.get_cur = 1; + load_addr.sstatus.get_wqp = 1; + load_resp.u64 = cvmx_read_csr(load_addr.u64); + return (cvmx_wqe_t *) cvmx_phys_to_ptr(load_resp.s_sstatus4.wqp); +} + +#ifndef CVMX_MF_CHORD +#define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30) +#endif + +/** + * Print a warning if a tag switch is pending for this core + * + * @function: Function name checking for a pending tag switch + */ +static inline void __cvmx_pow_warn_if_pending_switch(const char *function) +{ + uint64_t switch_complete; + CVMX_MF_CHORD(switch_complete); + if (!switch_complete) + pr_warning("%s called with tag switch in progress\n", function); +} + +/** + * Waits for a tag switch to complete by polling the completion bit. + * Note that switches to NULL complete immediately and do not need + * to be waited for. + */ +static inline void cvmx_pow_tag_sw_wait(void) +{ + const uint64_t MAX_CYCLES = 1ull << 31; + uint64_t switch_complete; + uint64_t start_cycle = cvmx_get_cycle(); + while (1) { + CVMX_MF_CHORD(switch_complete); + if (unlikely(switch_complete)) + break; + if (unlikely(cvmx_get_cycle() > start_cycle + MAX_CYCLES)) { + pr_warning("Tag switch is taking a long time, " + "possible deadlock\n"); + start_cycle = -MAX_CYCLES - 1; + } + } +} + +/** + * Synchronous work request. Requests work from the POW. 
+ * This function does NOT wait for previous tag switches to complete, + * so the caller must ensure that there is not a pending tag switch. + * + * @wait: When set, call stalls until work becomes available, or times out. + * If not set, returns immediately. + * + * Returns the WQE pointer from POW. Returns NULL if no work + * was available. + */ +static inline cvmx_wqe_t *cvmx_pow_work_request_sync_nocheck(cvmx_pow_wait_t + wait) +{ + cvmx_pow_load_addr_t ptr; + cvmx_pow_tag_load_resp_t result; + + if (CVMX_ENABLE_POW_CHECKS) + __cvmx_pow_warn_if_pending_switch(__func__); + + ptr.u64 = 0; + ptr.swork.mem_region = CVMX_IO_SEG; + ptr.swork.is_io = 1; + ptr.swork.did = CVMX_OCT_DID_TAG_SWTAG; + ptr.swork.wait = wait; + + result.u64 = cvmx_read_csr(ptr.u64); + + if (result.s_work.no_work) + return NULL; + else + return (cvmx_wqe_t *) cvmx_phys_to_ptr(result.s_work.addr); +} + +/** + * Synchronous work request. Requests work from the POW. + * This function waits for any previous tag switch to complete before + * requesting the new work. + * + * @wait: When set, call stalls until work becomes available, or times out. + * If not set, returns immediately. + * + * Returns the WQE pointer from POW. Returns NULL if no work + * was available. + */ +static inline cvmx_wqe_t *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait) +{ + if (CVMX_ENABLE_POW_CHECKS) + __cvmx_pow_warn_if_pending_switch(__func__); + + /* Must not have a switch pending when requesting work */ + cvmx_pow_tag_sw_wait(); + return cvmx_pow_work_request_sync_nocheck(wait); + +} + +/** + * Synchronous null_rd request. Requests a switch out of NULL_NULL POW state. + * This function waits for any previous tag switch to complete before + * requesting the null_rd. + * + * Returns the POW state as an enum cvmx_pow_tag_type. + */ +static inline enum cvmx_pow_tag_type cvmx_pow_work_request_null_rd(void) +{ + cvmx_pow_load_addr_t ptr; + cvmx_pow_tag_load_resp_t result; + + if (CVMX_ENABLE_POW_CHECKS) + __cvmx_pow_warn_if_pending_switch(__func__); + + /* Must not have a switch pending when requesting work */ + cvmx_pow_tag_sw_wait(); + + ptr.u64 = 0; + ptr.snull_rd.mem_region = CVMX_IO_SEG; + ptr.snull_rd.is_io = 1; + ptr.snull_rd.did = CVMX_OCT_DID_TAG_NULL_RD; + + result.u64 = cvmx_read_csr(ptr.u64); + + return (enum cvmx_pow_tag_type) result.s_null_rd.state; +} + +/** + * Asynchronous work request. Work is requested from the POW unit, + * and should later be checked with function + * cvmx_pow_work_response_async. This function does NOT wait for + * previous tag switches to complete, so the caller must ensure that + * there is not a pending tag switch. + * + * @scr_addr: Scratch memory address that response will be returned + * to, which is either a valid WQE, or a response with the + * invalid bit set. Byte address, must be 8 byte aligned. + * + * @wait: 1 to cause response to wait for work to become available (or + * timeout), 0 to cause response to return immediately + */ +static inline void cvmx_pow_work_request_async_nocheck(int scr_addr, + cvmx_pow_wait_t wait) +{ + cvmx_pow_iobdma_store_t data; + + if (CVMX_ENABLE_POW_CHECKS) + __cvmx_pow_warn_if_pending_switch(__func__); + + /* scr_addr must be 8 byte aligned */ + data.s.scraddr = scr_addr >> 3; + data.s.len = 1; + data.s.did = CVMX_OCT_DID_TAG_SWTAG; + data.s.wait = wait; + cvmx_send_single(data.u64); +} + +/** + * Asynchronous work request. Work is requested from the POW unit, + * and should later be checked with function + * cvmx_pow_work_response_async.
This function waits for any previous + * tag switch to complete before requesting the new work. + * + * @scr_addr: Scratch memory address that response will be returned + * to, which is either a valid WQE, or a response with the + * invalid bit set. Byte address, must be 8 byte aligned. + * + * @wait: 1 to cause response to wait for work to become available (or + * timeout), 0 to cause response to return immediately + */ +static inline void cvmx_pow_work_request_async(int scr_addr, + cvmx_pow_wait_t wait) +{ + if (CVMX_ENABLE_POW_CHECKS) + __cvmx_pow_warn_if_pending_switch(__func__); + + /* Must not have a switch pending when requesting work */ + cvmx_pow_tag_sw_wait(); + cvmx_pow_work_request_async_nocheck(scr_addr, wait); +} + +/** + * Gets the result of an asynchronous work request. Performs an IOBDMA + * sync to wait for the response. + * + * @scr_addr: Scratch memory address to get the result from. Byte address, + * must be 8 byte aligned. + * + * Returns the WQE from the scratch register, or NULL if no + * work was available. + */ +static inline cvmx_wqe_t *cvmx_pow_work_response_async(int scr_addr) +{ + cvmx_pow_tag_load_resp_t result; + + CVMX_SYNCIOBDMA; + result.u64 = cvmx_scratch_read64(scr_addr); + + if (result.s_work.no_work) + return NULL; + else + return (cvmx_wqe_t *) cvmx_phys_to_ptr(result.s_work.addr); +} + +/** + * Checks if a work queue entry pointer returned by a work + * request is valid. It may be invalid due to no work + * being available or due to a timeout. + * + * @wqe_ptr: pointer to a work queue entry returned by the POW + * + * Returns 0 if pointer is valid + * 1 if invalid (no work was returned) + */ +static inline uint64_t cvmx_pow_work_invalid(cvmx_wqe_t *wqe_ptr) +{ + return wqe_ptr == NULL; +} + +/** + * Starts a tag switch to the provided tag value and tag type. + * Completion for the tag switch must be checked for separately. This + * function does NOT update the work queue entry in DRAM to match tag + * value and type, so the application must keep track of these if they + * are important to the application. This tag switch command must not + * be used for switches to NULL, as the tag switch pending bit will be + * set by the switch request, but never cleared by the hardware. + * + * NOTE: This should not be used when switching from a NULL tag. Use + * cvmx_pow_tag_sw_full() instead. + * + * This function does no checks, so the caller must ensure that any + * previous tag switch has completed. + * + * @tag: new tag value + * @tag_type: new tag type (ordered or atomic) + */ +static inline void cvmx_pow_tag_sw_nocheck(uint32_t tag, + enum cvmx_pow_tag_type tag_type) +{ + cvmx_addr_t ptr; + cvmx_pow_tag_req_t tag_req; + + if (CVMX_ENABLE_POW_CHECKS) { + cvmx_pow_tag_req_t current_tag; + __cvmx_pow_warn_if_pending_switch(__func__); + current_tag = cvmx_pow_get_current_tag(); + if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL) + pr_warning("%s called with NULL_NULL tag\n", + __func__); + if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL) + pr_warning("%s called with NULL tag\n", __func__); + if ((current_tag.s.type == tag_type) + && (current_tag.s.tag == tag)) + pr_warning("%s called to perform a tag switch to the " + "same tag\n", + __func__); + if (tag_type == CVMX_POW_TAG_TYPE_NULL) + pr_warning("%s called to perform a tag switch to " + "NULL. Use cvmx_pow_tag_sw_null() instead\n", + __func__); + } + + /* + * Note that WQE in DRAM is not updated here, as the POW does + * not read from DRAM once the WQE is in flight.
See hardware + * manual for complete details. It is the application's + * responsibility to keep track of the current tag value if + * that is important. + */ + + tag_req.u64 = 0; + tag_req.s.op = CVMX_POW_TAG_OP_SWTAG; + tag_req.s.tag = tag; + tag_req.s.type = tag_type; + + ptr.u64 = 0; + ptr.sio.mem_region = CVMX_IO_SEG; + ptr.sio.is_io = 1; + ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG; + + /* once this store arrives at POW, it will attempt the switch + software must wait for the switch to complete separately */ + cvmx_write_io(ptr.u64, tag_req.u64); +} + +/** + * Starts a tag switch to the provided tag value and tag type. + * Completion for the tag switch must be checked for separately. This + * function does NOT update the work queue entry in dram to match tag + * value and type, so the application must keep track of these if they + * are important to the application. This tag switch command must not + * be used for switches to NULL, as the tag switch pending bit will be + * set by the switch request, but never cleared by the hardware. + * + * NOTE: This should not be used when switching from a NULL tag. Use + * cvmx_pow_tag_sw_full() instead. + * + * This function waits for any previous tag switch to complete, and also + * displays an error on tag switches to NULL. + * + * @tag: new tag value + * @tag_type: new tag type (ordered or atomic) + */ +static inline void cvmx_pow_tag_sw(uint32_t tag, + enum cvmx_pow_tag_type tag_type) +{ + if (CVMX_ENABLE_POW_CHECKS) + __cvmx_pow_warn_if_pending_switch(__func__); + + /* + * Note that WQE in DRAM is not updated here, as the POW does + * not read from DRAM once the WQE is in flight. See hardware + * manual for complete details. It is the application's + * responsibility to keep track of the current tag value if + * that is important. + */ + + /* + * Ensure that there is not a pending tag switch, as a tag + * switch cannot be started if a previous switch is still + * pending. + */ + cvmx_pow_tag_sw_wait(); + cvmx_pow_tag_sw_nocheck(tag, tag_type); +} + +/** + * Starts a tag switch to the provided tag value and tag type. + * Completion for the tag switch must be checked for separately. This + * function does NOT update the work queue entry in dram to match tag + * value and type, so the application must keep track of these if they + * are important to the application. This tag switch command must not + * be used for switches to NULL, as the tag switch pending bit will be + * set by the switch request, but never cleared by the hardware. + * + * This function must be used for tag switches from NULL. + * + * This function does no checks, so the caller must ensure that any + * previous tag switch has completed. + * + * @wqp: pointer to work queue entry to submit. This entry is + * updated to match the other parameters + * @tag: tag value to be assigned to work queue entry + * @tag_type: type of tag + * @group: group value for the work queue entry. 
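+ * + * Because this variant performs no checks, a caller would typically + * pair it with an explicit wait (an illustrative sketch; wqp, tag, + * and grp are caller-supplied, and this is exactly what the checked + * wrapper cvmx_pow_tag_sw_full() below does): + * + * cvmx_pow_tag_sw_wait(); + * cvmx_pow_tag_sw_full_nocheck(wqp, tag, CVMX_POW_TAG_TYPE_ATOMIC, grp);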
+ */ +static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, uint32_t tag, + enum cvmx_pow_tag_type tag_type, + uint64_t group) +{ + cvmx_addr_t ptr; + cvmx_pow_tag_req_t tag_req; + + if (CVMX_ENABLE_POW_CHECKS) { + cvmx_pow_tag_req_t current_tag; + __cvmx_pow_warn_if_pending_switch(__func__); + current_tag = cvmx_pow_get_current_tag(); + if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL) + pr_warning("%s called with NULL_NULL tag\n", + __func__); + if ((current_tag.s.type == tag_type) + && (current_tag.s.tag == tag)) + pr_warning("%s called to perform a tag switch to " + "the same tag\n", + __func__); + if (tag_type == CVMX_POW_TAG_TYPE_NULL) + pr_warning("%s called to perform a tag switch to " + "NULL. Use cvmx_pow_tag_sw_null() instead\n", + __func__); + if (wqp != cvmx_phys_to_ptr(0x80)) + if (wqp != cvmx_pow_get_current_wqp()) + pr_warning("%s passed WQE(%p) doesn't match " + "the address in the POW(%p)\n", + __func__, wqp, + cvmx_pow_get_current_wqp()); + } + + /* + * Note that WQE in DRAM is not updated here, as the POW does + * not read from DRAM once the WQE is in flight. See hardware + * manual for complete details. It is the application's + * responsibility to keep track of the current tag value if + * that is important. + */ + + tag_req.u64 = 0; + tag_req.s.op = CVMX_POW_TAG_OP_SWTAG_FULL; + tag_req.s.tag = tag; + tag_req.s.type = tag_type; + tag_req.s.grp = group; + + ptr.u64 = 0; + ptr.sio.mem_region = CVMX_IO_SEG; + ptr.sio.is_io = 1; + ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG; + ptr.sio.offset = CAST64(wqp); + + /* + * once this store arrives at POW, it will attempt the switch; + * software must wait for the switch to complete separately. + */ + cvmx_write_io(ptr.u64, tag_req.u64); +} + +/** + * Starts a tag switch to the provided tag value and tag type. + * Completion for the tag switch must be checked for separately. This + * function does NOT update the work queue entry in DRAM to match tag + * value and type, so the application must keep track of these if they + * are important to the application. This tag switch command must not + * be used for switches to NULL, as the tag switch pending bit will be + * set by the switch request, but never cleared by the hardware. + * + * This function must be used for tag switches from NULL. + * + * This function waits for any pending tag switches to complete + * before requesting the tag switch. + * + * @wqp: pointer to work queue entry to submit. This entry is updated + * to match the other parameters + * @tag: tag value to be assigned to work queue entry + * @tag_type: type of tag + * @group: group value for the work queue entry. + */ +static inline void cvmx_pow_tag_sw_full(cvmx_wqe_t *wqp, uint32_t tag, + enum cvmx_pow_tag_type tag_type, + uint64_t group) +{ + if (CVMX_ENABLE_POW_CHECKS) + __cvmx_pow_warn_if_pending_switch(__func__); + + /* + * Ensure that there is not a pending tag switch, as a tag + * switch cannot be started if a previous switch is still + * pending. + */ + cvmx_pow_tag_sw_wait(); + cvmx_pow_tag_sw_full_nocheck(wqp, tag, tag_type, group); +} + +/** + * Switch to a NULL tag, which ends any ordering or + * synchronization provided by the POW for the current + * work queue entry. This operation completes immediately, + * so completion should not be waited for. + * This function does NOT wait for previous tag switches to complete, + * so the caller must ensure that any previous tag switches have completed.
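+ * + * A typical end-of-packet-processing sequence might be + * (an illustrative sketch): + * + * cvmx_pow_tag_sw_wait(); + * cvmx_pow_tag_sw_null_nocheck(); + * + * after which no completion wait is needed, since switches to NULL + * complete immediately.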
+ */ +static inline void cvmx_pow_tag_sw_null_nocheck(void) +{ + cvmx_addr_t ptr; + cvmx_pow_tag_req_t tag_req; + + if (CVMX_ENABLE_POW_CHECKS) { + cvmx_pow_tag_req_t current_tag; + __cvmx_pow_warn_if_pending_switch(__func__); + current_tag = cvmx_pow_get_current_tag(); + if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL) + pr_warning("%s called with NULL_NULL tag\n", + __func__); + if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL) + pr_warning("%s called when we already have a " + "NULL tag\n", + __func__); + } + + tag_req.u64 = 0; + tag_req.s.op = CVMX_POW_TAG_OP_SWTAG; + tag_req.s.type = CVMX_POW_TAG_TYPE_NULL; + + ptr.u64 = 0; + ptr.sio.mem_region = CVMX_IO_SEG; + ptr.sio.is_io = 1; + ptr.sio.did = CVMX_OCT_DID_TAG_TAG1; + + cvmx_write_io(ptr.u64, tag_req.u64); + + /* switch to NULL completes immediately */ +} + +/** + * Switch to a NULL tag, which ends any ordering or + * synchronization provided by the POW for the current + * work queue entry. This operation completes immediately, + * so completion should not be waited for. + * This function waits for any pending tag switches to complete + * before requesting the switch to NULL. + */ +static inline void cvmx_pow_tag_sw_null(void) +{ + if (CVMX_ENABLE_POW_CHECKS) + __cvmx_pow_warn_if_pending_switch(__func__); + + /* + * Ensure that there is not a pending tag switch, as a tag + * switch cannot be started if a previous switch is still + * pending. + */ + cvmx_pow_tag_sw_wait(); + cvmx_pow_tag_sw_null_nocheck(); + + /* switch to NULL completes immediately */ +} + +/** + * Submits work to an input queue. This function updates the work + * queue entry in DRAM to match the arguments given. Note that the + * tag provided is for the work queue entry submitted, and is + * unrelated to the tag that the core currently holds. + * + * @wqp: pointer to work queue entry to submit. This entry is + * updated to match the other parameters + * @tag: tag value to be assigned to work queue entry + * @tag_type: type of tag + * @qos: Input queue to add to. + * @grp: group value for the work queue entry. + */ +static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag, + enum cvmx_pow_tag_type tag_type, + uint64_t qos, uint64_t grp) +{ + cvmx_addr_t ptr; + cvmx_pow_tag_req_t tag_req; + + wqp->qos = qos; + wqp->tag = tag; + wqp->tag_type = tag_type; + wqp->grp = grp; + + tag_req.u64 = 0; + tag_req.s.op = CVMX_POW_TAG_OP_ADDWQ; + tag_req.s.type = tag_type; + tag_req.s.tag = tag; + tag_req.s.qos = qos; + tag_req.s.grp = grp; + + ptr.u64 = 0; + ptr.sio.mem_region = CVMX_IO_SEG; + ptr.sio.is_io = 1; + ptr.sio.did = CVMX_OCT_DID_TAG_TAG1; + ptr.sio.offset = cvmx_ptr_to_phys(wqp); + + /* + * SYNC write to memory before the work submit. This is + * necessary as POW may read values from DRAM at this time. + */ + CVMX_SYNCWS; + cvmx_write_io(ptr.u64, tag_req.u64); +} + +/** + * This function sets the group mask for a core. The group mask + * indicates which groups each core will accept work from. There are + * 16 groups. + * + * @core_num: core to apply mask to + * @mask: Group mask. There are 16 groups, so only bits 0-15 are valid, + * representing groups 0-15. + * Each 1 bit in the mask enables the core to accept work from + * the corresponding group.
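+ * + * For example (illustrative), to restrict the current core to + * accepting work only from groups 0 and 1: + * + * cvmx_pow_set_group_mask(cvmx_get_core_num(), 0x3);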
+ */ +static inline void cvmx_pow_set_group_mask(uint64_t core_num, uint64_t mask) +{ + union cvmx_pow_pp_grp_mskx grp_msk; + + grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num)); + grp_msk.s.grp_msk = mask; + cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64); +} + +/** + * This function sets POW static priorities for a core. Each input queue has + * an associated priority value. + * + * @core_num: core to apply priorities to + * @priority: Vector of 8 priorities, one per POW Input Queue (0-7). + * Highest priority is 0 and lowest is 7. A priority value + * of 0xF instructs POW to skip the Input Queue when + * scheduling to this specific core. + * NOTE: priorities should not have gaps in values, meaning + * {0,1,1,1,1,1,1,1} is a valid configuration while + * {0,2,2,2,2,2,2,2} is not. + */ +static inline void cvmx_pow_set_priority(uint64_t core_num, + const uint8_t priority[]) +{ + /* POW priorities are supported on CN5xxx and later */ + if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)) { + union cvmx_pow_pp_grp_mskx grp_msk; + + grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num)); + grp_msk.s.qos0_pri = priority[0]; + grp_msk.s.qos1_pri = priority[1]; + grp_msk.s.qos2_pri = priority[2]; + grp_msk.s.qos3_pri = priority[3]; + grp_msk.s.qos4_pri = priority[4]; + grp_msk.s.qos5_pri = priority[5]; + grp_msk.s.qos6_pri = priority[6]; + grp_msk.s.qos7_pri = priority[7]; + + /* Detect gaps between priorities and flag error */ + { + int i; + uint32_t prio_mask = 0; + + for (i = 0; i < 8; i++) + if (priority[i] != 0xF) + prio_mask |= 1 << priority[i]; + + if (prio_mask ^ ((1 << cvmx_pop(prio_mask)) - 1)) { + pr_err("POW static priorities should be " + "contiguous (0x%llx)\n", + (unsigned long long)prio_mask); + return; + } + } + + cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64); + } +} + +/** + * Performs a tag switch and then an immediate deschedule. This completes + * immediately, so completion must not be waited for. This function does NOT + * update the wqe in DRAM to match arguments. + * + * This function does NOT wait for any prior tag switches to complete, so the + * calling code must do this. + * + * Note the following CAVEAT of the Octeon HW behavior when + * re-scheduling DE-SCHEDULEd items whose (next) state is + * ORDERED: + * - If there are no switches pending at the time that the + * HW executes the de-schedule, the HW will only re-schedule + * the head of the FIFO associated with the given tag. This + * means that in many respects, the HW treats this ORDERED + * tag as an ATOMIC tag. Note that in the SWTAG_DESCH + * case (to an ORDERED tag), the HW will do the switch + * before the deschedule whenever it is possible to do + * the switch immediately, so it may often look like + * this case. + * - If there is a pending switch to ORDERED at the time + * the HW executes the de-schedule, the HW will perform + * the switch at the time it re-schedules, and will be + * able to reschedule any/all of the entries with the + * same tag. + * Due to this behavior, the RECOMMENDATION to software is + * that they have a (next) state of ATOMIC when they + * DE-SCHEDULE. If an ORDERED tag is what was really desired, + * SW can choose to immediately switch to an ORDERED tag + * after the work (that has an ATOMIC tag) is re-scheduled. + * Note that since there are never any tag switches pending + * when the HW re-schedules, this switch can be IMMEDIATE upon + * the reception of the pointer during the re-schedule.
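+ * + * Following that recommendation, an illustrative deschedule with a + * (next) state of ATOMIC, assuming any prior tag switch has already + * completed, would be: + * + * cvmx_pow_tag_sw_desched_nocheck(tag, CVMX_POW_TAG_TYPE_ATOMIC, grp, 0);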
+ * + * @tag: New tag value + * @tag_type: New tag type + * @group: New group value + * @no_sched: Control whether this work queue entry will be rescheduled. + * - 1 : don't schedule this work + * - 0 : allow this work to be scheduled. + */ +static inline void cvmx_pow_tag_sw_desched_nocheck( + uint32_t tag, + enum cvmx_pow_tag_type tag_type, + uint64_t group, + uint64_t no_sched) +{ + cvmx_addr_t ptr; + cvmx_pow_tag_req_t tag_req; + + if (CVMX_ENABLE_POW_CHECKS) { + cvmx_pow_tag_req_t current_tag; + __cvmx_pow_warn_if_pending_switch(__func__); + current_tag = cvmx_pow_get_current_tag(); + if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL) + pr_warning("%s called with NULL_NULL tag\n", + __func__); + if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL) + pr_warning("%s called with NULL tag. Deschedule not " + "allowed from NULL state\n", + __func__); + if ((current_tag.s.type != CVMX_POW_TAG_TYPE_ATOMIC) + && (tag_type != CVMX_POW_TAG_TYPE_ATOMIC)) + pr_warning("%s called where neither the before nor " + "the after tag is ATOMIC\n", + __func__); + } + + tag_req.u64 = 0; + tag_req.s.op = CVMX_POW_TAG_OP_SWTAG_DESCH; + tag_req.s.tag = tag; + tag_req.s.type = tag_type; + tag_req.s.grp = group; + tag_req.s.no_sched = no_sched; + + ptr.u64 = 0; + ptr.sio.mem_region = CVMX_IO_SEG; + ptr.sio.is_io = 1; + ptr.sio.did = CVMX_OCT_DID_TAG_TAG3; + /* + * since TAG3 is used, this store will clear the local pending + * switch bit. + */ + cvmx_write_io(ptr.u64, tag_req.u64); +} + +/** + * Performs a tag switch and then an immediate deschedule. This completes + * immediately, so completion must not be waited for. This function does NOT + * update the wqe in DRAM to match arguments. + * + * This function waits for any prior tag switches to complete, so the + * calling code may call this function with a pending tag switch. + * + * Note the following CAVEAT of the Octeon HW behavior when + * re-scheduling DE-SCHEDULEd items whose (next) state is + * ORDERED: + * - If there are no switches pending at the time that the + * HW executes the de-schedule, the HW will only re-schedule + * the head of the FIFO associated with the given tag. This + * means that in many respects, the HW treats this ORDERED + * tag as an ATOMIC tag. Note that in the SWTAG_DESCH + * case (to an ORDERED tag), the HW will do the switch + * before the deschedule whenever it is possible to do + * the switch immediately, so it may often look like + * this case. + * - If there is a pending switch to ORDERED at the time + * the HW executes the de-schedule, the HW will perform + * the switch at the time it re-schedules, and will be + * able to reschedule any/all of the entries with the + * same tag. + * Due to this behavior, the RECOMMENDATION to software is + * that they have a (next) state of ATOMIC when they + * DE-SCHEDULE. If an ORDERED tag is what was really desired, + * SW can choose to immediately switch to an ORDERED tag + * after the work (that has an ATOMIC tag) is re-scheduled. + * Note that since there are never any tag switches pending + * when the HW re-schedules, this switch can be IMMEDIATE upon + * the reception of the pointer during the re-schedule. + * + * @tag: New tag value + * @tag_type: New tag type + * @group: New group value + * @no_sched: Control whether this work queue entry will be rescheduled. + * - 1 : don't schedule this work + * - 0 : allow this work to be scheduled.
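+ * + * A hedged sketch of the ATOMIC-then-ORDERED recommendation above; + * "tag" and "grp" are placeholders, and the plain cvmx_pow_tag_sw() + * switch helper is assumed to be available: + * + * cvmx_pow_tag_sw_desched(tag, CVMX_POW_TAG_TYPE_ATOMIC, grp, 0); + * ... the POW later re-schedules the work ... + * cvmx_pow_tag_sw(tag, CVMX_POW_TAG_TYPE_ORDERED);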
+ */ +static inline void cvmx_pow_tag_sw_desched(uint32_t tag, + enum cvmx_pow_tag_type tag_type, + uint64_t group, uint64_t no_sched) +{ + if (CVMX_ENABLE_POW_CHECKS) + __cvmx_pow_warn_if_pending_switch(__func__); + + /* Need to make sure any writes to the work queue entry are complete */ + CVMX_SYNCWS; + /* + * Ensure that there is not a pending tag switch, as a tag + * switch cannot be started if a previous switch is still + * pending. + */ + cvmx_pow_tag_sw_wait(); + cvmx_pow_tag_sw_desched_nocheck(tag, tag_type, group, no_sched); +} + +/** + * Deschedules the current work queue entry. + * + * @no_sched: no schedule flag value to be set on the work queue + * entry. If this is set the entry will not be + * rescheduled. + */ +static inline void cvmx_pow_desched(uint64_t no_sched) +{ + cvmx_addr_t ptr; + cvmx_pow_tag_req_t tag_req; + + if (CVMX_ENABLE_POW_CHECKS) { + cvmx_pow_tag_req_t current_tag; + __cvmx_pow_warn_if_pending_switch(__func__); + current_tag = cvmx_pow_get_current_tag(); + if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL) + pr_warning("%s called with NULL_NULL tag\n", + __func__); + if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL) + pr_warning("%s called with NULL tag. Deschedule not " + "expected from NULL state\n", + __func__); + } + + /* Need to make sure any writes to the work queue entry are complete */ + CVMX_SYNCWS; + + tag_req.u64 = 0; + tag_req.s.op = CVMX_POW_TAG_OP_DESCH; + tag_req.s.no_sched = no_sched; + + ptr.u64 = 0; + ptr.sio.mem_region = CVMX_IO_SEG; + ptr.sio.is_io = 1; + ptr.sio.did = CVMX_OCT_DID_TAG_TAG3; + /* + * since TAG3 is used, this store will clear the local pending + * switch bit. + */ + cvmx_write_io(ptr.u64, tag_req.u64); +} + +/**************************************************** +* Define usage of bits within the 32 bit tag values. +*****************************************************/ + +/* + * Number of bits of the tag used by software. The SW bits are always + * a contiguous block of the high bits, starting at bit 31. The hardware + * bits are always the low bits. By default, the top 8 bits of the + * tag are reserved for software, and the low 24 are set by the IPD + * unit. + */ +#define CVMX_TAG_SW_BITS (8) +#define CVMX_TAG_SW_SHIFT (32 - CVMX_TAG_SW_BITS) + +/* Below is the list of values for the top 8 bits of the tag. */ +/* + * Tag values with top byte of this value are reserved for internal + * executive uses. + */ +#define CVMX_TAG_SW_BITS_INTERNAL 0x1 +/* The executive divides the remaining 24 bits as follows: + * - the upper 8 bits (bits 23 - 16 of the tag) define a subgroup + * + * - the lower 16 bits (bits 15 - 0 of the tag) define the value + * within the subgroup + * + * Note that this section describes the format of tags generated by + * software - refer to the hardware documentation for a description of + * the tag values generated by the packet input hardware. Subgroups + * are defined here. + */ +/* Mask for the value portion of the tag */ +#define CVMX_TAG_SUBGROUP_MASK 0xFFFF +#define CVMX_TAG_SUBGROUP_SHIFT 16 +#define CVMX_TAG_SUBGROUP_PKO 0x1 + +/* End of executive tag subgroup definitions */ + +/* + * The remaining software bit values 0x2 - 0xff are available + * for application use. + */ + +/** + * This function creates a 32 bit tag value from the two values provided. + * + * @sw_bits: The upper bits (number depends on configuration) are set + * to this value. The remainder of bits are set by the + * hw_bits parameter.
+ * + * @hw_bits: The lower bits (number depends on configuration) are set + * to this value. The remainder of bits are set by the + * sw_bits parameter. + * + * Returns 32 bit value of the combined hw and sw bits. + */ +static inline uint32_t cvmx_pow_tag_compose(uint64_t sw_bits, uint64_t hw_bits) +{ + return ((sw_bits & cvmx_build_mask(CVMX_TAG_SW_BITS)) << + CVMX_TAG_SW_SHIFT) | + (hw_bits & cvmx_build_mask(32 - CVMX_TAG_SW_BITS)); +} + +/** + * Extracts the bits allocated for software use from the tag + * + * @tag: 32 bit tag value + * + * Returns N bit software tag value, where N is configurable with the + * CVMX_TAG_SW_BITS define + */ +static inline uint32_t cvmx_pow_tag_get_sw_bits(uint64_t tag) +{ + return (tag >> (32 - CVMX_TAG_SW_BITS)) & + cvmx_build_mask(CVMX_TAG_SW_BITS); +} + +/** + * + * Extracts the bits allocated for hardware use from the tag + * + * @tag: 32 bit tag value + * + * Returns (32 - N) bit software tag value, where N is configurable + * with the CVMX_TAG_SW_BITS define + */ +static inline uint32_t cvmx_pow_tag_get_hw_bits(uint64_t tag) +{ + return tag & cvmx_build_mask(32 - CVMX_TAG_SW_BITS); +} + +/** + * Store the current POW internal state into the supplied + * buffer. It is recommended that you pass a buffer of at least + * 128KB. The format of the capture may change based on SDK + * version and Octeon chip. + * + * @buffer: Buffer to store capture into + * @buffer_size: + * The size of the supplied buffer + * + * Returns Zero on success, negative on failure + */ +extern int cvmx_pow_capture(void *buffer, int buffer_size); + +/** + * Dump a POW capture to the console in a human readable format. + * + * @buffer: POW capture from cvmx_pow_capture() + * @buffer_size: + * Size of the buffer + */ +extern void cvmx_pow_display(void *buffer, int buffer_size); + +/** + * Return the number of POW entries supported by this chip + * + * Returns Number of POW entries + */ +extern int cvmx_pow_get_num_entries(void); + +#endif /* __CVMX_POW_H__ */ diff --git a/drivers/staging/octeon/cvmx-scratch.h b/drivers/staging/octeon/cvmx-scratch.h new file mode 100644 index 0000000..96b70cf --- /dev/null +++ b/drivers/staging/octeon/cvmx-scratch.h @@ -0,0 +1,139 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/** + * + * This file provides support for the processor local scratch memory. + * Scratch memory is byte addressable - all addresses are byte addresses.
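+ * + * A minimal usage sketch (byte offset 0 is an arbitrary illustration, + * not from the original source), using the accessors defined below: + * + * cvmx_scratch_write64(0, 0x1234ull); + * uint64_t v = cvmx_scratch_read64(0);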
+ * + */ + +#ifndef __CVMX_SCRATCH_H__ +#define __CVMX_SCRATCH_H__ + +/* + * Note: This define must be a long, not a long long in order to + * compile without warnings for both 32bit and 64bit. + */ +#define CVMX_SCRATCH_BASE (-32768l) /* 0xffffffffffff8000 */ + +/** + * Reads an 8 bit value from the processor local scratchpad memory. + * + * @address: byte address to read from + * + * Returns value read + */ +static inline uint8_t cvmx_scratch_read8(uint64_t address) +{ + return *CASTPTR(volatile uint8_t, CVMX_SCRATCH_BASE + address); +} + +/** + * Reads a 16 bit value from the processor local scratchpad memory. + * + * @address: byte address to read from + * + * Returns value read + */ +static inline uint16_t cvmx_scratch_read16(uint64_t address) +{ + return *CASTPTR(volatile uint16_t, CVMX_SCRATCH_BASE + address); +} + +/** + * Reads a 32 bit value from the processor local scratchpad memory. + * + * @address: byte address to read from + * + * Returns value read + */ +static inline uint32_t cvmx_scratch_read32(uint64_t address) +{ + return *CASTPTR(volatile uint32_t, CVMX_SCRATCH_BASE + address); +} + +/** + * Reads a 64 bit value from the processor local scratchpad memory. + * + * @address: byte address to read from + * + * Returns value read + */ +static inline uint64_t cvmx_scratch_read64(uint64_t address) +{ + return *CASTPTR(volatile uint64_t, CVMX_SCRATCH_BASE + address); +} + +/** + * Writes an 8 bit value to the processor local scratchpad memory. + * + * @address: byte address to write to + * @value: value to write + */ +static inline void cvmx_scratch_write8(uint64_t address, uint64_t value) +{ + *CASTPTR(volatile uint8_t, CVMX_SCRATCH_BASE + address) = + (uint8_t) value; +} + +/** + * Writes a 16 bit value to the processor local scratchpad memory. + * + * @address: byte address to write to + * @value: value to write + */ +static inline void cvmx_scratch_write16(uint64_t address, uint64_t value) +{ + *CASTPTR(volatile uint16_t, CVMX_SCRATCH_BASE + address) = + (uint16_t) value; +} + +/** + * Writes a 32 bit value to the processor local scratchpad memory. + * + * @address: byte address to write to + * @value: value to write + */ +static inline void cvmx_scratch_write32(uint64_t address, uint64_t value) +{ + *CASTPTR(volatile uint32_t, CVMX_SCRATCH_BASE + address) = + (uint32_t) value; +} + +/** + * Writes a 64 bit value to the processor local scratchpad memory. + * + * @address: byte address to write to + * @value: value to write + */ +static inline void cvmx_scratch_write64(uint64_t address, uint64_t value) +{ + *CASTPTR(volatile uint64_t, CVMX_SCRATCH_BASE + address) = value; +} + +#endif /* __CVMX_SCRATCH_H__ */ diff --git a/drivers/staging/octeon/cvmx-smix-defs.h b/drivers/staging/octeon/cvmx-smix-defs.h new file mode 100644 index 0000000..9ae45fc --- /dev/null +++ b/drivers/staging/octeon/cvmx-smix-defs.h @@ -0,0 +1,178 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT.
See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_SMIX_DEFS_H__ +#define __CVMX_SMIX_DEFS_H__ + +#define CVMX_SMIX_CLK(offset) \ + CVMX_ADD_IO_SEG(0x0001180000001818ull + (((offset) & 1) * 256)) +#define CVMX_SMIX_CMD(offset) \ + CVMX_ADD_IO_SEG(0x0001180000001800ull + (((offset) & 1) * 256)) +#define CVMX_SMIX_EN(offset) \ + CVMX_ADD_IO_SEG(0x0001180000001820ull + (((offset) & 1) * 256)) +#define CVMX_SMIX_RD_DAT(offset) \ + CVMX_ADD_IO_SEG(0x0001180000001810ull + (((offset) & 1) * 256)) +#define CVMX_SMIX_WR_DAT(offset) \ + CVMX_ADD_IO_SEG(0x0001180000001808ull + (((offset) & 1) * 256)) + +union cvmx_smix_clk { + uint64_t u64; + struct cvmx_smix_clk_s { + uint64_t reserved_25_63:39; + uint64_t mode:1; + uint64_t reserved_21_23:3; + uint64_t sample_hi:5; + uint64_t sample_mode:1; + uint64_t reserved_14_14:1; + uint64_t clk_idle:1; + uint64_t preamble:1; + uint64_t sample:4; + uint64_t phase:8; + } s; + struct cvmx_smix_clk_cn30xx { + uint64_t reserved_21_63:43; + uint64_t sample_hi:5; + uint64_t reserved_14_15:2; + uint64_t clk_idle:1; + uint64_t preamble:1; + uint64_t sample:4; + uint64_t phase:8; + } cn30xx; + struct cvmx_smix_clk_cn30xx cn31xx; + struct cvmx_smix_clk_cn30xx cn38xx; + struct cvmx_smix_clk_cn30xx cn38xxp2; + struct cvmx_smix_clk_cn50xx { + uint64_t reserved_25_63:39; + uint64_t mode:1; + uint64_t reserved_21_23:3; + uint64_t sample_hi:5; + uint64_t reserved_14_15:2; + uint64_t clk_idle:1; + uint64_t preamble:1; + uint64_t sample:4; + uint64_t phase:8; + } cn50xx; + struct cvmx_smix_clk_s cn52xx; + struct cvmx_smix_clk_cn50xx cn52xxp1; + struct cvmx_smix_clk_s cn56xx; + struct cvmx_smix_clk_cn50xx cn56xxp1; + struct cvmx_smix_clk_cn30xx cn58xx; + struct cvmx_smix_clk_cn30xx cn58xxp1; +}; + +union cvmx_smix_cmd { + uint64_t u64; + struct cvmx_smix_cmd_s { + uint64_t reserved_18_63:46; + uint64_t phy_op:2; + uint64_t reserved_13_15:3; + uint64_t phy_adr:5; + uint64_t reserved_5_7:3; + uint64_t reg_adr:5; + } s; + struct cvmx_smix_cmd_cn30xx { + uint64_t reserved_17_63:47; + uint64_t phy_op:1; + uint64_t reserved_13_15:3; + uint64_t phy_adr:5; + uint64_t reserved_5_7:3; + uint64_t reg_adr:5; + } cn30xx; + struct cvmx_smix_cmd_cn30xx cn31xx; + struct cvmx_smix_cmd_cn30xx cn38xx; + struct cvmx_smix_cmd_cn30xx cn38xxp2; + struct cvmx_smix_cmd_s cn50xx; + struct cvmx_smix_cmd_s cn52xx; + struct cvmx_smix_cmd_s cn52xxp1; + struct cvmx_smix_cmd_s cn56xx; + struct cvmx_smix_cmd_s cn56xxp1; + struct cvmx_smix_cmd_cn30xx cn58xx; + struct cvmx_smix_cmd_cn30xx cn58xxp1; +}; + +union cvmx_smix_en { + uint64_t u64; + struct cvmx_smix_en_s { + uint64_t reserved_1_63:63; + uint64_t en:1; + } s; + struct cvmx_smix_en_s cn30xx; + struct cvmx_smix_en_s cn31xx; + struct cvmx_smix_en_s cn38xx; + struct cvmx_smix_en_s cn38xxp2; + struct cvmx_smix_en_s cn50xx; + struct cvmx_smix_en_s cn52xx; + struct cvmx_smix_en_s cn52xxp1; + struct cvmx_smix_en_s cn56xx; + struct cvmx_smix_en_s cn56xxp1; + struct cvmx_smix_en_s cn58xx; + struct cvmx_smix_en_s cn58xxp1; +}; + +union cvmx_smix_rd_dat { + uint64_t u64; + struct 
cvmx_smix_rd_dat_s { + uint64_t reserved_18_63:46; + uint64_t pending:1; + uint64_t val:1; + uint64_t dat:16; + } s; + struct cvmx_smix_rd_dat_s cn30xx; + struct cvmx_smix_rd_dat_s cn31xx; + struct cvmx_smix_rd_dat_s cn38xx; + struct cvmx_smix_rd_dat_s cn38xxp2; + struct cvmx_smix_rd_dat_s cn50xx; + struct cvmx_smix_rd_dat_s cn52xx; + struct cvmx_smix_rd_dat_s cn52xxp1; + struct cvmx_smix_rd_dat_s cn56xx; + struct cvmx_smix_rd_dat_s cn56xxp1; + struct cvmx_smix_rd_dat_s cn58xx; + struct cvmx_smix_rd_dat_s cn58xxp1; +}; + +union cvmx_smix_wr_dat { + uint64_t u64; + struct cvmx_smix_wr_dat_s { + uint64_t reserved_18_63:46; + uint64_t pending:1; + uint64_t val:1; + uint64_t dat:16; + } s; + struct cvmx_smix_wr_dat_s cn30xx; + struct cvmx_smix_wr_dat_s cn31xx; + struct cvmx_smix_wr_dat_s cn38xx; + struct cvmx_smix_wr_dat_s cn38xxp2; + struct cvmx_smix_wr_dat_s cn50xx; + struct cvmx_smix_wr_dat_s cn52xx; + struct cvmx_smix_wr_dat_s cn52xxp1; + struct cvmx_smix_wr_dat_s cn56xx; + struct cvmx_smix_wr_dat_s cn56xxp1; + struct cvmx_smix_wr_dat_s cn58xx; + struct cvmx_smix_wr_dat_s cn58xxp1; +}; + +#endif diff --git a/drivers/staging/octeon/cvmx-spi.c b/drivers/staging/octeon/cvmx-spi.c new file mode 100644 index 0000000..82794d9 --- /dev/null +++ b/drivers/staging/octeon/cvmx-spi.c @@ -0,0 +1,667 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/* + * + * Support library for the SPI + */ +#include <asm/octeon/octeon.h> + +#include "cvmx-config.h" + +#include "cvmx-pko.h" +#include "cvmx-spi.h" + +#include "cvmx-spxx-defs.h" +#include "cvmx-stxx-defs.h" +#include "cvmx-srxx-defs.h" + +#define INVOKE_CB(function_p, args...) 
\ + do { \ + if (function_p) { \ + res = function_p(args); \ + if (res) \ + return res; \ + } \ + } while (0) + +#if CVMX_ENABLE_DEBUG_PRINTS +static const char *modes[] = + { "UNKNOWN", "TX Halfplex", "Rx Halfplex", "Duplex" }; +#endif + +/* Default callbacks, can be overridden + * using cvmx_spi_get_callbacks/cvmx_spi_set_callbacks + */ +static cvmx_spi_callbacks_t cvmx_spi_callbacks = { + .reset_cb = cvmx_spi_reset_cb, + .calendar_setup_cb = cvmx_spi_calendar_setup_cb, + .clock_detect_cb = cvmx_spi_clock_detect_cb, + .training_cb = cvmx_spi_training_cb, + .calendar_sync_cb = cvmx_spi_calendar_sync_cb, + .interface_up_cb = cvmx_spi_interface_up_cb +}; + +/** + * Get current SPI4 initialization callbacks + * + * @callbacks: Pointer to the callbacks structure to fill + * + * The current callbacks are copied into @callbacks. + */ +void cvmx_spi_get_callbacks(cvmx_spi_callbacks_t *callbacks) +{ + memcpy(callbacks, &cvmx_spi_callbacks, sizeof(cvmx_spi_callbacks)); +} + +/** + * Set new SPI4 initialization callbacks + * + * @new_callbacks: Pointer to an updated callbacks structure. + */ +void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t *new_callbacks) +{ + memcpy(&cvmx_spi_callbacks, new_callbacks, sizeof(cvmx_spi_callbacks)); +} + +/** + * Initialize and start the SPI interface. + * + * @interface: The identifier of the packet interface to configure and + * use as a SPI interface. + * @mode: The operating mode for the SPI interface. The interface + * can operate as a full duplex (both Tx and Rx data paths + * active) or as a halfplex (either the Tx data path is + * active or the Rx data path is active, but not both). + * @timeout: Timeout to wait for clock synchronization in seconds + * @num_ports: Number of SPI ports to configure + * + * Returns Zero on success, negative on failure. + */ +int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode, int timeout, + int num_ports) +{ + int res = -1; + + if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))) + return res; + + /* Callback to perform SPI4 reset */ + INVOKE_CB(cvmx_spi_callbacks.reset_cb, interface, mode); + + /* Callback to perform calendar setup */ + INVOKE_CB(cvmx_spi_callbacks.calendar_setup_cb, interface, mode, + num_ports); + + /* Callback to perform clock detection */ + INVOKE_CB(cvmx_spi_callbacks.clock_detect_cb, interface, mode, timeout); + + /* Callback to perform SPI4 link training */ + INVOKE_CB(cvmx_spi_callbacks.training_cb, interface, mode, timeout); + + /* Callback to perform calendar sync */ + INVOKE_CB(cvmx_spi_callbacks.calendar_sync_cb, interface, mode, + timeout); + + /* Callback to handle interface coming up */ + INVOKE_CB(cvmx_spi_callbacks.interface_up_cb, interface, mode); + + return res; +} + +/** + * This routine restarts the SPI interface after it has lost synchronization + * with its correspondent system. + * + * @interface: The identifier of the packet interface to configure and + * use as a SPI interface. + * @mode: The operating mode for the SPI interface. The interface + * can operate as a full duplex (both Tx and Rx data paths + * active) or as a halfplex (either the Tx data path is + * active or the Rx data path is active, but not both). + * @timeout: Timeout to wait for clock synchronization in seconds + * + * Returns Zero on success, negative on failure.
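+ * + * A hypothetical call, for illustration only: restart interface 0 in + * full duplex with a 10 second clock-sync timeout: + * + * if (cvmx_spi_restart_interface(0, CVMX_SPI_MODE_DUPLEX, 10)) + * cvmx_dprintf("SPI0: restart failed\n");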
+ */ +int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode, int timeout) +{ + int res = -1; + + if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))) + return res; + + cvmx_dprintf("SPI%d: Restart %s\n", interface, modes[mode]); + + /* Callback to perform SPI4 reset */ + INVOKE_CB(cvmx_spi_callbacks.reset_cb, interface, mode); + + /* NOTE: Calendar setup is not performed during restart */ + /* Refer to cvmx_spi_start_interface() for the full sequence */ + + /* Callback to perform clock detection */ + INVOKE_CB(cvmx_spi_callbacks.clock_detect_cb, interface, mode, timeout); + + /* Callback to perform SPI4 link training */ + INVOKE_CB(cvmx_spi_callbacks.training_cb, interface, mode, timeout); + + /* Callback to perform calendar sync */ + INVOKE_CB(cvmx_spi_callbacks.calendar_sync_cb, interface, mode, + timeout); + + /* Callback to handle interface coming up */ + INVOKE_CB(cvmx_spi_callbacks.interface_up_cb, interface, mode); + + return res; +} + +/** + * Callback to perform SPI4 reset + * + * @interface: The identifier of the packet interface to configure and + * use as a SPI interface. + * @mode: The operating mode for the SPI interface. The interface + * can operate as a full duplex (both Tx and Rx data paths + * active) or as a halfplex (either the Tx data path is + * active or the Rx data path is active, but not both). + * + * Returns Zero on success, non-zero error code on failure (will cause + * SPI initialization to abort) + */ +int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode) +{ + union cvmx_spxx_dbg_deskew_ctl spxx_dbg_deskew_ctl; + union cvmx_spxx_clk_ctl spxx_clk_ctl; + union cvmx_spxx_bist_stat spxx_bist_stat; + union cvmx_spxx_int_msk spxx_int_msk; + union cvmx_stxx_int_msk stxx_int_msk; + union cvmx_spxx_trn4_ctl spxx_trn4_ctl; + int index; + uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000; + + /* Disable SPI error events while we run BIST */ + spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface)); + cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0); + stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface)); + cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0); + + /* Run BIST in the SPI interface */ + cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), 0); + cvmx_write_csr(CVMX_STXX_COM_CTL(interface), 0); + spxx_clk_ctl.u64 = 0; + spxx_clk_ctl.s.runbist = 1; + cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64); + cvmx_wait(10 * MS); + spxx_bist_stat.u64 = cvmx_read_csr(CVMX_SPXX_BIST_STAT(interface)); + if (spxx_bist_stat.s.stat0) + cvmx_dprintf + ("ERROR SPI%d: BIST failed on receive datapath FIFO\n", + interface); + if (spxx_bist_stat.s.stat1) + cvmx_dprintf("ERROR SPI%d: BIST failed on RX calendar table\n", + interface); + if (spxx_bist_stat.s.stat2) + cvmx_dprintf("ERROR SPI%d: BIST failed on TX calendar table\n", + interface); + + /* Clear the calendar table after BIST to fix parity errors */ + for (index = 0; index < 32; index++) { + union cvmx_srxx_spi4_calx srxx_spi4_calx; + union cvmx_stxx_spi4_calx stxx_spi4_calx; + + srxx_spi4_calx.u64 = 0; + srxx_spi4_calx.s.oddpar = 1; + cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface), + srxx_spi4_calx.u64); + + stxx_spi4_calx.u64 = 0; + stxx_spi4_calx.s.oddpar = 1; + cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface), + stxx_spi4_calx.u64); + } + + /* Re-enable reporting of error interrupts */ + cvmx_write_csr(CVMX_SPXX_INT_REG(interface), + cvmx_read_csr(CVMX_SPXX_INT_REG(interface))); + cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64); +
cvmx_write_csr(CVMX_STXX_INT_REG(interface), + cvmx_read_csr(CVMX_STXX_INT_REG(interface))); + cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64); + + /* Setup the CLKDLY right in the middle */ + spxx_clk_ctl.u64 = 0; + spxx_clk_ctl.s.seetrn = 0; + spxx_clk_ctl.s.clkdly = 0x10; + spxx_clk_ctl.s.runbist = 0; + spxx_clk_ctl.s.statdrv = 0; + /* This should always be on the opposite edge as statdrv */ + spxx_clk_ctl.s.statrcv = 1; + spxx_clk_ctl.s.sndtrn = 0; + spxx_clk_ctl.s.drptrn = 0; + spxx_clk_ctl.s.rcvtrn = 0; + spxx_clk_ctl.s.srxdlck = 0; + cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64); + cvmx_wait(100 * MS); + + /* Reset SRX0 DLL */ + spxx_clk_ctl.s.srxdlck = 1; + cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64); + + /* Waiting for Inf0 Spi4 RX DLL to lock */ + cvmx_wait(100 * MS); + + /* Enable dynamic alignment */ + spxx_trn4_ctl.s.trntest = 0; + spxx_trn4_ctl.s.jitter = 1; + spxx_trn4_ctl.s.clr_boot = 1; + spxx_trn4_ctl.s.set_boot = 0; + if (OCTEON_IS_MODEL(OCTEON_CN58XX)) + spxx_trn4_ctl.s.maxdist = 3; + else + spxx_trn4_ctl.s.maxdist = 8; + spxx_trn4_ctl.s.macro_en = 1; + spxx_trn4_ctl.s.mux_en = 1; + cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64); + + spxx_dbg_deskew_ctl.u64 = 0; + cvmx_write_csr(CVMX_SPXX_DBG_DESKEW_CTL(interface), + spxx_dbg_deskew_ctl.u64); + + return 0; +} + +/** + * Callback to setup calendar and miscellaneous settings before clock detection + * + * @interface: The identifier of the packet interface to configure and + * use as a SPI interface. + * @mode: The operating mode for the SPI interface. The interface + * can operate as a full duplex (both Tx and Rx data paths + * active) or as a halfplex (either the Tx data path is + * active or the Rx data path is active, but not both). + * @num_ports: Number of ports to configure on SPI + * + * Returns Zero on success, non-zero error code on failure (will cause + * SPI initialization to abort) + */ +int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode, + int num_ports) +{ + int port; + int index; + if (mode & CVMX_SPI_MODE_RX_HALFPLEX) { + union cvmx_srxx_com_ctl srxx_com_ctl; + union cvmx_srxx_spi4_stat srxx_spi4_stat; + + /* SRX0 number of Ports */ + srxx_com_ctl.u64 = 0; + srxx_com_ctl.s.prts = num_ports - 1; + srxx_com_ctl.s.st_en = 0; + srxx_com_ctl.s.inf_en = 0; + cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64); + + /* SRX0 Calendar Table. 
This round-robins through all ports */ + port = 0; + index = 0; + while (port < num_ports) { + union cvmx_srxx_spi4_calx srxx_spi4_calx; + srxx_spi4_calx.u64 = 0; + srxx_spi4_calx.s.prt0 = port++; + srxx_spi4_calx.s.prt1 = port++; + srxx_spi4_calx.s.prt2 = port++; + srxx_spi4_calx.s.prt3 = port++; + srxx_spi4_calx.s.oddpar = + ~(cvmx_dpop(srxx_spi4_calx.u64) & 1); + cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface), + srxx_spi4_calx.u64); + index++; + } + srxx_spi4_stat.u64 = 0; + srxx_spi4_stat.s.len = num_ports; + srxx_spi4_stat.s.m = 1; + cvmx_write_csr(CVMX_SRXX_SPI4_STAT(interface), + srxx_spi4_stat.u64); + } + + if (mode & CVMX_SPI_MODE_TX_HALFPLEX) { + union cvmx_stxx_arb_ctl stxx_arb_ctl; + union cvmx_gmxx_tx_spi_max gmxx_tx_spi_max; + union cvmx_gmxx_tx_spi_thresh gmxx_tx_spi_thresh; + union cvmx_gmxx_tx_spi_ctl gmxx_tx_spi_ctl; + union cvmx_stxx_spi4_stat stxx_spi4_stat; + union cvmx_stxx_spi4_dat stxx_spi4_dat; + + /* STX0 Config */ + stxx_arb_ctl.u64 = 0; + stxx_arb_ctl.s.igntpa = 0; + stxx_arb_ctl.s.mintrn = 0; + cvmx_write_csr(CVMX_STXX_ARB_CTL(interface), stxx_arb_ctl.u64); + + gmxx_tx_spi_max.u64 = 0; + gmxx_tx_spi_max.s.max1 = 8; + gmxx_tx_spi_max.s.max2 = 4; + gmxx_tx_spi_max.s.slice = 0; + cvmx_write_csr(CVMX_GMXX_TX_SPI_MAX(interface), + gmxx_tx_spi_max.u64); + + gmxx_tx_spi_thresh.u64 = 0; + gmxx_tx_spi_thresh.s.thresh = 4; + cvmx_write_csr(CVMX_GMXX_TX_SPI_THRESH(interface), + gmxx_tx_spi_thresh.u64); + + gmxx_tx_spi_ctl.u64 = 0; + gmxx_tx_spi_ctl.s.tpa_clr = 0; + gmxx_tx_spi_ctl.s.cont_pkt = 0; + cvmx_write_csr(CVMX_GMXX_TX_SPI_CTL(interface), + gmxx_tx_spi_ctl.u64); + + /* STX0 Training Control */ + stxx_spi4_dat.u64 = 0; + /* Minimum needed by dynamic alignment */ + stxx_spi4_dat.s.alpha = 32; + stxx_spi4_dat.s.max_t = 0xFFFF; /* Minimum interval is 0x20 */ + cvmx_write_csr(CVMX_STXX_SPI4_DAT(interface), + stxx_spi4_dat.u64); + + /* STX0 Calendar Table. This round-robins through all ports */ + port = 0; + index = 0; + while (port < num_ports) { + union cvmx_stxx_spi4_calx stxx_spi4_calx; + stxx_spi4_calx.u64 = 0; + stxx_spi4_calx.s.prt0 = port++; + stxx_spi4_calx.s.prt1 = port++; + stxx_spi4_calx.s.prt2 = port++; + stxx_spi4_calx.s.prt3 = port++; + stxx_spi4_calx.s.oddpar = + ~(cvmx_dpop(stxx_spi4_calx.u64) & 1); + cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface), + stxx_spi4_calx.u64); + index++; + } + stxx_spi4_stat.u64 = 0; + stxx_spi4_stat.s.len = num_ports; + stxx_spi4_stat.s.m = 1; + cvmx_write_csr(CVMX_STXX_SPI4_STAT(interface), + stxx_spi4_stat.u64); + } + + return 0; +} + +/** + * Callback to perform clock detection + * + * @interface: The identifier of the packet interface to configure and + * use as a SPI interface. + * @mode: The operating mode for the SPI interface. The interface + * can operate as a full duplex (both Tx and Rx data paths + * active) or as a halfplex (either the Tx data path is + * active or the Rx data path is active, but not both). + * @timeout: Timeout to wait for clock synchronization in seconds + * + * Returns Zero on success, non-zero error code on failure (will cause + * SPI initialization to abort) + */ +int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode, int timeout) +{ + int clock_transitions; + union cvmx_spxx_clk_stat stat; + uint64_t timeout_time; + uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000; + + /* + * Regardless of operating mode, both Tx and Rx clocks must be + * present for the SPI interface to operate.
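+ * The two loops below therefore watch SPXX_CLK_STAT for 100 + * transitions on both the status clock (s4clk0/s4clk1) and the + * data clock (d4clk0/d4clk1) before declaring the link alive.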
+ */ + cvmx_dprintf("SPI%d: Waiting to see TsClk...\n", interface); + timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout; + /* + * Require 100 clock transitions in order to avoid any noise + * in the beginning. + */ + clock_transitions = 100; + do { + stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface)); + if (stat.s.s4clk0 && stat.s.s4clk1 && clock_transitions) { + /* + * We've seen a clock transition, so decrement + * the number we still need. + */ + clock_transitions--; + cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64); + stat.s.s4clk0 = 0; + stat.s.s4clk1 = 0; + } + if (cvmx_get_cycle() > timeout_time) { + cvmx_dprintf("SPI%d: Timeout\n", interface); + return -1; + } + } while (stat.s.s4clk0 == 0 || stat.s.s4clk1 == 0); + + cvmx_dprintf("SPI%d: Waiting to see RsClk...\n", interface); + timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout; + /* + * Require 100 clock transitions in order to avoid any noise in the + * beginning. + */ + clock_transitions = 100; + do { + stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface)); + if (stat.s.d4clk0 && stat.s.d4clk1 && clock_transitions) { + /* + * We've seen a clock transition, so decrement + * the number we still need + */ + clock_transitions--; + cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64); + stat.s.d4clk0 = 0; + stat.s.d4clk1 = 0; + } + if (cvmx_get_cycle() > timeout_time) { + cvmx_dprintf("SPI%d: Timeout\n", interface); + return -1; + } + } while (stat.s.d4clk0 == 0 || stat.s.d4clk1 == 0); + + return 0; +} + +/** + * Callback to perform link training + * + * @interface: The identifier of the packet interface to configure and + * use as a SPI interface. + * @mode: The operating mode for the SPI interface. The interface + * can operate as a full duplex (both Tx and Rx data paths + * active) or as a halfplex (either the Tx data path is + * active or the Rx data path is active, but not both). + * @timeout: Timeout to wait for link to be trained (in seconds) + * + * Returns Zero on success, non-zero error code on failure (will cause + * SPI initialization to abort) + */ +int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode, int timeout) +{ + union cvmx_spxx_trn4_ctl spxx_trn4_ctl; + union cvmx_spxx_clk_stat stat; + uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000; + uint64_t timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout; + int rx_training_needed; + + /* SRX0 & STX0 Inf0 Links are configured - begin training */ + union cvmx_spxx_clk_ctl spxx_clk_ctl; + spxx_clk_ctl.u64 = 0; + spxx_clk_ctl.s.seetrn = 0; + spxx_clk_ctl.s.clkdly = 0x10; + spxx_clk_ctl.s.runbist = 0; + spxx_clk_ctl.s.statdrv = 0; + /* This should always be on the opposite edge as statdrv */ + spxx_clk_ctl.s.statrcv = 1; + spxx_clk_ctl.s.sndtrn = 1; + spxx_clk_ctl.s.drptrn = 1; + spxx_clk_ctl.s.rcvtrn = 1; + spxx_clk_ctl.s.srxdlck = 1; + cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64); + cvmx_wait(1000 * MS); + + /* SRX0 clear the boot bit */ + spxx_trn4_ctl.u64 = cvmx_read_csr(CVMX_SPXX_TRN4_CTL(interface)); + spxx_trn4_ctl.s.clr_boot = 1; + cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64); + + /* Wait for the training sequence to complete */ + cvmx_dprintf("SPI%d: Waiting for training\n", interface); + cvmx_wait(1000 * MS); + /* Wait a really long time here */ + timeout_time = cvmx_get_cycle() + 1000ull * MS * 600; + /* + * The HRM says we must wait for 34 + 16 * MAXDIST training sequences. + * We'll be pessimistic and wait for a lot more. 
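+ * With the larger MAXDIST value programmed in cvmx_spi_reset_cb() + * (8), that works out to 34 + 16 * 8 = 162 sequences, so waiting + * for 500 leaves roughly a 3x margin.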
+ */ + rx_training_needed = 500; + do { + stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface)); + if (stat.s.srxtrn && rx_training_needed) { + rx_training_needed--; + cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64); + stat.s.srxtrn = 0; + } + if (cvmx_get_cycle() > timeout_time) { + cvmx_dprintf("SPI%d: Timeout\n", interface); + return -1; + } + } while (stat.s.srxtrn == 0); + + return 0; +} + +/** + * Callback to perform calendar data synchronization + * + * @interface: The identifier of the packet interface to configure and + * use as a SPI interface. + * @mode: The operating mode for the SPI interface. The interface + * can operate as a full duplex (both Tx and Rx data paths + * active) or as a halfplex (either the Tx data path is + * active or the Rx data path is active, but not both). + * @timeout: Timeout to wait for calendar data in seconds + * + * Returns Zero on success, non-zero error code on failure (will cause + * SPI initialization to abort) + */ +int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode, int timeout) +{ + uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000; + if (mode & CVMX_SPI_MODE_RX_HALFPLEX) { + /* SRX0 interface should be good, send calendar data */ + union cvmx_srxx_com_ctl srxx_com_ctl; + cvmx_dprintf + ("SPI%d: Rx is synchronized, start sending calendar data\n", + interface); + srxx_com_ctl.u64 = cvmx_read_csr(CVMX_SRXX_COM_CTL(interface)); + srxx_com_ctl.s.inf_en = 1; + srxx_com_ctl.s.st_en = 1; + cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64); + } + + if (mode & CVMX_SPI_MODE_TX_HALFPLEX) { + /* STX0 has achieved sync */ + /* The correspondent board should be sending calendar data */ + /* Enable the STX0 STAT receiver. */ + union cvmx_spxx_clk_stat stat; + uint64_t timeout_time; + union cvmx_stxx_com_ctl stxx_com_ctl; + stxx_com_ctl.u64 = 0; + stxx_com_ctl.s.st_en = 1; + cvmx_write_csr(CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64); + + /* Waiting for calendar sync on STX0 STAT */ + cvmx_dprintf("SPI%d: Waiting to sync on STX[%d] STAT\n", + interface, interface); + timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout; + /* SPX0_CLK_STAT - SPX0_CLK_STAT[STXCAL] should be 1 (bit10) */ + do { + stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface)); + if (cvmx_get_cycle() > timeout_time) { + cvmx_dprintf("SPI%d: Timeout\n", interface); + return -1; + } + } while (stat.s.stxcal == 0); + } + + return 0; +} + +/** + * Callback to handle interface up + * + * @interface: The identifier of the packet interface to configure and + * use as a SPI interface. + * @mode: The operating mode for the SPI interface. The interface + * can operate as a full duplex (both Tx and Rx data paths + * active) or as a halfplex (either the Tx data path is + * active or the Rx data path is active, but not both).
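+ * + * Like the other steps, this handler can be swapped out at run time. + * A hedged sketch, where my_board_up is a hypothetical replacement: + * + * cvmx_spi_callbacks_t cb; + * cvmx_spi_get_callbacks(&cb); + * cb.interface_up_cb = my_board_up; + * cvmx_spi_set_callbacks(&cb);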
+ * + * Returns Zero on success, non-zero error code on failure (will cause + * SPI initialization to abort) + */ +int cvmx_spi_interface_up_cb(int interface, cvmx_spi_mode_t mode) +{ + union cvmx_gmxx_rxx_frm_min gmxx_rxx_frm_min; + union cvmx_gmxx_rxx_frm_max gmxx_rxx_frm_max; + union cvmx_gmxx_rxx_jabber gmxx_rxx_jabber; + + if (mode & CVMX_SPI_MODE_RX_HALFPLEX) { + union cvmx_srxx_com_ctl srxx_com_ctl; + srxx_com_ctl.u64 = cvmx_read_csr(CVMX_SRXX_COM_CTL(interface)); + srxx_com_ctl.s.inf_en = 1; + cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64); + cvmx_dprintf("SPI%d: Rx is now up\n", interface); + } + + if (mode & CVMX_SPI_MODE_TX_HALFPLEX) { + union cvmx_stxx_com_ctl stxx_com_ctl; + stxx_com_ctl.u64 = cvmx_read_csr(CVMX_STXX_COM_CTL(interface)); + stxx_com_ctl.s.inf_en = 1; + cvmx_write_csr(CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64); + cvmx_dprintf("SPI%d: Tx is now up\n", interface); + } + + gmxx_rxx_frm_min.u64 = 0; + gmxx_rxx_frm_min.s.len = 64; + cvmx_write_csr(CVMX_GMXX_RXX_FRM_MIN(0, interface), + gmxx_rxx_frm_min.u64); + gmxx_rxx_frm_max.u64 = 0; + gmxx_rxx_frm_max.s.len = 64 * 1024 - 4; + cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(0, interface), + gmxx_rxx_frm_max.u64); + gmxx_rxx_jabber.u64 = 0; + gmxx_rxx_jabber.s.cnt = 64 * 1024 - 4; + cvmx_write_csr(CVMX_GMXX_RXX_JABBER(0, interface), gmxx_rxx_jabber.u64); + + return 0; +} diff --git a/drivers/staging/octeon/cvmx-spi.h b/drivers/staging/octeon/cvmx-spi.h new file mode 100644 index 0000000..e814648 --- /dev/null +++ b/drivers/staging/octeon/cvmx-spi.h @@ -0,0 +1,269 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. 
+ * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/* + * + * This file contains defines for the SPI interface + */ +#ifndef __CVMX_SPI_H__ +#define __CVMX_SPI_H__ + +#include "cvmx-gmxx-defs.h" + +/* CSR typedefs have been moved to cvmx-csr-*.h */ + +typedef enum { + CVMX_SPI_MODE_UNKNOWN = 0, + CVMX_SPI_MODE_TX_HALFPLEX = 1, + CVMX_SPI_MODE_RX_HALFPLEX = 2, + CVMX_SPI_MODE_DUPLEX = 3 +} cvmx_spi_mode_t; + +/** Callbacks structure to customize SPI4 initialization sequence */ +typedef struct { + /** Called to reset SPI4 DLL */ + int (*reset_cb) (int interface, cvmx_spi_mode_t mode); + + /** Called to setup calendar */ + int (*calendar_setup_cb) (int interface, cvmx_spi_mode_t mode, + int num_ports); + + /** Called for Tx and Rx clock detection */ + int (*clock_detect_cb) (int interface, cvmx_spi_mode_t mode, + int timeout); + + /** Called to perform link training */ + int (*training_cb) (int interface, cvmx_spi_mode_t mode, int timeout); + + /** Called for calendar data synchronization */ + int (*calendar_sync_cb) (int interface, cvmx_spi_mode_t mode, + int timeout); + + /** Called when interface is up */ + int (*interface_up_cb) (int interface, cvmx_spi_mode_t mode); + +} cvmx_spi_callbacks_t; + +/** + * Return true if the supplied interface is configured for SPI + * + * @interface: Interface to check + * Returns True if interface is SPI + */ +static inline int cvmx_spi_is_spi_interface(int interface) +{ + uint64_t gmxState = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface)); + return (gmxState & 0x2) && (gmxState & 0x1); +} + +/** + * Initialize and start the SPI interface. + * + * @interface: The identifier of the packet interface to configure and + * use as a SPI interface. + * @mode: The operating mode for the SPI interface. The interface + * can operate as a full duplex (both Tx and Rx data paths + * active) or as a halfplex (either the Tx data path is + * active or the Rx data path is active, but not both). + * @timeout: Timeout to wait for clock synchronization in seconds + * @num_ports: Number of SPI ports to configure + * + * Returns Zero on success, negative on failure. + */ +extern int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode, + int timeout, int num_ports); + +/** + * This routine restarts the SPI interface after it has lost synchronization + * with its correspondent system. + * + * @interface: The identifier of the packet interface to configure and + * use as a SPI interface. + * @mode: The operating mode for the SPI interface. The interface + * can operate as a full duplex (both Tx and Rx data paths + * active) or as a halfplex (either the Tx data path is + * active or the Rx data path is active, but not both). + * @timeout: Timeout to wait for clock synchronization in seconds + * Returns Zero on success, negative on failure.
+ */ +extern int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode, + int timeout); + +/** + * Return non-zero if the SPI interface has a SPI4000 attached + * + * @interface: SPI interface the SPI4000 is connected to + * + * Returns Non-zero if a SPI4000 is present, zero otherwise. + */ +static inline int cvmx_spi4000_is_present(int interface) +{ + return 0; +} + +/** + * Initialize the SPI4000 for use + * + * @interface: SPI interface the SPI4000 is connected to + */ +static inline int cvmx_spi4000_initialize(int interface) +{ + return 0; +} + +/** + * Poll a SPI4000 port and check its speed + * + * @interface: Interface the SPI4000 is on + * @port: Port to poll (0-9) + * Returns Status of the port. 0=down. Any other value means the port is up. + */ +static inline union cvmx_gmxx_rxx_rx_inbnd cvmx_spi4000_check_speed( + int interface, + int port) +{ + union cvmx_gmxx_rxx_rx_inbnd r; + r.u64 = 0; + return r; +} + +/** + * Get current SPI4 initialization callbacks + * + * @callbacks: Pointer to the callbacks structure to fill + * + * The current callbacks are copied into @callbacks. + */ +extern void cvmx_spi_get_callbacks(cvmx_spi_callbacks_t *callbacks); + +/** + * Set new SPI4 initialization callbacks + * + * @new_callbacks: Pointer to an updated callbacks structure. + */ +extern void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t *new_callbacks); + +/** + * Callback to perform SPI4 reset + * + * @interface: The identifier of the packet interface to configure and + * use as a SPI interface. + * @mode: The operating mode for the SPI interface. The interface + * can operate as a full duplex (both Tx and Rx data paths + * active) or as a halfplex (either the Tx data path is + * active or the Rx data path is active, but not both). + * + * Returns Zero on success, non-zero error code on failure (will cause + * SPI initialization to abort) + */ +extern int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode); + +/** + * Callback to setup calendar and miscellaneous settings before clock + * detection + * + * @interface: The identifier of the packet interface to configure and + * use as a SPI interface. + * @mode: The operating mode for the SPI interface. The interface + * can operate as a full duplex (both Tx and Rx data paths + * active) or as a halfplex (either the Tx data path is + * active or the Rx data path is active, but not both). + * @num_ports: Number of ports to configure on SPI + * + * Returns Zero on success, non-zero error code on failure (will cause + * SPI initialization to abort) + */ +extern int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode, + int num_ports); + +/** + * Callback to perform clock detection + * + * @interface: The identifier of the packet interface to configure and + * use as a SPI interface. + * @mode: The operating mode for the SPI interface. The interface + * can operate as a full duplex (both Tx and Rx data paths + * active) or as a halfplex (either the Tx data path is + * active or the Rx data path is active, but not both). + * @timeout: Timeout to wait for clock synchronization in seconds + * + * Returns Zero on success, non-zero error code on failure (will cause + * SPI initialization to abort) + */ +extern int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode, + int timeout); + +/** + * Callback to perform link training + * + * @interface: The identifier of the packet interface to configure and + * use as a SPI interface. + * @mode: The operating mode for the SPI interface.
The interface + * can operate as a full duplex (both Tx and Rx data paths + * active) or as a halfplex (either the Tx data path is + * active or the Rx data path is active, but not both). + * @timeout: Timeout to wait for link to be trained (in seconds) + * + * Returns Zero on success, non-zero error code on failure (will cause + * SPI initialization to abort) + */ +extern int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode, + int timeout); + +/** + * Callback to perform calendar data synchronization + * + * @interface: The identifier of the packet interface to configure and + * use as a SPI interface. + * @mode: The operating mode for the SPI interface. The interface + * can operate as a full duplex (both Tx and Rx data paths + * active) or as a halfplex (either the Tx data path is + * active or the Rx data path is active, but not both). + * @timeout: Timeout to wait for calendar data in seconds + * + * Returns Zero on success, non-zero error code on failure (will cause + * SPI initialization to abort) + */ +extern int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode, + int timeout); + +/** + * Callback to handle interface up + * + * @interface: The identifier of the packet interface to configure and + * use as a SPI interface. + * @mode: The operating mode for the SPI interface. The interface + * can operate as a full duplex (both Tx and Rx data paths + * active) or as a halfplex (either the Tx data path is + * active or the Rx data path is active, but not both). + * + * Returns Zero on success, non-zero error code on failure (will cause + * SPI initialization to abort) + */ +extern int cvmx_spi_interface_up_cb(int interface, cvmx_spi_mode_t mode); + +#endif /* __CVMX_SPI_H__ */ diff --git a/drivers/staging/octeon/cvmx-spxx-defs.h b/drivers/staging/octeon/cvmx-spxx-defs.h new file mode 100644 index 0000000..b16940e --- /dev/null +++ b/drivers/staging/octeon/cvmx-spxx-defs.h @@ -0,0 +1,347 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. 
+ * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_SPXX_DEFS_H__ +#define __CVMX_SPXX_DEFS_H__ + +#define CVMX_SPXX_BCKPRS_CNT(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000340ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_SPXX_BIST_STAT(block_id) \ + CVMX_ADD_IO_SEG(0x00011800900007F8ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_SPXX_CLK_CTL(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000348ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_SPXX_CLK_STAT(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000350ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_SPXX_DBG_DESKEW_CTL(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000368ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_SPXX_DBG_DESKEW_STATE(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000370ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_SPXX_DRV_CTL(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000358ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_SPXX_ERR_CTL(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000320ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_SPXX_INT_DAT(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000318ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_SPXX_INT_MSK(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000308ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_SPXX_INT_REG(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000300ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_SPXX_INT_SYNC(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000310ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_SPXX_TPA_ACC(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000338ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_SPXX_TPA_MAX(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000330ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_SPXX_TPA_SEL(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000328ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_SPXX_TRN4_CTL(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000360ull + (((block_id) & 1) * 0x8000000ull)) + +union cvmx_spxx_bckprs_cnt { + uint64_t u64; + struct cvmx_spxx_bckprs_cnt_s { + uint64_t reserved_32_63:32; + uint64_t cnt:32; + } s; + struct cvmx_spxx_bckprs_cnt_s cn38xx; + struct cvmx_spxx_bckprs_cnt_s cn38xxp2; + struct cvmx_spxx_bckprs_cnt_s cn58xx; + struct cvmx_spxx_bckprs_cnt_s cn58xxp1; +}; + +union cvmx_spxx_bist_stat { + uint64_t u64; + struct cvmx_spxx_bist_stat_s { + uint64_t reserved_3_63:61; + uint64_t stat2:1; + uint64_t stat1:1; + uint64_t stat0:1; + } s; + struct cvmx_spxx_bist_stat_s cn38xx; + struct cvmx_spxx_bist_stat_s cn38xxp2; + struct cvmx_spxx_bist_stat_s cn58xx; + struct cvmx_spxx_bist_stat_s cn58xxp1; +}; + +union cvmx_spxx_clk_ctl { + uint64_t u64; + struct cvmx_spxx_clk_ctl_s { + uint64_t reserved_17_63:47; + uint64_t seetrn:1; + uint64_t reserved_12_15:4; + uint64_t clkdly:5; + uint64_t runbist:1; + uint64_t statdrv:1; + uint64_t statrcv:1; + uint64_t sndtrn:1; + uint64_t drptrn:1; + uint64_t rcvtrn:1; + uint64_t srxdlck:1; + } s; + struct cvmx_spxx_clk_ctl_s cn38xx; + struct cvmx_spxx_clk_ctl_s cn38xxp2; + struct cvmx_spxx_clk_ctl_s cn58xx; + struct cvmx_spxx_clk_ctl_s cn58xxp1; +}; + +union cvmx_spxx_clk_stat { + uint64_t u64; + struct cvmx_spxx_clk_stat_s { + uint64_t reserved_11_63:53; + uint64_t stxcal:1; + uint64_t reserved_9_9:1; + uint64_t srxtrn:1; + uint64_t s4clk1:1; + uint64_t s4clk0:1; + uint64_t d4clk1:1; + uint64_t d4clk0:1; + uint64_t reserved_0_3:4; + } s; + struct cvmx_spxx_clk_stat_s cn38xx; + struct 
cvmx_spxx_clk_stat_s cn38xxp2; + struct cvmx_spxx_clk_stat_s cn58xx; + struct cvmx_spxx_clk_stat_s cn58xxp1; +}; + +union cvmx_spxx_dbg_deskew_ctl { + uint64_t u64; + struct cvmx_spxx_dbg_deskew_ctl_s { + uint64_t reserved_30_63:34; + uint64_t fallnop:1; + uint64_t fall8:1; + uint64_t reserved_26_27:2; + uint64_t sstep_go:1; + uint64_t sstep:1; + uint64_t reserved_22_23:2; + uint64_t clrdly:1; + uint64_t dec:1; + uint64_t inc:1; + uint64_t mux:1; + uint64_t offset:5; + uint64_t bitsel:5; + uint64_t offdly:6; + uint64_t dllfrc:1; + uint64_t dlldis:1; + } s; + struct cvmx_spxx_dbg_deskew_ctl_s cn38xx; + struct cvmx_spxx_dbg_deskew_ctl_s cn38xxp2; + struct cvmx_spxx_dbg_deskew_ctl_s cn58xx; + struct cvmx_spxx_dbg_deskew_ctl_s cn58xxp1; +}; + +union cvmx_spxx_dbg_deskew_state { + uint64_t u64; + struct cvmx_spxx_dbg_deskew_state_s { + uint64_t reserved_9_63:55; + uint64_t testres:1; + uint64_t unxterm:1; + uint64_t muxsel:2; + uint64_t offset:5; + } s; + struct cvmx_spxx_dbg_deskew_state_s cn38xx; + struct cvmx_spxx_dbg_deskew_state_s cn38xxp2; + struct cvmx_spxx_dbg_deskew_state_s cn58xx; + struct cvmx_spxx_dbg_deskew_state_s cn58xxp1; +}; + +union cvmx_spxx_drv_ctl { + uint64_t u64; + struct cvmx_spxx_drv_ctl_s { + uint64_t reserved_0_63:64; + } s; + struct cvmx_spxx_drv_ctl_cn38xx { + uint64_t reserved_16_63:48; + uint64_t stx4ncmp:4; + uint64_t stx4pcmp:4; + uint64_t srx4cmp:8; + } cn38xx; + struct cvmx_spxx_drv_ctl_cn38xx cn38xxp2; + struct cvmx_spxx_drv_ctl_cn58xx { + uint64_t reserved_24_63:40; + uint64_t stx4ncmp:4; + uint64_t stx4pcmp:4; + uint64_t reserved_10_15:6; + uint64_t srx4cmp:10; + } cn58xx; + struct cvmx_spxx_drv_ctl_cn58xx cn58xxp1; +}; + +union cvmx_spxx_err_ctl { + uint64_t u64; + struct cvmx_spxx_err_ctl_s { + uint64_t reserved_9_63:55; + uint64_t prtnxa:1; + uint64_t dipcls:1; + uint64_t dippay:1; + uint64_t reserved_4_5:2; + uint64_t errcnt:4; + } s; + struct cvmx_spxx_err_ctl_s cn38xx; + struct cvmx_spxx_err_ctl_s cn38xxp2; + struct cvmx_spxx_err_ctl_s cn58xx; + struct cvmx_spxx_err_ctl_s cn58xxp1; +}; + +union cvmx_spxx_int_dat { + uint64_t u64; + struct cvmx_spxx_int_dat_s { + uint64_t reserved_32_63:32; + uint64_t mul:1; + uint64_t reserved_14_30:17; + uint64_t calbnk:2; + uint64_t rsvop:4; + uint64_t prt:8; + } s; + struct cvmx_spxx_int_dat_s cn38xx; + struct cvmx_spxx_int_dat_s cn38xxp2; + struct cvmx_spxx_int_dat_s cn58xx; + struct cvmx_spxx_int_dat_s cn58xxp1; +}; + +union cvmx_spxx_int_msk { + uint64_t u64; + struct cvmx_spxx_int_msk_s { + uint64_t reserved_12_63:52; + uint64_t calerr:1; + uint64_t syncerr:1; + uint64_t diperr:1; + uint64_t tpaovr:1; + uint64_t rsverr:1; + uint64_t drwnng:1; + uint64_t clserr:1; + uint64_t spiovr:1; + uint64_t reserved_2_3:2; + uint64_t abnorm:1; + uint64_t prtnxa:1; + } s; + struct cvmx_spxx_int_msk_s cn38xx; + struct cvmx_spxx_int_msk_s cn38xxp2; + struct cvmx_spxx_int_msk_s cn58xx; + struct cvmx_spxx_int_msk_s cn58xxp1; +}; + +union cvmx_spxx_int_reg { + uint64_t u64; + struct cvmx_spxx_int_reg_s { + uint64_t reserved_32_63:32; + uint64_t spf:1; + uint64_t reserved_12_30:19; + uint64_t calerr:1; + uint64_t syncerr:1; + uint64_t diperr:1; + uint64_t tpaovr:1; + uint64_t rsverr:1; + uint64_t drwnng:1; + uint64_t clserr:1; + uint64_t spiovr:1; + uint64_t reserved_2_3:2; + uint64_t abnorm:1; + uint64_t prtnxa:1; + } s; + struct cvmx_spxx_int_reg_s cn38xx; + struct cvmx_spxx_int_reg_s cn38xxp2; + struct cvmx_spxx_int_reg_s cn58xx; + struct cvmx_spxx_int_reg_s cn58xxp1; +}; + +union cvmx_spxx_int_sync { + uint64_t u64; + struct 
cvmx_spxx_int_sync_s { + uint64_t reserved_12_63:52; + uint64_t calerr:1; + uint64_t syncerr:1; + uint64_t diperr:1; + uint64_t tpaovr:1; + uint64_t rsverr:1; + uint64_t drwnng:1; + uint64_t clserr:1; + uint64_t spiovr:1; + uint64_t reserved_2_3:2; + uint64_t abnorm:1; + uint64_t prtnxa:1; + } s; + struct cvmx_spxx_int_sync_s cn38xx; + struct cvmx_spxx_int_sync_s cn38xxp2; + struct cvmx_spxx_int_sync_s cn58xx; + struct cvmx_spxx_int_sync_s cn58xxp1; +}; + +union cvmx_spxx_tpa_acc { + uint64_t u64; + struct cvmx_spxx_tpa_acc_s { + uint64_t reserved_32_63:32; + uint64_t cnt:32; + } s; + struct cvmx_spxx_tpa_acc_s cn38xx; + struct cvmx_spxx_tpa_acc_s cn38xxp2; + struct cvmx_spxx_tpa_acc_s cn58xx; + struct cvmx_spxx_tpa_acc_s cn58xxp1; +}; + +union cvmx_spxx_tpa_max { + uint64_t u64; + struct cvmx_spxx_tpa_max_s { + uint64_t reserved_32_63:32; + uint64_t max:32; + } s; + struct cvmx_spxx_tpa_max_s cn38xx; + struct cvmx_spxx_tpa_max_s cn38xxp2; + struct cvmx_spxx_tpa_max_s cn58xx; + struct cvmx_spxx_tpa_max_s cn58xxp1; +}; + +union cvmx_spxx_tpa_sel { + uint64_t u64; + struct cvmx_spxx_tpa_sel_s { + uint64_t reserved_4_63:60; + uint64_t prtsel:4; + } s; + struct cvmx_spxx_tpa_sel_s cn38xx; + struct cvmx_spxx_tpa_sel_s cn38xxp2; + struct cvmx_spxx_tpa_sel_s cn58xx; + struct cvmx_spxx_tpa_sel_s cn58xxp1; +}; + +union cvmx_spxx_trn4_ctl { + uint64_t u64; + struct cvmx_spxx_trn4_ctl_s { + uint64_t reserved_13_63:51; + uint64_t trntest:1; + uint64_t jitter:3; + uint64_t clr_boot:1; + uint64_t set_boot:1; + uint64_t maxdist:5; + uint64_t macro_en:1; + uint64_t mux_en:1; + } s; + struct cvmx_spxx_trn4_ctl_s cn38xx; + struct cvmx_spxx_trn4_ctl_s cn38xxp2; + struct cvmx_spxx_trn4_ctl_s cn58xx; + struct cvmx_spxx_trn4_ctl_s cn58xxp1; +}; +
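+/*
+ * Editor's note: a minimal usage sketch, not part of the original
+ * patch.  It shows the pattern these headers are built around: the
+ * CVMX_SPXX_* macro computes the CSR address for SPI block 0 or 1,
+ * and the matching union decodes the 64-bit value read back.  The
+ * function name is hypothetical.
+ */
+#if 0
+static void spxx_show_interrupts(int block)
+{
+	union cvmx_spxx_int_reg int_reg;
+
+	int_reg.u64 = cvmx_read_csr(CVMX_SPXX_INT_REG(block));
+	if (int_reg.s.diperr)
+		pr_err("SPX%d: DIP4 error\n", block);
+	if (int_reg.s.syncerr)
+		pr_err("SPX%d: sync error\n", block);
+}
+#endif
+
+#endif
diff --git a/drivers/staging/octeon/cvmx-srxx-defs.h b/drivers/staging/octeon/cvmx-srxx-defs.h new file mode 100644 index 0000000..d82b366 --- /dev/null +++ b/drivers/staging/octeon/cvmx-srxx-defs.h @@ -0,0 +1,126 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. 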
+ * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_SRXX_DEFS_H__ +#define __CVMX_SRXX_DEFS_H__ + +#define CVMX_SRXX_COM_CTL(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000200ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_SRXX_IGN_RX_FULL(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000218ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_SRXX_SPI4_CALX(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000000ull + (((offset) & 31) * 8) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_SRXX_SPI4_STAT(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000208ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_SRXX_SW_TICK_CTL(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000220ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_SRXX_SW_TICK_DAT(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000228ull + (((block_id) & 1) * 0x8000000ull)) + +union cvmx_srxx_com_ctl { + uint64_t u64; + struct cvmx_srxx_com_ctl_s { + uint64_t reserved_8_63:56; + uint64_t prts:4; + uint64_t st_en:1; + uint64_t reserved_1_2:2; + uint64_t inf_en:1; + } s; + struct cvmx_srxx_com_ctl_s cn38xx; + struct cvmx_srxx_com_ctl_s cn38xxp2; + struct cvmx_srxx_com_ctl_s cn58xx; + struct cvmx_srxx_com_ctl_s cn58xxp1; +}; + +union cvmx_srxx_ign_rx_full { + uint64_t u64; + struct cvmx_srxx_ign_rx_full_s { + uint64_t reserved_16_63:48; + uint64_t ignore:16; + } s; + struct cvmx_srxx_ign_rx_full_s cn38xx; + struct cvmx_srxx_ign_rx_full_s cn38xxp2; + struct cvmx_srxx_ign_rx_full_s cn58xx; + struct cvmx_srxx_ign_rx_full_s cn58xxp1; +}; + +union cvmx_srxx_spi4_calx { + uint64_t u64; + struct cvmx_srxx_spi4_calx_s { + uint64_t reserved_17_63:47; + uint64_t oddpar:1; + uint64_t prt3:4; + uint64_t prt2:4; + uint64_t prt1:4; + uint64_t prt0:4; + } s; + struct cvmx_srxx_spi4_calx_s cn38xx; + struct cvmx_srxx_spi4_calx_s cn38xxp2; + struct cvmx_srxx_spi4_calx_s cn58xx; + struct cvmx_srxx_spi4_calx_s cn58xxp1; +}; + +union cvmx_srxx_spi4_stat { + uint64_t u64; + struct cvmx_srxx_spi4_stat_s { + uint64_t reserved_16_63:48; + uint64_t m:8; + uint64_t reserved_7_7:1; + uint64_t len:7; + } s; + struct cvmx_srxx_spi4_stat_s cn38xx; + struct cvmx_srxx_spi4_stat_s cn38xxp2; + struct cvmx_srxx_spi4_stat_s cn58xx; + struct cvmx_srxx_spi4_stat_s cn58xxp1; +}; + +union cvmx_srxx_sw_tick_ctl { + uint64_t u64; + struct cvmx_srxx_sw_tick_ctl_s { + uint64_t reserved_14_63:50; + uint64_t eop:1; + uint64_t sop:1; + uint64_t mod:4; + uint64_t opc:4; + uint64_t adr:4; + } s; + struct cvmx_srxx_sw_tick_ctl_s cn38xx; + struct cvmx_srxx_sw_tick_ctl_s cn58xx; + struct cvmx_srxx_sw_tick_ctl_s cn58xxp1; +}; + +union cvmx_srxx_sw_tick_dat { + uint64_t u64; + struct cvmx_srxx_sw_tick_dat_s { + uint64_t dat:64; + } s; + struct cvmx_srxx_sw_tick_dat_s cn38xx; + struct cvmx_srxx_sw_tick_dat_s cn58xx; + struct cvmx_srxx_sw_tick_dat_s cn58xxp1; +}; + +#endif diff --git a/drivers/staging/octeon/cvmx-stxx-defs.h b/drivers/staging/octeon/cvmx-stxx-defs.h new file mode 100644 index 0000000..4f209b6 --- /dev/null +++ b/drivers/staging/octeon/cvmx-stxx-defs.h @@ -0,0 +1,292 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. 
+ * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_STXX_DEFS_H__ +#define __CVMX_STXX_DEFS_H__ + +#define CVMX_STXX_ARB_CTL(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000608ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_STXX_BCKPRS_CNT(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000688ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_STXX_COM_CTL(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000600ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_STXX_DIP_CNT(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000690ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_STXX_IGN_CAL(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000610ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_STXX_INT_MSK(block_id) \ + CVMX_ADD_IO_SEG(0x00011800900006A0ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_STXX_INT_REG(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000698ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_STXX_INT_SYNC(block_id) \ + CVMX_ADD_IO_SEG(0x00011800900006A8ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_STXX_MIN_BST(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000618ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_STXX_SPI4_CALX(offset, block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000400ull + (((offset) & 31) * 8) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_STXX_SPI4_DAT(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000628ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_STXX_SPI4_STAT(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000630ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_STXX_STAT_BYTES_HI(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000648ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_STXX_STAT_BYTES_LO(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000680ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_STXX_STAT_CTL(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000638ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_STXX_STAT_PKT_XMT(block_id) \ + CVMX_ADD_IO_SEG(0x0001180090000640ull + (((block_id) & 1) * 0x8000000ull)) + +union cvmx_stxx_arb_ctl { + uint64_t u64; + struct cvmx_stxx_arb_ctl_s { + uint64_t reserved_6_63:58; + uint64_t mintrn:1; + uint64_t reserved_4_4:1; + uint64_t igntpa:1; + uint64_t reserved_0_2:3; + } s; + struct cvmx_stxx_arb_ctl_s cn38xx; + struct cvmx_stxx_arb_ctl_s cn38xxp2; + struct cvmx_stxx_arb_ctl_s cn58xx; + struct cvmx_stxx_arb_ctl_s cn58xxp1; +}; + +union cvmx_stxx_bckprs_cnt { + uint64_t u64; + struct cvmx_stxx_bckprs_cnt_s { + uint64_t reserved_32_63:32; + uint64_t cnt:32; + } s; + struct cvmx_stxx_bckprs_cnt_s cn38xx; + struct cvmx_stxx_bckprs_cnt_s cn38xxp2; + struct cvmx_stxx_bckprs_cnt_s cn58xx; + struct cvmx_stxx_bckprs_cnt_s cn58xxp1; +}; + +union cvmx_stxx_com_ctl { + uint64_t u64; + struct cvmx_stxx_com_ctl_s { + uint64_t reserved_4_63:60; + uint64_t st_en:1; + uint64_t reserved_1_2:2; + 
uint64_t inf_en:1; + } s; + struct cvmx_stxx_com_ctl_s cn38xx; + struct cvmx_stxx_com_ctl_s cn38xxp2; + struct cvmx_stxx_com_ctl_s cn58xx; + struct cvmx_stxx_com_ctl_s cn58xxp1; +}; + +union cvmx_stxx_dip_cnt { + uint64_t u64; + struct cvmx_stxx_dip_cnt_s { + uint64_t reserved_8_63:56; + uint64_t frmmax:4; + uint64_t dipmax:4; + } s; + struct cvmx_stxx_dip_cnt_s cn38xx; + struct cvmx_stxx_dip_cnt_s cn38xxp2; + struct cvmx_stxx_dip_cnt_s cn58xx; + struct cvmx_stxx_dip_cnt_s cn58xxp1; +}; + +union cvmx_stxx_ign_cal { + uint64_t u64; + struct cvmx_stxx_ign_cal_s { + uint64_t reserved_16_63:48; + uint64_t igntpa:16; + } s; + struct cvmx_stxx_ign_cal_s cn38xx; + struct cvmx_stxx_ign_cal_s cn38xxp2; + struct cvmx_stxx_ign_cal_s cn58xx; + struct cvmx_stxx_ign_cal_s cn58xxp1; +}; + +union cvmx_stxx_int_msk { + uint64_t u64; + struct cvmx_stxx_int_msk_s { + uint64_t reserved_8_63:56; + uint64_t frmerr:1; + uint64_t unxfrm:1; + uint64_t nosync:1; + uint64_t diperr:1; + uint64_t datovr:1; + uint64_t ovrbst:1; + uint64_t calpar1:1; + uint64_t calpar0:1; + } s; + struct cvmx_stxx_int_msk_s cn38xx; + struct cvmx_stxx_int_msk_s cn38xxp2; + struct cvmx_stxx_int_msk_s cn58xx; + struct cvmx_stxx_int_msk_s cn58xxp1; +}; + +union cvmx_stxx_int_reg { + uint64_t u64; + struct cvmx_stxx_int_reg_s { + uint64_t reserved_9_63:55; + uint64_t syncerr:1; + uint64_t frmerr:1; + uint64_t unxfrm:1; + uint64_t nosync:1; + uint64_t diperr:1; + uint64_t datovr:1; + uint64_t ovrbst:1; + uint64_t calpar1:1; + uint64_t calpar0:1; + } s; + struct cvmx_stxx_int_reg_s cn38xx; + struct cvmx_stxx_int_reg_s cn38xxp2; + struct cvmx_stxx_int_reg_s cn58xx; + struct cvmx_stxx_int_reg_s cn58xxp1; +}; + +union cvmx_stxx_int_sync { + uint64_t u64; + struct cvmx_stxx_int_sync_s { + uint64_t reserved_8_63:56; + uint64_t frmerr:1; + uint64_t unxfrm:1; + uint64_t nosync:1; + uint64_t diperr:1; + uint64_t datovr:1; + uint64_t ovrbst:1; + uint64_t calpar1:1; + uint64_t calpar0:1; + } s; + struct cvmx_stxx_int_sync_s cn38xx; + struct cvmx_stxx_int_sync_s cn38xxp2; + struct cvmx_stxx_int_sync_s cn58xx; + struct cvmx_stxx_int_sync_s cn58xxp1; +}; + +union cvmx_stxx_min_bst { + uint64_t u64; + struct cvmx_stxx_min_bst_s { + uint64_t reserved_9_63:55; + uint64_t minb:9; + } s; + struct cvmx_stxx_min_bst_s cn38xx; + struct cvmx_stxx_min_bst_s cn38xxp2; + struct cvmx_stxx_min_bst_s cn58xx; + struct cvmx_stxx_min_bst_s cn58xxp1; +}; + +union cvmx_stxx_spi4_calx { + uint64_t u64; + struct cvmx_stxx_spi4_calx_s { + uint64_t reserved_17_63:47; + uint64_t oddpar:1; + uint64_t prt3:4; + uint64_t prt2:4; + uint64_t prt1:4; + uint64_t prt0:4; + } s; + struct cvmx_stxx_spi4_calx_s cn38xx; + struct cvmx_stxx_spi4_calx_s cn38xxp2; + struct cvmx_stxx_spi4_calx_s cn58xx; + struct cvmx_stxx_spi4_calx_s cn58xxp1; +}; + +union cvmx_stxx_spi4_dat { + uint64_t u64; + struct cvmx_stxx_spi4_dat_s { + uint64_t reserved_32_63:32; + uint64_t alpha:16; + uint64_t max_t:16; + } s; + struct cvmx_stxx_spi4_dat_s cn38xx; + struct cvmx_stxx_spi4_dat_s cn38xxp2; + struct cvmx_stxx_spi4_dat_s cn58xx; + struct cvmx_stxx_spi4_dat_s cn58xxp1; +}; + +union cvmx_stxx_spi4_stat { + uint64_t u64; + struct cvmx_stxx_spi4_stat_s { + uint64_t reserved_16_63:48; + uint64_t m:8; + uint64_t reserved_7_7:1; + uint64_t len:7; + } s; + struct cvmx_stxx_spi4_stat_s cn38xx; + struct cvmx_stxx_spi4_stat_s cn38xxp2; + struct cvmx_stxx_spi4_stat_s cn58xx; + struct cvmx_stxx_spi4_stat_s cn58xxp1; +}; + +union cvmx_stxx_stat_bytes_hi { + uint64_t u64; + struct cvmx_stxx_stat_bytes_hi_s { + uint64_t 
reserved_32_63:32; + uint64_t cnt:32; + } s; + struct cvmx_stxx_stat_bytes_hi_s cn38xx; + struct cvmx_stxx_stat_bytes_hi_s cn38xxp2; + struct cvmx_stxx_stat_bytes_hi_s cn58xx; + struct cvmx_stxx_stat_bytes_hi_s cn58xxp1; +}; + +union cvmx_stxx_stat_bytes_lo { + uint64_t u64; + struct cvmx_stxx_stat_bytes_lo_s { + uint64_t reserved_32_63:32; + uint64_t cnt:32; + } s; + struct cvmx_stxx_stat_bytes_lo_s cn38xx; + struct cvmx_stxx_stat_bytes_lo_s cn38xxp2; + struct cvmx_stxx_stat_bytes_lo_s cn58xx; + struct cvmx_stxx_stat_bytes_lo_s cn58xxp1; +}; + +union cvmx_stxx_stat_ctl { + uint64_t u64; + struct cvmx_stxx_stat_ctl_s { + uint64_t reserved_5_63:59; + uint64_t clr:1; + uint64_t bckprs:4; + } s; + struct cvmx_stxx_stat_ctl_s cn38xx; + struct cvmx_stxx_stat_ctl_s cn38xxp2; + struct cvmx_stxx_stat_ctl_s cn58xx; + struct cvmx_stxx_stat_ctl_s cn58xxp1; +}; + +union cvmx_stxx_stat_pkt_xmt { + uint64_t u64; + struct cvmx_stxx_stat_pkt_xmt_s { + uint64_t reserved_32_63:32; + uint64_t cnt:32; + } s; + struct cvmx_stxx_stat_pkt_xmt_s cn38xx; + struct cvmx_stxx_stat_pkt_xmt_s cn38xxp2; + struct cvmx_stxx_stat_pkt_xmt_s cn58xx; + struct cvmx_stxx_stat_pkt_xmt_s cn58xxp1; +}; + +#endif diff --git a/drivers/staging/octeon/cvmx-wqe.h b/drivers/staging/octeon/cvmx-wqe.h new file mode 100644 index 0000000..6536109 --- /dev/null +++ b/drivers/staging/octeon/cvmx-wqe.h @@ -0,0 +1,397 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/** + * + * This header file defines the work queue entry (wqe) data structure. + * Since this is a commonly used structure that depends on structures + * from several hardware blocks, those definitions have been placed + * in this file to create a single point of definition of the wqe + * format. + * Data structures are still named according to the block that they + * relate to. + * + */ + +#ifndef __CVMX_WQE_H__ +#define __CVMX_WQE_H__ + +#include "cvmx-packet.h" + + +#define OCT_TAG_TYPE_STRING(x) \ + (((x) == CVMX_POW_TAG_TYPE_ORDERED) ? "ORDERED" : \ + (((x) == CVMX_POW_TAG_TYPE_ATOMIC) ? "ATOMIC" : \ + (((x) == CVMX_POW_TAG_TYPE_NULL) ? 
"NULL" : \ + "NULL_NULL"))) + +/** + * HW decode / err_code in work queue entry + */ +typedef union { + uint64_t u64; + + /* Use this struct if the hardware determines that the packet is IP */ + struct { + /* HW sets this to the number of buffers used by this packet */ + uint64_t bufs:8; + /* HW sets to the number of L2 bytes prior to the IP */ + uint64_t ip_offset:8; + /* set to 1 if we found DSA/VLAN in the L2 */ + uint64_t vlan_valid:1; + /* Set to 1 if the DSA/VLAN tag is stacked */ + uint64_t vlan_stacked:1; + uint64_t unassigned:1; + /* HW sets to the DSA/VLAN CFI flag (valid when vlan_valid) */ + uint64_t vlan_cfi:1; + /* HW sets to the DSA/VLAN_ID field (valid when vlan_valid) */ + uint64_t vlan_id:12; + /* Ring Identifier (if PCIe). Requires PIP_GBL_CTL[RING_EN]=1 */ + uint64_t pr:4; + uint64_t unassigned2:8; + /* the packet needs to be decompressed */ + uint64_t dec_ipcomp:1; + /* the packet is either TCP or UDP */ + uint64_t tcp_or_udp:1; + /* the packet needs to be decrypted (ESP or AH) */ + uint64_t dec_ipsec:1; + /* the packet is IPv6 */ + uint64_t is_v6:1; + + /* + * (rcv_error, not_IP, IP_exc, is_frag, L4_error, + * software, etc.). + */ + + /* + * reserved for software use, hardware will clear on + * packet creation. + */ + uint64_t software:1; + /* exceptional conditions below */ + /* the receive interface hardware detected an L4 error + * (only applies if !is_frag) (only applies if + * !rcv_error && !not_IP && !IP_exc && !is_frag) + * failure indicated in err_code below, decode: + * + * - 1 = Malformed L4 + * - 2 = L4 Checksum Error: the L4 checksum value is + * - 3 = UDP Length Error: The UDP length field would + * make the UDP data longer than what remains in + * the IP packet (as defined by the IP header + * length field). + * - 4 = Bad L4 Port: either the source or destination + * TCP/UDP port is 0. + * - 8 = TCP FIN Only: the packet is TCP and only the + * FIN flag set. + * - 9 = TCP No Flags: the packet is TCP and no flags + * are set. + * - 10 = TCP FIN RST: the packet is TCP and both FIN + * and RST are set. + * - 11 = TCP SYN URG: the packet is TCP and both SYN + * and URG are set. + * - 12 = TCP SYN RST: the packet is TCP and both SYN + * and RST are set. + * - 13 = TCP SYN FIN: the packet is TCP and both SYN + * and FIN are set. + */ + uint64_t L4_error:1; + /* set if the packet is a fragment */ + uint64_t is_frag:1; + /* the receive interface hardware detected an IP error + * / exception (only applies if !rcv_error && !not_IP) + * failure indicated in err_code below, decode: + * + * - 1 = Not IP: the IP version field is neither 4 nor + * 6. + * - 2 = IPv4 Header Checksum Error: the IPv4 header + * has a checksum violation. + * - 3 = IP Malformed Header: the packet is not long + * enough to contain the IP header. + * - 4 = IP Malformed: the packet is not long enough + * to contain the bytes indicated by the IP + * header. Pad is allowed. + * - 5 = IP TTL Hop: the IPv4 TTL field or the IPv6 + * Hop Count field are zero. + * - 6 = IP Options + */ + uint64_t IP_exc:1; + /* + * Set if the hardware determined that the packet is a + * broadcast. + */ + uint64_t is_bcast:1; + /* + * St if the hardware determined that the packet is a + * multi-cast. + */ + uint64_t is_mcast:1; + /* + * Set if the packet may not be IP (must be zero in + * this case). + */ + uint64_t not_IP:1; + /* + * The receive interface hardware detected a receive + * error (must be zero in this case). 
+ */ + uint64_t rcv_error:1; + /* lower err_code = first-level descriptor of the + * work */ + /* zero for packet submitted by hardware that isn't on + * the slow path */ + /* type is cvmx_pip_err_t */ + uint64_t err_code:8; + } s; + + /* use this to get at the 16 vlan bits */ + struct { + uint64_t unused1:16; + uint64_t vlan:16; + uint64_t unused2:32; + } svlan; + + /* + * use this struct if the hardware could not determine that + * the packet is IP. + */ + struct { + /* + * HW sets this to the number of buffers used by this + * packet. + */ + uint64_t bufs:8; + uint64_t unused:8; + /* set to 1 if we found DSA/VLAN in the L2 */ + uint64_t vlan_valid:1; + /* Set to 1 if the DSA/VLAN tag is stacked */ + uint64_t vlan_stacked:1; + uint64_t unassigned:1; + /* + * HW sets to the DSA/VLAN CFI flag (valid when + * vlan_valid) + */ + uint64_t vlan_cfi:1; + /* + * HW sets to the DSA/VLAN_ID field (valid when + * vlan_valid). + */ + uint64_t vlan_id:12; + /* + * Ring Identifier (if PCIe). Requires + * PIP_GBL_CTL[RING_EN]=1 + */ + uint64_t pr:4; + uint64_t unassigned2:12; + /* + * reserved for software use, hardware will clear on + * packet creation. + */ + uint64_t software:1; + uint64_t unassigned3:1; + /* + * set if the hardware determined that the packet is + * rarp. + */ + uint64_t is_rarp:1; + /* + * set if the hardware determined that the packet is + * arp + */ + uint64_t is_arp:1; + /* + * set if the hardware determined that the packet is a + * broadcast. + */ + uint64_t is_bcast:1; + /* + * set if the hardware determined that the packet is a + * multi-cast + */ + uint64_t is_mcast:1; + /* + * set if the packet may not be IP (must be one in + * this case) + */ + uint64_t not_IP:1; + /* The receive interface hardware detected a receive + * error. Failure indicated in err_code below, + * decode: + * + * - 1 = partial error: a packet was partially + * received, but internal buffering / bandwidth + * was not adequate to receive the entire + * packet. + * - 2 = jabber error: the RGMII packet was too large + * and is truncated. + * - 3 = overrun error: the RGMII packet is longer + * than allowed and had an FCS error. + * - 4 = oversize error: the RGMII packet is longer + * than allowed. + * - 5 = alignment error: the RGMII packet is not an + * integer number of bytes + * and had an FCS error (100M and 10M only). + * - 6 = fragment error: the RGMII packet is shorter + * than allowed and had an FCS error. + * - 7 = GMX FCS error: the RGMII packet had an FCS + * error. + * - 8 = undersize error: the RGMII packet is shorter + * than allowed. + * - 9 = extend error: the RGMII packet had an extend + * error. + * - 10 = length mismatch error: the RGMII packet had + * a length that did not match the length field + * in the L2 HDR. + * - 11 = RGMII RX error/SPI4 DIP4 Error: the RGMII + * packet had one or more data reception errors + * (RXERR) or the SPI4 packet had one or more + * DIP4 errors. + * - 12 = RGMII skip error/SPI4 Abort Error: the RGMII + * packet was not large enough to cover the + * skipped bytes or the SPI4 packet was + * terminated with an Abort EOPS. + * - 13 = RGMII nibble error/SPI4 Port NXA Error: the + * RGMII packet had a stutter error (data not + * repeated - 10/100M only) or the SPI4 packet + * was sent to an NXA. + * - 16 = FCS error: a SPI4.2 packet had an FCS error. + * - 17 = Skip error: a packet was not large enough to + * cover the skipped bytes. + * - 18 = L2 header malformed: the packet is not long + * enough to contain the L2. 
+ */ + + uint64_t rcv_error:1; + /* + * lower err_code = first-level descriptor of the + * work + */ + /* + * zero for packet submitted by hardware that isn't on + * the slow path + */ + /* type is cvmx_pip_err_t (union, so can't use directly) */ + uint64_t err_code:8; + } snoip; + +} cvmx_pip_wqe_word2; + +/** + * Work queue entry format + * + * must be 8-byte aligned + */ +typedef struct { + + /***************************************************************** + * WORD 0 + * HW WRITE: the following 64 bits are filled by HW when a packet arrives + */ + + /** + * raw chksum result generated by the HW + */ + uint16_t hw_chksum; + /** + * Field unused by hardware - available for software + */ + uint8_t unused; + /** + * Next pointer used by hardware for list maintenance. + * May be written/read by HW before the work queue + * entry is scheduled to a PP + * (Only 36 bits used in Octeon 1) + */ + uint64_t next_ptr:40; + + /***************************************************************** + * WORD 1 + * HW WRITE: the following 64 bits are filled by HW when a packet arrives + */ + + /** + * HW sets to the total number of bytes in the packet + */ + uint64_t len:16; + /** + * HW sets this to input physical port + */ + uint64_t ipprt:6; + + /** + * HW sets this to what it thought the priority of the input packet was + */ + uint64_t qos:3; + + /** + * the group that the work queue entry will be scheduled to + */ + uint64_t grp:4; + /** + * the type of the tag (ORDERED, ATOMIC, NULL) + */ + uint64_t tag_type:3; + /** + * the synchronization/ordering tag + */ + uint64_t tag:32; + + /** + * WORD 2 HW WRITE: the following 64-bits are filled in by + * hardware when a packet arrives. This indicates a variety of + * status and error conditions. + */ + cvmx_pip_wqe_word2 word2; + + /** + * Pointer to the first segment of the packet. + */ + union cvmx_buf_ptr packet_ptr; + + /** + * HW WRITE: octeon will fill in a programmable amount from the + * packet: at most the amount needed to fill the work queue + * entry to 128 bytes, but possibly less. + * + * If the packet is recognized to be IP, the hardware starts + * writing here where the IP header starts (the IPv4 header is + * padded for appropriate alignment). If the packet is not + * recognized to be IP, the hardware starts writing the + * beginning of the packet here. + */ + uint8_t packet_data[96]; + + /** + * If desired, SW can make the work Q entry any length. For the + * purposes of discussion here, assume 128B always, as this is all that + * the hardware deals with. + * + */ + +} CVMX_CACHE_LINE_ALIGNED cvmx_wqe_t; +
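+/*
+ * Editor's note: a minimal classification sketch, not part of the
+ * original patch; the helper name is hypothetical.  It shows how an
+ * RX path might use the word2 decodes above.  not_IP occupies the
+ * same bit position in both decodes, so it can be tested first to
+ * pick the right view, then the matching error bits select err_code.
+ */
+#if 0
+static inline int cvmx_wqe_rx_err_code(const cvmx_wqe_t *work)
+{
+	if (work->word2.snoip.not_IP)
+		return work->word2.snoip.rcv_error ?
+			work->word2.snoip.err_code : 0;
+	if (work->word2.s.rcv_error || work->word2.s.IP_exc ||
+	    work->word2.s.L4_error)
+		return work->word2.s.err_code;
+	return 0;
+}
+#endif
+
+#endif /* __CVMX_WQE_H__ */
diff --git a/drivers/staging/octeon/ethernet-common.c b/drivers/staging/octeon/ethernet-common.c new file mode 100644 index 0000000..3e6f5b8 --- /dev/null +++ b/drivers/staging/octeon/ethernet-common.c @@ -0,0 +1,328 @@ +/********************************************************************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2007 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. 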
See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information +**********************************************************************/ +#include <linux/kernel.h> +#include <linux/mii.h> +#include <net/dst.h> + +#include <asm/atomic.h> +#include <asm/octeon/octeon.h> + +#include "ethernet-defines.h" +#include "ethernet-tx.h" +#include "ethernet-mdio.h" +#include "ethernet-util.h" +#include "octeon-ethernet.h" +#include "ethernet-common.h" + +#include "cvmx-pip.h" +#include "cvmx-pko.h" +#include "cvmx-fau.h" +#include "cvmx-helper.h" + +#include "cvmx-gmxx-defs.h" + +/** + * Get the low level ethernet statistics + * + * @dev: Device to get the statistics from + * Returns Pointer to the statistics + */ +static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev) +{ + cvmx_pip_port_status_t rx_status; + cvmx_pko_port_status_t tx_status; + struct octeon_ethernet *priv = netdev_priv(dev); + + if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) { + if (octeon_is_simulation()) { + /* The simulator doesn't support statistics */ + memset(&rx_status, 0, sizeof(rx_status)); + memset(&tx_status, 0, sizeof(tx_status)); + } else { + cvmx_pip_get_port_status(priv->port, 1, &rx_status); + cvmx_pko_get_port_status(priv->port, 1, &tx_status); + } + + priv->stats.rx_packets += rx_status.inb_packets; + priv->stats.tx_packets += tx_status.packets; + priv->stats.rx_bytes += rx_status.inb_octets; + priv->stats.tx_bytes += tx_status.octets; + priv->stats.multicast += rx_status.multicast_packets; + priv->stats.rx_crc_errors += rx_status.inb_errors; + priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets; + + /* + * The drop counter must be incremented atomically + * since the RX tasklet also increments it. + */ +#ifdef CONFIG_64BIT + atomic64_add(rx_status.dropped_packets, + (atomic64_t *)&priv->stats.rx_dropped); +#else + atomic_add(rx_status.dropped_packets, + (atomic_t *)&priv->stats.rx_dropped); +#endif + } + + return &priv->stats; +} + +/** + * Set the multicast list. Individual addresses are not filtered; + * multicast is either accepted or rejected as a whole. + * + * @dev: Device to work on + */ +static void cvm_oct_common_set_multicast_list(struct net_device *dev) +{ + union cvmx_gmxx_prtx_cfg gmx_cfg; + struct octeon_ethernet *priv = netdev_priv(dev); + int interface = INTERFACE(priv->port); + int index = INDEX(priv->port); + + if ((interface < 2) + && (cvmx_helper_interface_get_mode(interface) != + CVMX_HELPER_INTERFACE_MODE_SPI)) { + union cvmx_gmxx_rxx_adr_ctl control; + control.u64 = 0; + control.s.bcst = 1; /* Allow broadcast MAC addresses */ + + if (dev->mc_list || (dev->flags & IFF_ALLMULTI) || + (dev->flags & IFF_PROMISC)) + /* Force accept multicast packets */ + control.s.mcst = 2; + else + /* Force reject multicast packets */ + control.s.mcst = 1; + + if (dev->flags & IFF_PROMISC) + /* + * Reject matches if promisc. Since CAM is + * shut off, should accept everything. 
+ */ + control.s.cam_mode = 0; + else + /* Filter packets based on the CAM */ + control.s.cam_mode = 1; + + gmx_cfg.u64 = + cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), + gmx_cfg.u64 & ~1ull); + + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface), + control.u64); + if (dev->flags & IFF_PROMISC) + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN + (index, interface), 0); + else + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN + (index, interface), 1); + + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), + gmx_cfg.u64); + } +} + +/** + * Set the hardware MAC address for a device + * + * @dev: Device to change the MAC address for + * @addr: Address structure to change it to. MAC address is addr + 2. + * Returns Zero on success + */ +static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + union cvmx_gmxx_prtx_cfg gmx_cfg; + int interface = INTERFACE(priv->port); + int index = INDEX(priv->port); + + memcpy(dev->dev_addr, addr + 2, 6); + + if ((interface < 2) + && (cvmx_helper_interface_get_mode(interface) != + CVMX_HELPER_INTERFACE_MODE_SPI)) { + int i; + uint8_t *ptr = addr; + uint64_t mac = 0; + for (i = 0; i < 6; i++) + mac = (mac << 8) | (uint64_t) (ptr[i + 2]); + + gmx_cfg.u64 = + cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), + gmx_cfg.u64 & ~1ull); + + cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac); + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface), + ptr[2]); + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface), + ptr[3]); + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface), + ptr[4]); + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface), + ptr[5]); + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface), + ptr[6]); + cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface), + ptr[7]); + cvm_oct_common_set_multicast_list(dev); + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), + gmx_cfg.u64); + } + return 0; +} + +/** + * Change the link MTU + * + * @dev: Device to change + * @new_mtu: The new MTU + * + * Returns Zero on success + */ +static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + int interface = INTERFACE(priv->port); + int index = INDEX(priv->port); +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) + int vlan_bytes = 4; +#else + int vlan_bytes = 0; +#endif + + /* + * Limit the MTU to keep the frame between 64 and 65392 bytes on + * the wire: new_mtu plus 14 bytes of ethernet header, 4 bytes of + * FCS, and 4 more if VLAN support is compiled in (e.g. a 1500 + * byte MTU gives a 1522 byte VLAN frame). + */ + if ((new_mtu + 14 + 4 + vlan_bytes < 64) + || (new_mtu + 14 + 4 + vlan_bytes > 65392)) { + pr_err("MTU must be between %d and %d.\n", + 64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes); + return -EINVAL; + } + dev->mtu = new_mtu; + + if ((interface < 2) + && (cvmx_helper_interface_get_mode(interface) != + CVMX_HELPER_INTERFACE_MODE_SPI)) { + /* Add ethernet header and FCS, and VLAN if configured. */ + int max_packet = new_mtu + 14 + 4 + vlan_bytes; + + if (OCTEON_IS_MODEL(OCTEON_CN3XXX) + || OCTEON_IS_MODEL(OCTEON_CN58XX)) { + /* Signal errors on packets larger than the MTU */ + cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface), + max_packet); + } else { + /* + * Set the hardware to truncate packets larger + * than the MTU and smaller than 64 bytes. 
+ */ + union cvmx_pip_frm_len_chkx frm_len_chk; + frm_len_chk.u64 = 0; + frm_len_chk.s.minlen = 64; + frm_len_chk.s.maxlen = max_packet; + cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface), + frm_len_chk.u64); + } + /* + * Set the hardware to truncate packets larger than + * the MTU. The jabber register must be set to a + * multiple of 8 bytes, so round up. + */ + cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface), + (max_packet + 7) & ~7u); + } + return 0; +} + +/** + * Per network device initialization + * + * @dev: Device to initialize + * Returns Zero on success + */ +int cvm_oct_common_init(struct net_device *dev) +{ + static int count; + char mac[8] = { 0x00, 0x00, + octeon_bootinfo->mac_addr_base[0], + octeon_bootinfo->mac_addr_base[1], + octeon_bootinfo->mac_addr_base[2], + octeon_bootinfo->mac_addr_base[3], + octeon_bootinfo->mac_addr_base[4], + octeon_bootinfo->mac_addr_base[5] + count + }; + struct octeon_ethernet *priv = netdev_priv(dev); + + /* + * Force the interface to use the POW send if always_use_pow + * was specified or it is in the pow send list. + */ + if ((pow_send_group != -1) + && (always_use_pow || strstr(pow_send_list, dev->name))) + priv->queue = -1; + + if (priv->queue != -1) { + dev->hard_start_xmit = cvm_oct_xmit; + if (USE_HW_TCPUDP_CHECKSUM) + dev->features |= NETIF_F_IP_CSUM; + } else + dev->hard_start_xmit = cvm_oct_xmit_pow; + count++; + + dev->get_stats = cvm_oct_common_get_stats; + dev->set_mac_address = cvm_oct_common_set_mac_address; + dev->set_multicast_list = cvm_oct_common_set_multicast_list; + dev->change_mtu = cvm_oct_common_change_mtu; + dev->do_ioctl = cvm_oct_ioctl; + /* We do our own locking, Linux doesn't need to */ + dev->features |= NETIF_F_LLTX; + SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops); +#ifdef CONFIG_NET_POLL_CONTROLLER + dev->poll_controller = cvm_oct_poll_controller; +#endif + + cvm_oct_mdio_setup_device(dev); + dev->set_mac_address(dev, mac); + dev->change_mtu(dev, dev->mtu); + + /* + * Zero out stats for port so we won't mistakenly show + * counters from the bootloader. + */ + memset(dev->get_stats(dev), 0, sizeof(struct net_device_stats)); + + return 0; +} + +void cvm_oct_common_uninit(struct net_device *dev) +{ + /* Currently nothing to do */ +} diff --git a/drivers/staging/octeon/ethernet-common.h b/drivers/staging/octeon/ethernet-common.h new file mode 100644 index 0000000..2bd9cd7 --- /dev/null +++ b/drivers/staging/octeon/ethernet-common.h @@ -0,0 +1,29 @@ +/********************************************************************* + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2007 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. 
+ * Contact Cavium Networks for more information +*********************************************************************/ + +int cvm_oct_common_init(struct net_device *dev); +void cvm_oct_common_uninit(struct net_device *dev); diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h new file mode 100644 index 0000000..8f7374e --- /dev/null +++ b/drivers/staging/octeon/ethernet-defines.h @@ -0,0 +1,134 @@ +/********************************************************************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2007 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information +**********************************************************************/ + +/* + * A few defines are used to control the operation of this driver: + * CONFIG_CAVIUM_RESERVE32 + * This kernel config option controls the amount of memory configured + * in a wired TLB entry for all processes to share. If this is set, the + * driver will use this memory instead of kernel memory for pools. This + * allows 32bit userspace applications to access the buffers, but also + * requires all received packets to be copied. + * CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS + * This kernel config option allows the user to control the number of + * packet and work queue buffers allocated by the driver. If this is zero, + * the driver uses the default from below. + * USE_SKBUFFS_IN_HW + * Tells the driver to populate the packet buffers with kernel skbuffs. + * This allows the driver to receive packets without copying them. It also + * means that 32bit userspace can't access the packet buffers. + * USE_32BIT_SHARED + * This define tells the driver to allocate memory for buffers from the + * 32bit shared region instead of the kernel memory space. + * USE_HW_TCPUDP_CHECKSUM + * Controls if the Octeon TCP/UDP checksum engine is used for packet + * output. If this is zero, the kernel will perform the checksum in + * software. + * USE_MULTICORE_RECEIVE + * Process receive interrupts on multiple cores. This spreads the network + * load across the first 8 processors. If this is zero, only one core + * processes incoming packets. + * USE_ASYNC_IOBDMA + * Use asynchronous IO access to hardware. This uses Octeon's asynchronous + * IOBDMAs to issue IO accesses without stalling. Set this to zero + * to disable this. Note that IOBDMAs require CVMSEG. + * REUSE_SKBUFFS_WITHOUT_FREE + * Allows the TX path to free an skbuff into the FPA hardware pool. This + * can significantly improve performance for forwarding and bridging, but + * may be somewhat dangerous. 
Checks are made, but if any buffer is reused + * without the proper Linux cleanup, the networking stack may have very + * bizarre bugs. + */ +#ifndef __ETHERNET_DEFINES_H__ +#define __ETHERNET_DEFINES_H__ + +#include "cvmx-config.h" + + +#define OCTEON_ETHERNET_VERSION "1.9" + +#ifndef CONFIG_CAVIUM_RESERVE32 +#define CONFIG_CAVIUM_RESERVE32 0 +#endif + +#if CONFIG_CAVIUM_RESERVE32 +#define USE_32BIT_SHARED 1 +#define USE_SKBUFFS_IN_HW 0 +#define REUSE_SKBUFFS_WITHOUT_FREE 0 +#else +#define USE_32BIT_SHARED 0 +#define USE_SKBUFFS_IN_HW 1 +#ifdef CONFIG_NETFILTER +#define REUSE_SKBUFFS_WITHOUT_FREE 0 +#else +#define REUSE_SKBUFFS_WITHOUT_FREE 1 +#endif +#endif + +/* Max interrupts per second per core */ +#define INTERRUPT_LIMIT 10000 + +/* Don't limit the number of interrupts */ +/*#define INTERRUPT_LIMIT 0 */ +#define USE_HW_TCPUDP_CHECKSUM 1 + +#define USE_MULTICORE_RECEIVE 1 + +/* Enable Random Early Dropping under load */ +#define USE_RED 1 +#define USE_ASYNC_IOBDMA (CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0) + +/* + * Allow SW based preamble removal at 10Mbps to work around PHYs giving + * us bad preambles. + */ +#define USE_10MBPS_PREAMBLE_WORKAROUND 1 +/* + * Use this to have all FPA frees also tell the L2 not to write data + * to memory. + */ +#define DONT_WRITEBACK(x) (x) +/* Use this to not have FPA frees control L2 */ +/*#define DONT_WRITEBACK(x) 0 */ + +/* Maximum number of packets to process per interrupt. */ +#define MAX_RX_PACKETS 120 +#define MAX_OUT_QUEUE_DEPTH 1000 + +#ifndef CONFIG_SMP +#undef USE_MULTICORE_RECEIVE +#define USE_MULTICORE_RECEIVE 0 +#endif + +#define IP_PROTOCOL_TCP 6 +#define IP_PROTOCOL_UDP 0x11 + +#define FAU_NUM_PACKET_BUFFERS_TO_FREE (CVMX_FAU_REG_END - sizeof(uint32_t)) +#define TOTAL_NUMBER_OF_PORTS (CVMX_PIP_NUM_INPUT_PORTS+1) + + +#endif /* __ETHERNET_DEFINES_H__ */ diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c new file mode 100644 index 0000000..93cab0a --- /dev/null +++ b/drivers/staging/octeon/ethernet-mdio.c @@ -0,0 +1,231 @@ +/********************************************************************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2007 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. 
+ * Contact Cavium Networks for more information +**********************************************************************/ +#include <linux/kernel.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <net/dst.h> + +#include <asm/octeon/octeon.h> + +#include "ethernet-defines.h" +#include "octeon-ethernet.h" +#include "ethernet-mdio.h" + +#include "cvmx-helper-board.h" + +#include "cvmx-smix-defs.h" + +DECLARE_MUTEX(mdio_sem); + +/** + * Perform an MII read. Called by the generic MII routines + * + * @dev: Device to perform read for + * @phy_id: The MII phy id + * @location: Register location to read + * Returns Result from the read or zero on failure + */ +static int cvm_oct_mdio_read(struct net_device *dev, int phy_id, int location) +{ + union cvmx_smix_cmd smi_cmd; + union cvmx_smix_rd_dat smi_rd; + + smi_cmd.u64 = 0; + smi_cmd.s.phy_op = 1; + smi_cmd.s.phy_adr = phy_id; + smi_cmd.s.reg_adr = location; + cvmx_write_csr(CVMX_SMIX_CMD(0), smi_cmd.u64); + + do { + if (!in_interrupt()) + yield(); + smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(0)); + } while (smi_rd.s.pending); + + if (smi_rd.s.val) + return smi_rd.s.dat; + else + return 0; +} + +static int cvm_oct_mdio_dummy_read(struct net_device *dev, int phy_id, + int location) +{ + return 0xffff; +} + +/** + * Perform an MII write. Called by the generic MII routines + * + * @dev: Device to perform write for + * @phy_id: The MII phy id + * @location: Register location to write + * @val: Value to write + */ +static void cvm_oct_mdio_write(struct net_device *dev, int phy_id, int location, + int val) +{ + union cvmx_smix_cmd smi_cmd; + union cvmx_smix_wr_dat smi_wr; + + smi_wr.u64 = 0; + smi_wr.s.dat = val; + cvmx_write_csr(CVMX_SMIX_WR_DAT(0), smi_wr.u64); + + smi_cmd.u64 = 0; + smi_cmd.s.phy_op = 0; + smi_cmd.s.phy_adr = phy_id; + smi_cmd.s.reg_adr = location; + cvmx_write_csr(CVMX_SMIX_CMD(0), smi_cmd.u64); + + do { + if (!in_interrupt()) + yield(); + smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(0)); + } while (smi_wr.s.pending); +} + +static void cvm_oct_mdio_dummy_write(struct net_device *dev, int phy_id, + int location, int val) +{ +} + +static void cvm_oct_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strcpy(info->driver, "cavium-ethernet"); + strcpy(info->version, OCTEON_ETHERNET_VERSION); + strcpy(info->bus_info, "Builtin"); +} + +static int cvm_oct_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + int ret; + + down(&mdio_sem); + ret = mii_ethtool_gset(&priv->mii_info, cmd); + up(&mdio_sem); + + return ret; +} + +static int cvm_oct_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + int ret; + + down(&mdio_sem); + ret = mii_ethtool_sset(&priv->mii_info, cmd); + up(&mdio_sem); + + return ret; +} + +static int cvm_oct_nway_reset(struct net_device *dev) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + int ret; + + down(&mdio_sem); + ret = mii_nway_restart(&priv->mii_info); + up(&mdio_sem); + + return ret; +} + +static u32 cvm_oct_get_link(struct net_device *dev) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + u32 ret; + + down(&mdio_sem); + ret = mii_link_ok(&priv->mii_info); + up(&mdio_sem); + + return ret; +} + +struct ethtool_ops cvm_oct_ethtool_ops = { + .get_drvinfo = cvm_oct_get_drvinfo, + .get_settings = cvm_oct_get_settings, + .set_settings = cvm_oct_set_settings, + .nway_reset = cvm_oct_nway_reset, + .get_link = cvm_oct_get_link, + .get_sg = 
ethtool_op_get_sg, + .get_tx_csum = ethtool_op_get_tx_csum, +}; + +/** + * IOCTL support for PHY control + * + * @dev: Device to change + * @rq: the request + * @cmd: the command + * Returns Zero on success + */ +int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + struct mii_ioctl_data *data = if_mii(rq); + unsigned int duplex_chg; + int ret; + + down(&mdio_sem); + ret = generic_mii_ioctl(&priv->mii_info, data, cmd, &duplex_chg); + up(&mdio_sem); + + return ret; +} + +/** + * Set up the MDIO device structures + * + * @dev: Device to set up + * + * Returns Zero on success, negative on failure + */ +int cvm_oct_mdio_setup_device(struct net_device *dev) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + int phy_id = cvmx_helper_board_get_mii_address(priv->port); + if (phy_id != -1) { + priv->mii_info.dev = dev; + priv->mii_info.phy_id = phy_id; + priv->mii_info.phy_id_mask = 0xff; + priv->mii_info.supports_gmii = 1; + priv->mii_info.reg_num_mask = 0x1f; + priv->mii_info.mdio_read = cvm_oct_mdio_read; + priv->mii_info.mdio_write = cvm_oct_mdio_write; + } else { + /* Supply dummy MDIO routines so the kernel won't crash + if the user tries to read them */ + priv->mii_info.mdio_read = cvm_oct_mdio_dummy_read; + priv->mii_info.mdio_write = cvm_oct_mdio_dummy_write; + } + return 0; +}
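+
+/*
+ * Editor's note: a minimal usage sketch, not part of the original
+ * patch; the function name is hypothetical.  It reads the 32-bit PHY
+ * identifier through the SMI helpers above (MII_PHYSID1/MII_PHYSID2
+ * come from <linux/mii.h>, which is already included).
+ */
+#if 0
+static uint32_t cvm_oct_read_phy_id(struct net_device *dev, int phy_id)
+{
+	uint32_t id;
+
+	id = (uint32_t)cvm_oct_mdio_read(dev, phy_id, MII_PHYSID1) << 16;
+	id |= (uint32_t)cvm_oct_mdio_read(dev, phy_id, MII_PHYSID2);
+	return id;
+}
+#endif
diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h new file mode 100644 index 0000000..6314141 --- /dev/null +++ b/drivers/staging/octeon/ethernet-mdio.h @@ -0,0 +1,46 @@ +/********************************************************************* + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2007 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. 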
+ * Contact Cavium Networks for more information +*********************************************************************/ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/init.h> +#include <linux/etherdevice.h> +#include <linux/ip.h> +#include <linux/string.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/seq_file.h> +#include <linux/proc_fs.h> +#include <net/dst.h> +#ifdef CONFIG_XFRM +#include <linux/xfrm.h> +#include <net/xfrm.h> +#endif /* CONFIG_XFRM */ + +extern struct ethtool_ops cvm_oct_ethtool_ops; +int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +int cvm_oct_mdio_setup_device(struct net_device *dev); diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c new file mode 100644 index 0000000..b595903 --- /dev/null +++ b/drivers/staging/octeon/ethernet-mem.c @@ -0,0 +1,198 @@ +/********************************************************************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2007 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. 
+ * Contact Cavium Networks for more information +**********************************************************************/ +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/mii.h> +#include <net/dst.h> + +#include <asm/octeon/octeon.h> + +#include "ethernet-defines.h" + +#include "cvmx-fpa.h" + +/** + * Fill the supplied hardware pool with skbuffs + * + * @pool: Pool to allocate an skbuff for + * @size: Size of the buffer needed for the pool + * @elements: Number of buffers to allocate + */ +static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements) +{ + int freed = elements; + while (freed) { + + struct sk_buff *skb = dev_alloc_skb(size + 128); + if (unlikely(skb == NULL)) { + pr_warning + ("Failed to allocate skb for hardware pool %d\n", + pool); + break; + } + + skb_reserve(skb, 128 - (((unsigned long)skb->data) & 0x7f)); + *(struct sk_buff **)(skb->data - sizeof(void *)) = skb; + cvmx_fpa_free(skb->data, pool, DONT_WRITEBACK(size / 128)); + freed--; + } + return elements - freed; +} + +/** + * Free the supplied hardware pool of skbuffs + * + * @pool: Pool to free skbuffs from + * @size: Size of each buffer in the pool + * @elements: Number of buffers expected in the pool + */ +static void cvm_oct_free_hw_skbuff(int pool, int size, int elements) +{ + char *memory; + + do { + memory = cvmx_fpa_alloc(pool); + if (memory) { + struct sk_buff *skb = + *(struct sk_buff **)(memory - sizeof(void *)); + elements--; + dev_kfree_skb(skb); + } + } while (memory); + + if (elements < 0) + pr_warning("Freeing of pool %u had too many skbuffs (%d)\n", + pool, elements); + else if (elements > 0) + pr_warning("Freeing of pool %u is missing %d skbuffs\n", + pool, elements); +} + +/** + * This function fills a hardware pool with memory. Depending + * on the config defines, this memory might come from the + * kernel or global 32bit memory allocated with + * cvmx_bootmem_alloc. 
+ * + * @pool: Pool to populate + * @size: Size of each buffer in the pool + * @elements: Number of buffers to allocate + */ +static int cvm_oct_fill_hw_memory(int pool, int size, int elements) +{ + char *memory; + int freed = elements; + + if (USE_32BIT_SHARED) { + extern uint64_t octeon_reserve32_memory; + + memory = + cvmx_bootmem_alloc_range(elements * size, 128, + octeon_reserve32_memory, + octeon_reserve32_memory + + (CONFIG_CAVIUM_RESERVE32 << 20) - + 1); + if (memory == NULL) + panic("Unable to allocate %u bytes for FPA pool %d\n", + elements * size, pool); + + pr_notice("Memory range %p - %p reserved for " + "hardware\n", memory, + memory + elements * size - 1); + + while (freed) { + cvmx_fpa_free(memory, pool, 0); + memory += size; + freed--; + } + } else { + while (freed) { + /* We need to force alignment to 128 bytes here */ + memory = kmalloc(size + 127, GFP_ATOMIC); + if (unlikely(memory == NULL)) { + pr_warning("Unable to allocate %u bytes for " + "FPA pool %d\n", + elements * size, pool); + break; + } + memory = (char *)(((unsigned long)memory + 127) & -128); + cvmx_fpa_free(memory, pool, 0); + freed--; + } + } + return elements - freed; +} + +/** + * Free memory previously allocated with cvm_oct_fill_hw_memory + * + * @pool: FPA pool to free + * @size: Size of each buffer in the pool + * @elements: Number of buffers that should be in the pool + */ +static void cvm_oct_free_hw_memory(int pool, int size, int elements) +{ + if (USE_32BIT_SHARED) { + pr_warning("Warning: 32bit shared memory is not freeable\n"); + } else { + char *memory; + do { + memory = cvmx_fpa_alloc(pool); + if (memory) { + elements--; + kfree(phys_to_virt(cvmx_ptr_to_phys(memory))); + } + } while (memory); + + if (elements < 0) + pr_warning("Freeing of pool %u had too many " + "buffers (%d)\n", + pool, elements); + else if (elements > 0) + pr_warning("Warning: Freeing of pool %u is " + "missing %d buffers\n", + pool, elements); + } +} + +int cvm_oct_mem_fill_fpa(int pool, int size, int elements) +{ + int freed; + if (USE_SKBUFFS_IN_HW) + freed = cvm_oct_fill_hw_skbuff(pool, size, elements); + else + freed = cvm_oct_fill_hw_memory(pool, size, elements); + return freed; +} + +void cvm_oct_mem_empty_fpa(int pool, int size, int elements) +{ + if (USE_SKBUFFS_IN_HW) + cvm_oct_free_hw_skbuff(pool, size, elements); + else + cvm_oct_free_hw_memory(pool, size, elements); +}
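+
+/*
+ * Editor's note: a minimal sketch, not part of the original patch;
+ * the helper name is hypothetical.  cvm_oct_fill_hw_skbuff() stores
+ * the skb pointer in the 8 bytes just below skb->data before handing
+ * the buffer to the FPA, so a receive path can map a hardware buffer
+ * back to its skb the same way cvm_oct_free_hw_skbuff() does.
+ */
+#if 0
+static inline struct sk_buff *cvm_oct_buffer_to_skb(char *hw_buffer)
+{
+	return *(struct sk_buff **)(hw_buffer - sizeof(void *));
+}
+#endif
diff --git a/drivers/staging/octeon/ethernet-mem.h b/drivers/staging/octeon/ethernet-mem.h new file mode 100644 index 0000000..713f2ed --- /dev/null +++ b/drivers/staging/octeon/ethernet-mem.h @@ -0,0 +1,29 @@ +/********************************************************************* + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2007 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. 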
+ * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information +********************************************************************/ + +int cvm_oct_mem_fill_fpa(int pool, int size, int elements); +void cvm_oct_mem_empty_fpa(int pool, int size, int elements); diff --git a/drivers/staging/octeon/ethernet-proc.c b/drivers/staging/octeon/ethernet-proc.c new file mode 100644 index 0000000..8fa88fc --- /dev/null +++ b/drivers/staging/octeon/ethernet-proc.c @@ -0,0 +1,256 @@ +/********************************************************************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2007 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information +**********************************************************************/ +#include <linux/kernel.h> +#include <linux/mii.h> +#include <linux/seq_file.h> +#include <linux/proc_fs.h> +#include <net/dst.h> + +#include <asm/octeon/octeon.h> + +#include "octeon-ethernet.h" +#include "ethernet-defines.h" + +#include "cvmx-helper.h" +#include "cvmx-pip.h" + +static unsigned long long cvm_oct_stats_read_switch(struct net_device *dev, + int phy_id, int offset) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + + priv->mii_info.mdio_write(dev, phy_id, 0x1d, 0xcc00 | offset); + return ((uint64_t) priv->mii_info. + mdio_read(dev, phy_id, + 0x1e) << 16) | (uint64_t) priv->mii_info. 
+ mdio_read(dev, phy_id, 0x1f); +} + +static int cvm_oct_stats_switch_show(struct seq_file *m, void *v) +{ + static const int ports[] = { 0, 1, 2, 3, 9, -1 }; + struct net_device *dev = cvm_oct_device[0]; + int index = 0; + + while (ports[index] != -1) { + + /* Latch port */ + struct octeon_ethernet *priv = netdev_priv(dev); + + priv->mii_info.mdio_write(dev, 0x1b, 0x1d, + 0xdc00 | ports[index]); + seq_printf(m, "\nSwitch Port %d\n", ports[index]); + seq_printf(m, "InGoodOctets: %12llu\t" + "OutOctets: %12llu\t" + "64 Octets: %12llu\n", + cvm_oct_stats_read_switch(dev, 0x1b, + 0x00) | + (cvm_oct_stats_read_switch(dev, 0x1b, 0x01) << 32), + cvm_oct_stats_read_switch(dev, 0x1b, + 0x0E) | + (cvm_oct_stats_read_switch(dev, 0x1b, 0x0F) << 32), + cvm_oct_stats_read_switch(dev, 0x1b, 0x08)); + + seq_printf(m, "InBadOctets: %12llu\t" + "OutUnicast: %12llu\t" + "65-127 Octets: %12llu\n", + cvm_oct_stats_read_switch(dev, 0x1b, 0x02), + cvm_oct_stats_read_switch(dev, 0x1b, 0x10), + cvm_oct_stats_read_switch(dev, 0x1b, 0x09)); + + seq_printf(m, "InUnicast: %12llu\t" + "OutBroadcasts: %12llu\t" + "128-255 Octets: %12llu\n", + cvm_oct_stats_read_switch(dev, 0x1b, 0x04), + cvm_oct_stats_read_switch(dev, 0x1b, 0x13), + cvm_oct_stats_read_switch(dev, 0x1b, 0x0A)); + + seq_printf(m, "InBroadcasts: %12llu\t" + "OutMulticasts: %12llu\t" + "256-511 Octets: %12llu\n", + cvm_oct_stats_read_switch(dev, 0x1b, 0x06), + cvm_oct_stats_read_switch(dev, 0x1b, 0x12), + cvm_oct_stats_read_switch(dev, 0x1b, 0x0B)); + + seq_printf(m, "InMulticasts: %12llu\t" + "OutPause: %12llu\t" + "512-1023 Octets:%12llu\n", + cvm_oct_stats_read_switch(dev, 0x1b, 0x07), + cvm_oct_stats_read_switch(dev, 0x1b, 0x15), + cvm_oct_stats_read_switch(dev, 0x1b, 0x0C)); + + seq_printf(m, "InPause: %12llu\t" + "Excessive: %12llu\t" + "1024-Max Octets:%12llu\n", + cvm_oct_stats_read_switch(dev, 0x1b, 0x16), + cvm_oct_stats_read_switch(dev, 0x1b, 0x11), + cvm_oct_stats_read_switch(dev, 0x1b, 0x0D)); + + seq_printf(m, "InUndersize: %12llu\t" + "Collisions: %12llu\n", + cvm_oct_stats_read_switch(dev, 0x1b, 0x18), + cvm_oct_stats_read_switch(dev, 0x1b, 0x1E)); + + seq_printf(m, "InFragments: %12llu\t" + "Deferred: %12llu\n", + cvm_oct_stats_read_switch(dev, 0x1b, 0x19), + cvm_oct_stats_read_switch(dev, 0x1b, 0x05)); + + seq_printf(m, "InOversize: %12llu\t" + "Single: %12llu\n", + cvm_oct_stats_read_switch(dev, 0x1b, 0x1A), + cvm_oct_stats_read_switch(dev, 0x1b, 0x14)); + + seq_printf(m, "InJabber: %12llu\t" + "Multiple: %12llu\n", + cvm_oct_stats_read_switch(dev, 0x1b, 0x1B), + cvm_oct_stats_read_switch(dev, 0x1b, 0x17)); + + seq_printf(m, "In RxErr: %12llu\t" + "OutFCSErr: %12llu\n", + cvm_oct_stats_read_switch(dev, 0x1b, 0x1C), + cvm_oct_stats_read_switch(dev, 0x1b, 0x03)); + + seq_printf(m, "InFCSErr: %12llu\t" + "Late: %12llu\n", + cvm_oct_stats_read_switch(dev, 0x1b, 0x1D), + cvm_oct_stats_read_switch(dev, 0x1b, 0x1F)); + index++; + } + return 0; +} + +/** + * User is reading /proc/octeon_ethernet_stats + * + * @m: + * @v: + * Returns + */ +static int cvm_oct_stats_show(struct seq_file *m, void *v) +{ + struct octeon_ethernet *priv; + int port; + + for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) { + + if (cvm_oct_device[port]) { + priv = netdev_priv(cvm_oct_device[port]); + + seq_printf(m, "\nOcteon Port %d (%s)\n", port, + cvm_oct_device[port]->name); + seq_printf(m, + "rx_packets: %12lu\t" + "tx_packets: %12lu\n", + priv->stats.rx_packets, + priv->stats.tx_packets); + seq_printf(m, + "rx_bytes: %12lu\t" + "tx_bytes: %12lu\n", + 
priv->stats.rx_bytes, priv->stats.tx_bytes); + seq_printf(m, + "rx_errors: %12lu\t" + "tx_errors: %12lu\n", + priv->stats.rx_errors, + priv->stats.tx_errors); + seq_printf(m, + "rx_dropped: %12lu\t" + "tx_dropped: %12lu\n", + priv->stats.rx_dropped, + priv->stats.tx_dropped); + seq_printf(m, + "rx_length_errors: %12lu\t" + "tx_aborted_errors: %12lu\n", + priv->stats.rx_length_errors, + priv->stats.tx_aborted_errors); + seq_printf(m, + "rx_over_errors: %12lu\t" + "tx_carrier_errors: %12lu\n", + priv->stats.rx_over_errors, + priv->stats.tx_carrier_errors); + seq_printf(m, + "rx_crc_errors: %12lu\t" + "tx_fifo_errors: %12lu\n", + priv->stats.rx_crc_errors, + priv->stats.tx_fifo_errors); + seq_printf(m, + "rx_frame_errors: %12lu\t" + "tx_heartbeat_errors: %12lu\n", + priv->stats.rx_frame_errors, + priv->stats.tx_heartbeat_errors); + seq_printf(m, + "rx_fifo_errors: %12lu\t" + "tx_window_errors: %12lu\n", + priv->stats.rx_fifo_errors, + priv->stats.tx_window_errors); + seq_printf(m, + "rx_missed_errors: %12lu\t" + "multicast: %12lu\n", + priv->stats.rx_missed_errors, + priv->stats.multicast); + } + } + + if (cvm_oct_device[0]) { + priv = netdev_priv(cvm_oct_device[0]); + if (priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII) + cvm_oct_stats_switch_show(m, v); + } + return 0; +} + +/** + * /proc/octeon_ethernet_stats was openned. Use the single_open iterator + * + * @inode: + * @file: + * Returns + */ +static int cvm_oct_stats_open(struct inode *inode, struct file *file) +{ + return single_open(file, cvm_oct_stats_show, NULL); +} + +static const struct file_operations cvm_oct_stats_operations = { + .open = cvm_oct_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +void cvm_oct_proc_initialize(void) +{ + struct proc_dir_entry *entry = + create_proc_entry("octeon_ethernet_stats", 0, NULL); + if (entry) + entry->proc_fops = &cvm_oct_stats_operations; +} + +void cvm_oct_proc_shutdown(void) +{ + remove_proc_entry("octeon_ethernet_stats", NULL); +} diff --git a/drivers/staging/octeon/ethernet-proc.h b/drivers/staging/octeon/ethernet-proc.h new file mode 100644 index 0000000..82c7d9f --- /dev/null +++ b/drivers/staging/octeon/ethernet-proc.h @@ -0,0 +1,29 @@ +/********************************************************************* + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2007 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. 
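For reference, the /proc wiring above is the stock one-shot seq_file pattern of this kernel generation. A minimal self-contained version, using the same old create_proc_entry()/proc_fops API as the patch (all example_* names are illustrative):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	seq_printf(m, "hello\n");	/* whole file generated in one call */
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

static const struct file_operations example_fops = {
	.open    = example_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init example_init(void)
{
	struct proc_dir_entry *entry = create_proc_entry("example", 0, NULL);

	if (entry)
		entry->proc_fops = &example_fops;
	return 0;
}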
+ * Contact Cavium Networks for more information +*********************************************************************/ + +void cvm_oct_proc_initialize(void); +void cvm_oct_proc_shutdown(void); diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c new file mode 100644 index 0000000..8579f16 --- /dev/null +++ b/drivers/staging/octeon/ethernet-rgmii.c @@ -0,0 +1,397 @@ +/********************************************************************* + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2007 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information +**********************************************************************/ +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/mii.h> +#include <net/dst.h> + +#include <asm/octeon/octeon.h> + +#include "ethernet-defines.h" +#include "octeon-ethernet.h" +#include "ethernet-common.h" +#include "ethernet-util.h" + +#include "cvmx-helper.h" + +#include <asm/octeon/cvmx-ipd-defs.h> +#include <asm/octeon/cvmx-npi-defs.h> +#include "cvmx-gmxx-defs.h" + +DEFINE_SPINLOCK(global_register_lock); + +static int number_rgmii_ports; + +static void cvm_oct_rgmii_poll(struct net_device *dev) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + unsigned long flags; + cvmx_helper_link_info_t link_info; + + /* + * Take the global register lock since we are going to touch + * registers that affect more than one port. + */ + spin_lock_irqsave(&global_register_lock, flags); + + link_info = cvmx_helper_link_get(priv->port); + if (link_info.u64 == priv->link_info) { + + /* + * If the 10Mbps preamble workaround is supported and we're + * at 10Mbps we may need to do some special checking. + */ + if (USE_10MBPS_PREAMBLE_WORKAROUND && (link_info.s.speed == 10)) { + + /* + * Read the GMXX_RXX_INT_REG[PCTERR] bit and + * see if we are getting preamble errors. + */ + int interface = INTERFACE(priv->port); + int index = INDEX(priv->port); + union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg; + gmxx_rxx_int_reg.u64 = + cvmx_read_csr(CVMX_GMXX_RXX_INT_REG + (index, interface)); + if (gmxx_rxx_int_reg.s.pcterr) { + + /* + * We are getting preamble errors at + * 10Mbps. Most likely the PHY is + * giving us packets with mis aligned + * preambles. In order to get these + * packets we need to disable preamble + * checking and do it in software. 
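A note on the register idiom used throughout the hardware code here: each CSR is modelled by an SDK union of the raw 64-bit word and named bitfields, so every update is a full-word read, a bitfield edit, and a full-word write. That read-modify-write is why the global lock is held around registers shared between ports. Schematically, with an entirely made-up register (a runnable sketch, not SDK code):

#include <stdint.h>

union example_csr {			/* made-up register layout */
	uint64_t u64;
	struct {
		uint64_t reserved:62;
		uint64_t pre_chk:1;	/* hardware preamble checking */
		uint64_t enable:1;
	} s;
};

static uint64_t fake_csr;		/* stands in for the memory-mapped CSR */

static uint64_t read_csr(void)    { return fake_csr; }
static void write_csr(uint64_t v) { fake_csr = v; }

static void disable_preamble_check(void)
{
	union example_csr reg;

	reg.u64 = read_csr();		/* read the whole 64-bit word */
	reg.s.pre_chk = 0;		/* edit one field */
	write_csr(reg.u64);		/* write the whole word back */
}

int main(void)
{
	disable_preamble_check();
	return 0;
}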
+				 */
+				union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
+				union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs;
+
+				/* Disable preamble checking */
+				gmxx_rxx_frm_ctl.u64 =
+				    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL
+						  (index, interface));
+				gmxx_rxx_frm_ctl.s.pre_chk = 0;
+				cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL
+					       (index, interface),
+					       gmxx_rxx_frm_ctl.u64);
+
+				/* Disable FCS stripping */
+				ipd_sub_port_fcs.u64 =
+				    cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
+				ipd_sub_port_fcs.s.port_bit &=
+				    0xffffffffull ^ (1ull << priv->port);
+				cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS,
+					       ipd_sub_port_fcs.u64);
+
+				/* Clear any error bits */
+				cvmx_write_csr(CVMX_GMXX_RXX_INT_REG
+					       (index, interface),
+					       gmxx_rxx_int_reg.u64);
+				DEBUGPRINT("%s: Using 10Mbps with software "
+					   "preamble removal\n",
+					   dev->name);
+			}
+		}
+		spin_unlock_irqrestore(&global_register_lock, flags);
+		return;
+	}
+
+	/*
+	 * If the 10Mbps preamble workaround is allowed, we need to turn
+	 * on preamble checking and FCS stripping, and clear the error
+	 * bits on every speed change. If errors occur during 10Mbps
+	 * operation the above code will change these settings.
+	 */
+	if (USE_10MBPS_PREAMBLE_WORKAROUND) {
+
+		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
+		union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs;
+		union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;
+		int interface = INTERFACE(priv->port);
+		int index = INDEX(priv->port);
+
+		/* Enable preamble checking */
+		gmxx_rxx_frm_ctl.u64 =
+		    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
+		gmxx_rxx_frm_ctl.s.pre_chk = 1;
+		cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface),
+			       gmxx_rxx_frm_ctl.u64);
+		/* Enable FCS stripping */
+		ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
+		ipd_sub_port_fcs.s.port_bit |= 1ull << priv->port;
+		cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);
+		/* Clear any error bits */
+		gmxx_rxx_int_reg.u64 =
+		    cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, interface));
+		cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface),
+			       gmxx_rxx_int_reg.u64);
+	}
+
+	link_info = cvmx_helper_link_autoconf(priv->port);
+	priv->link_info = link_info.u64;
+	spin_unlock_irqrestore(&global_register_lock, flags);
+
+	/* Tell Linux */
+	if (link_info.s.link_up) {
+
+		if (!netif_carrier_ok(dev))
+			netif_carrier_on(dev);
+		if (priv->queue != -1)
+			DEBUGPRINT
+			    ("%s: %u Mbps %s duplex, port %2d, queue %2d\n",
+			     dev->name, link_info.s.speed,
+			     (link_info.s.full_duplex) ? "Full" : "Half",
+			     priv->port, priv->queue);
+		else
+			DEBUGPRINT("%s: %u Mbps %s duplex, port %2d, POW\n",
+				   dev->name, link_info.s.speed,
+				   (link_info.s.full_duplex) ?
"Full" : "Half", + priv->port); + } else { + + if (netif_carrier_ok(dev)) + netif_carrier_off(dev); + DEBUGPRINT("%s: Link down\n", dev->name); + } +} + +static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id) +{ + union cvmx_npi_rsl_int_blocks rsl_int_blocks; + int index; + irqreturn_t return_status = IRQ_NONE; + + rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS); + + /* Check and see if this interrupt was caused by the GMX0 block */ + if (rsl_int_blocks.s.gmx0) { + + int interface = 0; + /* Loop through every port of this interface */ + for (index = 0; + index < cvmx_helper_ports_on_interface(interface); + index++) { + + /* Read the GMX interrupt status bits */ + union cvmx_gmxx_rxx_int_reg gmx_rx_int_reg; + gmx_rx_int_reg.u64 = + cvmx_read_csr(CVMX_GMXX_RXX_INT_REG + (index, interface)); + gmx_rx_int_reg.u64 &= + cvmx_read_csr(CVMX_GMXX_RXX_INT_EN + (index, interface)); + /* Poll the port if inband status changed */ + if (gmx_rx_int_reg.s.phy_dupx + || gmx_rx_int_reg.s.phy_link + || gmx_rx_int_reg.s.phy_spd) { + + struct net_device *dev = + cvm_oct_device[cvmx_helper_get_ipd_port + (interface, index)]; + if (dev) + cvm_oct_rgmii_poll(dev); + gmx_rx_int_reg.u64 = 0; + gmx_rx_int_reg.s.phy_dupx = 1; + gmx_rx_int_reg.s.phy_link = 1; + gmx_rx_int_reg.s.phy_spd = 1; + cvmx_write_csr(CVMX_GMXX_RXX_INT_REG + (index, interface), + gmx_rx_int_reg.u64); + return_status = IRQ_HANDLED; + } + } + } + + /* Check and see if this interrupt was caused by the GMX1 block */ + if (rsl_int_blocks.s.gmx1) { + + int interface = 1; + /* Loop through every port of this interface */ + for (index = 0; + index < cvmx_helper_ports_on_interface(interface); + index++) { + + /* Read the GMX interrupt status bits */ + union cvmx_gmxx_rxx_int_reg gmx_rx_int_reg; + gmx_rx_int_reg.u64 = + cvmx_read_csr(CVMX_GMXX_RXX_INT_REG + (index, interface)); + gmx_rx_int_reg.u64 &= + cvmx_read_csr(CVMX_GMXX_RXX_INT_EN + (index, interface)); + /* Poll the port if inband status changed */ + if (gmx_rx_int_reg.s.phy_dupx + || gmx_rx_int_reg.s.phy_link + || gmx_rx_int_reg.s.phy_spd) { + + struct net_device *dev = + cvm_oct_device[cvmx_helper_get_ipd_port + (interface, index)]; + if (dev) + cvm_oct_rgmii_poll(dev); + gmx_rx_int_reg.u64 = 0; + gmx_rx_int_reg.s.phy_dupx = 1; + gmx_rx_int_reg.s.phy_link = 1; + gmx_rx_int_reg.s.phy_spd = 1; + cvmx_write_csr(CVMX_GMXX_RXX_INT_REG + (index, interface), + gmx_rx_int_reg.u64); + return_status = IRQ_HANDLED; + } + } + } + return return_status; +} + +static int cvm_oct_rgmii_open(struct net_device *dev) +{ + union cvmx_gmxx_prtx_cfg gmx_cfg; + struct octeon_ethernet *priv = netdev_priv(dev); + int interface = INTERFACE(priv->port); + int index = INDEX(priv->port); + cvmx_helper_link_info_t link_info; + + gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); + gmx_cfg.s.en = 1; + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); + + if (!octeon_is_simulation()) { + link_info = cvmx_helper_link_get(priv->port); + if (!link_info.s.link_up) + netif_carrier_off(dev); + } + + return 0; +} + +static int cvm_oct_rgmii_stop(struct net_device *dev) +{ + union cvmx_gmxx_prtx_cfg gmx_cfg; + struct octeon_ethernet *priv = netdev_priv(dev); + int interface = INTERFACE(priv->port); + int index = INDEX(priv->port); + + gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); + gmx_cfg.s.en = 0; + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); + return 0; +} + +int cvm_oct_rgmii_init(struct net_device *dev) +{ + struct 
octeon_ethernet *priv = netdev_priv(dev); + int r; + + cvm_oct_common_init(dev); + dev->open = cvm_oct_rgmii_open; + dev->stop = cvm_oct_rgmii_stop; + dev->stop(dev); + + /* + * Due to GMX errata in CN3XXX series chips, it is necessary + * to take the link down immediately whne the PHY changes + * state. In order to do this we call the poll function every + * time the RGMII inband status changes. This may cause + * problems if the PHY doesn't implement inband status + * properly. + */ + if (number_rgmii_ports == 0) { + r = request_irq(OCTEON_IRQ_RML, cvm_oct_rgmii_rml_interrupt, + IRQF_SHARED, "RGMII", &number_rgmii_ports); + } + number_rgmii_ports++; + + /* + * Only true RGMII ports need to be polled. In GMII mode, port + * 0 is really a RGMII port. + */ + if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII) + && (priv->port == 0)) + || (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) { + + if (!octeon_is_simulation()) { + + union cvmx_gmxx_rxx_int_en gmx_rx_int_en; + int interface = INTERFACE(priv->port); + int index = INDEX(priv->port); + + /* + * Enable interrupts on inband status changes + * for this port. + */ + gmx_rx_int_en.u64 = + cvmx_read_csr(CVMX_GMXX_RXX_INT_EN + (index, interface)); + gmx_rx_int_en.s.phy_dupx = 1; + gmx_rx_int_en.s.phy_link = 1; + gmx_rx_int_en.s.phy_spd = 1; + cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface), + gmx_rx_int_en.u64); + priv->poll = cvm_oct_rgmii_poll; + } + } + + return 0; +} + +void cvm_oct_rgmii_uninit(struct net_device *dev) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + cvm_oct_common_uninit(dev); + + /* + * Only true RGMII ports need to be polled. In GMII mode, port + * 0 is really a RGMII port. + */ + if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII) + && (priv->port == 0)) + || (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) { + + if (!octeon_is_simulation()) { + + union cvmx_gmxx_rxx_int_en gmx_rx_int_en; + int interface = INTERFACE(priv->port); + int index = INDEX(priv->port); + + /* + * Disable interrupts on inband status changes + * for this port. + */ + gmx_rx_int_en.u64 = + cvmx_read_csr(CVMX_GMXX_RXX_INT_EN + (index, interface)); + gmx_rx_int_en.s.phy_dupx = 0; + gmx_rx_int_en.s.phy_link = 0; + gmx_rx_int_en.s.phy_spd = 0; + cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface), + gmx_rx_int_en.u64); + } + } + + /* Remove the interrupt handler when the last port is removed. */ + number_rgmii_ports--; + if (number_rgmii_ports == 0) + free_irq(OCTEON_IRQ_RML, &number_rgmii_ports); +} diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c new file mode 100644 index 0000000..1b237b7 --- /dev/null +++ b/drivers/staging/octeon/ethernet-rx.c @@ -0,0 +1,505 @@ +/********************************************************************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2007 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. 
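The init/uninit pair above shares one RML interrupt line among all RGMII ports by reference counting: the first port requests the shared line with number_rgmii_ports as the dev_id cookie, and the last port to go away frees it with the same cookie. The skeleton of that pattern, with illustrative names (and with the request_irq() return value checked, which the code above stores but ignores):

#include <linux/interrupt.h>

#define EXAMPLE_IRQ	42		/* illustrative line number */

static int example_users;		/* doubles as the dev_id cookie */

static irqreturn_t example_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_attach(void)
{
	if (example_users == 0) {
		int r = request_irq(EXAMPLE_IRQ, example_handler,
				    IRQF_SHARED, "example", &example_users);
		if (r)
			return r;	/* don't bump the count on failure */
	}
	example_users++;
	return 0;
}

static void example_detach(void)
{
	if (--example_users == 0)
		free_irq(EXAMPLE_IRQ, &example_users);
}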
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+**********************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/cache.h>
+#include <linux/netdevice.h>
+#include <linux/init.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/string.h>
+#include <linux/prefetch.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <net/dst.h>
+#ifdef CONFIG_XFRM
+#include <linux/xfrm.h>
+#include <net/xfrm.h>
+#endif /* CONFIG_XFRM */
+
+#include <asm/atomic.h>
+
+#include <asm/octeon/octeon.h>
+
+#include "ethernet-defines.h"
+#include "octeon-ethernet.h"
+#include "ethernet-mem.h"
+#include "ethernet-util.h"
+
+#include "cvmx-helper.h"
+#include "cvmx-wqe.h"
+#include "cvmx-fau.h"
+#include "cvmx-pow.h"
+#include "cvmx-pip.h"
+#include "cvmx-scratch.h"
+
+#include "cvmx-gmxx-defs.h"
+
+struct cvm_tasklet_wrapper {
+	struct tasklet_struct t;
+};
+
+/*
+ * Aligning the tasklet_struct on cacheline boundaries seems to decrease
+ * throughput even though in theory it would reduce contention on the
+ * cache lines containing the locks.
+ */
+
+static struct cvm_tasklet_wrapper cvm_oct_tasklet[NR_CPUS];
+
+/**
+ * Interrupt handler. The interrupt occurs whenever the POW
+ * transitions from 0->1 packets in our group.
+ *
+ * @cpl:    Interrupt number (unused)
+ * @dev_id: Device being interrupted (unused)
+ * Returns IRQ_HANDLED
+ */
+irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
+{
+	/* Acknowledge the interrupt */
+	if (INTERRUPT_LIMIT)
+		cvmx_write_csr(CVMX_POW_WQ_INT, 1 << pow_receive_group);
+	else
+		cvmx_write_csr(CVMX_POW_WQ_INT, 0x10001 << pow_receive_group);
+	preempt_disable();
+	tasklet_schedule(&cvm_oct_tasklet[smp_processor_id()].t);
+	preempt_enable();
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * This is called when the kernel needs to manually poll the
+ * device. For Octeon, this is simply calling the interrupt
+ * handler. We actually poll all the devices, not just the
+ * one supplied.
+ *
+ * @dev: Device to poll. Unused
+ */
+void cvm_oct_poll_controller(struct net_device *dev)
+{
+	preempt_disable();
+	tasklet_schedule(&cvm_oct_tasklet[smp_processor_id()].t);
+	preempt_enable();
+}
+#endif
+
+/**
+ * This is called on receive errors, and determines if the packet
+ * can be dropped early on in cvm_oct_tasklet_rx().
+ *
+ * @work: Work queue entry pointing to the packet.
+ * Returns Non-zero if the packet can be dropped, zero otherwise.
+ */
+static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
+{
+	if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) {
+		/*
+		 * Ignore length errors on min size packets. Some
+		 * equipment incorrectly pads packets to 64+4FCS
+		 * instead of 60+4FCS. Note these packets still get
+		 * counted as frame errors.
+		 */
+	} else
+	    if (USE_10MBPS_PREAMBLE_WORKAROUND
+		&& ((work->word2.snoip.err_code == 5)
+		    || (work->word2.snoip.err_code == 7))) {
+
+		/*
+		 * We received a packet with either an alignment error
+		 * or a FCS error. This may be signalling that we are
+		 * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
+		 * off.
If this is the case we need to parse the + * packet to determine if we can remove a non spec + * preamble and generate a correct packet. + */ + int interface = cvmx_helper_get_interface_num(work->ipprt); + int index = cvmx_helper_get_interface_index_num(work->ipprt); + union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl; + gmxx_rxx_frm_ctl.u64 = + cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface)); + if (gmxx_rxx_frm_ctl.s.pre_chk == 0) { + + uint8_t *ptr = + cvmx_phys_to_ptr(work->packet_ptr.s.addr); + int i = 0; + + while (i < work->len - 1) { + if (*ptr != 0x55) + break; + ptr++; + i++; + } + + if (*ptr == 0xd5) { + /* + DEBUGPRINT("Port %d received 0xd5 preamble\n", work->ipprt); + */ + work->packet_ptr.s.addr += i + 1; + work->len -= i + 5; + } else if ((*ptr & 0xf) == 0xd) { + /* + DEBUGPRINT("Port %d received 0x?d preamble\n", work->ipprt); + */ + work->packet_ptr.s.addr += i; + work->len -= i + 4; + for (i = 0; i < work->len; i++) { + *ptr = + ((*ptr & 0xf0) >> 4) | + ((*(ptr + 1) & 0xf) << 4); + ptr++; + } + } else { + DEBUGPRINT("Port %d unknown preamble, packet " + "dropped\n", + work->ipprt); + /* + cvmx_helper_dump_packet(work); + */ + cvm_oct_free_work(work); + return 1; + } + } + } else { + DEBUGPRINT("Port %d receive error code %d, packet dropped\n", + work->ipprt, work->word2.snoip.err_code); + cvm_oct_free_work(work); + return 1; + } + + return 0; +} + +/** + * Tasklet function that is scheduled on a core when an interrupt occurs. + * + * @unused: + */ +void cvm_oct_tasklet_rx(unsigned long unused) +{ + const int coreid = cvmx_get_core_num(); + uint64_t old_group_mask; + uint64_t old_scratch; + int rx_count = 0; + int number_to_free; + int num_freed; + int packet_not_copied; + + /* Prefetch cvm_oct_device since we know we need it soon */ + prefetch(cvm_oct_device); + + if (USE_ASYNC_IOBDMA) { + /* Save scratch in case userspace is using it */ + CVMX_SYNCIOBDMA; + old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH); + } + + /* Only allow work for our group (and preserve priorities) */ + old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid)); + cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), + (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group); + + if (USE_ASYNC_IOBDMA) + cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT); + + while (1) { + struct sk_buff *skb = NULL; + int skb_in_hw; + cvmx_wqe_t *work; + + if (USE_ASYNC_IOBDMA) { + work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH); + } else { + if ((INTERRUPT_LIMIT == 0) + || likely(rx_count < MAX_RX_PACKETS)) + work = + cvmx_pow_work_request_sync + (CVMX_POW_NO_WAIT); + else + work = NULL; + } + prefetch(work); + if (work == NULL) + break; + + /* + * Limit each core to processing MAX_RX_PACKETS + * packets without a break. This way the RX can't + * starve the TX task. 
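The repair logic above handles two damaged-preamble shapes: whole leading 0x55 filler bytes followed by the 0xd5 start-of-frame delimiter, and a nibble misalignment where the delimiter shows up as 0x?d and the packet has to be shifted by four bits. The byte-aligned case reduces to a scan like this (a user-space sketch, not the driver's code; the driver additionally trims the 4-byte FCS):

#include <assert.h>
#include <stdint.h>

/* Return the offset of the first payload byte, or -1 to drop. */
static int skip_preamble(const uint8_t *p, int len)
{
	int i = 0;

	while (i < len - 1 && p[i] == 0x55)	/* 10101010... filler */
		i++;
	if (p[i] == 0xd5)			/* start-of-frame delimiter */
		return i + 1;
	return -1;
}

int main(void)
{
	const uint8_t frame[] = { 0x55, 0x55, 0x55, 0xd5, 0xde, 0xad };

	assert(skip_preamble(frame, sizeof(frame)) == 4);
	return 0;
}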
+ */ + if (USE_ASYNC_IOBDMA) { + + if ((INTERRUPT_LIMIT == 0) + || likely(rx_count < MAX_RX_PACKETS)) + cvmx_pow_work_request_async_nocheck + (CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT); + else { + cvmx_scratch_write64(CVMX_SCR_SCRATCH, + 0x8000000000000000ull); + cvmx_pow_tag_sw_null_nocheck(); + } + } + + skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1; + if (likely(skb_in_hw)) { + skb = + *(struct sk_buff + **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - + sizeof(void *)); + prefetch(&skb->head); + prefetch(&skb->len); + } + prefetch(cvm_oct_device[work->ipprt]); + + rx_count++; + /* Immediately throw away all packets with receive errors */ + if (unlikely(work->word2.snoip.rcv_error)) { + if (cvm_oct_check_rcv_error(work)) + continue; + } + + /* + * We can only use the zero copy path if skbuffs are + * in the FPA pool and the packet fits in a single + * buffer. + */ + if (likely(skb_in_hw)) { + /* + * This calculation was changed in case the + * skb header is using a different address + * aliasing type than the buffer. It doesn't + * make any differnece now, but the new one is + * more correct. + */ + skb->data = + skb->head + work->packet_ptr.s.addr - + cvmx_ptr_to_phys(skb->head); + prefetch(skb->data); + skb->len = work->len; + skb_set_tail_pointer(skb, skb->len); + packet_not_copied = 1; + } else { + + /* + * We have to copy the packet. First allocate + * an skbuff for it. + */ + skb = dev_alloc_skb(work->len); + if (!skb) { + DEBUGPRINT("Port %d failed to allocate " + "skbuff, packet dropped\n", + work->ipprt); + cvm_oct_free_work(work); + continue; + } + + /* + * Check if we've received a packet that was + * entirely stored in the work entry. This is + * untested. + */ + if (unlikely(work->word2.s.bufs == 0)) { + uint8_t *ptr = work->packet_data; + + if (likely(!work->word2.s.not_IP)) { + /* + * The beginning of the packet + * moves for IP packets. + */ + if (work->word2.s.is_v6) + ptr += 2; + else + ptr += 6; + } + memcpy(skb_put(skb, work->len), ptr, work->len); + /* No packet buffers to free */ + } else { + int segments = work->word2.s.bufs; + union cvmx_buf_ptr segment_ptr = + work->packet_ptr; + int len = work->len; + + while (segments--) { + union cvmx_buf_ptr next_ptr = + *(union cvmx_buf_ptr *) + cvmx_phys_to_ptr(segment_ptr.s. + addr - 8); + /* + * Octeon Errata PKI-100: The segment size is + * wrong. Until it is fixed, calculate the + * segment size based on the packet pool + * buffer size. When it is fixed, the + * following line should be replaced with this + * one: int segment_size = + * segment_ptr.s.size; + */ + int segment_size = + CVMX_FPA_PACKET_POOL_SIZE - + (segment_ptr.s.addr - + (((segment_ptr.s.addr >> 7) - + segment_ptr.s.back) << 7)); + /* Don't copy more than what is left + in the packet */ + if (segment_size > len) + segment_size = len; + /* Copy the data into the packet */ + memcpy(skb_put(skb, segment_size), + cvmx_phys_to_ptr(segment_ptr.s. 
+ addr), + segment_size); + /* Reduce the amount of bytes left + to copy */ + len -= segment_size; + segment_ptr = next_ptr; + } + } + packet_not_copied = 0; + } + + if (likely((work->ipprt < TOTAL_NUMBER_OF_PORTS) && + cvm_oct_device[work->ipprt])) { + struct net_device *dev = cvm_oct_device[work->ipprt]; + struct octeon_ethernet *priv = netdev_priv(dev); + + /* Only accept packets for devices + that are currently up */ + if (likely(dev->flags & IFF_UP)) { + skb->protocol = eth_type_trans(skb, dev); + skb->dev = dev; + + if (unlikely + (work->word2.s.not_IP + || work->word2.s.IP_exc + || work->word2.s.L4_error)) + skb->ip_summed = CHECKSUM_NONE; + else + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* Increment RX stats for virtual ports */ + if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) { +#ifdef CONFIG_64BIT + atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets); + atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes); +#else + atomic_add(1, (atomic_t *)&priv->stats.rx_packets); + atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes); +#endif + } + netif_receive_skb(skb); + } else { + /* + * Drop any packet received for a + * device that isn't up. + */ + /* + DEBUGPRINT("%s: Device not up, packet dropped\n", + dev->name); + */ +#ifdef CONFIG_64BIT + atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped); +#else + atomic_add(1, (atomic_t *)&priv->stats.rx_dropped); +#endif + dev_kfree_skb_irq(skb); + } + } else { + /* + * Drop any packet received for a device that + * doesn't exist. + */ + DEBUGPRINT("Port %d not controlled by Linux, packet " + "dropped\n", + work->ipprt); + dev_kfree_skb_irq(skb); + } + /* + * Check to see if the skbuff and work share the same + * packet buffer. + */ + if (USE_SKBUFFS_IN_HW && likely(packet_not_copied)) { + /* + * This buffer needs to be replaced, increment + * the number of buffers we need to free by + * one. 
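Stepping back to the segment loop above: the PKI-100 workaround recomputes each segment's usable size from pool geometry because the hardware's size field is wrong. The true buffer start is the segment address rounded down to 128 bytes, backed off by 'back' 128-byte blocks; the usable size is whatever remains of the pool buffer past the segment address. The arithmetic in isolation (the addresses below are made up):

#include <assert.h>
#include <stdint.h>

static int segment_size(uint64_t addr, uint64_t back, int pool_size)
{
	/* Round down to 128 bytes, then back off 'back' 128-byte blocks
	 * to find where the pool buffer really starts. */
	uint64_t buffer_start = ((addr >> 7) - back) << 7;

	return pool_size - (int)(addr - buffer_start);
}

int main(void)
{
	/* A 2048-byte pool buffer at 0x10000, segment data 0x60 bytes in: */
	assert(segment_size(0x10060, 0, 2048) == 2048 - 0x60);
	return 0;
}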
+ */ + cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, + 1); + + cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, + DONT_WRITEBACK(1)); + } else { + cvm_oct_free_work(work); + } + } + + /* Restore the original POW group mask */ + cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask); + if (USE_ASYNC_IOBDMA) { + /* Restore the scratch area */ + cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch); + } + + if (USE_SKBUFFS_IN_HW) { + /* Refill the packet buffer pool */ + number_to_free = + cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); + + if (number_to_free > 0) { + cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, + -number_to_free); + num_freed = + cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, + CVMX_FPA_PACKET_POOL_SIZE, + number_to_free); + if (num_freed != number_to_free) { + cvmx_fau_atomic_add32 + (FAU_NUM_PACKET_BUFFERS_TO_FREE, + number_to_free - num_freed); + } + } + } +} + +void cvm_oct_rx_initialize(void) +{ + int i; + /* Initialize all of the tasklets */ + for (i = 0; i < NR_CPUS; i++) + tasklet_init(&cvm_oct_tasklet[i].t, cvm_oct_tasklet_rx, 0); +} + +void cvm_oct_rx_shutdown(void) +{ + int i; + /* Shutdown all of the tasklets */ + for (i = 0; i < NR_CPUS; i++) + tasklet_kill(&cvm_oct_tasklet[i].t); +} diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h new file mode 100644 index 0000000..a9b72b8 --- /dev/null +++ b/drivers/staging/octeon/ethernet-rx.h @@ -0,0 +1,33 @@ +/********************************************************************* + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2007 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information +*********************************************************************/ + +irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id); +void cvm_oct_poll_controller(struct net_device *dev); +void cvm_oct_tasklet_rx(unsigned long unused); + +void cvm_oct_rx_initialize(void); +void cvm_oct_rx_shutdown(void); diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c new file mode 100644 index 0000000..58fa39c --- /dev/null +++ b/drivers/staging/octeon/ethernet-sgmii.c @@ -0,0 +1,129 @@ +/********************************************************************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2007 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. 
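The refill step just above is a small deferred-work pattern: the RX path counts consumed buffers in a FAU register, and the refill claims the whole count, allocates, and credits back any shortfall so nothing is lost. With a plain integer standing in for the FAU counter (a runnable sketch, not the driver's code):

#include <stdio.h>

static int buffers_owed;	/* stands in for the FAU register */

static int refill_pool(int wanted)
{
	return wanted - 1;	/* pretend one allocation failed */
}

static void refill_step(void)
{
	int claimed = buffers_owed;

	if (claimed <= 0)
		return;
	buffers_owed -= claimed;		/* claim the work */
	int done = refill_pool(claimed);
	if (done != claimed)
		buffers_owed += claimed - done;	/* put back the shortfall */
}

int main(void)
{
	buffers_owed = 5;
	refill_step();
	printf("still owed: %d\n", buffers_owed);	/* prints 1 */
	return 0;
}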
+ * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information +**********************************************************************/ +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/mii.h> +#include <net/dst.h> + +#include <asm/octeon/octeon.h> + +#include "ethernet-defines.h" +#include "octeon-ethernet.h" +#include "ethernet-util.h" +#include "ethernet-common.h" + +#include "cvmx-helper.h" + +#include "cvmx-gmxx-defs.h" + +static int cvm_oct_sgmii_open(struct net_device *dev) +{ + union cvmx_gmxx_prtx_cfg gmx_cfg; + struct octeon_ethernet *priv = netdev_priv(dev); + int interface = INTERFACE(priv->port); + int index = INDEX(priv->port); + cvmx_helper_link_info_t link_info; + + gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); + gmx_cfg.s.en = 1; + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); + + if (!octeon_is_simulation()) { + link_info = cvmx_helper_link_get(priv->port); + if (!link_info.s.link_up) + netif_carrier_off(dev); + } + + return 0; +} + +static int cvm_oct_sgmii_stop(struct net_device *dev) +{ + union cvmx_gmxx_prtx_cfg gmx_cfg; + struct octeon_ethernet *priv = netdev_priv(dev); + int interface = INTERFACE(priv->port); + int index = INDEX(priv->port); + + gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); + gmx_cfg.s.en = 0; + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); + return 0; +} + +static void cvm_oct_sgmii_poll(struct net_device *dev) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + cvmx_helper_link_info_t link_info; + + link_info = cvmx_helper_link_get(priv->port); + if (link_info.u64 == priv->link_info) + return; + + link_info = cvmx_helper_link_autoconf(priv->port); + priv->link_info = link_info.u64; + + /* Tell Linux */ + if (link_info.s.link_up) { + + if (!netif_carrier_ok(dev)) + netif_carrier_on(dev); + if (priv->queue != -1) + DEBUGPRINT + ("%s: %u Mbps %s duplex, port %2d, queue %2d\n", + dev->name, link_info.s.speed, + (link_info.s.full_duplex) ? "Full" : "Half", + priv->port, priv->queue); + else + DEBUGPRINT("%s: %u Mbps %s duplex, port %2d, POW\n", + dev->name, link_info.s.speed, + (link_info.s.full_duplex) ? 
"Full" : "Half", + priv->port); + } else { + if (netif_carrier_ok(dev)) + netif_carrier_off(dev); + DEBUGPRINT("%s: Link down\n", dev->name); + } +} + +int cvm_oct_sgmii_init(struct net_device *dev) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + cvm_oct_common_init(dev); + dev->open = cvm_oct_sgmii_open; + dev->stop = cvm_oct_sgmii_stop; + dev->stop(dev); + if (!octeon_is_simulation()) + priv->poll = cvm_oct_sgmii_poll; + + /* FIXME: Need autoneg logic */ + return 0; +} + +void cvm_oct_sgmii_uninit(struct net_device *dev) +{ + cvm_oct_common_uninit(dev); +} diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c new file mode 100644 index 0000000..e0971bb --- /dev/null +++ b/drivers/staging/octeon/ethernet-spi.c @@ -0,0 +1,323 @@ +/********************************************************************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2007 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. 
+ * Contact Cavium Networks for more information +**********************************************************************/ +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/mii.h> +#include <net/dst.h> + +#include <asm/octeon/octeon.h> + +#include "ethernet-defines.h" +#include "octeon-ethernet.h" +#include "ethernet-common.h" +#include "ethernet-util.h" + +#include "cvmx-spi.h" + +#include <asm/octeon/cvmx-npi-defs.h> +#include "cvmx-spxx-defs.h" +#include "cvmx-stxx-defs.h" + +static int number_spi_ports; +static int need_retrain[2] = { 0, 0 }; + +static irqreturn_t cvm_oct_spi_rml_interrupt(int cpl, void *dev_id) +{ + irqreturn_t return_status = IRQ_NONE; + union cvmx_npi_rsl_int_blocks rsl_int_blocks; + + /* Check and see if this interrupt was caused by the GMX block */ + rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS); + if (rsl_int_blocks.s.spx1) { /* 19 - SPX1_INT_REG & STX1_INT_REG */ + + union cvmx_spxx_int_reg spx_int_reg; + union cvmx_stxx_int_reg stx_int_reg; + + spx_int_reg.u64 = cvmx_read_csr(CVMX_SPXX_INT_REG(1)); + cvmx_write_csr(CVMX_SPXX_INT_REG(1), spx_int_reg.u64); + if (!need_retrain[1]) { + + spx_int_reg.u64 &= cvmx_read_csr(CVMX_SPXX_INT_MSK(1)); + if (spx_int_reg.s.spf) + pr_err("SPI1: SRX Spi4 interface down\n"); + if (spx_int_reg.s.calerr) + pr_err("SPI1: SRX Spi4 Calendar table " + "parity error\n"); + if (spx_int_reg.s.syncerr) + pr_err("SPI1: SRX Consecutive Spi4 DIP4 " + "errors have exceeded " + "SPX_ERR_CTL[ERRCNT]\n"); + if (spx_int_reg.s.diperr) + pr_err("SPI1: SRX Spi4 DIP4 error\n"); + if (spx_int_reg.s.tpaovr) + pr_err("SPI1: SRX Selected port has hit " + "TPA overflow\n"); + if (spx_int_reg.s.rsverr) + pr_err("SPI1: SRX Spi4 reserved control " + "word detected\n"); + if (spx_int_reg.s.drwnng) + pr_err("SPI1: SRX Spi4 receive FIFO " + "drowning/overflow\n"); + if (spx_int_reg.s.clserr) + pr_err("SPI1: SRX Spi4 packet closed on " + "non-16B alignment without EOP\n"); + if (spx_int_reg.s.spiovr) + pr_err("SPI1: SRX Spi4 async FIFO overflow\n"); + if (spx_int_reg.s.abnorm) + pr_err("SPI1: SRX Abnormal packet " + "termination (ERR bit)\n"); + if (spx_int_reg.s.prtnxa) + pr_err("SPI1: SRX Port out of range\n"); + } + + stx_int_reg.u64 = cvmx_read_csr(CVMX_STXX_INT_REG(1)); + cvmx_write_csr(CVMX_STXX_INT_REG(1), stx_int_reg.u64); + if (!need_retrain[1]) { + + stx_int_reg.u64 &= cvmx_read_csr(CVMX_STXX_INT_MSK(1)); + if (stx_int_reg.s.syncerr) + pr_err("SPI1: STX Interface encountered a " + "fatal error\n"); + if (stx_int_reg.s.frmerr) + pr_err("SPI1: STX FRMCNT has exceeded " + "STX_DIP_CNT[MAXFRM]\n"); + if (stx_int_reg.s.unxfrm) + pr_err("SPI1: STX Unexpected framing " + "sequence\n"); + if (stx_int_reg.s.nosync) + pr_err("SPI1: STX ERRCNT has exceeded " + "STX_DIP_CNT[MAXDIP]\n"); + if (stx_int_reg.s.diperr) + pr_err("SPI1: STX DIP2 error on the Spi4 " + "Status channel\n"); + if (stx_int_reg.s.datovr) + pr_err("SPI1: STX Spi4 FIFO overflow error\n"); + if (stx_int_reg.s.ovrbst) + pr_err("SPI1: STX Transmit packet burst " + "too big\n"); + if (stx_int_reg.s.calpar1) + pr_err("SPI1: STX Calendar Table Parity " + "Error Bank1\n"); + if (stx_int_reg.s.calpar0) + pr_err("SPI1: STX Calendar Table Parity " + "Error Bank0\n"); + } + + cvmx_write_csr(CVMX_SPXX_INT_MSK(1), 0); + cvmx_write_csr(CVMX_STXX_INT_MSK(1), 0); + need_retrain[1] = 1; + return_status = IRQ_HANDLED; + } + + if (rsl_int_blocks.s.spx0) { /* 18 - SPX0_INT_REG & STX0_INT_REG */ + union cvmx_spxx_int_reg spx_int_reg; + union cvmx_stxx_int_reg stx_int_reg; + + 
spx_int_reg.u64 = cvmx_read_csr(CVMX_SPXX_INT_REG(0)); + cvmx_write_csr(CVMX_SPXX_INT_REG(0), spx_int_reg.u64); + if (!need_retrain[0]) { + + spx_int_reg.u64 &= cvmx_read_csr(CVMX_SPXX_INT_MSK(0)); + if (spx_int_reg.s.spf) + pr_err("SPI0: SRX Spi4 interface down\n"); + if (spx_int_reg.s.calerr) + pr_err("SPI0: SRX Spi4 Calendar table " + "parity error\n"); + if (spx_int_reg.s.syncerr) + pr_err("SPI0: SRX Consecutive Spi4 DIP4 " + "errors have exceeded " + "SPX_ERR_CTL[ERRCNT]\n"); + if (spx_int_reg.s.diperr) + pr_err("SPI0: SRX Spi4 DIP4 error\n"); + if (spx_int_reg.s.tpaovr) + pr_err("SPI0: SRX Selected port has hit " + "TPA overflow\n"); + if (spx_int_reg.s.rsverr) + pr_err("SPI0: SRX Spi4 reserved control " + "word detected\n"); + if (spx_int_reg.s.drwnng) + pr_err("SPI0: SRX Spi4 receive FIFO " + "drowning/overflow\n"); + if (spx_int_reg.s.clserr) + pr_err("SPI0: SRX Spi4 packet closed on " + "non-16B alignment without EOP\n"); + if (spx_int_reg.s.spiovr) + pr_err("SPI0: SRX Spi4 async FIFO overflow\n"); + if (spx_int_reg.s.abnorm) + pr_err("SPI0: SRX Abnormal packet " + "termination (ERR bit)\n"); + if (spx_int_reg.s.prtnxa) + pr_err("SPI0: SRX Port out of range\n"); + } + + stx_int_reg.u64 = cvmx_read_csr(CVMX_STXX_INT_REG(0)); + cvmx_write_csr(CVMX_STXX_INT_REG(0), stx_int_reg.u64); + if (!need_retrain[0]) { + + stx_int_reg.u64 &= cvmx_read_csr(CVMX_STXX_INT_MSK(0)); + if (stx_int_reg.s.syncerr) + pr_err("SPI0: STX Interface encountered a " + "fatal error\n"); + if (stx_int_reg.s.frmerr) + pr_err("SPI0: STX FRMCNT has exceeded " + "STX_DIP_CNT[MAXFRM]\n"); + if (stx_int_reg.s.unxfrm) + pr_err("SPI0: STX Unexpected framing " + "sequence\n"); + if (stx_int_reg.s.nosync) + pr_err("SPI0: STX ERRCNT has exceeded " + "STX_DIP_CNT[MAXDIP]\n"); + if (stx_int_reg.s.diperr) + pr_err("SPI0: STX DIP2 error on the Spi4 " + "Status channel\n"); + if (stx_int_reg.s.datovr) + pr_err("SPI0: STX Spi4 FIFO overflow error\n"); + if (stx_int_reg.s.ovrbst) + pr_err("SPI0: STX Transmit packet burst " + "too big\n"); + if (stx_int_reg.s.calpar1) + pr_err("SPI0: STX Calendar Table Parity " + "Error Bank1\n"); + if (stx_int_reg.s.calpar0) + pr_err("SPI0: STX Calendar Table Parity " + "Error Bank0\n"); + } + + cvmx_write_csr(CVMX_SPXX_INT_MSK(0), 0); + cvmx_write_csr(CVMX_STXX_INT_MSK(0), 0); + need_retrain[0] = 1; + return_status = IRQ_HANDLED; + } + + return return_status; +} + +static void cvm_oct_spi_enable_error_reporting(int interface) +{ + union cvmx_spxx_int_msk spxx_int_msk; + union cvmx_stxx_int_msk stxx_int_msk; + + spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface)); + spxx_int_msk.s.calerr = 1; + spxx_int_msk.s.syncerr = 1; + spxx_int_msk.s.diperr = 1; + spxx_int_msk.s.tpaovr = 1; + spxx_int_msk.s.rsverr = 1; + spxx_int_msk.s.drwnng = 1; + spxx_int_msk.s.clserr = 1; + spxx_int_msk.s.spiovr = 1; + spxx_int_msk.s.abnorm = 1; + spxx_int_msk.s.prtnxa = 1; + cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64); + + stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface)); + stxx_int_msk.s.frmerr = 1; + stxx_int_msk.s.unxfrm = 1; + stxx_int_msk.s.nosync = 1; + stxx_int_msk.s.diperr = 1; + stxx_int_msk.s.datovr = 1; + stxx_int_msk.s.ovrbst = 1; + stxx_int_msk.s.calpar1 = 1; + stxx_int_msk.s.calpar0 = 1; + cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64); +} + +static void cvm_oct_spi_poll(struct net_device *dev) +{ + static int spi4000_port; + struct octeon_ethernet *priv = netdev_priv(dev); + int interface; + + for (interface = 0; interface < 2; interface++) { + 
+		if ((priv->port == interface * 16) && need_retrain[interface]) {
+
+			if (cvmx_spi_restart_interface
+			    (interface, CVMX_SPI_MODE_DUPLEX, 10) == 0) {
+				need_retrain[interface] = 0;
+				cvm_oct_spi_enable_error_reporting(interface);
+			}
+		}
+
+		/*
+		 * The SPI4000 TWSI interface is very slow. In order
+		 * not to bring the system to a crawl, we only poll a
+		 * single port every second. This means negotiation
+		 * speed changes take up to 10 seconds, but at least
+		 * we don't waste absurd amounts of time waiting for
+		 * TWSI.
+		 */
+		if (priv->port == spi4000_port) {
+			/*
+			 * This function does nothing if it is called on an
+			 * interface without a SPI4000.
+			 */
+			cvmx_spi4000_check_speed(interface, priv->port);
+			/*
+			 * Normal ordering increments. By decrementing
+			 * we only match once per iteration.
+			 */
+			spi4000_port--;
+			if (spi4000_port < 0)
+				spi4000_port = 10;
+		}
+	}
+}
+
+int cvm_oct_spi_init(struct net_device *dev)
+{
+	int r;
+	struct octeon_ethernet *priv = netdev_priv(dev);
+
+	if (number_spi_ports == 0) {
+		r = request_irq(OCTEON_IRQ_RML, cvm_oct_spi_rml_interrupt,
+				IRQF_SHARED, "SPI", &number_spi_ports);
+	}
+	number_spi_ports++;
+
+	if ((priv->port == 0) || (priv->port == 16)) {
+		cvm_oct_spi_enable_error_reporting(INTERFACE(priv->port));
+		priv->poll = cvm_oct_spi_poll;
+	}
+	cvm_oct_common_init(dev);
+	return 0;
+}
+
+void cvm_oct_spi_uninit(struct net_device *dev)
+{
+	int interface;
+
+	cvm_oct_common_uninit(dev);
+	number_spi_ports--;
+	if (number_spi_ports == 0) {
+		for (interface = 0; interface < 2; interface++) {
+			cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
+			cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);
+		}
+		free_irq(OCTEON_IRQ_RML, &number_spi_ports);
+	}
+}
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
new file mode 100644
index 0000000..77b7122
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -0,0 +1,634 @@
+/*********************************************************************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
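Because every SPI4000 query goes over slow TWSI, cvm_oct_spi_poll() above services at most one port per invocation: a module-level cursor names the port whose turn it is, and only the matching device advances it (by decrementing, so each pass over the devices matches exactly once). The scheduling skeleton in isolation (the names and the tick loop are illustrative):

#include <stdio.h>

#define LAST_PORT 10			/* cursor cycles LAST_PORT..0 */

static int cursor = LAST_PORT;

/* Called once per device per polling tick. */
static void poll_one(int my_port)
{
	if (my_port != cursor)
		return;			/* not this port's turn */

	printf("servicing slow query for port %d\n", my_port);
	if (--cursor < 0)		/* advance; wrap after port 0 */
		cursor = LAST_PORT;
}

int main(void)
{
	int tick, port;

	for (tick = 0; tick < 3; tick++)		/* one port per tick */
		for (port = 0; port <= LAST_PORT; port++)
			poll_one(port);
	return 0;
}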
+ * Contact Cavium Networks for more information
+*********************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/init.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/string.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <net/dst.h>
+#ifdef CONFIG_XFRM
+#include <linux/xfrm.h>
+#include <net/xfrm.h>
+#endif /* CONFIG_XFRM */
+
+#include <asm/atomic.h>
+
+#include <asm/octeon/octeon.h>
+
+#include "ethernet-defines.h"
+#include "octeon-ethernet.h"
+#include "ethernet-util.h"
+
+#include "cvmx-wqe.h"
+#include "cvmx-fau.h"
+#include "cvmx-pko.h"
+#include "cvmx-helper.h"
+
+#include "cvmx-gmxx-defs.h"
+
+/*
+ * You can define GET_SKBUFF_QOS() to override how the skbuff output
+ * function determines which output queue is used. The default
+ * implementation always uses the base queue for the port. If, for
+ * example, you wanted to use the skb->priority field, define
+ * GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority)
+ */
+#ifndef GET_SKBUFF_QOS
+#define GET_SKBUFF_QOS(skb) 0
+#endif
+
+/**
+ * Packet transmit
+ *
+ * @skb: Packet to send
+ * @dev: Device info structure
+ * Returns Always returns zero
+ */
+int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	cvmx_pko_command_word0_t pko_command;
+	union cvmx_buf_ptr hw_buffer;
+	uint64_t old_scratch;
+	uint64_t old_scratch2;
+	int dropped;
+	int qos;
+	struct octeon_ethernet *priv = netdev_priv(dev);
+	int32_t in_use;
+	int32_t buffers_to_free;
+#if REUSE_SKBUFFS_WITHOUT_FREE
+	unsigned char *fpa_head;
+#endif
+
+	/*
+	 * Prefetch the private data structure. It is larger than one
+	 * cache line.
+	 */
+	prefetch(priv);
+
+	/* Start off assuming no drop */
+	dropped = 0;
+
+	/*
+	 * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
+	 * completely remove "qos" in the event neither interface
+	 * supports multiple queues per port.
+	 */
+	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
+	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
+		qos = GET_SKBUFF_QOS(skb);
+		if (qos <= 0)
+			qos = 0;
+		else if (qos >= cvmx_pko_get_num_queues(priv->port))
+			qos = 0;
+	} else
+		qos = 0;
+
+	if (USE_ASYNC_IOBDMA) {
+		/* Save scratch in case userspace is using it */
+		CVMX_SYNCIOBDMA;
+		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+		old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
+
+		/*
+		 * Assume we're going to be able to send this
+		 * packet. Fetch and increment the number of pending
+		 * packets for output.
+		 */
+		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
+					       FAU_NUM_PACKET_BUFFERS_TO_FREE,
+					       0);
+		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
+					       priv->fau + qos * 4, 1);
+	}
+
+	/*
+	 * The CN3XXX series of parts has an erratum (GMX-401) which
+	 * causes the GMX block to hang if a collision occurs towards
+	 * the end of a <68 byte packet. As a workaround for this, we
+	 * pad packets to be 68 bytes whenever we are in half duplex
+	 * mode. We don't handle the case of having a small packet but
+	 * no room to add the padding. The kernel should always give
+	 * us at least a cache line.
+	 */
+	if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+		union cvmx_gmxx_prtx_cfg gmx_prt_cfg;
+		int interface = INTERFACE(priv->port);
+		int index = INDEX(priv->port);
+
+		if (interface < 2) {
+			/* We only need to pad packet in half duplex mode */
+			gmx_prt_cfg.u64 =
+			    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+			if (gmx_prt_cfg.s.duplex == 0) {
+				int add_bytes = 64 - skb->len;
+				if ((skb_tail_pointer(skb) + add_bytes) <=
+				    skb_end_pointer(skb))
+					memset(__skb_put(skb, add_bytes), 0,
+					       add_bytes);
+			}
+		}
+	}
+
+	/* Build the PKO buffer pointer */
+	hw_buffer.u64 = 0;
+	hw_buffer.s.addr = cvmx_ptr_to_phys(skb->data);
+	hw_buffer.s.pool = 0;
+	hw_buffer.s.size =
+	    (unsigned long)skb_end_pointer(skb) - (unsigned long)skb->head;
+
+	/* Build the PKO command */
+	pko_command.u64 = 0;
+	pko_command.s.n2 = 1;	/* Don't pollute L2 with the outgoing packet */
+	pko_command.s.segs = 1;
+	pko_command.s.total_bytes = skb->len;
+	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
+	pko_command.s.subone0 = 1;
+
+	pko_command.s.dontfree = 1;
+	pko_command.s.reg0 = priv->fau + qos * 4;
+	/*
+	 * See if we can put this skb in the FPA pool. Any strange
+	 * behavior from the Linux networking stack will most likely
+	 * be caused by a bug in the following code. If some field is
+	 * in use by the network stack and gets carried over when a
+	 * buffer is reused, bad things may happen. If in doubt and
+	 * you don't need the absolute best performance, disable the
+	 * define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has
+	 * shown a 25% increase in performance under some loads.
+	 */
+#if REUSE_SKBUFFS_WITHOUT_FREE
+	fpa_head = skb->head + 128 - ((unsigned long)skb->head & 0x7f);
+	if (unlikely(skb->data < fpa_head)) {
+		/*
+		 * printk("TX buffer beginning can't meet FPA
+		 * alignment constraints\n");
+		 */
+		goto dont_put_skbuff_in_hw;
+	}
+	if (unlikely
+	    ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) {
+		/*
+		   printk("TX buffer isn't large enough for the FPA\n");
+		 */
+		goto dont_put_skbuff_in_hw;
+	}
+	if (unlikely(skb_shared(skb))) {
+		/*
+		   printk("TX buffer sharing data with someone else\n");
+		 */
+		goto dont_put_skbuff_in_hw;
+	}
+	if (unlikely(skb_cloned(skb))) {
+		/*
+		   printk("TX buffer has been cloned\n");
+		 */
+		goto dont_put_skbuff_in_hw;
+	}
+	if (unlikely(skb_header_cloned(skb))) {
+		/*
+		   printk("TX buffer header has been cloned\n");
+		 */
+		goto dont_put_skbuff_in_hw;
+	}
+	if (unlikely(skb->destructor)) {
+		/*
+		   printk("TX buffer has a destructor\n");
+		 */
+		goto dont_put_skbuff_in_hw;
+	}
+	if (unlikely(skb_shinfo(skb)->nr_frags)) {
+		/*
+		   printk("TX buffer has fragments\n");
+		 */
+		goto dont_put_skbuff_in_hw;
+	}
+	if (unlikely
+	    (skb->truesize !=
+	     sizeof(*skb) + skb_end_pointer(skb) - skb->head)) {
+		/*
+		   printk("TX buffer truesize has been changed\n");
+		 */
+		goto dont_put_skbuff_in_hw;
+	}
+
+	/*
+	 * We can use this buffer in the FPA. We don't need the FAU
+	 * update anymore
+	 */
+	pko_command.s.reg0 = 0;
+	pko_command.s.dontfree = 0;
+
+	hw_buffer.s.back = (skb->data - fpa_head) >> 7;
+	*(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;
+
+	/*
+	 * The skbuff will be reused without ever being freed. We must
+	 * clean up a bunch of Linux stuff.
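Summarizing the gauntlet above: an skb may be donated to the FPA only when its data block can pass for a fresh pool buffer and nothing else holds state in it. As one predicate, restated for readability under the driver's own headers and constants (a condensed restatement, not a drop-in replacement):

#include <linux/skbuff.h>

static int skb_ok_for_fpa(struct sk_buff *skb, unsigned char *fpa_head)
{
	return skb->data >= fpa_head &&			/* inside the pool block */
	       skb_end_pointer(skb) - fpa_head >=
		   CVMX_FPA_PACKET_POOL_SIZE &&		/* big enough */
	       !skb_shared(skb) && !skb_cloned(skb) &&	/* sole owner */
	       !skb_header_cloned(skb) &&
	       !skb->destructor &&			/* no callback owed */
	       !skb_shinfo(skb)->nr_frags &&		/* linear data only */
	       skb->truesize ==				/* accounting intact */
		   sizeof(*skb) + skb_end_pointer(skb) - skb->head;
}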
+ */ + dst_release(skb->dst); + skb->dst = NULL; +#ifdef CONFIG_XFRM + secpath_put(skb->sp); + skb->sp = NULL; +#endif + nf_reset(skb); + +#ifdef CONFIG_NET_SCHED + skb->tc_index = 0; +#ifdef CONFIG_NET_CLS_ACT + skb->tc_verd = 0; +#endif /* CONFIG_NET_CLS_ACT */ +#endif /* CONFIG_NET_SCHED */ + +dont_put_skbuff_in_hw: +#endif /* REUSE_SKBUFFS_WITHOUT_FREE */ + + /* Check if we can use the hardware checksumming */ + if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) && + (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) && + ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14)) + && ((ip_hdr(skb)->protocol == IP_PROTOCOL_TCP) + || (ip_hdr(skb)->protocol == IP_PROTOCOL_UDP))) { + /* Use hardware checksum calc */ + pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1; + } + + if (USE_ASYNC_IOBDMA) { + /* Get the number of skbuffs in use by the hardware */ + CVMX_SYNCIOBDMA; + in_use = cvmx_scratch_read64(CVMX_SCR_SCRATCH); + buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8); + } else { + /* Get the number of skbuffs in use by the hardware */ + in_use = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, 1); + buffers_to_free = + cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); + } + + /* + * If we're sending faster than the receive can free them then + * don't do the HW free. + */ + if ((buffers_to_free < -100) && !pko_command.s.dontfree) { + pko_command.s.dontfree = 1; + pko_command.s.reg0 = priv->fau + qos * 4; + } + + cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, + CVMX_PKO_LOCK_CMD_QUEUE); + + /* Drop this packet if we have too many already queued to the HW */ + if (unlikely + (skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) { + /* + DEBUGPRINT("%s: Tx dropped. Too many queued\n", dev->name); + */ + dropped = 1; + } + /* Send the packet to the output queue */ + else if (unlikely + (cvmx_pko_send_packet_finish + (priv->port, priv->queue + qos, pko_command, hw_buffer, + CVMX_PKO_LOCK_CMD_QUEUE))) { + DEBUGPRINT("%s: Failed to send the packet\n", dev->name); + dropped = 1; + } + + if (USE_ASYNC_IOBDMA) { + /* Restore the scratch area */ + cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch); + cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2); + } + + if (unlikely(dropped)) { + dev_kfree_skb_any(skb); + cvmx_fau_atomic_add32(priv->fau + qos * 4, -1); + priv->stats.tx_dropped++; + } else { + if (USE_SKBUFFS_IN_HW) { + /* Put this packet on the queue to be freed later */ + if (pko_command.s.dontfree) + skb_queue_tail(&priv->tx_free_list[qos], skb); + else { + cvmx_fau_atomic_add32 + (FAU_NUM_PACKET_BUFFERS_TO_FREE, -1); + cvmx_fau_atomic_add32(priv->fau + qos * 4, -1); + } + } else { + /* Put this packet on the queue to be freed later */ + skb_queue_tail(&priv->tx_free_list[qos], skb); + } + } + + /* Free skbuffs not in use by the hardware, possibly two at a time */ + if (skb_queue_len(&priv->tx_free_list[qos]) > in_use) { + spin_lock(&priv->tx_free_list[qos].lock); + /* + * Check again now that we have the lock. It might + * have changed. 
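+ * (Standard check/lock/re-check: the unlocked length test
+ * above is only a hint, so it is repeated here under
+ * tx_free_list.lock before anything is dequeued.)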
+ */ + if (skb_queue_len(&priv->tx_free_list[qos]) > in_use) + dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos])); + if (skb_queue_len(&priv->tx_free_list[qos]) > in_use) + dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos])); + spin_unlock(&priv->tx_free_list[qos].lock); + } + + return 0; +} + +/** + * Packet transmit to the POW + * + * @skb: Packet to send + * @dev: Device info structure + * Returns Always returns zero + */ +int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + void *packet_buffer; + void *copy_location; + + /* Get a work queue entry */ + cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL); + if (unlikely(work == NULL)) { + DEBUGPRINT("%s: Failed to allocate a work queue entry\n", + dev->name); + priv->stats.tx_dropped++; + dev_kfree_skb(skb); + return 0; + } + + /* Get a packet buffer */ + packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL); + if (unlikely(packet_buffer == NULL)) { + DEBUGPRINT("%s: Failed to allocate a packet buffer\n", + dev->name); + cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1)); + priv->stats.tx_dropped++; + dev_kfree_skb(skb); + return 0; + } + + /* + * Calculate where we need to copy the data to. We need to + * leave 8 bytes for a next pointer (unused). We also need to + * include any configure skip. Then we need to align the IP + * packet src and dest into the same 64bit word. The below + * calculation may add a little extra, but that doesn't + * hurt. + */ + copy_location = packet_buffer + sizeof(uint64_t); + copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6; + + /* + * We have to copy the packet since whoever processes this + * packet will free it to a hardware pool. We can't use the + * trick of counting outstanding packets like in + * cvm_oct_xmit. + */ + memcpy(copy_location, skb->data, skb->len); + + /* + * Fill in some of the work queue fields. We may need to add + * more if the software at the other end needs them. + */ + work->hw_chksum = skb->csum; + work->len = skb->len; + work->ipprt = priv->port; + work->qos = priv->port & 0x7; + work->grp = pow_send_group; + work->tag_type = CVMX_HELPER_INPUT_TAG_TYPE; + work->tag = pow_send_group; /* FIXME */ + /* Default to zero. 
Fields that remain zero are commented out below. */
+ work->word2.u64 = 0;
+ work->word2.s.bufs = 1;
+ work->packet_ptr.u64 = 0;
+ work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
+ work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
+ work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
+ work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ work->word2.s.ip_offset = 14;
+#if 0
+ work->word2.s.vlan_valid = 0; /* FIXME */
+ work->word2.s.vlan_cfi = 0; /* FIXME */
+ work->word2.s.vlan_id = 0; /* FIXME */
+ work->word2.s.dec_ipcomp = 0; /* FIXME */
+#endif
+ work->word2.s.tcp_or_udp =
+ (ip_hdr(skb)->protocol == IP_PROTOCOL_TCP)
+ || (ip_hdr(skb)->protocol == IP_PROTOCOL_UDP);
+#if 0
+ /* FIXME */
+ work->word2.s.dec_ipsec = 0;
+ /* We only support IPv4 right now */
+ work->word2.s.is_v6 = 0;
+ /* Hardware would set to zero */
+ work->word2.s.software = 0;
+ /* No error, packet is internal */
+ work->word2.s.L4_error = 0;
+#endif
+ work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0)
+ || (ip_hdr(skb)->frag_off ==
+ 1 << 14));
+#if 0
+ /* Assume Linux is sending a good packet */
+ work->word2.s.IP_exc = 0;
+#endif
+ work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
+ work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
+#if 0
+ /* This is an IP packet */
+ work->word2.s.not_IP = 0;
+ /* No error, packet is internal */
+ work->word2.s.rcv_error = 0;
+ /* No error, packet is internal */
+ work->word2.s.err_code = 0;
+#endif
+
+ /*
+ * When copying the data, include 4 bytes of the
+ * ethernet header to align the same way hardware
+ * does.
+ */
+ memcpy(work->packet_data, skb->data + 10,
+ sizeof(work->packet_data));
+ } else {
+#if 0
+ work->word2.snoip.vlan_valid = 0; /* FIXME */
+ work->word2.snoip.vlan_cfi = 0; /* FIXME */
+ work->word2.snoip.vlan_id = 0; /* FIXME */
+ work->word2.snoip.software = 0; /* Hardware would set to zero */
+#endif
+ work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
+ work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
+ work->word2.snoip.is_bcast =
+ (skb->pkt_type == PACKET_BROADCAST);
+ work->word2.snoip.is_mcast =
+ (skb->pkt_type == PACKET_MULTICAST);
+ work->word2.snoip.not_IP = 1; /* IP was handled up above */
+#if 0
+ /* No error, packet is internal */
+ work->word2.snoip.rcv_error = 0;
+ /* No error, packet is internal */
+ work->word2.snoip.err_code = 0;
+#endif
+ memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
+ }
+
+ /* Submit the packet to the POW */
+ cvmx_pow_work_submit(work, work->tag, work->tag_type, work->qos,
+ work->grp);
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += skb->len;
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+/**
+ * Transmit a work queue entry out of the ethernet port. Both
+ * the work queue entry and the packet data can optionally be
+ * freed. The work will be freed on error as well.
+ *
+ * @dev: Device to transmit out.
+ * @work_queue_entry:
+ * Work queue entry to send
+ * @do_free: True if the work queue entry and packet data should be
+ * freed. If false, neither will be freed.
+ * @qos: Index into the queues for this port to transmit on. This
+ * is used to implement QoS if there are multiple queues per
+ * port. This parameter must be between 0 and the number of
+ * queues per port minus 1. Values outside of this range will
+ * be changed to zero.
+ *
+ * Returns Zero on success, negative on failure.
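+ *
+ * Hypothetical call, assuming "work" is a previously received
+ * wqe that should go out on this port's queue index 1 and be
+ * freed by hardware afterwards:
+ *
+ *     cvm_oct_transmit_qos(dev, work, 1, 1);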
+ */ +int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry, + int do_free, int qos) +{ + unsigned long flags; + union cvmx_buf_ptr hw_buffer; + cvmx_pko_command_word0_t pko_command; + int dropped; + struct octeon_ethernet *priv = netdev_priv(dev); + cvmx_wqe_t *work = work_queue_entry; + + if (!(dev->flags & IFF_UP)) { + DEBUGPRINT("%s: Device not up\n", dev->name); + if (do_free) + cvm_oct_free_work(work); + return -1; + } + + /* The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to completely + remove "qos" in the event neither interface supports + multiple queues per port */ + if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) || + (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) { + if (qos <= 0) + qos = 0; + else if (qos >= cvmx_pko_get_num_queues(priv->port)) + qos = 0; + } else + qos = 0; + + /* Start off assuming no drop */ + dropped = 0; + + local_irq_save(flags); + cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, + CVMX_PKO_LOCK_CMD_QUEUE); + + /* Build the PKO buffer pointer */ + hw_buffer.u64 = 0; + hw_buffer.s.addr = work->packet_ptr.s.addr; + hw_buffer.s.pool = CVMX_FPA_PACKET_POOL; + hw_buffer.s.size = CVMX_FPA_PACKET_POOL_SIZE; + hw_buffer.s.back = work->packet_ptr.s.back; + + /* Build the PKO command */ + pko_command.u64 = 0; + pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */ + pko_command.s.dontfree = !do_free; + pko_command.s.segs = work->word2.s.bufs; + pko_command.s.total_bytes = work->len; + + /* Check if we can use the hardware checksumming */ + if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc)) + pko_command.s.ipoffp1 = 0; + else + pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1; + + /* Send the packet to the output queue */ + if (unlikely + (cvmx_pko_send_packet_finish + (priv->port, priv->queue + qos, pko_command, hw_buffer, + CVMX_PKO_LOCK_CMD_QUEUE))) { + DEBUGPRINT("%s: Failed to send the packet\n", dev->name); + dropped = -1; + } + local_irq_restore(flags); + + if (unlikely(dropped)) { + if (do_free) + cvm_oct_free_work(work); + priv->stats.tx_dropped++; + } else if (do_free) + cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1)); + + return dropped; +} +EXPORT_SYMBOL(cvm_oct_transmit_qos); + +/** + * This function frees all skb that are currenty queued for TX. + * + * @dev: Device being shutdown + */ +void cvm_oct_tx_shutdown(struct net_device *dev) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + unsigned long flags; + int qos; + + for (qos = 0; qos < 16; qos++) { + spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); + while (skb_queue_len(&priv->tx_free_list[qos])) + dev_kfree_skb_any(__skb_dequeue + (&priv->tx_free_list[qos])); + spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags); + } +} diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h new file mode 100644 index 0000000..5106236 --- /dev/null +++ b/drivers/staging/octeon/ethernet-tx.h @@ -0,0 +1,32 @@ +/********************************************************************* + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2007 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. 
+ * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information +*********************************************************************/ + +int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev); +int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev); +int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry, + int do_free, int qos); +void cvm_oct_tx_shutdown(struct net_device *dev); diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h new file mode 100644 index 0000000..37b6659 --- /dev/null +++ b/drivers/staging/octeon/ethernet-util.h @@ -0,0 +1,81 @@ +/********************************************************************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2007 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information +*********************************************************************/ + +#define DEBUGPRINT(format, ...) do { if (printk_ratelimit()) \ + printk(format, ##__VA_ARGS__); \ + } while (0) + +/** + * Given a packet data address, return a pointer to the + * beginning of the packet buffer. + * + * @packet_ptr: Packet data hardware address + * Returns Packet buffer pointer + */ +static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr) +{ + return cvmx_phys_to_ptr(((packet_ptr.s.addr >> 7) - packet_ptr.s.back) + << 7); +} + +/** + * Given an IPD/PKO port number, return the logical interface it is + * on. + * + * @ipd_port: Port to check + * + * Returns Logical interface + */ +static inline int INTERFACE(int ipd_port) +{ + if (ipd_port < 32) /* Interface 0 or 1 for RGMII,GMII,SPI, etc */ + return ipd_port >> 4; + else if (ipd_port < 36) /* Interface 2 for NPI */ + return 2; + else if (ipd_port < 40) /* Interface 3 for loopback */ + return 3; + else if (ipd_port == 40) /* Non existant interface for POW0 */ + return 4; + else + panic("Illegal ipd_port %d passed to INTERFACE\n", ipd_port); +} + +/** + * Given an IPD/PKO port number, return the port's index on a + * logical interface. 
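+ *
+ * Worked example of the two mappings: ipd_port 17 is on
+ * interface 1 (17 >> 4) at index 1 (17 & 15); ipd_port 34 is on
+ * the NPI interface 2 at index 2 (34 & 3).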
+ * + * @ipd_port: Port to check + * + * Returns Index into interface port list + */ +static inline int INDEX(int ipd_port) +{ + if (ipd_port < 32) + return ipd_port & 15; + else + return ipd_port & 3; +} diff --git a/drivers/staging/octeon/ethernet-xaui.c b/drivers/staging/octeon/ethernet-xaui.c new file mode 100644 index 0000000..f08eb32 --- /dev/null +++ b/drivers/staging/octeon/ethernet-xaui.c @@ -0,0 +1,127 @@ +/********************************************************************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2007 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information +**********************************************************************/ +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/mii.h> +#include <net/dst.h> + +#include <asm/octeon/octeon.h> + +#include "ethernet-defines.h" +#include "octeon-ethernet.h" +#include "ethernet-common.h" +#include "ethernet-util.h" + +#include "cvmx-helper.h" + +#include "cvmx-gmxx-defs.h" + +static int cvm_oct_xaui_open(struct net_device *dev) +{ + union cvmx_gmxx_prtx_cfg gmx_cfg; + struct octeon_ethernet *priv = netdev_priv(dev); + int interface = INTERFACE(priv->port); + int index = INDEX(priv->port); + cvmx_helper_link_info_t link_info; + + gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); + gmx_cfg.s.en = 1; + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); + + if (!octeon_is_simulation()) { + link_info = cvmx_helper_link_get(priv->port); + if (!link_info.s.link_up) + netif_carrier_off(dev); + } + return 0; +} + +static int cvm_oct_xaui_stop(struct net_device *dev) +{ + union cvmx_gmxx_prtx_cfg gmx_cfg; + struct octeon_ethernet *priv = netdev_priv(dev); + int interface = INTERFACE(priv->port); + int index = INDEX(priv->port); + + gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); + gmx_cfg.s.en = 0; + cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); + return 0; +} + +static void cvm_oct_xaui_poll(struct net_device *dev) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + cvmx_helper_link_info_t link_info; + + link_info = cvmx_helper_link_get(priv->port); + if (link_info.u64 == priv->link_info) + return; + + link_info = cvmx_helper_link_autoconf(priv->port); + priv->link_info = link_info.u64; + + /* Tell Linux */ + if (link_info.s.link_up) { + + if (!netif_carrier_ok(dev)) + netif_carrier_on(dev); + if (priv->queue != -1) + DEBUGPRINT + ("%s: %u Mbps %s duplex, port %2d, queue %2d\n", + dev->name, link_info.s.speed, + (link_info.s.full_duplex) ? 
"Full" : "Half", + priv->port, priv->queue); + else + DEBUGPRINT("%s: %u Mbps %s duplex, port %2d, POW\n", + dev->name, link_info.s.speed, + (link_info.s.full_duplex) ? "Full" : "Half", + priv->port); + } else { + if (netif_carrier_ok(dev)) + netif_carrier_off(dev); + DEBUGPRINT("%s: Link down\n", dev->name); + } +} + +int cvm_oct_xaui_init(struct net_device *dev) +{ + struct octeon_ethernet *priv = netdev_priv(dev); + cvm_oct_common_init(dev); + dev->open = cvm_oct_xaui_open; + dev->stop = cvm_oct_xaui_stop; + dev->stop(dev); + if (!octeon_is_simulation()) + priv->poll = cvm_oct_xaui_poll; + + return 0; +} + +void cvm_oct_xaui_uninit(struct net_device *dev) +{ + cvm_oct_common_uninit(dev); +} diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c new file mode 100644 index 0000000..e8ef9e0 --- /dev/null +++ b/drivers/staging/octeon/ethernet.c @@ -0,0 +1,507 @@ +/********************************************************************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2007 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information +**********************************************************************/ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/delay.h> +#include <linux/mii.h> + +#include <net/dst.h> + +#include <asm/octeon/octeon.h> + +#include "ethernet-defines.h" +#include "ethernet-mem.h" +#include "ethernet-rx.h" +#include "ethernet-tx.h" +#include "ethernet-util.h" +#include "ethernet-proc.h" +#include "ethernet-common.h" +#include "octeon-ethernet.h" + +#include "cvmx-pip.h" +#include "cvmx-pko.h" +#include "cvmx-fau.h" +#include "cvmx-ipd.h" +#include "cvmx-helper.h" + +#include "cvmx-smix-defs.h" + +#if defined(CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS) \ + && CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS +int num_packet_buffers = CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS; +#else +int num_packet_buffers = 1024; +#endif +module_param(num_packet_buffers, int, 0444); +MODULE_PARM_DESC(num_packet_buffers, "\n" + "\tNumber of packet buffers to allocate and store in the\n" + "\tFPA. By default, 1024 packet buffers are used unless\n" + "\tCONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS is defined."); + +int pow_receive_group = 15; +module_param(pow_receive_group, int, 0444); +MODULE_PARM_DESC(pow_receive_group, "\n" + "\tPOW group to receive packets from. All ethernet hardware\n" + "\twill be configured to send incomming packets to this POW\n" + "\tgroup. 
Also any other software can submit packets to this\n" + "\tgroup for the kernel to process."); + +int pow_send_group = -1; +module_param(pow_send_group, int, 0644); +MODULE_PARM_DESC(pow_send_group, "\n" + "\tPOW group to send packets to other software on. This\n" + "\tcontrols the creation of the virtual device pow0.\n" + "\talways_use_pow also depends on this value."); + +int always_use_pow; +module_param(always_use_pow, int, 0444); +MODULE_PARM_DESC(always_use_pow, "\n" + "\tWhen set, always send to the pow group. This will cause\n" + "\tpackets sent to real ethernet devices to be sent to the\n" + "\tPOW group instead of the hardware. Unless some other\n" + "\tapplication changes the config, packets will still be\n" + "\treceived from the low level hardware. Use this option\n" + "\tto allow a CVMX app to intercept all packets from the\n" + "\tlinux kernel. You must specify pow_send_group along with\n" + "\tthis option."); + +char pow_send_list[128] = ""; +module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444); +MODULE_PARM_DESC(pow_send_list, "\n" + "\tComma separated list of ethernet devices that should use the\n" + "\tPOW for transmit instead of the actual ethernet hardware. This\n" + "\tis a per port version of always_use_pow. always_use_pow takes\n" + "\tprecedence over this list. For example, setting this to\n" + "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n" + "\tusing the pow_send_group."); + +static int disable_core_queueing = 1; +module_param(disable_core_queueing, int, 0444); +MODULE_PARM_DESC(disable_core_queueing, "\n" + "\tWhen set the networking core's tx_queue_len is set to zero. This\n" + "\tallows packets to be sent without lock contention in the packet\n" + "\tscheduler resulting in some cases in improved throughput.\n"); + +/** + * Periodic timer to check auto negotiation + */ +static struct timer_list cvm_oct_poll_timer; + +/** + * Array of every ethernet device owned by this driver indexed by + * the ipd input port number. + */ +struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS]; + +extern struct semaphore mdio_sem; + +/** + * Periodic timer tick for slow management operations + * + * @arg: Device to check + */ +static void cvm_do_timer(unsigned long arg) +{ + static int port; + if (port < CVMX_PIP_NUM_INPUT_PORTS) { + if (cvm_oct_device[port]) { + int queues_per_port; + int qos; + struct octeon_ethernet *priv = + netdev_priv(cvm_oct_device[port]); + if (priv->poll) { + /* skip polling if we don't get the lock */ + if (!down_trylock(&mdio_sem)) { + priv->poll(cvm_oct_device[port]); + up(&mdio_sem); + } + } + + queues_per_port = cvmx_pko_get_num_queues(port); + /* Drain any pending packets in the free list */ + for (qos = 0; qos < queues_per_port; qos++) { + if (skb_queue_len(&priv->tx_free_list[qos])) { + spin_lock(&priv->tx_free_list[qos]. + lock); + while (skb_queue_len + (&priv->tx_free_list[qos]) > + cvmx_fau_fetch_and_add32(priv-> + fau + + qos * 4, + 0)) + dev_kfree_skb(__skb_dequeue + (&priv-> + tx_free_list + [qos])); + spin_unlock(&priv->tx_free_list[qos]. + lock); + } + } + cvm_oct_device[port]->get_stats(cvm_oct_device[port]); + } + port++; + /* Poll the next port in a 50th of a second. + This spreads the polling of ports out a little bit */ + mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50); + } else { + port = 0; + /* All ports have been polled. 
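+ (One port is examined per tick, so a full pass over all
+ CVMX_PIP_NUM_INPUT_PORTS ports takes roughly that count / 50
+ seconds plus this one-second pause.)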
Start the next iteration through
+ the ports in one second */
+ mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
+ }
+}
+
+/**
+ * Configure common hardware for all interfaces
+ */
+static __init void cvm_oct_configure_common_hw(void)
+{
+ int r;
+ /* Setup the FPA */
+ cvmx_fpa_enable();
+ cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
+ num_packet_buffers);
+ cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
+ num_packet_buffers);
+ if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
+ cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
+ CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
+
+ if (USE_RED)
+ cvmx_helper_setup_red(num_packet_buffers / 4,
+ num_packet_buffers / 8);
+
+ /* Enable the MII interface */
+ if (!octeon_is_simulation())
+ cvmx_write_csr(CVMX_SMIX_EN(0), 1);
+
+ /* Register an IRQ handler to receive POW interrupts */
+ r = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
+ cvm_oct_do_interrupt, IRQF_SHARED, "Ethernet",
+ cvm_oct_device);
+
+#if defined(CONFIG_SMP) && 0
+ if (USE_MULTICORE_RECEIVE) {
+ irq_set_affinity(OCTEON_IRQ_WORKQ0 + pow_receive_group,
+ cpu_online_mask);
+ }
+#endif
+}
+
+/**
+ * Free a work queue entry received in an intercept callback.
+ *
+ * @work_queue_entry:
+ * Work queue entry to free
+ * Returns Zero on success, Negative on failure.
+ */
+int cvm_oct_free_work(void *work_queue_entry)
+{
+ cvmx_wqe_t *work = work_queue_entry;
+
+ int segments = work->word2.s.bufs;
+ union cvmx_buf_ptr segment_ptr = work->packet_ptr;
+
+ while (segments--) {
+ union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
+ cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
+ if (unlikely(!segment_ptr.s.i))
+ cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
+ segment_ptr.s.pool,
+ DONT_WRITEBACK(CVMX_FPA_PACKET_POOL_SIZE /
+ 128));
+ segment_ptr = next_ptr;
+ }
+ cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
+
+ return 0;
+}
+EXPORT_SYMBOL(cvm_oct_free_work);
+
+/**
+ * Module/driver initialization. Creates the Linux network
+ * devices.
+ *
+ * Returns Zero on success
+ */
+static int __init cvm_oct_init_module(void)
+{
+ int num_interfaces;
+ int interface;
+ int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
+ int qos;
+
+ pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);
+
+ cvm_oct_proc_initialize();
+ cvm_oct_rx_initialize();
+ cvm_oct_configure_common_hw();
+
+ cvmx_helper_initialize_packet_io_global();
+
+ /* Change the input group for all ports before input is enabled */
+ num_interfaces = cvmx_helper_get_number_of_interfaces();
+ for (interface = 0; interface < num_interfaces; interface++) {
+ int num_ports = cvmx_helper_ports_on_interface(interface);
+ int port;
+
+ for (port = cvmx_helper_get_ipd_port(interface, 0);
+ port < cvmx_helper_get_ipd_port(interface, num_ports);
+ port++) {
+ union cvmx_pip_prt_tagx pip_prt_tagx;
+ pip_prt_tagx.u64 =
+ cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
+ pip_prt_tagx.s.grp = pow_receive_group;
+ cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
+ pip_prt_tagx.u64);
+ }
+ }
+
+ cvmx_helper_ipd_and_packet_input_enable();
+
+ memset(cvm_oct_device, 0, sizeof(cvm_oct_device));
+
+ /*
+ * Initialize the FAU used for counting packet buffers that
+ * need to be freed.
+ */
+ cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
+
+ if (pow_send_group != -1) {
+ struct net_device *dev;
+ pr_info("\tConfiguring device for POW only access\n");
+ dev = alloc_etherdev(sizeof(struct octeon_ethernet));
+ if (dev) {
+ /* Initialize the device private structure.
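+ (alloc_etherdev reserved room for it behind struct
+ net_device; netdev_priv() returns that trailing region.)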
*/ + struct octeon_ethernet *priv = netdev_priv(dev); + memset(priv, 0, sizeof(struct octeon_ethernet)); + + dev->init = cvm_oct_common_init; + priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED; + priv->port = CVMX_PIP_NUM_INPUT_PORTS; + priv->queue = -1; + strcpy(dev->name, "pow%d"); + for (qos = 0; qos < 16; qos++) + skb_queue_head_init(&priv->tx_free_list[qos]); + + if (register_netdev(dev) < 0) { + pr_err("Failed to register ethernet " + "device for POW\n"); + kfree(dev); + } else { + cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev; + pr_info("%s: POW send group %d, receive " + "group %d\n", + dev->name, pow_send_group, + pow_receive_group); + } + } else { + pr_err("Failed to allocate ethernet device " + "for POW\n"); + } + } + + num_interfaces = cvmx_helper_get_number_of_interfaces(); + for (interface = 0; interface < num_interfaces; interface++) { + cvmx_helper_interface_mode_t imode = + cvmx_helper_interface_get_mode(interface); + int num_ports = cvmx_helper_ports_on_interface(interface); + int port; + + for (port = cvmx_helper_get_ipd_port(interface, 0); + port < cvmx_helper_get_ipd_port(interface, num_ports); + port++) { + struct octeon_ethernet *priv; + struct net_device *dev = + alloc_etherdev(sizeof(struct octeon_ethernet)); + if (!dev) { + pr_err("Failed to allocate ethernet device " + "for port %d\n", port); + continue; + } + if (disable_core_queueing) + dev->tx_queue_len = 0; + + /* Initialize the device private structure. */ + priv = netdev_priv(dev); + memset(priv, 0, sizeof(struct octeon_ethernet)); + + priv->imode = imode; + priv->port = port; + priv->queue = cvmx_pko_get_base_queue(priv->port); + priv->fau = fau - cvmx_pko_get_num_queues(port) * 4; + for (qos = 0; qos < 16; qos++) + skb_queue_head_init(&priv->tx_free_list[qos]); + for (qos = 0; qos < cvmx_pko_get_num_queues(port); + qos++) + cvmx_fau_atomic_write32(priv->fau + qos * 4, 0); + + switch (priv->imode) { + + /* These types don't support ports to IPD/PKO */ + case CVMX_HELPER_INTERFACE_MODE_DISABLED: + case CVMX_HELPER_INTERFACE_MODE_PCIE: + case CVMX_HELPER_INTERFACE_MODE_PICMG: + break; + + case CVMX_HELPER_INTERFACE_MODE_NPI: + dev->init = cvm_oct_common_init; + dev->uninit = cvm_oct_common_uninit; + strcpy(dev->name, "npi%d"); + break; + + case CVMX_HELPER_INTERFACE_MODE_XAUI: + dev->init = cvm_oct_xaui_init; + dev->uninit = cvm_oct_xaui_uninit; + strcpy(dev->name, "xaui%d"); + break; + + case CVMX_HELPER_INTERFACE_MODE_LOOP: + dev->init = cvm_oct_common_init; + dev->uninit = cvm_oct_common_uninit; + strcpy(dev->name, "loop%d"); + break; + + case CVMX_HELPER_INTERFACE_MODE_SGMII: + dev->init = cvm_oct_sgmii_init; + dev->uninit = cvm_oct_sgmii_uninit; + strcpy(dev->name, "eth%d"); + break; + + case CVMX_HELPER_INTERFACE_MODE_SPI: + dev->init = cvm_oct_spi_init; + dev->uninit = cvm_oct_spi_uninit; + strcpy(dev->name, "spi%d"); + break; + + case CVMX_HELPER_INTERFACE_MODE_RGMII: + case CVMX_HELPER_INTERFACE_MODE_GMII: + dev->init = cvm_oct_rgmii_init; + dev->uninit = cvm_oct_rgmii_uninit; + strcpy(dev->name, "eth%d"); + break; + } + + if (!dev->init) { + kfree(dev); + } else if (register_netdev(dev) < 0) { + pr_err("Failed to register ethernet device " + "for interface %d, port %d\n", + interface, priv->port); + kfree(dev); + } else { + cvm_oct_device[priv->port] = dev; + fau -= + cvmx_pko_get_num_queues(priv->port) * + sizeof(uint32_t); + } + } + } + + if (INTERRUPT_LIMIT) { + /* + * Set the POW timer rate to give an interrupt at most + * INTERRUPT_LIMIT times per second. 
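+ * As a worked example (assuming a 500 MHz eclock and an
+ * INTERRUPT_LIMIT of 10000), the expression below evaluates
+ * to 500000000 / (10000 * 16 * 256) = 12 before the << 8
+ * shift positions the value in the register.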
+ */ + cvmx_write_csr(CVMX_POW_WQ_INT_PC, + octeon_bootinfo->eclock_hz / (INTERRUPT_LIMIT * + 16 * 256) << 8); + + /* + * Enable POW timer interrupt. It will count when + * there are packets available. + */ + cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), + 0x1ful << 24); + } else { + /* Enable POW interrupt when our port has at least one packet */ + cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1001); + } + + /* Enable the poll timer for checking RGMII status */ + init_timer(&cvm_oct_poll_timer); + cvm_oct_poll_timer.data = 0; + cvm_oct_poll_timer.function = cvm_do_timer; + mod_timer(&cvm_oct_poll_timer, jiffies + HZ); + + return 0; +} + +/** + * Module / driver shutdown + * + * Returns Zero on success + */ +static void __exit cvm_oct_cleanup_module(void) +{ + int port; + + /* Disable POW interrupt */ + cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0); + + cvmx_ipd_disable(); + + /* Free the interrupt handler */ + free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device); + + del_timer(&cvm_oct_poll_timer); + cvm_oct_rx_shutdown(); + cvmx_pko_disable(); + + /* Free the ethernet devices */ + for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) { + if (cvm_oct_device[port]) { + cvm_oct_tx_shutdown(cvm_oct_device[port]); + unregister_netdev(cvm_oct_device[port]); + kfree(cvm_oct_device[port]); + cvm_oct_device[port] = NULL; + } + } + + cvmx_pko_shutdown(); + cvm_oct_proc_shutdown(); + + cvmx_ipd_free_ptr(); + + /* Free the HW pools */ + cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE, + num_packet_buffers); + cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE, + num_packet_buffers); + if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL) + cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL, + CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128); +} + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>"); +MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver."); +module_init(cvm_oct_init_module); +module_exit(cvm_oct_cleanup_module); diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h new file mode 100644 index 0000000..b319907 --- /dev/null +++ b/drivers/staging/octeon/octeon-ethernet.h @@ -0,0 +1,127 @@ +/********************************************************************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2007 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information +**********************************************************************/ + +/* + * External interface for the Cavium Octeon ethernet driver. 
+ */
+#ifndef OCTEON_ETHERNET_H
+#define OCTEON_ETHERNET_H
+
+/**
+ * This is the definition of the Ethernet driver's private
+ * driver state stored in netdev_priv(dev).
+ */
+struct octeon_ethernet {
+ /* PKO hardware output port */
+ int port;
+ /* PKO hardware queue for the port */
+ int queue;
+ /* Hardware fetch and add to count outstanding tx buffers */
+ int fau;
+ /*
+ * Type of port. This is one of the enums in
+ * cvmx_helper_interface_mode_t
+ */
+ int imode;
+ /* List of outstanding tx buffers per queue */
+ struct sk_buff_head tx_free_list[16];
+ /* Device statistics */
+ struct net_device_stats stats;
+ /* Generic MII info structure */
+ struct mii_if_info mii_info;
+ /* Last negotiated link state */
+ uint64_t link_info;
+ /* Called periodically to check link status */
+ void (*poll) (struct net_device *dev);
+};
+
+/**
+ * Free a work queue entry received in an intercept callback.
+ *
+ * @work_queue_entry:
+ * Work queue entry to free
+ * Returns Zero on success, Negative on failure.
+ */
+int cvm_oct_free_work(void *work_queue_entry);
+
+/**
+ * Transmit a work queue entry out of the ethernet port. Both
+ * the work queue entry and the packet data can optionally be
+ * freed. The work will be freed on error as well.
+ *
+ * @dev: Device to transmit out.
+ * @work_queue_entry:
+ * Work queue entry to send
+ * @do_free: True if the work queue entry and packet data should be
+ * freed. If false, neither will be freed.
+ * @qos: Index into the queues for this port to transmit on. This
+ * is used to implement QoS if there are multiple queues per
+ * port. This parameter must be between 0 and the number of
+ * queues per port minus 1. Values outside of this range will
+ * be changed to zero.
+ *
+ * Returns Zero on success, negative on failure.
+ */
+int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
+ int do_free, int qos);
+
+/**
+ * Transmit a work queue entry out of the ethernet port. Both
+ * the work queue entry and the packet data can optionally be
+ * freed. The work will be freed on error as well. This simply
+ * wraps cvm_oct_transmit_qos() for backwards compatibility.
+ *
+ * @dev: Device to transmit out.
+ * @work_queue_entry:
+ * Work queue entry to send
+ * @do_free: True if the work queue entry and packet data should be
+ * freed. If false, neither will be freed.
+ *
+ * Returns Zero on success, negative on failure.
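+ *
+ * Hypothetical use, identical to calling cvm_oct_transmit_qos()
+ * with qos = 0:
+ *
+ *     cvm_oct_transmit(dev, work, 1);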
+ */ +static inline int cvm_oct_transmit(struct net_device *dev, + void *work_queue_entry, int do_free) +{ + return cvm_oct_transmit_qos(dev, work_queue_entry, do_free, 0); +} + +extern int cvm_oct_rgmii_init(struct net_device *dev); +extern void cvm_oct_rgmii_uninit(struct net_device *dev); +extern int cvm_oct_sgmii_init(struct net_device *dev); +extern void cvm_oct_sgmii_uninit(struct net_device *dev); +extern int cvm_oct_spi_init(struct net_device *dev); +extern void cvm_oct_spi_uninit(struct net_device *dev); +extern int cvm_oct_xaui_init(struct net_device *dev); +extern void cvm_oct_xaui_uninit(struct net_device *dev); + +extern int always_use_pow; +extern int pow_send_group; +extern int pow_receive_group; +extern char pow_send_list[]; +extern struct net_device *cvm_oct_device[]; + +#endif diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c index a411702..6f8866d 100644 --- a/drivers/w1/masters/w1-gpio.c +++ b/drivers/w1/masters/w1-gpio.c @@ -74,6 +74,9 @@ static int __init w1_gpio_probe(struct platform_device *pdev) if (err) goto free_gpio; + if (pdata->enable_external_pullup) + pdata->enable_external_pullup(1); + platform_set_drvdata(pdev, master); return 0; @@ -91,6 +94,9 @@ static int __exit w1_gpio_remove(struct platform_device *pdev) struct w1_bus_master *master = platform_get_drvdata(pdev); struct w1_gpio_platform_data *pdata = pdev->dev.platform_data; + if (pdata->enable_external_pullup) + pdata->enable_external_pullup(0); + w1_remove_master_device(master); gpio_free(pdata->pin); kfree(master); @@ -98,12 +104,41 @@ static int __exit w1_gpio_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM + +static int w1_gpio_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct w1_gpio_platform_data *pdata = pdev->dev.platform_data; + + if (pdata->enable_external_pullup) + pdata->enable_external_pullup(0); + + return 0; +} + +static int w1_gpio_resume(struct platform_device *pdev) +{ + struct w1_gpio_platform_data *pdata = pdev->dev.platform_data; + + if (pdata->enable_external_pullup) + pdata->enable_external_pullup(1); + + return 0; +} + +#else +#define w1_gpio_suspend NULL +#define w1_gpio_resume NULL +#endif + static struct platform_driver w1_gpio_driver = { .driver = { .name = "w1-gpio", .owner = THIS_MODULE, }, .remove = __exit_p(w1_gpio_remove), + .suspend = w1_gpio_suspend, + .resume = w1_gpio_resume, }; static int __init w1_gpio_init(void) diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c index 90f98df..f90afdb 100644 --- a/drivers/watchdog/alim7101_wdt.c +++ b/drivers/watchdog/alim7101_wdt.c @@ -322,7 +322,8 @@ static int wdt_notify_sys(struct notifier_block *this, * watchdog on reboot with no heartbeat */ wdt_change(WDT_ENABLE); - printk(KERN_INFO PFX "Watchdog timer is now enabled with no heartbeat - should reboot in ~1 second.\n"); + printk(KERN_INFO PFX "Watchdog timer is now enabled " + "with no heartbeat - should reboot in ~1 second.\n"); } return NOTIFY_DONE; } @@ -374,12 +375,17 @@ static int __init alim7101_wdt_init(void) pci_dev_put(ali1543_south); if ((tmp & 0x1e) == 0x00) { if (!use_gpio) { - printk(KERN_INFO PFX "Detected old alim7101 revision 'a1d'. If this is a cobalt board, set the 'use_gpio' module parameter.\n"); + printk(KERN_INFO PFX + "Detected old alim7101 revision 'a1d'. 
" + "If this is a cobalt board, set the 'use_gpio' " + "module parameter.\n"); goto err_out; } nowayout = 1; } else if ((tmp & 0x1e) != 0x12 && (tmp & 0x1e) != 0x00) { - printk(KERN_INFO PFX "ALi 1543 South-Bridge does not have the correct revision number (???1001?) - WDT not set\n"); + printk(KERN_INFO PFX + "ALi 1543 South-Bridge does not have the correct " + "revision number (???1001?) - WDT not set\n"); goto err_out; } @@ -409,7 +415,8 @@ static int __init alim7101_wdt_init(void) if (nowayout) __module_get(THIS_MODULE); - printk(KERN_INFO PFX "WDT driver for ALi M7101 initialised. timeout=%d sec (nowayout=%d)\n", + printk(KERN_INFO PFX "WDT driver for ALi M7101 initialised. " + "timeout=%d sec (nowayout=%d)\n", timeout, nowayout); return 0; diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c index 55dcbfe..3fe9742 100644 --- a/drivers/watchdog/ar7_wdt.c +++ b/drivers/watchdog/ar7_wdt.c @@ -246,7 +246,8 @@ static long ar7_wdt_ioctl(struct file *file, static struct watchdog_info ident = { .identity = LONGNAME, .firmware_version = 1, - .options = (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING), + .options = (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | + WDIOF_MAGICCLOSE), }; int new_margin; diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c index 29e52c2..b185daf 100644 --- a/drivers/watchdog/at91rm9200_wdt.c +++ b/drivers/watchdog/at91rm9200_wdt.c @@ -268,7 +268,8 @@ static int __init at91_wdt_init(void) if not reset to the default */ if (at91_wdt_settimeout(wdt_time)) { at91_wdt_settimeout(WDT_DEFAULT_TIME); - pr_info("at91_wdt: wdt_time value must be 1 <= wdt_time <= 256, using %d\n", wdt_time); + pr_info("at91_wdt: wdt_time value must be 1 <= wdt_time <= 256" + ", using %d\n", wdt_time); } return platform_driver_register(&at91wdt_driver); diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c index 435b057..eac2602 100644 --- a/drivers/watchdog/at91sam9_wdt.c +++ b/drivers/watchdog/at91sam9_wdt.c @@ -156,7 +156,8 @@ static int at91_wdt_settimeout(unsigned int timeout) static const struct watchdog_info at91_wdt_info = { .identity = DRV_NAME, - .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | + WDIOF_MAGICCLOSE, }; /* diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c index 067a57c..c7b3f9d 100644 --- a/drivers/watchdog/bfin_wdt.c +++ b/drivers/watchdog/bfin_wdt.c @@ -27,10 +27,15 @@ #include <linux/uaccess.h> #include <asm/blackfin.h> -#define stamp(fmt, args...) pr_debug("%s:%i: " fmt "\n", __func__, __LINE__, ## args) +#define stamp(fmt, args...) \ + pr_debug("%s:%i: " fmt "\n", __func__, __LINE__, ## args) #define stampit() stamp("here i am") -#define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); }) -#define pr_init(fmt, args...) ({ static const __initconst char __fmt[] = fmt; printk(__fmt, ## args); }) +#define pr_devinit(fmt, args...) \ + ({ static const __devinitconst char __fmt[] = fmt; \ + printk(__fmt, ## args); }) +#define pr_init(fmt, args...) 
\ + ({ static const __initconst char __fmt[] = fmt; \ + printk(__fmt, ## args); }) #define WATCHDOG_NAME "bfin-wdt" #define PFX WATCHDOG_NAME ": " @@ -476,7 +481,8 @@ static int __init bfin_wdt_init(void) return ret; } - bfin_wdt_device = platform_device_register_simple(WATCHDOG_NAME, -1, NULL, 0); + bfin_wdt_device = platform_device_register_simple(WATCHDOG_NAME, + -1, NULL, 0); if (IS_ERR(bfin_wdt_device)) { pr_init(KERN_ERR PFX "unable to register device\n"); platform_driver_unregister(&bfin_wdt_driver); diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c index 41070e4..081f295 100644 --- a/drivers/watchdog/cpwd.c +++ b/drivers/watchdog/cpwd.c @@ -154,9 +154,9 @@ static struct cpwd *cpwd_device; static struct timer_list cpwd_timer; -static int wd0_timeout = 0; -static int wd1_timeout = 0; -static int wd2_timeout = 0; +static int wd0_timeout; +static int wd1_timeout; +static int wd2_timeout; module_param(wd0_timeout, int, 0); MODULE_PARM_DESC(wd0_timeout, "Default watchdog0 timeout in 1/10secs"); diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c index c51d0b0..83e22e7 100644 --- a/drivers/watchdog/davinci_wdt.c +++ b/drivers/watchdog/davinci_wdt.c @@ -193,7 +193,7 @@ static struct miscdevice davinci_wdt_miscdev = { .fops = &davinci_wdt_fops, }; -static int davinci_wdt_probe(struct platform_device *pdev) +static int __devinit davinci_wdt_probe(struct platform_device *pdev) { int ret = 0, size; struct resource *res; @@ -237,7 +237,7 @@ static int davinci_wdt_probe(struct platform_device *pdev) return ret; } -static int davinci_wdt_remove(struct platform_device *pdev) +static int __devexit davinci_wdt_remove(struct platform_device *pdev) { misc_deregister(&davinci_wdt_miscdev); if (wdt_mem) { @@ -254,7 +254,7 @@ static struct platform_driver platform_wdt_driver = { .owner = THIS_MODULE, }, .probe = davinci_wdt_probe, - .remove = davinci_wdt_remove, + .remove = __devexit_p(davinci_wdt_remove), }; static int __init davinci_wdt_init(void) diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index 3137361..c0b9169 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c @@ -19,6 +19,7 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> +#include <linux/nmi.h> #include <linux/kernel.h> #include <linux/miscdevice.h> #include <linux/mm.h> @@ -47,7 +48,7 @@ #define PCI_BIOS32_PARAGRAPH_LEN 16 #define PCI_ROM_BASE1 0x000F0000 #define ROM_SIZE 0x10000 -#define HPWDT_VERSION "1.01" +#define HPWDT_VERSION "1.1.1" struct bios32_service_dir { u32 signature; @@ -119,6 +120,7 @@ static int nowayout = WATCHDOG_NOWAYOUT; static char expect_release; static unsigned long hpwdt_is_open; static unsigned int allow_kdump; +static int hpwdt_nmi_sourcing; static void __iomem *pci_mem_addr; /* the PCI-memory address */ static unsigned long __iomem *hpwdt_timer_reg; @@ -468,21 +470,22 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason, if (ulReason != DIE_NMI && ulReason != DIE_NMI_IPI) return NOTIFY_OK; - spin_lock_irqsave(&rom_lock, rom_pl); - if (!die_nmi_called) - asminline_call(&cmn_regs, cru_rom_addr); - die_nmi_called = 1; - spin_unlock_irqrestore(&rom_lock, rom_pl); - if (cmn_regs.u1.ral == 0) { - printk(KERN_WARNING "hpwdt: An NMI occurred, " - "but unable to determine source.\n"); - } else { - if (allow_kdump) - hpwdt_stop(); - panic("An NMI occurred, please see the Integrated " - "Management Log for details.\n"); + if (hpwdt_nmi_sourcing) { + spin_lock_irqsave(&rom_lock, rom_pl); + if 
(!die_nmi_called) + asminline_call(&cmn_regs, cru_rom_addr); + die_nmi_called = 1; + spin_unlock_irqrestore(&rom_lock, rom_pl); + if (cmn_regs.u1.ral == 0) { + printk(KERN_WARNING "hpwdt: An NMI occurred, " + "but unable to determine source.\n"); + } else { + if (allow_kdump) + hpwdt_stop(); + panic("An NMI occurred, please see the Integrated " + "Management Log for details.\n"); + } } - return NOTIFY_OK; } @@ -627,12 +630,38 @@ static struct notifier_block die_notifier = { * Init & Exit */ +#ifdef ARCH_HAS_NMI_WATCHDOG +static void __devinit hpwdt_check_nmi_sourcing(struct pci_dev *dev) +{ + /* + * If nmi_watchdog is turned off then we can turn on + * our nmi sourcing capability. + */ + if (!nmi_watchdog_active()) + hpwdt_nmi_sourcing = 1; + else + dev_warn(&dev->dev, "NMI sourcing is disabled. To enable this " + "functionality you must reboot with nmi_watchdog=0.\n"); +} +#else +static void __devinit hpwdt_check_nmi_sourcing(struct pci_dev *dev) +{ + dev_warn(&dev->dev, "NMI sourcing is disabled. " + "Your kernel does not support a NMI Watchdog.\n"); +} +#endif + static int __devinit hpwdt_init_one(struct pci_dev *dev, const struct pci_device_id *ent) { int retval; /* + * Check if we can do NMI sourcing or not + */ + hpwdt_check_nmi_sourcing(dev); + + /* * First let's find out if we are on an iLO2 server. We will * not run on a legacy ASM box. * So we only support the G5 ProLiant servers and higher. diff --git a/drivers/watchdog/iTCO_vendor_support.c b/drivers/watchdog/iTCO_vendor_support.c index d3c0f6d..5133bca 100644 --- a/drivers/watchdog/iTCO_vendor_support.c +++ b/drivers/watchdog/iTCO_vendor_support.c @@ -19,7 +19,7 @@ /* Module and version information */ #define DRV_NAME "iTCO_vendor_support" -#define DRV_VERSION "1.03" +#define DRV_VERSION "1.04" #define PFX DRV_NAME ": " /* Includes */ @@ -35,20 +35,23 @@ #include "iTCO_vendor.h" /* iTCO defines */ -#define SMI_EN acpibase + 0x30 /* SMI Control and Enable Register */ -#define TCOBASE acpibase + 0x60 /* TCO base address */ -#define TCO1_STS TCOBASE + 0x04 /* TCO1 Status Register */ +#define SMI_EN (acpibase + 0x30) /* SMI Control and Enable Register */ +#define TCOBASE (acpibase + 0x60) /* TCO base address */ +#define TCO1_STS (TCOBASE + 0x04) /* TCO1 Status Register */ /* List of vendor support modes */ /* SuperMicro Pentium 3 Era 370SSE+-OEM1/P3TSSE */ #define SUPERMICRO_OLD_BOARD 1 /* SuperMicro Pentium 4 / Xeon 4 / EMT64T Era Systems */ #define SUPERMICRO_NEW_BOARD 2 +/* Broken BIOS */ +#define BROKEN_BIOS 911 static int vendorsupport; module_param(vendorsupport, int, 0); MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default=" - "0 (none), 1=SuperMicro Pent3, 2=SuperMicro Pent4+"); + "0 (none), 1=SuperMicro Pent3, 2=SuperMicro Pent4+, " + "911=Broken SMI BIOS"); /* * Vendor Specific Support @@ -243,25 +246,92 @@ static void supermicro_new_pre_set_heartbeat(unsigned int heartbeat) } /* + * Vendor Support: 911 + * Board: Some Intel ICHx based motherboards + * iTCO chipset: ICH7+ + * + * Some Intel motherboards have a broken BIOS implementation: i.e. + * the SMI handler clear's the TIMEOUT bit in the TC01_STS register + * and does not reload the time. Thus the TCO watchdog does not reboot + * the system. + * + * These are the conclusions of Andriy Gapon <avg@icyb.net.ua> after + * debugging: the SMI handler is quite simple - it tests value in + * TCO1_CNT against 0x800, i.e. checks TCO_TMR_HLT. 
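+ * (TCO_TMR_HLT is bit 11 of TCO1_CNT, hence the 0x800 mask.)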
If the bit is set + * the handler goes into an infinite loop, apparently to allow the + * second timeout and reboot. Otherwise it simply clears TIMEOUT bit + * in TCO1_STS and that's it. + * So the logic seems to be reversed, because it is hard to see how + * TIMEOUT can get set to 1 and SMI generated when TCO_TMR_HLT is set + * (other than a transitional effect). + * + * The only fix found to get the motherboard(s) to reboot is to put + * the glb_smi_en bit to 0. This is a dirty hack that bypasses the + * broken code by disabling Global SMI. + * + * WARNING: globally disabling SMI could possibly lead to dramatic + * problems, especially on laptops! I.e. various ACPI things where + * SMI is used for communication between OS and firmware. + * + * Don't use this fix if you don't need to!!! + */ + +static void broken_bios_start(unsigned long acpibase) +{ + unsigned long val32; + + val32 = inl(SMI_EN); + /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# + Bit 0: GBL_SMI_EN -> 0 = No SMI# will be generated by ICH. */ + val32 &= 0xffffdffe; + outl(val32, SMI_EN); +} + +static void broken_bios_stop(unsigned long acpibase) +{ + unsigned long val32; + + val32 = inl(SMI_EN); + /* Bit 13: TCO_EN -> 1 = Enables TCO logic generating an SMI# + Bit 0: GBL_SMI_EN -> 1 = Turn global SMI on again. */ + val32 |= 0x00002001; + outl(val32, SMI_EN); +} + +/* * Generic Support Functions */ void iTCO_vendor_pre_start(unsigned long acpibase, unsigned int heartbeat) { - if (vendorsupport == SUPERMICRO_OLD_BOARD) + switch (vendorsupport) { + case SUPERMICRO_OLD_BOARD: supermicro_old_pre_start(acpibase); - else if (vendorsupport == SUPERMICRO_NEW_BOARD) + break; + case SUPERMICRO_NEW_BOARD: supermicro_new_pre_start(heartbeat); + break; + case BROKEN_BIOS: + broken_bios_start(acpibase); + break; + } } EXPORT_SYMBOL(iTCO_vendor_pre_start); void iTCO_vendor_pre_stop(unsigned long acpibase) { - if (vendorsupport == SUPERMICRO_OLD_BOARD) + switch (vendorsupport) { + case SUPERMICRO_OLD_BOARD: supermicro_old_pre_stop(acpibase); - else if (vendorsupport == SUPERMICRO_NEW_BOARD) + break; + case SUPERMICRO_NEW_BOARD: supermicro_new_pre_stop(); + break; + case BROKEN_BIOS: + broken_bios_stop(acpibase); + break; + } } EXPORT_SYMBOL(iTCO_vendor_pre_stop); diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index 648250b..6a51edd 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c @@ -236,19 +236,19 @@ MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl); /* Address definitions for the TCO */ /* TCO base address */ -#define TCOBASE iTCO_wdt_private.ACPIBASE + 0x60 +#define TCOBASE (iTCO_wdt_private.ACPIBASE + 0x60) /* SMI Control and Enable Register */ -#define SMI_EN iTCO_wdt_private.ACPIBASE + 0x30 - -#define TCO_RLD TCOBASE + 0x00 /* TCO Timer Reload and Curr. Value */ -#define TCOv1_TMR TCOBASE + 0x01 /* TCOv1 Timer Initial Value */ -#define TCO_DAT_IN TCOBASE + 0x02 /* TCO Data In Register */ -#define TCO_DAT_OUT TCOBASE + 0x03 /* TCO Data Out Register */ -#define TCO1_STS TCOBASE + 0x04 /* TCO1 Status Register */ -#define TCO2_STS TCOBASE + 0x06 /* TCO2 Status Register */ -#define TCO1_CNT TCOBASE + 0x08 /* TCO1 Control Register */ -#define TCO2_CNT TCOBASE + 0x0a /* TCO2 Control Register */ -#define TCOv2_TMR TCOBASE + 0x12 /* TCOv2 Timer Initial Value */ +#define SMI_EN (iTCO_wdt_private.ACPIBASE + 0x30) + +#define TCO_RLD (TCOBASE + 0x00) /* TCO Timer Reload and Curr. 
Value */ +#define TCOv1_TMR (TCOBASE + 0x01) /* TCOv1 Timer Initial Value */ +#define TCO_DAT_IN (TCOBASE + 0x02) /* TCO Data In Register */ +#define TCO_DAT_OUT (TCOBASE + 0x03) /* TCO Data Out Register */ +#define TCO1_STS (TCOBASE + 0x04) /* TCO1 Status Register */ +#define TCO2_STS (TCOBASE + 0x06) /* TCO2 Status Register */ +#define TCO1_CNT (TCOBASE + 0x08) /* TCO1 Control Register */ +#define TCO2_CNT (TCOBASE + 0x0a) /* TCO2 Control Register */ +#define TCOv2_TMR (TCOBASE + 0x12) /* TCOv2 Timer Initial Value */ /* internal variables */ static unsigned long is_active; @@ -666,6 +666,11 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev, GCS = RCBA + ICH6_GCS(0x3410). */ if (iTCO_wdt_private.iTCO_version == 2) { pci_read_config_dword(pdev, 0xf0, &base_address); + if ((base_address & 1) == 0) { + printk(KERN_ERR PFX "RCBA is disabled by harddware\n"); + ret = -ENODEV; + goto out; + } RCBA = base_address & 0xffffc000; iTCO_wdt_private.gcs = ioremap((RCBA + 0x3410), 4); } @@ -675,7 +680,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev, printk(KERN_ERR PFX "failed to reset NO_REBOOT flag, " "reboot disabled by hardware\n"); ret = -ENODEV; /* Cannot reset NO_REBOOT bit */ - goto out; + goto out_unmap; } /* Set the NO_REBOOT bit to prevent later reboots, just for sure */ @@ -686,7 +691,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev, printk(KERN_ERR PFX "I/O address 0x%04lx already in use\n", SMI_EN); ret = -EIO; - goto out; + goto out_unmap; } /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */ val32 = inl(SMI_EN); @@ -742,9 +747,10 @@ unreg_region: release_region(TCOBASE, 0x20); unreg_smi_en: release_region(SMI_EN, 4); -out: +out_unmap: if (iTCO_wdt_private.iTCO_version == 2) iounmap(iTCO_wdt_private.gcs); +out: pci_dev_put(iTCO_wdt_private.pdev); iTCO_wdt_private.ACPIBASE = 0; return ret; diff --git a/drivers/watchdog/indydog.c b/drivers/watchdog/indydog.c index 0f761db..bea8a12 100644 --- a/drivers/watchdog/indydog.c +++ b/drivers/watchdog/indydog.c @@ -83,7 +83,6 @@ static int indydog_open(struct inode *inode, struct file *file) indydog_start(); indydog_ping(); - indydog_alive = 1; printk(KERN_INFO "Started watchdog timer.\n"); return nonseekable_open(inode, file); @@ -113,8 +112,7 @@ static long indydog_ioctl(struct file *file, unsigned int cmd, { int options, retval = -EINVAL; static struct watchdog_info ident = { - .options = WDIOF_KEEPALIVEPING | - WDIOF_MAGICCLOSE, + .options = WDIOF_KEEPALIVEPING, .firmware_version = 0, .identity = "Hardware Watchdog for SGI IP22", }; diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c index 2270ee0..daed48d 100644 --- a/drivers/watchdog/it8712f_wdt.c +++ b/drivers/watchdog/it8712f_wdt.c @@ -239,7 +239,8 @@ static long it8712f_wdt_ioctl(struct file *file, unsigned int cmd, static struct watchdog_info ident = { .identity = "IT8712F Watchdog", .firmware_version = 1, - .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | + WDIOF_MAGICCLOSE, }; int value; diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c index ae38321..00b03eb 100644 --- a/drivers/watchdog/ks8695_wdt.c +++ b/drivers/watchdog/ks8695_wdt.c @@ -293,8 +293,8 @@ static int __init ks8695_wdt_init(void) if not reset to the default */ if (ks8695_wdt_settimeout(wdt_time)) { ks8695_wdt_settimeout(WDT_DEFAULT_TIME); - pr_info("ks8695_wdt: wdt_time value must be 1 <= wdt_time <= %i, using %d\n", - wdt_time, WDT_MAX_TIME); + pr_info("ks8695_wdt: 
diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c index ae38321..00b03eb 100644 --- a/drivers/watchdog/ks8695_wdt.c +++ b/drivers/watchdog/ks8695_wdt.c @@ -293,8 +293,8 @@ static int __init ks8695_wdt_init(void) if not reset to the default */ if (ks8695_wdt_settimeout(wdt_time)) { ks8695_wdt_settimeout(WDT_DEFAULT_TIME); - pr_info("ks8695_wdt: wdt_time value must be 1 <= wdt_time <= %i, using %d\n", - wdt_time, WDT_MAX_TIME); + pr_info("ks8695_wdt: wdt_time value must be 1 <= wdt_time <= %i" ", using %d\n", wdt_time, WDT_MAX_TIME); } return platform_driver_register(&ks8695wdt_driver); } diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c index 2dfc275..b6b3f59 100644 --- a/drivers/watchdog/machzwd.c +++ b/drivers/watchdog/machzwd.c @@ -118,7 +118,8 @@ static struct watchdog_info zf_info = { */ static int action; module_param(action, int, 0); -MODULE_PARM_DESC(action, "after watchdog resets, generate: 0 = RESET(*) 1 = SMI 2 = NMI 3 = SCI"); +MODULE_PARM_DESC(action, "after watchdog resets, generate: " + "0 = RESET(*) 1 = SMI 2 = NMI 3 = SCI"); static void zf_ping(unsigned long data); @@ -142,7 +143,8 @@ static unsigned long next_heartbeat; #ifndef ZF_DEBUG # define dprintk(format, args...) #else -# define dprintk(format, args...) printk(KERN_DEBUG PFX ":%s:%d: " format, __func__, __LINE__ , ## args) +# define dprintk(format, args...) printk(KERN_DEBUG PFX \ + ":%s:%d: " format, __func__, __LINE__ , ## args) #endif @@ -340,7 +342,8 @@ static int zf_close(struct inode *inode, struct file *file) zf_timer_off(); else { del_timer(&zf_timer); - printk(KERN_ERR PFX ": device file closed unexpectedly. Will not stop the WDT!\n"); + printk(KERN_ERR PFX ": device file closed unexpectedly. " + "Will not stop the WDT!\n"); } clear_bit(0, &zf_is_open); zf_expect_close = 0; diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c index 1512ab8..83fa34b 100644 --- a/drivers/watchdog/mpcore_wdt.c +++ b/drivers/watchdog/mpcore_wdt.c @@ -61,7 +61,9 @@ MODULE_PARM_DESC(nowayout, #define ONLY_TESTING 0 static int mpcore_noboot = ONLY_TESTING; module_param(mpcore_noboot, int, 0); -MODULE_PARM_DESC(mpcore_noboot, "MPcore watchdog action, set to 1 to ignore reboots, 0 to reboot (default=" __MODULE_STRING(ONLY_TESTING) ")"); +MODULE_PARM_DESC(mpcore_noboot, "MPcore watchdog action, " + "set to 1 to ignore reboots, 0 to reboot (default=" + __MODULE_STRING(ONLY_TESTING) ")"); /* * This is the interrupt handler. Note that we only use this @@ -416,7 +418,8 @@ static struct platform_driver mpcore_wdt_driver = { }, }; -static char banner[] __initdata = KERN_INFO "MPcore Watchdog Timer: 0.1. mpcore_noboot=%d mpcore_margin=%d sec (nowayout= %d)\n"; +static char banner[] __initdata = KERN_INFO "MPcore Watchdog Timer: 0.1. " + "mpcore_noboot=%d mpcore_margin=%d sec (nowayout= %d)\n"; static int __init mpcore_wdt_init(void) { diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c index 539b6f6..08e8a6a 100644 --- a/drivers/watchdog/mtx-1_wdt.c +++ b/drivers/watchdog/mtx-1_wdt.c @@ -206,7 +206,7 @@ static struct miscdevice mtx1_wdt_misc = { }; -static int mtx1_wdt_probe(struct platform_device *pdev) +static int __devinit mtx1_wdt_probe(struct platform_device *pdev) { int ret; @@ -229,7 +229,7 @@ static int mtx1_wdt_probe(struct platform_device *pdev) return 0; } -static int mtx1_wdt_remove(struct platform_device *pdev) +static int __devexit mtx1_wdt_remove(struct platform_device *pdev) { /* FIXME: do we need to lock this test ? 
*/ if (mtx1_wdt_device.queue) { @@ -242,7 +242,7 @@ static int mtx1_wdt_remove(struct platform_device *pdev) static struct platform_driver mtx1_wdt = { .probe = mtx1_wdt_probe, - .remove = mtx1_wdt_remove, + .remove = __devexit_p(mtx1_wdt_remove), .driver.name = "mtx1-wdt", .driver.owner = THIS_MODULE, }; diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c index 6413519..f24d041 100644 --- a/drivers/watchdog/pnx4008_wdt.c +++ b/drivers/watchdog/pnx4008_wdt.c @@ -246,7 +246,7 @@ static struct miscdevice pnx4008_wdt_miscdev = { .fops = &pnx4008_wdt_fops, }; -static int pnx4008_wdt_probe(struct platform_device *pdev) +static int __devinit pnx4008_wdt_probe(struct platform_device *pdev) { int ret = 0, size; struct resource *res; @@ -299,7 +299,7 @@ out: return ret; } -static int pnx4008_wdt_remove(struct platform_device *pdev) +static int __devexit pnx4008_wdt_remove(struct platform_device *pdev) { misc_deregister(&pnx4008_wdt_miscdev); if (wdt_clk) { @@ -321,7 +321,7 @@ static struct platform_driver platform_wdt_driver = { .owner = THIS_MODULE, }, .probe = pnx4008_wdt_probe, - .remove = pnx4008_wdt_remove, + .remove = __devexit_p(pnx4008_wdt_remove), }; static int __init pnx4008_wdt_init(void) diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c index 36e221b..4976bfd 100644 --- a/drivers/watchdog/rdc321x_wdt.c +++ b/drivers/watchdog/rdc321x_wdt.c @@ -245,7 +245,7 @@ static int __devinit rdc321x_wdt_probe(struct platform_device *pdev) return 0; } -static int rdc321x_wdt_remove(struct platform_device *pdev) +static int __devexit rdc321x_wdt_remove(struct platform_device *pdev) { if (rdc321x_wdt_device.queue) { rdc321x_wdt_device.queue = 0; @@ -259,7 +259,7 @@ static int rdc321x_wdt_remove(struct platform_device *pdev) static struct platform_driver rdc321x_wdt_driver = { .probe = rdc321x_wdt_probe, - .remove = rdc321x_wdt_remove, + .remove = __devexit_p(rdc321x_wdt_remove), .driver = { .owner = THIS_MODULE, .name = "rdc321x-wdt", diff --git a/drivers/watchdog/rm9k_wdt.c b/drivers/watchdog/rm9k_wdt.c index cce1982..2e44426 100644 --- a/drivers/watchdog/rm9k_wdt.c +++ b/drivers/watchdog/rm9k_wdt.c @@ -345,8 +345,8 @@ static const struct resource *wdt_gpi_get_resource(struct platform_device *pdv, return platform_get_resource_byname(pdv, type, buf); } -/* No hotplugging on the platform bus - use __init */ -static int __init wdt_gpi_probe(struct platform_device *pdv) +/* No hotplugging on the platform bus - use __devinit */ +static int __devinit wdt_gpi_probe(struct platform_device *pdv) { int res; const struct resource @@ -373,7 +373,7 @@ static int __init wdt_gpi_probe(struct platform_device *pdv) return res; } -static int __exit wdt_gpi_remove(struct platform_device *dev) +static int __devexit wdt_gpi_remove(struct platform_device *dev) { int res; diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c index e31925e..b57ac6b 100644 --- a/drivers/watchdog/s3c2410_wdt.c +++ b/drivers/watchdog/s3c2410_wdt.c @@ -68,15 +68,10 @@ MODULE_PARM_DESC(tmr_atboot, __MODULE_STRING(CONFIG_S3C2410_WATCHDOG_ATBOOT)); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); -MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, 0 to reboot (default depends on ONLY_TESTING)"); +MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, " + "0 to reboot (default depends on ONLY_TESTING)"); MODULE_PARM_DESC(debug, "Watchdog debug, set to >1 for 
debug, (default 0)"); - -typedef enum close_state { - CLOSE_STATE_NOT, - CLOSE_STATE_ALLOW = 0x4021 -} close_state_t; - static unsigned long open_lock; static struct device *wdt_dev; /* platform device attached to */ static struct resource *wdt_mem; @@ -84,7 +79,7 @@ static struct resource *wdt_irq; static struct clk *wdt_clock; static void __iomem *wdt_base; static unsigned int wdt_count; -static close_state_t allow_close; +static char expect_close; static DEFINE_SPINLOCK(wdt_lock); /* watchdog control routines */ @@ -211,7 +206,7 @@ static int s3c2410wdt_open(struct inode *inode, struct file *file) if (nowayout) __module_get(THIS_MODULE); - allow_close = CLOSE_STATE_NOT; + expect_close = 0; /* start the timer */ s3c2410wdt_start(); @@ -225,13 +220,13 @@ static int s3c2410wdt_release(struct inode *inode, struct file *file) * Lock it in if it's a module and we set nowayout */ - if (allow_close == CLOSE_STATE_ALLOW) + if (expect_close == 42) s3c2410wdt_stop(); else { dev_err(wdt_dev, "Unexpected close, not stopping watchdog\n"); s3c2410wdt_keepalive(); } - allow_close = CLOSE_STATE_NOT; + expect_close = 0; clear_bit(0, &open_lock); return 0; } @@ -247,7 +242,7 @@ static ssize_t s3c2410wdt_write(struct file *file, const char __user *data, size_t i; /* In case it was set long ago */ - allow_close = CLOSE_STATE_NOT; + expect_close = 0; for (i = 0; i != len; i++) { char c; @@ -255,7 +250,7 @@ static ssize_t s3c2410wdt_write(struct file *file, const char __user *data, if (get_user(c, data + i)) return -EFAULT; if (c == 'V') - allow_close = CLOSE_STATE_ALLOW; + expect_close = 42; } } s3c2410wdt_keepalive(); @@ -263,7 +258,7 @@ static ssize_t s3c2410wdt_write(struct file *file, const char __user *data, return len; } -#define OPTIONS WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE +#define OPTIONS (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE) static const struct watchdog_info s3c2410_wdt_ident = { .options = OPTIONS, @@ -331,7 +326,7 @@ static irqreturn_t s3c2410wdt_irq(int irqno, void *param) } /* device interface */ -static int s3c2410wdt_probe(struct platform_device *pdev) +static int __devinit s3c2410wdt_probe(struct platform_device *pdev) { struct resource *res; struct device *dev; @@ -404,7 +399,8 @@ static int s3c2410wdt_probe(struct platform_device *pdev) "tmr_margin value out of range, default %d used\n", CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME); else - dev_info(dev, "default timer value is out of range, cannot start\n"); + dev_info(dev, "default timer value is out of range, " + "cannot start\n"); } ret = misc_register(&s3c2410wdt_miscdev); @@ -453,7 +449,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev) return ret; } -static int s3c2410wdt_remove(struct platform_device *dev) +static int __devexit s3c2410wdt_remove(struct platform_device *dev) { release_resource(wdt_mem); kfree(wdt_mem); @@ -515,7 +511,7 @@ static int s3c2410wdt_resume(struct platform_device *dev) static struct platform_driver s3c2410wdt_driver = { .probe = s3c2410wdt_probe, - .remove = s3c2410wdt_remove, + .remove = __devexit_p(s3c2410wdt_remove), .shutdown = s3c2410wdt_shutdown, .suspend = s3c2410wdt_suspend, .resume = s3c2410wdt_resume, diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c index 38f5831..9748eed 100644 --- a/drivers/watchdog/sb_wdog.c +++ b/drivers/watchdog/sb_wdog.c @@ -93,7 +93,7 @@ static int expect_close; static const struct watchdog_info ident = { .options = WDIOF_CARDRESET | WDIOF_SETTIMEOUT | - WDIOF_KEEPALIVEPING, + WDIOF_KEEPALIVEPING | 
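
The probe/remove annotations changed throughout the hunks above (mtx-1, pnx4008, rdc321x, rm9k, s3c2410) follow one pattern: __devinit and __devexit let the kernel discard the code when it cannot be needed, and __devexit_p() evaluates to the function or to NULL depending on whether the __devexit section exists, so the struct initializer never references a discarded symbol. Reduced to a skeleton (hypothetical "foo" driver, not from this series):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __devinit foo_probe(struct platform_device *pdev)
{
	return 0;	/* discarded after init when CONFIG_HOTPLUG is off */
}

static int __devexit foo_remove(struct platform_device *pdev)
{
	return 0;	/* dropped entirely when hotplug removal is off */
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= __devexit_p(foo_remove),	/* foo_remove, or NULL */
	.driver	= {
		.name	= "foo",
		.owner	= THIS_MODULE,
	},
};
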
WDIOF_MAGICCLOSE, .identity = "SiByte Watchdog", }; @@ -269,9 +269,10 @@ irqreturn_t sbwdog_interrupt(int irq, void *addr) * if it's the second watchdog timer, it's for those users */ if (wd_cfg_reg == user_dog) - printk(KERN_CRIT - "%s in danger of initiating system reset in %ld.%01ld seconds\n", - ident.identity, wd_init / 1000000, (wd_init / 100000) % 10); + printk(KERN_CRIT "%s in danger of initiating system reset " + "in %ld.%01ld seconds\n", + ident.identity, + wd_init / 1000000, (wd_init / 100000) % 10); else cfg |= 1; diff --git a/drivers/watchdog/sbc60xxwdt.c b/drivers/watchdog/sbc60xxwdt.c index d1c390c..626d0e8 100644 --- a/drivers/watchdog/sbc60xxwdt.c +++ b/drivers/watchdog/sbc60xxwdt.c @@ -372,8 +372,9 @@ static int __init sbc60xxwdt_init(void) wdt_miscdev.minor, rc); goto err_out_reboot; } - printk(KERN_INFO PFX "WDT driver for 60XX single board computer initialised. timeout=%d sec (nowayout=%d)\n", - timeout, nowayout); + printk(KERN_INFO PFX + "WDT driver for 60XX single board computer initialised. " + "timeout=%d sec (nowayout=%d)\n", timeout, nowayout); return 0; diff --git a/drivers/watchdog/sbc8360.c b/drivers/watchdog/sbc8360.c index b6e6799..68e2e2d 100644 --- a/drivers/watchdog/sbc8360.c +++ b/drivers/watchdog/sbc8360.c @@ -280,8 +280,8 @@ static int sbc8360_close(struct inode *inode, struct file *file) if (expect_close == 42) sbc8360_stop(); else - printk(KERN_CRIT PFX - "SBC8360 device closed unexpectedly. SBC8360 will not stop!\n"); + printk(KERN_CRIT PFX "SBC8360 device closed unexpectedly. " + "SBC8360 will not stop!\n"); clear_bit(0, &sbc8360_is_open); expect_close = 0; diff --git a/drivers/watchdog/sbc_epx_c3.c b/drivers/watchdog/sbc_epx_c3.c index e467ddc..28f1214 100644 --- a/drivers/watchdog/sbc_epx_c3.c +++ b/drivers/watchdog/sbc_epx_c3.c @@ -107,8 +107,7 @@ static long epx_c3_ioctl(struct file *file, unsigned int cmd, int options, retval = -EINVAL; int __user *argp = (void __user *)arg; static const struct watchdog_info ident = { - .options = WDIOF_KEEPALIVEPING | - WDIOF_MAGICCLOSE, + .options = WDIOF_KEEPALIVEPING, .firmware_version = 0, .identity = "Winsystems EPX-C3 H/W Watchdog", }; @@ -174,8 +173,8 @@ static struct notifier_block epx_c3_notifier = { .notifier_call = epx_c3_notify_sys, }; -static const char banner[] __initdata = - KERN_INFO PFX "Hardware Watchdog Timer for Winsystems EPX-C3 SBC: 0.1\n"; +static const char banner[] __initdata = KERN_INFO PFX + "Hardware Watchdog Timer for Winsystems EPX-C3 SBC: 0.1\n"; static int __init watchdog_init(void) { @@ -219,6 +218,9 @@ module_init(watchdog_init); module_exit(watchdog_exit); MODULE_AUTHOR("Calin A. Culianu <calin@ajvar.org>"); -MODULE_DESCRIPTION("Hardware Watchdog Device for Winsystems EPX-C3 SBC. Note that there is no way to probe for this device -- so only use it if you are *sure* you are runnning on this specific SBC system from Winsystems! It writes to IO ports 0x1ee and 0x1ef!"); +MODULE_DESCRIPTION("Hardware Watchdog Device for Winsystems EPX-C3 SBC. " + "Note that there is no way to probe for this device -- " + "so only use it if you are *sure* you are running on this specific " + "SBC system from Winsystems! 
It writes to IO ports 0x1ee and 0x1ef!"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/scx200_wdt.c b/drivers/watchdog/scx200_wdt.c index 9e19a10..e67b76c 100644 --- a/drivers/watchdog/scx200_wdt.c +++ b/drivers/watchdog/scx200_wdt.c @@ -108,7 +108,9 @@ static int scx200_wdt_open(struct inode *inode, struct file *file) static int scx200_wdt_release(struct inode *inode, struct file *file) { if (expect_close != 42) - printk(KERN_WARNING NAME ": watchdog device closed unexpectedly, will not disable the watchdog timer\n"); + printk(KERN_WARNING NAME + ": watchdog device closed unexpectedly, " + "will not disable the watchdog timer\n"); else if (!nowayout) scx200_wdt_disable(); expect_close = 0; @@ -163,7 +165,8 @@ static long scx200_wdt_ioctl(struct file *file, unsigned int cmd, static const struct watchdog_info ident = { .identity = "NatSemi SCx200 Watchdog", .firmware_version = 1, - .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | + WDIOF_MAGICCLOSE, }; int new_margin; diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c index cdc7138..a03f84e 100644 --- a/drivers/watchdog/shwdt.c +++ b/drivers/watchdog/shwdt.c @@ -494,7 +494,9 @@ MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); module_param(clock_division_ratio, int, 0); -MODULE_PARM_DESC(clock_division_ratio, "Clock division ratio. Valid ranges are from 0x5 (1.31ms) to 0x7 (5.25ms). (default=" __MODULE_STRING(clock_division_ratio) ")"); +MODULE_PARM_DESC(clock_division_ratio, + "Clock division ratio. Valid ranges are from 0x5 (1.31ms) " + "to 0x7 (5.25ms). (default=" __MODULE_STRING(clock_division_ratio) ")"); module_param(heartbeat, int, 0); MODULE_PARM_DESC(heartbeat, diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c index ebcc9ce..833f49f 100644 --- a/drivers/watchdog/softdog.c +++ b/drivers/watchdog/softdog.c @@ -71,7 +71,9 @@ static int soft_noboot = 0; #endif /* ONLY_TESTING */ module_param(soft_noboot, int, 0); -MODULE_PARM_DESC(soft_noboot, "Softdog action, set to 1 to ignore reboots, 0 to reboot (default depends on ONLY_TESTING)"); +MODULE_PARM_DESC(soft_noboot, + "Softdog action, set to 1 to ignore reboots, 0 to reboot " + "(default depends on ONLY_TESTING)"); /* * Our timer @@ -264,7 +266,8 @@ static struct notifier_block softdog_notifier = { .notifier_call = softdog_notify_sys, }; -static char banner[] __initdata = KERN_INFO "Software Watchdog Timer: 0.07 initialized. soft_noboot=%d soft_margin=%d sec (nowayout= %d)\n"; +static char banner[] __initdata = KERN_INFO "Software Watchdog Timer: 0.07 " + "initialized. 
soft_noboot=%d soft_margin=%d sec (nowayout= %d)\n"; static int __init watchdog_init(void) { diff --git a/drivers/watchdog/w83697hf_wdt.c b/drivers/watchdog/w83697hf_wdt.c index a9c7f35..af08972 100644 --- a/drivers/watchdog/w83697hf_wdt.c +++ b/drivers/watchdog/w83697hf_wdt.c @@ -413,7 +413,8 @@ static int __init wdt_init(void) w83697hf_init(); if (early_disable) { if (wdt_running()) - printk(KERN_WARNING PFX "Stopping previously enabled watchdog until userland kicks in\n"); + printk(KERN_WARNING PFX "Stopping previously enabled " + "watchdog until userland kicks in\n"); wdt_disable(); } diff --git a/drivers/watchdog/wdrtas.c b/drivers/watchdog/wdrtas.c index a38fa49..a4fe7a3 100644 --- a/drivers/watchdog/wdrtas.c +++ b/drivers/watchdog/wdrtas.c @@ -49,12 +49,7 @@ MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS_MISCDEV(TEMP_MINOR); -#ifdef CONFIG_WATCHDOG_NOWAYOUT -static int wdrtas_nowayout = 1; -#else -static int wdrtas_nowayout = 0; -#endif - +static int wdrtas_nowayout = WATCHDOG_NOWAYOUT; static atomic_t wdrtas_miscdev_open = ATOMIC_INIT(0); static char wdrtas_expect_close; diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c index ab5547f..38d695d 100644 --- a/fs/9p/vfs_super.c +++ b/fs/9p/vfs_super.c @@ -37,7 +37,6 @@ #include <linux/mount.h> #include <linux/idr.h> #include <linux/sched.h> -#include <linux/smp_lock.h> #include <net/9p/9p.h> #include <net/9p/client.h> @@ -231,10 +230,8 @@ v9fs_umount_begin(struct super_block *sb) { struct v9fs_session_info *v9ses; - lock_kernel(); v9ses = sb->s_fs_info; v9fs_session_cancel(v9ses); - unlock_kernel(); } static const struct super_operations v9fs_super_ops = { @@ -134,7 +134,7 @@ config TMPFS_POSIX_ACL config HUGETLBFS bool "HugeTLB file system support" depends on X86 || IA64 || PPC64 || SPARC64 || (SUPERH && MMU) || \ - (S390 && 64BIT) || BROKEN + (S390 && 64BIT) || SYS_SUPPORTS_HUGETLBFS || BROKEN help hugetlbfs is a filesystem backing for HugeTLB pages, based on ramfs. For architectures that support it, say Y here and read diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h index a6665f3..9cc1877 100644 --- a/fs/adfs/adfs.h +++ b/fs/adfs/adfs.h @@ -1,3 +1,6 @@ +#include <linux/fs.h> +#include <linux/adfs_fs.h> + /* Internal data structures for ADFS */ #define ADFS_FREE_FRAG 0 @@ -17,6 +20,58 @@ struct buffer_head; /* + * adfs file system inode data in memory + */ +struct adfs_inode_info { + loff_t mmu_private; + unsigned long parent_id; /* object id of parent */ + __u32 loadaddr; /* RISC OS load address */ + __u32 execaddr; /* RISC OS exec address */ + unsigned int filetype; /* RISC OS file type */ + unsigned int attr; /* RISC OS permissions */ + unsigned int stamped:1; /* RISC OS file has date/time */ + struct inode vfs_inode; +}; + +/* + * Forward-declare this + */ +struct adfs_discmap; +struct adfs_dir_ops; + +/* + * ADFS file system superblock data in memory + */ +struct adfs_sb_info { + struct adfs_discmap *s_map; /* bh list containing map */ + struct adfs_dir_ops *s_dir; /* directory operations */ + + uid_t s_uid; /* owner uid */ + gid_t s_gid; /* owner gid */ + umode_t s_owner_mask; /* ADFS owner perm -> unix perm */ + umode_t s_other_mask; /* ADFS other perm -> unix perm */ + + __u32 s_ids_per_zone; /* max. 
no ids in one zone */ + __u32 s_idlen; /* length of ID in map */ + __u32 s_map_size; /* sector size of a map */ + unsigned long s_size; /* total size (in blocks) of this fs */ + signed int s_map2blk; /* shift left by this for map->sector */ + unsigned int s_log2sharesize;/* log2 share size */ + __le32 s_version; /* disc format version */ + unsigned int s_namelen; /* maximum number of characters in name */ +}; + +static inline struct adfs_sb_info *ADFS_SB(struct super_block *sb) +{ + return sb->s_fs_info; +} + +static inline struct adfs_inode_info *ADFS_I(struct inode *inode) +{ + return container_of(inode, struct adfs_inode_info, vfs_inode); +} + +/* * Directory handling */ struct adfs_dir { diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c index 4d40734..23aa52f 100644 --- a/fs/adfs/dir.c +++ b/fs/adfs/dir.c @@ -9,15 +9,7 @@ * * Common directory handling for ADFS */ -#include <linux/errno.h> -#include <linux/fs.h> -#include <linux/adfs_fs.h> -#include <linux/time.h> -#include <linux/stat.h> -#include <linux/spinlock.h> #include <linux/smp_lock.h> -#include <linux/buffer_head.h> /* for file_fsync() */ - #include "adfs.h" /* diff --git a/fs/adfs/dir_f.c b/fs/adfs/dir_f.c index 31df6ad..bafc712 100644 --- a/fs/adfs/dir_f.c +++ b/fs/adfs/dir_f.c @@ -9,15 +9,7 @@ * * E and F format directory handling */ -#include <linux/errno.h> -#include <linux/fs.h> -#include <linux/adfs_fs.h> -#include <linux/time.h> -#include <linux/stat.h> -#include <linux/spinlock.h> #include <linux/buffer_head.h> -#include <linux/string.h> - #include "adfs.h" #include "dir_f.h" diff --git a/fs/adfs/dir_fplus.c b/fs/adfs/dir_fplus.c index 139e0f3..1796bb35 100644 --- a/fs/adfs/dir_fplus.c +++ b/fs/adfs/dir_fplus.c @@ -7,15 +7,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ -#include <linux/errno.h> -#include <linux/fs.h> -#include <linux/adfs_fs.h> -#include <linux/time.h> -#include <linux/stat.h> -#include <linux/spinlock.h> #include <linux/buffer_head.h> -#include <linux/string.h> - #include "adfs.h" #include "dir_fplus.h" diff --git a/fs/adfs/file.c b/fs/adfs/file.c index 8224d54..005ea34 100644 --- a/fs/adfs/file.c +++ b/fs/adfs/file.c @@ -19,10 +19,6 @@ * * adfs regular file handling primitives */ -#include <linux/fs.h> -#include <linux/buffer_head.h> /* for file_fsync() */ -#include <linux/adfs_fs.h> - #include "adfs.h" const struct file_operations adfs_file_operations = { diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c index 05b3a67..798cb07 100644 --- a/fs/adfs/inode.c +++ b/fs/adfs/inode.c @@ -7,17 +7,8 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ -#include <linux/errno.h> -#include <linux/fs.h> -#include <linux/adfs_fs.h> -#include <linux/time.h> -#include <linux/stat.h> -#include <linux/string.h> -#include <linux/mm.h> #include <linux/smp_lock.h> -#include <linux/module.h> #include <linux/buffer_head.h> - #include "adfs.h" /* @@ -395,4 +386,3 @@ int adfs_write_inode(struct inode *inode, int wait) unlock_kernel(); return ret; } -MODULE_LICENSE("GPL"); diff --git a/fs/adfs/map.c b/fs/adfs/map.c index 568081b..d1a5932 100644 --- a/fs/adfs/map.c +++ b/fs/adfs/map.c @@ -7,14 +7,8 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ -#include <linux/errno.h> -#include <linux/fs.h> -#include <linux/adfs_fs.h> -#include <linux/spinlock.h> #include <linux/buffer_head.h> - #include <asm/unaligned.h> - #include "adfs.h" /* diff --git a/fs/adfs/super.c b/fs/adfs/super.c index 0ec5aaf..aad92f0 100644 --- a/fs/adfs/super.c +++ b/fs/adfs/super.c @@ -8,26 +8,12 @@ * published by the Free Software Foundation. */ #include <linux/module.h> -#include <linux/errno.h> -#include <linux/fs.h> -#include <linux/adfs_fs.h> -#include <linux/slab.h> -#include <linux/time.h> -#include <linux/stat.h> -#include <linux/string.h> #include <linux/init.h> #include <linux/buffer_head.h> -#include <linux/vfs.h> #include <linux/parser.h> -#include <linux/bitops.h> #include <linux/mount.h> #include <linux/seq_file.h> - -#include <asm/uaccess.h> -#include <asm/system.h> - -#include <stdarg.h> - +#include <linux/statfs.h> #include "adfs.h" #include "dir_f.h" #include "dir_fplus.h" @@ -534,3 +520,4 @@ static void __exit exit_adfs_fs(void) module_init(init_adfs_fs) module_exit(exit_adfs_fs) +MODULE_LICENSE("GPL"); diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index 89cd2de..615d549 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -735,8 +735,6 @@ parse_options(char *options, befs_mount_options * opts) static void befs_put_super(struct super_block *sb) { - lock_kernel(); - kfree(BEFS_SB(sb)->mount_opts.iocharset); BEFS_SB(sb)->mount_opts.iocharset = NULL; @@ -747,8 +745,6 @@ befs_put_super(struct super_block *sb) kfree(sb->s_fs_info); sb->s_fs_info = NULL; - - unlock_kernel(); } /* Allocate private field of the superblock, fill it. diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 40381df..9fa212b0 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1340,8 +1340,10 @@ static void fill_prstatus(struct elf_prstatus *prstatus, prstatus->pr_info.si_signo = prstatus->pr_cursig = signr; prstatus->pr_sigpend = p->pending.signal.sig[0]; prstatus->pr_sighold = p->blocked.sig[0]; + rcu_read_lock(); + prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent)); + rcu_read_unlock(); prstatus->pr_pid = task_pid_vnr(p); - prstatus->pr_ppid = task_pid_vnr(p->real_parent); prstatus->pr_pgrp = task_pgrp_vnr(p); prstatus->pr_sid = task_session_vnr(p); if (thread_group_leader(p)) { @@ -1382,8 +1384,10 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p, psinfo->pr_psargs[i] = ' '; psinfo->pr_psargs[len] = 0; + rcu_read_lock(); + psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent)); + rcu_read_unlock(); psinfo->pr_pid = task_pid_vnr(p); - psinfo->pr_ppid = task_pid_vnr(p->real_parent); psinfo->pr_pgrp = task_pgrp_vnr(p); psinfo->pr_sid = task_session_vnr(p); diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index fdb66fa..20fbece 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -1387,8 +1387,10 @@ static void fill_prstatus(struct elf_prstatus *prstatus, prstatus->pr_info.si_signo = prstatus->pr_cursig = signr; prstatus->pr_sigpend = p->pending.signal.sig[0]; prstatus->pr_sighold = p->blocked.sig[0]; + rcu_read_lock(); + prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent)); + rcu_read_unlock(); prstatus->pr_pid = task_pid_vnr(p); - prstatus->pr_ppid = task_pid_vnr(p->real_parent); prstatus->pr_pgrp = task_pgrp_vnr(p); prstatus->pr_sid = task_session_vnr(p); if (thread_group_leader(p)) { @@ -1432,8 +1434,10 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p, psinfo->pr_psargs[i] = ' '; psinfo->pr_psargs[len] = 0; + rcu_read_lock(); + 
psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent)); + rcu_read_unlock(); psinfo->pr_pid = task_pid_vnr(p); - psinfo->pr_ppid = task_pid_vnr(p->real_parent); psinfo->pr_pgrp = task_pgrp_vnr(p); psinfo->pr_sid = task_session_vnr(p); diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index 0aac371..c5ded5f 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -788,12 +788,6 @@ static int sg_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg) if (put_user(compat_ptr(data), &sgio->usr_ptr)) return -EFAULT; - if (copy_in_user(&sgio->status, &sgio32->status, - (4 * sizeof(unsigned char)) + - (2 * sizeof(unsigned short)) + - (3 * sizeof(int)))) - return -EFAULT; - err = sys_ioctl(fd, cmd, (unsigned long) sgio); if (err >= 0) { diff --git a/fs/efs/dir.c b/fs/efs/dir.c index 49308a2..7ee6f7e3 100644 --- a/fs/efs/dir.c +++ b/fs/efs/dir.c @@ -5,12 +5,12 @@ */ #include <linux/buffer_head.h> -#include <linux/smp_lock.h> #include "efs.h" static int efs_readdir(struct file *, void *, filldir_t); const struct file_operations efs_dir_operations = { + .llseek = generic_file_llseek, .read = generic_read_dir, .readdir = efs_readdir, }; @@ -33,8 +33,6 @@ static int efs_readdir(struct file *filp, void *dirent, filldir_t filldir) { if (inode->i_size & (EFS_DIRBSIZE-1)) printk(KERN_WARNING "EFS: WARNING: readdir(): directory size not a multiple of EFS_DIRBSIZE\n"); - lock_kernel(); - /* work out where this entry can be found */ block = filp->f_pos >> EFS_DIRBSIZE_BITS; @@ -107,7 +105,6 @@ static int efs_readdir(struct file *filp, void *dirent, filldir_t filldir) { filp->f_pos = (block << EFS_DIRBSIZE_BITS) | slot; out: - unlock_kernel(); return 0; } diff --git a/fs/efs/namei.c b/fs/efs/namei.c index c3fb5f9..1511bf9 100644 --- a/fs/efs/namei.c +++ b/fs/efs/namei.c @@ -8,7 +8,6 @@ #include <linux/buffer_head.h> #include <linux/string.h> -#include <linux/smp_lock.h> #include <linux/exportfs.h> #include "efs.h" @@ -63,16 +62,12 @@ struct dentry *efs_lookup(struct inode *dir, struct dentry *dentry, struct namei efs_ino_t inodenum; struct inode * inode = NULL; - lock_kernel(); inodenum = efs_find_entry(dir, dentry->d_name.name, dentry->d_name.len); if (inodenum) { inode = efs_iget(dir->i_sb, inodenum); - if (IS_ERR(inode)) { - unlock_kernel(); + if (IS_ERR(inode)) return ERR_CAST(inode); - } } - unlock_kernel(); return d_splice_alias(inode, dentry); } @@ -115,11 +110,9 @@ struct dentry *efs_get_parent(struct dentry *child) struct dentry *parent = ERR_PTR(-ENOENT); efs_ino_t ino; - lock_kernel(); ino = efs_find_entry(child->d_inode, "..", 2); if (ino) parent = d_obtain_alias(efs_iget(child->d_inode->i_sb, ino)); - unlock_kernel(); return parent; } diff --git a/fs/efs/symlink.c b/fs/efs/symlink.c index 41911ec..75117d0 100644 --- a/fs/efs/symlink.c +++ b/fs/efs/symlink.c @@ -9,7 +9,6 @@ #include <linux/string.h> #include <linux/pagemap.h> #include <linux/buffer_head.h> -#include <linux/smp_lock.h> #include "efs.h" static int efs_symlink_readpage(struct file *file, struct page *page) @@ -22,9 +21,8 @@ static int efs_symlink_readpage(struct file *file, struct page *page) err = -ENAMETOOLONG; if (size > 2 * EFS_BLOCKSIZE) - goto fail_notlocked; + goto fail; - lock_kernel(); /* read first 512 bytes of link target */ err = -EIO; bh = sb_bread(inode->i_sb, efs_bmap(inode, 0)); @@ -40,14 +38,11 @@ static int efs_symlink_readpage(struct file *file, struct page *page) brelse(bh); } link[size] = '\0'; - unlock_kernel(); SetPageUptodate(page); kunmap(page); unlock_page(page); return 0; fail: - 
unlock_kernel(); -fail_notlocked: SetPageError(page); kunmap(page); unlock_page(page); diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 5458e80..085c5c0 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -98,7 +98,7 @@ struct epoll_filefd { struct nested_call_node { struct list_head llink; void *cookie; - int cpu; + void *ctx; }; /* @@ -317,17 +317,17 @@ static void ep_nested_calls_init(struct nested_calls *ncalls) * @nproc: Nested call core function pointer. * @priv: Opaque data to be passed to the @nproc callback. * @cookie: Cookie to be used to identify this nested call. + * @ctx: This instance context. * * Returns: Returns the code returned by the @nproc callback, or -1 if * the maximum recursion limit has been exceeded. */ static int ep_call_nested(struct nested_calls *ncalls, int max_nests, int (*nproc)(void *, void *, int), void *priv, - void *cookie) + void *cookie, void *ctx) { int error, call_nests = 0; unsigned long flags; - int this_cpu = get_cpu(); struct list_head *lsthead = &ncalls->tasks_call_list; struct nested_call_node *tncur; struct nested_call_node tnode; @@ -340,7 +340,7 @@ static int ep_call_nested(struct nested_calls *ncalls, int max_nests, * very much limited. */ list_for_each_entry(tncur, lsthead, llink) { - if (tncur->cpu == this_cpu && + if (tncur->ctx == ctx && (tncur->cookie == cookie || ++call_nests > max_nests)) { /* * Ops ... loop detected or maximum nest level reached. @@ -352,7 +352,7 @@ static int ep_call_nested(struct nested_calls *ncalls, int max_nests, } /* Add the current task and cookie to the list */ - tnode.cpu = this_cpu; + tnode.ctx = ctx; tnode.cookie = cookie; list_add(&tnode.llink, lsthead); @@ -364,10 +364,9 @@ static int ep_call_nested(struct nested_calls *ncalls, int max_nests, /* Remove the current task from the list */ spin_lock_irqsave(&ncalls->lock, flags); list_del(&tnode.llink); - out_unlock: +out_unlock: spin_unlock_irqrestore(&ncalls->lock, flags); - put_cpu(); return error; } @@ -408,8 +407,12 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests) */ static void ep_poll_safewake(wait_queue_head_t *wq) { + int this_cpu = get_cpu(); + ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS, - ep_poll_wakeup_proc, NULL, wq); + ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu); + + put_cpu(); } /* @@ -663,7 +666,7 @@ static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait) * could re-enter here. */ pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS, - ep_poll_readyevents_proc, ep, ep); + ep_poll_readyevents_proc, ep, ep, current); return pollflags != -1 ? 
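
The eventpoll change above swaps the CPU number for an opaque context pointer in the recursion bookkeeping: two unrelated tasks can legitimately poll the same epoll file from the same CPU and must not be mistaken for a loop, so the poll path now keys the check on the calling task (current), while ep_poll_safewake() keeps the CPU id but takes it explicitly with get_cpu()/put_cpu() around the call. The detection rule, reduced to a sketch (the real ep_call_nested() additionally bounds the nesting depth and holds ncalls->lock):

#include <linux/list.h>

struct nest_node {
	struct list_head llink;
	void *cookie;	/* object being visited */
	void *ctx;	/* identity of the caller: task, cpu id, ... */
};

/*
 * A loop means the *same* caller is already visiting the same object.
 * Keying on the raw CPU number would also trip when a different task,
 * scheduled on that CPU, visits the object -- a false positive.
 */
static int is_nested_call(struct list_head *stack, void *cookie, void *ctx)
{
	struct nest_node *n;

	list_for_each_entry(n, stack, llink)
		if (n->ctx == ctx && n->cookie == cookie)
			return 1;
	return 0;
}
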
pollflags : 0; } diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c index 0035004..6cde970 100644 --- a/fs/ext2/dir.c +++ b/fs/ext2/dir.c @@ -450,7 +450,7 @@ ino_t ext2_inode_by_name(struct inode *dir, struct qstr *child) /* Releases the page */ void ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de, - struct page *page, struct inode *inode) + struct page *page, struct inode *inode, int update_times) { loff_t pos = page_offset(page) + (char *) de - (char *) page_address(page); @@ -465,7 +465,8 @@ void ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de, ext2_set_de_type(de, inode); err = ext2_commit_chunk(page, pos, len); ext2_put_page(page); - dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; + if (update_times) + dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL; mark_inode_dirty(dir); } diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h index f2e5811..d988a71 100644 --- a/fs/ext2/ext2.h +++ b/fs/ext2/ext2.h @@ -111,7 +111,7 @@ extern struct ext2_dir_entry_2 * ext2_find_entry (struct inode *,struct qstr *, extern int ext2_delete_entry (struct ext2_dir_entry_2 *, struct page *); extern int ext2_empty_dir (struct inode *); extern struct ext2_dir_entry_2 * ext2_dotdot (struct inode *, struct page **); -extern void ext2_set_link(struct inode *, struct ext2_dir_entry_2 *, struct page *, struct inode *); +extern void ext2_set_link(struct inode *, struct ext2_dir_entry_2 *, struct page *, struct inode *, int); /* ialloc.c */ extern struct inode * ext2_new_inode (struct inode *, int); diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index 90ea179..6524eca 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -320,7 +320,7 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, if (!new_de) goto out_dir; inode_inc_link_count(old_inode); - ext2_set_link(new_dir, new_de, new_page, old_inode); + ext2_set_link(new_dir, new_de, new_page, old_inode, 1); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) drop_nlink(new_inode); @@ -352,7 +352,8 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, inode_dec_link_count(old_inode); if (dir_de) { - ext2_set_link(old_inode, dir_de, dir_page, new_dir); + if (old_dir != new_dir) + ext2_set_link(old_inode, dir_de, dir_page, new_dir, 0); inode_dec_link_count(old_dir); } return 0; diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c index d81ef2f..e0c7454 100644 --- a/fs/ext3/acl.c +++ b/fs/ext3/acl.c @@ -129,12 +129,15 @@ fail: static inline struct posix_acl * ext3_iget_acl(struct inode *inode, struct posix_acl **i_acl) { - struct posix_acl *acl = EXT3_ACL_NOT_CACHED; + struct posix_acl *acl = ACCESS_ONCE(*i_acl); - spin_lock(&inode->i_lock); - if (*i_acl != EXT3_ACL_NOT_CACHED) - acl = posix_acl_dup(*i_acl); - spin_unlock(&inode->i_lock); + if (acl) { + spin_lock(&inode->i_lock); + acl = *i_acl; + if (acl != EXT3_ACL_NOT_CACHED) + acl = posix_acl_dup(acl); + spin_unlock(&inode->i_lock); + } return acl; } diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index b0248c6..05dea81 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -820,7 +820,7 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, while (count < maxblocks && count <= blocks_to_boundary) { ext3_fsblk_t blk; - if (!verify_chain(chain, partial)) { + if (!verify_chain(chain, chain + depth - 1)) { /* * Indirect block might be removed by * truncate while we were reading it. 
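
The ext3_iget_acl() rewrite above (ext4 gets the identical change below) is a double-checked read: NULL is itself a valid cached value meaning "no ACL", so a lockless ACCESS_ONCE() peek may return it immediately; any non-NULL value -- a real ACL or the EXT3_ACL_NOT_CACHED sentinel -- must be re-read under i_lock before a reference is taken. The shape of the pattern, reduced to a sketch:

#include <linux/fs.h>
#include <linux/posix_acl.h>

#define NOT_CACHED ((void *)-1)		/* stands in for EXT3_ACL_NOT_CACHED */

static struct posix_acl *cached_acl(struct inode *inode,
				    struct posix_acl **slot)
{
	struct posix_acl *acl = ACCESS_ONCE(*slot);	/* lockless peek */

	if (acl) {				/* NULL: cached "no ACL" */
		spin_lock(&inode->i_lock);
		acl = *slot;			/* re-read under the lock */
		if (acl != NOT_CACHED)
			acl = posix_acl_dup(acl);	/* take a reference */
		spin_unlock(&inode->i_lock);
	}
	return acl;
}
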
@@ -2374,7 +2374,7 @@ void ext3_truncate(struct inode *inode) struct page *page; if (!ext3_can_truncate(inode)) - return; + goto out_notrans; if (inode->i_size == 0 && ext3_should_writeback_data(inode)) ei->i_state |= EXT3_STATE_FLUSH_ON_CLOSE; @@ -2390,7 +2390,7 @@ void ext3_truncate(struct inode *inode) page = grab_cache_page(mapping, inode->i_size >> PAGE_CACHE_SHIFT); if (!page) - return; + goto out_notrans; } handle = start_transaction(inode); @@ -2401,7 +2401,7 @@ void ext3_truncate(struct inode *inode) unlock_page(page); page_cache_release(page); } - return; /* AKPM: return what? */ + goto out_notrans; } last_block = (inode->i_size + blocksize-1) @@ -2525,6 +2525,14 @@ out_stop: ext3_orphan_del(handle, inode); ext3_journal_stop(handle); + return; +out_notrans: + /* + * Delete the inode from orphan list so that it doesn't stay there + * forever and trigger assertion on umount. + */ + if (inode->i_nlink) + ext3_orphan_del(NULL, inode); } static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb, @@ -3122,12 +3130,6 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr) rc = inode_setattr(inode, attr); - /* If inode_setattr's call to ext3_truncate failed to get a - * transaction handle at all, we need to clean up the in-core - * orphan list manually. */ - if (inode->i_nlink) - ext3_orphan_del(NULL, inode); - if (!rc && (ia_valid & ATTR_MODE)) rc = ext3_acl_chmod(inode); diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c index 647e0d6..605aeed 100644 --- a/fs/ext4/acl.c +++ b/fs/ext4/acl.c @@ -129,12 +129,15 @@ fail: static inline struct posix_acl * ext4_iget_acl(struct inode *inode, struct posix_acl **i_acl) { - struct posix_acl *acl = EXT4_ACL_NOT_CACHED; + struct posix_acl *acl = ACCESS_ONCE(*i_acl); - spin_lock(&inode->i_lock); - if (*i_acl != EXT4_ACL_NOT_CACHED) - acl = posix_acl_dup(*i_acl); - spin_unlock(&inode->i_lock); + if (acl) { + spin_lock(&inode->i_lock); + acl = *i_acl; + if (acl != EXT4_ACL_NOT_CACHED) + acl = posix_acl_dup(acl); + spin_unlock(&inode->i_lock); + } return acl; } diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index f0df55a..d8673cc 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -19,7 +19,6 @@ #include <linux/random.h> #include <linux/sched.h> #include <linux/exportfs.h> -#include <linux/smp_lock.h> MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>"); MODULE_DESCRIPTION("Filesystem in Userspace"); @@ -260,9 +259,7 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid, static void fuse_umount_begin(struct super_block *sb) { - lock_kernel(); fuse_abort_conn(get_fuse_conn_super(sb)); - unlock_kernel(); } static void fuse_send_destroy(struct fuse_conn *fc) @@ -70,9 +70,7 @@ static int ioctl_fibmap(struct file *filp, int __user *p) res = get_user(block, p); if (res) return res; - lock_kernel(); res = mapping->a_ops->bmap(mapping, block); - unlock_kernel(); return put_user(res, p); } diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c index 2f0dc5a..8ba5441 100644 --- a/fs/isofs/dir.c +++ b/fs/isofs/dir.c @@ -195,9 +195,8 @@ static int do_isofs_readdir(struct inode *inode, struct file *filp, * Do not report hidden files if so instructed, or associated * files unless instructed to do so */ - if ((sbi->s_hide == 'y' && - (de->flags[-sbi->s_high_sierra] & 1)) || - (sbi->s_showassoc =='n' && + if ((sbi->s_hide && (de->flags[-sbi->s_high_sierra] & 1)) || + (!sbi->s_showassoc && (de->flags[-sbi->s_high_sierra] & 4))) { filp->f_pos += de_len; continue; diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index 068b34b..58a7963 100644 --- a/fs/isofs/inode.c 
+++ b/fs/isofs/inode.c @@ -141,13 +141,17 @@ static const struct dentry_operations isofs_dentry_ops[] = { }; struct iso9660_options{ - char map; - char rock; + unsigned int rock:1; + unsigned int cruft:1; + unsigned int hide:1; + unsigned int showassoc:1; + unsigned int nocompress:1; + unsigned int overriderockperm:1; + unsigned int uid_set:1; + unsigned int gid_set:1; + unsigned int utf8:1; + unsigned char map; char joliet; - char cruft; - char hide; - char showassoc; - char nocompress; unsigned char check; unsigned int blocksize; mode_t fmode; @@ -155,7 +159,6 @@ struct iso9660_options{ gid_t gid; uid_t uid; char *iocharset; - unsigned char utf8; /* LVE */ s32 session; s32 sbsector; @@ -312,7 +315,7 @@ enum { Opt_block, Opt_check_r, Opt_check_s, Opt_cruft, Opt_gid, Opt_ignore, Opt_iocharset, Opt_map_a, Opt_map_n, Opt_map_o, Opt_mode, Opt_nojoliet, Opt_norock, Opt_sb, Opt_session, Opt_uid, Opt_unhide, Opt_utf8, Opt_err, - Opt_nocompress, Opt_hide, Opt_showassoc, Opt_dmode, + Opt_nocompress, Opt_hide, Opt_showassoc, Opt_dmode, Opt_overriderockperm, }; static const match_table_t tokens = { @@ -340,6 +343,7 @@ static const match_table_t tokens = { {Opt_gid, "gid=%u"}, {Opt_mode, "mode=%u"}, {Opt_dmode, "dmode=%u"}, + {Opt_overriderockperm, "overriderockperm"}, {Opt_block, "block=%u"}, {Opt_ignore, "conv=binary"}, {Opt_ignore, "conv=b"}, @@ -359,24 +363,22 @@ static int parse_options(char *options, struct iso9660_options *popt) int option; popt->map = 'n'; - popt->rock = 'y'; - popt->joliet = 'y'; - popt->cruft = 'n'; - popt->hide = 'n'; - popt->showassoc = 'n'; + popt->rock = 1; + popt->joliet = 1; + popt->cruft = 0; + popt->hide = 0; + popt->showassoc = 0; popt->check = 'u'; /* unset */ popt->nocompress = 0; popt->blocksize = 1024; - popt->fmode = popt->dmode = S_IRUGO | S_IXUGO; /* - * r-x for all. The disc could - * be shared with DOS machines so - * virtually anything could be - * a valid executable. 
- */ + popt->fmode = popt->dmode = ISOFS_INVALID_MODE; + popt->uid_set = 0; + popt->gid_set = 0; popt->gid = 0; popt->uid = 0; popt->iocharset = NULL; popt->utf8 = 0; + popt->overriderockperm = 0; popt->session=-1; popt->sbsector=-1; if (!options) @@ -393,20 +395,20 @@ static int parse_options(char *options, struct iso9660_options *popt) token = match_token(p, tokens, args); switch (token) { case Opt_norock: - popt->rock = 'n'; + popt->rock = 0; break; case Opt_nojoliet: - popt->joliet = 'n'; + popt->joliet = 0; break; case Opt_hide: - popt->hide = 'y'; + popt->hide = 1; break; case Opt_unhide: case Opt_showassoc: - popt->showassoc = 'y'; + popt->showassoc = 1; break; case Opt_cruft: - popt->cruft = 'y'; + popt->cruft = 1; break; case Opt_utf8: popt->utf8 = 1; @@ -450,11 +452,13 @@ static int parse_options(char *options, struct iso9660_options *popt) if (match_int(&args[0], &option)) return 0; popt->uid = option; + popt->uid_set = 1; break; case Opt_gid: if (match_int(&args[0], &option)) return 0; popt->gid = option; + popt->gid_set = 1; break; case Opt_mode: if (match_int(&args[0], &option)) @@ -466,6 +470,9 @@ static int parse_options(char *options, struct iso9660_options *popt) return 0; popt->dmode = option; break; + case Opt_overriderockperm: + popt->overriderockperm = 1; + break; case Opt_block: if (match_int(&args[0], &option)) return 0; @@ -650,7 +657,7 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent) goto out_freebh; sbi->s_high_sierra = 1; - opt.rock = 'n'; + opt.rock = 0; h_pri = (struct hs_primary_descriptor *)vdp; goto root_found; } @@ -673,7 +680,7 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent) root_found: - if (joliet_level && (pri == NULL || opt.rock == 'n')) { + if (joliet_level && (pri == NULL || !opt.rock)) { /* This is the case of Joliet with the norock mount flag. * A disc with both Joliet and Rock Ridge is handled later */ @@ -802,22 +809,31 @@ root_found: s->s_op = &isofs_sops; s->s_export_op = &isofs_export_ops; sbi->s_mapping = opt.map; - sbi->s_rock = (opt.rock == 'y' ? 2 : 0); + sbi->s_rock = (opt.rock ? 2 : 0); sbi->s_rock_offset = -1; /* initial offset, will guess until SP is found*/ sbi->s_cruft = opt.cruft; sbi->s_hide = opt.hide; sbi->s_showassoc = opt.showassoc; sbi->s_uid = opt.uid; sbi->s_gid = opt.gid; + sbi->s_uid_set = opt.uid_set; + sbi->s_gid_set = opt.gid_set; sbi->s_utf8 = opt.utf8; sbi->s_nocompress = opt.nocompress; + sbi->s_overriderockperm = opt.overriderockperm; /* * It would be incredibly stupid to allow people to mark every file * on the disk as suid, so we merely allow them to set the default * permissions. 
*/ - sbi->s_fmode = opt.fmode & 0777; - sbi->s_dmode = opt.dmode & 0777; + if (opt.fmode != ISOFS_INVALID_MODE) + sbi->s_fmode = opt.fmode & 0777; + else + sbi->s_fmode = ISOFS_INVALID_MODE; + if (opt.dmode != ISOFS_INVALID_MODE) + sbi->s_dmode = opt.dmode & 0777; + else + sbi->s_dmode = ISOFS_INVALID_MODE; /* * Read the root inode, which _may_ result in changing @@ -1095,18 +1111,6 @@ static const struct address_space_operations isofs_aops = { .bmap = _isofs_bmap }; -static inline void test_and_set_uid(uid_t *p, uid_t value) -{ - if (value) - *p = value; -} - -static inline void test_and_set_gid(gid_t *p, gid_t value) -{ - if (value) - *p = value; -} - static int isofs_read_level3_size(struct inode *inode) { unsigned long bufsize = ISOFS_BUFFER_SIZE(inode); @@ -1261,7 +1265,10 @@ static int isofs_read_inode(struct inode *inode) ei->i_file_format = isofs_file_normal; if (de->flags[-high_sierra] & 2) { - inode->i_mode = sbi->s_dmode | S_IFDIR; + if (sbi->s_dmode != ISOFS_INVALID_MODE) + inode->i_mode = S_IFDIR | sbi->s_dmode; + else + inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO; inode->i_nlink = 1; /* * Set to 1. We know there are 2, but * the find utility tries to optimize @@ -1270,8 +1277,16 @@ static int isofs_read_inode(struct inode *inode) * do it the hard way. */ } else { - /* Everybody gets to read the file. */ - inode->i_mode = sbi->s_fmode | S_IFREG; + if (sbi->s_fmode != ISOFS_INVALID_MODE) { + inode->i_mode = S_IFREG | sbi->s_fmode; + } else { + /* + * Set default permissions: r-x for all. The disc + * could be shared with DOS machines so virtually + * anything could be a valid executable. + */ + inode->i_mode = S_IFREG | S_IRUGO | S_IXUGO; + } inode->i_nlink = 1; } inode->i_uid = sbi->s_uid; @@ -1300,7 +1315,7 @@ static int isofs_read_inode(struct inode *inode) * this CDROM was mounted with the cruft option. 
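
ISOFS_INVALID_MODE, defined in the isofs.h hunk below as ((mode_t) -1), is a safe "option not given" sentinel precisely because every user-supplied mode is masked with 0777 before use, so no valid value can collide with it; that is what lets the mount and inode code here distinguish an explicit fmode=/dmode= option from the historical r-x default. A self-contained sketch of the test:

#include <stdio.h>
#include <sys/types.h>

#define INVALID_MODE ((mode_t) -1)	/* mirrors ISOFS_INVALID_MODE */

static mode_t effective_fmode(mode_t opt_fmode)
{
	if (opt_fmode != INVALID_MODE)
		return opt_fmode & 0777;	/* explicit fmode= option */
	return 0555;	/* default: r-x for all, per the comment above */
}

int main(void)
{
	printf("%o\n", (unsigned int) effective_fmode(INVALID_MODE));	/* 555 */
	printf("%o\n", (unsigned int) effective_fmode(0644));		/* 644 */
	return 0;
}
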
*/ - if (sbi->s_cruft == 'y') + if (sbi->s_cruft) inode->i_size &= 0x00ffffff; if (de->interleave[0]) { @@ -1346,9 +1361,18 @@ static int isofs_read_inode(struct inode *inode) if (!high_sierra) { parse_rock_ridge_inode(de, inode); /* if we want uid/gid set, override the rock ridge setting */ - test_and_set_uid(&inode->i_uid, sbi->s_uid); - test_and_set_gid(&inode->i_gid, sbi->s_gid); + if (sbi->s_uid_set) + inode->i_uid = sbi->s_uid; + if (sbi->s_gid_set) + inode->i_gid = sbi->s_gid; } + /* Now set final access rights if overriding rock ridge setting */ + if (S_ISDIR(inode->i_mode) && sbi->s_overriderockperm && + sbi->s_dmode != ISOFS_INVALID_MODE) + inode->i_mode = S_IFDIR | sbi->s_dmode; + if (S_ISREG(inode->i_mode) && sbi->s_overriderockperm && + sbi->s_fmode != ISOFS_INVALID_MODE) + inode->i_mode = S_IFREG | sbi->s_fmode; /* Install the inode operations vector */ if (S_ISREG(inode->i_mode)) { diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h index ccbf72f..7d33de8 100644 --- a/fs/isofs/isofs.h +++ b/fs/isofs/isofs.h @@ -35,21 +35,20 @@ struct isofs_sb_info { unsigned long s_log_zone_size; unsigned long s_max_size; - unsigned char s_high_sierra; /* A simple flag */ - unsigned char s_mapping; int s_rock_offset; /* offset of SUSP fields within SU area */ - unsigned char s_rock; unsigned char s_joliet_level; - unsigned char s_utf8; - unsigned char s_cruft; /* Broken disks with high - byte of length containing - junk */ - unsigned char s_unhide; - unsigned char s_nosuid; - unsigned char s_nodev; - unsigned char s_nocompress; - unsigned char s_hide; - unsigned char s_showassoc; + unsigned char s_mapping; + unsigned int s_high_sierra:1; + unsigned int s_rock:2; + unsigned int s_utf8:1; + unsigned int s_cruft:1; /* Broken disks with high byte of length + * containing junk */ + unsigned int s_nocompress:1; + unsigned int s_hide:1; + unsigned int s_showassoc:1; + unsigned int s_overriderockperm:1; + unsigned int s_uid_set:1; + unsigned int s_gid_set:1; mode_t s_fmode; mode_t s_dmode; @@ -58,6 +57,8 @@ struct isofs_sb_info { struct nls_table *s_nls_iocharset; /* Native language support table */ }; +#define ISOFS_INVALID_MODE ((mode_t) -1) + static inline struct isofs_sb_info *ISOFS_SB(struct super_block *sb) { return sb->s_fs_info; diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c index 8299889..eaa8313 100644 --- a/fs/isofs/namei.c +++ b/fs/isofs/namei.c @@ -142,9 +142,9 @@ isofs_find_entry(struct inode *dir, struct dentry *dentry, */ match = 0; if (dlen > 0 && - (sbi->s_hide =='n' || + (!sbi->s_hide || (!(de->flags[-sbi->s_high_sierra] & 1))) && - (sbi->s_showassoc =='y' || + (sbi->s_showassoc || (!(de->flags[-sbi->s_high_sierra] & 4)))) { match = (isofs_cmp(dentry, dpnt, dlen) == 0); } diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index ed886e6..73242ba 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -1686,35 +1686,6 @@ out: return; } -/* - * journal_try_to_free_buffers() could race with journal_commit_transaction() - * The latter might still hold the a count on buffers when inspecting - * them on t_syncdata_list or t_locked_list. - * - * journal_try_to_free_buffers() will call this function to - * wait for the current transaction to finish syncing data buffers, before - * tryinf to free that buffer. - * - * Called with journal->j_state_lock held. 
- */ -static void journal_wait_for_transaction_sync_data(journal_t *journal) -{ - transaction_t *transaction = NULL; - tid_t tid; - - spin_lock(&journal->j_state_lock); - transaction = journal->j_committing_transaction; - - if (!transaction) { - spin_unlock(&journal->j_state_lock); - return; - } - - tid = transaction->t_tid; - spin_unlock(&journal->j_state_lock); - log_wait_commit(journal, tid); -} - /** * int journal_try_to_free_buffers() - try to free page buffers. * @journal: journal for operation @@ -1786,25 +1757,6 @@ int journal_try_to_free_buffers(journal_t *journal, ret = try_to_free_buffers(page); - /* - * There are a number of places where journal_try_to_free_buffers() - * could race with journal_commit_transaction(), the later still - * holds the reference to the buffers to free while processing them. - * try_to_free_buffers() failed to free those buffers. Some of the - * caller of releasepage() request page buffers to be dropped, otherwise - * treat the fail-to-free as errors (such as generic_file_direct_IO()) - * - * So, if the caller of try_to_release_page() wants the synchronous - * behaviour(i.e make sure buffers are dropped upon return), - * let's wait for the current transaction to finish flush of - * dirty data buffers, then try to free those buffers again, - * with the journal locked. - */ - if (ret == 0 && (gfp_mask & __GFP_WAIT) && (gfp_mask & __GFP_FS)) { - journal_wait_for_transaction_sync_data(journal); - ret = try_to_free_buffers(page); - } - busy: return ret; } diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c index 3aebe32..6ac693f 100644 --- a/fs/minix/bitmap.c +++ b/fs/minix/bitmap.c @@ -12,13 +12,14 @@ /* bitmap.c contains the code that handles the inode and block bitmaps */ #include "minix.h" -#include <linux/smp_lock.h> #include <linux/buffer_head.h> #include <linux/bitops.h> #include <linux/sched.h> static const int nibblemap[] = { 4,3,3,2,3,2,2,1,3,2,2,1,2,1,1,0 }; +static DEFINE_SPINLOCK(bitmap_lock); + static unsigned long count_free(struct buffer_head *map[], unsigned numblocks, __u32 numbits) { unsigned i, j, sum = 0; @@ -69,11 +70,11 @@ void minix_free_block(struct inode *inode, unsigned long block) return; } bh = sbi->s_zmap[zone]; - lock_kernel(); + spin_lock(&bitmap_lock); if (!minix_test_and_clear_bit(bit, bh->b_data)) printk("minix_free_block (%s:%lu): bit already cleared\n", sb->s_id, block); - unlock_kernel(); + spin_unlock(&bitmap_lock); mark_buffer_dirty(bh); return; } @@ -88,18 +89,18 @@ int minix_new_block(struct inode * inode) struct buffer_head *bh = sbi->s_zmap[i]; int j; - lock_kernel(); + spin_lock(&bitmap_lock); j = minix_find_first_zero_bit(bh->b_data, bits_per_zone); if (j < bits_per_zone) { minix_set_bit(j, bh->b_data); - unlock_kernel(); + spin_unlock(&bitmap_lock); mark_buffer_dirty(bh); j += i * bits_per_zone + sbi->s_firstdatazone-1; if (j < sbi->s_firstdatazone || j >= sbi->s_nzones) break; return j; } - unlock_kernel(); + spin_unlock(&bitmap_lock); } return 0; } @@ -211,10 +212,10 @@ void minix_free_inode(struct inode * inode) minix_clear_inode(inode); /* clear on-disk copy */ bh = sbi->s_imap[ino]; - lock_kernel(); + spin_lock(&bitmap_lock); if (!minix_test_and_clear_bit(bit, bh->b_data)) printk("minix_free_inode: bit %lu already cleared\n", bit); - unlock_kernel(); + spin_unlock(&bitmap_lock); mark_buffer_dirty(bh); out: clear_inode(inode); /* clear in-memory copy */ @@ -237,7 +238,7 @@ struct inode * minix_new_inode(const struct inode * dir, int * error) j = bits_per_zone; bh = NULL; *error = -ENOSPC; - lock_kernel(); + 
spin_lock(&bitmap_lock); for (i = 0; i < sbi->s_imap_blocks; i++) { bh = sbi->s_imap[i]; j = minix_find_first_zero_bit(bh->b_data, bits_per_zone); @@ -245,17 +246,17 @@ struct inode * minix_new_inode(const struct inode * dir, int * error) break; } if (!bh || j >= bits_per_zone) { - unlock_kernel(); + spin_unlock(&bitmap_lock); iput(inode); return NULL; } if (minix_test_and_set_bit(j, bh->b_data)) { /* shouldn't happen */ - unlock_kernel(); + spin_unlock(&bitmap_lock); printk("minix_new_inode: bit already set\n"); iput(inode); return NULL; } - unlock_kernel(); + spin_unlock(&bitmap_lock); mark_buffer_dirty(bh); j += i * bits_per_zone; if (!j || j > sbi->s_ninodes) { diff --git a/fs/minix/dir.c b/fs/minix/dir.c index e5f2064..d407e7a 100644 --- a/fs/minix/dir.c +++ b/fs/minix/dir.c @@ -11,7 +11,6 @@ #include "minix.h" #include <linux/buffer_head.h> #include <linux/highmem.h> -#include <linux/smp_lock.h> #include <linux/swap.h> typedef struct minix_dir_entry minix_dirent; @@ -20,6 +19,7 @@ typedef struct minix3_dir_entry minix3_dirent; static int minix_readdir(struct file *, void *, filldir_t); const struct file_operations minix_dir_operations = { + .llseek = generic_file_llseek, .read = generic_read_dir, .readdir = minix_readdir, .fsync = simple_fsync, @@ -102,8 +102,6 @@ static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir) char *name; __u32 inumber; - lock_kernel(); - pos = (pos + chunk_size-1) & ~(chunk_size-1); if (pos >= inode->i_size) goto done; @@ -146,7 +144,6 @@ static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir) done: filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset; - unlock_kernel(); return 0; } diff --git a/fs/minix/inode.c b/fs/minix/inode.c index f91a236..74ea82d 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -35,8 +35,6 @@ static void minix_put_super(struct super_block *sb) int i; struct minix_sb_info *sbi = minix_sb(sb); - lock_kernel(); - if (!(sb->s_flags & MS_RDONLY)) { if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */ sbi->s_ms->s_state = sbi->s_mount_state; @@ -50,8 +48,6 @@ static void minix_put_super(struct super_block *sb) kfree(sbi->s_imap); sb->s_fs_info = NULL; kfree(sbi); - - unlock_kernel(); } static struct kmem_cache * minix_inode_cachep; diff --git a/fs/proc/Makefile b/fs/proc/Makefile index 63d9651..11a7b5c 100644 --- a/fs/proc/Makefile +++ b/fs/proc/Makefile @@ -18,6 +18,7 @@ proc-y += meminfo.o proc-y += stat.o proc-y += uptime.o proc-y += version.o +proc-y += softirqs.o proc-$(CONFIG_PROC_SYSCTL) += proc_sysctl.o proc-$(CONFIG_NET) += proc_net.o proc-$(CONFIG_PROC_KCORE) += kcore.o diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c index fc6c302..7ba79a5 100644 --- a/fs/proc/proc_devtree.c +++ b/fs/proc/proc_devtree.c @@ -195,20 +195,20 @@ void proc_device_tree_add_node(struct device_node *np, p = fixup_name(np, de, p); ent = proc_mkdir(p, de); - if (ent == 0) + if (ent == NULL) break; proc_device_tree_add_node(child, ent); } of_node_put(child); - for (pp = np->properties; pp != 0; pp = pp->next) { + for (pp = np->properties; pp != NULL; pp = pp->next) { p = pp->name; if (duplicate_name(de, p)) p = fixup_name(np, de, p); ent = __proc_device_tree_add_prop(de, pp, p); - if (ent == 0) + if (ent == NULL) break; } } @@ -221,10 +221,10 @@ void __init proc_device_tree_init(void) struct device_node *root; proc_device_tree = proc_mkdir("device-tree", NULL); - if (proc_device_tree == 0) + if (proc_device_tree == NULL) return; root = of_find_node_by_path("/"); - if (root == 0) { + 
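
The minix conversion above is the standard recipe for retiring lock_kernel(): the bitmap operations only need to exclude each other, so a dedicated file-local spinlock both shrinks the critical sections and documents exactly what is protected. The shape of the change, with generic bitops standing in for the minix_* variants:

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(bitmap_lock);	/* replaces the BKL here */

static void free_map_bit(unsigned long *map, unsigned int bit)
{
	spin_lock(&bitmap_lock);	/* was: lock_kernel() */
	if (!test_and_clear_bit(bit, map))
		printk("bit %u already cleared\n", bit);
	spin_unlock(&bitmap_lock);	/* was: unlock_kernel() */
}
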
if (root == NULL) { printk(KERN_ERR "/proc/device-tree: can't find root\n"); return; } diff --git a/fs/proc/softirqs.c b/fs/proc/softirqs.c new file mode 100644 index 0000000..1807c24 --- /dev/null +++ b/fs/proc/softirqs.c @@ -0,0 +1,44 @@ +#include <linux/init.h> +#include <linux/kernel_stat.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> + +/* + * /proc/softirqs ... display the number of softirqs + */ +static int show_softirqs(struct seq_file *p, void *v) +{ + int i, j; + + seq_printf(p, " "); + for_each_possible_cpu(i) + seq_printf(p, "CPU%-8d", i); + seq_printf(p, "\n"); + + for (i = 0; i < NR_SOFTIRQS; i++) { + seq_printf(p, "%8s:", softirq_to_name[i]); + for_each_possible_cpu(j) + seq_printf(p, " %10u", kstat_softirqs_cpu(i, j)); + seq_printf(p, "\n"); + } + return 0; +} + +static int softirqs_open(struct inode *inode, struct file *file) +{ + return single_open(file, show_softirqs, NULL); +} + +static const struct file_operations proc_softirqs_operations = { + .open = softirqs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init proc_softirqs_init(void) +{ + proc_create("softirqs", 0, NULL, &proc_softirqs_operations); + return 0; +} +module_init(proc_softirqs_init); diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 81e4eb6..7cc726c 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -29,6 +29,8 @@ static int show_stat(struct seq_file *p, void *v) cputime64_t user, nice, system, idle, iowait, irq, softirq, steal; cputime64_t guest; u64 sum = 0; + u64 sum_softirq = 0; + unsigned int per_softirq_sums[NR_SOFTIRQS] = {0}; struct timespec boottime; unsigned int per_irq_sum; @@ -53,6 +55,13 @@ static int show_stat(struct seq_file *p, void *v) sum += kstat_irqs_cpu(j, i); } sum += arch_irq_stat_cpu(i); + + for (j = 0; j < NR_SOFTIRQS; j++) { + unsigned int softirq_stat = kstat_softirqs_cpu(j, i); + + per_softirq_sums[j] += softirq_stat; + sum_softirq += softirq_stat; + } } sum += arch_irq_stat(); @@ -115,6 +124,12 @@ static int show_stat(struct seq_file *p, void *v) nr_running(), nr_iowait()); + seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq); + + for (i = 0; i < NR_SOFTIRQS; i++) + seq_printf(p, " %u", per_softirq_sums[i]); + seq_printf(p, "\n"); + return 0; } diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 5edcc3f..0872afa 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -166,12 +166,7 @@ static const struct file_operations proc_vmcore_operations = { static struct vmcore* __init get_new_element(void) { - struct vmcore *p; - - p = kmalloc(sizeof(*p), GFP_KERNEL); - if (p) - memset(p, 0, sizeof(*p)); - return p; + return kzalloc(sizeof(struct vmcore), GFP_KERNEL); } static u64 __init get_vmcore_size_elf64(char *elfptr) diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c index 4beb964..128d3f7 100644 --- a/fs/reiserfs/do_balan.c +++ b/fs/reiserfs/do_balan.c @@ -1270,9 +1270,8 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h RFALSE(ih, "PAP-12210: ih must be 0"); - if (is_direntry_le_ih - (aux_ih = - B_N_PITEM_HEAD(tbS0, item_pos))) { + aux_ih = B_N_PITEM_HEAD(tbS0, item_pos); + if (is_direntry_le_ih(aux_ih)) { /* we append to directory item */ int entry_count; diff --git a/fs/reiserfs/lbalance.c b/fs/reiserfs/lbalance.c index 381750a..03d85cb 100644 --- a/fs/reiserfs/lbalance.c +++ b/fs/reiserfs/lbalance.c @@ -390,7 +390,8 @@ static void leaf_item_bottle(struct buffer_info *dest_bi, if (last_first == FIRST_TO_LAST) { /* if ( if item in position item_num in 
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 5edcc3f..0872afa 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -166,12 +166,7 @@ static const struct file_operations proc_vmcore_operations = { static struct vmcore* __init get_new_element(void) { - struct vmcore *p; - - p = kmalloc(sizeof(*p), GFP_KERNEL); - if (p) - memset(p, 0, sizeof(*p)); - return p; + return kzalloc(sizeof(struct vmcore), GFP_KERNEL); } static u64 __init get_vmcore_size_elf64(char *elfptr) diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c index 4beb964..128d3f7 100644 --- a/fs/reiserfs/do_balan.c +++ b/fs/reiserfs/do_balan.c @@ -1270,9 +1270,8 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h RFALSE(ih, "PAP-12210: ih must be 0"); - if (is_direntry_le_ih - (aux_ih = - B_N_PITEM_HEAD(tbS0, item_pos))) { + aux_ih = B_N_PITEM_HEAD(tbS0, item_pos); + if (is_direntry_le_ih(aux_ih)) { /* we append to directory item */ int entry_count; diff --git a/fs/reiserfs/lbalance.c b/fs/reiserfs/lbalance.c index 381750a..03d85cb 100644 --- a/fs/reiserfs/lbalance.c +++ b/fs/reiserfs/lbalance.c @@ -390,7 +390,8 @@ static void leaf_item_bottle(struct buffer_info *dest_bi, if (last_first == FIRST_TO_LAST) { /* if ( if item in position item_num in buffer SOURCE is directory item ) */ - if (is_direntry_le_ih(ih = B_N_PITEM_HEAD(src, item_num))) + ih = B_N_PITEM_HEAD(src, item_num); + if (is_direntry_le_ih(ih)) leaf_copy_dir_entries(dest_bi, src, FIRST_TO_LAST, item_num, 0, cpy_bytes); else { @@ -418,7 +419,8 @@ static void leaf_item_bottle(struct buffer_info *dest_bi, } } else { /* if ( if item in position item_num in buffer SOURCE is directory item ) */ - if (is_direntry_le_ih(ih = B_N_PITEM_HEAD(src, item_num))) + ih = B_N_PITEM_HEAD(src, item_num); + if (is_direntry_le_ih(ih)) leaf_copy_dir_entries(dest_bi, src, LAST_TO_FIRST, item_num, I_ENTRY_COUNT(ih) - cpy_bytes, @@ -774,8 +776,8 @@ void leaf_delete_items(struct buffer_info *cur_bi, int last_first, leaf_delete_items_entirely(cur_bi, first + 1, del_num - 1); - if (is_direntry_le_ih - (ih = B_N_PITEM_HEAD(bh, B_NR_ITEMS(bh) - 1)) + ih = B_N_PITEM_HEAD(bh, B_NR_ITEMS(bh) - 1); + if (is_direntry_le_ih(ih)) /* the last item is directory */ /* len = numbers of directory entries in this item */ len = ih_entry_count(ih); diff --git a/fs/seq_file.c b/fs/seq_file.c index 7f40f30c..6c95927 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -640,6 +640,26 @@ int seq_puts(struct seq_file *m, const char *s) } EXPORT_SYMBOL(seq_puts); +/** + * seq_write - write arbitrary data to buffer + * @seq: seq_file identifying the buffer to which data should be written + * @data: data address + * @len: number of bytes + * + * Return 0 on success, non-zero otherwise. + */ +int seq_write(struct seq_file *seq, const void *data, size_t len) +{ + if (seq->count + len < seq->size) { + memcpy(seq->buf + seq->count, data, len); + seq->count += len; + return 0; + } + seq->count = seq->size; + return -1; +} +EXPORT_SYMBOL(seq_write); + struct list_head *seq_list_start(struct list_head *head, loff_t pos) { struct list_head *lh;
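The seq_write() helper added just above copies raw bytes straight into the seq_file buffer, unlike the formatting helpers around it. A minimal, hypothetical show() callback using it might look like this (the blob structure and the private-pointer setup are invented for the example):

static int blob_show(struct seq_file *m, void *v)
{
	/* hypothetical payload hung off m->private by the open routine */
	struct blob {
		const void *data;
		size_t len;
	} *b = m->private;

	/* emit the bytes verbatim; seq_write() returns -1 if they do not fit */
	return seq_write(m, b->data, b->len);
}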
diff --git a/fs/super.c b/fs/super.c --- a/fs/super.c +++ b/fs/super.c @@ -545,24 +545,18 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force) if ((flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY)) { if (force) mark_files_ro(sb); - else if (!fs_may_remount_ro(sb)) { - unlock_kernel(); + else if (!fs_may_remount_ro(sb)) return -EBUSY; - } retval = vfs_dq_off(sb, 1); - if (retval < 0 && retval != -ENOSYS) { - unlock_kernel(); + if (retval < 0 && retval != -ENOSYS) return -EBUSY; - } } remount_rw = !(flags & MS_RDONLY) && (sb->s_flags & MS_RDONLY); if (sb->s_op->remount_fs) { retval = sb->s_op->remount_fs(sb, &flags, data); - if (retval) { - unlock_kernel(); + if (retval) return retval; - } } sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK); if (remount_rw) diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c index c779807..4e50286 100644 --- a/fs/sysv/dir.c +++ b/fs/sysv/dir.c @@ -15,13 +15,13 @@ #include <linux/pagemap.h> #include <linux/highmem.h> -#include <linux/smp_lock.h> #include <linux/swap.h> #include "sysv.h" static int sysv_readdir(struct file *, void *, filldir_t); const struct file_operations sysv_dir_operations = { + .llseek = generic_file_llseek, .read = generic_read_dir, .readdir = sysv_readdir, .fsync = simple_fsync, @@ -74,8 +74,6 @@ static int sysv_readdir(struct file * filp, void * dirent, filldir_t filldir) unsigned long n = pos >> PAGE_CACHE_SHIFT; unsigned long npages = dir_pages(inode); - lock_kernel(); - pos = (pos + SYSV_DIRSIZE-1) & ~(SYSV_DIRSIZE-1); if (pos >= inode->i_size) goto done; @@ -113,7 +111,6 @@ static int sysv_readdir(struct file * filp, void * dirent, filldir_t filldir) done: filp->f_pos = ((loff_t)n << PAGE_CACHE_SHIFT) | offset; - unlock_kernel(); return 0; } diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c index 4799234..9824743 100644 --- a/fs/sysv/inode.c +++ b/fs/sysv/inode.c @@ -21,7 +21,6 @@ * the superblock. */ -#include <linux/smp_lock.h> #include <linux/highuid.h> #include <linux/slab.h> #include <linux/init.h> @@ -37,7 +36,6 @@ static int sysv_sync_fs(struct super_block *sb, int wait) unsigned long time = get_seconds(), old_time; lock_super(sb); - lock_kernel(); /* * If we are going to write out the super block, @@ -52,7 +50,6 @@ static int sysv_sync_fs(struct super_block *sb, int wait) mark_buffer_dirty(sbi->s_bh2); } - unlock_kernel(); unlock_super(sb); return 0; @@ -82,8 +79,6 @@ static void sysv_put_super(struct super_block *sb) { struct sysv_sb_info *sbi = SYSV_SB(sb); - lock_kernel(); - if (sb->s_dirt) sysv_write_super(sb); @@ -99,8 +94,6 @@ static void sysv_put_super(struct super_block *sb) brelse(sbi->s_bh2); kfree(sbi); - - unlock_kernel(); } static int sysv_statfs(struct dentry *dentry, struct kstatfs *buf) @@ -275,7 +268,6 @@ int sysv_write_inode(struct inode *inode, int wait) return -EIO; } - lock_kernel(); raw_inode->i_mode = cpu_to_fs16(sbi, inode->i_mode); raw_inode->i_uid = cpu_to_fs16(sbi, fs_high2lowuid(inode->i_uid)); raw_inode->i_gid = cpu_to_fs16(sbi, fs_high2lowgid(inode->i_gid)); @@ -291,7 +283,6 @@ int sysv_write_inode(struct inode *inode, int wait) for (block = 0; block < 10+1+1+1; block++) write3byte(sbi, (u8 *)&si->i_data[block], &raw_inode->i_data[3*block]); - unlock_kernel(); mark_buffer_dirty(bh); if (wait) { sync_dirty_buffer(bh); @@ -315,9 +306,7 @@ static void sysv_delete_inode(struct inode *inode) truncate_inode_pages(&inode->i_data, 0); inode->i_size = 0; sysv_truncate(inode); - lock_kernel(); sysv_free_inode(inode); - unlock_kernel(); } static struct kmem_cache *sysv_inode_cachep; diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index af19144..eaf6d89 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c @@ -91,7 +91,6 @@ static int shrink_liability(struct ubifs_info *c, int nr_to_write) return nr_written; } - /** * run_gc - run garbage collector. * @c: UBIFS file-system description object @@ -628,7 +627,7 @@ void ubifs_convert_page_budget(struct ubifs_info *c) * * This function releases budget corresponding to a dirty inode. It is usually * called after the inode has been written to the media and marked as - * clean. + * clean. It also causes the "no space" flags to be cleared. */ void ubifs_release_dirty_inode_budget(struct ubifs_info *c, struct ubifs_inode *ui) @@ -636,6 +635,7 @@ void ubifs_release_dirty_inode_budget(struct ubifs_info *c, struct ubifs_budget_req req; memset(&req, 0, sizeof(struct ubifs_budget_req)); + /* The "no space" flags will be cleared because dd_growth is > 0 */ req.dd_growth = c->inode_budget + ALIGN(ui->data_len, 8); ubifs_release_budget(c, &req); } diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index f55d523..552fb01 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -528,6 +528,25 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir, inode->i_nlink, dir->i_ino); ubifs_assert(mutex_is_locked(&dir->i_mutex)); ubifs_assert(mutex_is_locked(&inode->i_mutex)); + + /* + * Return -ENOENT if we've raced with unlink and i_nlink is 0. Doing + * otherwise has the potential to corrupt the orphan inode list. + * + * Indeed, consider a scenario when 'vfs_link(dirA/fileA, dirB/fileB)' + * and 'vfs_unlink(dirA/fileA)' race. 'vfs_link()' does not + * lock 'dirA->i_mutex', so this is possible. Both of the functions + * lock 'fileA->i_mutex' though.
Suppose 'vfs_unlink()' wins, and takes + * 'fileA->i_mutex' mutex first. Suppose 'fileA->i_nlink' is 1. In this + * case 'ubifs_unlink()' will drop the last reference, and put 'inodeA' + * to the list of orphans. After this, 'vfs_link()' will link + * 'dirB/fileB' to 'inodeA'. This is a problem because, for example, + * the subsequent 'vfs_unlink(dirB/fileB)' will add the same inode + * to the list of orphans. + */ + if (inode->i_nlink == 0) + return -ENOENT; + err = dbg_check_synced_i_size(inode); if (err) return err; diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index e8e632a..bc58571 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c @@ -293,13 +293,14 @@ void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last) * * This function is called when the write-buffer timer expires. */ -static void wbuf_timer_callback_nolock(unsigned long data) +static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer) { - struct ubifs_wbuf *wbuf = (struct ubifs_wbuf *)data; + struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer); wbuf->need_sync = 1; wbuf->c->need_wbuf_sync = 1; ubifs_wake_up_bgt(wbuf->c); + return HRTIMER_NORESTART; } /** @@ -308,13 +309,12 @@ static void wbuf_timer_callback_nolock(unsigned long data) */ static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf) { - ubifs_assert(!timer_pending(&wbuf->timer)); + ubifs_assert(!hrtimer_active(&wbuf->timer)); - if (!wbuf->timeout) + if (!ktime_to_ns(wbuf->softlimit)) return; - - wbuf->timer.expires = jiffies + wbuf->timeout; - add_timer(&wbuf->timer); + hrtimer_start_range_ns(&wbuf->timer, wbuf->softlimit, wbuf->delta, + HRTIMER_MODE_REL); } /** @@ -329,7 +329,7 @@ static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf) * should be canceled. */ wbuf->need_sync = 0; - del_timer(&wbuf->timer); + hrtimer_cancel(&wbuf->timer); } /** @@ -825,6 +825,7 @@ out: int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf) { size_t size; + ktime_t hardlimit; wbuf->buf = kmalloc(c->min_io_size, GFP_KERNEL); if (!wbuf->buf) @@ -845,14 +846,21 @@ int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf) wbuf->sync_callback = NULL; mutex_init(&wbuf->io_mutex); spin_lock_init(&wbuf->lock); - wbuf->c = c; - init_timer(&wbuf->timer); - wbuf->timer.function = wbuf_timer_callback_nolock; - wbuf->timer.data = (unsigned long)wbuf; - wbuf->timeout = DEFAULT_WBUF_TIMEOUT; wbuf->next_ino = 0; + hrtimer_init(&wbuf->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + wbuf->timer.function = wbuf_timer_callback_nolock; + /* + * Make write-buffer soft limit to be 20% of the hard limit. The + * write-buffer timer is allowed to expire any time between the soft + * and hard limits. + */ + hardlimit = ktime_set(DEFAULT_WBUF_TIMEOUT_SECS, 0); + wbuf->delta = (DEFAULT_WBUF_TIMEOUT_SECS * NSEC_PER_SEC) * 2 / 10; + wbuf->softlimit = ktime_sub_ns(hardlimit, wbuf->delta); + hrtimer_set_expires_range_ns(&wbuf->timer, wbuf->softlimit, + wbuf->delta); return 0; } diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index 1066297..8056052 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c @@ -343,33 +343,15 @@ int ubifs_write_rcvrd_mst_node(struct ubifs_info *c) * * This function returns %1 if @offs was in the last write to the LEB whose data * is in @buf, otherwise %0 is returned. The determination is made by checking - * for subsequent empty space starting from the next min_io_size boundary (or a - * bit less than the common header size if min_io_size is one). 
+ * for subsequent empty space starting from the next @c->min_io_size boundary. */ static int is_last_write(const struct ubifs_info *c, void *buf, int offs) { - int empty_offs; - int check_len; + int empty_offs, check_len; uint8_t *p; - if (c->min_io_size == 1) { - check_len = c->leb_size - offs; - p = buf + check_len; - for (; check_len > 0; check_len--) - if (*--p != 0xff) - break; - /* - * 'check_len' is the size of the corruption which cannot be - * more than the size of 1 node if it was caused by an unclean - * unmount. - */ - if (check_len > UBIFS_MAX_NODE_SZ) - return 0; - return 1; - } - /* - * Round up to the next c->min_io_size boundary i.e. 'offs' is in the + * Round up to the next @c->min_io_size boundary i.e. @offs is in the * last wbuf written. After that should be empty space. */ empty_offs = ALIGN(offs + 1, c->min_io_size); @@ -392,7 +374,7 @@ static int is_last_write(const struct ubifs_info *c, void *buf, int offs) * * This function pads up to the next min_io_size boundary (if there is one) and * sets empty space to all 0xff. @buf, @offs and @len are updated to the next - * min_io_size boundary (if there is one). + * @c->min_io_size boundary. */ static void clean_buf(const struct ubifs_info *c, void **buf, int lnum, int *offs, int *len) @@ -402,11 +384,6 @@ static void clean_buf(const struct ubifs_info *c, void **buf, int lnum, lnum = lnum; dbg_rcvry("cleaning corruption at %d:%d", lnum, *offs); - if (c->min_io_size == 1) { - memset(*buf, 0xff, c->leb_size - *offs); - return; - } - ubifs_assert(!(*offs & 7)); empty_offs = ALIGN(*offs, c->min_io_size); pad_len = empty_offs - *offs; diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 3260b73..79fad43 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -361,6 +361,11 @@ static void ubifs_delete_inode(struct inode *inode) out: if (ui->dirty) ubifs_release_dirty_inode_budget(c, ui); + else { + /* We've deleted something - clean the "no space" flags */ + c->nospace = c->nospace_rp = 0; + smp_wmb(); + } clear_inode(inode); } @@ -792,7 +797,7 @@ static int alloc_wbufs(struct ubifs_info *c) * does not need to be synchronized by timer. */ c->jheads[GCHD].wbuf.dtype = UBI_LONGTERM; - c->jheads[GCHD].wbuf.timeout = 0; + c->jheads[GCHD].wbuf.softlimit = ktime_set(0, 0); return 0; } @@ -933,6 +938,27 @@ static const match_table_t tokens = { }; /** + * parse_standard_option - parse a standard mount option. + * @option: the option to parse + * + * Normally, standard mount options like "sync" are passed to file-systems as + * flags. However, when a "rootflags=" kernel boot parameter is used, they may + * be present in the options string. This function tries to deal with this + * situation and parse standard options. Returns 0 if the option was not + * recognized, and the corresponding integer flag if it was. + * + * UBIFS is only interested in the "sync" option, so do not check for anything + * else. + */ +static int parse_standard_option(const char *option) +{ + ubifs_msg("parse %s", option); + if (!strcmp(option, "sync")) + return MS_SYNCHRONOUS; + return 0; +}
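For orientation: the default case of ubifs_parse_options() (next hunk) falls back to this helper, so a "sync" arriving via "rootflags=" ends up setting MS_SYNCHRONOUS on the superblock. If more standard options ever had to be honoured, the helper could grow along these lines (a hypothetical sketch only; the patch itself recognizes just "sync"):

static int parse_standard_option(const char *option)
{
	ubifs_msg("parse %s", option);
	if (!strcmp(option, "sync"))
		return MS_SYNCHRONOUS;
	if (!strcmp(option, "dirsync"))		/* hypothetical extension */
		return MS_DIRSYNC;
	return 0;	/* not a standard option we care about */
}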
+ +/** + * ubifs_parse_options - parse mount parameters. * @c: UBIFS file-system description object * @options: parameters to parse @@ -1008,9 +1034,19 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options, break; } default: - ubifs_err("unrecognized mount option \"%s\" " - "or missing value", p); - return -EINVAL; + { + unsigned long flag; + struct super_block *sb = c->vfs_sb; + + flag = parse_standard_option(p); + if (!flag) { + ubifs_err("unrecognized mount option \"%s\" " + "or missing value", p); + return -EINVAL; + } + sb->s_flags |= flag; + break; + } } } @@ -1180,6 +1216,7 @@ static int mount_ubifs(struct ubifs_info *c) if (!ubifs_compr_present(c->default_compr)) { ubifs_err("compressor \"%s\" is not compiled in", ubifs_compr_name(c->default_compr)); + err = -ENOTSUPP; goto out_free; } @@ -1656,7 +1693,7 @@ static void ubifs_remount_ro(struct ubifs_info *c) for (i = 0; i < c->jhead_cnt; i++) { ubifs_wbuf_sync(&c->jheads[i].wbuf); - del_timer_sync(&c->jheads[i].wbuf.timer); + hrtimer_cancel(&c->jheads[i].wbuf.timer); } c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY); @@ -1719,7 +1756,7 @@ static void ubifs_put_super(struct super_block *sb) if (c->jheads) for (i = 0; i < c->jhead_cnt; i++) { ubifs_wbuf_sync(&c->jheads[i].wbuf); - del_timer_sync(&c->jheads[i].wbuf.timer); + hrtimer_cancel(&c->jheads[i].wbuf.timer); } /* @@ -1911,6 +1948,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent) INIT_LIST_HEAD(&c->orph_list); INIT_LIST_HEAD(&c->orph_new); + c->vfs_sb = sb; c->highest_inum = UBIFS_FIRST_INO; c->lhead_lnum = c->ltail_lnum = UBIFS_LOG_LNUM; @@ -1945,13 +1983,10 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent) if (err) goto out_bdi; - c->vfs_sb = sb; - sb->s_fs_info = c; sb->s_magic = UBIFS_SUPER_MAGIC; sb->s_blocksize = UBIFS_BLOCK_SIZE; sb->s_blocksize_bits = UBIFS_BLOCK_SHIFT; - sb->s_dev = c->vi.cdev; sb->s_maxbytes = c->max_inode_sz = key_max_inode_size(c); if (c->max_inode_sz > MAX_LFS_FILESIZE) sb->s_maxbytes = c->max_inode_sz = MAX_LFS_FILESIZE; @@ -1996,16 +2031,9 @@ out_free: static int sb_test(struct super_block *sb, void *data) { dev_t *dev = data; + struct ubifs_info *c = sb->s_fs_info; - return sb->s_dev == *dev; -} - -static int sb_set(struct super_block *sb, void *data) -{ - dev_t *dev = data; - - sb->s_dev = *dev; - return 0; + return c->vi.cdev == *dev; } static int ubifs_get_sb(struct file_system_type *fs_type, int flags, @@ -2033,7 +2061,7 @@ static int ubifs_get_sb(struct file_system_type *fs_type, int flags, dbg_gen("opened ubi%d_%d", vi.ubi_num, vi.vol_id); - sb = sget(fs_type, &sb_test, &sb_set, &vi.cdev); + sb = sget(fs_type, &sb_test, &set_anon_super, &vi.cdev); if (IS_ERR(sb)) { err = PTR_ERR(sb); goto out_close; @@ -2073,16 +2101,11 @@ out_close: return err; } -static void ubifs_kill_sb(struct super_block *sb) -{ - generic_shutdown_super(sb); -} - static struct file_system_type ubifs_fs_type = { .name = "ubifs", .owner = THIS_MODULE, .get_sb = ubifs_get_sb, - .kill_sb = ubifs_kill_sb + .kill_sb = kill_anon_super, }; /* diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 0a8341e..1bf01d8 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -95,8 +95,8 @@ */ #define BGT_NAME_PATTERN "ubifs_bgt%d_%d" -/* Default write-buffer synchronization timeout (5 secs) */ -#define DEFAULT_WBUF_TIMEOUT (5 * HZ) +/* Default write-buffer synchronization timeout in seconds */ +#define DEFAULT_WBUF_TIMEOUT_SECS 5 /* Maximum possible inode number (only 32-bit inodes are supported now) */ #define MAX_INUM 0xFFFFFFFF
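To make the new soft/hard timeout split concrete, here is the arithmetic ubifs_wbuf_init() (earlier in this patch) performs with the default of 5 seconds, written out as a small illustrative function (the function name is invented; the computation mirrors the patch):

static void example_arm_wbuf_timer(struct ubifs_wbuf *wbuf)
{
	/* hard limit: DEFAULT_WBUF_TIMEOUT_SECS = 5 s */
	ktime_t hardlimit = ktime_set(DEFAULT_WBUF_TIMEOUT_SECS, 0);
	/* slack is 20% of the hard limit: 5 s * 2 / 10 = 1 s, in ns */
	unsigned long long delta =
		(DEFAULT_WBUF_TIMEOUT_SECS * NSEC_PER_SEC) * 2 / 10;
	/* soft limit: 5 s - 1 s = 4 s */
	ktime_t softlimit = ktime_sub_ns(hardlimit, delta);

	/* may fire anywhere in [4 s, 5 s], letting the kernel batch wakeups */
	hrtimer_start_range_ns(&wbuf->timer, softlimit, delta,
			       HRTIMER_MODE_REL);
}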
@@ -650,8 +650,10 @@ typedef int (*ubifs_lpt_scan_callback)(struct ubifs_info *c, * @io_mutex: serializes write-buffer I/O * @lock: serializes @buf, @lnum, @offs, @avail, @used, @next_ino and @inodes * fields + * @softlimit: soft write-buffer timeout interval + * @delta: hard and soft timeouts delta (the timer expire interval is @softlimit + * and @softlimit + @delta) * @timer: write-buffer timer - * @timeout: timer expire interval in jiffies * @need_sync: it is set if its timer expired and needs sync * @next_ino: points to the next position of the following inode number * @inodes: stores the inode numbers of the nodes which are in wbuf @@ -678,8 +680,9 @@ struct ubifs_wbuf { int (*sync_callback)(struct ubifs_info *c, int lnum, int free, int pad); struct mutex io_mutex; spinlock_t lock; - struct timer_list timer; - int timeout; + ktime_t softlimit; + unsigned long long delta; + struct hrtimer timer; int need_sync; int next_ino; ino_t *inodes; diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index 3d2512c..7cf3337 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c @@ -56,9 +56,7 @@ static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t off UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks); - if (i_block < 0) { - ufs_warning(inode->i_sb, "ufs_block_to_path", "block < 0"); - } else if (i_block < direct_blocks) { + if (i_block < direct_blocks) { offsets[n++] = i_block; } else if ((i_block -= direct_blocks) < indirect_blocks) { offsets[n++] = UFS_IND_BLOCK; @@ -440,8 +438,6 @@ int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head lock_kernel(); UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment); - if (fragment < 0) - goto abort_negative; if (fragment > ((UFS_NDADDR + uspi->s_apb + uspi->s_2apb + uspi->s_3apb) << uspi->s_fpbshift)) @@ -504,10 +500,6 @@ abort: unlock_kernel(); return err; -abort_negative: - ufs_warning(sb, "ufs_get_block", "block < 0"); - goto abort; - abort_too_big: ufs_warning(sb, "ufs_get_block", "block > big"); goto abort; diff --git a/include/Kbuild b/include/Kbuild index fe36acc..8d226bf 100644 --- a/include/Kbuild +++ b/include/Kbuild @@ -9,3 +9,4 @@ header-y += rdma/ header-y += video/ header-y += drm/ header-y += xen/ +header-y += scsi/ diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h new file mode 100644 index 0000000..5406a60 --- /dev/null +++ b/include/asm-generic/dma-mapping-common.h @@ -0,0 +1,190 @@ +#ifndef _ASM_GENERIC_DMA_MAPPING_H +#define _ASM_GENERIC_DMA_MAPPING_H + +#include <linux/kmemcheck.h> +#include <linux/scatterlist.h> +#include <linux/dma-debug.h> +#include <linux/dma-attrs.h> + +static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, + size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + dma_addr_t addr; + + kmemcheck_mark_initialized(ptr, size); + BUG_ON(!valid_dma_direction(dir)); + addr = ops->map_page(dev, virt_to_page(ptr), + (unsigned long)ptr & ~PAGE_MASK, size, + dir, attrs); + debug_dma_map_page(dev, virt_to_page(ptr), + (unsigned long)ptr & ~PAGE_MASK, size, + dir, addr, true); + return addr; +} + +static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, + size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->unmap_page) + ops->unmap_page(dev, addr, size, dir, attrs); + debug_dma_unmap_page(dev, addr,
size, dir, true); +} + +static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + int i, ents; + struct scatterlist *s; + + for_each_sg(sg, s, nents, i) + kmemcheck_mark_initialized(sg_virt(s), s->length); + BUG_ON(!valid_dma_direction(dir)); + ents = ops->map_sg(dev, sg, nents, dir, attrs); + debug_dma_map_sg(dev, sg, nents, ents, dir); + + return ents; +} + +static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + debug_dma_unmap_sg(dev, sg, nents, dir); + if (ops->unmap_sg) + ops->unmap_sg(dev, sg, nents, dir, attrs); +} + +static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + dma_addr_t addr; + + kmemcheck_mark_initialized(page_address(page) + offset, size); + BUG_ON(!valid_dma_direction(dir)); + addr = ops->map_page(dev, page, offset, size, dir, NULL); + debug_dma_map_page(dev, page, offset, size, dir, addr, false); + + return addr; +} + +static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->unmap_page) + ops->unmap_page(dev, addr, size, dir, NULL); + debug_dma_unmap_page(dev, addr, size, dir, false); +} + +static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, + size_t size, + enum dma_data_direction dir) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_for_cpu) + ops->sync_single_for_cpu(dev, addr, size, dir); + debug_dma_sync_single_for_cpu(dev, addr, size, dir); + flush_write_buffers(); +} + +static inline void dma_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_for_device) + ops->sync_single_for_device(dev, addr, size, dir); + debug_dma_sync_single_for_device(dev, addr, size, dir); + flush_write_buffers(); +} + +static inline void dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t addr, + unsigned long offset, + size_t size, + enum dma_data_direction dir) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_range_for_cpu) { + ops->sync_single_range_for_cpu(dev, addr, offset, size, dir); + debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir); + + flush_write_buffers(); + } else + dma_sync_single_for_cpu(dev, addr, size, dir); +} + +static inline void dma_sync_single_range_for_device(struct device *dev, + dma_addr_t addr, + unsigned long offset, + size_t size, + enum dma_data_direction dir) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_range_for_device) { + ops->sync_single_range_for_device(dev, addr, offset, size, dir); + debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir); + + flush_write_buffers(); + } else + dma_sync_single_for_device(dev, addr, size, dir); +} + +static inline void +dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, + int 
nelems, enum dma_data_direction dir) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_sg_for_cpu) + ops->sync_sg_for_cpu(dev, sg, nelems, dir); + debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); + flush_write_buffers(); +} + +static inline void +dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_sg_for_device) + ops->sync_sg_for_device(dev, sg, nelems, dir); + debug_dma_sync_sg_for_device(dev, sg, nelems, dir); + + flush_write_buffers(); +} + +#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL) +#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL) +#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL) +#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL) + +#endif diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h index 515c6e2..b4326b5 100644 --- a/include/asm-generic/pci.h +++ b/include/asm-generic/pci.h @@ -30,19 +30,6 @@ pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, res->end = region->end; } -static inline struct resource * -pcibios_select_root(struct pci_dev *pdev, struct resource *res) -{ - struct resource *root = NULL; - - if (res->flags & IORESOURCE_IO) - root = &ioport_resource; - if (res->flags & IORESOURCE_MEM) - root = &iomem_resource; - - return root; -} - #define pcibios_scan_all_fns(a, b) 0 #ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index 4ce48e8..d083561 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -14,6 +14,9 @@ extern char __kprobes_text_start[], __kprobes_text_end[]; extern char __initdata_begin[], __initdata_end[]; extern char __start_rodata[], __end_rodata[]; +/* Start and end of .ctors section - used for constructor calls. */ +extern char __ctors_start[], __ctors_end[]; + /* function descriptor handling (if any). Override * in asm/sections.h */ #ifndef dereference_function_descriptor diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 6bdba10..55413e5 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -440,12 +440,21 @@ INIT_TASK \ } +#ifdef CONFIG_CONSTRUCTORS +#define KERNEL_CTORS() VMLINUX_SYMBOL(__ctors_start) = .; \ + *(.ctors) \ + VMLINUX_SYMBOL(__ctors_end) = .; +#else +#define KERNEL_CTORS() +#endif + /* init and exit section handling */ #define INIT_DATA \ *(.init.data) \ DEV_DISCARD(init.data) \ CPU_DISCARD(init.data) \ MEM_DISCARD(init.data) \ + KERNEL_CTORS() \ *(.init.rodata) \ DEV_DISCARD(init.rodata) \ CPU_DISCARD(init.rodata) \ diff --git a/include/linux/Kbuild b/include/linux/Kbuild index a2df703..03f2207 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -308,6 +308,7 @@ unifdef-y += pmu.h unifdef-y += poll.h unifdef-y += ppp_defs.h unifdef-y += ppp-comp.h +unifdef-y += pps.h unifdef-y += ptrace.h unifdef-y += quota.h unifdef-y += random.h diff --git a/include/linux/adfs_fs.h b/include/linux/adfs_fs.h index ef788c2..b19801f 100644 --- a/include/linux/adfs_fs.h +++ b/include/linux/adfs_fs.h @@ -41,8 +41,6 @@ struct adfs_discrecord { #define ADFS_DR_SIZE_BITS (ADFS_DR_SIZE << 3) #ifdef __KERNEL__ -#include <linux/adfs_fs_i.h> -#include <linux/adfs_fs_sb.h> /* * Calculate the boot block checksum on an ADFS drive. 
Note that this will * appear to be correct if the sector contains all zeros, so also check that @@ -60,17 +58,6 @@ static inline int adfs_checkbblk(unsigned char *ptr) return (result & 0xff) != ptr[511]; } - -static inline struct adfs_sb_info *ADFS_SB(struct super_block *sb) -{ - return sb->s_fs_info; -} - -static inline struct adfs_inode_info *ADFS_I(struct inode *inode) -{ - return container_of(inode, struct adfs_inode_info, vfs_inode); -} - #endif #endif diff --git a/include/linux/adfs_fs_i.h b/include/linux/adfs_fs_i.h deleted file mode 100644 index cb54303..0000000 --- a/include/linux/adfs_fs_i.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * linux/include/linux/adfs_fs_i.h - * - * Copyright (C) 1997 Russell King - */ - -#ifndef _ADFS_FS_I -#define _ADFS_FS_I - -/* - * adfs file system inode data in memory - */ -struct adfs_inode_info { - loff_t mmu_private; - unsigned long parent_id; /* object id of parent */ - __u32 loadaddr; /* RISC OS load address */ - __u32 execaddr; /* RISC OS exec address */ - unsigned int filetype; /* RISC OS file type */ - unsigned int attr; /* RISC OS permissions */ - unsigned int stamped:1; /* RISC OS file has date/time */ - struct inode vfs_inode; -}; - -#endif diff --git a/include/linux/adfs_fs_sb.h b/include/linux/adfs_fs_sb.h deleted file mode 100644 index d9bf05c..0000000 --- a/include/linux/adfs_fs_sb.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * linux/include/linux/adfs_fs_sb.h - * - * Copyright (C) 1997-1999 Russell King - */ - -#ifndef _ADFS_FS_SB -#define _ADFS_FS_SB - -/* - * Forward-declare this - */ -struct adfs_discmap; -struct adfs_dir_ops; - -/* - * ADFS file system superblock data in memory - */ -struct adfs_sb_info { - struct adfs_discmap *s_map; /* bh list containing map */ - struct adfs_dir_ops *s_dir; /* directory operations */ - - uid_t s_uid; /* owner uid */ - gid_t s_gid; /* owner gid */ - umode_t s_owner_mask; /* ADFS owner perm -> unix perm */ - umode_t s_other_mask; /* ADFS other perm -> unix perm */ - - __u32 s_ids_per_zone; /* max. 
no ids in one zone */ - __u32 s_idlen; /* length of ID in map */ - __u32 s_map_size; /* sector size of a map */ - unsigned long s_size; /* total size (in blocks) of this fs */ - signed int s_map2blk; /* shift left by this for map->sector */ - unsigned int s_log2sharesize;/* log2 share size */ - __le32 s_version; /* disc format version */ - unsigned int s_namelen; /* maximum number of characters in name */ -}; - -#endif diff --git a/include/linux/compiler-gcc3.h b/include/linux/compiler-gcc3.h index 8005eff..b721129 100644 --- a/include/linux/compiler-gcc3.h +++ b/include/linux/compiler-gcc3.h @@ -16,6 +16,12 @@ #define __must_check __attribute__((warn_unused_result)) #endif +#ifdef CONFIG_GCOV_KERNEL +# if __GNUC_MINOR__ < 4 +# error "GCOV profiling support for gcc versions below 3.4 not included" +# endif /* __GNUC_MINOR__ */ +#endif /* CONFIG_GCOV_KERNEL */ + /* * A trick to suppress uninitialized variable warning without generating any * code diff --git a/include/linux/efi.h b/include/linux/efi.h index bb66feb..ce4581f 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -101,7 +101,7 @@ typedef struct { u64 attribute; } efi_memory_desc_t; -typedef int (*efi_freemem_callback_t) (unsigned long start, unsigned long end, void *arg); +typedef int (*efi_freemem_callback_t) (u64 start, u64 end, void *arg); /* * Types and defines for Time Services diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index 244677c..43fc95d 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h @@ -79,10 +79,6 @@ struct fsl_spi_platform_data { u16 max_chipselect; void (*cs_control)(struct spi_device *spi, bool on); u32 sysclk; - - /* Legacy hooks, used by mpc52xx_psc_spi driver. */ - void (*activate_cs)(u8 cs, u8 polarity); - void (*deactivate_cs)(u8 cs, u8 polarity); }; struct mpc8xx_pcmcia_ops { diff --git a/include/linux/gcd.h b/include/linux/gcd.h new file mode 100644 index 0000000..69f5e8a --- /dev/null +++ b/include/linux/gcd.h @@ -0,0 +1,8 @@ +#ifndef _GCD_H +#define _GCD_H + +#include <linux/compiler.h> + +unsigned long gcd(unsigned long a, unsigned long b) __attribute_const__; + +#endif /* _GCD_H */ diff --git a/include/linux/gfp.h b/include/linux/gfp.h index cfdb35d..7c777a0 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -99,7 +99,7 @@ struct vm_area_struct; __GFP_NORETRY|__GFP_NOMEMALLOC) /* Control slab gfp mask during early boot */ -#define SLAB_GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS) +#define GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS) /* Control allocation constraints */ #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE) @@ -348,4 +348,11 @@ static inline void oom_killer_enable(void) oom_killer_disabled = false; } +extern gfp_t gfp_allowed_mask; + +static inline void set_gfp_allowed_mask(gfp_t mask) +{ + gfp_allowed_mask = mask; +} + #endif /* __LINUX_GFP_H */ diff --git a/include/linux/i2c/pca953x.h b/include/linux/i2c/pca953x.h index 3c73612..81736d6 100644 --- a/include/linux/i2c/pca953x.h +++ b/include/linux/i2c/pca953x.h @@ -15,4 +15,5 @@ struct pca953x_platform_data { int (*teardown)(struct i2c_client *client, unsigned gpio, unsigned ngpio, void *context); + char **names; };
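The gcd() helper declared in the new include/linux/gcd.h above is useful wherever a ratio has to be reduced to lowest terms; a hypothetical caller might look like this (function and values invented for illustration):

#include <linux/gcd.h>
#include <linux/kernel.h>

static void example_reduce_ratio(unsigned long num, unsigned long den)
{
	/* e.g. gcd(48, 18) == 6, so 48/18 reduces to 8/3 */
	unsigned long g = gcd(num, den);

	pr_info("reduced ratio: %lu/%lu\n", num / g, den / g);
}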
diff --git a/include/linux/init.h b/include/linux/init.h index 8c2c998..13b633e 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -134,6 +134,9 @@ typedef void (*exitcall_t)(void); extern initcall_t __con_initcall_start[], __con_initcall_end[]; extern initcall_t __security_initcall_start[], __security_initcall_end[]; +/* Used for constructor calls. */ +typedef void (*ctor_fn_t)(void); + /* Defined in init/main.c */ extern int do_one_initcall(initcall_t fn); extern char __initdata boot_command_line[]; diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index 3bf40e2..e408722 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h @@ -94,13 +94,8 @@ static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; } #endif #if defined(CONFIG_IPC_NS) -extern void free_ipc_ns(struct ipc_namespace *ns); extern struct ipc_namespace *copy_ipcs(unsigned long flags, struct ipc_namespace *ns); -extern void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids, - void (*free)(struct ipc_namespace *, - struct kern_ipc_perm *)); - static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns) { if (ns) diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index a77c600..348fa88 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -5,6 +5,7 @@ #include <linux/threads.h> #include <linux/percpu.h> #include <linux/cpumask.h> +#include <linux/interrupt.h> #include <asm/irq.h> #include <asm/cputime.h> @@ -31,6 +32,7 @@ struct kernel_stat { #ifndef CONFIG_GENERIC_HARDIRQS unsigned int irqs[NR_IRQS]; #endif + unsigned int softirqs[NR_SOFTIRQS]; }; DECLARE_PER_CPU(struct kernel_stat, kstat); @@ -67,6 +69,16 @@ extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu); #endif +static inline void kstat_incr_softirqs_this_cpu(unsigned int irq) +{ + kstat_this_cpu.softirqs[irq]++; +} + +static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu) +{ + return kstat_cpu(cpu).softirqs[irq]; +} + /* * Number of interrupts per specific IRQ source, since bootup */ diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 45add35..e46a073 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -117,7 +117,7 @@ static inline bool mem_cgroup_disabled(void) } extern bool mem_cgroup_oom_called(struct task_struct *task); - +void mem_cgroup_update_mapped_file_stat(struct page *page, int val); #else /* CONFIG_CGROUP_MEM_RES_CTLR */ struct mem_cgroup; @@ -271,6 +271,11 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) { } +static inline void mem_cgroup_update_mapped_file_stat(struct page *page, + int val) +{ +} + #endif /* CONFIG_CGROUP_MEM_CONT */ #endif /* _LINUX_MEMCONTROL_H */ diff --git a/include/linux/mfd/ab3100.h b/include/linux/mfd/ab3100.h new file mode 100644 index 0000000..7a3f316 --- /dev/null +++ b/include/linux/mfd/ab3100.h @@ -0,0 +1,103 @@ +/* + * Copyright (C) 2007-2009 ST-Ericsson AB + * License terms: GNU General Public License (GPL) version 2 + * AB3100 core access functions + * Author: Linus Walleij <linus.walleij@stericsson.com> + */ + +#include <linux/device.h> + +#ifndef MFD_AB3100_H +#define MFD_AB3100_H + +#define ABUNKNOWN 0 +#define AB3000 1 +#define AB3100 2 + +/* + * AB3100, EVENTA1, A2 and A3 event register flags + * these are catenated into a single 32-bit flag in the code + * for event notification broadcasts.
+ */ +#define AB3100_EVENTA1_ONSWA (0x01<<16) +#define AB3100_EVENTA1_ONSWB (0x02<<16) +#define AB3100_EVENTA1_ONSWC (0x04<<16) +#define AB3100_EVENTA1_DCIO (0x08<<16) +#define AB3100_EVENTA1_OVER_TEMP (0x10<<16) +#define AB3100_EVENTA1_SIM_OFF (0x20<<16) +#define AB3100_EVENTA1_VBUS (0x40<<16) +#define AB3100_EVENTA1_VSET_USB (0x80<<16) + +#define AB3100_EVENTA2_READY_TX (0x01<<8) +#define AB3100_EVENTA2_READY_RX (0x02<<8) +#define AB3100_EVENTA2_OVERRUN_ERROR (0x04<<8) +#define AB3100_EVENTA2_FRAMING_ERROR (0x08<<8) +#define AB3100_EVENTA2_CHARG_OVERCURRENT (0x10<<8) +#define AB3100_EVENTA2_MIDR (0x20<<8) +#define AB3100_EVENTA2_BATTERY_REM (0x40<<8) +#define AB3100_EVENTA2_ALARM (0x80<<8) + +#define AB3100_EVENTA3_ADC_TRIG5 (0x01) +#define AB3100_EVENTA3_ADC_TRIG4 (0x02) +#define AB3100_EVENTA3_ADC_TRIG3 (0x04) +#define AB3100_EVENTA3_ADC_TRIG2 (0x08) +#define AB3100_EVENTA3_ADC_TRIGVBAT (0x10) +#define AB3100_EVENTA3_ADC_TRIGVTX (0x20) +#define AB3100_EVENTA3_ADC_TRIG1 (0x40) +#define AB3100_EVENTA3_ADC_TRIG0 (0x80) + +/* AB3100, STR register flags */ +#define AB3100_STR_ONSWA (0x01) +#define AB3100_STR_ONSWB (0x02) +#define AB3100_STR_ONSWC (0x04) +#define AB3100_STR_DCIO (0x08) +#define AB3100_STR_BOOT_MODE (0x10) +#define AB3100_STR_SIM_OFF (0x20) +#define AB3100_STR_BATT_REMOVAL (0x40) +#define AB3100_STR_VBUS (0x80) + +/** + * struct ab3100 + * @access_mutex: lock out concurrent accesses to the AB3100 registers + * @dev: pointer to the containing device + * @i2c_client: I2C client for this chip + * @testreg_client: secondary client for test registers + * @chip_name: name of this chip variant + * @chip_id: 8 bit chip ID for this chip variant + * @work: an event handling worker + * @event_subscribers: event subscribers are listed here + * @startup_events: a copy of the first reading of the event registers + * @startup_events_read: whether the first events have been read + * + * This struct is PRIVATE and devices using it should NOT + * access ANY fields. It is used as a token for calling the + * AB3100 functions. 
+ */ +struct ab3100 { + struct mutex access_mutex; + struct device *dev; + struct i2c_client *i2c_client; + struct i2c_client *testreg_client; + char chip_name[32]; + u8 chip_id; + struct work_struct work; + struct blocking_notifier_head event_subscribers; + u32 startup_events; + bool startup_events_read; +}; + +int ab3100_set_register(struct ab3100 *ab3100, u8 reg, u8 regval); +int ab3100_get_register(struct ab3100 *ab3100, u8 reg, u8 *regval); +int ab3100_get_register_page(struct ab3100 *ab3100, + u8 first_reg, u8 *regvals, u8 numregs); +int ab3100_mask_and_set_register(struct ab3100 *ab3100, + u8 reg, u8 andmask, u8 ormask); +u8 ab3100_get_chip_type(struct ab3100 *ab3100); +int ab3100_event_register(struct ab3100 *ab3100, + struct notifier_block *nb); +int ab3100_event_unregister(struct ab3100 *ab3100, + struct notifier_block *nb); +int ab3100_event_registers_startup_state_get(struct ab3100 *ab3100, + u32 *fatevent); + +#endif diff --git a/include/linux/mfd/asic3.h b/include/linux/mfd/asic3.h index 322cd6d..de3c4ad 100644 --- a/include/linux/mfd/asic3.h +++ b/include/linux/mfd/asic3.h @@ -30,6 +30,13 @@ struct asic3_platform_data { #define ASIC3_NUM_GPIOS 64 #define ASIC3_NR_IRQS ASIC3_NUM_GPIOS + 6 +#define ASIC3_IRQ_LED0 64 +#define ASIC3_IRQ_LED1 65 +#define ASIC3_IRQ_LED2 66 +#define ASIC3_IRQ_SPI 67 +#define ASIC3_IRQ_SMBUS 68 +#define ASIC3_IRQ_OWM 69 + #define ASIC3_TO_GPIO(gpio) (NR_BUILTIN_GPIO + (gpio)) #define ASIC3_GPIO_BANK_A 0 @@ -227,8 +234,8 @@ struct asic3_platform_data { /* Basic control of the SD ASIC */ -#define ASIC3_SDHWCTRL_Base 0x0E00 -#define ASIC3_SDHWCTRL_SDConf 0x00 +#define ASIC3_SDHWCTRL_BASE 0x0E00 +#define ASIC3_SDHWCTRL_SDCONF 0x00 #define ASIC3_SDHWCTRL_SUSPEND (1 << 0) /* 1=suspend all SD operations */ #define ASIC3_SDHWCTRL_CLKSEL (1 << 1) /* 1=SDICK, 0=HCLK */ @@ -242,10 +249,10 @@ struct asic3_platform_data { /* SD card power supply ctrl 1=enable */ #define ASIC3_SDHWCTRL_SDPWR (1 << 6) -#define ASIC3_EXTCF_Base 0x1100 +#define ASIC3_EXTCF_BASE 0x1100 -#define ASIC3_EXTCF_Select 0x00 -#define ASIC3_EXTCF_Reset 0x04 +#define ASIC3_EXTCF_SELECT 0x00 +#define ASIC3_EXTCF_RESET 0x04 #define ASIC3_EXTCF_SMOD0 (1 << 0) /* slot number of mode 0 */ #define ASIC3_EXTCF_SMOD1 (1 << 1) /* slot number of mode 1 */ @@ -279,222 +286,9 @@ struct asic3_platform_data { * SDIO_CTRL Control registers for SDIO operations * *****************************************************************************/ -#define ASIC3_SD_CONFIG_Base 0x0400 /* Assumes 32 bit addressing */ - -#define ASIC3_SD_CONFIG_Command 0x08 /* R/W: Command */ - -/* [0:8] SD Control Register Base Address */ -#define ASIC3_SD_CONFIG_Addr0 0x20 - -/* [9:31] SD Control Register Base Address */ -#define ASIC3_SD_CONFIG_Addr1 0x24 - -/* R/O: interrupt assigned to pin */ -#define ASIC3_SD_CONFIG_IntPin 0x78 - -/* - * Set to 0x1f to clock SD controller, 0 otherwise. 
- * At 0x82 - Gated Clock Ctrl - */ -#define ASIC3_SD_CONFIG_ClkStop 0x80 - -/* Control clock of SD controller */ -#define ASIC3_SD_CONFIG_ClockMode 0x84 -#define ASIC3_SD_CONFIG_SDHC_PinStatus 0x88 /* R/0: SD pins status */ -#define ASIC3_SD_CONFIG_SDHC_Power1 0x90 /* Power1 - manual pwr ctrl */ - -/* auto power up after card inserted */ -#define ASIC3_SD_CONFIG_SDHC_Power2 0x92 - -/* auto power down when card removed */ -#define ASIC3_SD_CONFIG_SDHC_Power3 0x94 -#define ASIC3_SD_CONFIG_SDHC_CardDetect 0x98 -#define ASIC3_SD_CONFIG_SDHC_Slot 0xA0 /* R/O: support slot number */ -#define ASIC3_SD_CONFIG_SDHC_ExtGateClk1 0x1E0 /* Not used */ -#define ASIC3_SD_CONFIG_SDHC_ExtGateClk2 0x1E2 /* Not used*/ - -/* GPIO Output Reg. , at 0x1EA - GPIO Output Enable Reg. */ -#define ASIC3_SD_CONFIG_SDHC_GPIO_OutAndEnable 0x1E8 -#define ASIC3_SD_CONFIG_SDHC_GPIO_Status 0x1EC /* GPIO Status Reg. */ - -/* Bit 1: double buffer/single buffer */ -#define ASIC3_SD_CONFIG_SDHC_ExtGateClk3 0x1F0 - -/* Memory access enable (set to 1 to access SD Controller) */ -#define SD_CONFIG_COMMAND_MAE (1<<1) - -#define SD_CONFIG_CLK_ENABLE_ALL 0x1f - -#define SD_CONFIG_POWER1_PC_33V 0x0200 /* Set for 3.3 volts */ -#define SD_CONFIG_POWER1_PC_OFF 0x0000 /* Turn off power */ - - /* two bits - number of cycles for card detection */ -#define SD_CONFIG_CARDDETECTMODE_CLK ((x) & 0x3) - - -#define ASIC3_SD_CTRL_Base 0x1000 - -#define ASIC3_SD_CTRL_Cmd 0x00 -#define ASIC3_SD_CTRL_Arg0 0x08 -#define ASIC3_SD_CTRL_Arg1 0x0C -#define ASIC3_SD_CTRL_StopInternal 0x10 -#define ASIC3_SD_CTRL_TransferSectorCount 0x14 -#define ASIC3_SD_CTRL_Response0 0x18 -#define ASIC3_SD_CTRL_Response1 0x1C -#define ASIC3_SD_CTRL_Response2 0x20 -#define ASIC3_SD_CTRL_Response3 0x24 -#define ASIC3_SD_CTRL_Response4 0x28 -#define ASIC3_SD_CTRL_Response5 0x2C -#define ASIC3_SD_CTRL_Response6 0x30 -#define ASIC3_SD_CTRL_Response7 0x34 -#define ASIC3_SD_CTRL_CardStatus 0x38 -#define ASIC3_SD_CTRL_BufferCtrl 0x3C -#define ASIC3_SD_CTRL_IntMaskCard 0x40 -#define ASIC3_SD_CTRL_IntMaskBuffer 0x44 -#define ASIC3_SD_CTRL_CardClockCtrl 0x48 -#define ASIC3_SD_CTRL_MemCardXferDataLen 0x4C -#define ASIC3_SD_CTRL_MemCardOptionSetup 0x50 -#define ASIC3_SD_CTRL_ErrorStatus0 0x58 -#define ASIC3_SD_CTRL_ErrorStatus1 0x5C -#define ASIC3_SD_CTRL_DataPort 0x60 -#define ASIC3_SD_CTRL_TransactionCtrl 0x68 -#define ASIC3_SD_CTRL_SoftwareReset 0x1C0 - -#define SD_CTRL_SOFTWARE_RESET_CLEAR (1<<0) - -#define SD_CTRL_TRANSACTIONCONTROL_SET (1<<8) - -#define SD_CTRL_CARDCLOCKCONTROL_FOR_SD_CARD (1<<15) -#define SD_CTRL_CARDCLOCKCONTROL_ENABLE_CLOCK (1<<8) -#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_512 (1<<7) -#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_256 (1<<6) -#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_128 (1<<5) -#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_64 (1<<4) -#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_32 (1<<3) -#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_16 (1<<2) -#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_8 (1<<1) -#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_4 (1<<0) -#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_2 (0<<0) - -#define MEM_CARD_OPTION_REQUIRED 0x000e -#define MEM_CARD_OPTION_DATA_RESPONSE_TIMEOUT(x) (((x) & 0x0f) << 4) -#define MEM_CARD_OPTION_C2_MODULE_NOT_PRESENT (1<<14) -#define MEM_CARD_OPTION_DATA_XFR_WIDTH_1 (1<<15) -#define MEM_CARD_OPTION_DATA_XFR_WIDTH_4 0 - -#define SD_CTRL_COMMAND_INDEX(x) ((x) & 0x3f) -#define SD_CTRL_COMMAND_TYPE_CMD (0 << 6) -#define SD_CTRL_COMMAND_TYPE_ACMD (1 << 6) -#define SD_CTRL_COMMAND_TYPE_AUTHENTICATION (2 << 6) -#define 
SD_CTRL_COMMAND_RESPONSE_TYPE_NORMAL (0 << 8) -#define SD_CTRL_COMMAND_RESPONSE_TYPE_EXT_R1 (4 << 8) -#define SD_CTRL_COMMAND_RESPONSE_TYPE_EXT_R1B (5 << 8) -#define SD_CTRL_COMMAND_RESPONSE_TYPE_EXT_R2 (6 << 8) -#define SD_CTRL_COMMAND_RESPONSE_TYPE_EXT_R3 (7 << 8) -#define SD_CTRL_COMMAND_DATA_PRESENT (1 << 11) -#define SD_CTRL_COMMAND_TRANSFER_READ (1 << 12) -#define SD_CTRL_COMMAND_TRANSFER_WRITE (0 << 12) -#define SD_CTRL_COMMAND_MULTI_BLOCK (1 << 13) -#define SD_CTRL_COMMAND_SECURITY_CMD (1 << 14) - -#define SD_CTRL_STOP_INTERNAL_ISSSUE_CMD12 (1 << 0) -#define SD_CTRL_STOP_INTERNAL_AUTO_ISSUE_CMD12 (1 << 8) - -#define SD_CTRL_CARDSTATUS_RESPONSE_END (1 << 0) -#define SD_CTRL_CARDSTATUS_RW_END (1 << 2) -#define SD_CTRL_CARDSTATUS_CARD_REMOVED_0 (1 << 3) -#define SD_CTRL_CARDSTATUS_CARD_INSERTED_0 (1 << 4) -#define SD_CTRL_CARDSTATUS_SIGNAL_STATE_PRESENT_0 (1 << 5) -#define SD_CTRL_CARDSTATUS_WRITE_PROTECT (1 << 7) -#define SD_CTRL_CARDSTATUS_CARD_REMOVED_3 (1 << 8) -#define SD_CTRL_CARDSTATUS_CARD_INSERTED_3 (1 << 9) -#define SD_CTRL_CARDSTATUS_SIGNAL_STATE_PRESENT_3 (1 << 10) - -#define SD_CTRL_BUFFERSTATUS_CMD_INDEX_ERROR (1 << 0) -#define SD_CTRL_BUFFERSTATUS_CRC_ERROR (1 << 1) -#define SD_CTRL_BUFFERSTATUS_STOP_BIT_END_ERROR (1 << 2) -#define SD_CTRL_BUFFERSTATUS_DATA_TIMEOUT (1 << 3) -#define SD_CTRL_BUFFERSTATUS_BUFFER_OVERFLOW (1 << 4) -#define SD_CTRL_BUFFERSTATUS_BUFFER_UNDERFLOW (1 << 5) -#define SD_CTRL_BUFFERSTATUS_CMD_TIMEOUT (1 << 6) -#define SD_CTRL_BUFFERSTATUS_UNK7 (1 << 7) -#define SD_CTRL_BUFFERSTATUS_BUFFER_READ_ENABLE (1 << 8) -#define SD_CTRL_BUFFERSTATUS_BUFFER_WRITE_ENABLE (1 << 9) -#define SD_CTRL_BUFFERSTATUS_ILLEGAL_FUNCTION (1 << 13) -#define SD_CTRL_BUFFERSTATUS_CMD_BUSY (1 << 14) -#define SD_CTRL_BUFFERSTATUS_ILLEGAL_ACCESS (1 << 15) - -#define SD_CTRL_INTMASKCARD_RESPONSE_END (1 << 0) -#define SD_CTRL_INTMASKCARD_RW_END (1 << 2) -#define SD_CTRL_INTMASKCARD_CARD_REMOVED_0 (1 << 3) -#define SD_CTRL_INTMASKCARD_CARD_INSERTED_0 (1 << 4) -#define SD_CTRL_INTMASKCARD_SIGNAL_STATE_PRESENT_0 (1 << 5) -#define SD_CTRL_INTMASKCARD_UNK6 (1 << 6) -#define SD_CTRL_INTMASKCARD_WRITE_PROTECT (1 << 7) -#define SD_CTRL_INTMASKCARD_CARD_REMOVED_3 (1 << 8) -#define SD_CTRL_INTMASKCARD_CARD_INSERTED_3 (1 << 9) -#define SD_CTRL_INTMASKCARD_SIGNAL_STATE_PRESENT_3 (1 << 10) - -#define SD_CTRL_INTMASKBUFFER_CMD_INDEX_ERROR (1 << 0) -#define SD_CTRL_INTMASKBUFFER_CRC_ERROR (1 << 1) -#define SD_CTRL_INTMASKBUFFER_STOP_BIT_END_ERROR (1 << 2) -#define SD_CTRL_INTMASKBUFFER_DATA_TIMEOUT (1 << 3) -#define SD_CTRL_INTMASKBUFFER_BUFFER_OVERFLOW (1 << 4) -#define SD_CTRL_INTMASKBUFFER_BUFFER_UNDERFLOW (1 << 5) -#define SD_CTRL_INTMASKBUFFER_CMD_TIMEOUT (1 << 6) -#define SD_CTRL_INTMASKBUFFER_UNK7 (1 << 7) -#define SD_CTRL_INTMASKBUFFER_BUFFER_READ_ENABLE (1 << 8) -#define SD_CTRL_INTMASKBUFFER_BUFFER_WRITE_ENABLE (1 << 9) -#define SD_CTRL_INTMASKBUFFER_ILLEGAL_FUNCTION (1 << 13) -#define SD_CTRL_INTMASKBUFFER_CMD_BUSY (1 << 14) -#define SD_CTRL_INTMASKBUFFER_ILLEGAL_ACCESS (1 << 15) - -#define SD_CTRL_DETAIL0_RESPONSE_CMD_ERROR (1 << 0) -#define SD_CTRL_DETAIL0_END_BIT_ERROR_FOR_RESPONSE_NON_CMD12 (1 << 2) -#define SD_CTRL_DETAIL0_END_BIT_ERROR_FOR_RESPONSE_CMD12 (1 << 3) -#define SD_CTRL_DETAIL0_END_BIT_ERROR_FOR_READ_DATA (1 << 4) -#define SD_CTRL_DETAIL0_END_BIT_ERROR_FOR_WRITE_CRC_STATUS (1 << 5) -#define SD_CTRL_DETAIL0_CRC_ERROR_FOR_RESPONSE_NON_CMD12 (1 << 8) -#define SD_CTRL_DETAIL0_CRC_ERROR_FOR_RESPONSE_CMD12 (1 << 9) -#define SD_CTRL_DETAIL0_CRC_ERROR_FOR_READ_DATA (1 << 10) 
-#define SD_CTRL_DETAIL0_CRC_ERROR_FOR_WRITE_CMD (1 << 11) - -#define SD_CTRL_DETAIL1_NO_CMD_RESPONSE (1 << 0) -#define SD_CTRL_DETAIL1_TIMEOUT_READ_DATA (1 << 4) -#define SD_CTRL_DETAIL1_TIMEOUT_CRS_STATUS (1 << 5) -#define SD_CTRL_DETAIL1_TIMEOUT_CRC_BUSY (1 << 6) - -#define ASIC3_SDIO_CTRL_Base 0x1200 - -#define ASIC3_SDIO_CTRL_Cmd 0x00 -#define ASIC3_SDIO_CTRL_CardPortSel 0x04 -#define ASIC3_SDIO_CTRL_Arg0 0x08 -#define ASIC3_SDIO_CTRL_Arg1 0x0C -#define ASIC3_SDIO_CTRL_TransferBlockCount 0x14 -#define ASIC3_SDIO_CTRL_Response0 0x18 -#define ASIC3_SDIO_CTRL_Response1 0x1C -#define ASIC3_SDIO_CTRL_Response2 0x20 -#define ASIC3_SDIO_CTRL_Response3 0x24 -#define ASIC3_SDIO_CTRL_Response4 0x28 -#define ASIC3_SDIO_CTRL_Response5 0x2C -#define ASIC3_SDIO_CTRL_Response6 0x30 -#define ASIC3_SDIO_CTRL_Response7 0x34 -#define ASIC3_SDIO_CTRL_CardStatus 0x38 -#define ASIC3_SDIO_CTRL_BufferCtrl 0x3C -#define ASIC3_SDIO_CTRL_IntMaskCard 0x40 -#define ASIC3_SDIO_CTRL_IntMaskBuffer 0x44 -#define ASIC3_SDIO_CTRL_CardXferDataLen 0x4C -#define ASIC3_SDIO_CTRL_CardOptionSetup 0x50 -#define ASIC3_SDIO_CTRL_ErrorStatus0 0x54 -#define ASIC3_SDIO_CTRL_ErrorStatus1 0x58 -#define ASIC3_SDIO_CTRL_DataPort 0x60 -#define ASIC3_SDIO_CTRL_TransactionCtrl 0x68 -#define ASIC3_SDIO_CTRL_CardIntCtrl 0x6C -#define ASIC3_SDIO_CTRL_ClocknWaitCtrl 0x70 -#define ASIC3_SDIO_CTRL_HostInformation 0x74 -#define ASIC3_SDIO_CTRL_ErrorCtrl 0x78 -#define ASIC3_SDIO_CTRL_LEDCtrl 0x7C -#define ASIC3_SDIO_CTRL_SoftwareReset 0x1C0 +#define ASIC3_SD_CONFIG_BASE 0x0400 /* Assumes 32 bit addressing */ +#define ASIC3_SD_CTRL_BASE 0x1000 +#define ASIC3_SDIO_CTRL_BASE 0x1200 #define ASIC3_MAP_SIZE_32BIT 0x2000 #define ASIC3_MAP_SIZE_16BIT 0x1000 diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h new file mode 100644 index 0000000..c12c3c0 --- /dev/null +++ b/include/linux/mfd/ezx-pcap.h @@ -0,0 +1,256 @@ +/* + * Copyright 2009 Daniel Ribeiro <drwyrm@gmail.com> + * + * For further information, please see http://wiki.openezx.org/PCAP2 + */ + +#ifndef EZX_PCAP_H +#define EZX_PCAP_H + +struct pcap_subdev { + int id; + const char *name; + void *platform_data; +}; + +struct pcap_platform_data { + unsigned int irq_base; + unsigned int config; + void (*init) (void *); /* board specific init */ + int num_subdevs; + struct pcap_subdev *subdevs; +}; + +struct pcap_chip; + +int ezx_pcap_write(struct pcap_chip *, u8, u32); +int ezx_pcap_read(struct pcap_chip *, u8, u32 *); +int pcap_to_irq(struct pcap_chip *, int); +int pcap_adc_async(struct pcap_chip *, u8, u32, u8[], void *, void *); +int pcap_adc_sync(struct pcap_chip *, u8, u32, u8[], u16[]); + +#define PCAP_SECOND_PORT 1 +#define PCAP_CS_AH 2 + +#define PCAP_REGISTER_WRITE_OP_BIT 0x80000000 +#define PCAP_REGISTER_READ_OP_BIT 0x00000000 + +#define PCAP_REGISTER_VALUE_MASK 0x01ffffff +#define PCAP_REGISTER_ADDRESS_MASK 0x7c000000 +#define PCAP_REGISTER_ADDRESS_SHIFT 26 +#define PCAP_REGISTER_NUMBER 32 +#define PCAP_CLEAR_INTERRUPT_REGISTER 0x01ffffff +#define PCAP_MASK_ALL_INTERRUPT 0x01ffffff + +/* registers accessible by both pcap ports */ +#define PCAP_REG_ISR 0x0 /* Interrupt Status */ +#define PCAP_REG_MSR 0x1 /* Interrupt Mask */ +#define PCAP_REG_PSTAT 0x2 /* Processor Status */ +#define PCAP_REG_VREG2 0x6 /* Regulator Bank 2 Control */ +#define PCAP_REG_AUXVREG 0x7 /* Auxiliary Regulator Control */ +#define PCAP_REG_BATT 0x8 /* Battery Control */ +#define PCAP_REG_ADC 0x9 /* AD Control */ +#define PCAP_REG_ADR 0xa /* AD Result */ +#define PCAP_REG_CODEC 0xb /* Audio Codec
Control */ +#define PCAP_REG_RX_AMPS 0xc /* RX Audio Amplifiers Control */ +#define PCAP_REG_ST_DAC 0xd /* Stereo DAC Control */ +#define PCAP_REG_BUSCTRL 0x14 /* Connectivity Control */ +#define PCAP_REG_PERIPH 0x15 /* Peripheral Control */ +#define PCAP_REG_LOWPWR 0x18 /* Regulator Low Power Control */ +#define PCAP_REG_TX_AMPS 0x1a /* TX Audio Amplifiers Control */ +#define PCAP_REG_GP 0x1b /* General Purpose */ +#define PCAP_REG_TEST1 0x1c +#define PCAP_REG_TEST2 0x1d +#define PCAP_REG_VENDOR_TEST1 0x1e +#define PCAP_REG_VENDOR_TEST2 0x1f + +/* registers accessible by pcap port 1 only (a1200, e2 & e6) */ +#define PCAP_REG_INT_SEL 0x3 /* Interrupt Select */ +#define PCAP_REG_SWCTRL 0x4 /* Switching Regulator Control */ +#define PCAP_REG_VREG1 0x5 /* Regulator Bank 1 Control */ +#define PCAP_REG_RTC_TOD 0xe /* RTC Time of Day */ +#define PCAP_REG_RTC_TODA 0xf /* RTC Time of Day Alarm */ +#define PCAP_REG_RTC_DAY 0x10 /* RTC Day */ +#define PCAP_REG_RTC_DAYA 0x11 /* RTC Day Alarm */ +#define PCAP_REG_MTRTMR 0x12 /* AD Monitor Timer */ +#define PCAP_REG_PWR 0x13 /* Power Control */ +#define PCAP_REG_AUXVREG_MASK 0x16 /* Auxiliary Regulator Mask */ +#define PCAP_REG_VENDOR_REV 0x17 +#define PCAP_REG_PERIPH_MASK 0x19 /* Peripheral Mask */ + +/* PCAP2 Interrupts */ +#define PCAP_NIRQS 23 +#define PCAP_IRQ_ADCDONE 0 /* ADC done port 1 */ +#define PCAP_IRQ_TS 1 /* Touch Screen */ +#define PCAP_IRQ_1HZ 2 /* 1HZ timer */ +#define PCAP_IRQ_WH 3 /* ADC above high limit */ +#define PCAP_IRQ_WL 4 /* ADC below low limit */ +#define PCAP_IRQ_TODA 5 /* Time of day alarm */ +#define PCAP_IRQ_USB4V 6 /* USB above 4V */ +#define PCAP_IRQ_ONOFF 7 /* On/Off button */ +#define PCAP_IRQ_ONOFF2 8 /* On/Off button 2 */ +#define PCAP_IRQ_USB1V 9 /* USB above 1V */ +#define PCAP_IRQ_MOBPORT 10 +#define PCAP_IRQ_MIC 11 /* Mic attach/HS button */ +#define PCAP_IRQ_HS 12 /* Headset attach */ +#define PCAP_IRQ_ST 13 +#define PCAP_IRQ_PC 14 /* Power Cut */ +#define PCAP_IRQ_WARM 15 +#define PCAP_IRQ_EOL 16 /* Battery End Of Life */ +#define PCAP_IRQ_CLK 17 +#define PCAP_IRQ_SYSRST 18 /* System Reset */ +#define PCAP_IRQ_DUMMY 19 +#define PCAP_IRQ_ADCDONE2 20 /* ADC done port 2 */ +#define PCAP_IRQ_SOFTRESET 21 +#define PCAP_IRQ_MNEXB 22 + +/* voltage regulators */ +#define V1 0 +#define V2 1 +#define V3 2 +#define V4 3 +#define V5 4 +#define V6 5 +#define V7 6 +#define V8 7 +#define V9 8 +#define V10 9 +#define VAUX1 10 +#define VAUX2 11 +#define VAUX3 12 +#define VAUX4 13 +#define VSIM 14 +#define VSIM2 15 +#define VVIB 16 +#define SW1 17 +#define SW2 18 +#define SW3 19 +#define SW1S 20 +#define SW2S 21 + +#define PCAP_BATT_DAC_MASK 0x000000ff +#define PCAP_BATT_DAC_SHIFT 0 +#define PCAP_BATT_B_FDBK (1 << 8) +#define PCAP_BATT_EXT_ISENSE (1 << 9) +#define PCAP_BATT_V_COIN_MASK 0x00003c00 +#define PCAP_BATT_V_COIN_SHIFT 10 +#define PCAP_BATT_I_COIN (1 << 14) +#define PCAP_BATT_COIN_CH_EN (1 << 15) +#define PCAP_BATT_EOL_SEL_MASK 0x000e0000 +#define PCAP_BATT_EOL_SEL_SHIFT 17 +#define PCAP_BATT_EOL_CMP_EN (1 << 20) +#define PCAP_BATT_BATT_DET_EN (1 << 21) +#define PCAP_BATT_THERMBIAS_CTRL (1 << 22) + +#define PCAP_ADC_ADEN (1 << 0) +#define PCAP_ADC_RAND (1 << 1) +#define PCAP_ADC_AD_SEL1 (1 << 2) +#define PCAP_ADC_AD_SEL2 (1 << 3) +#define PCAP_ADC_ADA1_MASK 0x00000070 +#define PCAP_ADC_ADA1_SHIFT 4 +#define PCAP_ADC_ADA2_MASK 0x00000380 +#define PCAP_ADC_ADA2_SHIFT 7 +#define PCAP_ADC_ATO_MASK 0x00003c00 +#define PCAP_ADC_ATO_SHIFT 10 +#define PCAP_ADC_ATOX (1 << 14) +#define PCAP_ADC_MTR1 (1 << 15) +#define
PCAP_ADC_MTR2 (1 << 16) +#define PCAP_ADC_TS_M_MASK 0x000e0000 +#define PCAP_ADC_TS_M_SHIFT 17 +#define PCAP_ADC_TS_REF_LOWPWR (1 << 20) +#define PCAP_ADC_TS_REFENB (1 << 21) +#define PCAP_ADC_BATT_I_POLARITY (1 << 22) +#define PCAP_ADC_BATT_I_ADC (1 << 23) + +#define PCAP_ADC_BANK_0 0 +#define PCAP_ADC_BANK_1 1 +/* ADC bank 0 */ +#define PCAP_ADC_CH_COIN 0 +#define PCAP_ADC_CH_BATT 1 +#define PCAP_ADC_CH_BPLUS 2 +#define PCAP_ADC_CH_MOBPORTB 3 +#define PCAP_ADC_CH_TEMPERATURE 4 +#define PCAP_ADC_CH_CHARGER_ID 5 +#define PCAP_ADC_CH_AD6 6 +/* ADC bank 1 */ +#define PCAP_ADC_CH_AD7 0 +#define PCAP_ADC_CH_AD8 1 +#define PCAP_ADC_CH_AD9 2 +#define PCAP_ADC_CH_TS_X1 3 +#define PCAP_ADC_CH_TS_X2 4 +#define PCAP_ADC_CH_TS_Y1 5 +#define PCAP_ADC_CH_TS_Y2 6 + +#define PCAP_ADC_T_NOW 0 +#define PCAP_ADC_T_IN_BURST 1 +#define PCAP_ADC_T_OUT_BURST 2 + +#define PCAP_ADC_ATO_IN_BURST 6 +#define PCAP_ADC_ATO_OUT_BURST 0 + +#define PCAP_ADC_TS_M_XY 1 +#define PCAP_ADC_TS_M_PRESSURE 2 +#define PCAP_ADC_TS_M_PLATE_X 3 +#define PCAP_ADC_TS_M_PLATE_Y 4 +#define PCAP_ADC_TS_M_STANDBY 5 +#define PCAP_ADC_TS_M_NONTS 6 + +#define PCAP_ADR_ADD1_MASK 0x000003ff +#define PCAP_ADR_ADD1_SHIFT 0 +#define PCAP_ADR_ADD2_MASK 0x000ffc00 +#define PCAP_ADR_ADD2_SHIFT 10 +#define PCAP_ADR_ADINC1 (1 << 20) +#define PCAP_ADR_ADINC2 (1 << 21) +#define PCAP_ADR_ASC (1 << 22) +#define PCAP_ADR_ONESHOT (1 << 23) + +#define PCAP_BUSCTRL_FSENB (1 << 0) +#define PCAP_BUSCTRL_USB_SUSPEND (1 << 1) +#define PCAP_BUSCTRL_USB_PU (1 << 2) +#define PCAP_BUSCTRL_USB_PD (1 << 3) +#define PCAP_BUSCTRL_VUSB_EN (1 << 4) +#define PCAP_BUSCTRL_USB_PS (1 << 5) +#define PCAP_BUSCTRL_VUSB_MSTR_EN (1 << 6) +#define PCAP_BUSCTRL_VBUS_PD_ENB (1 << 7) +#define PCAP_BUSCTRL_CURRLIM (1 << 8) +#define PCAP_BUSCTRL_RS232ENB (1 << 9) +#define PCAP_BUSCTRL_RS232_DIR (1 << 10) +#define PCAP_BUSCTRL_SE0_CONN (1 << 11) +#define PCAP_BUSCTRL_USB_PDM (1 << 12) +#define PCAP_BUSCTRL_BUS_PRI_ADJ (1 << 24) + +/* leds */ +#define PCAP_LED0 0 +#define PCAP_LED1 1 +#define PCAP_BL0 2 +#define PCAP_BL1 3 +#define PCAP_VIB 4 +#define PCAP_LED_3MA 0 +#define PCAP_LED_4MA 1 +#define PCAP_LED_5MA 2 +#define PCAP_LED_9MA 3 +#define PCAP_LED_GPIO_VAL_MASK 0x00ffffff +#define PCAP_LED_GPIO_EN 0x01000000 +#define PCAP_LED_GPIO_INVERT 0x02000000 +#define PCAP_LED_T_MASK 0xf +#define PCAP_LED_C_MASK 0x3 +#define PCAP_BL_MASK 0x1f +#define PCAP_BL0_SHIFT 0 +#define PCAP_LED0_EN (1 << 5) +#define PCAP_LED1_EN (1 << 6) +#define PCAP_LED0_T_SHIFT 7 +#define PCAP_LED1_T_SHIFT 11 +#define PCAP_LED0_C_SHIFT 15 +#define PCAP_LED1_C_SHIFT 17 +#define PCAP_BL1_SHIFT 20 +#define PCAP_VIB_MASK 0x3 +#define PCAP_VIB_SHIFT 20 +#define PCAP_VIB_EN (1 << 19) + +/* RTC */ +#define PCAP_RTC_DAY_MASK 0x3fff +#define PCAP_RTC_TOD_MASK 0xffff +#define PCAP_RTC_PC_MASK 0x7 +#define SEC_PER_DAY 86400 + +#endif diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index c377118..6b9c5d0 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h @@ -22,7 +22,7 @@ * data for the MMC controller */ struct tmio_mmc_data { - unsigned int hclk; + const unsigned int hclk; }; /*
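The CONFIG_CONSTRUCTORS plumbing spread across this patch (the KERNEL_CTORS() linker-script macro and ctor_fn_t typedef earlier, plus the module fields in the next hunk) exists so gcov-instrumented objects get their constructors run. At boot, the built-in constructors are invoked by a loop along these lines (a sketch of the init/main.c side, shown here for orientation; __ctors_start/__ctors_end come from asm-generic/sections.h):

static void __init do_ctors(void)
{
#ifdef CONFIG_CONSTRUCTORS
	/* walk the .ctors section delimited by KERNEL_CTORS() above */
	ctor_fn_t *fn = (ctor_fn_t *) __ctors_start;

	for (; fn < (ctor_fn_t *) __ctors_end; fn++)
		(*fn)();
#endif
}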
*/ + ctor_fn_t *ctors; + unsigned int num_ctors; +#endif }; #ifndef MODULE_ARCH_INIT #define MODULE_ARCH_INIT {} diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h index 6316faf..6913b71 100644 --- a/include/linux/mtd/ubi.h +++ b/include/linux/mtd/ubi.h @@ -132,6 +132,39 @@ struct ubi_device_info { dev_t cdev; }; +/* + * enum - volume notification types. + * @UBI_VOLUME_ADDED: volume has been added + * @UBI_VOLUME_REMOVED: volume has been removed + * @UBI_VOLUME_RESIZED: volume has been re-sized + * @UBI_VOLUME_RENAMED: volume has been re-named + * @UBI_VOLUME_UPDATED: volume data has been updated + * + * These constants define which type of event has happened when a volume + * notification function is invoked. + */ +enum { + UBI_VOLUME_ADDED, + UBI_VOLUME_REMOVED, + UBI_VOLUME_RESIZED, + UBI_VOLUME_RENAMED, + UBI_VOLUME_UPDATED, +}; + +/* + * struct ubi_notification - UBI notification description structure. + * @di: UBI device description object + * @vi: UBI volume description object + * + * UBI notifiers are called with a pointer to an object of this type. The + * object describes the notification. Namely, it provides a description of the + * UBI device and UBI volume the notification informs about. + */ +struct ubi_notification { + struct ubi_device_info di; + struct ubi_volume_info vi; +}; + /* UBI descriptor given to users when they open UBI volumes */ struct ubi_volume_desc; @@ -141,6 +174,10 @@ void ubi_get_volume_info(struct ubi_volume_desc *desc, struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode); struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name, int mode); +int ubi_register_volume_notifier(struct notifier_block *nb, + int ignore_existing); +int ubi_unregister_volume_notifier(struct notifier_block *nb); + void ubi_close_volume(struct ubi_volume_desc *desc); int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset, int len, int check); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 9ea8d6d..d4a4d98 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -224,6 +224,11 @@ struct netdev_hw_addr { struct rcu_head rcu_head; }; +struct netdev_hw_addr_list { + struct list_head list; + int count; +}; + struct hh_cache { struct hh_cache *hh_next; /* Next entry */ @@ -776,9 +781,8 @@ struct net_device unsigned char addr_len; /* hardware address length */ unsigned short dev_id; /* for shared network cards */ - struct list_head uc_list; /* Secondary unicast mac - addresses */ - int uc_count; /* Number of installed ucasts */ + struct netdev_hw_addr_list uc; /* Secondary unicast + mac addresses */ int uc_promisc; spinlock_t addr_list_lock; struct dev_addr_list *mc_list; /* Multicast mac addresses */ @@ -810,7 +814,8 @@ struct net_device because most packets are unicast) */ - struct list_head dev_addr_list; /* list of device hw addresses */ + struct netdev_hw_addr_list dev_addrs; /* list of device + hw addresses */ unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ @@ -1806,11 +1811,11 @@ static inline void netif_addr_unlock_bh(struct net_device *dev) } /* - * dev_addr_list walker. Should be used only for read access. Call with + * dev_addrs walker. Should be used only for read access. Call with * rcu_read_lock held. 
*/ #define for_each_dev_addr(dev, ha) \ - list_for_each_entry_rcu(ha, &dev->dev_addr_list, list) + list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list) /* These functions live elsewhere (drivers/net/net_init.c, but related) */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index d6792f8..e2e5ce5 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -118,7 +118,6 @@ enum pageflags { PG_savepinned = PG_dirty, /* SLOB */ - PG_slob_page = PG_active, PG_slob_free = PG_private, /* SLUB */ @@ -201,7 +200,6 @@ PAGEFLAG(SavePinned, savepinned); /* Xen */ PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) -__PAGEFLAG(SlobPage, slob_page) __PAGEFLAG(SlobFree, slob_free) __PAGEFLAG(SlubFrozen, slub_frozen) diff --git a/include/linux/pps.h b/include/linux/pps.h new file mode 100644 index 0000000..cfe5c72 --- /dev/null +++ b/include/linux/pps.h @@ -0,0 +1,122 @@ +/* + * PPS API header + * + * Copyright (C) 2005-2009 Rodolfo Giometti <giometti@linux.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + + +#ifndef _PPS_H_ +#define _PPS_H_ + +#define PPS_VERSION "5.3.6" +#define PPS_MAX_SOURCES 16 /* should be enough... */ + +/* Implementation note: the logical states ``assert'' and ``clear'' + * are implemented in terms of the chip register, i.e. ``assert'' + * means the bit is set. */ + +/* + * 3.2 New data structures + */ + +#define PPS_API_VERS_1 1 +#define PPS_API_VERS PPS_API_VERS_1 /* we use API version 1 */ +#define PPS_MAX_NAME_LEN 32 + +/* 32-bit vs. 64-bit compatibility. + * + * On i386, the alignment of a uint64_t is only 4 bytes, while on most other + * architectures it's 8 bytes. On i386, there will be no padding between the + * two consecutive 'struct pps_ktime' members of struct pps_kinfo and struct + * pps_kparams. But on most platforms there will be padding to ensure correct + * alignment. + * + * The simple fix is probably to add an explicit padding. + * [David Woodhouse] + */ +struct pps_ktime { + __s64 sec; + __s32 nsec; + __u32 flags; +}; +#define PPS_TIME_INVALID (1<<0) /* used to specify timeout==NULL */ + +struct pps_kinfo { + __u32 assert_sequence; /* seq. num. of assert event */ + __u32 clear_sequence; /* seq. num. 
of clear event */ + struct pps_ktime assert_tu; /* time of assert event */ + struct pps_ktime clear_tu; /* time of clear event */ + int current_mode; /* current mode bits */ +}; + +struct pps_kparams { + int api_version; /* API version # */ + int mode; /* mode bits */ + struct pps_ktime assert_off_tu; /* offset compensation for assert */ + struct pps_ktime clear_off_tu; /* offset compensation for clear */ +}; + +/* + * 3.3 Mode bit definitions + */ + +/* Device/implementation parameters */ +#define PPS_CAPTUREASSERT 0x01 /* capture assert events */ +#define PPS_CAPTURECLEAR 0x02 /* capture clear events */ +#define PPS_CAPTUREBOTH 0x03 /* capture assert and clear events */ + +#define PPS_OFFSETASSERT 0x10 /* apply compensation for assert ev. */ +#define PPS_OFFSETCLEAR 0x20 /* apply compensation for clear ev. */ + +#define PPS_CANWAIT 0x100 /* can we wait for an event? */ +#define PPS_CANPOLL 0x200 /* bit reserved for future use */ + +/* Kernel actions */ +#define PPS_ECHOASSERT 0x40 /* feed back assert event to output */ +#define PPS_ECHOCLEAR 0x80 /* feed back clear event to output */ + +/* Timestamp formats */ +#define PPS_TSFMT_TSPEC 0x1000 /* select timespec format */ +#define PPS_TSFMT_NTPFP 0x2000 /* select NTP format */ + +/* + * 3.4.4 New functions: disciplining the kernel timebase + */ + +/* Kernel consumers */ +#define PPS_KC_HARDPPS 0 /* hardpps() (or equivalent) */ +#define PPS_KC_HARDPPS_PLL 1 /* hardpps() constrained to + use a phase-locked loop */ +#define PPS_KC_HARDPPS_FLL 2 /* hardpps() constrained to + use a frequency-locked loop */ +/* + * Here begins the implementation-specific part! + */ + +struct pps_fdata { + struct pps_kinfo info; + struct pps_ktime timeout; +}; + +#include <linux/ioctl.h> + +#define PPS_GETPARAMS _IOR('p', 0xa1, struct pps_kparams *) +#define PPS_SETPARAMS _IOW('p', 0xa2, struct pps_kparams *) +#define PPS_GETCAP _IOR('p', 0xa3, int *) +#define PPS_FETCH _IOWR('p', 0xa4, struct pps_fdata *) + +#endif /* _PPS_H_ */ diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h new file mode 100644 index 0000000..e0a193f --- /dev/null +++ b/include/linux/pps_kernel.h @@ -0,0 +1,89 @@ +/* + * PPS API kernel header + * + * Copyright (C) 2009 Rodolfo Giometti <giometti@linux.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include <linux/pps.h> + +#include <linux/cdev.h> +#include <linux/device.h> +#include <linux/time.h> + +/* + * Global defines + */ + +/* The specific PPS source info */ +struct pps_source_info { + char name[PPS_MAX_NAME_LEN]; /* symbolic name */ + char path[PPS_MAX_NAME_LEN]; /* path of connected device */ + int mode; /* PPS's allowed mode */ + + void (*echo)(int source, int event, void *data); /* PPS echo function */ + + struct module *owner; + struct device *dev; +}; + +/* The main struct */ +struct pps_device { + struct pps_source_info info; /* PPS source info */ + + struct pps_kparams params; /* PPS's current params */ + + __u32 assert_sequence; /* PPS' assert event seq # */ + __u32 clear_sequence; /* PPS' clear event seq # */ + struct pps_ktime assert_tu; + struct pps_ktime clear_tu; + int current_mode; /* PPS mode at event time */ + + int go; /* PPS event has arrived? */ + wait_queue_head_t queue; /* PPS event queue */ + + unsigned int id; /* PPS source unique ID */ + struct cdev cdev; + struct device *dev; + int devno; + struct fasync_struct *async_queue; /* fasync method */ + spinlock_t lock; + + atomic_t usage; /* usage count */ +}; + +/* + * Global variables + */ + +extern spinlock_t pps_idr_lock; +extern struct idr pps_idr; +extern struct timespec pps_irq_ts[]; + +extern struct device_attribute pps_attrs[]; + +/* + * Exported functions + */ + +struct pps_device *pps_get_source(int source); +extern void pps_put_source(struct pps_device *pps); +extern int pps_register_source(struct pps_source_info *info, + int default_params); +extern void pps_unregister_source(int source); +extern int pps_register_cdev(struct pps_device *pps); +extern void pps_unregister_cdev(struct pps_device *pps); +extern void pps_event(int source, struct pps_ktime *ts, int event, void *data); diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 59e133d..7456d7d 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -81,7 +81,6 @@ extern long arch_ptrace(struct task_struct *child, long request, long addr, long data); -extern struct task_struct *ptrace_get_task_struct(pid_t pid); extern int ptrace_traceme(void); extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h index 6ba830f..ffa2efb 100644 --- a/include/linux/raid/md_p.h +++ b/include/linux/raid/md_p.h @@ -232,7 +232,7 @@ struct mdp_superblock_1 { __le64 reshape_position; /* next address in array-space for reshape */ __le32 delta_disks; /* change in number of raid_disks */ __le32 new_layout; /* new layout */ - __le32 new_chunk; /* new chunk size (bytes) */ + __le32 new_chunk; /* new chunk size (512byte sectors) */ __u8 pad1[128-124]; /* set to 0 when written */ /* constant this-device information - 64 bytes */ diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index 2245c78..dd31e7b 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h @@ -660,23 +660,54 @@ static inline void set_le_key_k_type(int version, struct reiserfs_key *key, cpu_to_le32(type2uniqueness(type))) : (void)(set_offset_v2_k_type(&(key->u.k_offset_v2), type)); } + static inline void set_le_ih_k_type(struct item_head *ih, int type) { set_le_key_k_type(ih_version(ih), &(ih->ih_key), type); } -#define is_direntry_le_key(version,key) (le_key_k_type (version, key) == TYPE_DIRENTRY) -#define 
is_direct_le_key(version,key) (le_key_k_type (version, key) == TYPE_DIRECT) -#define is_indirect_le_key(version,key) (le_key_k_type (version, key) == TYPE_INDIRECT) -#define is_statdata_le_key(version,key) (le_key_k_type (version, key) == TYPE_STAT_DATA) +static inline int is_direntry_le_key(int version, struct reiserfs_key *key) +{ + return le_key_k_type(version, key) == TYPE_DIRENTRY; +} + +static inline int is_direct_le_key(int version, struct reiserfs_key *key) +{ + return le_key_k_type(version, key) == TYPE_DIRECT; +} + +static inline int is_indirect_le_key(int version, struct reiserfs_key *key) +{ + return le_key_k_type(version, key) == TYPE_INDIRECT; +} + +static inline int is_statdata_le_key(int version, struct reiserfs_key *key) +{ + return le_key_k_type(version, key) == TYPE_STAT_DATA; +} // // item header has version. // -#define is_direntry_le_ih(ih) is_direntry_le_key (ih_version (ih), &((ih)->ih_key)) -#define is_direct_le_ih(ih) is_direct_le_key (ih_version (ih), &((ih)->ih_key)) -#define is_indirect_le_ih(ih) is_indirect_le_key (ih_version(ih), &((ih)->ih_key)) -#define is_statdata_le_ih(ih) is_statdata_le_key (ih_version (ih), &((ih)->ih_key)) +static inline int is_direntry_le_ih(struct item_head *ih) +{ + return is_direntry_le_key(ih_version(ih), &ih->ih_key); +} + +static inline int is_direct_le_ih(struct item_head *ih) +{ + return is_direct_le_key(ih_version(ih), &ih->ih_key); +} + +static inline int is_indirect_le_ih(struct item_head *ih) +{ + return is_indirect_le_key(ih_version(ih), &ih->ih_key); +} + +static inline int is_statdata_le_ih(struct item_head *ih) +{ + return is_statdata_le_key(ih_version(ih), &ih->ih_key); +} // // key is pointer to cpu key, result is cpu diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h index 4c5bcf6..511f42f 100644 --- a/include/linux/res_counter.h +++ b/include/linux/res_counter.h @@ -49,6 +49,8 @@ struct res_counter { struct res_counter *parent; }; +#define RESOURCE_MAX (unsigned long long)LLONG_MAX + /** * Helpers to interact with userspace * res_counter_read_u64() - returns the value of the specified member. 
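The PPS character-device API introduced in include/linux/pps.h above is driven entirely by the PPS_* ioctls. As a minimal user-space consumer sketch — the /dev/pps0 node name is illustrative, and it assumes the header above is exported to user space:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pps.h>

int main(void)
{
	struct pps_kparams params;
	struct pps_fdata fdata;
	int fd = open("/dev/pps0", O_RDONLY);	/* node name is illustrative */

	if (fd < 0)
		return 1;

	/* Read the current parameters and enable capture of assert events. */
	if (ioctl(fd, PPS_GETPARAMS, &params) < 0)
		return 1;
	params.mode |= PPS_CAPTUREASSERT;
	if (ioctl(fd, PPS_SETPARAMS, &params) < 0)
		return 1;

	/* Block until the next event; per the header, PPS_TIME_INVALID in the
	 * timeout flags stands in for "no timeout". */
	memset(&fdata, 0, sizeof(fdata));
	fdata.timeout.flags = PPS_TIME_INVALID;
	if (ioctl(fd, PPS_FETCH, &fdata) < 0)
		return 1;

	/* Print the assert timestamp and sequence number. */
	printf("assert %lld.%09d #%u\n",
	       (long long)fdata.info.assert_tu.sec,
	       (int)fdata.info.assert_tu.nsec,
	       fdata.info.assert_sequence);
	close(fd);
	return 0;
}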
diff --git a/include/linux/sched.h b/include/linux/sched.h index 02042e7..4d07542 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -92,7 +92,6 @@ struct sched_param { #include <asm/processor.h> -struct mem_cgroup; struct exec_domain; struct futex_pi_state; struct robust_list_head; @@ -1879,9 +1878,6 @@ extern struct pid_namespace init_pid_ns; /* * find a task by one of its numerical ids * - * find_task_by_pid_type_ns(): - * it is the most generic call - it finds a task by all id, - * type and namespace specified * find_task_by_pid_ns(): * finds a task by its pid in the specified namespace * find_task_by_vpid(): @@ -1890,9 +1886,6 @@ extern struct pid_namespace init_pid_ns; * see also find_vpid() etc in include/linux/pid.h */ -extern struct task_struct *find_task_by_pid_type_ns(int type, int pid, - struct pid_namespace *ns); - extern struct task_struct *find_task_by_vpid(pid_t nr); extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 004f3b3..0c6a86b 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -43,6 +43,7 @@ int seq_release(struct inode *, struct file *); int seq_escape(struct seq_file *, const char *, const char *); int seq_putc(struct seq_file *m, char c); int seq_puts(struct seq_file *m, const char *s); +int seq_write(struct seq_file *seq, const void *data, size_t len); int seq_printf(struct seq_file *, const char *, ...) __attribute__ ((format (printf,2,3))); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 63ef24b..b47b3f0 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -265,7 +265,7 @@ typedef unsigned char *sk_buff_data_t; * @transport_header: Transport layer header * @network_header: Network layer header * @mac_header: Link layer header - * @dst: destination entry + * @_skb_dst: destination entry * @sp: the security path, used for xfrm * @cb: Control buffer. Free for use by every layer. Put private vars here * @len: Length of actual data diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index a0faa18..9c4cd27 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -245,6 +245,9 @@ struct spi_master { */ u16 dma_alignment; + /* spi_device.mode flags understood by this controller driver */ + u16 mode_bits; + /* Setup mode and clock, etc (spi driver may call many times). * * IMPORTANT: this may be called when transfers to another @@ -523,30 +526,7 @@ static inline void spi_message_free(struct spi_message *m) kfree(m); } -/** - * spi_setup - setup SPI mode and clock rate - * @spi: the device whose settings are being modified - * Context: can sleep, and no requests are queued to the device - * - * SPI protocol drivers may need to update the transfer mode if the - * device doesn't work with its default. They may likewise need - * to update clock rates or word sizes from initial values. This function - * changes those settings, and must be called from a context that can sleep. - * Except for SPI_CS_HIGH, which takes effect immediately, the changes take - * effect the next time the device is selected and data is transferred to - * or from it. When this function returns, the spi device is deselected. - * - * Note that this call will fail if the protocol driver specifies an option - * that the underlying controller or its driver does not support. For - * example, not all hardware supports wire transfers using nine bit words, - * LSB-first wire encoding, or active-high chipselects. 
- */ -static inline int -spi_setup(struct spi_device *spi) -{ - return spi->master->setup(spi); -} - +extern int spi_setup(struct spi_device *spi); /** * spi_async - asynchronous SPI transfer diff --git a/include/linux/swap.h b/include/linux/swap.h index 0cedf31..c88b366 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -319,10 +319,11 @@ static inline void disable_swap_token(void) } #ifdef CONFIG_CGROUP_MEM_RES_CTLR -extern void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent); +extern void +mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout); #else static inline void -mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent) +mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout) { } #endif @@ -423,12 +424,6 @@ static inline swp_entry_t get_swap_page(void) #define has_swap_token(x) 0 #define disable_swap_token() do { } while(0) -static inline int mem_cgroup_cache_charge_swapin(struct page *page, - struct mm_struct *mm, gfp_t mask, bool locked) -{ - return 0; -} - static inline void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent) { diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index eb96603..17ba82ef 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -143,7 +143,7 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) * * Return %LSM_UNSAFE_* bits applied to an exec because of tracing. * - * Called with task_lock() held on @task. + * @task->cred_guard_mutex is held by the caller through the do_execve(). */ static inline int tracehook_unsafe_exec(struct task_struct *task) { diff --git a/include/linux/w1-gpio.h b/include/linux/w1-gpio.h index 9797fec..3adeff8 100644 --- a/include/linux/w1-gpio.h +++ b/include/linux/w1-gpio.h @@ -18,6 +18,7 @@ struct w1_gpio_platform_data { unsigned int pin; unsigned int is_open_drain:1; + void (*enable_external_pullup)(int enable); }; #endif /* _LINUX_W1_GPIO_H */ diff --git a/include/net/sock.h b/include/net/sock.h index 95bd3fd..07133c5 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1208,6 +1208,39 @@ static inline int skb_copy_to_page(struct sock *sk, char __user *from, return 0; } +/** + * sk_wmem_alloc_get - returns write allocations + * @sk: socket + * + * Returns sk_wmem_alloc minus initial offset of one + */ +static inline int sk_wmem_alloc_get(const struct sock *sk) +{ + return atomic_read(&sk->sk_wmem_alloc) - 1; +} + +/** + * sk_rmem_alloc_get - returns read allocations + * @sk: socket + * + * Returns sk_rmem_alloc + */ +static inline int sk_rmem_alloc_get(const struct sock *sk) +{ + return atomic_read(&sk->sk_rmem_alloc); +} + +/** + * sk_has_allocations - check if allocations are outstanding + * @sk: socket + * + * Returns true if socket has write or read allocations + */ +static inline int sk_has_allocations(const struct sock *sk) +{ + return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk); +} + /* * Queue a received datagram if it will fit. 
Stream and sequenced * protocols can't normally use this as they need to fit buffers in diff --git a/include/net/x25.h b/include/net/x25.h index fc3f03d..2cda040 100644 --- a/include/net/x25.h +++ b/include/net/x25.h @@ -187,7 +187,7 @@ extern int x25_addr_ntoa(unsigned char *, struct x25_address *, extern int x25_addr_aton(unsigned char *, struct x25_address *, struct x25_address *); extern struct sock *x25_find_socket(unsigned int, struct x25_neigh *); -extern void x25_destroy_socket(struct sock *); +extern void x25_destroy_socket_from_timer(struct sock *); extern int x25_rx_call_request(struct sk_buff *, struct x25_neigh *, unsigned int); extern void x25_kill_by_neigh(struct x25_neigh *); diff --git a/include/scsi/Kbuild b/include/scsi/Kbuild new file mode 100644 index 0000000..33b2750 --- /dev/null +++ b/include/scsi/Kbuild @@ -0,0 +1,4 @@ +header-y += scsi.h +header-y += scsi_netlink.h +header-y += scsi_netlink_fc.h +header-y += scsi_bsg_fc.h diff --git a/include/scsi/scsi_bsg_fc.h b/include/scsi/scsi_bsg_fc.h new file mode 100644 index 0000000..a4b2333 --- /dev/null +++ b/include/scsi/scsi_bsg_fc.h @@ -0,0 +1,322 @@ +/* + * FC Transport BSG Interface + * + * Copyright (C) 2008 James Smart, Emulex Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef SCSI_BSG_FC_H +#define SCSI_BSG_FC_H + +/* + * This file is intended to be included by both kernel and user space + */ + +#include <scsi/scsi.h> + +/* + * FC Transport SGIO v4 BSG Message Support + */ + +/* Default BSG request timeout (10 seconds, expressed in jiffies) */ +#define FC_DEFAULT_BSG_TIMEOUT (10 * HZ) + + +/* + * Request Message Codes supported by the FC Transport + */ + +/* define the class masks for the message codes */ +#define FC_BSG_CLS_MASK 0xF0000000 /* find object class */ +#define FC_BSG_HST_MASK 0x80000000 /* fc host class */ +#define FC_BSG_RPT_MASK 0x40000000 /* fc rport class */ + + /* fc_host Message Codes */ +#define FC_BSG_HST_ADD_RPORT (FC_BSG_HST_MASK | 0x00000001) +#define FC_BSG_HST_DEL_RPORT (FC_BSG_HST_MASK | 0x00000002) +#define FC_BSG_HST_ELS_NOLOGIN (FC_BSG_HST_MASK | 0x00000003) +#define FC_BSG_HST_CT (FC_BSG_HST_MASK | 0x00000004) +#define FC_BSG_HST_VENDOR (FC_BSG_HST_MASK | 0x000000FF) + + /* fc_rport Message Codes */ +#define FC_BSG_RPT_ELS (FC_BSG_RPT_MASK | 0x00000001) +#define FC_BSG_RPT_CT (FC_BSG_RPT_MASK | 0x00000002) + + + +/* + * FC Address Identifiers in Message Structures : + * + * Whenever a command payload contains a FC Address Identifier + * (aka port_id), the value is effectively in big-endian + * order, thus the array elements are decoded as follows: + * element [0] is bits 23:16 of the FC Address Identifier + * element [1] is bits 15:8 of the FC Address Identifier + * element [2] is bits 7:0 of the FC Address Identifier + */ + + +/* + * FC Host Messages + */ + +/* FC_BSG_HST_ADD_RPORT : */ + +/* Request: + * This 
message requests the FC host to login to the remote port + * at the specified N_Port_Id. The remote port is to be enumerated + * with the transport upon completion of the login. + */ +struct fc_bsg_host_add_rport { + uint8_t reserved; + + /* FC Address Identifier of the remote port to login to */ + uint8_t port_id[3]; +}; + +/* Response: + * There is no additional response data - fc_bsg_reply->result is sufficient + */ + + +/* FC_BSG_HST_DEL_RPORT : */ + +/* Request: + * This message requests the FC host to remove an enumerated + * remote port and to terminate the login to it. + * + * Note: The driver is free to reject this request if it desires to + * remain logged in with the remote port. + */ +struct fc_bsg_host_del_rport { + uint8_t reserved; + + /* FC Address Identifier of the remote port to logout of */ + uint8_t port_id[3]; +}; + +/* Response: + * There is no additional response data - fc_bsg_reply->result is sufficient + */ + + +/* FC_BSG_HST_ELS_NOLOGIN : */ + +/* Request: + * This message requests the FC_Host to send an ELS to a specific + * N_Port_ID. The host does not need to log into the remote port, + * nor does it need to enumerate the rport for further traffic + * (although, the FC host is free to do so if it desires). + */ +struct fc_bsg_host_els { + /* + * ELS Command Code being sent (must be the same as byte 0 + * of the payload) + */ + uint8_t command_code; + + /* FC Address Identifier of the remote port to send the ELS to */ + uint8_t port_id[3]; +}; + +/* Response: + */ +/* fc_bsg_ctels_reply->status values */ +#define FC_CTELS_STATUS_OK 0x00000000 +#define FC_CTELS_STATUS_REJECT 0x00000001 +#define FC_CTELS_STATUS_P_RJT 0x00000002 +#define FC_CTELS_STATUS_F_RJT 0x00000003 +#define FC_CTELS_STATUS_P_BSY 0x00000004 +#define FC_CTELS_STATUS_F_BSY 0x00000006 +struct fc_bsg_ctels_reply { + /* + * Note: An ELS LS_RJT may be reported in 2 ways: + * a) A status of FC_CTELS_STATUS_OK is returned. The caller + * is to look into the ELS receive payload to determine + * LS_ACC or LS_RJT (by contents of word 0). The reject + * data will be in word 1. + * b) A status of FC_CTELS_STATUS_REJECT is returned. The + * rjt_data field will contain valid data. + * + * Note: ELS LS_ACC is determined by an FC_CTELS_STATUS_OK, and + * the receive payload word 0 indicates LS_ACC + * (e.g. value is 0x02xxxxxx). + * + * Note: Similarly, a CT Reject may be reported in 2 ways: + * a) A status of FC_CTELS_STATUS_OK is returned. The caller + * is to look into the CT receive payload to determine + * Accept or Reject (by contents of word 2). The reject + * data will be in word 3. + * b) A status of FC_CTELS_STATUS_REJECT is returned. The + * rjt_data field will contain valid data. + * + * Note: x_RJT/BSY status will indicate that the rjt_data field + * is valid and contains the reason/explanation values. + */ + uint32_t status; /* See FC_CTELS_STATUS_xxx */ + + /* valid if status is not FC_CTELS_STATUS_OK */ + struct { + uint8_t action; /* fragment_id for CT REJECT */ + uint8_t reason_code; + uint8_t reason_explanation; + uint8_t vendor_unique; + } rjt_data; +}; + + +/* FC_BSG_HST_CT : */ + +/* Request: + * This message requests that a CT Request be performed with the + * indicated N_Port_ID. The driver is responsible for logging in with + * the fabric and/or N_Port_ID, etc as per FC rules. This request does + * not mandate that the driver must enumerate the destination in the + * transport. The driver is allowed to decide whether to enumerate it, + * and whether to tear it down after the request. + */
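These request/reply structures ride the generic block-layer bsg transport, so a user-space issuer wraps them in a struct sg_io_v4. A rough sketch for FC_BSG_HST_ELS_NOLOGIN — the bsg node, D_ID value, and helper name are illustrative, and it assumes <linux/bsg.h> and this header are both visible to user space:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/bsg.h>		/* struct sg_io_v4, BSG_PROTOCOL_* */
#include <scsi/sg.h>		/* SG_IO ioctl number */
#include <scsi/scsi_bsg_fc.h>

/* Send a raw ELS frame to D_ID 0x010203 through an fc_host bsg node. */
static int els_nologin(int bsg_fd, uint8_t *els, uint32_t els_len,
		       uint8_t *rsp, uint32_t rsp_len)
{
	struct fc_bsg_request rqst;
	struct fc_bsg_reply reply;
	struct sg_io_v4 io;

	memset(&rqst, 0, sizeof(rqst));
	rqst.msgcode = FC_BSG_HST_ELS_NOLOGIN;
	rqst.rqst_data.h_els.command_code = els[0];	/* byte 0 of payload */
	rqst.rqst_data.h_els.port_id[0] = 0x01;		/* bits 23:16, see above */
	rqst.rqst_data.h_els.port_id[1] = 0x02;		/* bits 15:8 */
	rqst.rqst_data.h_els.port_id[2] = 0x03;		/* bits 7:0 */

	memset(&io, 0, sizeof(io));
	io.guard = 'Q';					/* SG_IO v4 marker */
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uintptr_t)&rqst;
	io.request_len = sizeof(rqst);
	io.dout_xferp = (uintptr_t)els;			/* ELS payload out */
	io.dout_xfer_len = els_len;
	io.din_xferp = (uintptr_t)rsp;			/* ELS response in */
	io.din_xfer_len = rsp_len;
	io.response = (uintptr_t)&reply;
	io.max_response_len = sizeof(reply);

	if (ioctl(bsg_fd, SG_IO, &io) < 0)
		return -1;
	return (int)reply.result;	/* -Exxx or 4-byte scsi result */
}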
+struct fc_bsg_host_ct { + uint8_t reserved; + + /* FC Address Identifier of the remote port to send the CT to */ + uint8_t port_id[3]; + + /* + * We need words 0-2 of the generic preamble for the LLDs + */ + uint32_t preamble_word0; /* revision & IN_ID */ + uint32_t preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */ + uint32_t preamble_word2; /* Cmd Code, Max Size */ + +}; +/* Response: + * + * The reply structure is an fc_bsg_ctels_reply structure + */ + + +/* FC_BSG_HST_VENDOR : */ + +/* Request: + * Note: When specifying vendor_id, be sure to read the Vendor Type and ID + * formatting requirements specified in scsi_netlink.h + */ +struct fc_bsg_host_vendor { + /* + * Identifies the vendor that the message is formatted for. This + * should be the recipient of the message. + */ + uint64_t vendor_id; + + /* start of vendor command area */ + uint32_t vendor_cmd[0]; +}; + +/* Response: + */ +struct fc_bsg_host_vendor_reply { + /* start of vendor response area */ + uint32_t vendor_rsp[0]; +}; + + + +/* + * FC Remote Port Messages + */ + +/* FC_BSG_RPT_ELS : */ + +/* Request: + * This message requests that an ELS be performed with the rport. + */ +struct fc_bsg_rport_els { + /* + * ELS Command Code being sent (must be the same as + * byte 0 of the payload) + */ + uint8_t els_code; +}; + +/* Response: + * + * The reply structure is an fc_bsg_ctels_reply structure + */ + + +/* FC_BSG_RPT_CT : */ + +/* Request: + * This message requests that a CT Request be performed with the rport. + */ +struct fc_bsg_rport_ct { + /* + * We need words 0-2 of the generic preamble for the LLDs + */ + uint32_t preamble_word0; /* revision & IN_ID */ + uint32_t preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */ + uint32_t preamble_word2; /* Cmd Code, Max Size */ +}; +/* Response: + * + * The reply structure is an fc_bsg_ctels_reply structure + */ + + + + +/* request (CDB) structure of the sg_io_v4 */ +struct fc_bsg_request { + uint32_t msgcode; + union { + struct fc_bsg_host_add_rport h_addrport; + struct fc_bsg_host_del_rport h_delrport; + struct fc_bsg_host_els h_els; + struct fc_bsg_host_ct h_ct; + struct fc_bsg_host_vendor h_vendor; + + struct fc_bsg_rport_els r_els; + struct fc_bsg_rport_ct r_ct; + } rqst_data; +}; + + +/* response (request sense data) structure of the sg_io_v4 */ struct fc_bsg_reply { + /* + * The completion result. Result exists in two forms: + * if negative, it is an -Exxx system errno value. There will + * be no further reply information supplied. + * else, it's the 4-byte scsi error result, with driver, host, + * msg and status fields. The per-msgcode reply structure + * will contain valid data. + */ + uint32_t result; + + /* If there was reply_payload, how much was received? */ + uint32_t reply_payload_rcv_len; + + union { + struct fc_bsg_host_vendor_reply vendor_reply; + + struct fc_bsg_ctels_reply ctels_reply; + } reply_data; +}; + + +#endif /* SCSI_BSG_FC_H */ + diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index d123ca8..b62a097 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -478,6 +478,15 @@ struct scsi_host_template { * module_init/module_exit. 
*/ struct list_head legacy_hosts; + + /* + * Vendor Identifier associated with the host + * + * Note: When specifying vendor_id, be sure to read the + * Vendor Type and ID formatting requirements specified in + * scsi_netlink.h + */ + u64 vendor_id; }; /* diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index 68a8d87..fc50bd6 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h @@ -33,7 +33,6 @@ struct scsi_transport_template; - /* * FC Port definitions - Following FC HBAAPI guidelines * @@ -352,6 +351,7 @@ struct fc_rport { /* aka fc_starget_attrs */ struct delayed_work fail_io_work; struct work_struct stgt_delete_work; struct work_struct rport_delete_work; + struct request_queue *rqst_q; /* bsg support */ } __attribute__((aligned(sizeof(unsigned long)))); /* bit field values for struct fc_rport "flags" field: */ @@ -514,6 +514,9 @@ struct fc_host_attrs { struct workqueue_struct *work_q; char devloss_work_q_name[20]; struct workqueue_struct *devloss_work_q; + + /* bsg support */ + struct request_queue *rqst_q; }; #define shost_to_fc_host(x) \ @@ -579,6 +582,47 @@ struct fc_host_attrs { (((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q) + +struct fc_bsg_buffer { + unsigned int payload_len; + int sg_cnt; + struct scatterlist *sg_list; +}; + +/* Values for fc_bsg_job->state_flags (bitflags) */ +#define FC_RQST_STATE_INPROGRESS 0 +#define FC_RQST_STATE_DONE 1 + +struct fc_bsg_job { + struct Scsi_Host *shost; + struct fc_rport *rport; + struct device *dev; + struct request *req; + spinlock_t job_lock; + unsigned int state_flags; + unsigned int ref_cnt; + void (*job_done)(struct fc_bsg_job *); + + struct fc_bsg_request *request; + struct fc_bsg_reply *reply; + unsigned int request_len; + unsigned int reply_len; + /* + * On entry: reply_len indicates the buffer size allocated for + * the reply. + * + * Upon completion: the message handler must set reply_len + * to indicate the size of the reply to be returned to the + * caller. 
+ */ + + /* DMA payloads for the request/response */ + struct fc_bsg_buffer request_payload; + struct fc_bsg_buffer reply_payload; + + void *dd_data; /* Used for driver-specific storage */ +}; + + /* The functions by which the transport class and the driver communicate */ struct fc_function_template { void (*get_rport_dev_loss_tmo)(struct fc_rport *); @@ -614,9 +658,14 @@ struct fc_function_template { int (* tsk_mgmt_response)(struct Scsi_Host *, u64, u64, int); int (* it_nexus_response)(struct Scsi_Host *, u64, int); + /* bsg support */ + int (*bsg_request)(struct fc_bsg_job *); + int (*bsg_timeout)(struct fc_bsg_job *); + /* allocation lengths for host-specific data */ u32 dd_fcrport_size; u32 dd_fcvport_size; + u32 dd_bsg_size; /* * The driver sets these to tell the transport class it @@ -737,7 +786,6 @@ fc_vport_set_state(struct fc_vport *vport, enum fc_vport_state new_state) vport->vport_state = new_state; } - struct scsi_transport_template *fc_attach_transport( struct fc_function_template *); void fc_release_transport(struct scsi_transport_template *); diff --git a/include/scsi/scsi_transport_spi.h b/include/scsi/scsi_transport_spi.h index 286e962..7497a38 100644 --- a/include/scsi/scsi_transport_spi.h +++ b/include/scsi/scsi_transport_spi.h @@ -36,8 +36,10 @@ struct spi_transport_attrs { unsigned int width:1; /* 0 - narrow, 1 - wide */ unsigned int max_width:1; unsigned int iu:1; /* Information Units enabled */ + unsigned int max_iu:1; unsigned int dt:1; /* DT clocking enabled */ unsigned int qas:1; /* Quick Arbitration and Selection enabled */ + unsigned int max_qas:1; unsigned int wr_flow:1; /* Write Flow control enabled */ unsigned int rd_strm:1; /* Read streaming enabled */ unsigned int rti:1; /* Retain Training Information */ @@ -77,8 +79,10 @@ struct spi_host_attrs { #define spi_width(x) (((struct spi_transport_attrs *)&(x)->starget_data)->width) #define spi_max_width(x) (((struct spi_transport_attrs *)&(x)->starget_data)->max_width) #define spi_iu(x) (((struct spi_transport_attrs *)&(x)->starget_data)->iu) +#define spi_max_iu(x) (((struct spi_transport_attrs *)&(x)->starget_data)->max_iu) #define spi_dt(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dt) #define spi_qas(x) (((struct spi_transport_attrs *)&(x)->starget_data)->qas) +#define spi_max_qas(x) (((struct spi_transport_attrs *)&(x)->starget_data)->max_qas) #define spi_wr_flow(x) (((struct spi_transport_attrs *)&(x)->starget_data)->wr_flow) #define spi_rd_strm(x) (((struct spi_transport_attrs *)&(x)->starget_data)->rd_strm) #define spi_rti(x) (((struct spi_transport_attrs *)&(x)->starget_data)->rti) diff --git a/init/Kconfig b/init/Kconfig index c4b3c6d..1ce05a4 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -16,6 +16,11 @@ config DEFCONFIG_LIST default "$ARCH_DEFCONFIG" default "arch/$ARCH/defconfig" +config CONSTRUCTORS + bool + depends on !UML + default y + menu "General setup" config EXPERIMENTAL diff --git a/init/main.c b/init/main.c index 0e7aede..09131ec 100644 --- a/init/main.c +++ b/init/main.c @@ -642,6 +642,10 @@ asmlinkage void __init start_kernel(void) "enabled early\n"); early_boot_irqs_on(); local_irq_enable(); + + /* Interrupts are enabled now so all GFP allocations are safe. */ + set_gfp_allowed_mask(__GFP_BITS_MASK); + kmem_cache_init_late(); /* @@ -720,6 +724,17 @@ asmlinkage void __init start_kernel(void) rest_init(); } +/* Call all constructor functions linked into the kernel. 
*/ +static void __init do_ctors(void) +{ +#ifdef CONFIG_CONSTRUCTORS + ctor_fn_t *call = (ctor_fn_t *) __ctors_start; + + for (; call < (ctor_fn_t *) __ctors_end; call++) + (*call)(); +#endif +} + int initcall_debug; core_param(initcall_debug, initcall_debug, bool, 0644); @@ -800,6 +815,7 @@ static void __init do_basic_setup(void) usermodehelper_init(); driver_init(); init_irq_proc(); + do_ctors(); do_initcalls(); } diff --git a/ipc/namespace.c b/ipc/namespace.c index 4a5e752..a1094ff 100644 --- a/ipc/namespace.c +++ b/ipc/namespace.c @@ -14,7 +14,7 @@ #include "util.h" -static struct ipc_namespace *clone_ipc_ns(struct ipc_namespace *old_ns) +static struct ipc_namespace *create_ipc_ns(void) { struct ipc_namespace *ns; int err; @@ -48,18 +48,9 @@ static struct ipc_namespace *clone_ipc_ns(struct ipc_namespace *old_ns) struct ipc_namespace *copy_ipcs(unsigned long flags, struct ipc_namespace *ns) { - struct ipc_namespace *new_ns; - - BUG_ON(!ns); - get_ipc_ns(ns); - if (!(flags & CLONE_NEWIPC)) - return ns; - - new_ns = clone_ipc_ns(ns); - - put_ipc_ns(ns); - return new_ns; + if (!(flags & CLONE_NEWIPC)) + return get_ipc_ns(ns); + return create_ipc_ns(); } /* @@ -92,6 +83,30 @@ void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids, up_write(&ids->rw_mutex); } +static void free_ipc_ns(struct ipc_namespace *ns) +{ + /* + * Unregistering the hotplug notifier at the beginning guarantees + * that the ipc namespace won't be freed while we are inside the + * callback routine. Since the blocking_notifier_chain_XXX routines + * hold a rw lock on the notifier list, unregister_ipcns_notifier() + * won't take the rw lock before blocking_notifier_call_chain() has + * released the rd lock. + */ + unregister_ipcns_notifier(ns); + sem_exit_ns(ns); + msg_exit_ns(ns); + shm_exit_ns(ns); + kfree(ns); + atomic_dec(&nr_ipc_ns); + + /* + * Do the ipcns removal notification after decrementing nr_ipc_ns in + * order to have a correct value when recomputing msgmni. + */ + ipcns_notify(IPCNS_REMOVED); +} + /* * put_ipc_ns - drop a reference to an ipc namespace. * @ns: the namespace to put @@ -117,27 +132,3 @@ void put_ipc_ns(struct ipc_namespace *ns) free_ipc_ns(ns); } } - -void free_ipc_ns(struct ipc_namespace *ns) -{ - /* - * Unregistering the hotplug notifier at the beginning guarantees - * that the ipc namespace won't be freed while we are inside the - * callback routine. Since the blocking_notifier_chain_XXX routines - * hold a rw lock on the notifier list, unregister_ipcns_notifier() - * won't take the rw lock before blocking_notifier_call_chain() has - * released the rd lock. - */ - unregister_ipcns_notifier(ns); - sem_exit_ns(ns); - msg_exit_ns(ns); - shm_exit_ns(ns); - kfree(ns); - atomic_dec(&nr_ipc_ns); - - /* - * Do the ipcns removal notification after decrementing nr_ipc_ns in - * order to have a correct value when recomputing msgmni. - */ - ipcns_notify(IPCNS_REMOVED); -} @@ -128,7 +128,7 @@ void ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out); struct kern_ipc_perm *ipcctl_pre_down(struct ipc_ids *ids, int id, int cmd, struct ipc64_perm *perm, int extra_perm); -#if defined(__ia64__) || defined(__x86_64__) || defined(__hppa__) || defined(__XTENSA__) +#ifndef __ARCH_WANT_IPC_PARSE_VERSION /* On IA-64, we always use the "64-bit version" of the IPC structures. 
*/ # define ipc_parse_version(cmd) IPC_64 #else @@ -171,5 +171,6 @@ static inline void ipc_unlock(struct kern_ipc_perm *perm) struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id); int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids, struct ipc_ops *ops, struct ipc_params *params); - +void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids, + void (*free)(struct ipc_namespace *, struct kern_ipc_perm *)); #endif diff --git a/kernel/Makefile b/kernel/Makefile index 9df4501..0a32cb2 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -71,6 +71,7 @@ obj-$(CONFIG_STOP_MACHINE) += stop_machine.o obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o obj-$(CONFIG_AUDIT) += audit.o auditfilter.o obj-$(CONFIG_AUDITSYSCALL) += auditsc.o +obj-$(CONFIG_GCOV_KERNEL) += gcov/ obj-$(CONFIG_AUDIT_TREE) += audit_tree.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_KGDB) += kgdb.o diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 3fb789f..3737a68 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -843,6 +843,11 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) { char *token, *o = data ?: "all"; + unsigned long mask = (unsigned long)-1; + +#ifdef CONFIG_CPUSETS + mask = ~(1UL << cpuset_subsys_id); +#endif opts->subsys_bits = 0; opts->flags = 0; @@ -887,6 +892,15 @@ } } + /* + * Option noprefix was introduced just for backward compatibility + * with the old cpuset, so we allow noprefix only if mounting just + * the cpuset subsystem. + */ + if (test_bit(ROOT_NOPREFIX, &opts->flags) && + (opts->subsys_bits & mask)) + return -EINVAL; + /* We can't have an empty hierarchy */ if (!opts->subsys_bits) return -EINVAL; diff --git a/kernel/exit.c b/kernel/exit.c index b6c90b5..13ae640 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -375,9 +375,8 @@ static void set_special_pids(struct pid *pid) } /* - * Let kernel threads use this to say that they - * allow a certain signal (since daemonize() will - * have disabled all of them by default). + * Let kernel threads use this to say that they allow a certain signal. + * Must not be used if kthread was cloned with CLONE_SIGHAND. */ int allow_signal(int sig) { @@ -385,14 +384,14 @@ int allow_signal(int sig) return -EINVAL; spin_lock_irq(&current->sighand->siglock); + /* This is only needed for daemonize()'ed kthreads */ sigdelset(&current->blocked, sig); - if (!current->mm) { - /* Kernel threads handle their own signals. Let the signal code know it'll be handled, so that they don't get converted to SIGKILL or just silently dropped */ - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2; - } + /* + * Kernel threads handle their own signals. Let the signal code + * know it'll be handled, so that they don't get converted to + * SIGKILL or just silently dropped. + */ + current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); return 0; } @@ -591,7 +590,7 @@ retry: /* * Search in the siblings */ - list_for_each_entry(c, &p->parent->children, sibling) { + list_for_each_entry(c, &p->real_parent->children, sibling) { if (c->mm == mm) goto assign_new_owner; } @@ -758,7 +757,7 @@ static void reparent_thread(struct task_struct *father, struct task_struct *p, p->exit_signal = SIGCHLD; /* If it has exited notify the new parent about this child's death. 
*/ - if (!p->ptrace && + if (!task_ptrace(p) && p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) { do_notify_parent(p, p->exit_signal); if (task_detached(p)) { @@ -783,7 +782,7 @@ static void forget_original_parent(struct task_struct *father) list_for_each_entry_safe(p, n, &father->children, sibling) { p->real_parent = reaper; if (p->parent == father) { - BUG_ON(p->ptrace); + BUG_ON(task_ptrace(p)); p->parent = p->real_parent; } reparent_thread(father, p, &dead_children); @@ -1081,6 +1080,18 @@ SYSCALL_DEFINE1(exit_group, int, error_code) return 0; } +struct wait_opts { + enum pid_type wo_type; + int wo_flags; + struct pid *wo_pid; + + struct siginfo __user *wo_info; + int __user *wo_stat; + struct rusage __user *wo_rusage; + + int notask_error; +}; + static struct pid *task_pid_type(struct task_struct *task, enum pid_type type) { struct pid *pid = NULL; @@ -1091,13 +1102,12 @@ static struct pid *task_pid_type(struct task_struct *task, enum pid_type type) return pid; } -static int eligible_child(enum pid_type type, struct pid *pid, int options, - struct task_struct *p) +static int eligible_child(struct wait_opts *wo, struct task_struct *p) { int err; - if (type < PIDTYPE_MAX) { - if (task_pid_type(p, type) != pid) + if (wo->wo_type < PIDTYPE_MAX) { + if (task_pid_type(p, wo->wo_type) != wo->wo_pid) return 0; } @@ -1106,8 +1116,8 @@ static int eligible_child(enum pid_type type, struct pid *pid, int options, * set; otherwise, wait for non-clone children *only*. (Note: * A "clone" child here is one that reports to its parent * using a signal other than SIGCHLD.) */ - if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0)) - && !(options & __WALL)) + if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE)) + && !(wo->wo_flags & __WALL)) return 0; err = security_task_wait(p); @@ -1117,14 +1127,15 @@ static int eligible_child(enum pid_type type, struct pid *pid, int options, return 1; } -static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid, - int why, int status, - struct siginfo __user *infop, - struct rusage __user *rusagep) +static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p, + pid_t pid, uid_t uid, int why, int status) { - int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0; + struct siginfo __user *infop; + int retval = wo->wo_rusage + ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; put_task_struct(p); + infop = wo->wo_info; if (!retval) retval = put_user(SIGCHLD, &infop->si_signo); if (!retval) @@ -1148,19 +1159,18 @@ static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid, * the lock and this task is uninteresting. If we return nonzero, we have * released the lock and the system call should return. */ -static int wait_task_zombie(struct task_struct *p, int options, - struct siginfo __user *infop, - int __user *stat_addr, struct rusage __user *ru) +static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) { unsigned long state; int retval, status, traced; pid_t pid = task_pid_vnr(p); uid_t uid = __task_cred(p)->uid; + struct siginfo __user *infop; - if (!likely(options & WEXITED)) + if (!likely(wo->wo_flags & WEXITED)) return 0; - if (unlikely(options & WNOWAIT)) { + if (unlikely(wo->wo_flags & WNOWAIT)) { int exit_code = p->exit_code; int why, status; @@ -1173,8 +1183,7 @@ static int wait_task_zombie(struct task_struct *p, int options, why = (exit_code & 0x80) ? 
CLD_DUMPED : CLD_KILLED; status = exit_code & 0x7f; } - return wait_noreap_copyout(p, pid, uid, why, - status, infop, ru); + return wait_noreap_copyout(wo, p, pid, uid, why, status); } /* @@ -1192,7 +1201,6 @@ static int wait_task_zombie(struct task_struct *p, int options, if (likely(!traced)) { struct signal_struct *psig; struct signal_struct *sig; - struct task_cputime cputime; /* * The resource counters for the group leader are in its @@ -1205,26 +1213,23 @@ static int wait_task_zombie(struct task_struct *p, int options, * p->signal fields, because they are only touched by * __exit_signal, which runs with tasklist_lock * write-locked anyway, and so is excluded here. We do - * need to protect the access to p->parent->signal fields, + * need to protect the access to parent->signal fields, * as other threads in the parent group can be right * here reaping other children at the same time. - * - * We use thread_group_cputime() to get times for the thread - * group, which consolidates times for all threads in the - * group including the group leader. */ - thread_group_cputime(p, &cputime); - spin_lock_irq(&p->parent->sighand->siglock); - psig = p->parent->signal; + spin_lock_irq(&p->real_parent->sighand->siglock); + psig = p->real_parent->signal; sig = p->signal; psig->cutime = cputime_add(psig->cutime, - cputime_add(cputime.utime, - sig->cutime)); + cputime_add(p->utime, + cputime_add(sig->utime, + sig->cutime))); psig->cstime = cputime_add(psig->cstime, - cputime_add(cputime.stime, - sig->cstime)); + cputime_add(p->stime, + cputime_add(sig->stime, + sig->cstime))); psig->cgtime = cputime_add(psig->cgtime, cputime_add(p->gtime, @@ -1246,7 +1251,7 @@ static int wait_task_zombie(struct task_struct *p, int options, sig->oublock + sig->coublock; task_io_accounting_add(&psig->ioac, &p->ioac); task_io_accounting_add(&psig->ioac, &sig->ioac); - spin_unlock_irq(&p->parent->sighand->siglock); + spin_unlock_irq(&p->real_parent->sighand->siglock); } /* @@ -1255,11 +1260,14 @@ static int wait_task_zombie(struct task_struct *p, int options, */ read_unlock(&tasklist_lock); - retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0; + retval = wo->wo_rusage + ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; status = (p->signal->flags & SIGNAL_GROUP_EXIT) ? p->signal->group_exit_code : p->exit_code; - if (!retval && stat_addr) - retval = put_user(status, stat_addr); + if (!retval && wo->wo_stat) + retval = put_user(status, wo->wo_stat); + + infop = wo->wo_info; if (!retval && infop) retval = put_user(SIGCHLD, &infop->si_signo); if (!retval && infop) @@ -1327,15 +1335,18 @@ static int *task_stopped_code(struct task_struct *p, bool ptrace) * the lock and this task is uninteresting. If we return nonzero, we have * released the lock and the system call should return. */ -static int wait_task_stopped(int ptrace, struct task_struct *p, - int options, struct siginfo __user *infop, - int __user *stat_addr, struct rusage __user *ru) +static int wait_task_stopped(struct wait_opts *wo, + int ptrace, struct task_struct *p) { + struct siginfo __user *infop; int retval, exit_code, *p_code, why; uid_t uid = 0; /* unneeded, required by compiler */ pid_t pid; - if (!(options & WUNTRACED)) + /* + * Traditionally we see ptrace'd stopped tasks regardless of options. 
+ */ + if (!ptrace && !(wo->wo_flags & WUNTRACED)) return 0; exit_code = 0; @@ -1349,7 +1360,7 @@ static int wait_task_stopped(int ptrace, struct task_struct *p, if (!exit_code) goto unlock_sig; - if (!unlikely(options & WNOWAIT)) + if (!unlikely(wo->wo_flags & WNOWAIT)) *p_code = 0; /* don't need the RCU readlock here as we're holding a spinlock */ @@ -1371,14 +1382,15 @@ unlock_sig: why = ptrace ? CLD_TRAPPED : CLD_STOPPED; read_unlock(&tasklist_lock); - if (unlikely(options & WNOWAIT)) - return wait_noreap_copyout(p, pid, uid, - why, exit_code, - infop, ru); + if (unlikely(wo->wo_flags & WNOWAIT)) + return wait_noreap_copyout(wo, p, pid, uid, why, exit_code); + + retval = wo->wo_rusage + ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; + if (!retval && wo->wo_stat) + retval = put_user((exit_code << 8) | 0x7f, wo->wo_stat); - retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0; - if (!retval && stat_addr) - retval = put_user((exit_code << 8) | 0x7f, stat_addr); + infop = wo->wo_info; if (!retval && infop) retval = put_user(SIGCHLD, &infop->si_signo); if (!retval && infop) @@ -1405,15 +1417,13 @@ unlock_sig: * the lock and this task is uninteresting. If we return nonzero, we have * released the lock and the system call should return. */ -static int wait_task_continued(struct task_struct *p, int options, - struct siginfo __user *infop, - int __user *stat_addr, struct rusage __user *ru) +static int wait_task_continued(struct wait_opts *wo, struct task_struct *p) { int retval; pid_t pid; uid_t uid; - if (!unlikely(options & WCONTINUED)) + if (!unlikely(wo->wo_flags & WCONTINUED)) return 0; if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) @@ -1425,7 +1435,7 @@ static int wait_task_continued(struct task_struct *p, int options, spin_unlock_irq(&p->sighand->siglock); return 0; } - if (!unlikely(options & WNOWAIT)) + if (!unlikely(wo->wo_flags & WNOWAIT)) p->signal->flags &= ~SIGNAL_STOP_CONTINUED; uid = __task_cred(p)->uid; spin_unlock_irq(&p->sighand->siglock); @@ -1434,17 +1444,17 @@ static int wait_task_continued(struct task_struct *p, int options, get_task_struct(p); read_unlock(&tasklist_lock); - if (!infop) { - retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0; + if (!wo->wo_info) { + retval = wo->wo_rusage + ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; put_task_struct(p); - if (!retval && stat_addr) - retval = put_user(0xffff, stat_addr); + if (!retval && wo->wo_stat) + retval = put_user(0xffff, wo->wo_stat); if (!retval) retval = pid; } else { - retval = wait_noreap_copyout(p, pid, uid, - CLD_CONTINUED, SIGCONT, - infop, ru); + retval = wait_noreap_copyout(wo, p, pid, uid, + CLD_CONTINUED, SIGCONT); BUG_ON(retval == 0); } @@ -1454,19 +1464,16 @@ static int wait_task_continued(struct task_struct *p, int options, /* * Consider @p for a wait by @parent. * - * -ECHILD should be in *@notask_error before the first call. + * -ECHILD should be in ->notask_error before the first call. * Returns nonzero for a final return, when we have unlocked tasklist_lock. * Returns zero if the search for a child should continue; - * then *@notask_error is 0 if @p is an eligible child, + * then ->notask_error is 0 if @p is an eligible child, * or another error from security_task_wait(), or still -ECHILD. 
*/ -static int wait_consider_task(struct task_struct *parent, int ptrace, - struct task_struct *p, int *notask_error, - enum pid_type type, struct pid *pid, int options, - struct siginfo __user *infop, - int __user *stat_addr, struct rusage __user *ru) +static int wait_consider_task(struct wait_opts *wo, struct task_struct *parent, + int ptrace, struct task_struct *p) { - int ret = eligible_child(type, pid, options, p); + int ret = eligible_child(wo, p); if (!ret) return ret; @@ -1478,17 +1485,17 @@ static int wait_consider_task(struct task_struct *parent, int ptrace, * to look for security policy problems, rather * than for mysterious wait bugs. */ - if (*notask_error) - *notask_error = ret; + if (wo->notask_error) + wo->notask_error = ret; return 0; } - if (likely(!ptrace) && unlikely(p->ptrace)) { + if (likely(!ptrace) && unlikely(task_ptrace(p))) { /* * This child is hidden by ptrace. * We aren't allowed to see it now, but eventually we will. */ - *notask_error = 0; + wo->notask_error = 0; return 0; } @@ -1499,34 +1506,30 @@ static int wait_consider_task(struct task_struct *parent, int ptrace, * We don't reap group leaders with subthreads. */ if (p->exit_state == EXIT_ZOMBIE && !delay_group_leader(p)) - return wait_task_zombie(p, options, infop, stat_addr, ru); + return wait_task_zombie(wo, p); /* * It's stopped or running now, so it might * later continue, exit, or stop again. */ - *notask_error = 0; + wo->notask_error = 0; if (task_stopped_code(p, ptrace)) - return wait_task_stopped(ptrace, p, options, - infop, stat_addr, ru); + return wait_task_stopped(wo, ptrace, p); - return wait_task_continued(p, options, infop, stat_addr, ru); + return wait_task_continued(wo, p); } /* * Do the work of do_wait() for one thread in the group, @tsk. * - * -ECHILD should be in *@notask_error before the first call. + * -ECHILD should be in ->notask_error before the first call. * Returns nonzero for a final return, when we have unlocked tasklist_lock. * Returns zero if the search for a child should continue; then - * *@notask_error is 0 if there were any eligible children, + * ->notask_error is 0 if there were any eligible children, * or another error from security_task_wait(), or still -ECHILD. */ -static int do_wait_thread(struct task_struct *tsk, int *notask_error, - enum pid_type type, struct pid *pid, int options, - struct siginfo __user *infop, int __user *stat_addr, - struct rusage __user *ru) +static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk) { struct task_struct *p; @@ -1535,9 +1538,7 @@ static int do_wait_thread(struct task_struct *tsk, int *notask_error, * Do not consider detached threads. */ if (!task_detached(p)) { - int ret = wait_consider_task(tsk, 0, p, notask_error, - type, pid, options, - infop, stat_addr, ru); + int ret = wait_consider_task(wo, tsk, 0, p); if (ret) return ret; } @@ -1546,22 +1547,12 @@ static int do_wait_thread(struct task_struct *tsk, int *notask_error, return 0; } -static int ptrace_do_wait(struct task_struct *tsk, int *notask_error, - enum pid_type type, struct pid *pid, int options, - struct siginfo __user *infop, int __user *stat_addr, - struct rusage __user *ru) +static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk) { struct task_struct *p; - /* - * Traditionally we see ptrace'd stopped tasks regardless of options. 
- */ - options |= WUNTRACED; - list_for_each_entry(p, &tsk->ptraced, ptrace_entry) { - int ret = wait_consider_task(tsk, 1, p, notask_error, - type, pid, options, - infop, stat_addr, ru); + int ret = wait_consider_task(wo, tsk, 1, p); if (ret) return ret; } @@ -1569,65 +1560,59 @@ static int ptrace_do_wait(struct task_struct *tsk, int *notask_error, return 0; } -static long do_wait(enum pid_type type, struct pid *pid, int options, - struct siginfo __user *infop, int __user *stat_addr, - struct rusage __user *ru) +static long do_wait(struct wait_opts *wo) { DECLARE_WAITQUEUE(wait, current); struct task_struct *tsk; int retval; - trace_sched_process_wait(pid); + trace_sched_process_wait(wo->wo_pid); add_wait_queue(&current->signal->wait_chldexit,&wait); repeat: /* * If there is nothing that can match our criteria just get out. - * We will clear @retval to zero if we see any child that might later - * match our criteria, even if we are not able to reap it yet. + * We will clear ->notask_error to zero if we see any child that + * might later match our criteria, even if we are not able to reap + * it yet. */ - retval = -ECHILD; - if ((type < PIDTYPE_MAX) && (!pid || hlist_empty(&pid->tasks[type]))) - goto end; + wo->notask_error = -ECHILD; + if ((wo->wo_type < PIDTYPE_MAX) && + (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type]))) + goto notask; - current->state = TASK_INTERRUPTIBLE; + set_current_state(TASK_INTERRUPTIBLE); read_lock(&tasklist_lock); tsk = current; do { - int tsk_result = do_wait_thread(tsk, &retval, - type, pid, options, - infop, stat_addr, ru); - if (!tsk_result) - tsk_result = ptrace_do_wait(tsk, &retval, - type, pid, options, - infop, stat_addr, ru); - if (tsk_result) { - /* - * tasklist_lock is unlocked and we have a final result. 
- */ - retval = tsk_result; + retval = do_wait_thread(wo, tsk); + if (retval) + goto end; + + retval = ptrace_do_wait(wo, tsk); + if (retval) goto end; - } - if (options & __WNOTHREAD) + if (wo->wo_flags & __WNOTHREAD) break; - tsk = next_thread(tsk); - BUG_ON(tsk->signal != current->signal); - } while (tsk != current); + } while_each_thread(current, tsk); read_unlock(&tasklist_lock); - if (!retval && !(options & WNOHANG)) { +notask: + retval = wo->notask_error; + if (!retval && !(wo->wo_flags & WNOHANG)) { retval = -ERESTARTSYS; if (!signal_pending(current)) { schedule(); goto repeat; } } - end: - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); remove_wait_queue(&current->signal->wait_chldexit,&wait); - if (infop) { + if (wo->wo_info) { + struct siginfo __user *infop = wo->wo_info; + if (retval > 0) retval = 0; else { @@ -1656,6 +1641,7 @@ end: SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *, infop, int, options, struct rusage __user *, ru) { + struct wait_opts wo; struct pid *pid = NULL; enum pid_type type; long ret; @@ -1685,7 +1671,14 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *, if (type < PIDTYPE_MAX) pid = find_get_pid(upid); + + wo.wo_type = type; + wo.wo_pid = pid; + wo.wo_flags = options; + wo.wo_info = infop; + wo.wo_stat = NULL; + wo.wo_rusage = ru; + ret = do_wait(&wo); put_pid(pid); /* avoid REGPARM breakage on x86: */ @@ -1696,6 +1689,7 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *, SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr, int, options, struct rusage __user *, ru) { + struct wait_opts wo; struct pid *pid = NULL; enum pid_type type; long ret; @@ -1717,7 +1711,13 @@ SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr, pid = find_get_pid(upid); } - ret = do_wait(type, pid, options | WEXITED, NULL, stat_addr, ru); + wo.wo_type = type; + wo.wo_pid = pid; + wo.wo_flags = options | WEXITED; + wo.wo_info = NULL; + wo.wo_stat = stat_addr; + wo.wo_rusage = ru; + ret = do_wait(&wo); put_pid(pid); /* avoid REGPARM breakage on x86: */ diff --git a/kernel/fork.c b/kernel/fork.c index be022c2..467746b 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1029,7 +1029,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, p->vfork_done = NULL; spin_lock_init(&p->alloc_lock); - clear_tsk_thread_flag(p, TIF_SIGPENDING); init_sigpending(&p->pending); p->utime = cputime_zero; diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig new file mode 100644 index 0000000..22e9dcf --- /dev/null +++ b/kernel/gcov/Kconfig @@ -0,0 +1,48 @@ +menu "GCOV-based kernel profiling" + +config GCOV_KERNEL + bool "Enable gcov-based kernel profiling" + depends on DEBUG_FS && CONSTRUCTORS + default n + ---help--- + This option enables gcov-based code profiling (e.g. for code coverage + measurements). + + If unsure, say N. + + Additionally specify CONFIG_GCOV_PROFILE_ALL=y to get profiling data + for the entire kernel. To enable profiling for specific files or + directories, add a line similar to the following to the respective + Makefile: + + For a single file (e.g. main.o): + GCOV_PROFILE_main.o := y + + For all files in one directory: + GCOV_PROFILE := y + + To exclude files from being profiled even when CONFIG_GCOV_PROFILE_ALL + is specified, use: + + GCOV_PROFILE_main.o := n + and: + GCOV_PROFILE := n + + Note that the debugfs filesystem has to be mounted to access + profiling data. 
+ +config GCOV_PROFILE_ALL + bool "Profile entire Kernel" + depends on GCOV_KERNEL + depends on S390 || X86 + default n + ---help--- + This option activates profiling for the entire kernel. + + If unsure, say N. + + Note that a kernel compiled with profiling flags will be significantly + larger and run slower. Also be sure to exclude files from profiling + which are not linked to the kernel image to prevent linker errors. + +endmenu diff --git a/kernel/gcov/Makefile b/kernel/gcov/Makefile new file mode 100644 index 0000000..3f76100 --- /dev/null +++ b/kernel/gcov/Makefile @@ -0,0 +1,3 @@ +EXTRA_CFLAGS := -DSRCTREE='"$(srctree)"' -DOBJTREE='"$(objtree)"' + +obj-$(CONFIG_GCOV_KERNEL) := base.o fs.o gcc_3_4.o diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c new file mode 100644 index 0000000..9b22d03 --- /dev/null +++ b/kernel/gcov/base.c @@ -0,0 +1,148 @@ +/* + * This code maintains a list of active profiling data structures. + * + * Copyright IBM Corp. 2009 + * Author(s): Peter Oberparleiter <oberpar@linux.vnet.ibm.com> + * + * Uses gcc-internal data definitions. + * Based on the gcov-kernel patch by: + * Hubertus Franke <frankeh@us.ibm.com> + * Nigel Hinds <nhinds@us.ibm.com> + * Rajan Ravindran <rajancr@us.ibm.com> + * Peter Oberparleiter <oberpar@linux.vnet.ibm.com> + * Paul Larson + */ + +#define pr_fmt(fmt) "gcov: " fmt + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include "gcov.h" + +static struct gcov_info *gcov_info_head; +static int gcov_events_enabled; +static DEFINE_MUTEX(gcov_lock); + +/* + * __gcov_init is called by gcc-generated constructor code for each object + * file compiled with -fprofile-arcs. + */ +void __gcov_init(struct gcov_info *info) +{ + static unsigned int gcov_version; + + mutex_lock(&gcov_lock); + if (gcov_version == 0) { + gcov_version = info->version; + /* + * Printing gcc's version magic may prove useful for debugging + * incompatibility reports. + */ + pr_info("version magic: 0x%x\n", gcov_version); + } + /* + * Add new profiling data structure to list and inform event + * listener. + */ + info->next = gcov_info_head; + gcov_info_head = info; + if (gcov_events_enabled) + gcov_event(GCOV_ADD, info); + mutex_unlock(&gcov_lock); +} +EXPORT_SYMBOL(__gcov_init); + +/* + * These functions may be referenced by gcc-generated profiling code but serve + * no function for kernel profiling. + */ +void __gcov_flush(void) +{ + /* Unused. */ +} +EXPORT_SYMBOL(__gcov_flush); + +void __gcov_merge_add(gcov_type *counters, unsigned int n_counters) +{ + /* Unused. */ +} +EXPORT_SYMBOL(__gcov_merge_add); + +void __gcov_merge_single(gcov_type *counters, unsigned int n_counters) +{ + /* Unused. */ +} +EXPORT_SYMBOL(__gcov_merge_single); + +void __gcov_merge_delta(gcov_type *counters, unsigned int n_counters) +{ + /* Unused. */ +} +EXPORT_SYMBOL(__gcov_merge_delta); + +/** + * gcov_enable_events - enable event reporting through gcov_event() + * + * Turn on reporting of profiling data load/unload-events through the + * gcov_event() callback. Also replay all previous events once. This function + * is needed because some events are potentially generated too early for the + * callback implementation to handle them initially. + */ +void gcov_enable_events(void) +{ + struct gcov_info *info; + + mutex_lock(&gcov_lock); + gcov_events_enabled = 1; + /* Perform event callback for previously registered entries. 
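/*
 * Editor's sketch: roughly what the gcc-generated registration looks like
 * for each object built with -fprofile-arcs (simplified and hypothetical -
 * the real constructor is emitted by the compiler, not written by hand).
 * CONFIG_CONSTRUCTORS makes the kernel run these constructors, which is
 * how every object's gcov_info ends up on gcov_info_head above.
 */
static struct gcov_info object_gcov_info;	/* laid out and filled by gcc */

static void __attribute__((constructor)) object_gcov_ctor(void)
{
	__gcov_init(&object_gcov_info);
}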
*/ + for (info = gcov_info_head; info; info = info->next) + gcov_event(GCOV_ADD, info); + mutex_unlock(&gcov_lock); +} + +#ifdef CONFIG_MODULES +static inline int within(void *addr, void *start, unsigned long size) +{ + return ((addr >= start) && (addr < start + size)); +} + +/* Update list and generate events when modules are unloaded. */ +static int gcov_module_notifier(struct notifier_block *nb, unsigned long event, + void *data) +{ + struct module *mod = data; + struct gcov_info *info; + struct gcov_info *prev; + + if (event != MODULE_STATE_GOING) + return NOTIFY_OK; + mutex_lock(&gcov_lock); + prev = NULL; + /* Remove entries located in module from linked list. */ + for (info = gcov_info_head; info; info = info->next) { + if (within(info, mod->module_core, mod->core_size)) { + if (prev) + prev->next = info->next; + else + gcov_info_head = info->next; + if (gcov_events_enabled) + gcov_event(GCOV_REMOVE, info); + } else + prev = info; + } + mutex_unlock(&gcov_lock); + + return NOTIFY_OK; +} + +static struct notifier_block gcov_nb = { + .notifier_call = gcov_module_notifier, +}; + +static int __init gcov_init(void) +{ + return register_module_notifier(&gcov_nb); +} +device_initcall(gcov_init); +#endif /* CONFIG_MODULES */ diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c new file mode 100644 index 0000000..ef3c3f8 --- /dev/null +++ b/kernel/gcov/fs.c @@ -0,0 +1,673 @@ +/* + * This code exports profiling data as debugfs files to userspace. + * + * Copyright IBM Corp. 2009 + * Author(s): Peter Oberparleiter <oberpar@linux.vnet.ibm.com> + * + * Uses gcc-internal data definitions. + * Based on the gcov-kernel patch by: + * Hubertus Franke <frankeh@us.ibm.com> + * Nigel Hinds <nhinds@us.ibm.com> + * Rajan Ravindran <rajancr@us.ibm.com> + * Peter Oberparleiter <oberpar@linux.vnet.ibm.com> + * Paul Larson + * Yi CDL Yang + */ + +#define pr_fmt(fmt) "gcov: " fmt + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/debugfs.h> +#include <linux/fs.h> +#include <linux/list.h> +#include <linux/string.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/seq_file.h> +#include "gcov.h" + +/** + * struct gcov_node - represents a debugfs entry + * @list: list head for child node list + * @children: child nodes + * @all: list head for list of all nodes + * @parent: parent node + * @info: associated profiling data structure if not a directory + * @ghost: when an object file containing profiling data is unloaded we keep a + * copy of the profiling data here to allow collecting coverage data + * for cleanup code. Such a node is called a "ghost". + * @dentry: main debugfs entry, either a directory or data file + * @links: associated symbolic links + * @name: data file basename + * + * struct gcov_node represents an entity within the gcov/ subdirectory + * of debugfs. There are directory and data file nodes. The latter represent + * the actual synthesized data file plus any associated symbolic links which + * are needed by the gcov tool to work correctly. 
+ */ +struct gcov_node { + struct list_head list; + struct list_head children; + struct list_head all; + struct gcov_node *parent; + struct gcov_info *info; + struct gcov_info *ghost; + struct dentry *dentry; + struct dentry **links; + char name[0]; +}; + +static const char objtree[] = OBJTREE; +static const char srctree[] = SRCTREE; +static struct gcov_node root_node; +static struct dentry *reset_dentry; +static LIST_HEAD(all_head); +static DEFINE_MUTEX(node_lock); + +/* If non-zero, keep copies of profiling data for unloaded modules. */ +static int gcov_persist = 1; + +static int __init gcov_persist_setup(char *str) +{ + unsigned long val; + + if (strict_strtoul(str, 0, &val)) { + pr_warning("invalid gcov_persist parameter '%s'\n", str); + return 0; + } + gcov_persist = val; + pr_info("setting gcov_persist to %d\n", gcov_persist); + + return 1; +} +__setup("gcov_persist=", gcov_persist_setup); + +/* + * seq_file.start() implementation for gcov data files. Note that the + * gcov_iterator interface is designed to be more restrictive than seq_file + * (no start from arbitrary position, etc.), to simplify the iterator + * implementation. + */ +static void *gcov_seq_start(struct seq_file *seq, loff_t *pos) +{ + loff_t i; + + gcov_iter_start(seq->private); + for (i = 0; i < *pos; i++) { + if (gcov_iter_next(seq->private)) + return NULL; + } + return seq->private; +} + +/* seq_file.next() implementation for gcov data files. */ +static void *gcov_seq_next(struct seq_file *seq, void *data, loff_t *pos) +{ + struct gcov_iterator *iter = data; + + if (gcov_iter_next(iter)) + return NULL; + (*pos)++; + + return iter; +} + +/* seq_file.show() implementation for gcov data files. */ +static int gcov_seq_show(struct seq_file *seq, void *data) +{ + struct gcov_iterator *iter = data; + + if (gcov_iter_write(iter, seq)) + return -EINVAL; + return 0; +} + +static void gcov_seq_stop(struct seq_file *seq, void *data) +{ + /* Unused. */ +} + +static const struct seq_operations gcov_seq_ops = { + .start = gcov_seq_start, + .next = gcov_seq_next, + .show = gcov_seq_show, + .stop = gcov_seq_stop, +}; + +/* + * Return the profiling data set for a given node. This can either be the + * original profiling data structure or a duplicate (also called "ghost") + * in case the associated object file has been unloaded. + */ +static struct gcov_info *get_node_info(struct gcov_node *node) +{ + if (node->info) + return node->info; + + return node->ghost; +} + +/* + * open() implementation for gcov data files. Create a copy of the profiling + * data set and initialize the iterator and seq_file interface. + */ +static int gcov_seq_open(struct inode *inode, struct file *file) +{ + struct gcov_node *node = inode->i_private; + struct gcov_iterator *iter; + struct seq_file *seq; + struct gcov_info *info; + int rc = -ENOMEM; + + mutex_lock(&node_lock); + /* + * Read from a profiling data copy to minimize reference tracking + * complexity and concurrent access. + */ + info = gcov_info_dup(get_node_info(node)); + if (!info) + goto out_unlock; + iter = gcov_iter_new(info); + if (!iter) + goto err_free_info; + rc = seq_open(file, &gcov_seq_ops); + if (rc) + goto err_free_iter_info; + seq = file->private_data; + seq->private = iter; +out_unlock: + mutex_unlock(&node_lock); + return rc; + +err_free_iter_info: + gcov_iter_free(iter); +err_free_info: + gcov_info_free(info); + goto out_unlock; +} + +/* + * release() implementation for gcov data files. Release resources allocated + * by open(). 
+ */ +static int gcov_seq_release(struct inode *inode, struct file *file) +{ + struct gcov_iterator *iter; + struct gcov_info *info; + struct seq_file *seq; + + seq = file->private_data; + iter = seq->private; + info = gcov_iter_get_info(iter); + gcov_iter_free(iter); + gcov_info_free(info); + seq_release(inode, file); + + return 0; +} + +/* + * Find a node by the associated data file name. Needs to be called with + * node_lock held. + */ +static struct gcov_node *get_node_by_name(const char *name) +{ + struct gcov_node *node; + struct gcov_info *info; + + list_for_each_entry(node, &all_head, all) { + info = get_node_info(node); + if (info && (strcmp(info->filename, name) == 0)) + return node; + } + + return NULL; +} + +static void remove_node(struct gcov_node *node); + +/* + * write() implementation for gcov data files. Reset profiling data for the + * associated file. If the object file has been unloaded (i.e. this is + * a "ghost" node), remove the debug fs node as well. + */ +static ssize_t gcov_seq_write(struct file *file, const char __user *addr, + size_t len, loff_t *pos) +{ + struct seq_file *seq; + struct gcov_info *info; + struct gcov_node *node; + + seq = file->private_data; + info = gcov_iter_get_info(seq->private); + mutex_lock(&node_lock); + node = get_node_by_name(info->filename); + if (node) { + /* Reset counts or remove node for unloaded modules. */ + if (node->ghost) + remove_node(node); + else + gcov_info_reset(node->info); + } + /* Reset counts for open file. */ + gcov_info_reset(info); + mutex_unlock(&node_lock); + + return len; +} + +/* + * Given a string <path> representing a file path of format: + * path/to/file.gcda + * construct and return a new string: + * <dir/>path/to/file.<ext> + */ +static char *link_target(const char *dir, const char *path, const char *ext) +{ + char *target; + char *old_ext; + char *copy; + + copy = kstrdup(path, GFP_KERNEL); + if (!copy) + return NULL; + old_ext = strrchr(copy, '.'); + if (old_ext) + *old_ext = '\0'; + if (dir) + target = kasprintf(GFP_KERNEL, "%s/%s.%s", dir, copy, ext); + else + target = kasprintf(GFP_KERNEL, "%s.%s", copy, ext); + kfree(copy); + + return target; +} + +/* + * Construct a string representing the symbolic link target for the given + * gcov data file name and link type. Depending on the link type and the + * location of the data file, the link target can either point to a + * subdirectory of srctree, objtree or in an external location. + */ +static char *get_link_target(const char *filename, const struct gcov_link *ext) +{ + const char *rel; + char *result; + + if (strncmp(filename, objtree, strlen(objtree)) == 0) { + rel = filename + strlen(objtree) + 1; + if (ext->dir == SRC_TREE) + result = link_target(srctree, rel, ext->ext); + else + result = link_target(objtree, rel, ext->ext); + } else { + /* External compilation. */ + result = link_target(NULL, filename, ext->ext); + } + + return result; +} + +#define SKEW_PREFIX ".tmp_" + +/* + * For a filename .tmp_filename.ext return filename.ext. Needed to compensate + * for filename skewing caused by the mod-versioning mechanism. + */ +static const char *deskew(const char *basename) +{ + if (strncmp(basename, SKEW_PREFIX, sizeof(SKEW_PREFIX) - 1) == 0) + return basename + sizeof(SKEW_PREFIX) - 1; + return basename; +} + +/* + * Create links to additional files (usually .c and .gcno files) which the + * gcov tool expects to find in the same directory as the gcov data file. 
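/*
 * Editor's worked example (hypothetical paths): with objtree "/build/linux"
 * and the gcov_link entry { OBJ_TREE, "gcno" } defined in gcc_3_4.c, the
 * data file "/build/linux/kernel/gcov/fs.gcda" gets a symlink target of
 * "/build/linux/kernel/gcov/fs.gcno" - link_target() strips the old
 * extension and appends the new one, while get_link_target() re-anchors
 * the objtree-relative path in the requested tree.
 */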
+ */ +static void add_links(struct gcov_node *node, struct dentry *parent) +{ + char *basename; + char *target; + int num; + int i; + + for (num = 0; gcov_link[num].ext; num++) + /* Nothing. */; + node->links = kcalloc(num, sizeof(struct dentry *), GFP_KERNEL); + if (!node->links) + return; + for (i = 0; i < num; i++) { + target = get_link_target(get_node_info(node)->filename, + &gcov_link[i]); + if (!target) + goto out_err; + basename = strrchr(target, '/'); + if (!basename) + goto out_err; + basename++; + node->links[i] = debugfs_create_symlink(deskew(basename), + parent, target); + if (!node->links[i]) + goto out_err; + kfree(target); + } + + return; +out_err: + kfree(target); + while (i-- > 0) + debugfs_remove(node->links[i]); + kfree(node->links); + node->links = NULL; +} + +static const struct file_operations gcov_data_fops = { + .open = gcov_seq_open, + .release = gcov_seq_release, + .read = seq_read, + .llseek = seq_lseek, + .write = gcov_seq_write, +}; + +/* Basic initialization of a new node. */ +static void init_node(struct gcov_node *node, struct gcov_info *info, + const char *name, struct gcov_node *parent) +{ + INIT_LIST_HEAD(&node->list); + INIT_LIST_HEAD(&node->children); + INIT_LIST_HEAD(&node->all); + node->info = info; + node->parent = parent; + if (name) + strcpy(node->name, name); +} + +/* + * Create a new node and associated debugfs entry. Needs to be called with + * node_lock held. + */ +static struct gcov_node *new_node(struct gcov_node *parent, + struct gcov_info *info, const char *name) +{ + struct gcov_node *node; + + node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL); + if (!node) { + pr_warning("out of memory\n"); + return NULL; + } + init_node(node, info, name, parent); + /* Differentiate between gcov data file nodes and directory nodes. */ + if (info) { + node->dentry = debugfs_create_file(deskew(node->name), 0600, + parent->dentry, node, &gcov_data_fops); + } else + node->dentry = debugfs_create_dir(node->name, parent->dentry); + if (!node->dentry) { + pr_warning("could not create file\n"); + kfree(node); + return NULL; + } + if (info) + add_links(node, parent->dentry); + list_add(&node->list, &parent->children); + list_add(&node->all, &all_head); + + return node; +} + +/* Remove symbolic links associated with node. */ +static void remove_links(struct gcov_node *node) +{ + int i; + + if (!node->links) + return; + for (i = 0; gcov_link[i].ext; i++) + debugfs_remove(node->links[i]); + kfree(node->links); + node->links = NULL; +} + +/* + * Remove node from all lists and debugfs and release associated resources. + * Needs to be called with node_lock held. + */ +static void release_node(struct gcov_node *node) +{ + list_del(&node->list); + list_del(&node->all); + debugfs_remove(node->dentry); + remove_links(node); + if (node->ghost) + gcov_info_free(node->ghost); + kfree(node); +} + +/* Release node and empty parents. Needs to be called with node_lock held. */ +static void remove_node(struct gcov_node *node) +{ + struct gcov_node *parent; + + while ((node != &root_node) && list_empty(&node->children)) { + parent = node->parent; + release_node(node); + node = parent; + } +} + +/* + * Find child node with given basename. Needs to be called with node_lock + * held. 
+ */ +static struct gcov_node *get_child_by_name(struct gcov_node *parent, + const char *name) +{ + struct gcov_node *node; + + list_for_each_entry(node, &parent->children, list) { + if (strcmp(node->name, name) == 0) + return node; + } + + return NULL; +} + +/* + * write() implementation for reset file. Reset all profiling data to zero + * and remove ghost nodes. + */ +static ssize_t reset_write(struct file *file, const char __user *addr, + size_t len, loff_t *pos) +{ + struct gcov_node *node; + + mutex_lock(&node_lock); +restart: + list_for_each_entry(node, &all_head, all) { + if (node->info) + gcov_info_reset(node->info); + else if (list_empty(&node->children)) { + remove_node(node); + /* Several nodes may have gone - restart loop. */ + goto restart; + } + } + mutex_unlock(&node_lock); + + return len; +} + +/* read() implementation for reset file. Unused. */ +static ssize_t reset_read(struct file *file, char __user *addr, size_t len, + loff_t *pos) +{ + /* Allow read operation so that a recursive copy won't fail. */ + return 0; +} + +static const struct file_operations gcov_reset_fops = { + .write = reset_write, + .read = reset_read, +}; + +/* + * Create a node for a given profiling data set and add it to all lists and + * debugfs. Needs to be called with node_lock held. + */ +static void add_node(struct gcov_info *info) +{ + char *filename; + char *curr; + char *next; + struct gcov_node *parent; + struct gcov_node *node; + + filename = kstrdup(info->filename, GFP_KERNEL); + if (!filename) + return; + parent = &root_node; + /* Create directory nodes along the path. */ + for (curr = filename; (next = strchr(curr, '/')); curr = next + 1) { + if (curr == next) + continue; + *next = 0; + if (strcmp(curr, ".") == 0) + continue; + if (strcmp(curr, "..") == 0) { + if (!parent->parent) + goto err_remove; + parent = parent->parent; + continue; + } + node = get_child_by_name(parent, curr); + if (!node) { + node = new_node(parent, NULL, curr); + if (!node) + goto err_remove; + } + parent = node; + } + /* Create file node. */ + node = new_node(parent, info, curr); + if (!node) + goto err_remove; +out: + kfree(filename); + return; + +err_remove: + remove_node(parent); + goto out; +} + +/* + * The profiling data set associated with this node is being unloaded. Store a + * copy of the profiling data and turn this node into a "ghost". + */ +static int ghost_node(struct gcov_node *node) +{ + node->ghost = gcov_info_dup(node->info); + if (!node->ghost) { + pr_warning("could not save data for '%s' (out of memory)\n", + node->info->filename); + return -ENOMEM; + } + node->info = NULL; + + return 0; +} + +/* + * Profiling data for this node has been loaded again. Add profiling data + * from previous instantiation and turn this node into a regular node. + */ +static void revive_node(struct gcov_node *node, struct gcov_info *info) +{ + if (gcov_info_is_compatible(node->ghost, info)) + gcov_info_add(info, node->ghost); + else { + pr_warning("discarding saved data for '%s' (version changed)\n", + info->filename); + } + gcov_info_free(node->ghost); + node->ghost = NULL; + node->info = info; +} + +/* + * Callback to create/remove profiling files when code compiled with + * -fprofile-arcs is loaded/unloaded. + */ +void gcov_event(enum gcov_action action, struct gcov_info *info) +{ + struct gcov_node *node; + + mutex_lock(&node_lock); + node = get_node_by_name(info->filename); + switch (action) { + case GCOV_ADD: + /* Add new node or revive ghost. 
*/ + if (!node) { + add_node(info); + break; + } + if (gcov_persist) + revive_node(node, info); + else { + pr_warning("could not add '%s' (already exists)\n", + info->filename); + } + break; + case GCOV_REMOVE: + /* Remove node or turn into ghost. */ + if (!node) { + pr_warning("could not remove '%s' (not found)\n", + info->filename); + break; + } + if (gcov_persist) { + if (!ghost_node(node)) + break; + } + remove_node(node); + break; + } + mutex_unlock(&node_lock); +} + +/* Create debugfs entries. */ +static __init int gcov_fs_init(void) +{ + int rc = -EIO; + + init_node(&root_node, NULL, NULL, NULL); + /* + * /sys/kernel/debug/gcov will be parent for the reset control file + * and all profiling files. + */ + root_node.dentry = debugfs_create_dir("gcov", NULL); + if (!root_node.dentry) + goto err_remove; + /* + * Create reset file which resets all profiling counts when written + * to. + */ + reset_dentry = debugfs_create_file("reset", 0600, root_node.dentry, + NULL, &gcov_reset_fops); + if (!reset_dentry) + goto err_remove; + /* Replay previous events to get our fs hierarchy up-to-date. */ + gcov_enable_events(); + return 0; + +err_remove: + pr_err("init failed\n"); + if (root_node.dentry) + debugfs_remove(root_node.dentry); + + return rc; +} +device_initcall(gcov_fs_init); diff --git a/kernel/gcov/gcc_3_4.c b/kernel/gcov/gcc_3_4.c new file mode 100644 index 0000000..ae5bb42 --- /dev/null +++ b/kernel/gcov/gcc_3_4.c @@ -0,0 +1,447 @@ +/* + * This code provides functions to handle gcc's profiling data format + * introduced with gcc 3.4. Future versions of gcc may change the gcov + * format (as happened before), so all format-specific information needs + * to be kept modular and easily exchangeable. + * + * This file is based on gcc-internal definitions. Functions and data + * structures are defined to be compatible with gcc counterparts. + * For a better understanding, refer to gcc source: gcc/gcov-io.h. + * + * Copyright IBM Corp. 2009 + * Author(s): Peter Oberparleiter <oberpar@linux.vnet.ibm.com> + * + * Uses gcc-internal data definitions. + */ + +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/seq_file.h> +#include <linux/vmalloc.h> +#include "gcov.h" + +/* Symbolic links to be created for each profiling data file. */ +const struct gcov_link gcov_link[] = { + { OBJ_TREE, "gcno" }, /* Link to .gcno file in $(objtree). */ + { 0, NULL}, +}; + +/* + * Determine whether a counter is active. Based on gcc magic. Doesn't change + * at run-time. + */ +static int counter_active(struct gcov_info *info, unsigned int type) +{ + return (1 << type) & info->ctr_mask; +} + +/* Determine number of active counters. Based on gcc magic. */ +static unsigned int num_counter_active(struct gcov_info *info) +{ + unsigned int i; + unsigned int result = 0; + + for (i = 0; i < GCOV_COUNTERS; i++) { + if (counter_active(info, i)) + result++; + } + return result; +} + +/** + * gcov_info_reset - reset profiling data to zero + * @info: profiling data set + */ +void gcov_info_reset(struct gcov_info *info) +{ + unsigned int active = num_counter_active(info); + unsigned int i; + + for (i = 0; i < active; i++) { + memset(info->counts[i].values, 0, + info->counts[i].num * sizeof(gcov_type)); + } +} + +/** + * gcov_info_is_compatible - check if profiling data can be added + * @info1: first profiling data set + * @info2: second profiling data set + * + * Returns non-zero if profiling data can be added, zero otherwise. 
+ */ +int gcov_info_is_compatible(struct gcov_info *info1, struct gcov_info *info2) +{ + return (info1->stamp == info2->stamp); +} + +/** + * gcov_info_add - add up profiling data + * @dest: profiling data set to which data is added + * @source: profiling data set which is added + * + * Adds profiling counts of @source to @dest. + */ +void gcov_info_add(struct gcov_info *dest, struct gcov_info *source) +{ + unsigned int i; + unsigned int j; + + for (i = 0; i < num_counter_active(dest); i++) { + for (j = 0; j < dest->counts[i].num; j++) { + dest->counts[i].values[j] += + source->counts[i].values[j]; + } + } +} + +/* Get size of function info entry. Based on gcc magic. */ +static size_t get_fn_size(struct gcov_info *info) +{ + size_t size; + + size = sizeof(struct gcov_fn_info) + num_counter_active(info) * + sizeof(unsigned int); + if (__alignof__(struct gcov_fn_info) > sizeof(unsigned int)) + size = ALIGN(size, __alignof__(struct gcov_fn_info)); + return size; +} + +/* Get address of function info entry. Based on gcc magic. */ +static struct gcov_fn_info *get_fn_info(struct gcov_info *info, unsigned int fn) +{ + return (struct gcov_fn_info *) + ((char *) info->functions + fn * get_fn_size(info)); +} + +/** + * gcov_info_dup - duplicate profiling data set + * @info: profiling data set to duplicate + * + * Return newly allocated duplicate on success, %NULL on error. + */ +struct gcov_info *gcov_info_dup(struct gcov_info *info) +{ + struct gcov_info *dup; + unsigned int i; + unsigned int active; + + /* Duplicate gcov_info. */ + active = num_counter_active(info); + dup = kzalloc(sizeof(struct gcov_info) + + sizeof(struct gcov_ctr_info) * active, GFP_KERNEL); + if (!dup) + return NULL; + dup->version = info->version; + dup->stamp = info->stamp; + dup->n_functions = info->n_functions; + dup->ctr_mask = info->ctr_mask; + /* Duplicate filename. */ + dup->filename = kstrdup(info->filename, GFP_KERNEL); + if (!dup->filename) + goto err_free; + /* Duplicate table of functions. */ + dup->functions = kmemdup(info->functions, info->n_functions * + get_fn_size(info), GFP_KERNEL); + if (!dup->functions) + goto err_free; + /* Duplicate counter arrays. */ + for (i = 0; i < active ; i++) { + struct gcov_ctr_info *ctr = &info->counts[i]; + size_t size = ctr->num * sizeof(gcov_type); + + dup->counts[i].num = ctr->num; + dup->counts[i].merge = ctr->merge; + dup->counts[i].values = vmalloc(size); + if (!dup->counts[i].values) + goto err_free; + memcpy(dup->counts[i].values, ctr->values, size); + } + return dup; + +err_free: + gcov_info_free(dup); + return NULL; +} + +/** + * gcov_info_free - release memory for profiling data set duplicate + * @info: profiling data set duplicate to free + */ +void gcov_info_free(struct gcov_info *info) +{ + unsigned int active = num_counter_active(info); + unsigned int i; + + for (i = 0; i < active ; i++) + vfree(info->counts[i].values); + kfree(info->functions); + kfree(info->filename); + kfree(info); +} + +/** + * struct type_info - iterator helper array + * @ctr_type: counter type + * @offset: index of the first value of the current function for this type + * + * This array is needed to convert the in-memory data format into the in-file + * data format: + * + * In-memory: + * for each counter type + * for each function + * values + * + * In-file: + * for each function + * for each counter type + * values + * + * See gcc source gcc/gcov-io.h for more information on data organization. 
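/*
 * Editor's worked example (hypothetical counts): two functions with 3 and
 * 2 arc-counter values give the in-memory array [f0v0 f0v1 f0v2 f1v0 f1v1].
 * While the records of f1 are being emitted, offset for that counter type
 * is 3, so value j of f1 is found at values[j + offset] - the same
 * values[iter->count + get_type(iter)->offset] lookup performed by
 * gcov_iter_write() below.
 */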
+ */ +struct type_info { + int ctr_type; + unsigned int offset; +}; + +/** + * struct gcov_iterator - specifies current file position in logical records + * @info: associated profiling data + * @record: record type + * @function: function number + * @type: counter type + * @count: index into values array + * @num_types: number of counter types + * @type_info: helper array to get values-array offset for current function + */ +struct gcov_iterator { + struct gcov_info *info; + + int record; + unsigned int function; + unsigned int type; + unsigned int count; + + int num_types; + struct type_info type_info[0]; +}; + +static struct gcov_fn_info *get_func(struct gcov_iterator *iter) +{ + return get_fn_info(iter->info, iter->function); +} + +static struct type_info *get_type(struct gcov_iterator *iter) +{ + return &iter->type_info[iter->type]; +} + +/** + * gcov_iter_new - allocate and initialize profiling data iterator + * @info: profiling data set to be iterated + * + * Return file iterator on success, %NULL otherwise. + */ +struct gcov_iterator *gcov_iter_new(struct gcov_info *info) +{ + struct gcov_iterator *iter; + + iter = kzalloc(sizeof(struct gcov_iterator) + + num_counter_active(info) * sizeof(struct type_info), + GFP_KERNEL); + if (iter) + iter->info = info; + + return iter; +} + +/** + * gcov_iter_free - release memory for iterator + * @iter: file iterator to free + */ +void gcov_iter_free(struct gcov_iterator *iter) +{ + kfree(iter); +} + +/** + * gcov_iter_get_info - return profiling data set for given file iterator + * @iter: file iterator + */ +struct gcov_info *gcov_iter_get_info(struct gcov_iterator *iter) +{ + return iter->info; +} + +/** + * gcov_iter_start - reset file iterator to starting position + * @iter: file iterator + */ +void gcov_iter_start(struct gcov_iterator *iter) +{ + int i; + + iter->record = 0; + iter->function = 0; + iter->type = 0; + iter->count = 0; + iter->num_types = 0; + for (i = 0; i < GCOV_COUNTERS; i++) { + if (counter_active(iter->info, i)) { + iter->type_info[iter->num_types].ctr_type = i; + iter->type_info[iter->num_types++].offset = 0; + } + } +} + +/* Mapping of logical record number to actual file content. */ +#define RECORD_FILE_MAGIC 0 +#define RECORD_GCOV_VERSION 1 +#define RECORD_TIME_STAMP 2 +#define RECORD_FUNCTION_TAG 3 +#define RECORD_FUNCTON_TAG_LEN 4 +#define RECORD_FUNCTION_IDENT 5 +#define RECORD_FUNCTION_CHECK 6 +#define RECORD_COUNT_TAG 7 +#define RECORD_COUNT_LEN 8 +#define RECORD_COUNT 9 + +/** + * gcov_iter_next - advance file iterator to next logical record + * @iter: file iterator + * + * Return zero if new position is valid, non-zero if iterator has reached end. 
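/*
 * Editor's trace (assuming one function with one active counter type
 * holding two values): successive gcov_iter_next() calls visit the
 * logical records
 *   FILE_MAGIC, GCOV_VERSION, TIME_STAMP, FUNCTION_TAG,
 *   FUNCTON_TAG_LEN, FUNCTION_IDENT, FUNCTION_CHECK,
 *   COUNT_TAG, COUNT_LEN, COUNT, COUNT
 * and then return non-zero for EOF.
 */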
+ */ +int gcov_iter_next(struct gcov_iterator *iter) +{ + switch (iter->record) { + case RECORD_FILE_MAGIC: + case RECORD_GCOV_VERSION: + case RECORD_FUNCTION_TAG: + case RECORD_FUNCTON_TAG_LEN: + case RECORD_FUNCTION_IDENT: + case RECORD_COUNT_TAG: + /* Advance to next record */ + iter->record++; + break; + case RECORD_COUNT: + /* Advance to next count */ + iter->count++; + /* fall through */ + case RECORD_COUNT_LEN: + if (iter->count < get_func(iter)->n_ctrs[iter->type]) { + iter->record = 9; + break; + } + /* Advance to next counter type */ + get_type(iter)->offset += iter->count; + iter->count = 0; + iter->type++; + /* fall through */ + case RECORD_FUNCTION_CHECK: + if (iter->type < iter->num_types) { + iter->record = 7; + break; + } + /* Advance to next function */ + iter->type = 0; + iter->function++; + /* fall through */ + case RECORD_TIME_STAMP: + if (iter->function < iter->info->n_functions) + iter->record = 3; + else + iter->record = -1; + break; + } + /* Check for EOF. */ + if (iter->record == -1) + return -EINVAL; + else + return 0; +} + +/** + * seq_write_gcov_u32 - write 32 bit number in gcov format to seq_file + * @seq: seq_file handle + * @v: value to be stored + * + * Number format defined by gcc: numbers are recorded in the 32 bit + * unsigned binary form of the endianness of the machine generating the + * file. + */ +static int seq_write_gcov_u32(struct seq_file *seq, u32 v) +{ + return seq_write(seq, &v, sizeof(v)); +} + +/** + * seq_write_gcov_u64 - write 64 bit number in gcov format to seq_file + * @seq: seq_file handle + * @v: value to be stored + * + * Number format defined by gcc: numbers are recorded in the 32 bit + * unsigned binary form of the endianness of the machine generating the + * file. 64 bit numbers are stored as two 32 bit numbers, the low part + * first. + */ +static int seq_write_gcov_u64(struct seq_file *seq, u64 v) +{ + u32 data[2]; + + data[0] = (v & 0xffffffffUL); + data[1] = (v >> 32); + return seq_write(seq, data, sizeof(data)); +} + +/** + * gcov_iter_write - write data for current pos to seq_file + * @iter: file iterator + * @seq: seq_file handle + * + * Return zero on success, non-zero otherwise. + */ +int gcov_iter_write(struct gcov_iterator *iter, struct seq_file *seq) +{ + int rc = -EINVAL; + + switch (iter->record) { + case RECORD_FILE_MAGIC: + rc = seq_write_gcov_u32(seq, GCOV_DATA_MAGIC); + break; + case RECORD_GCOV_VERSION: + rc = seq_write_gcov_u32(seq, iter->info->version); + break; + case RECORD_TIME_STAMP: + rc = seq_write_gcov_u32(seq, iter->info->stamp); + break; + case RECORD_FUNCTION_TAG: + rc = seq_write_gcov_u32(seq, GCOV_TAG_FUNCTION); + break; + case RECORD_FUNCTON_TAG_LEN: + rc = seq_write_gcov_u32(seq, 2); + break; + case RECORD_FUNCTION_IDENT: + rc = seq_write_gcov_u32(seq, get_func(iter)->ident); + break; + case RECORD_FUNCTION_CHECK: + rc = seq_write_gcov_u32(seq, get_func(iter)->checksum); + break; + case RECORD_COUNT_TAG: + rc = seq_write_gcov_u32(seq, + GCOV_TAG_FOR_COUNTER(get_type(iter)->ctr_type)); + break; + case RECORD_COUNT_LEN: + rc = seq_write_gcov_u32(seq, + get_func(iter)->n_ctrs[iter->type] * 2); + break; + case RECORD_COUNT: + rc = seq_write_gcov_u64(seq, + iter->info->counts[iter->type]. + values[iter->count + get_type(iter)->offset]); + break; + } + return rc; +} diff --git a/kernel/gcov/gcov.h b/kernel/gcov/gcov.h new file mode 100644 index 0000000..060073e --- /dev/null +++ b/kernel/gcov/gcov.h @@ -0,0 +1,128 @@ +/* + * Profiling infrastructure declarations. 
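/*
 * Editor's worked example for seq_write_gcov_u64() above: the value
 * 0x0000000100000002ULL is written as the 32-bit word 0x00000002 followed
 * by 0x00000001, each in native byte order - the low-part-first layout
 * the gcov tooling expects.
 */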
+ * + * This file is based on gcc-internal definitions. Data structures are + * defined to be compatible with gcc counterparts. For a better + * understanding, refer to gcc source: gcc/gcov-io.h. + * + * Copyright IBM Corp. 2009 + * Author(s): Peter Oberparleiter <oberpar@linux.vnet.ibm.com> + * + * Uses gcc-internal data definitions. + */ + +#ifndef GCOV_H +#define GCOV_H GCOV_H + +#include <linux/types.h> + +/* + * Profiling data types used for gcc 3.4 and above - these are defined by + * gcc and need to be kept as close to the original definition as possible to + * remain compatible. + */ +#define GCOV_COUNTERS 5 +#define GCOV_DATA_MAGIC ((unsigned int) 0x67636461) +#define GCOV_TAG_FUNCTION ((unsigned int) 0x01000000) +#define GCOV_TAG_COUNTER_BASE ((unsigned int) 0x01a10000) +#define GCOV_TAG_FOR_COUNTER(count) \ + (GCOV_TAG_COUNTER_BASE + ((unsigned int) (count) << 17)) + +#if BITS_PER_LONG >= 64 +typedef long gcov_type; +#else +typedef long long gcov_type; +#endif + +/** + * struct gcov_fn_info - profiling meta data per function + * @ident: object file-unique function identifier + * @checksum: function checksum + * @n_ctrs: number of values per counter type belonging to this function + * + * This data is generated by gcc during compilation and doesn't change + * at run-time. + */ +struct gcov_fn_info { + unsigned int ident; + unsigned int checksum; + unsigned int n_ctrs[0]; +}; + +/** + * struct gcov_ctr_info - profiling data per counter type + * @num: number of counter values for this type + * @values: array of counter values for this type + * @merge: merge function for counter values of this type (unused) + * + * This data is generated by gcc during compilation and doesn't change + * at run-time with the exception of the values array. + */ +struct gcov_ctr_info { + unsigned int num; + gcov_type *values; + void (*merge)(gcov_type *, unsigned int); +}; + +/** + * struct gcov_info - profiling data per object file + * @version: gcov version magic indicating the gcc version used for compilation + * @next: list head for a singly-linked list + * @stamp: time stamp + * @filename: name of the associated gcov data file + * @n_functions: number of instrumented functions + * @functions: function data + * @ctr_mask: mask specifying which counter types are active + * @counts: counter data per counter type + * + * This data is generated by gcc during compilation and doesn't change + * at run-time with the exception of the next pointer. + */ +struct gcov_info { + unsigned int version; + struct gcov_info *next; + unsigned int stamp; + const char *filename; + unsigned int n_functions; + const struct gcov_fn_info *functions; + unsigned int ctr_mask; + struct gcov_ctr_info counts[0]; +}; + +/* Base interface. */ +enum gcov_action { + GCOV_ADD, + GCOV_REMOVE, +}; + +void gcov_event(enum gcov_action action, struct gcov_info *info); +void gcov_enable_events(void); + +/* Iterator control. */ +struct seq_file; +struct gcov_iterator; + +struct gcov_iterator *gcov_iter_new(struct gcov_info *info); +void gcov_iter_free(struct gcov_iterator *iter); +void gcov_iter_start(struct gcov_iterator *iter); +int gcov_iter_next(struct gcov_iterator *iter); +int gcov_iter_write(struct gcov_iterator *iter, struct seq_file *seq); +struct gcov_info *gcov_iter_get_info(struct gcov_iterator *iter); + +/* gcov_info control. 
*/ +void gcov_info_reset(struct gcov_info *info); +int gcov_info_is_compatible(struct gcov_info *info1, struct gcov_info *info2); +void gcov_info_add(struct gcov_info *dest, struct gcov_info *source); +struct gcov_info *gcov_info_dup(struct gcov_info *info); +void gcov_info_free(struct gcov_info *info); + +struct gcov_link { + enum { + OBJ_TREE, + SRC_TREE, + } dir; + const char *ext; +}; +extern const struct gcov_link gcov_link[]; + +#endif /* GCOV_H */ diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index b675a67..9002958 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -380,6 +380,8 @@ ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs) return res; } +EXPORT_SYMBOL_GPL(ktime_add_safe); + #ifdef CONFIG_DEBUG_OBJECTS_TIMERS static struct debug_obj_descr hrtimer_debug_descr; diff --git a/kernel/kthread.c b/kernel/kthread.c index 7fa4413..9b1a7de 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -27,7 +27,6 @@ struct kthread_create_info /* Information passed to kthread() from kthreadd. */ int (*threadfn)(void *data); void *data; - struct completion started; /* Result passed back to kthread_create() from kthreadd. */ struct task_struct *result; @@ -36,17 +35,13 @@ struct kthread_create_info struct list_head list; }; -struct kthread_stop_info -{ - struct task_struct *k; - int err; - struct completion done; +struct kthread { + int should_stop; + struct completion exited; }; -/* Thread stopping is done by setthing this var: lock serializes - * multiple kthread_stop calls. */ -static DEFINE_MUTEX(kthread_stop_lock); -static struct kthread_stop_info kthread_stop_info; +#define to_kthread(tsk) \ + container_of((tsk)->vfork_done, struct kthread, exited) /** * kthread_should_stop - should this kthread return now? @@ -57,36 +52,35 @@ static struct kthread_stop_info kthread_stop_info; */ int kthread_should_stop(void) { - return (kthread_stop_info.k == current); + return to_kthread(current)->should_stop; } EXPORT_SYMBOL(kthread_should_stop); static int kthread(void *_create) { + /* Copy data: it's on kthread's stack */ struct kthread_create_info *create = _create; - int (*threadfn)(void *data); - void *data; - int ret = -EINTR; + int (*threadfn)(void *data) = create->threadfn; + void *data = create->data; + struct kthread self; + int ret; - /* Copy data: it's on kthread's stack */ - threadfn = create->threadfn; - data = create->data; + self.should_stop = 0; + init_completion(&self.exited); + current->vfork_done = &self.exited; /* OK, tell user we're spawned, wait for stop or wakeup */ __set_current_state(TASK_UNINTERRUPTIBLE); create->result = current; - complete(&create->started); + complete(&create->done); schedule(); - if (!kthread_should_stop()) + ret = -EINTR; + if (!self.should_stop) ret = threadfn(data); - /* It might have exited on its own, w/o kthread_stop. Check. */ - if (kthread_should_stop()) { - kthread_stop_info.err = ret; - complete(&kthread_stop_info.done); - } - return 0; + /* we can't just return, we must preserve "self" on stack */ + do_exit(ret); } static void create_kthread(struct kthread_create_info *create) @@ -95,11 +89,10 @@ static void create_kthread(struct kthread_create_info *create) /* We want our own signal handler (we take no signals by default). 
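/*
 * Editor's sketch of the client pattern this rework preserves: a kthread
 * still polls kthread_should_stop(), which now reads the on-stack
 * struct kthread found via to_kthread() instead of the old global
 * kthread_stop_info.  Hypothetical example thread function:
 */
static int example_thread_fn(void *data)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;	/* reaches the kthread_stop() caller via ->exit_code */
}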
*/ pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD); - if (pid < 0) + if (pid < 0) { create->result = ERR_PTR(pid); - else - wait_for_completion(&create->started); - complete(&create->done); + complete(&create->done); + } } /** @@ -130,7 +123,6 @@ struct task_struct *kthread_create(int (*threadfn)(void *data), create.threadfn = threadfn; create.data = data; - init_completion(&create.started); init_completion(&create.done); spin_lock(&kthread_create_lock); @@ -198,30 +190,22 @@ EXPORT_SYMBOL(kthread_bind); */ int kthread_stop(struct task_struct *k) { + struct kthread *kthread; int ret; - mutex_lock(&kthread_stop_lock); - - /* It could exit after stop_info.k set, but before wake_up_process. */ - get_task_struct(k); - trace_sched_kthread_stop(k); + get_task_struct(k); - /* Must init completion *before* thread sees kthread_stop_info.k */ - init_completion(&kthread_stop_info.done); - smp_wmb(); + kthread = to_kthread(k); + barrier(); /* it might have exited */ + if (k->vfork_done != NULL) { + kthread->should_stop = 1; + wake_up_process(k); + wait_for_completion(&kthread->exited); + } + ret = k->exit_code; - /* Now set kthread_should_stop() to true, and wake it up. */ - kthread_stop_info.k = k; - wake_up_process(k); put_task_struct(k); - - /* Once it dies, reset stop ptr, gather result and we're done. */ - wait_for_completion(&kthread_stop_info.done); - kthread_stop_info.k = NULL; - ret = kthread_stop_info.err; - mutex_unlock(&kthread_stop_lock); - trace_sched_kthread_stop_ret(ret); return ret; diff --git a/kernel/module.c b/kernel/module.c index 215aaab..38928fc 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2216,6 +2216,10 @@ static noinline struct module *load_module(void __user *umod, mod->unused_gpl_crcs = section_addr(hdr, sechdrs, secstrings, "__kcrctab_unused_gpl"); #endif +#ifdef CONFIG_CONSTRUCTORS + mod->ctors = section_objs(hdr, sechdrs, secstrings, ".ctors", + sizeof(*mod->ctors), &mod->num_ctors); +#endif #ifdef CONFIG_MARKERS mod->markers = section_objs(hdr, sechdrs, secstrings, "__markers", @@ -2389,6 +2393,17 @@ static noinline struct module *load_module(void __user *umod, goto free_hdr; } +/* Call module constructors. */ +static void do_mod_ctors(struct module *mod) +{ +#ifdef CONFIG_CONSTRUCTORS + unsigned long i; + + for (i = 0; i < mod->num_ctors; i++) + mod->ctors[i](); +#endif +} + /* This is where the real work happens */ SYSCALL_DEFINE3(init_module, void __user *, umod, unsigned long, len, const char __user *, uargs) @@ -2417,6 +2432,7 @@ SYSCALL_DEFINE3(init_module, void __user *, umod, blocking_notifier_call_chain(&module_notify_list, MODULE_STATE_COMING, mod); + do_mod_ctors(mod); /* Start the module */ if (mod->init != NULL) ret = do_one_initcall(mod->init); diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c index 63598dc..09b4ff9 100644 --- a/kernel/nsproxy.c +++ b/kernel/nsproxy.c @@ -26,19 +26,14 @@ static struct kmem_cache *nsproxy_cachep; struct nsproxy init_nsproxy = INIT_NSPROXY(init_nsproxy); -/* - * creates a copy of "orig" with refcount 1. 
- */ -static inline struct nsproxy *clone_nsproxy(struct nsproxy *orig) +static inline struct nsproxy *create_nsproxy(void) { - struct nsproxy *ns; + struct nsproxy *nsproxy; - ns = kmem_cache_alloc(nsproxy_cachep, GFP_KERNEL); - if (ns) { - memcpy(ns, orig, sizeof(struct nsproxy)); - atomic_set(&ns->count, 1); - } - return ns; + nsproxy = kmem_cache_alloc(nsproxy_cachep, GFP_KERNEL); + if (nsproxy) + atomic_set(&nsproxy->count, 1); + return nsproxy; } /* @@ -52,7 +47,7 @@ static struct nsproxy *create_new_namespaces(unsigned long flags, struct nsproxy *new_nsp; int err; - new_nsp = clone_nsproxy(tsk->nsproxy); + new_nsp = create_nsproxy(); if (!new_nsp) return ERR_PTR(-ENOMEM); diff --git a/kernel/pid.c b/kernel/pid.c index b2e5f78..31310b5 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -378,26 +378,15 @@ EXPORT_SYMBOL(pid_task); /* * Must be called under rcu_read_lock() or with tasklist_lock read-held. */ -struct task_struct *find_task_by_pid_type_ns(int type, int nr, - struct pid_namespace *ns) +struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns) { - return pid_task(find_pid_ns(nr, ns), type); + return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); } -EXPORT_SYMBOL(find_task_by_pid_type_ns); - struct task_struct *find_task_by_vpid(pid_t vnr) { - return find_task_by_pid_type_ns(PIDTYPE_PID, vnr, - current->nsproxy->pid_ns); -} -EXPORT_SYMBOL(find_task_by_vpid); - -struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns) -{ - return find_task_by_pid_type_ns(PIDTYPE_PID, nr, ns); + return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns); } -EXPORT_SYMBOL(find_task_by_pid_ns); struct pid *get_task_pid(struct task_struct *task, enum pid_type type) { diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index 2d1001b..821722a 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@ -67,9 +67,10 @@ err_alloc: return NULL; } -static struct pid_namespace *create_pid_namespace(unsigned int level) +static struct pid_namespace *create_pid_namespace(struct pid_namespace *parent_pid_ns) { struct pid_namespace *ns; + unsigned int level = parent_pid_ns->level + 1; int i; ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL); @@ -86,6 +87,7 @@ static struct pid_namespace *create_pid_namespace(unsigned int level) kref_init(&ns->kref); ns->level = level; + ns->parent = get_pid_ns(parent_pid_ns); set_bit(0, ns->pidmap[0].page); atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1); @@ -114,25 +116,11 @@ static void destroy_pid_namespace(struct pid_namespace *ns) struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns) { - struct pid_namespace *new_ns; - - BUG_ON(!old_ns); - new_ns = get_pid_ns(old_ns); if (!(flags & CLONE_NEWPID)) - goto out; - - new_ns = ERR_PTR(-EINVAL); + return get_pid_ns(old_ns); if (flags & CLONE_THREAD) - goto out_put; - - new_ns = create_pid_namespace(old_ns->level + 1); - if (!IS_ERR(new_ns)) - new_ns->parent = get_pid_ns(old_ns); - -out_put: - put_pid_ns(old_ns); -out: - return new_ns; + return ERR_PTR(-EINVAL); + return create_pid_namespace(old_ns); } void free_pid_ns(struct kref *kref) diff --git a/kernel/ptrace.c b/kernel/ptrace.c index f6d8b8c..61c78b2 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -167,67 +167,82 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode) int ptrace_attach(struct task_struct *task) { int retval; - unsigned long flags; audit_ptrace(task); retval = -EPERM; + if (unlikely(task->flags & PF_KTHREAD)) + goto out; if (same_thread_group(task, 
current)) goto out; - /* Protect the target's credential calculations against our + /* + * Protect exec's credential calculations against our * interference; SUID, SGID and LSM creds get determined differently * under ptrace. */ retval = mutex_lock_interruptible(&task->cred_guard_mutex); - if (retval < 0) + if (retval < 0) goto out; - retval = -EPERM; -repeat: - /* - * Nasty, nasty. - * - * We want to hold both the task-lock and the - * tasklist_lock for writing at the same time. - * But that's against the rules (tasklist_lock - * is taken for reading by interrupts on other - * cpu's that may have task_lock). - */ task_lock(task); - if (!write_trylock_irqsave(&tasklist_lock, flags)) { - task_unlock(task); - do { - cpu_relax(); - } while (!write_can_lock(&tasklist_lock)); - goto repeat; - } - - if (!task->mm) - goto bad; - /* the same process cannot be attached many times */ - if (task->ptrace & PT_PTRACED) - goto bad; retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH); + task_unlock(task); if (retval) - goto bad; + goto unlock_creds; - /* Go */ - task->ptrace |= PT_PTRACED; + write_lock_irq(&tasklist_lock); + retval = -EPERM; + if (unlikely(task->exit_state)) + goto unlock_tasklist; + if (task->ptrace) + goto unlock_tasklist; + + task->ptrace = PT_PTRACED; if (capable(CAP_SYS_PTRACE)) task->ptrace |= PT_PTRACE_CAP; __ptrace_link(task, current); send_sig_info(SIGSTOP, SEND_SIG_FORCED, task); -bad: - write_unlock_irqrestore(&tasklist_lock, flags); - task_unlock(task); + + retval = 0; +unlock_tasklist: + write_unlock_irq(&tasklist_lock); +unlock_creds: mutex_unlock(&task->cred_guard_mutex); out: return retval; } +/** + * ptrace_traceme -- helper for PTRACE_TRACEME + * + * Performs checks and sets PT_PTRACED. + * Should be used by all ptrace implementations for PTRACE_TRACEME. + */ +int ptrace_traceme(void) +{ + int ret = -EPERM; + + write_lock_irq(&tasklist_lock); + /* Are we already being traced? */ + if (!current->ptrace) { + ret = security_ptrace_traceme(current->parent); + /* + * Check PF_EXITING to ensure ->real_parent has not passed + * exit_ptrace(). Otherwise we don't report the error but + * pretend ->real_parent untraces us right after return. + */ + if (!ret && !(current->real_parent->flags & PF_EXITING)) { + current->ptrace = PT_PTRACED; + __ptrace_link(current, current->real_parent); + } + } + write_unlock_irq(&tasklist_lock); + + return ret; +} + /* * Called with irqs disabled, returns true if children should reap themselves. 
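/*
 * Editor's sketch of the userspace pattern ptrace_traceme() above serves
 * (hypothetical helper, built against libc in a debugger's child process,
 * not kernel code):
 */
#include <sys/ptrace.h>
#include <unistd.h>

static void become_tracee(void)	/* run in the child, before exec */
{
	ptrace(PTRACE_TRACEME, 0, NULL, NULL);	/* lands in ptrace_traceme() */
	execl("/bin/true", "true", (char *)NULL);
}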
*/ @@ -409,37 +424,33 @@ static int ptrace_setoptions(struct task_struct *child, long data) static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info) { + unsigned long flags; int error = -ESRCH; - read_lock(&tasklist_lock); - if (likely(child->sighand != NULL)) { + if (lock_task_sighand(child, &flags)) { error = -EINVAL; - spin_lock_irq(&child->sighand->siglock); if (likely(child->last_siginfo != NULL)) { *info = *child->last_siginfo; error = 0; } - spin_unlock_irq(&child->sighand->siglock); + unlock_task_sighand(child, &flags); } - read_unlock(&tasklist_lock); return error; } static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info) { + unsigned long flags; int error = -ESRCH; - read_lock(&tasklist_lock); - if (likely(child->sighand != NULL)) { + if (lock_task_sighand(child, &flags)) { error = -EINVAL; - spin_lock_irq(&child->sighand->siglock); if (likely(child->last_siginfo != NULL)) { *child->last_siginfo = *info; error = 0; } - spin_unlock_irq(&child->sighand->siglock); + unlock_task_sighand(child, &flags); } - read_unlock(&tasklist_lock); return error; } @@ -566,72 +577,16 @@ int ptrace_request(struct task_struct *child, long request, return ret; } -/** - * ptrace_traceme -- helper for PTRACE_TRACEME - * - * Performs checks and sets PT_PTRACED. - * Should be used by all ptrace implementations for PTRACE_TRACEME. - */ -int ptrace_traceme(void) -{ - int ret = -EPERM; - - /* - * Are we already being traced? - */ -repeat: - task_lock(current); - if (!(current->ptrace & PT_PTRACED)) { - /* - * See ptrace_attach() comments about the locking here. - */ - unsigned long flags; - if (!write_trylock_irqsave(&tasklist_lock, flags)) { - task_unlock(current); - do { - cpu_relax(); - } while (!write_can_lock(&tasklist_lock)); - goto repeat; - } - - ret = security_ptrace_traceme(current->parent); - - /* - * Check PF_EXITING to ensure ->real_parent has not passed - * exit_ptrace(). Otherwise we don't report the error but - * pretend ->real_parent untraces us right after return. - */ - if (!ret && !(current->real_parent->flags & PF_EXITING)) { - current->ptrace |= PT_PTRACED; - __ptrace_link(current, current->real_parent); - } - - write_unlock_irqrestore(&tasklist_lock, flags); - } - task_unlock(current); - return ret; -} - -/** - * ptrace_get_task_struct -- grab a task struct reference for ptrace - * @pid: process id to grab a task_struct reference of - * - * This function is a helper for ptrace implementations. It checks - * permissions and then grabs a task struct for use of the actual - * ptrace implementation. - * - * Returns the task_struct for @pid or an ERR_PTR() on failure. 
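/*
 * Editor's sketch of the idiom the two siginfo conversions above adopt
 * (hypothetical helper): lock_task_sighand() validates ->sighand and takes
 * ->siglock with irqs saved, so the tasklist_lock read-side can disappear
 * from these paths.
 */
static int have_last_siginfo(struct task_struct *child)
{
	unsigned long flags;
	int ret = 0;

	if (lock_task_sighand(child, &flags)) {
		ret = child->last_siginfo != NULL;	/* safe under siglock */
		unlock_task_sighand(child, &flags);
	}
	return ret;
}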
- */ -struct task_struct *ptrace_get_task_struct(pid_t pid) +static struct task_struct *ptrace_get_task_struct(pid_t pid) { struct task_struct *child; - read_lock(&tasklist_lock); + rcu_read_lock(); child = find_task_by_vpid(pid); if (child) get_task_struct(child); + rcu_read_unlock(); - read_unlock(&tasklist_lock); if (!child) return ERR_PTR(-ESRCH); return child; diff --git a/kernel/res_counter.c b/kernel/res_counter.c index bf8e753..e1338f0 100644 --- a/kernel/res_counter.c +++ b/kernel/res_counter.c @@ -18,7 +18,7 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent) { spin_lock_init(&counter->lock); - counter->limit = (unsigned long long)LLONG_MAX; + counter->limit = RESOURCE_MAX; counter->parent = parent; } @@ -133,6 +133,16 @@ int res_counter_memparse_write_strategy(const char *buf, unsigned long long *res) { char *end; + + /* return RESOURCE_MAX(unlimited) if "-1" is specified */ + if (*buf == '-') { + *res = simple_strtoull(buf + 1, &end, 10); + if (*res != 1 || *end != '\0') + return -EINVAL; + *res = RESOURCE_MAX; + return 0; + } + /* FIXME - make memparse() take const char* args */ *res = memparse((char *)buf, &end); if (*end != '\0') diff --git a/kernel/sched.c b/kernel/sched.c index 8fb88a9..247fd0f 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -7045,7 +7045,7 @@ static int migration_thread(void *data) if (cpu_is_offline(cpu)) { spin_unlock_irq(&rq->lock); - goto wait_to_die; + break; } if (rq->active_balance) { @@ -7071,16 +7071,7 @@ static int migration_thread(void *data) complete(&req->done); } __set_current_state(TASK_RUNNING); - return 0; -wait_to_die: - /* Wait for kthread_stop */ - set_current_state(TASK_INTERRUPTIBLE); - while (!kthread_should_stop()) { - schedule(); - set_current_state(TASK_INTERRUPTIBLE); - } - __set_current_state(TASK_RUNNING); return 0; } @@ -7494,6 +7485,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) rq = task_rq_lock(p, &flags); __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); task_rq_unlock(rq, &flags); + get_task_struct(p); cpu_rq(cpu)->migration_thread = p; break; @@ -7524,6 +7516,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) kthread_bind(cpu_rq(cpu)->migration_thread, cpumask_any(cpu_online_mask)); kthread_stop(cpu_rq(cpu)->migration_thread); + put_task_struct(cpu_rq(cpu)->migration_thread); cpu_rq(cpu)->migration_thread = NULL; break; @@ -7533,6 +7526,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) migrate_live_tasks(cpu); rq = cpu_rq(cpu); kthread_stop(rq->migration_thread); + put_task_struct(rq->migration_thread); rq->migration_thread = NULL; /* Idle task back to normal (off runqueue, low prio) */ spin_lock_irq(&rq->lock); diff --git a/kernel/signal.c b/kernel/signal.c index d81f495..ccf1cee 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1410,7 +1410,7 @@ int do_notify_parent(struct task_struct *tsk, int sig) /* do_notify_parent_cldstop should have been called instead. 
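
In the kernel/sched.c hunks above, the migration thread's private "wait_to_die" loop is gone: the thread simply breaks out of its work loop when its CPU goes offline, and the hotplug notifier instead holds an explicit reference so the task_struct behind rq->migration_thread stays valid until kthread_stop() returns. The pairing is:

        /* CPU_UP_PREPARE: pin the kthread before publishing the pointer */
        get_task_struct(p);
        cpu_rq(cpu)->migration_thread = p;

        /* CPU_UP_CANCELED / CPU_DEAD: stop it, then drop the pin */
        kthread_stop(rq->migration_thread);
        put_task_struct(rq->migration_thread);
        rq->migration_thread = NULL;
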
*/ BUG_ON(task_is_stopped_or_traced(tsk)); - BUG_ON(!tsk->ptrace && + BUG_ON(!task_ptrace(tsk) && (tsk->group_leader != tsk || !thread_group_empty(tsk))); info.si_signo = sig; @@ -1449,7 +1449,7 @@ int do_notify_parent(struct task_struct *tsk, int sig) psig = tsk->parent->sighand; spin_lock_irqsave(&psig->siglock, flags); - if (!tsk->ptrace && sig == SIGCHLD && + if (!task_ptrace(tsk) && sig == SIGCHLD && (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { /* @@ -1486,7 +1486,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why) struct task_struct *parent; struct sighand_struct *sighand; - if (tsk->ptrace & PT_PTRACED) + if (task_ptrace(tsk)) parent = tsk->parent; else { tsk = tsk->group_leader; @@ -1499,7 +1499,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why) * see comment in do_notify_parent() abot the following 3 lines */ rcu_read_lock(); - info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); + info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns); info.si_uid = __task_cred(tsk)->uid; rcu_read_unlock(); @@ -1535,7 +1535,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why) static inline int may_ptrace_stop(void) { - if (!likely(current->ptrace & PT_PTRACED)) + if (!likely(task_ptrace(current))) return 0; /* * Are we in the middle of do_coredump? @@ -1753,7 +1753,7 @@ static int do_signal_stop(int signr) static int ptrace_signal(int signr, siginfo_t *info, struct pt_regs *regs, void *cookie) { - if (!(current->ptrace & PT_PTRACED)) + if (!task_ptrace(current)) return signr; ptrace_signal_deliver(regs, cookie); diff --git a/kernel/softirq.c b/kernel/softirq.c index b41fb71..3a94905 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -213,6 +213,7 @@ restart: do { if (pending & 1) { int prev_count = preempt_count(); + kstat_incr_softirqs_this_cpu(h - softirq_vec); trace_softirq_entry(h, softirq_vec); h->action(h); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index ab462b9..62e4ff9 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2283,7 +2283,7 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table, void *data) { #define TMPBUFLEN 21 - int *i, vleft, first=1, neg, val; + int *i, vleft, first = 1, neg; unsigned long lval; size_t left, len; @@ -2336,8 +2336,6 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table, len = p-buf; if ((len < left) && *p && !isspace(*p)) break; - if (neg) - val = -val; s += len; left -= len; diff --git a/kernel/utsname.c b/kernel/utsname.c index 815237a..8a82b4b 100644 --- a/kernel/utsname.c +++ b/kernel/utsname.c @@ -15,6 +15,16 @@ #include <linux/err.h> #include <linux/slab.h> +static struct uts_namespace *create_uts_ns(void) +{ + struct uts_namespace *uts_ns; + + uts_ns = kmalloc(sizeof(struct uts_namespace), GFP_KERNEL); + if (uts_ns) + kref_init(&uts_ns->kref); + return uts_ns; +} + /* * Clone a new ns copying an original utsname, setting refcount to 1 * @old_ns: namespace to clone @@ -24,14 +34,13 @@ static struct uts_namespace *clone_uts_ns(struct uts_namespace *old_ns) { struct uts_namespace *ns; - ns = kmalloc(sizeof(struct uts_namespace), GFP_KERNEL); + ns = create_uts_ns(); if (!ns) return ERR_PTR(-ENOMEM); down_read(&uts_sem); memcpy(&ns->name, &old_ns->name, sizeof(ns->name)); up_read(&uts_sem); - kref_init(&ns->kref); return ns; } diff --git a/lib/Makefile b/lib/Makefile index 8e9bcf9..b6d1857 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -21,7 +21,7 @@ 
lib-y += kobject.o kref.o klist.o obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ - string_helpers.o + string_helpers.o gcd.o ifeq ($(CONFIG_DEBUG_KOBJECT),y) CFLAGS_kobject.o += -DDEBUG diff --git a/lib/gcd.c b/lib/gcd.c new file mode 100644 index 0000000..f879033 --- /dev/null +++ b/lib/gcd.c @@ -0,0 +1,18 @@ +#include <linux/kernel.h> +#include <linux/gcd.h> +#include <linux/module.h> + +/* Greatest common divisor */ +unsigned long gcd(unsigned long a, unsigned long b) +{ + unsigned long r; + + if (a < b) + swap(a, b); + while ((r = a % b) != 0) { + a = b; + b = r; + } + return b; +} +EXPORT_SYMBOL_GPL(gcd); diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 58ec86c..ec759b6 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -109,6 +109,9 @@ #define BYTES_PER_POINTER sizeof(void *) +/* GFP bitmask for kmemleak internal allocations */ +#define GFP_KMEMLEAK_MASK (GFP_KERNEL | GFP_ATOMIC) + /* scanning area inside a memory block */ struct kmemleak_scan_area { struct hlist_node node; @@ -199,9 +202,9 @@ static DEFINE_MUTEX(kmemleak_mutex); static int reported_leaks; /* - * Early object allocation/freeing logging. Kkmemleak is initialized after the + * Early object allocation/freeing logging. Kmemleak is initialized after the * kernel allocator. However, both the kernel allocator and kmemleak may - * allocate memory blocks which need to be tracked. Kkmemleak defines an + * allocate memory blocks which need to be tracked. Kmemleak defines an * arbitrary buffer to hold the allocation/freeing information before it is * fully initialized. */ @@ -245,10 +248,10 @@ static void kmemleak_disable(void); /* * Macro invoked when a serious kmemleak condition occured and cannot be - * recovered from. Kkmemleak will be disabled and further allocation/freeing + * recovered from. Kmemleak will be disabled and further allocation/freeing * tracing no longer available. */ -#define kmemleak_panic(x...) do { \ +#define kmemleak_stop(x...) 
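
The new lib/gcd.c above gives the tree one shared Euclidean GCD (note it assumes both arguments are non-zero). A typical consumer is a least-common-multiple computation, e.g. when reconciling two period or clock-divider constraints; the lcm() below is only an illustration built on the new helper, not part of this patch:

        #include <linux/gcd.h>

        /* illustrative only: LCM via the new gcd(); a and b must be non-zero */
        static unsigned long lcm(unsigned long a, unsigned long b)
        {
                return (a / gcd(a, b)) * b;  /* divide first to limit overflow */
        }
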
do { \ kmemleak_warn(x); \ kmemleak_disable(); \ } while (0) @@ -462,10 +465,10 @@ static void create_object(unsigned long ptr, size_t size, int min_count, struct prio_tree_node *node; struct stack_trace trace; - object = kmem_cache_alloc(object_cache, gfp & ~GFP_SLAB_BUG_MASK); + object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK); if (!object) { - kmemleak_panic("kmemleak: Cannot allocate a kmemleak_object " - "structure\n"); + kmemleak_stop("kmemleak: Cannot allocate a kmemleak_object " + "structure\n"); return; } @@ -524,8 +527,8 @@ static void create_object(unsigned long ptr, size_t size, int min_count, if (node != &object->tree_node) { unsigned long flags; - kmemleak_panic("kmemleak: Cannot insert 0x%lx into the object " - "search tree (already existing)\n", ptr); + kmemleak_stop("kmemleak: Cannot insert 0x%lx into the object " + "search tree (already existing)\n", ptr); object = lookup_object(ptr, 1); spin_lock_irqsave(&object->lock, flags); dump_object_info(object); @@ -636,7 +639,7 @@ static void add_scan_area(unsigned long ptr, unsigned long offset, return; } - area = kmem_cache_alloc(scan_area_cache, gfp & ~GFP_SLAB_BUG_MASK); + area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK); if (!area) { kmemleak_warn("kmemleak: Cannot allocate a scan area\n"); goto out; @@ -696,7 +699,7 @@ static void log_early(int op_type, const void *ptr, size_t size, struct early_log *log; if (crt_early_log >= ARRAY_SIZE(early_log)) { - kmemleak_panic("kmemleak: Early log buffer exceeded\n"); + kmemleak_stop("kmemleak: Early log buffer exceeded\n"); return; } @@ -1404,7 +1407,7 @@ static int kmemleak_boot_config(char *str) early_param("kmemleak", kmemleak_boot_config); /* - * Kkmemleak initialization. + * Kmemleak initialization. */ void __init kmemleak_init(void) { diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 70db6e0..e2fa20d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -45,7 +45,7 @@ struct cgroup_subsys mem_cgroup_subsys __read_mostly; #define MEM_CGROUP_RECLAIM_RETRIES 5 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP -/* Turned on only when memory cgroup is enabled && really_do_swap_account = 0 */ +/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */ int do_swap_account __read_mostly; static int really_do_swap_account __initdata = 1; /* for remember boot option*/ #else @@ -62,7 +62,8 @@ enum mem_cgroup_stat_index { * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss. */ MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */ - MEM_CGROUP_STAT_RSS, /* # of pages charged as rss */ + MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */ + MEM_CGROUP_STAT_MAPPED_FILE, /* # of pages charged as file rss */ MEM_CGROUP_STAT_PGPGIN_COUNT, /* # of pages paged in */ MEM_CGROUP_STAT_PGPGOUT_COUNT, /* # of pages paged out */ @@ -176,6 +177,9 @@ struct mem_cgroup { unsigned int swappiness; + /* set when res.limit == memsw.limit */ + bool memsw_is_minimum; + /* * statistics. This must be placed at the end of memcg. 
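
Two things run through the kmemleak hunks above: kmemleak_panic() is renamed kmemleak_stop(), since the condition only disables kmemleak rather than panicking the kernel, and the internal metadata allocations switch from stripping GFP_SLAB_BUG_MASK to keeping only the caller's allocation-context bits:

        /* only GFP_KERNEL/GFP_ATOMIC context bits reach kmemleak's caches */
        #define GFP_KMEMLEAK_MASK       (GFP_KERNEL | GFP_ATOMIC)

        object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);

The practical effect is that flags such as GFP_DMA or __GFP_NOFAIL on the traced allocation are no longer propagated to kmemleak's own bookkeeping allocation.
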
*/ @@ -188,6 +192,7 @@ enum charge_type { MEM_CGROUP_CHARGE_TYPE_SHMEM, /* used by page migration of shmem */ MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */ MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */ + MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */ NR_CHARGE_TYPE, }; @@ -644,6 +649,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, int zid = zone_idx(z); struct mem_cgroup_per_zone *mz; int lru = LRU_FILE * !!file + !!active; + int ret; BUG_ON(!mem_cont); mz = mem_cgroup_zoneinfo(mem_cont, nid, zid); @@ -661,9 +667,19 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, continue; scan++; - if (__isolate_lru_page(page, mode, file) == 0) { + ret = __isolate_lru_page(page, mode, file); + switch (ret) { + case 0: list_move(&page->lru, dst); + mem_cgroup_del_lru(page); nr_taken++; + break; + case -EBUSY: + /* we don't affect global LRU but rotate in our LRU */ + mem_cgroup_rotate_lru_list(page, page_lru(page)); + break; + default: + break; } } @@ -845,6 +861,10 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, int ret, total = 0; int loop = 0; + /* If memsw_is_minimum==1, swap-out is of-no-use. */ + if (root_mem->memsw_is_minimum) + noswap = true; + while (loop < 2) { victim = mem_cgroup_select_victim(root_mem); if (victim == root_mem) @@ -900,6 +920,44 @@ static void record_last_oom(struct mem_cgroup *mem) mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb); } +/* + * Currently used to update mapped file statistics, but the routine can be + * generalized to update other statistics as well. + */ +void mem_cgroup_update_mapped_file_stat(struct page *page, int val) +{ + struct mem_cgroup *mem; + struct mem_cgroup_stat *stat; + struct mem_cgroup_stat_cpu *cpustat; + int cpu; + struct page_cgroup *pc; + + if (!page_is_file_cache(page)) + return; + + pc = lookup_page_cgroup(page); + if (unlikely(!pc)) + return; + + lock_page_cgroup(pc); + mem = pc->mem_cgroup; + if (!mem) + goto done; + + if (!PageCgroupUsed(pc)) + goto done; + + /* + * Preemption is already disabled, we don't need get_cpu() + */ + cpu = smp_processor_id(); + stat = &mem->stat; + cpustat = &stat->cpustat[cpu]; + + __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE, val); +done: + unlock_page_cgroup(pc); +} /* * Unlike exported interface, "oom" parameter is added. 
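
The memsw_is_minimum flag introduced above captures a small but useful invariant: when memory.limit_in_bytes equals memory.memsw.limit_in_bytes, swap-out can never relieve pressure, because a swapped page stops being charged to "mem" but starts being charged to "swap", leaving mem+swap unchanged. Worked through in comment form:

        /*
         * limit = memsw.limit = 100 pages, all 100 resident.
         * Swap one page out:  mem 99 + swap 1 = 100, so memsw usage is
         * unchanged and swap-out can never bring us under the memsw limit.
         * Reclaim must drop or write back pages instead:
         */
        if (root_mem->memsw_is_minimum)
                noswap = true;
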
if oom==true, @@ -1098,6 +1156,10 @@ static int mem_cgroup_move_account(struct page_cgroup *pc, struct mem_cgroup_per_zone *from_mz, *to_mz; int nid, zid; int ret = -EBUSY; + struct page *page; + int cpu; + struct mem_cgroup_stat *stat; + struct mem_cgroup_stat_cpu *cpustat; VM_BUG_ON(from == to); VM_BUG_ON(PageLRU(pc->page)); @@ -1118,6 +1180,23 @@ static int mem_cgroup_move_account(struct page_cgroup *pc, res_counter_uncharge(&from->res, PAGE_SIZE); mem_cgroup_charge_statistics(from, pc, false); + + page = pc->page; + if (page_is_file_cache(page) && page_mapped(page)) { + cpu = smp_processor_id(); + /* Update mapped_file data for mem_cgroup "from" */ + stat = &from->stat; + cpustat = &stat->cpustat[cpu]; + __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE, + -1); + + /* Update mapped_file data for mem_cgroup "to" */ + stat = &to->stat; + cpustat = &stat->cpustat[cpu]; + __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE, + 1); + } + if (do_swap_account) res_counter_uncharge(&from->memsw, PAGE_SIZE); css_put(&from->css); @@ -1433,6 +1512,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) switch (ctype) { case MEM_CGROUP_CHARGE_TYPE_MAPPED: + case MEM_CGROUP_CHARGE_TYPE_DROP: if (page_mapped(page)) goto unlock_out; break; @@ -1496,18 +1576,23 @@ void mem_cgroup_uncharge_cache_page(struct page *page) * called after __delete_from_swap_cache() and drop "page" account. * memcg information is recorded to swap_cgroup of "ent" */ -void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent) +void +mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout) { struct mem_cgroup *memcg; + int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT; + + if (!swapout) /* this was a swap cache but the swap is unused ! 
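
The swapout parameter being added to mem_cgroup_uncharge_swapcache() here distinguishes "the page really went to swap" from "the page was swap cache but the swap entry just died". Only the former records the memcg in swap_cgroup and takes the extra reference; the caller (the mm/swapfile.c hunk further down) derives the flag from swap_entry_free()'s remaining count:

        ret = swap_entry_free(p, entry, SWAP_CACHE);
        /* ret != 0: the entry still has users, so the data lives on in swap */
        mem_cgroup_uncharge_swapcache(page, entry, ret != 0);
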
*/ + ctype = MEM_CGROUP_CHARGE_TYPE_DROP; + + memcg = __mem_cgroup_uncharge_common(page, ctype); - memcg = __mem_cgroup_uncharge_common(page, - MEM_CGROUP_CHARGE_TYPE_SWAPOUT); /* record memcg information */ - if (do_swap_account && memcg) { + if (do_swap_account && swapout && memcg) { swap_cgroup_record(ent, css_id(&memcg->css)); mem_cgroup_get(memcg); } - if (memcg) + if (swapout && memcg) css_put(&memcg->css); } #endif @@ -1685,6 +1770,12 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, break; } ret = res_counter_set_limit(&memcg->res, val); + if (!ret) { + if (memswlimit == val) + memcg->memsw_is_minimum = true; + else + memcg->memsw_is_minimum = false; + } mutex_unlock(&set_limit_mutex); if (!ret) @@ -1703,16 +1794,14 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, return ret; } -int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, - unsigned long long val) +static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, + unsigned long long val) { int retry_count; u64 memlimit, oldusage, curusage; int children = mem_cgroup_count_children(memcg); int ret = -EBUSY; - if (!do_swap_account) - return -EINVAL; /* see mem_cgroup_resize_res_limit */ retry_count = children * MEM_CGROUP_RECLAIM_RETRIES; oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); @@ -1734,6 +1823,12 @@ int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, break; } ret = res_counter_set_limit(&memcg->memsw, val); + if (!ret) { + if (memlimit == val) + memcg->memsw_is_minimum = true; + else + memcg->memsw_is_minimum = false; + } mutex_unlock(&set_limit_mutex); if (!ret) @@ -1947,8 +2042,7 @@ static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft) val = res_counter_read_u64(&mem->res, name); break; case _MEMSWAP: - if (do_swap_account) - val = res_counter_read_u64(&mem->memsw, name); + val = res_counter_read_u64(&mem->memsw, name); break; default: BUG(); @@ -2046,6 +2140,7 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event) enum { MCS_CACHE, MCS_RSS, + MCS_MAPPED_FILE, MCS_PGPGIN, MCS_PGPGOUT, MCS_INACTIVE_ANON, @@ -2066,6 +2161,7 @@ struct { } memcg_stat_strings[NR_MCS_STAT] = { {"cache", "total_cache"}, {"rss", "total_rss"}, + {"mapped_file", "total_mapped_file"}, {"pgpgin", "total_pgpgin"}, {"pgpgout", "total_pgpgout"}, {"inactive_anon", "total_inactive_anon"}, @@ -2086,6 +2182,8 @@ static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data) s->stat[MCS_CACHE] += val * PAGE_SIZE; val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS); s->stat[MCS_RSS] += val * PAGE_SIZE; + val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_MAPPED_FILE); + s->stat[MCS_MAPPED_FILE] += val * PAGE_SIZE; val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT); s->stat[MCS_PGPGIN] += val; val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a5f3c27..6f0753f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -73,6 +73,7 @@ unsigned long totalram_pages __read_mostly; unsigned long totalreserve_pages __read_mostly; unsigned long highest_memmap_pfn __read_mostly; int percpu_pagelist_fraction; +gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE int pageblock_order __read_mostly; @@ -1863,6 +1864,8 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, struct page *page; int migratetype = allocflags_to_migratetype(gfp_mask); + gfp_mask &= gfp_allowed_mask; + lockdep_trace_alloc(gfp_mask); 
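
The mm/page_alloc.c hunk above adds a single boot-time gate for allocation flags. gfp_allowed_mask starts out as GFP_BOOT_MASK, which (judging by the slab/slub comments deleted later in this patch) masks out __GFP_WAIT, __GFP_IO and __GFP_FS while interrupts are still disabled, and is presumably widened to __GFP_BITS_MASK elsewhere in the series once sleeping is safe. Every allocation then funnels through:

        gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

        /* in __alloc_pages_nodemask(): even a GFP_KERNEL caller cannot
         * sleep or start I/O before it is safe to do so */
        gfp_mask &= gfp_allowed_mask;

This replaces the per-allocator slab_gfp_mask copies that slab and slub previously maintained (removed in the hunks below).
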
might_sleep_if(gfp_mask & __GFP_WAIT); diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c index 11a8a10..f22b4eb 100644 --- a/mm/page_cgroup.c +++ b/mm/page_cgroup.c @@ -83,12 +83,12 @@ void __init page_cgroup_init_flatmem(void) goto fail; } printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage); - printk(KERN_INFO "please try cgroup_disable=memory option if you" - " don't want\n"); + printk(KERN_INFO "please try 'cgroup_disable=memory' option if you" + " don't want memory cgroups\n"); return; fail: - printk(KERN_CRIT "allocation of page_cgroup was failed.\n"); - printk(KERN_CRIT "please try cgroup_disable=memory boot option\n"); + printk(KERN_CRIT "allocation of page_cgroup failed.\n"); + printk(KERN_CRIT "please try 'cgroup_disable=memory' boot option\n"); panic("Out of memory"); } @@ -99,6 +99,8 @@ struct page_cgroup *lookup_page_cgroup(struct page *page) unsigned long pfn = page_to_pfn(page); struct mem_section *section = __pfn_to_section(pfn); + if (!section->page_cgroup) + return NULL; return section->page_cgroup + pfn; } @@ -252,14 +254,14 @@ void __init page_cgroup_init(void) fail = init_section_page_cgroup(pfn); } if (fail) { - printk(KERN_CRIT "try cgroup_disable=memory boot option\n"); + printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n"); panic("Out of memory"); } else { hotplug_memory_notifier(page_cgroup_callback, 0); } printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage); - printk(KERN_INFO "please try cgroup_disable=memory option if you don't" - " want\n"); + printk(KERN_INFO "please try 'cgroup_disable=memory' option if you don't" + " want memory cgroups\n"); } void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat) @@ -309,8 +311,6 @@ static int swap_cgroup_prepare(int type) struct swap_cgroup_ctrl *ctrl; unsigned long idx, max; - if (!do_swap_account) - return 0; ctrl = &swap_cgroup_ctrl[type]; for (idx = 0; idx < ctrl->length; idx++) { @@ -347,9 +347,6 @@ unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) struct swap_cgroup *sc; unsigned short old; - if (!do_swap_account) - return 0; - ctrl = &swap_cgroup_ctrl[type]; mappage = ctrl->map[idx]; @@ -378,9 +375,6 @@ unsigned short lookup_swap_cgroup(swp_entry_t ent) struct swap_cgroup *sc; unsigned short ret; - if (!do_swap_account) - return 0; - ctrl = &swap_cgroup_ctrl[type]; mappage = ctrl->map[idx]; sc = page_address(mappage); @@ -703,8 +703,10 @@ void page_add_new_anon_rmap(struct page *page, */ void page_add_file_rmap(struct page *page) { - if (atomic_inc_and_test(&page->_mapcount)) + if (atomic_inc_and_test(&page->_mapcount)) { __inc_zone_page_state(page, NR_FILE_MAPPED); + mem_cgroup_update_mapped_file_stat(page, 1); + } } #ifdef CONFIG_DEBUG_VM @@ -753,6 +755,7 @@ void page_remove_rmap(struct page *page) mem_cgroup_uncharge_page(page); __dec_zone_page_state(page, PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED); + mem_cgroup_update_mapped_file_stat(page, -1); /* * It would be tidy to reset the PageAnon mapping here, * but that might overwrite a racing page_add_anon_rmap @@ -305,12 +305,6 @@ struct kmem_list3 { }; /* - * The slab allocator is initialized with interrupts disabled. Therefore, make - * sure early boot allocations don't accidentally enable interrupts. - */ -static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK; - -/* * Need this for bootstrapping a per node allocator. 
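
Note the new early return in lookup_page_cgroup() above: it can now yield NULL when a memory section has no page_cgroup map yet. Callers that can run in such windows, like the new mapped-file statistics path, must treat NULL as "nothing to account":

        pc = lookup_page_cgroup(page);
        if (unlikely(!pc))      /* section has no page_cgroup map */
                return;
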
*/ #define NUM_INIT_LISTS (3 * MAX_NUMNODES) @@ -1559,11 +1553,6 @@ void __init kmem_cache_init_late(void) { struct kmem_cache *cachep; - /* - * Interrupts are enabled now so all GFP allocations are safe. - */ - slab_gfp_mask = __GFP_BITS_MASK; - /* 6) resize the head arrays to their final sizes */ mutex_lock(&cache_chain_mutex); list_for_each_entry(cachep, &cache_chain, next) @@ -2308,6 +2297,15 @@ kmem_cache_create (const char *name, size_t size, size_t align, /* really off slab. No need for manual alignment */ slab_size = cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab); + +#ifdef CONFIG_PAGE_POISONING + /* If we're going to use the generic kernel_map_pages() + * poisoning, then it's going to smash the contents of + * the redzone and userword anyhow, so switch them off. + */ + if (size % PAGE_SIZE == 0 && flags & SLAB_POISON) + flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); +#endif } cachep->colour_off = cache_line_size(); @@ -3298,7 +3296,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, unsigned long save_flags; void *ptr; - flags &= slab_gfp_mask; + flags &= gfp_allowed_mask; lockdep_trace_alloc(flags); @@ -3383,7 +3381,7 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller) unsigned long save_flags; void *objp; - flags &= slab_gfp_mask; + flags &= gfp_allowed_mask; lockdep_trace_alloc(flags); @@ -133,17 +133,17 @@ static LIST_HEAD(free_slob_large); */ static inline int is_slob_page(struct slob_page *sp) { - return PageSlobPage((struct page *)sp); + return PageSlab((struct page *)sp); } static inline void set_slob_page(struct slob_page *sp) { - __SetPageSlobPage((struct page *)sp); + __SetPageSlab((struct page *)sp); } static inline void clear_slob_page(struct slob_page *sp) { - __ClearPageSlobPage((struct page *)sp); + __ClearPageSlab((struct page *)sp); } static inline struct slob_page *slob_page(const void *addr) @@ -179,12 +179,6 @@ static enum { SYSFS /* Sysfs up */ } slab_state = DOWN; -/* - * The slab allocator is initialized with interrupts disabled. Therefore, make - * sure early boot allocations don't accidentally enable interrupts. 
- */ -static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK; - /* A list of all slab caches on the system */ static DECLARE_RWSEM(slub_lock); static LIST_HEAD(slab_caches); @@ -840,6 +834,11 @@ static inline unsigned long slabs_node(struct kmem_cache *s, int node) return atomic_long_read(&n->nr_slabs); } +static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) +{ + return atomic_long_read(&n->nr_slabs); +} + static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) { struct kmem_cache_node *n = get_node(s, node); @@ -1058,6 +1057,8 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize, static inline unsigned long slabs_node(struct kmem_cache *s, int node) { return 0; } +static inline unsigned long node_nr_slabs(struct kmem_cache_node *n) + { return 0; } static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) {} static inline void dec_slabs_node(struct kmem_cache *s, int node, @@ -1514,6 +1515,65 @@ static inline int node_match(struct kmem_cache_cpu *c, int node) return 1; } +static int count_free(struct page *page) +{ + return page->objects - page->inuse; +} + +static unsigned long count_partial(struct kmem_cache_node *n, + int (*get_count)(struct page *)) +{ + unsigned long flags; + unsigned long x = 0; + struct page *page; + + spin_lock_irqsave(&n->list_lock, flags); + list_for_each_entry(page, &n->partial, lru) + x += get_count(page); + spin_unlock_irqrestore(&n->list_lock, flags); + return x; +} + +static inline unsigned long node_nr_objs(struct kmem_cache_node *n) +{ +#ifdef CONFIG_SLUB_DEBUG + return atomic_long_read(&n->total_objects); +#else + return 0; +#endif +} + +static noinline void +slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) +{ + int node; + + printk(KERN_WARNING + "SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n", + nid, gfpflags); + printk(KERN_WARNING " cache: %s, object size: %d, buffer size: %d, " + "default order: %d, min order: %d\n", s->name, s->objsize, + s->size, oo_order(s->oo), oo_order(s->min)); + + for_each_online_node(node) { + struct kmem_cache_node *n = get_node(s, node); + unsigned long nr_slabs; + unsigned long nr_objs; + unsigned long nr_free; + + if (!n) + continue; + + nr_free = count_partial(n, count_free); + nr_slabs = node_nr_slabs(n); + nr_objs = node_nr_objs(n); + + printk(KERN_WARNING + " node %d: slabs: %ld, objs: %ld, free: %ld\n", + node, nr_slabs, nr_objs, nr_free); + } +} + /* * Slow path. The lockless freelist is empty or we need to perform * debugging duties. @@ -1595,6 +1655,8 @@ new_slab: c->page = new; goto load_freelist; } + if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit()) + slab_out_of_memory(s, gfpflags, node); return NULL; debug: if (!alloc_debug_processing(s, c->page, object, addr)) @@ -1624,7 +1686,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s, unsigned long flags; unsigned int objsize; - gfpflags &= slab_gfp_mask; + gfpflags &= gfp_allowed_mask; lockdep_trace_alloc(gfpflags); might_sleep_if(gfpflags & __GFP_WAIT); @@ -2636,6 +2698,7 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags) struct kmem_cache *s; char *text; size_t realsize; + unsigned long slabflags; s = kmalloc_caches_dma[index]; if (s) @@ -2657,10 +2720,18 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags) (unsigned int)realsize); s = kmalloc(kmem_size, flags & ~SLUB_DMA); + /* + * Must defer sysfs creation to a workqueue because we don't know + * what context we are called from. 
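
With slab_out_of_memory() added above, a failing SLUB allocation now logs a per-node utilization summary (rate-limited via printk_ratelimit() and suppressed for __GFP_NOWARN). Going by the format strings in the hunk, the output is shaped roughly like this; the numbers are invented for illustration:

        SLUB: Unable to allocate memory on node -1 (gfp=0xd0)
         cache: kmalloc-2048, object size: 2048, buffer size: 2048, default order: 1, min order: 0
         node 0: slabs: 214, objs: 856, free: 0
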
Before sysfs comes up, we don't + * need to do anything because our sysfs initcall will start by + * adding all existing slabs to sysfs. + */ + slabflags = SLAB_CACHE_DMA|SLAB_NOTRACK; + if (slab_state >= SYSFS) + slabflags |= __SYSFS_ADD_DEFERRED; + if (!s || !text || !kmem_cache_open(s, flags, text, - realsize, ARCH_KMALLOC_MINALIGN, - SLAB_CACHE_DMA|SLAB_NOTRACK|__SYSFS_ADD_DEFERRED, - NULL)) { + realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) { kfree(s); kfree(text); goto unlock_out; @@ -2669,7 +2740,8 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags) list_add(&s->list, &slab_caches); kmalloc_caches_dma[index] = s; - schedule_work(&sysfs_add_work); + if (slab_state >= SYSFS) + schedule_work(&sysfs_add_work); unlock_out: up_write(&slub_lock); @@ -3142,10 +3214,6 @@ void __init kmem_cache_init(void) void __init kmem_cache_init_late(void) { - /* - * Interrupts are enabled now so all GFP allocations are safe. - */ - slab_gfp_mask = __GFP_BITS_MASK; } /* @@ -3368,20 +3436,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, } #ifdef CONFIG_SLUB_DEBUG -static unsigned long count_partial(struct kmem_cache_node *n, - int (*get_count)(struct page *)) -{ - unsigned long flags; - unsigned long x = 0; - struct page *page; - - spin_lock_irqsave(&n->list_lock, flags); - list_for_each_entry(page, &n->partial, lru) - x += get_count(page); - spin_unlock_irqrestore(&n->list_lock, flags); - return x; -} - static int count_inuse(struct page *page) { return page->inuse; @@ -3392,11 +3446,6 @@ static int count_total(struct page *page) return page->objects; } -static int count_free(struct page *page) -{ - return page->objects - page->inuse; -} - static int validate_slab(struct kmem_cache *s, struct page *page, unsigned long *map) { diff --git a/mm/swapfile.c b/mm/swapfile.c index 28faa01..d1ade1a 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -583,8 +583,9 @@ static int swap_entry_free(struct swap_info_struct *p, swap_list.next = p - swap_info; nr_swap_pages++; p->inuse_pages--; - mem_cgroup_uncharge_swap(ent); } + if (!swap_count(count)) + mem_cgroup_uncharge_swap(ent); return count; } @@ -609,12 +610,19 @@ void swap_free(swp_entry_t entry) void swapcache_free(swp_entry_t entry, struct page *page) { struct swap_info_struct *p; + int ret; - if (page) - mem_cgroup_uncharge_swapcache(page, entry); p = swap_info_get(entry); if (p) { - swap_entry_free(p, entry, SWAP_CACHE); + ret = swap_entry_free(p, entry, SWAP_CACHE); + if (page) { + bool swapout; + if (ret) + swapout = true; /* the end of swap out */ + else + swapout = false; /* no more swap users! */ + mem_cgroup_uncharge_swapcache(page, entry, swapout); + } spin_unlock(&swap_lock); } return; @@ -168,6 +168,10 @@ EXPORT_SYMBOL(krealloc); * * The memory of the object @p points to is zeroed before freed. * If @p is %NULL, kzfree() does nothing. + * + * Note: this function zeroes the whole allocated buffer which can be a good + * deal bigger than the requested buffer size passed to kmalloc(). So be + * careful when using this function in performance sensitive code. 
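
The new kzfree() comment above exists because the zeroing is sized by the allocator, not by the original request. In effect (a sketch assuming it mirrors mm/util.c, plus a NULL/ZERO_SIZE_PTR guard), the function does:

        size_t ks = ksize(p);      /* true object size, >= what was requested */

        memset((void *)p, 0, ks);  /* may clear far more than the caller wrote */
        kfree(p);
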
*/ void kzfree(const void *p) { diff --git a/mm/vmscan.c b/mm/vmscan.c index 4139aa5..e8fa2d9 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -837,7 +837,6 @@ int __isolate_lru_page(struct page *page, int mode, int file) */ ClearPageLRU(page); ret = 0; - mem_cgroup_del_lru(page); } return ret; @@ -885,12 +884,14 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, switch (__isolate_lru_page(page, mode, file)) { case 0: list_move(&page->lru, dst); + mem_cgroup_del_lru(page); nr_taken++; break; case -EBUSY: /* else it is being freed elsewhere */ list_move(&page->lru, src); + mem_cgroup_rotate_lru_list(page, page_lru(page)); continue; default: @@ -931,6 +932,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, continue; if (__isolate_lru_page(cursor_page, mode, file) == 0) { list_move(&cursor_page->lru, dst); + mem_cgroup_del_lru(page); nr_taken++; scan++; } diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c index fd8e084..80caad1 100644 --- a/net/appletalk/atalk_proc.c +++ b/net/appletalk/atalk_proc.c @@ -204,8 +204,8 @@ static int atalk_seq_socket_show(struct seq_file *seq, void *v) "%02X %d\n", s->sk_type, ntohs(at->src_net), at->src_node, at->src_port, ntohs(at->dest_net), at->dest_node, at->dest_port, - atomic_read(&s->sk_wmem_alloc), - atomic_read(&s->sk_rmem_alloc), + sk_wmem_alloc_get(s), + sk_rmem_alloc_get(s), s->sk_state, SOCK_INODE(s->sk_socket)->i_uid); out: return 0; diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index b603cba..590b839 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -162,8 +162,7 @@ static void atalk_destroy_timer(unsigned long data) { struct sock *sk = (struct sock *)data; - if (atomic_read(&sk->sk_wmem_alloc) || - atomic_read(&sk->sk_rmem_alloc)) { + if (sk_has_allocations(sk)) { sk->sk_timer.expires = jiffies + SOCK_DESTROY_TIME; add_timer(&sk->sk_timer); } else @@ -175,8 +174,7 @@ static inline void atalk_destroy_socket(struct sock *sk) atalk_remove_socket(sk); skb_queue_purge(&sk->sk_receive_queue); - if (atomic_read(&sk->sk_wmem_alloc) || - atomic_read(&sk->sk_rmem_alloc)) { + if (sk_has_allocations(sk)) { setup_timer(&sk->sk_timer, atalk_destroy_timer, (unsigned long)sk); sk->sk_timer.expires = jiffies + SOCK_DESTROY_TIME; @@ -1750,8 +1748,7 @@ static int atalk_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) switch (cmd) { /* Protocol layer */ case TIOCOUTQ: { - long amount = sk->sk_sndbuf - - atomic_read(&sk->sk_wmem_alloc); + long amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; diff --git a/net/atm/common.c b/net/atm/common.c index d34edbe..c1c9793 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -62,15 +62,15 @@ static struct sk_buff *alloc_tx(struct atm_vcc *vcc,unsigned int size) struct sk_buff *skb; struct sock *sk = sk_atm(vcc); - if (atomic_read(&sk->sk_wmem_alloc) && !atm_may_send(vcc, size)) { + if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) { pr_debug("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n", - atomic_read(&sk->sk_wmem_alloc), size, + sk_wmem_alloc_get(sk), size, sk->sk_sndbuf); return NULL; } - while (!(skb = alloc_skb(size,GFP_KERNEL))) schedule(); - pr_debug("AlTx %d += %d\n", atomic_read(&sk->sk_wmem_alloc), - skb->truesize); + while (!(skb = alloc_skb(size, GFP_KERNEL))) + schedule(); + pr_debug("AlTx %d += %d\n", sk_wmem_alloc_get(sk), skb->truesize); atomic_add(skb->truesize, &sk->sk_wmem_alloc); return skb; } @@ -145,7 +145,7 @@ int vcc_create(struct net *net, struct socket *sock, int protocol, int 
family) memset(&vcc->local,0,sizeof(struct sockaddr_atmsvc)); memset(&vcc->remote,0,sizeof(struct sockaddr_atmsvc)); vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */ - atomic_set(&sk->sk_wmem_alloc, 0); + atomic_set(&sk->sk_wmem_alloc, 1); atomic_set(&sk->sk_rmem_alloc, 0); vcc->push = NULL; vcc->pop = NULL; diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c index 76ed3c8..4da8892 100644 --- a/net/atm/ioctl.c +++ b/net/atm/ioctl.c @@ -63,8 +63,7 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg error = -EINVAL; goto done; } - error = put_user(sk->sk_sndbuf - - atomic_read(&sk->sk_wmem_alloc), + error = put_user(sk->sk_sndbuf - sk_wmem_alloc_get(sk), (int __user *) argp) ? -EFAULT : 0; goto done; case SIOCINQ: diff --git a/net/atm/proc.c b/net/atm/proc.c index e7b3b273..38de5ff 100644 --- a/net/atm/proc.c +++ b/net/atm/proc.c @@ -204,8 +204,8 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc) seq_printf(seq, "%3d", sk->sk_family); } seq_printf(seq, " %04lx %5d %7d/%7d %7d/%7d [%d]\n", vcc->flags, sk->sk_err, - atomic_read(&sk->sk_wmem_alloc), sk->sk_sndbuf, - atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf, + sk_wmem_alloc_get(sk), sk->sk_sndbuf, + sk_rmem_alloc_get(sk), sk->sk_rcvbuf, atomic_read(&sk->sk_refcnt)); } diff --git a/net/atm/raw.c b/net/atm/raw.c index b0a2d8c..cbfcc71 100644 --- a/net/atm/raw.c +++ b/net/atm/raw.c @@ -33,7 +33,7 @@ static void atm_pop_raw(struct atm_vcc *vcc,struct sk_buff *skb) struct sock *sk = sk_atm(vcc); pr_debug("APopR (%d) %d -= %d\n", vcc->vci, - atomic_read(&sk->sk_wmem_alloc), skb->truesize); + sk_wmem_alloc_get(sk), skb->truesize); atomic_sub(skb->truesize, &sk->sk_wmem_alloc); dev_kfree_skb_any(skb); sk->sk_write_space(sk); diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index fd9d06f..da0f64f 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -330,8 +330,7 @@ void ax25_destroy_socket(ax25_cb *ax25) } if (ax25->sk != NULL) { - if (atomic_read(&ax25->sk->sk_wmem_alloc) || - atomic_read(&ax25->sk->sk_rmem_alloc)) { + if (sk_has_allocations(ax25->sk)) { /* Defer: outstanding buffers */ setup_timer(&ax25->dtimer, ax25_destroy_timer, (unsigned long)ax25); @@ -1691,7 +1690,8 @@ static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) switch (cmd) { case TIOCOUTQ: { long amount; - amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); + + amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; res = put_user(amount, (int __user *)argp); @@ -1781,8 +1781,8 @@ static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) ax25_info.idletimer = ax25_display_timer(&ax25->idletimer) / (60 * HZ); ax25_info.n2count = ax25->n2count; ax25_info.state = ax25->state; - ax25_info.rcv_q = atomic_read(&sk->sk_rmem_alloc); - ax25_info.snd_q = atomic_read(&sk->sk_wmem_alloc); + ax25_info.rcv_q = sk_wmem_alloc_get(sk); + ax25_info.snd_q = sk_rmem_alloc_get(sk); ax25_info.vs = ax25->vs; ax25_info.vr = ax25->vr; ax25_info.va = ax25->va; @@ -1922,8 +1922,8 @@ static int ax25_info_show(struct seq_file *seq, void *v) if (ax25->sk != NULL) { seq_printf(seq, " %d %d %lu\n", - atomic_read(&ax25->sk->sk_wmem_alloc), - atomic_read(&ax25->sk->sk_rmem_alloc), + sk_wmem_alloc_get(ax25->sk), + sk_rmem_alloc_get(ax25->sk), sock_i_ino(ax25->sk)); } else { seq_puts(seq, " * * *\n"); diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 02b9baa..0250e06 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -337,7 
+337,7 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) if (sk->sk_state == BT_LISTEN) return -EINVAL; - amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); + amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; err = put_user(amount, (int __user *) arg); diff --git a/net/core/dev.c b/net/core/dev.c index 576a615..baf2dc1 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3461,10 +3461,10 @@ void __dev_set_rx_mode(struct net_device *dev) /* Unicast addresses changes may only happen under the rtnl, * therefore calling __dev_set_promiscuity here is safe. */ - if (dev->uc_count > 0 && !dev->uc_promisc) { + if (dev->uc.count > 0 && !dev->uc_promisc) { __dev_set_promiscuity(dev, 1); dev->uc_promisc = 1; - } else if (dev->uc_count == 0 && dev->uc_promisc) { + } else if (dev->uc.count == 0 && dev->uc_promisc) { __dev_set_promiscuity(dev, -1); dev->uc_promisc = 0; } @@ -3483,9 +3483,8 @@ void dev_set_rx_mode(struct net_device *dev) /* hw addresses list handling functions */ -static int __hw_addr_add(struct list_head *list, int *delta, - unsigned char *addr, int addr_len, - unsigned char addr_type) +static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr, + int addr_len, unsigned char addr_type) { struct netdev_hw_addr *ha; int alloc_size; @@ -3493,7 +3492,7 @@ static int __hw_addr_add(struct list_head *list, int *delta, if (addr_len > MAX_ADDR_LEN) return -EINVAL; - list_for_each_entry(ha, list, list) { + list_for_each_entry(ha, &list->list, list) { if (!memcmp(ha->addr, addr, addr_len) && ha->type == addr_type) { ha->refcount++; @@ -3512,9 +3511,8 @@ static int __hw_addr_add(struct list_head *list, int *delta, ha->type = addr_type; ha->refcount = 1; ha->synced = false; - list_add_tail_rcu(&ha->list, list); - if (delta) - (*delta)++; + list_add_tail_rcu(&ha->list, &list->list); + list->count++; return 0; } @@ -3526,120 +3524,121 @@ static void ha_rcu_free(struct rcu_head *head) kfree(ha); } -static int __hw_addr_del(struct list_head *list, int *delta, - unsigned char *addr, int addr_len, - unsigned char addr_type) +static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr, + int addr_len, unsigned char addr_type) { struct netdev_hw_addr *ha; - list_for_each_entry(ha, list, list) { + list_for_each_entry(ha, &list->list, list) { if (!memcmp(ha->addr, addr, addr_len) && (ha->type == addr_type || !addr_type)) { if (--ha->refcount) return 0; list_del_rcu(&ha->list); call_rcu(&ha->rcu_head, ha_rcu_free); - if (delta) - (*delta)--; + list->count--; return 0; } } return -ENOENT; } -static int __hw_addr_add_multiple(struct list_head *to_list, int *to_delta, - struct list_head *from_list, int addr_len, +static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list, + struct netdev_hw_addr_list *from_list, + int addr_len, unsigned char addr_type) { int err; struct netdev_hw_addr *ha, *ha2; unsigned char type; - list_for_each_entry(ha, from_list, list) { + list_for_each_entry(ha, &from_list->list, list) { type = addr_type ? addr_type : ha->type; - err = __hw_addr_add(to_list, to_delta, ha->addr, - addr_len, type); + err = __hw_addr_add(to_list, ha->addr, addr_len, type); if (err) goto unroll; } return 0; unroll: - list_for_each_entry(ha2, from_list, list) { + list_for_each_entry(ha2, &from_list->list, list) { if (ha2 == ha) break; type = addr_type ? 
addr_type : ha2->type; - __hw_addr_del(to_list, to_delta, ha2->addr, - addr_len, type); + __hw_addr_del(to_list, ha2->addr, addr_len, type); } return err; } -static void __hw_addr_del_multiple(struct list_head *to_list, int *to_delta, - struct list_head *from_list, int addr_len, +static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, + struct netdev_hw_addr_list *from_list, + int addr_len, unsigned char addr_type) { struct netdev_hw_addr *ha; unsigned char type; - list_for_each_entry(ha, from_list, list) { + list_for_each_entry(ha, &from_list->list, list) { type = addr_type ? addr_type : ha->type; - __hw_addr_del(to_list, to_delta, ha->addr, - addr_len, addr_type); + __hw_addr_del(to_list, ha->addr, addr_len, addr_type); } } -static int __hw_addr_sync(struct list_head *to_list, int *to_delta, - struct list_head *from_list, int *from_delta, +static int __hw_addr_sync(struct netdev_hw_addr_list *to_list, + struct netdev_hw_addr_list *from_list, int addr_len) { int err = 0; struct netdev_hw_addr *ha, *tmp; - list_for_each_entry_safe(ha, tmp, from_list, list) { + list_for_each_entry_safe(ha, tmp, &from_list->list, list) { if (!ha->synced) { - err = __hw_addr_add(to_list, to_delta, ha->addr, + err = __hw_addr_add(to_list, ha->addr, addr_len, ha->type); if (err) break; ha->synced = true; ha->refcount++; } else if (ha->refcount == 1) { - __hw_addr_del(to_list, to_delta, ha->addr, - addr_len, ha->type); - __hw_addr_del(from_list, from_delta, ha->addr, - addr_len, ha->type); + __hw_addr_del(to_list, ha->addr, addr_len, ha->type); + __hw_addr_del(from_list, ha->addr, addr_len, ha->type); } } return err; } -static void __hw_addr_unsync(struct list_head *to_list, int *to_delta, - struct list_head *from_list, int *from_delta, +static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, + struct netdev_hw_addr_list *from_list, int addr_len) { struct netdev_hw_addr *ha, *tmp; - list_for_each_entry_safe(ha, tmp, from_list, list) { + list_for_each_entry_safe(ha, tmp, &from_list->list, list) { if (ha->synced) { - __hw_addr_del(to_list, to_delta, ha->addr, + __hw_addr_del(to_list, ha->addr, addr_len, ha->type); ha->synced = false; - __hw_addr_del(from_list, from_delta, ha->addr, + __hw_addr_del(from_list, ha->addr, addr_len, ha->type); } } } - -static void __hw_addr_flush(struct list_head *list) +static void __hw_addr_flush(struct netdev_hw_addr_list *list) { struct netdev_hw_addr *ha, *tmp; - list_for_each_entry_safe(ha, tmp, list, list) { + list_for_each_entry_safe(ha, tmp, &list->list, list) { list_del_rcu(&ha->list); call_rcu(&ha->rcu_head, ha_rcu_free); } + list->count = 0; +} + +static void __hw_addr_init(struct netdev_hw_addr_list *list) +{ + INIT_LIST_HEAD(&list->list); + list->count = 0; } /* Device addresses handling functions */ @@ -3648,7 +3647,7 @@ static void dev_addr_flush(struct net_device *dev) { /* rtnl_mutex must be held here */ - __hw_addr_flush(&dev->dev_addr_list); + __hw_addr_flush(&dev->dev_addrs); dev->dev_addr = NULL; } @@ -3660,16 +3659,16 @@ static int dev_addr_init(struct net_device *dev) /* rtnl_mutex must be held here */ - INIT_LIST_HEAD(&dev->dev_addr_list); + __hw_addr_init(&dev->dev_addrs); memset(addr, 0, sizeof(addr)); - err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, sizeof(addr), + err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr), NETDEV_HW_ADDR_T_LAN); if (!err) { /* * Get the first (previously created) address from the list * and set dev_addr pointer to this location. 
*/ - ha = list_first_entry(&dev->dev_addr_list, + ha = list_first_entry(&dev->dev_addrs.list, struct netdev_hw_addr, list); dev->dev_addr = ha->addr; } @@ -3694,8 +3693,7 @@ int dev_addr_add(struct net_device *dev, unsigned char *addr, ASSERT_RTNL(); - err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, dev->addr_len, - addr_type); + err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type); if (!err) call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); return err; @@ -3725,11 +3723,12 @@ int dev_addr_del(struct net_device *dev, unsigned char *addr, * We can not remove the first address from the list because * dev->dev_addr points to that. */ - ha = list_first_entry(&dev->dev_addr_list, struct netdev_hw_addr, list); + ha = list_first_entry(&dev->dev_addrs.list, + struct netdev_hw_addr, list); if (ha->addr == dev->dev_addr && ha->refcount == 1) return -ENOENT; - err = __hw_addr_del(&dev->dev_addr_list, NULL, addr, dev->addr_len, + err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len, addr_type); if (!err) call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); @@ -3757,8 +3756,7 @@ int dev_addr_add_multiple(struct net_device *to_dev, if (from_dev->addr_len != to_dev->addr_len) return -EINVAL; - err = __hw_addr_add_multiple(&to_dev->dev_addr_list, NULL, - &from_dev->dev_addr_list, + err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs, to_dev->addr_len, addr_type); if (!err) call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); @@ -3784,15 +3782,14 @@ int dev_addr_del_multiple(struct net_device *to_dev, if (from_dev->addr_len != to_dev->addr_len) return -EINVAL; - __hw_addr_del_multiple(&to_dev->dev_addr_list, NULL, - &from_dev->dev_addr_list, + __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs, to_dev->addr_len, addr_type); call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); return 0; } EXPORT_SYMBOL(dev_addr_del_multiple); -/* unicast and multicast addresses handling functions */ +/* multicast addresses handling functions */ int __dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int glbl) @@ -3868,8 +3865,8 @@ int dev_unicast_delete(struct net_device *dev, void *addr) ASSERT_RTNL(); - err = __hw_addr_del(&dev->uc_list, &dev->uc_count, addr, - dev->addr_len, NETDEV_HW_ADDR_T_UNICAST); + err = __hw_addr_del(&dev->uc, addr, dev->addr_len, + NETDEV_HW_ADDR_T_UNICAST); if (!err) __dev_set_rx_mode(dev); return err; @@ -3892,8 +3889,8 @@ int dev_unicast_add(struct net_device *dev, void *addr) ASSERT_RTNL(); - err = __hw_addr_add(&dev->uc_list, &dev->uc_count, addr, - dev->addr_len, NETDEV_HW_ADDR_T_UNICAST); + err = __hw_addr_add(&dev->uc, addr, dev->addr_len, + NETDEV_HW_ADDR_T_UNICAST); if (!err) __dev_set_rx_mode(dev); return err; @@ -3966,8 +3963,7 @@ int dev_unicast_sync(struct net_device *to, struct net_device *from) if (to->addr_len != from->addr_len) return -EINVAL; - err = __hw_addr_sync(&to->uc_list, &to->uc_count, - &from->uc_list, &from->uc_count, to->addr_len); + err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len); if (!err) __dev_set_rx_mode(to); return err; @@ -3990,8 +3986,7 @@ void dev_unicast_unsync(struct net_device *to, struct net_device *from) if (to->addr_len != from->addr_len) return; - __hw_addr_unsync(&to->uc_list, &to->uc_count, - &from->uc_list, &from->uc_count, to->addr_len); + __hw_addr_unsync(&to->uc, &from->uc, to->addr_len); __dev_set_rx_mode(to); } EXPORT_SYMBOL(dev_unicast_unsync); @@ -4000,15 +3995,14 @@ static void dev_unicast_flush(struct net_device *dev) { /* rtnl_mutex must be held here 
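
The address-list rework above replaces the loose pairs of struct list_head plus external count/delta pointers with one aggregate type, so the count can no longer drift from the list it describes. The type lives in include/linux/netdevice.h and is presumably just:

        /* include/linux/netdevice.h -- shown for context, not in this hunk */
        struct netdev_hw_addr_list {
                struct list_head list;
                int count;
        };

Accordingly dev->uc_list/dev->uc_count collapse into dev->uc, which is why __dev_set_rx_mode() earlier in this diff now tests dev->uc.count.
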
*/ - __hw_addr_flush(&dev->uc_list); - dev->uc_count = 0; + __hw_addr_flush(&dev->uc); } static void dev_unicast_init(struct net_device *dev) { /* rtnl_mutex must be held here */ - INIT_LIST_HEAD(&dev->uc_list); + __hw_addr_init(&dev->uc); } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 5c93435..9e0597d 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -204,6 +204,10 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, skb->end = skb->tail + size; kmemcheck_annotate_bitfield(skb, flags1); kmemcheck_annotate_bitfield(skb, flags2); +#ifdef NET_SKBUFF_DATA_USES_OFFSET + skb->mac_header = ~0U; +#endif + /* make sure we initialize shinfo sequentially */ shinfo = skb_shinfo(skb); atomic_set(&shinfo->dataref, 1); @@ -665,7 +669,8 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) /* {transport,network,mac}_header are relative to skb->head */ new->transport_header += offset; new->network_header += offset; - new->mac_header += offset; + if (skb_mac_header_was_set(new)) + new->mac_header += offset; #endif skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; @@ -847,7 +852,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, skb->tail += off; skb->transport_header += off; skb->network_header += off; - skb->mac_header += off; + if (skb_mac_header_was_set(skb)) + skb->mac_header += off; skb->csum_start += nhead; skb->cloned = 0; skb->hdr_len = 0; @@ -939,7 +945,8 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb, #ifdef NET_SKBUFF_DATA_USES_OFFSET n->transport_header += off; n->network_header += off; - n->mac_header += off; + if (skb_mac_header_was_set(skb)) + n->mac_header += off; #endif return n; diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index a5e3a59..d351b8d 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -1240,7 +1240,7 @@ static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) return val; case TIOCOUTQ: - amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); + amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; err = put_user(amount, (int __user *)arg); diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index 8121bf0..2e1f836 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c @@ -540,8 +540,7 @@ static void econet_destroy_timer(unsigned long data) { struct sock *sk=(struct sock *)data; - if (!atomic_read(&sk->sk_wmem_alloc) && - !atomic_read(&sk->sk_rmem_alloc)) { + if (!sk_has_allocations(sk)) { sk_free(sk); return; } @@ -579,8 +578,7 @@ static int econet_release(struct socket *sock) skb_queue_purge(&sk->sk_receive_queue); - if (atomic_read(&sk->sk_rmem_alloc) || - atomic_read(&sk->sk_wmem_alloc)) { + if (sk_has_allocations(sk)) { sk->sk_timer.data = (unsigned long)sk; sk->sk_timer.expires = jiffies + HZ; sk->sk_timer.function = econet_destroy_timer; diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c index 1779677..14d3984 100644 --- a/net/ieee802154/dgram.c +++ b/net/ieee802154/dgram.c @@ -126,7 +126,8 @@ static int dgram_ioctl(struct sock *sk, int cmd, unsigned long arg) switch (cmd) { case SIOCOUTQ: { - int amount = atomic_read(&sk->sk_wmem_alloc); + int amount = sk_wmem_alloc_get(sk); + return put_user(amount, (int __user *)arg); } diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index d1a39b1..012cf5a 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -391,13 +391,8 @@ static inline void tnode_free(struct tnode 
*tn) static void tnode_free_safe(struct tnode *tn) { BUG_ON(IS_LEAF(tn)); - - if (node_parent((struct node *) tn)) { - tn->tnode_free = tnode_free_head; - tnode_free_head = tn; - } else { - tnode_free(tn); - } + tn->tnode_free = tnode_free_head; + tnode_free_head = tn; } static void tnode_free_flush(void) @@ -1009,7 +1004,7 @@ fib_find_node(struct trie *t, u32 key) return NULL; } -static struct node *trie_rebalance(struct trie *t, struct tnode *tn) +static void trie_rebalance(struct trie *t, struct tnode *tn) { int wasfull; t_key cindex, key; @@ -1033,12 +1028,13 @@ static struct node *trie_rebalance(struct trie *t, struct tnode *tn) } /* Handle last (top) tnode */ - if (IS_TNODE(tn)) { + if (IS_TNODE(tn)) tn = (struct tnode *)resize(t, (struct tnode *)tn); - tnode_free_flush(); - } - return (struct node *)tn; + rcu_assign_pointer(t->trie, (struct node *)tn); + tnode_free_flush(); + + return; } /* only used from updater-side */ @@ -1186,7 +1182,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen) /* Rebalance the trie */ - rcu_assign_pointer(t->trie, trie_rebalance(t, tp)); + trie_rebalance(t, tp); done: return fa_head; } @@ -1605,7 +1601,7 @@ static void trie_leaf_remove(struct trie *t, struct leaf *l) if (tp) { t_key cindex = tkey_extract_bits(l->key, tp->pos, tp->bits); put_child(t, (struct tnode *)tp, cindex, NULL); - rcu_assign_pointer(t->trie, trie_rebalance(t, tp)); + trie_rebalance(t, tp); } else rcu_assign_pointer(t->trie, NULL); diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index b0b2735..a706a47 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -156,10 +156,10 @@ static int inet_csk_diag_fill(struct sock *sk, r->idiag_inode = sock_i_ino(sk); if (minfo) { - minfo->idiag_rmem = atomic_read(&sk->sk_rmem_alloc); + minfo->idiag_rmem = sk_rmem_alloc_get(sk); minfo->idiag_wmem = sk->sk_wmem_queued; minfo->idiag_fmem = sk->sk_forward_alloc; - minfo->idiag_tmem = atomic_read(&sk->sk_wmem_alloc); + minfo->idiag_tmem = sk_wmem_alloc_get(sk); } handler->idiag_get_info(sk, r, info); diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 3dc9171..2979f14 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -799,7 +799,8 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg) { switch (cmd) { case SIOCOUTQ: { - int amount = atomic_read(&sk->sk_wmem_alloc); + int amount = sk_wmem_alloc_get(sk); + return put_user(amount, (int __user *)arg); } case SIOCINQ: { @@ -935,8 +936,8 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) seq_printf(seq, "%4d: %08X:%04X %08X:%04X" " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", i, src, srcp, dest, destp, sp->sk_state, - atomic_read(&sp->sk_wmem_alloc), - atomic_read(&sp->sk_rmem_alloc), + sk_wmem_alloc_get(sp), + sk_rmem_alloc_get(sp), 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 8f4158d..80e3812 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -840,7 +840,8 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) switch (cmd) { case SIOCOUTQ: { - int amount = atomic_read(&sk->sk_wmem_alloc); + int amount = sk_wmem_alloc_get(sk); + return put_user(amount, (int __user *)arg); } @@ -1721,8 +1722,8 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f, seq_printf(f, "%4d: %08X:%04X %08X:%04X" " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n", bucket, src, srcp, dest, destp, sp->sk_state, - atomic_read(&sp->sk_wmem_alloc), - 
atomic_read(&sp->sk_rmem_alloc), + sk_wmem_alloc_get(sp), + sk_rmem_alloc_get(sp), 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops), len); diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 36a090d..8b0b6f9 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -1130,7 +1130,8 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg) switch(cmd) { case SIOCOUTQ: { - int amount = atomic_read(&sk->sk_wmem_alloc); + int amount = sk_wmem_alloc_get(sk); + return put_user(amount, (int __user *)arg); } case SIOCINQ: @@ -1236,8 +1237,8 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) dest->s6_addr32[0], dest->s6_addr32[1], dest->s6_addr32[2], dest->s6_addr32[3], destp, sp->sk_state, - atomic_read(&sp->sk_wmem_alloc), - atomic_read(&sp->sk_rmem_alloc), + sk_wmem_alloc_get(sp), + sk_rmem_alloc_get(sp), 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index fc333d8..023beda 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1061,8 +1061,8 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket dest->s6_addr32[0], dest->s6_addr32[1], dest->s6_addr32[2], dest->s6_addr32[3], destp, sp->sk_state, - atomic_read(&sp->sk_wmem_alloc), - atomic_read(&sp->sk_rmem_alloc), + sk_wmem_alloc_get(sp), + sk_rmem_alloc_get(sp), 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index 1627050..417b0e3 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c @@ -1835,7 +1835,7 @@ static int ipx_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) switch (cmd) { case TIOCOUTQ: - amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); + amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; rc = put_user(amount, (int __user *)argp); diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c index 5ed97ad..5761784 100644 --- a/net/ipx/ipx_proc.c +++ b/net/ipx/ipx_proc.c @@ -280,8 +280,8 @@ static int ipx_seq_socket_show(struct seq_file *seq, void *v) } seq_printf(seq, "%08X %08X %02X %03d\n", - atomic_read(&s->sk_wmem_alloc), - atomic_read(&s->sk_rmem_alloc), + sk_wmem_alloc_get(s), + sk_rmem_alloc_get(s), s->sk_state, SOCK_INODE(s->sk_socket)->i_uid); out: return 0; diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 3eb5bcc..5922feb 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -1762,7 +1762,8 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) switch (cmd) { case TIOCOUTQ: { long amount; - amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); + + amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; if (put_user(amount, (unsigned int __user *)arg)) diff --git a/net/key/af_key.c b/net/key/af_key.c index 643c1be..dba9abd 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -3662,8 +3662,8 @@ static int pfkey_seq_show(struct seq_file *f, void *v) seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n", s, atomic_read(&s->sk_refcnt), - atomic_read(&s->sk_rmem_alloc), - atomic_read(&s->sk_wmem_alloc), + sk_rmem_alloc_get(s), + sk_wmem_alloc_get(s), sock_i_uid(s), sock_i_ino(s) ); diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c index d208b33..f97be47 100644 --- a/net/llc/llc_proc.c +++ b/net/llc/llc_proc.c @@ -134,8 +134,8 @@ static int llc_seq_socket_show(struct seq_file *seq, void *v) seq_printf(seq, "@%02X ", llc->sap->laddr.lsap); llc_ui_format_mac(seq, llc->daddr.mac); seq_printf(seq, "@%02X %8d %8d %2d %3d %4d\n", 
llc->daddr.lsap, - atomic_read(&sk->sk_wmem_alloc), - atomic_read(&sk->sk_rmem_alloc) - llc->copied_seq, + sk_wmem_alloc_get(sk), + sk_rmem_alloc_get(sk) - llc->copied_seq, sk->sk_state, sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : -1, llc->link); diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 11c7231..6c439cd 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -163,6 +163,29 @@ static const struct file_operations noack_ops = { .open = mac80211_open_file_generic }; +static ssize_t queues_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + unsigned long flags; + char buf[IEEE80211_MAX_QUEUES * 20]; + int q, res = 0; + + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + for (q = 0; q < local->hw.queues; q++) + res += sprintf(buf + res, "%02d: %#.8lx/%d\n", q, + local->queue_stop_reasons[q], + __netif_subqueue_stopped(local->mdev, q)); + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + + return simple_read_from_buffer(user_buf, count, ppos, buf, res); +} + +static const struct file_operations queues_ops = { + .read = queues_read, + .open = mac80211_open_file_generic +}; + /* statistics stuff */ #define DEBUGFS_STATS_FILE(name, buflen, fmt, value...) \ @@ -298,6 +321,7 @@ void debugfs_hw_add(struct ieee80211_local *local) DEBUGFS_ADD(total_ps_buffered); DEBUGFS_ADD(wep_iv); DEBUGFS_ADD(tsf); + DEBUGFS_ADD(queues); DEBUGFS_ADD_MODE(reset, 0200); DEBUGFS_ADD(noack); @@ -350,6 +374,7 @@ void debugfs_hw_del(struct ieee80211_local *local) DEBUGFS_DEL(total_ps_buffered); DEBUGFS_DEL(wep_iv); DEBUGFS_DEL(tsf); + DEBUGFS_DEL(queues); DEBUGFS_DEL(reset); DEBUGFS_DEL(noack); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 4dbc289..68eb505 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -783,6 +783,7 @@ struct ieee80211_local { struct dentry *total_ps_buffered; struct dentry *wep_iv; struct dentry *tsf; + struct dentry *queues; struct dentry *reset; struct dentry *noack; struct dentry *statistics; @@ -1100,7 +1101,6 @@ void ieee802_11_parse_elems(u8 *start, size_t len, u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, struct ieee802_11_elems *elems, u64 filter, u32 crc); -int ieee80211_set_freq(struct ieee80211_sub_if_data *sdata, int freq); u32 ieee80211_mandatory_rates(struct ieee80211_local *local, enum ieee80211_band band); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index d779c57..aca22b0 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1102,14 +1102,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, struct sta_info *sta; u32 changed = 0, config_changed = 0; - rcu_read_lock(); - - sta = sta_info_get(local, ifmgd->bssid); - if (!sta) { - rcu_read_unlock(); - return; - } - if (deauth) { ifmgd->direct_probe_tries = 0; ifmgd->auth_tries = 0; @@ -1120,7 +1112,11 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, netif_tx_stop_all_queues(sdata->dev); netif_carrier_off(sdata->dev); - ieee80211_sta_tear_down_BA_sessions(sta); + rcu_read_lock(); + sta = sta_info_get(local, ifmgd->bssid); + if (sta) + ieee80211_sta_tear_down_BA_sessions(sta); + rcu_read_unlock(); bss = ieee80211_rx_bss_get(local, ifmgd->bssid, conf->channel->center_freq, @@ -1156,8 +1152,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, ifmgd->ssid, ifmgd->ssid_len); } - rcu_read_unlock(); - ieee80211_set_wmm_default(sdata); 
ieee80211_recalc_idle(local); @@ -2223,7 +2217,10 @@ static int ieee80211_sta_config_auth(struct ieee80211_sub_if_data *sdata) capa_mask, capa_val); if (bss) { - ieee80211_set_freq(sdata, bss->cbss.channel->center_freq); + local->oper_channel = bss->cbss.channel; + local->oper_channel_type = NL80211_CHAN_NO_HT; + ieee80211_hw_config(local, 0); + if (!(ifmgd->flags & IEEE80211_STA_SSID_SET)) ieee80211_sta_set_ssid(sdata, bss->ssid, bss->ssid_len); @@ -2445,6 +2442,14 @@ void ieee80211_sta_req_auth(struct ieee80211_sub_if_data *sdata) ieee80211_set_disassoc(sdata, true, true, WLAN_REASON_DEAUTH_LEAVING); + if (ifmgd->ssid_len == 0) { + /* + * Only allow association to be started if a valid SSID + * is configured. + */ + return; + } + if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) || ifmgd->state != IEEE80211_STA_MLME_ASSOCIATE) set_bit(IEEE80211_STA_REQ_AUTH, &ifmgd->request); @@ -2476,6 +2481,10 @@ int ieee80211_sta_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size ifmgd = &sdata->u.mgd; if (ifmgd->ssid_len != len || memcmp(ifmgd->ssid, ssid, len) != 0) { + if (ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED) + ieee80211_set_disassoc(sdata, true, true, + WLAN_REASON_DEAUTH_LEAVING); + /* * Do not use reassociation if SSID is changed (different ESS). */ @@ -2500,6 +2509,11 @@ int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + if (compare_ether_addr(bssid, ifmgd->bssid) != 0 && + ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED) + ieee80211_set_disassoc(sdata, true, true, + WLAN_REASON_DEAUTH_LEAVING); + if (is_valid_ether_addr(bssid)) { memcpy(ifmgd->bssid, bssid, ETH_ALEN); ifmgd->flags |= IEEE80211_STA_BSSID_SET; diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 66ce96a..915e777 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -774,31 +774,6 @@ void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, dev_queue_xmit(skb); } -int ieee80211_set_freq(struct ieee80211_sub_if_data *sdata, int freqMHz) -{ - int ret = -EINVAL; - struct ieee80211_channel *chan; - struct ieee80211_local *local = sdata->local; - - chan = ieee80211_get_channel(local->hw.wiphy, freqMHz); - - if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) { - if (sdata->vif.type == NL80211_IFTYPE_ADHOC && - chan->flags & IEEE80211_CHAN_NO_IBSS) - return ret; - local->oper_channel = chan; - local->oper_channel_type = NL80211_CHAN_NO_HT; - - if (local->sw_scanning || local->hw_scanning) - ret = 0; - else - ret = ieee80211_hw_config( - local, IEEE80211_CONF_CHANGE_CHANNEL); - } - - return ret; -} - u32 ieee80211_mandatory_rates(struct ieee80211_local *local, enum ieee80211_band band) { diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c index d2d81b1..1da81f4 100644 --- a/net/mac80211/wext.c +++ b/net/mac80211/wext.c @@ -55,6 +55,8 @@ static int ieee80211_ioctl_siwfreq(struct net_device *dev, struct iw_freq *freq, char *extra) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct ieee80211_channel *chan; if (sdata->vif.type == NL80211_IFTYPE_ADHOC) return cfg80211_ibss_wext_siwfreq(dev, info, freq, extra); @@ -69,17 +71,38 @@ static int ieee80211_ioctl_siwfreq(struct net_device *dev, IEEE80211_STA_AUTO_CHANNEL_SEL; return 0; } else - return ieee80211_set_freq(sdata, + chan = ieee80211_get_channel(local->hw.wiphy, ieee80211_channel_to_frequency(freq->m)); } else { int i, div = 1000000; for (i = 0; i < freq->e; i++) div /= 10; - 
if (div > 0) - return ieee80211_set_freq(sdata, freq->m / div); - else + if (div <= 0) return -EINVAL; + chan = ieee80211_get_channel(local->hw.wiphy, freq->m / div); } + + if (!chan) + return -EINVAL; + + if (chan->flags & IEEE80211_CHAN_DISABLED) + return -EINVAL; + + /* + * no change except maybe auto -> fixed, ignore the HT + * setting so you can fix a channel you're on already + */ + if (local->oper_channel == chan) + return 0; + + if (sdata->vif.type == NL80211_IFTYPE_STATION) + ieee80211_sta_req_auth(sdata); + + local->oper_channel = chan; + local->oper_channel_type = NL80211_CHAN_NO_HT; + ieee80211_hw_config(local, 0); + + return 0; } diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 8b6bbb3..2936fa3 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1914,8 +1914,8 @@ static int netlink_seq_show(struct seq_file *seq, void *v) s->sk_protocol, nlk->pid, nlk->groups ? (u32)nlk->groups[0] : 0, - atomic_read(&s->sk_rmem_alloc), - atomic_read(&s->sk_wmem_alloc), + sk_rmem_alloc_get(s), + sk_wmem_alloc_get(s), nlk->cb, atomic_read(&s->sk_refcnt), atomic_read(&s->sk_drops) diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 3be0e01..ce51ce0 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -286,8 +286,7 @@ void nr_destroy_socket(struct sock *sk) kfree_skb(skb); } - if (atomic_read(&sk->sk_wmem_alloc) || - atomic_read(&sk->sk_rmem_alloc)) { + if (sk_has_allocations(sk)) { /* Defer: outstanding buffers */ sk->sk_timer.function = nr_destroy_timer; sk->sk_timer.expires = jiffies + 2 * HZ; @@ -1206,7 +1205,7 @@ static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) long amount; lock_sock(sk); - amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); + amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; release_sock(sk); @@ -1342,8 +1341,8 @@ static int nr_info_show(struct seq_file *seq, void *v) nr->n2count, nr->n2, nr->window, - atomic_read(&s->sk_wmem_alloc), - atomic_read(&s->sk_rmem_alloc), + sk_wmem_alloc_get(s), + sk_rmem_alloc_get(s), s->sk_socket ? 
SOCK_INODE(s->sk_socket)->i_ino : 0L); bh_unlock_sock(s); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 4f76e55..ebe5718 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1987,7 +1987,8 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd, switch (cmd) { case SIOCOUTQ: { - int amount = atomic_read(&sk->sk_wmem_alloc); + int amount = sk_wmem_alloc_get(sk); + return put_user(amount, (int __user *)arg); } case SIOCINQ: diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 877a7f6..6bd8e938 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -356,8 +356,7 @@ void rose_destroy_socket(struct sock *sk) kfree_skb(skb); } - if (atomic_read(&sk->sk_wmem_alloc) || - atomic_read(&sk->sk_rmem_alloc)) { + if (sk_has_allocations(sk)) { /* Defer: outstanding buffers */ setup_timer(&sk->sk_timer, rose_destroy_timer, (unsigned long)sk); @@ -1310,7 +1309,8 @@ static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) switch (cmd) { case TIOCOUTQ: { long amount; - amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); + + amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; return put_user(amount, (unsigned int __user *) argp); @@ -1481,8 +1481,8 @@ static int rose_info_show(struct seq_file *seq, void *v) rose->hb / HZ, ax25_display_timer(&rose->idletimer) / (60 * HZ), rose->idle / (60 * HZ), - atomic_read(&s->sk_wmem_alloc), - atomic_read(&s->sk_rmem_alloc), + sk_wmem_alloc_get(s), + sk_rmem_alloc_get(s), s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L); } diff --git a/net/sched/act_police.c b/net/sched/act_police.c index f8f047b6..723964c 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -294,6 +294,8 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a, if (police->tcfp_ewma_rate && police->tcf_rate_est.bps >= police->tcfp_ewma_rate) { police->tcf_qstats.overlimits++; + if (police->tcf_action == TC_ACT_SHOT) + police->tcf_qstats.drops++; spin_unlock(&police->tcf_lock); return police->tcf_action; } @@ -327,6 +329,8 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a, } police->tcf_qstats.overlimits++; + if (police->tcf_action == TC_ACT_SHOT) + police->tcf_qstats.drops++; spin_unlock(&police->tcf_lock); return police->tcf_action; } diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index 266151a..18d85d2 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c @@ -349,13 +349,13 @@ META_COLLECTOR(int_sk_type) META_COLLECTOR(int_sk_rmem_alloc) { SKIP_NONLOCAL(skb); - dst->value = atomic_read(&skb->sk->sk_rmem_alloc); + dst->value = sk_rmem_alloc_get(skb->sk); } META_COLLECTOR(int_sk_wmem_alloc) { SKIP_NONLOCAL(skb); - dst->value = atomic_read(&skb->sk->sk_wmem_alloc); + dst->value = sk_wmem_alloc_get(skb->sk); } META_COLLECTOR(int_sk_omem_alloc) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 0f01e5d..35ba035 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -130,7 +130,7 @@ static inline int sctp_wspace(struct sctp_association *asoc) if (asoc->ep->sndbuf_policy) amt = asoc->sndbuf_used; else - amt = atomic_read(&asoc->base.sk->sk_wmem_alloc); + amt = sk_wmem_alloc_get(asoc->base.sk); if (amt >= asoc->base.sk->sk_sndbuf) { if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK) @@ -6523,7 +6523,7 @@ static int sctp_writeable(struct sock *sk) { int amt = 0; - amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); + amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amt < 0) amt = 0; return amt; diff --git a/net/unix/af_unix.c 
b/net/unix/af_unix.c index 9dcc6e7..36d4e44 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1946,7 +1946,7 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) switch (cmd) { case SIOCOUTQ: - amount = atomic_read(&sk->sk_wmem_alloc); + amount = sk_wmem_alloc_get(sk); err = put_user(amount, (int __user *)arg); break; case SIOCINQ: diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index ed80af8..21cdc87 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -332,14 +332,14 @@ static unsigned int x25_new_lci(struct x25_neigh *nb) /* * Deferred destroy. */ -void x25_destroy_socket(struct sock *); +static void __x25_destroy_socket(struct sock *); /* * handler for deferred kills. */ static void x25_destroy_timer(unsigned long data) { - x25_destroy_socket((struct sock *)data); + x25_destroy_socket_from_timer((struct sock *)data); } /* @@ -349,12 +349,10 @@ static void x25_destroy_timer(unsigned long data) * will touch it and we are (fairly 8-) ) safe. * Not static as it's used by the timer */ -void x25_destroy_socket(struct sock *sk) +static void __x25_destroy_socket(struct sock *sk) { struct sk_buff *skb; - sock_hold(sk); - lock_sock(sk); x25_stop_heartbeat(sk); x25_stop_timer(sk); @@ -374,8 +372,7 @@ void x25_destroy_socket(struct sock *sk) kfree_skb(skb); } - if (atomic_read(&sk->sk_wmem_alloc) || - atomic_read(&sk->sk_rmem_alloc)) { + if (sk_has_allocations(sk)) { /* Defer: outstanding buffers */ sk->sk_timer.expires = jiffies + 10 * HZ; sk->sk_timer.function = x25_destroy_timer; @@ -385,7 +382,22 @@ void x25_destroy_socket(struct sock *sk) /* drop last reference so sock_put will free */ __sock_put(sk); } +} + +void x25_destroy_socket_from_timer(struct sock *sk) +{ + sock_hold(sk); + bh_lock_sock(sk); + __x25_destroy_socket(sk); + bh_unlock_sock(sk); + sock_put(sk); +} +static void x25_destroy_socket(struct sock *sk) +{ + sock_hold(sk); + lock_sock(sk); + __x25_destroy_socket(sk); release_sock(sk); sock_put(sk); } @@ -1259,8 +1271,8 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) switch (cmd) { case TIOCOUTQ: { - int amount = sk->sk_sndbuf - - atomic_read(&sk->sk_wmem_alloc); + int amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); + if (amount < 0) amount = 0; rc = put_user(amount, (unsigned int __user *)argp); diff --git a/net/x25/x25_proc.c b/net/x25/x25_proc.c index 1afa44d..0a04e62 100644 --- a/net/x25/x25_proc.c +++ b/net/x25/x25_proc.c @@ -163,8 +163,8 @@ static int x25_seq_socket_show(struct seq_file *seq, void *v) devname, x25->lci & 0x0FFF, x25->state, x25->vs, x25->vr, x25->va, x25_display_timer(s) / HZ, x25->t2 / HZ, x25->t21 / HZ, x25->t22 / HZ, x25->t23 / HZ, - atomic_read(&s->sk_wmem_alloc), - atomic_read(&s->sk_rmem_alloc), + sk_wmem_alloc_get(s), + sk_rmem_alloc_get(s), s->sk_socket ? 
SOCK_INODE(s->sk_socket)->i_ino : 0L); out: return 0; } diff --git a/net/x25/x25_timer.c b/net/x25/x25_timer.c index d3e3e54..5c5db1a 100644 --- a/net/x25/x25_timer.c +++ b/net/x25/x25_timer.c @@ -113,7 +113,7 @@ static void x25_heartbeat_expiry(unsigned long param) (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { bh_unlock_sock(sk); - x25_destroy_socket(sk); + x25_destroy_socket_from_timer(sk); return; } break; diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 2b70661..7a77787 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -116,6 +116,17 @@ _a_flags = $(KBUILD_CPPFLAGS) $(KBUILD_AFLAGS) $(KBUILD_SUBDIR_ASFLAGS) \ $(asflags-y) $(AFLAGS_$(basetarget).o) _cpp_flags = $(KBUILD_CPPFLAGS) $(cppflags-y) $(CPPFLAGS_$(@F)) +# +# Enable gcov profiling flags for a file, directory or for all files depending +# on variables GCOV_PROFILE_obj.o, GCOV_PROFILE and CONFIG_GCOV_PROFILE_ALL +# (in this order) +# +ifeq ($(CONFIG_GCOV_KERNEL),y) +_c_flags += $(if $(patsubst n%,, \ + $(GCOV_PROFILE_$(basetarget).o)$(GCOV_PROFILE)$(CONFIG_GCOV_PROFILE_ALL)), \ + $(CFLAGS_GCOV)) +endif + # If building the kernel in a separate objtree expand all occurrences # of -Idir to -I$(srctree)/dir except for absolute paths (starting with '/'). diff --git a/security/device_cgroup.c b/security/device_cgroup.c index 5fda7df..b8186ba 100644 --- a/security/device_cgroup.c +++ b/security/device_cgroup.c @@ -490,7 +490,7 @@ int devcgroup_inode_permission(struct inode *inode, int mask) list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) { if (wh->type & DEV_ALL) - goto acc_check; + goto found; if ((wh->type & DEV_BLOCK) && !S_ISBLK(inode->i_mode)) continue; if ((wh->type & DEV_CHAR) && !S_ISCHR(inode->i_mode)) @@ -499,11 +499,12 @@ int devcgroup_inode_permission(struct inode *inode, int mask) continue; if (wh->minor != ~0 && wh->minor != iminor(inode)) continue; -acc_check: + if ((mask & MAY_WRITE) && !(wh->access & ACC_WRITE)) continue; if ((mask & MAY_READ) && !(wh->access & ACC_READ)) continue; +found: rcu_read_unlock(); return 0; } @@ -527,7 +528,7 @@ int devcgroup_inode_mknod(int mode, dev_t dev) list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) { if (wh->type & DEV_ALL) - goto acc_check; + goto found; if ((wh->type & DEV_BLOCK) && !S_ISBLK(mode)) continue; if ((wh->type & DEV_CHAR) && !S_ISCHR(mode)) @@ -536,9 +537,10 @@ int devcgroup_inode_mknod(int mode, dev_t dev) continue; if (wh->minor != ~0 && wh->minor != MINOR(dev)) continue; -acc_check: + if (!(wh->access & ACC_MKNOD)) continue; +found: rcu_read_unlock(); return 0; }
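The recurring change across the hunks above (raw, udp, ipx, irda, pfkey, llc, netlink, netrom, packet, rose, em_meta, sctp, unix, x25) replaces open-coded atomic_read() on sk->sk_wmem_alloc / sk->sk_rmem_alloc with new accessors. For reference, a minimal sketch of what these helpers wrap, consistent with the converted call sites; the authoritative definitions live in include/net/sock.h and are not part of this excerpt:

/* include/net/sock.h (sketch, as assumed by the conversions above) */
static inline int sk_wmem_alloc_get(const struct sock *sk)
{
	/* bytes currently committed to the transmit queue */
	return atomic_read(&sk->sk_wmem_alloc);
}

static inline int sk_rmem_alloc_get(const struct sock *sk)
{
	/* bytes currently committed to the receive queue */
	return atomic_read(&sk->sk_rmem_alloc);
}

static inline int sk_has_allocations(const struct sock *sk)
{
	/* used by the deferred-destroy paths in netrom, rose and x25 above */
	return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
}

Centralizing the reads behind one helper means a later change to how the counters are encoded only has to touch these accessors, not every proc/seq_file and ioctl caller.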
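The SIOCOUTQ/SIOCINQ branches touched in net/ipv6/raw.c, net/packet/af_packet.c and net/unix/af_unix.c serve the standard queue-length ioctls, which is how the new accessors become visible to userspace. A small, purely illustrative userspace probe (hypothetical, not from this series) that exercises the SIOCOUTQ path the patch converts:

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/sockios.h>	/* SIOCOUTQ, SIOCINQ */

int main(void)
{
	int outq = 0, inq = 0;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);	/* any queueing socket type works */

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* SIOCOUTQ reports unsent bytes (sk_wmem_alloc_get() in the kernel),
	 * SIOCINQ bytes waiting to be read. Both are zero on a fresh socket. */
	if (ioctl(fd, SIOCOUTQ, &outq) == 0 && ioctl(fd, SIOCINQ, &inq) == 0)
		printf("outq=%d inq=%d\n", outq, inq);
	return 0;
}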
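The scripts/Makefile.lib hunk keys gcov instrumentation off three variables with per-object, per-Makefile and global precedence, in that order; because the three values are concatenated before the $(patsubst n%,,...) test, a per-object "n" wins even when CONFIG_GCOV_PROFILE_ALL=y. A sketch of how a subdirectory Makefile would use the convention (object names hypothetical):

# Instrument a single object:
GCOV_PROFILE_foo.o := y

# ... or everything built by this Makefile:
GCOV_PROFILE := y

# With CONFIG_GCOV_PROFILE_ALL=y, opt one file back out; the leading "n"
# makes the whole concatenated value match n%, so no CFLAGS_GCOV are added:
GCOV_PROFILE_bar.o := n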