author     bde <bde@FreeBSD.org>  1996-07-01 20:16:10 +0000
committer  bde <bde@FreeBSD.org>  1996-07-01 20:16:10 +0000
commit     325165ab608d04f8a5bfd39e6a3563b72a0ffd26 (patch)
tree       1801ddb437691bf2fd00a49cc7eeac4ec5f0ae1d
parent     ae8d73b2e42e4798298240f05b3ea0e2ee19cdc6 (diff)
download   FreeBSD-src-325165ab608d04f8a5bfd39e6a3563b72a0ffd26.zip
           FreeBSD-src-325165ab608d04f8a5bfd39e6a3563b72a0ffd26.tar.gz
Fixed lots of warnings about unportable casts of pointers to volatile
variables: don't depend on the compiler generating atomic code to set
the variables - use inline asm to specify the atomic instruction(s)
explicitly.
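
A minimal, self-contained sketch of the technique this commit applies (not the
committed header itself): set bits in a volatile word with a single
memory-destination `orl', so the read-modify-write is one instruction instead
of whatever load/or/store sequence the compiler might pick for a plain `|='.
The `"+m"' constraint is the modern GCC spelling for a read-modify-write
memory operand; the patched header below uses `"=m"' with the compilers of
the time. The example is i386/amd64-specific, like the headers it mirrors.

	#include <stdio.h>

	/* OR `bits' into *addr with one orl instruction on the memory operand. */
	static inline void
	setbits(volatile unsigned *addr, unsigned bits)
	{
		__asm __volatile("orl %1,%0" : "+m" (*addr) : "ir" (bits));
	}

	/* Stand-in for a flag word touched by interrupt handlers. */
	static volatile unsigned ipending;

	int
	main(void)
	{
		setbits(&ipending, 0x4);	/* e.g. mark a software interrupt pending */
		printf("ipending = %#x\n", ipending);
		return 0;
	}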
-rw-r--r--  sys/amd64/include/cpufunc.h  13
-rw-r--r--  sys/i386/include/cpufunc.h   13
-rw-r--r--  sys/i386/include/spl.h       29
3 files changed, 36 insertions(+), 19 deletions(-)
diff --git a/sys/amd64/include/cpufunc.h b/sys/amd64/include/cpufunc.h
index 204a228..9dce136 100644
--- a/sys/amd64/include/cpufunc.h
+++ b/sys/amd64/include/cpufunc.h
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: cpufunc.h,v 1.50 1996/06/14 11:01:01 asami Exp $
+ * $Id: cpufunc.h,v 1.51 1996/07/01 18:12:23 bde Exp $
*/
/*
@@ -43,8 +43,6 @@
#include <sys/cdefs.h>
#include <sys/types.h>
-#include <machine/spl.h> /* XXX belongs elsewhere */
-
#ifdef __GNUC__
static __inline void
@@ -358,6 +356,12 @@ rdtsc(void)
}
static __inline void
+setbits(volatile unsigned *addr, u_int bits)
+{
+ __asm __volatile("orl %1,%0" : "=m" (*addr) : "ir" (bits));
+}
+
+static __inline void
write_eflags(u_long ef)
{
__asm __volatile("pushl %0; popfl" : : "r" (ef));
@@ -393,6 +397,7 @@ quad_t rdmsr __P((u_int msr));
quad_t rdpmc __P((u_int pmc));
quad_t rdtsc __P((void));
u_long read_eflags __P((void));
+void setbits __P((volatile unsigned *addr, u_int bits));
void write_eflags __P((u_long ef));
void wrmsr __P((u_int msr, quad_t newval));
@@ -404,4 +409,6 @@ void ltr __P((u_short sel));
u_int rcr0 __P((void));
u_long rcr3 __P((void));
+#include <machine/spl.h> /* XXX belongs elsewhere */
+
#endif /* !_MACHINE_CPUFUNC_H_ */
diff --git a/sys/i386/include/cpufunc.h b/sys/i386/include/cpufunc.h
index 204a228..9dce136 100644
--- a/sys/i386/include/cpufunc.h
+++ b/sys/i386/include/cpufunc.h
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: cpufunc.h,v 1.50 1996/06/14 11:01:01 asami Exp $
+ * $Id: cpufunc.h,v 1.51 1996/07/01 18:12:23 bde Exp $
*/
/*
@@ -43,8 +43,6 @@
#include <sys/cdefs.h>
#include <sys/types.h>
-#include <machine/spl.h> /* XXX belongs elsewhere */
-
#ifdef __GNUC__
static __inline void
@@ -358,6 +356,12 @@ rdtsc(void)
}
static __inline void
+setbits(volatile unsigned *addr, u_int bits)
+{
+ __asm __volatile("orl %1,%0" : "=m" (*addr) : "ir" (bits));
+}
+
+static __inline void
write_eflags(u_long ef)
{
__asm __volatile("pushl %0; popfl" : : "r" (ef));
@@ -393,6 +397,7 @@ quad_t rdmsr __P((u_int msr));
quad_t rdpmc __P((u_int pmc));
quad_t rdtsc __P((void));
u_long read_eflags __P((void));
+void setbits __P((volatile unsigned *addr, u_int bits));
void write_eflags __P((u_long ef));
void wrmsr __P((u_int msr, quad_t newval));
@@ -404,4 +409,6 @@ void ltr __P((u_short sel));
u_int rcr0 __P((void));
u_long rcr3 __P((void));
+#include <machine/spl.h> /* XXX belongs elsewhere */
+
#endif /* !_MACHINE_CPUFUNC_H_ */
diff --git a/sys/i386/include/spl.h b/sys/i386/include/spl.h
index 2c14df5..59f2104 100644
--- a/sys/i386/include/spl.h
+++ b/sys/i386/include/spl.h
@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: spl.h,v 1.13 1996/02/07 21:52:57 wollman Exp $
+ * $Id: spl.h,v 1.14 1996/05/18 03:36:42 dyson Exp $
*/
#ifndef _MACHINE_IPL_H_
@@ -76,6 +76,11 @@
#ifndef LOCORE
+/*
+ * cpl is preserved by interrupt handlers so it is effectively nonvolatile.
+ * ipending and idelayed are changed by interrupt handlers so they are
+ * volatile.
+ */
extern unsigned bio_imask; /* group of interrupts masked with splbio() */
extern unsigned cpl; /* current priority level mask */
extern volatile unsigned idelayed; /* interrupts to become pending */
@@ -85,19 +90,17 @@ extern unsigned stat_imask; /* interrupts masked with splstatclock() */
extern unsigned tty_imask; /* group of interrupts masked with spltty() */
/*
- * ipending has to be volatile so that it is read every time it is accessed
- * in splx() and spl0(), but we don't want it to be read nonatomically when
- * it is changed. Pretending that ipending is a plain int happens to give
- * suitable atomic code for "ipending |= constant;".
+ * The volatile bitmap variables must be set atomically. This normally
+ * involves using a machine-dependent bit-set or `or' instruction.
*/
-#define setdelayed() (*(unsigned *)&ipending |= loadandclear(&idelayed))
-#define setsoftast() (*(unsigned *)&ipending |= SWI_AST_PENDING)
-#define setsoftclock() (*(unsigned *)&ipending |= SWI_CLOCK_PENDING)
-#define setsoftnet() (*(unsigned *)&ipending |= SWI_NET_PENDING)
-#define setsofttty() (*(unsigned *)&ipending |= SWI_TTY_PENDING)
-
-#define schedsofttty() (*(unsigned *)&idelayed |= SWI_TTY_PENDING)
-#define schedsoftnet() (*(unsigned *)&idelayed |= SWI_NET_PENDING)
+#define setdelayed() setbits(&ipending, loadandclear(&idelayed))
+#define setsoftast() setbits(&ipending, SWI_AST_PENDING)
+#define setsoftclock() setbits(&ipending, SWI_CLOCK_PENDING)
+#define setsoftnet() setbits(&ipending, SWI_NET_PENDING)
+#define setsofttty() setbits(&ipending, SWI_TTY_PENDING)
+
+#define schedsofttty() setbits(&idelayed, SWI_TTY_PENDING)
+#define schedsoftnet() setbits(&idelayed, SWI_NET_PENDING)
#define softclockpending() (ipending & SWI_CLOCK_PENDING)
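
A sketch of how the rewritten spl.h macros compose, under stated assumptions:
setbits() is the inline from the patch above (written here with the modern
`"+m"' constraint), loadandclear() is modeled on the xchgl-based helper in
cpufunc.h of this period, and the SWI_TTY_PENDING value is illustrative only.
setdelayed() drains idelayed into ipending with an atomic exchange followed by
an atomic OR, replacing the old pointer-cast `|=' idiom.

	#define SWI_TTY_PENDING	0x00008000	/* illustrative value, not the real one */

	static volatile unsigned ipending;	/* interrupts pending */
	static volatile unsigned idelayed;	/* interrupts to become pending */

	static inline void
	setbits(volatile unsigned *addr, unsigned bits)
	{
		__asm __volatile("orl %1,%0" : "+m" (*addr) : "ir" (bits));
	}

	/* Atomically fetch *addr and zero it (xchg with memory is locked on x86). */
	static inline unsigned
	loadandclear(volatile unsigned *addr)
	{
		unsigned result = 0;

		__asm __volatile("xchgl %0,%1" : "+r" (result), "+m" (*addr));
		return (result);
	}

	#define schedsofttty()	setbits(&idelayed, SWI_TTY_PENDING)
	#define setdelayed()	setbits(&ipending, loadandclear(&idelayed))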