patch-2.1.51 linux/include/asm-ppc/bitops.h
Next file: linux/include/asm-ppc/byteorder.h
Previous file: linux/include/asm-mips/system.h
Back to the patch index
Back to the overall index
- Lines: 309
- Date: Sat Aug 16 09:51:09 1997
- Orig file: v2.1.50/linux/include/asm-ppc/bitops.h
- Orig date: Mon Aug 4 16:25:39 1997
diff -u --recursive --new-file v2.1.50/linux/include/asm-ppc/bitops.h linux/include/asm-ppc/bitops.h
@@ -1,17 +1,26 @@
+/*
+ * $Id: bitops.h,v 1.7 1997/08/03 00:12:07 paulus Exp $
+ * bitops.h: Bit string operations on the ppc
+ */
+
#ifndef _ASM_PPC_BITOPS_H_
#define _ASM_PPC_BITOPS_H_
#include <asm/system.h>
#include <asm/byteorder.h>
-#include <linux/kernel.h> /* for printk */
-
-#define BIT(n) 1<<(n&0x1F)
-typedef unsigned long BITFIELD;
+extern void set_bit(int nr, volatile void *addr);
+extern void clear_bit(int nr, volatile void *addr);
+extern void change_bit(int nr, volatile void *addr);
+extern int test_and_set_bit(int nr, volatile void *addr);
+extern int test_and_clear_bit(int nr, volatile void *addr);
+extern int test_and_change_bit(int nr, volatile void *addr);
/*
- * These are ifdef'd out here because using : "cc" as a constraing
+ * These are if'd out here because using : "cc" as a constraint
* results in errors from gcc. -- Cort
+ * Besides, they need to be changed so we have both set_bit
+ * and test_and_set_bit, etc.
*/
#if 0
extern __inline__ int set_bit(int nr, void * addr)
@@ -20,9 +29,6 @@
unsigned long mask = 1 << (nr & 0x1f);
unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
- if ((unsigned long)addr & 3)
- printk("set_bit(%lx, %p)\n", nr, addr);
-
__asm__ __volatile__(
"1:lwarx %0,0,%3 \n\t"
"or %1,%0,%2 \n\t"
@@ -32,7 +38,7 @@
: "r" (mask), "r" (p)
/*: "cc" */);
-n return (old & mask) != 0;
+ return (old & mask) != 0;
}
extern __inline__ unsigned long clear_bit(unsigned long nr, void *addr)
@@ -41,8 +47,6 @@
unsigned long mask = 1 << (nr & 0x1f);
unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
- if ((unsigned long)addr & 3)
- printk("clear_bit(%lx, %p)\n", nr, addr);
__asm__ __volatile__("\n\
1: lwarx %0,0,%3
andc %1,%0,%2
@@ -61,8 +65,6 @@
unsigned long mask = 1 << (nr & 0x1f);
unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
- if ((unsigned long)addr & 3)
- printk("change_bit(%lx, %p)\n", nr, addr);
__asm__ __volatile__("\n\
1: lwarx %0,0,%3
xor %1,%0,%2
@@ -76,10 +78,19 @@
}
#endif
+extern __inline__ unsigned long test_bit(int nr, __const__ volatile void *addr)
+{
+ __const__ unsigned int *p = (__const__ unsigned int *) addr;
+
+ return (p[nr >> 5] >> (nr & 0x1f)) & 1UL;
+}
+
extern __inline__ int ffz(unsigned int x)
{
int n;
+ if (x == ~0)
+ return 32;
x = ~x & (x+1); /* set LS zero to 1, other bits to 0 */
__asm__ ("cntlzw %0,%1" : "=r" (n) : "r" (x));
return 31 - n;
@@ -89,34 +100,11 @@
* This implementation of find_{first,next}_zero_bit was stolen from
* Linus' asm-alpha/bitops.h.
*/
+#define find_first_zero_bit(addr, size) \
+ find_next_zero_bit((addr), (size), 0)
-extern __inline__ unsigned long find_first_zero_bit(void * addr, unsigned long size)
-{
- unsigned int * p = ((unsigned int *) addr);
- unsigned int result = 0;
- unsigned int tmp;
-
- if (size == 0)
- return 0;
- while (size & ~31UL) {
- if (~(tmp = *(p++)))
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
- tmp |= ~0UL << size;
-found_middle:
- return result + ffz(tmp);
-}
-
-/*
- * Find next zero bit in a bitmap reasonably efficiently..
- */
-extern __inline__ unsigned long find_next_zero_bit(void * addr, unsigned long size,
- unsigned long offset)
+extern __inline__ unsigned long find_next_zero_bit(void * addr,
+ unsigned long size, unsigned long offset)
{
unsigned int * p = ((unsigned int *) addr) + (offset >> 5);
unsigned int result = offset & ~31UL;
@@ -127,17 +115,17 @@
size -= result;
offset &= 31UL;
if (offset) {
- tmp = *(p++);
+ tmp = *p++;
tmp |= ~0UL >> (32-offset);
if (size < 32)
goto found_first;
- if (~tmp)
+ if (tmp != ~0U)
goto found_middle;
size -= 32;
result += 32;
}
- while (size & ~31UL) {
- if (~(tmp = *(p++)))
+ while (size >= 32) {
+ if ((tmp = *p++) != ~0U)
goto found_middle;
result += 32;
size -= 32;
@@ -153,101 +141,98 @@
#define _EXT2_HAVE_ASM_BITOPS_
-#define ext2_find_first_zero_bit(addr, size) \
- ext2_find_next_zero_bit((addr), (size), 0)
+#ifdef __KERNEL__
+/*
+ * test_and_{set,clear}_bit guarantee atomicity without
+ * disabling interrupts.
+ */
+#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 0x18, addr)
+#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 0x18, addr)
+#else
extern __inline__ int ext2_set_bit(int nr, void * addr)
{
-#ifdef __KERNEL__
- int s = _disable_interrupts();
-#endif
- int mask;
- unsigned char *ADDR = (unsigned char *) addr;
- int oldbit;
-
- ADDR += nr >> 3;
- mask = 1 << (nr & 0x07);
- oldbit = (*ADDR & mask) ? 1 : 0;
- *ADDR |= mask;
-#ifdef __KERNEL__
- _enable_interrupts(s);
-#endif
- return oldbit;
+ int mask;
+ unsigned char *ADDR = (unsigned char *) addr;
+ int oldbit;
+
+ ADDR += nr >> 3;
+ mask = 1 << (nr & 0x07);
+ oldbit = (*ADDR & mask) ? 1 : 0;
+ *ADDR |= mask;
+ return oldbit;
}
extern __inline__ int ext2_clear_bit(int nr, void * addr)
{
-#ifdef __KERNEL__
- int s = _disable_interrupts();
-#endif
- int mask;
- unsigned char *ADDR = (unsigned char *) addr;
- int oldbit;
-
- ADDR += nr >> 3;
- mask = 1 << (nr & 0x07);
- oldbit = (*ADDR & mask) ? 1 : 0;
- *ADDR = *ADDR & ~mask;
-#ifdef __KERNEL__
- _enable_interrupts(s);
-#endif
- return oldbit;
-}
+ int mask;
+ unsigned char *ADDR = (unsigned char *) addr;
+ int oldbit;
-
-/* The following routine need not be atomic. */
-extern __inline__ unsigned long test_bit(int nr, void *addr)
-{
- return 1UL & (((__const__ unsigned int *) addr)[nr >> 5] >> (nr & 31));
+ ADDR += nr >> 3;
+ mask = 1 << (nr & 0x07);
+ oldbit = (*ADDR & mask) ? 1 : 0;
+ *ADDR = *ADDR & ~mask;
+ return oldbit;
}
+#endif /* __KERNEL__ */
extern __inline__ int ext2_test_bit(int nr, __const__ void * addr)
{
- int mask;
__const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
- ADDR += nr >> 3;
- mask = 1 << (nr & 0x07);
- return ((mask & *ADDR) != 0);
+ return (ADDR[nr >> 3] >> (nr & 7)) & 1;
}
-extern __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
+/*
+ * This implementation of ext2_find_{first,next}_zero_bit was stolen from
+ * Linus' asm-alpha/bitops.h and modified for a big-endian machine.
+ */
+
+#define ext2_find_first_zero_bit(addr, size) \
+ ext2_find_next_zero_bit((addr), (size), 0)
+
+extern __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
+ unsigned long size, unsigned long offset)
{
- unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
- unsigned long result = offset & ~31UL;
- unsigned long tmp;
+ unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
+ unsigned int result = offset & ~31UL;
+ unsigned int tmp;
if (offset >= size)
return size;
size -= result;
offset &= 31UL;
- if(offset) {
- tmp = *(p++);
- tmp |= le32_to_cpu(~0UL >> (32-offset));
- if(size < 32)
+ if (offset) {
+ tmp = cpu_to_le32p(p++);
+ tmp |= ~0UL >> (32-offset);
+ if (size < 32)
goto found_first;
- if(~tmp)
+ if (tmp != ~0U)
goto found_middle;
size -= 32;
result += 32;
}
- while(size & ~31UL) {
- if(~(tmp = *(p++)))
+ while (size >= 32) {
+ if ((tmp = cpu_to_le32p(p++)) != ~0U)
goto found_middle;
result += 32;
size -= 32;
}
- if(!size)
+ if (!size)
return result;
- tmp = *p;
-
+ tmp = cpu_to_le32p(p);
found_first:
- return result + ffz(le32_to_cpu(tmp) | (~0UL << size));
+ tmp |= ~0U << size;
found_middle:
- return result + ffz(le32_to_cpu(tmp));
+ return result + ffz(tmp);
}
-#endif /* _ASM_PPC_BITOPS_H */
-
+/* Bitmap functions for the minix filesystem. */
+#define minix_set_bit(nr,addr) ext2_set_bit(nr,addr)
+#define minix_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
+#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
+#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)
+#endif /* _ASM_PPC_BITOPS_H */
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen, slshen@lbl.gov