patch-2.3.99-pre9 linux/arch/sh/mm/cache.c
- Lines: 455
- Date: Sat May 20 12:05:29 2000
- Orig file: v2.3.99-pre8/linux/arch/sh/mm/cache.c
- Orig date: Wed Apr 26 16:34:07 2000
diff -u --recursive --new-file v2.3.99-pre8/linux/arch/sh/mm/cache.c linux/arch/sh/mm/cache.c
@@ -49,7 +49,7 @@
#elif defined(__SH4__)
#define CCR 0xff00001c /* Address of Cache Control Register */
#define CCR_CACHE_VAL 0x00000105 /* 8k+16k-byte cache,P1-wb,enable */
-#define CCR_CACHE_INIT 0x0000090d /* 8k+16k-byte cache,CF,P1-wb,enable */
+#define CCR_CACHE_INIT 0x0000090d /* ICI,ICE(8k), OCI,P1-wb,OCE(16k) */
#define CCR_CACHE_ENABLE 0x00000101
#define CACHE_IC_ADDRESS_ARRAY 0xf0000000
@@ -60,143 +60,41 @@
#define CACHE_OC_WAY_SHIFT 13
#define CACHE_IC_WAY_SHIFT 13
#define CACHE_OC_ENTRY_SHIFT 5
+#define CACHE_IC_ENTRY_SHIFT 5
#define CACHE_OC_ENTRY_MASK 0x3fe0
+#define CACHE_OC_ENTRY_PHYS_MASK 0x0fe0
#define CACHE_IC_ENTRY_MASK 0x1fe0
+#define CACHE_IC_NUM_ENTRIES 256
#define CACHE_OC_NUM_ENTRIES 512
#define CACHE_OC_NUM_WAYS 1
#define CACHE_IC_NUM_WAYS 1
#endif
-/* Write back caches to memory (if needed) and invalidates the caches */
-void cache_flush_area(unsigned long start, unsigned long end)
-{
- unsigned long flags;
- unsigned long addr, data, v, p;
-
- start &= ~(L1_CACHE_BYTES-1);
- save_and_cli(flags);
- jump_to_P2();
-
- for (v = start; v < end; v+=L1_CACHE_BYTES) {
- p = __pa(v);
- addr = CACHE_IC_ADDRESS_ARRAY |
- (v&CACHE_IC_ENTRY_MASK) | 0x8 /* A-bit */;
- data = (v&0xfffffc00); /* U=0, V=0 */
- ctrl_outl(data,addr);
-#if CACHE_IC_ADDRESS_ARRAY != CACHE_OC_ADDRESS_ARRAY
- asm volatile("ocbp %0"
- : /* no output */
- : "m" (__m(v)));
-#endif
- }
- back_to_P1();
- restore_flags(flags);
-}
-
-/* Purge (just invalidate, no write back) the caches */
-/* This is expected to work well.. but..
-
- On SH7708S, the write-back cache is written back on "purge".
- (it's not expected, though).
-
- It seems that we have no way to just purge (with no write back action)
- the cache line. */
-void cache_purge_area(unsigned long start, unsigned long end)
-{
- unsigned long flags;
- unsigned long addr, data, v, p, j;
-
- start &= ~(L1_CACHE_BYTES-1);
- save_and_cli(flags);
- jump_to_P2();
-
- for (v = start; v < end; v+=L1_CACHE_BYTES) {
- p = __pa(v);
- for (j=0; j<CACHE_IC_NUM_WAYS; j++) {
- addr = CACHE_IC_ADDRESS_ARRAY|(j<<CACHE_IC_WAY_SHIFT)|
- (v&CACHE_IC_ENTRY_MASK);
- data = ctrl_inl(addr);
- if ((data & 0xfffffc00) == (p&0xfffffc00)
- && (data & CACHE_VALID)) {
- data &= ~CACHE_VALID;
- ctrl_outl(data,addr);
- break;
- }
- }
-#if CACHE_IC_ADDRESS_ARRAY != CACHE_OC_ADDRESS_ARRAY
- asm volatile("ocbi %0"
- : /* no output */
- : "m" (__m(v)));
-#endif
- }
- back_to_P1();
- restore_flags(flags);
-}
-
-/* write back the dirty cache, but not invalidate the cache */
-void cache_wback_area(unsigned long start, unsigned long end)
-{
- unsigned long flags;
- unsigned long v;
-
- start &= ~(L1_CACHE_BYTES-1);
- save_and_cli(flags);
- jump_to_P2();
-
- for (v = start; v < end; v+=L1_CACHE_BYTES) {
-#if CACHE_IC_ADDRESS_ARRAY == CACHE_OC_ADDRESS_ARRAY
- unsigned long addr, data, j;
- unsigned long p = __pa(v);
-
- for (j=0; j<CACHE_OC_NUM_WAYS; j++) {
- addr = CACHE_OC_ADDRESS_ARRAY|(j<<CACHE_OC_WAY_SHIFT)|
- (v&CACHE_OC_ENTRY_MASK);
- data = ctrl_inl(addr);
- if ((data & 0xfffffc00) == (p&0xfffffc00)
- && (data & CACHE_VALID)
- && (data & CACHE_UPDATED)) {
- data &= ~CACHE_UPDATED;
- ctrl_outl(data,addr);
- break;
- }
- }
-#else
- asm volatile("ocbwb %0"
- : /* no output */
- : "m" (__m(v)));
-#endif
- }
- back_to_P1();
- restore_flags(flags);
-}
/*
- * Write back the cache.
+ * Write back all the cache.
*
- * For SH-4, flush (write back) Operand Cache, as Instruction Cache
- * doesn't have "updated" data.
+ * For SH-4, we only need to flush (write back) Operand Cache,
+ * as Instruction Cache doesn't have "updated" data.
*
- * Assumes that called in interrupt disabled.
+ * Assumes that this is called with interrupts disabled and in P2.
+ * Should be an INLINE function.
*/
-static void cache_wback_all(void)
+static inline void cache_wback_all(void)
{
unsigned long addr, data, i, j;
- jump_to_P2();
-
for (i=0; i<CACHE_OC_NUM_ENTRIES; i++) {
for (j=0; j<CACHE_OC_NUM_WAYS; j++) {
addr = CACHE_OC_ADDRESS_ARRAY|(j<<CACHE_OC_WAY_SHIFT)|
(i<<CACHE_OC_ENTRY_SHIFT);
data = ctrl_inl(addr);
- if (data & CACHE_VALID) {
- data &= ~(CACHE_VALID|CACHE_UPDATED);
- ctrl_outl(data,addr);
+ if (data & CACHE_UPDATED) {
+ data &= ~CACHE_UPDATED;
+ ctrl_outl(data, addr);
}
}
}
-
- back_to_P1();
}
static void
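The new cache_wback_all() walks the memory-mapped operand-cache address array, entry by entry and way by way, and clears only the U (updated) bit, which makes the hardware write a dirty line back without invalidating it. As a minimal, host-runnable sketch of how the array address is composed: CACHE_OC_WAY_SHIFT, CACHE_OC_ENTRY_SHIFT and CACHE_OC_NUM_WAYS are taken from the definitions above, while the CACHE_OC_ADDRESS_ARRAY base and the U-bit value are assumptions (their #defines fall outside the hunks shown here).

/* Illustrative only -- not part of the patch.  Composes the SH-4
 * operand-cache address-array address for a given entry and way,
 * mirroring the loop in cache_wback_all() above.  The array base and
 * the U-bit value are assumed, as noted in the text. */
#include <stdio.h>

#define CACHE_OC_ADDRESS_ARRAY 0xf4000000UL	/* assumed SH-4 base */
#define CACHE_OC_WAY_SHIFT     13
#define CACHE_OC_ENTRY_SHIFT   5
#define CACHE_OC_NUM_WAYS      1
#define CACHE_UPDATED          0x2UL		/* assumed U-bit */

static unsigned long oc_array_addr(unsigned long entry, unsigned long way)
{
	return CACHE_OC_ADDRESS_ARRAY
		| (way << CACHE_OC_WAY_SHIFT)
		| (entry << CACHE_OC_ENTRY_SHIFT);
}

int main(void)
{
	unsigned long entry;

	for (entry = 0; entry < 4; entry++)	/* first few of the 512 entries */
		printf("entry %lu -> address-array word at 0x%08lx, "
		       "clear bit 0x%lx to force write-back\n",
		       entry, oc_array_addr(entry, 0), CACHE_UPDATED);
	return 0;
}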
@@ -206,7 +104,8 @@
unsigned long addr0, addr1, data0, data1, data2, data3;
jump_to_P2();
- /* Check if the entry shadows or not.
+ /*
+ * Check if the entry shadows or not.
* When shadowed, it's 128-entry system.
* Otherwise, it's 256-entry system.
*/
@@ -221,8 +120,8 @@
data3 = ctrl_inl(addr0);
/* Invalidate them, in case the cache has been enabled already. */
- ctrl_outl(data0&~0x00000001,addr0);
- ctrl_outl(data2&~0x00000001,addr1);
+ ctrl_outl(data0&~0x00000001, addr0);
+ ctrl_outl(data2&~0x00000001, addr1);
back_to_P1();
if (data0 == data1 && data2 == data3) { /* Shadow */
@@ -250,76 +149,141 @@
ccr = ctrl_inl(CCR);
if (ccr == CCR_CACHE_VAL)
return;
+ jump_to_P2();
if (ccr & CCR_CACHE_ENABLE)
- /* Should check RA here. If RA was 1,
- we only need to flush the half of the caches. */
+ /*
+ * XXX: Should check RA here.
+ * If RA was 1, we would only need to flush half of the caches.
+ */
cache_wback_all();
- jump_to_P2();
ctrl_outl(CCR_CACHE_INIT, CCR);
back_to_P1();
}
#if defined(__SH4__)
-void flush_icache_page(struct vm_area_struct *vma, struct page *pg)
+/*
+ * SH-4 has virtually indexed and physically tagged cache.
+ */
+
+/*
+ * Write back the dirty D-caches, but do not invalidate them.
+ *
+ * START, END: Virtual Address
+ */
+static void dcache_wback_range(unsigned long start, unsigned long end)
{
- unsigned long flags;
- unsigned long addr, data, v;
+ unsigned long v;
- save_and_cli(flags);
- jump_to_P2();
+ start &= ~(L1_CACHE_BYTES-1);
+ for (v = start; v < end; v+=L1_CACHE_BYTES) {
+ asm volatile("ocbwb %0"
+ : /* no output */
+ : "m" (__m(v)));
+ }
+}
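dcache_wback_range() rounds START down to a cache-line boundary and then issues one ocbwb per L1_CACHE_BYTES step, so a partially covered last line is still written back as long as END falls inside it. A small worked example of the line arithmetic, assuming the SH-4 line size of 32 bytes and an arbitrary example range:

/* Illustrative only -- not part of the patch.  Shows which cache-line
 * base addresses the loop in dcache_wback_range() would touch for an
 * unaligned range, assuming L1_CACHE_BYTES == 32 on SH-4. */
#include <stdio.h>

#define L1_CACHE_BYTES 32UL	/* assumed SH-4 line size */

int main(void)
{
	unsigned long start = 0xa0001c04UL;	/* arbitrary example range */
	unsigned long end   = 0xa0001c44UL;
	unsigned long v;

	start &= ~(L1_CACHE_BYTES - 1);		/* same rounding as the patch */
	for (v = start; v < end; v += L1_CACHE_BYTES)
		printf("ocbwb on line 0x%08lx\n", v);	/* ...c00, ...c20, ...c40 */
	return 0;
}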
- v = page_address(pg);
+/*
+ * Invalidate I-caches.
+ *
+ * START, END: Virtual Address
+ *
+ */
+static void icache_purge_range(unsigned long start, unsigned long end)
+{
+ unsigned long addr, data, v;
- /* Write back O Cache */
- asm volatile("ocbwb %0"
- : /* no output */
- : "m" (__m(v)));
- /* Invalidate I Cache */
- addr = CACHE_IC_ADDRESS_ARRAY |
- (v&CACHE_IC_ENTRY_MASK) | 0x8 /* A-bit */;
- data = (v&0xfffffc00); /* Valid=0 */
- ctrl_outl(data,addr);
+ start &= ~(L1_CACHE_BYTES-1);
+ jump_to_P2();
+ /*
+ * To select the cache line, we calculate the entry index from the
+ * virtual address: entry = vaddr & CACHE_IC_ENTRY_MASK.
+ *
+ * With the A-bit "on", the virtual address supplied in the data word
+ * is translated by the MMU and compared against the cache tag; if it
+ * does not match, nothing happens.  (So we avoid invalidating
+ * unrelated lines.)
+ */
+ for (v = start; v < end; v+=L1_CACHE_BYTES) {
+ addr = CACHE_IC_ADDRESS_ARRAY | (v&CACHE_IC_ENTRY_MASK)
+ | 0x8 /* A-bit */;
+ data = (v&0xfffffc00); /* Valid=0 */
+ ctrl_outl(data, addr);
+ }
back_to_P1();
- restore_flags(flags);
}
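As the comment above explains, setting the A-bit (0x8) in the address-array write makes the store associative: the virtual address carried in the data word is translated and compared with the tag already in the selected entry, and V is cleared only on a match. A worked example of the address/data pair the loop builds, using the CACHE_IC_* constants from the definitions above and an arbitrary virtual address chosen for illustration:

/* Illustrative only -- not part of the patch.  Computes the I-cache
 * address-array address (with the A-bit) and the data word (V=0) that
 * icache_purge_range() writes for one virtual address. */
#include <stdio.h>

#define CACHE_IC_ADDRESS_ARRAY 0xf0000000UL
#define CACHE_IC_ENTRY_MASK    0x1fe0UL
#define A_BIT                  0x8UL

int main(void)
{
	unsigned long v = 0x7fffe460UL;		/* arbitrary U0 address */
	unsigned long addr, data;

	addr = CACHE_IC_ADDRESS_ARRAY | (v & CACHE_IC_ENTRY_MASK) | A_BIT;
	data = v & 0xfffffc00UL;		/* tag; V bit left clear */

	/* 0x7fffe460 & 0x1fe0 = 0x460 -> entry index 0x460 >> 5 = 35 */
	printf("write 0x%08lx to 0x%08lx (entry %lu)\n",
	       data, addr, (v & CACHE_IC_ENTRY_MASK) >> 5);
	return 0;
}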
+/*
+ * Write back the range of D-cache, and purge the I-cache.
+ *
+ * Called from sh/kernel/signal.c, after accessing the memory
+ * through the U0 area.  START and END are U0 addresses.
+ */
void flush_icache_range(unsigned long start, unsigned long end)
{
- unsigned long flags;
- unsigned long addr, data, v;
+ dcache_wback_range(start, end);
+ icache_purge_range(start, end);
+}
- start &= ~(L1_CACHE_BYTES-1);
- save_and_cli(flags);
- jump_to_P2();
+/*
+ * Invalidate the I-cache of the page (don't need to write back D-cache).
+ *
+ * Called from kernel/ptrace.c, mm/memory.c after flush_page_to_ram is called.
+ */
+void flush_icache_page(struct vm_area_struct *vma, struct page *pg)
+{
+ unsigned long phys, addr, data, i;
- for (v = start; v < end; v+=L1_CACHE_BYTES) {
- /* Write back O Cache */
- asm volatile("ocbwb %0"
- : /* no output */
- : "m" (__m(v)));
- /* Invalidate I Cache */
- addr = CACHE_IC_ADDRESS_ARRAY |
- (v&CACHE_IC_ENTRY_MASK) | 0x8 /* A-bit */;
- data = (v&0xfffffc00); /* Valid=0 */
- ctrl_outl(data,addr);
+ /*
+ * Alas, we don't know at which virtual address the page is mapped,
+ * so we can't use icache_purge_range().
+ */
+
+ /* Physical address of this page */
+ phys = (pg - mem_map)*PAGE_SIZE + __MEMORY_START;
+
+ jump_to_P2();
+ /* Loop all the I-cache */
+ for (i=0; i<CACHE_IC_NUM_ENTRIES; i++) {
+ addr = CACHE_IC_ADDRESS_ARRAY| (i<<CACHE_IC_ENTRY_SHIFT);
+ data = ctrl_inl(addr);
+ if ((data & CACHE_VALID) && (data&PAGE_MASK) == phys) {
+ data &= ~CACHE_VALID;
+ ctrl_outl(data, addr);
+ }
}
back_to_P1();
- restore_flags(flags);
}
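Because only the physical page is known here, flush_icache_page() derives the physical address from the struct page and then scans every I-cache entry, comparing the stored tag against that page. A host-runnable sketch of the arithmetic and the tag test follows; PAGE_SIZE of 4KB, the CACHE_VALID bit value and the __MEMORY_START value (0x0c000000) are assumptions chosen for illustration, since none of them appear in the hunks shown here.

/* Illustrative only -- not part of the patch.  Mimics the physical
 * address computation and the tag test used by flush_icache_page().
 * __MEMORY_START and CACHE_VALID are assumed values, purely for the
 * example; the real ones are board/CPU specific. */
#include <stdio.h>

#define PAGE_SIZE        4096UL
#define PAGE_MASK        (~(PAGE_SIZE - 1))
#define CACHE_VALID      0x1UL		/* assumed V-bit */
#define MEMORY_START     0x0c000000UL	/* assumed __MEMORY_START */

int main(void)
{
	unsigned long page_index = 5;	/* stands in for (pg - mem_map) */
	unsigned long phys = page_index * PAGE_SIZE + MEMORY_START;

	/* A fabricated address-array word: tag of that page, V bit set. */
	unsigned long data = (phys & PAGE_MASK) | CACHE_VALID;

	printf("page %lu -> phys 0x%08lx\n", page_index, phys);
	printf("entry matches page? %s\n",
	       ((data & CACHE_VALID) && (data & PAGE_MASK) == phys)
	       ? "yes, clear V" : "no");
	return 0;
}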
void flush_cache_all(void)
{
+ unsigned long addr, data, i;
unsigned long flags;
save_and_cli(flags);
- /* Write back Operand Cache */
- cache_wback_all();
-
- /* Then, invalidate Instruction Cache and Operand Cache */
jump_to_P2();
- ctrl_outl(CCR_CACHE_INIT, CCR);
+
+ /* Loop all the D-cache */
+ for (i=0; i<CACHE_OC_NUM_ENTRIES; i++) {
+ addr = CACHE_OC_ADDRESS_ARRAY|(i<<CACHE_OC_ENTRY_SHIFT);
+ data = ctrl_inl(addr);
+ if (data & CACHE_VALID) {
+ data &= ~(CACHE_UPDATED|CACHE_VALID);
+ ctrl_outl(data, addr);
+ }
+ }
+
+ /* Loop all the I-cache */
+ for (i=0; i<CACHE_IC_NUM_ENTRIES; i++) {
+ addr = CACHE_IC_ADDRESS_ARRAY| (i<<CACHE_IC_ENTRY_SHIFT);
+ data = ctrl_inl(addr);
+ if (data & CACHE_VALID) {
+ data &= ~CACHE_VALID;
+ ctrl_outl(data, addr);
+ }
+ }
+
back_to_P1();
restore_flags(flags);
}
@@ -334,24 +298,42 @@
void flush_cache_range(struct mm_struct *mm, unsigned long start,
unsigned long end)
{
- unsigned long flags;
+ /*
+ * Calling
+ *	dcache_flush_range(start, end);
+ * is not good enough for the purpose of this function.  That is,
+ * flushing the cache lines indexed by the virtual address is not
+ * sufficient.
+ *
+ * Instead, we need to flush every cache line which holds data of
+ * the corresponding physical memory, because we have "alias" issues.
+ *
+ * This is needed because the kernel accesses the memory through the
+ * P1-area (and/or U0-area) while user space accesses it through the
+ * U0-area, and the P1-area and U0-area may use different cache lines
+ * for the same physical memory.
+ *
+ * If we just called dcache_flush_range(), the P1-area line could
+ * remain in the cache, unflushed.
+ */
unsigned long addr, data, v;
start &= ~(L1_CACHE_BYTES-1);
- save_and_cli(flags);
jump_to_P2();
for (v = start; v < end; v+=L1_CACHE_BYTES) {
- addr = CACHE_IC_ADDRESS_ARRAY |
- (v&CACHE_IC_ENTRY_MASK) | 0x8 /* A-bit */;
- data = (v&0xfffffc00); /* Update=0, Valid=0 */
- ctrl_outl(data,addr);
addr = CACHE_OC_ADDRESS_ARRAY |
- (v&CACHE_OC_ENTRY_MASK) | 0x8 /* A-bit */;
- ctrl_outl(data,addr);
+ (v&CACHE_OC_ENTRY_PHYS_MASK) | 0x8 /* A-bit */;
+ data = (v&0xfffffc00); /* Update=0, Valid=0 */
+
+ /* Try all the cases for aliases */
+ ctrl_outl(data, addr);
+ ctrl_outl(data, addr | 0x1000);
+ ctrl_outl(data, addr | 0x2000);
+ ctrl_outl(data, addr | 0x3000);
}
back_to_P1();
- restore_flags(flags);
}
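The four writes cover every possible alias position: the 16KB, direct-mapped operand cache is indexed by virtual-address bits [13:5] (CACHE_OC_ENTRY_MASK 0x3fe0), but with 4KB pages only bits [11:5] are fixed by the page offset, so bits 12-13 of the index can differ between the kernel's P1 mapping and the user's U0 mapping of the same line. CACHE_OC_ENTRY_PHYS_MASK (0x0fe0) keeps just the in-page index bits, and adding 0x1000/0x2000/0x3000 enumerates the remaining two bits. A host-runnable sketch of that enumeration; the OC address-array base value is an assumption, as its #define is outside the hunks shown:

/* Illustrative only -- not part of the patch.  Shows why four writes
 * are needed in flush_cache_range(): with a 4KB page, bits 12-13 of
 * the operand-cache index can take any of four values across aliased
 * mappings of the same physical line. */
#include <stdio.h>

#define CACHE_OC_ADDRESS_ARRAY   0xf4000000UL	/* assumed base */
#define CACHE_OC_ENTRY_PHYS_MASK 0x0fe0UL	/* index bits inside the page */
#define A_BIT                    0x8UL

int main(void)
{
	unsigned long v = 0x7fffe460UL;		/* arbitrary user address */
	unsigned long base, alias;

	base = CACHE_OC_ADDRESS_ARRAY | (v & CACHE_OC_ENTRY_PHYS_MASK) | A_BIT;

	/* The same physical line may sit at any of these four indices. */
	for (alias = 0; alias < 0x4000; alias += 0x1000)
		printf("associative purge via 0x%08lx\n", base | alias);
	return 0;
}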
void flush_cache_page(struct vm_area_struct *vma, unsigned long addr)
@@ -359,9 +341,30 @@
flush_cache_range(vma->vm_mm, addr, addr+PAGE_SIZE);
}
-void __flush_page_to_ram(unsigned long page)
-{ /* Page is in physical address */
- /* XXX: for the time being... */
- flush_cache_all();
+/*
+ * After accessing the memory from kernel space (P1-area), we need to
+ * write back the cache line, to avoid "alias" issues.
+ *
+ * We search the D-cache to see if we have entries corresponding to
+ * the page and, if found, write them back.
+ */
+void flush_page_to_ram(struct page *pg)
+{
+ unsigned long phys, addr, data, i;
+
+ /* Physical address of this page */
+ phys = (pg - mem_map)*PAGE_SIZE + __MEMORY_START;
+
+ jump_to_P2();
+ /* Loop all the D-cache */
+ for (i=0; i<CACHE_OC_NUM_ENTRIES; i++) {
+ addr = CACHE_OC_ADDRESS_ARRAY| (i<<CACHE_OC_ENTRY_SHIFT);
+ data = ctrl_inl(addr);
+ if ((data & CACHE_UPDATED) && (data&PAGE_MASK) == phys) {
+ data &= ~CACHE_UPDATED;
+ ctrl_outl(data, addr);
+ }
+ }
+ back_to_P1();
}
#endif