159-cpu_fixes.patch
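OpenWrt workaround patch for cache errata in the BCM4710 (BMIPS32 rev A0)
core used in early Broadcom BCM47xx SoCs. It touches four areas: r4kcache.h
gains dummy uncached reads before data-cache ops and TLB pre-fills before
hit-type ops; stackframe.h and genex.S pad the exception return/entry paths
with nops; c-r4k.c routes the dcache blast functions to BCM4710-safe variants
and runs coherency_setup() uncached; tlbex.c pads the generated TLB handlers.
The core trick, as a stand-alone sketch (not part of the patch; the precise
erratum is undocumented here, but forcing an uncached SSB read ahead of the
cache op is what avoids the lockup):

	/* Dummy read from uncached (KSEG1) SSB enumeration space before a
	 * cache op -- the shape every BCM4710_DUMMY_RREG() below expands to. */
	(void) *((u8 *) KSEG1ADDR(SSB_ENUM_BASE));
	cache_op(Hit_Writeback_Inv_D, addr);
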

--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -25,6 +25,20 @@
 extern void (*r4k_blast_dcache)(void);
 extern void (*r4k_blast_icache)(void);
 
+#ifdef CONFIG_BCM47XX
+#include <asm/paccess.h>
+#include <linux/ssb/ssb.h>
+#define BCM4710_DUMMY_RREG() ((void) *((u8 *) KSEG1ADDR(SSB_ENUM_BASE)))
+
+#define BCM4710_FILL_TLB(addr) (*(volatile unsigned long *)(addr))
+#define BCM4710_PROTECTED_FILL_TLB(addr) ({ unsigned long x; get_dbe(x, (volatile unsigned long *)(addr)); })
+#else
+#define BCM4710_DUMMY_RREG()
+
+#define BCM4710_FILL_TLB(addr)
+#define BCM4710_PROTECTED_FILL_TLB(addr)
+#endif
+
 /*
  * This macro return a properly sign-extended address suitable as base address
  * for indexed cache operations.  Two issues here:
@@ -98,6 +112,7 @@ static inline void flush_icache_line_ind
 static inline void flush_dcache_line_indexed(unsigned long addr)
 {
 	__dflush_prologue
+	BCM4710_DUMMY_RREG();
 	cache_op(Index_Writeback_Inv_D, addr);
 	__dflush_epilogue
 }
@@ -125,6 +140,7 @@ static inline void flush_icache_line(uns
 static inline void flush_dcache_line(unsigned long addr)
 {
 	__dflush_prologue
+	BCM4710_DUMMY_RREG();
 	cache_op(Hit_Writeback_Inv_D, addr);
 	__dflush_epilogue
 }
@@ -132,6 +148,7 @@ static inline void flush_dcache_line(uns
 static inline void invalidate_dcache_line(unsigned long addr)
 {
 	__dflush_prologue
+	BCM4710_DUMMY_RREG();
 	cache_op(Hit_Invalidate_D, addr);
 	__dflush_epilogue
 }
@@ -187,6 +204,7 @@ static inline void protected_flush_icach
 #ifdef CONFIG_EVA
 		protected_cachee_op(Hit_Invalidate_I, addr);
 #else
+		BCM4710_DUMMY_RREG();
 		protected_cache_op(Hit_Invalidate_I, addr);
 #endif
 		break;
@@ -201,6 +219,7 @@ static inline void protected_flush_icach
  */
 static inline void protected_writeback_dcache_line(unsigned long addr)
 {
+	BCM4710_DUMMY_RREG();
 #ifdef CONFIG_EVA
 	protected_cachee_op(Hit_Writeback_Inv_D, addr);
 #else
@@ -554,8 +573,51 @@ static inline void invalidate_tcache_pag
 		: "r" (base),						\
 		  "i" (op));
 
+static inline void blast_dcache(void)
+{
+	unsigned long start = KSEG0;
+	unsigned long dcache_size = current_cpu_data.dcache.waysize * current_cpu_data.dcache.ways;
+	unsigned long end = (start + dcache_size);
+
+	do {
+		BCM4710_DUMMY_RREG();
+		cache_op(Index_Writeback_Inv_D, start);
+		start += current_cpu_data.dcache.linesz;
+	} while (start < end);
+}
+
+static inline void blast_dcache_page(unsigned long page)
+{
+	unsigned long start = page;
+	unsigned long end = start + PAGE_SIZE;
+
+	BCM4710_FILL_TLB(start);
+	do {
+		BCM4710_DUMMY_RREG();
+		cache_op(Hit_Writeback_Inv_D, start);
+		start += current_cpu_data.dcache.linesz;
+	} while (start < end);
+}
+
+static inline void blast_dcache_page_indexed(unsigned long page)
+{
+	unsigned long start = page;
+	unsigned long end = start + PAGE_SIZE;
+	unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
+	unsigned long ws_end = current_cpu_data.dcache.ways <<
+	                       current_cpu_data.dcache.waybit;
+	unsigned long ws, addr;
+	for (ws = 0; ws < ws_end; ws += ws_inc) {
+		start = page + ws;
+		for (addr = start; addr < end; addr += current_cpu_data.dcache.linesz) {
+			BCM4710_DUMMY_RREG();
+			cache_op(Index_Writeback_Inv_D, addr);
+		}
+	}
+}
+
 /* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
-#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
+#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra, war) \
 static inline void extra##blast_##pfx##cache##lsize(void)		\
 {									\
 	unsigned long start = INDEX_BASE;				\
@@ -567,6 +629,7 @@ static inline void extra##blast_##pfx##c
 									\
 	__##pfx##flush_prologue						\
 									\
+	war								\
 	for (ws = 0; ws < ws_end; ws += ws_inc)				\
 		for (addr = start; addr < end; addr += lsize * 32)	\
 			cache##lsize##_unroll32(addr|ws, indexop);	\
@@ -581,6 +644,7 @@ static inline void extra##blast_##pfx##c
 									\
 	__##pfx##flush_prologue						\
 									\
+	war								\
 	do {								\
 		cache##lsize##_unroll32(start, hitop);			\
 		start += lsize * 32;					\
@@ -599,6 +663,8 @@ static inline void extra##blast_##pfx##c
 			       current_cpu_data.desc.waybit;		\
 	unsigned long ws, addr;						\
 									\
+	war								\
+									\
 	__##pfx##flush_prologue						\
 									\
 	for (ws = 0; ws < ws_end; ws += ws_inc)				\
@@ -608,26 +674,26 @@ static inline void extra##blast_##pfx##c
 	__##pfx##flush_epilogue						\
 }
 
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
-
-__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
-__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, , )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, , BCM4710_FILL_TLB(start);)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, , )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, , )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, , BCM4710_FILL_TLB(start);)
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_, BCM4710_FILL_TLB(start);)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, , )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, , )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, , BCM4710_FILL_TLB(start);)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, , )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, , )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, , )
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, , )
+
+__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, , )
+__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, , )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, , )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, , )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, , )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, , )
 
 #define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
 static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
@@ -656,17 +722,19 @@ __BUILD_BLAST_USER_CACHE(d, dcache, Inde
 __BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
 
 /* build blast_xxx_range, protected_blast_xxx_range */
-#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
+#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra, war, war2) \
 static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
 						    unsigned long end)	\
 {									\
 	unsigned long lsize = cpu_##desc##_line_size();			\
 	unsigned long addr = start & ~(lsize - 1);			\
 	unsigned long aend = (end - 1) & ~(lsize - 1);			\
+	war								\
 									\
 	__##pfx##flush_prologue						\
 									\
 	while (1) {							\
+		war2							\
 		prot##cache_op(hitop, addr);				\
 		if (addr == aend)					\
 			break;						\
@@ -678,8 +746,8 @@ static inline void prot##extra##blast_##
 
 #ifndef CONFIG_EVA
 
-__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
-__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, , BCM4710_PROTECTED_FILL_TLB(addr); BCM4710_PROTECTED_FILL_TLB(aend);, BCM4710_DUMMY_RREG();)
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, , , )
 
 #else
@@ -716,14 +784,14 @@ __BUILD_PROT_BLAST_CACHE_RANGE(d, dcache
 __BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)
 
 #endif
 
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, , , )
 __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
-	protected_, loongson2_)
-__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
-__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
+	protected_, loongson2_, , )
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , , BCM4710_FILL_TLB(addr); BCM4710_FILL_TLB(aend);, BCM4710_DUMMY_RREG();)
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , , , )
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , , , )
 
 /* blast_inv_dcache_range */
-__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
-__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
+__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , , , BCM4710_DUMMY_RREG();)
+__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , , , )
 
 #endif /* _ASM_R4KCACHE_H */
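
The new trailing "war" macro argument above splices the BCM4710 workaround
into each generated blast function and expands to nothing everywhere else.
Hand-expanding __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I,
Hit_Invalidate_I, 32, , BCM4710_FILL_TLB(start);) for the page variant gives
roughly the following (prologue/epilogue wrappers omitted; a sketch, not the
literal preprocessor output):

	static inline void blast_icache32_page(unsigned long page)
	{
		unsigned long start = page;
		unsigned long end = page + PAGE_SIZE;

		/* war: touch the page so a TLB entry exists first */
		BCM4710_FILL_TLB(start);
		do {
			cache32_unroll32(start, Hit_Invalidate_I);
			start += 32 * 32;	/* 32 lines x 32 bytes */
		} while (start < end);
	}

CPUs that do not need the workaround pass an empty argument and get exactly
the code they had before the patch.
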
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -358,6 +358,10 @@
 		.macro	RESTORE_SP_AND_RET
 		LONG_L	sp, PT_R29(sp)
 		.set	arch=r4000
+#ifdef CONFIG_BCM47XX
+		nop
+		nop
+#endif
 		eret
 		.set	mips0
 		.endm
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -32,6 +32,10 @@
 NESTED(except_vec3_generic, 0, sp)
 	.set	push
 	.set	noat
+#ifdef CONFIG_BCM47XX
+	nop
+	nop
+#endif
 #if R5432_CP0_INTERRUPT_WAR
 	mfc0	k0, CP0_INDEX
 #endif
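
The nop padding in RESTORE_SP_AND_RET and at the head of except_vec3_generic
follows the same pattern as the dummy reads: the patch does not document the
exact hazard, but the two nops keep the pipeline slots immediately around
eret and the exception-vector entry empty on the affected core. Note the
padding is compiled in unconditionally under CONFIG_BCM47XX, so every
BCM47xx build pays two extra cycles per exception, not just BCM4710A0 parts.
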
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -39,6 +39,9 @@
 #include <asm/dma-coherence.h>
 #include <asm/mips-cm.h>
 
+/* For enabling BCM4710 cache workarounds */
+int bcm4710 = 0;
+
 /*
  * Special Variant of smp_call_function for use by cache functions:
  *
@@ -157,6 +160,9 @@ static void r4k_blast_dcache_user_page_s
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
+	if (bcm4710)
+		r4k_blast_dcache_user_page = blast_dcache_page;
+	else
 	if (dc_lsize == 0)
 		r4k_blast_dcache_user_page = (void *)cache_noop;
 	else if (dc_lsize == 16)
@@ -175,6 +181,9 @@ static void r4k_blast_dcache_page_indexe
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
+	if (bcm4710)
+		r4k_blast_dcache_page_indexed = blast_dcache_page_indexed;
+	else
 	if (dc_lsize == 0)
 		r4k_blast_dcache_page_indexed = (void *)cache_noop;
 	else if (dc_lsize == 16)
@@ -194,6 +203,9 @@ static void r4k_blast_dcache_setup(void)
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
+	if (bcm4710)
+		r4k_blast_dcache = blast_dcache;
+	else
 	if (dc_lsize == 0)
 		r4k_blast_dcache = (void *)cache_noop;
 	else if (dc_lsize == 16)
@@ -793,6 +805,8 @@ static void local_r4k_flush_cache_sigtra
 	unsigned long addr = (unsigned long) arg;
 
 	R4600_HIT_CACHEOP_WAR_IMPL;
+	BCM4710_PROTECTED_FILL_TLB(addr);
+	BCM4710_PROTECTED_FILL_TLB(addr + 4);
 	if (dc_lsize)
 		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
 	if (!cpu_icache_snoops_remote_store && scache_size)
@@ -1599,6 +1613,17 @@ static void coherency_setup(void)
 	 * silly idea of putting something else there ...
 	 */
 	switch (current_cpu_type()) {
+	case CPU_BMIPS3300:
+	{
+		u32 cm;
+		cm = read_c0_diag();
+		/* Enable icache */
+		cm |= (1 << 31);
+		/* Enable dcache */
+		cm |= (1 << 30);
+		write_c0_diag(cm);
+	}
+		break;
 	case CPU_R4000PC:
 	case CPU_R4000SC:
 	case CPU_R4000MC:
@@ -1645,6 +1670,15 @@ void r4k_cache_init(void)
 	extern void build_copy_page(void);
 	struct cpuinfo_mips *c = &current_cpu_data;
 
+	/* Check if special workarounds are required */
+#ifdef CONFIG_BCM47XX
+	if (current_cpu_data.cputype == CPU_BMIPS32 && (current_cpu_data.processor_id & 0xff) == 0) {
+		printk("Enabling BCM4710A0 cache workarounds.\n");
+		bcm4710 = 1;
+	} else
+#endif
+		bcm4710 = 0;
+
 	probe_pcache();
 	setup_scache();
@@ -1714,7 +1748,15 @@ void r4k_cache_init(void)
 	 */
 	local_r4k___flush_cache_all(NULL);
 
+#ifdef CONFIG_BCM47XX
+	{
+		static void (*_coherency_setup)(void);
+		_coherency_setup = (void (*)(void)) KSEG1ADDR(coherency_setup);
+		_coherency_setup();
+	}
+#else
 	coherency_setup();
+#endif
 	board_cache_error_setup = r4k_cache_error_setup;
 
 	/*
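
A note on the KSEG1ADDR() call in the last hunk: coherency_setup() changes
cacheability attributes while it runs, so on BCM47XX it is entered through
the uncached KSEG1 alias of its (cached) KSEG0 text address, executing the
same instructions with the caches bypassed. A stand-alone sketch of the
trick, mirroring the names in the hunk:

	/* Any KSEG0 (cached) kernel text address has a KSEG1 (uncached)
	 * alias onto the same physical memory; calling through the alias
	 * keeps coherency_setup() out of the I-cache it is manipulating. */
	void (*_coherency_setup)(void) =
		(void (*)(void)) KSEG1ADDR(coherency_setup);
	_coherency_setup();
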
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1296,6 +1296,9 @@ static void build_r4000_tlb_refill_handl
 		/* No need for uasm_i_nop */
 	}
 
+#ifdef CONFIG_BCM47XX
+	uasm_i_nop(&p);
+#endif
 #ifdef CONFIG_64BIT
 	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
 #else
@@ -1868,6 +1871,9 @@ build_r4000_tlbchange_handler_head(u32 *
 {
 	struct work_registers wr = build_get_work_registers(p);
 
+#ifdef CONFIG_BCM47XX
+	uasm_i_nop(p);
+#endif
 #ifdef CONFIG_64BIT
 	build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
 #else
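
Unlike the .S changes, the TLB refill and tlbchange handlers are assembled
at boot by uasm, so the BCM47XX padding has to be emitted rather than
written in assembly: uasm_i_nop() appends a single nop at each handler's
entry before the pmd-lookup code is generated. The pattern, sketched around
the emit pointer the hunks operate on (buffer name as in tlbex.c):

	u32 *p = tlb_handler;	/* instruction buffer being filled */
#ifdef CONFIG_BCM47XX
	uasm_i_nop(&p);		/* entry padding for the BCM4710 hazard */
#endif
	/* ... then build_get_pmde64() or the 32-bit equivalent, etc. */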