arch/mips/mm/page.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007  Maciej W. Rozycki
 * Copyright (C) 2008  Thiemo Seufer
 * Copyright (C) 2012  MIPS Technologies, Inc.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/proc_fs.h>

#include <asm/bugs.h>
#include <asm/cacheops.h>
#include <asm/cpu-type.h>
#include <asm/inst.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prefetch.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/cpu.h>
#include <asm/war.h>

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_dma.h>
#endif

#include <asm/uasm.h>

/* Registers used in the assembled routines. */
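/*
 * These are raw MIPS GPR numbers as uasm expects them: A0-A2 ($4-$6)
 * carry the page pointers and the loop bound, T0-T3 ($8-$11) hold the
 * data in flight in the copy loop, T9 ($25) is scratch for pg_addiu(),
 * and AT here is $2 (v0), a caller-clobbered register used to hold the
 * dummy-load address for the R4600 cache-op workaround.
 */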
#define ZERO 0
#define AT 2
#define A0 4
#define A1 5
#define A2 6
#define T0 8
#define T1 9
#define T2 10
#define T3 11
#define T9 25
#define RA 31

/* Handle labels (which must be positive integers). */
enum label_id {
        label_clear_nopref = 1,
        label_clear_pref,
        label_copy_nopref,
        label_copy_pref_both,
        label_copy_pref_store,
};

UASM_L_LA(_clear_nopref)
UASM_L_LA(_clear_pref)
UASM_L_LA(_copy_nopref)
UASM_L_LA(_copy_pref_both)
UASM_L_LA(_copy_pref_store)

/* We need one branch and therefore one relocation per target label. */
static struct uasm_label labels[5];
static struct uasm_reloc relocs[5];

#define cpu_is_r4600_v1_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002020)

static int pref_bias_clear_store;
static int pref_bias_copy_load;
static int pref_bias_copy_store;

static u32 pref_src_mode;
static u32 pref_dst_mode;

static int clear_word_size;
static int copy_word_size;

static int half_clear_loop_size;
static int half_copy_loop_size;

static int cache_line_size;
#define cache_line_mask() (cache_line_size - 1)

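/*
 * Emit "reg1 = reg2 + off".  Offsets beyond the 16-bit immediate range
 * are synthesized with a LUI/ADDIU pair; on 64-bit CPUs suffering from
 * the daddiu erratum (DADDI_WAR) the addition is routed through T9 and
 * a DADDU, which is not affected by the erratum.
 */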
static inline void
pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
{
        if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
                if (off > 0x7fff) {
                        uasm_i_lui(buf, T9, uasm_rel_hi(off));
                        uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
                } else
                        uasm_i_addiu(buf, T9, ZERO, off);
                uasm_i_daddu(buf, reg1, reg2, T9);
        } else {
                if (off > 0x7fff) {
                        uasm_i_lui(buf, T9, uasm_rel_hi(off));
                        uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
                        UASM_i_ADDU(buf, reg1, reg2, T9);
                } else
                        UASM_i_ADDIU(buf, reg1, reg2, off);
        }
}

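/*
 * Pick the per-instruction word sizes, prefetch hints and prefetch
 * distances for the current CPU.  Note that a 64-bit zero register is
 * enough for the clear path to use "sd $zero" even when full 64-bit
 * GPRs are not usable for copying, hence the separate
 * cpu_has_64bit_zero_reg test.
 */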
static void set_prefetch_parameters(void)
{
        if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
                clear_word_size = 8;
        else
                clear_word_size = 4;

        if (cpu_has_64bit_gp_regs)
                copy_word_size = 8;
        else
                copy_word_size = 4;

        /*
         * The prefetches used here use "streaming" hints, which cause the
         * copied data to be evicted from the cache sooner.  A page copy
         * often brings in far more data than will actually be used, so this
         * seems to make sense in terms of reducing cache pollution, but I've
         * no real performance data to back this up.
         */
        if (cpu_has_prefetch) {
                /*
                 * XXX: Most prefetch bias values in here are based on
                 * guesswork.
                 */
                cache_line_size = cpu_dcache_line_size();
                switch (current_cpu_type()) {
                case CPU_R5500:
                case CPU_TX49XX:
                        /* These processors support only the Pref_Load hint. */
                        pref_bias_copy_load = 256;
                        break;

                case CPU_R10000:
                case CPU_R12000:
                case CPU_R14000:
                        /*
                         * These values have been experimentally tuned for an
                         * Origin 200.
                         */
                        pref_bias_clear_store = 512;
                        pref_bias_copy_load = 256;
                        pref_bias_copy_store = 256;
                        pref_src_mode = Pref_LoadStreamed;
                        pref_dst_mode = Pref_StoreStreamed;
                        break;

                case CPU_SB1:
                case CPU_SB1A:
                        pref_bias_clear_store = 128;
                        pref_bias_copy_load = 128;
                        pref_bias_copy_store = 128;
                        /*
                         * SB1 pass1 Pref_LoadStreamed/Pref_StoreStreamed
                         * hints are broken.
                         */
                        if (current_cpu_type() == CPU_SB1 &&
                            (current_cpu_data.processor_id & 0xff) < 0x02) {
                                pref_src_mode = Pref_Load;
                                pref_dst_mode = Pref_Store;
                        } else {
                                pref_src_mode = Pref_LoadStreamed;
                                pref_dst_mode = Pref_StoreStreamed;
                        }
                        break;

                default:
                        pref_bias_clear_store = 128;
                        pref_bias_copy_load = 256;
                        pref_bias_copy_store = 128;
                        pref_src_mode = Pref_LoadStreamed;
                        pref_dst_mode = Pref_PrepareForStore;
                        break;
                }
        } else {
                if (cpu_has_cache_cdex_s)
                        cache_line_size = cpu_scache_line_size();
                else if (cpu_has_cache_cdex_p)
                        cache_line_size = cpu_dcache_line_size();
        }
        /*
         * Too much unrolling would overflow the space reserved for the
         * generated handlers between __clear_page_start/__clear_page_end
         * and __copy_page_start/__copy_page_end.
         */
        half_clear_loop_size = min(16 * clear_word_size,
                                   max(cache_line_size >> 1,
                                       4 * clear_word_size));
        half_copy_loop_size = min(16 * copy_word_size,
                                  max(cache_line_size >> 1,
                                      4 * copy_word_size));
}

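/* Emit one zeroing store to off(A0), doubleword-wide where possible. */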
static void build_clear_store(u32 **buf, int off)
{
        if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
                uasm_i_sd(buf, ZERO, off, A0);
        } else {
                uasm_i_sw(buf, ZERO, off, A0);
        }
}

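/*
 * Emit a store prefetch for off(A0), at most once per cache line.
 * Without a prefetch instruction, "create dirty exclusive" cache ops
 * do the job instead, but only when one loop pass covers exactly one
 * cache line; the R4600 workarounds pad or dummy-load around them.
 */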
static inline void build_clear_pref(u32 **buf, int off)
{
        if (off & cache_line_mask())
                return;

        if (pref_bias_clear_store) {
                uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
                            A0);
        } else if (cache_line_size == (half_clear_loop_size << 1)) {
                if (cpu_has_cache_cdex_s) {
                        uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
                } else if (cpu_has_cache_cdex_p) {
                        if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
                                uasm_i_nop(buf);
                                uasm_i_nop(buf);
                                uasm_i_nop(buf);
                                uasm_i_nop(buf);
                        }

                        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
                                uasm_i_lw(buf, ZERO, ZERO, AT);

                        uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
                }
        }
}

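/*
 * The generated handlers are assembled in place into space reserved
 * separately (in page-funcs.S) between these markers.
 */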
extern u32 __clear_page_start;
extern u32 __clear_page_end;
extern u32 __copy_page_start;
extern u32 __copy_page_end;

void build_clear_page(void)
{
        int off;
        u32 *buf = &__clear_page_start;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        int i;
        static atomic_t run_once = ATOMIC_INIT(0);

        if (atomic_xchg(&run_once, 1))
                return;

        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        set_prefetch_parameters();

        /*
         * This algorithm makes the following assumptions:
         *   - The prefetch bias is a multiple of 2 words.
         *   - The prefetch bias is less than one page.
         */
        BUG_ON(pref_bias_clear_store % (2 * clear_word_size));
        BUG_ON(PAGE_SIZE < pref_bias_clear_store);

        off = PAGE_SIZE - pref_bias_clear_store;
        if (off > 0xffff || !pref_bias_clear_store)
                pg_addiu(&buf, A2, A0, off);
        else
                uasm_i_ori(&buf, A2, A0, off);

        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
                uasm_i_lui(&buf, AT, 0xa000);

        off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
                                * cache_line_size : 0;
        while (off) {
                build_clear_pref(&buf, -off);
                off -= cache_line_size;
        }
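        /*
         * Main loop: each pass clears 2 * half_clear_loop_size bytes.
         * The first half stores at positive offsets from A0, then A0 is
         * advanced past the whole pass and the second half stores at
         * negative offsets; the loop branch is emitted one store early
         * so the final store sits in its delay slot.
         */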
        uasm_l_clear_pref(&l, buf);
        do {
                build_clear_pref(&buf, off);
                build_clear_store(&buf, off);
                off += clear_word_size;
        } while (off < half_clear_loop_size);
        pg_addiu(&buf, A0, A0, 2 * off);
        off = -off;
        do {
                build_clear_pref(&buf, off);
                if (off == -clear_word_size)
                        uasm_il_bne(&buf, &r, A0, A2, label_clear_pref);
                build_clear_store(&buf, off);
                off += clear_word_size;
        } while (off < 0);

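        /*
         * Trailer: clear the last pref_bias_clear_store bytes without
         * prefetching, so we never prefetch beyond the end of the page.
         */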
        if (pref_bias_clear_store) {
                pg_addiu(&buf, A2, A0, pref_bias_clear_store);
                uasm_l_clear_nopref(&l, buf);
                off = 0;
                do {
                        build_clear_store(&buf, off);
                        off += clear_word_size;
                } while (off < half_clear_loop_size);
                pg_addiu(&buf, A0, A0, 2 * off);
                off = -off;
                do {
                        if (off == -clear_word_size)
                                uasm_il_bne(&buf, &r, A0, A2,
                                            label_clear_nopref);
                        build_clear_store(&buf, off);
                        off += clear_word_size;
                } while (off < 0);
        }

        uasm_i_jr(&buf, RA);
        uasm_i_nop(&buf);

        BUG_ON(buf > &__clear_page_end);

        uasm_resolve_relocs(relocs, labels);

        pr_debug("Synthesized clear page handler (%u instructions).\n",
                 (u32)(buf - &__clear_page_start));

        pr_debug("\t.set push\n");
        pr_debug("\t.set noreorder\n");
        for (i = 0; i < (buf - &__clear_page_start); i++)
                pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]);
        pr_debug("\t.set pop\n");
}

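/*
 * The copy loop moves data through T0-T3: these helpers emit one
 * word-sized (doubleword-sized on 64-bit CPUs) load from off(A1) or
 * store to off(A0).
 */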
static void build_copy_load(u32 **buf, int reg, int off)
{
        if (cpu_has_64bit_gp_regs) {
                uasm_i_ld(buf, reg, off, A1);
        } else {
                uasm_i_lw(buf, reg, off, A1);
        }
}

static void build_copy_store(u32 **buf, int reg, int off)
{
        if (cpu_has_64bit_gp_regs) {
                uasm_i_sd(buf, reg, off, A0);
        } else {
                uasm_i_sw(buf, reg, off, A0);
        }
}

static inline void build_copy_load_pref(u32 **buf, int off)
{
        if (off & cache_line_mask())
                return;

        if (pref_bias_copy_load)
                uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
}

static inline void build_copy_store_pref(u32 **buf, int off)
{
        if (off & cache_line_mask())
                return;

        if (pref_bias_copy_store) {
                uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
                            A0);
        } else if (cache_line_size == (half_copy_loop_size << 1)) {
                if (cpu_has_cache_cdex_s) {
                        uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
                } else if (cpu_has_cache_cdex_p) {
                        if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
                                uasm_i_nop(buf);
                                uasm_i_nop(buf);
                                uasm_i_nop(buf);
                                uasm_i_nop(buf);
                        }

                        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
                                uasm_i_lw(buf, ZERO, ZERO, AT);

                        uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
                }
        }
}

void build_copy_page(void)
{
        int off;
        u32 *buf = &__copy_page_start;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        int i;
        static atomic_t run_once = ATOMIC_INIT(0);

        if (atomic_xchg(&run_once, 1))
                return;

        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        set_prefetch_parameters();

        /*
         * This algorithm makes the following assumptions:
         *   - All prefetch biases are multiples of 8 words.
         *   - The prefetch biases are less than one page.
         *   - The store prefetch bias isn't greater than the load
         *     prefetch bias.
         */
        BUG_ON(pref_bias_copy_load % (8 * copy_word_size));
        BUG_ON(pref_bias_copy_store % (8 * copy_word_size));
        BUG_ON(PAGE_SIZE < pref_bias_copy_load);
        BUG_ON(pref_bias_copy_store > pref_bias_copy_load);

        off = PAGE_SIZE - pref_bias_copy_load;
        if (off > 0xffff || !pref_bias_copy_load)
                pg_addiu(&buf, A2, A0, off);
        else
                uasm_i_ori(&buf, A2, A0, off);

        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
                uasm_i_lui(&buf, AT, 0xa000);

        off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
                                cache_line_size : 0;
        while (off) {
                build_copy_load_pref(&buf, -off);
                off -= cache_line_size;
        }
        off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
                                cache_line_size : 0;
        while (off) {
                build_copy_store_pref(&buf, -off);
                off -= cache_line_size;
        }
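        /*
         * Main loop, unrolled four words at a time, with load and store
         * prefetches interleaved.  As in build_clear_page(), the second
         * half of each pass runs at negative offsets and the loop branch
         * is emitted one store early to fill its delay slot.
         */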
        uasm_l_copy_pref_both(&l, buf);
        do {
                build_copy_load_pref(&buf, off);
                build_copy_load(&buf, T0, off);
                build_copy_load_pref(&buf, off + copy_word_size);
                build_copy_load(&buf, T1, off + copy_word_size);
                build_copy_load_pref(&buf, off + 2 * copy_word_size);
                build_copy_load(&buf, T2, off + 2 * copy_word_size);
                build_copy_load_pref(&buf, off + 3 * copy_word_size);
                build_copy_load(&buf, T3, off + 3 * copy_word_size);
                build_copy_store_pref(&buf, off);
                build_copy_store(&buf, T0, off);
                build_copy_store_pref(&buf, off + copy_word_size);
                build_copy_store(&buf, T1, off + copy_word_size);
                build_copy_store_pref(&buf, off + 2 * copy_word_size);
                build_copy_store(&buf, T2, off + 2 * copy_word_size);
                build_copy_store_pref(&buf, off + 3 * copy_word_size);
                build_copy_store(&buf, T3, off + 3 * copy_word_size);
                off += 4 * copy_word_size;
        } while (off < half_copy_loop_size);
        pg_addiu(&buf, A1, A1, 2 * off);
        pg_addiu(&buf, A0, A0, 2 * off);
        off = -off;
        do {
                build_copy_load_pref(&buf, off);
                build_copy_load(&buf, T0, off);
                build_copy_load_pref(&buf, off + copy_word_size);
                build_copy_load(&buf, T1, off + copy_word_size);
                build_copy_load_pref(&buf, off + 2 * copy_word_size);
                build_copy_load(&buf, T2, off + 2 * copy_word_size);
                build_copy_load_pref(&buf, off + 3 * copy_word_size);
                build_copy_load(&buf, T3, off + 3 * copy_word_size);
                build_copy_store_pref(&buf, off);
                build_copy_store(&buf, T0, off);
                build_copy_store_pref(&buf, off + copy_word_size);
                build_copy_store(&buf, T1, off + copy_word_size);
                build_copy_store_pref(&buf, off + 2 * copy_word_size);
                build_copy_store(&buf, T2, off + 2 * copy_word_size);
                build_copy_store_pref(&buf, off + 3 * copy_word_size);
                if (off == -(4 * copy_word_size))
                        uasm_il_bne(&buf, &r, A2, A0, label_copy_pref_both);
                build_copy_store(&buf, T3, off + 3 * copy_word_size);
                off += 4 * copy_word_size;
        } while (off < 0);

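        /*
         * Trailer for the region where source prefetches would run past
         * the end of the page: keep prefetching the destination only,
         * until pref_bias_copy_store bytes remain.
         */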
        if (pref_bias_copy_load - pref_bias_copy_store) {
                pg_addiu(&buf, A2, A0,
                         pref_bias_copy_load - pref_bias_copy_store);
                uasm_l_copy_pref_store(&l, buf);
                off = 0;
                do {
                        build_copy_load(&buf, T0, off);
                        build_copy_load(&buf, T1, off + copy_word_size);
                        build_copy_load(&buf, T2, off + 2 * copy_word_size);
                        build_copy_load(&buf, T3, off + 3 * copy_word_size);
                        build_copy_store_pref(&buf, off);
                        build_copy_store(&buf, T0, off);
                        build_copy_store_pref(&buf, off + copy_word_size);
                        build_copy_store(&buf, T1, off + copy_word_size);
                        build_copy_store_pref(&buf, off + 2 * copy_word_size);
                        build_copy_store(&buf, T2, off + 2 * copy_word_size);
                        build_copy_store_pref(&buf, off + 3 * copy_word_size);
                        build_copy_store(&buf, T3, off + 3 * copy_word_size);
                        off += 4 * copy_word_size;
                } while (off < half_copy_loop_size);
                pg_addiu(&buf, A1, A1, 2 * off);
                pg_addiu(&buf, A0, A0, 2 * off);
                off = -off;
                do {
                        build_copy_load(&buf, T0, off);
                        build_copy_load(&buf, T1, off + copy_word_size);
                        build_copy_load(&buf, T2, off + 2 * copy_word_size);
                        build_copy_load(&buf, T3, off + 3 * copy_word_size);
                        build_copy_store_pref(&buf, off);
                        build_copy_store(&buf, T0, off);
                        build_copy_store_pref(&buf, off + copy_word_size);
                        build_copy_store(&buf, T1, off + copy_word_size);
                        build_copy_store_pref(&buf, off + 2 * copy_word_size);
                        build_copy_store(&buf, T2, off + 2 * copy_word_size);
                        build_copy_store_pref(&buf, off + 3 * copy_word_size);
                        if (off == -(4 * copy_word_size))
                                uasm_il_bne(&buf, &r, A2, A0,
                                            label_copy_pref_store);
                        build_copy_store(&buf, T3, off + 3 * copy_word_size);
                        off += 4 * copy_word_size;
                } while (off < 0);
        }

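        /*
         * Final trailer: copy the last pref_bias_copy_store bytes
         * without any prefetching.
         */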
        if (pref_bias_copy_store) {
                pg_addiu(&buf, A2, A0, pref_bias_copy_store);
                uasm_l_copy_nopref(&l, buf);
                off = 0;
                do {
                        build_copy_load(&buf, T0, off);
                        build_copy_load(&buf, T1, off + copy_word_size);
                        build_copy_load(&buf, T2, off + 2 * copy_word_size);
                        build_copy_load(&buf, T3, off + 3 * copy_word_size);
                        build_copy_store(&buf, T0, off);
                        build_copy_store(&buf, T1, off + copy_word_size);
                        build_copy_store(&buf, T2, off + 2 * copy_word_size);
                        build_copy_store(&buf, T3, off + 3 * copy_word_size);
                        off += 4 * copy_word_size;
                } while (off < half_copy_loop_size);
                pg_addiu(&buf, A1, A1, 2 * off);
                pg_addiu(&buf, A0, A0, 2 * off);
                off = -off;
                do {
                        build_copy_load(&buf, T0, off);
                        build_copy_load(&buf, T1, off + copy_word_size);
                        build_copy_load(&buf, T2, off + 2 * copy_word_size);
                        build_copy_load(&buf, T3, off + 3 * copy_word_size);
                        build_copy_store(&buf, T0, off);
                        build_copy_store(&buf, T1, off + copy_word_size);
                        build_copy_store(&buf, T2, off + 2 * copy_word_size);
                        if (off == -(4 * copy_word_size))
                                uasm_il_bne(&buf, &r, A2, A0,
                                            label_copy_nopref);
                        build_copy_store(&buf, T3, off + 3 * copy_word_size);
                        off += 4 * copy_word_size;
                } while (off < 0);
        }

        uasm_i_jr(&buf, RA);
        uasm_i_nop(&buf);

        BUG_ON(buf > &__copy_page_end);

        uasm_resolve_relocs(relocs, labels);

        pr_debug("Synthesized copy page handler (%u instructions).\n",
                 (u32)(buf - &__copy_page_start));

        pr_debug("\t.set push\n");
        pr_debug("\t.set noreorder\n");
        for (i = 0; i < (buf - &__copy_page_start); i++)
                pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]);
        pr_debug("\t.set pop\n");
}

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
extern void clear_page_cpu(void *page);
extern void copy_page_cpu(void *to, void *from);

/*
 * Pad descriptors to cacheline, since each is exclusively owned by a
 * particular CPU.
 */
struct dmadscr {
        u64 dscr_a;
        u64 dscr_b;
        u64 pad_a;
        u64 pad_b;
} ____cacheline_aligned_in_smp page_descr[DM_NUM_CHANNELS];

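/*
 * Give each data mover channel a single-entry descriptor ring and
 * take it through reset to enabled.
 */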
void sb1_dma_init(void)
{
        int i;

        for (i = 0; i < DM_NUM_CHANNELS; i++) {
                const u64 base_val = CPHYSADDR((unsigned long)&page_descr[i]) |
                                     V_DM_DSCR_BASE_RINGSZ(1);
                void *base_reg = IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE));

                __raw_writeq(base_val, base_reg);
                __raw_writeq(base_val | M_DM_DSCR_BASE_RESET, base_reg);
                __raw_writeq(base_val | M_DM_DSCR_BASE_ENABL, base_reg);
        }
}

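/*
 * DMA-accelerated clear_page: queue one "zero memory" descriptor on
 * this CPU's data mover channel, then spin until the channel's
 * interrupt status bit signals completion.
 */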
void clear_page(void *page)
{
        u64 to_phys = CPHYSADDR((unsigned long)page);
        unsigned int cpu = smp_processor_id();

        /* If the page is not in KSEG0, fall back to the CPU routine. */
        if ((long)KSEGX((unsigned long)page) != (long)CKSEG0)
                return clear_page_cpu(page);

        page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM |
                                 M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
        page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
        __raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

        /*
         * We don't really want to busy-wait like this, but there is no
         * reliable alternative for detecting completion.
         */
        while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
                 & M_DM_DSCR_BASE_INTERRUPT))
                ;
        __raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}

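/*
 * DMA-accelerated copy_page: identical scheme, except the descriptor
 * carries a source address in dscr_b instead of the zero-memory flag.
 */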
void copy_page(void *to, void *from)
{
        u64 from_phys = CPHYSADDR((unsigned long)from);
        u64 to_phys = CPHYSADDR((unsigned long)to);
        unsigned int cpu = smp_processor_id();

        /* If either page is not in KSEG0, fall back to the CPU routine. */
        if ((long)KSEGX((unsigned long)to) != (long)CKSEG0
            || (long)KSEGX((unsigned long)from) != (long)CKSEG0)
                return copy_page_cpu(to, from);

        page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST |
                                 M_DM_DSCRA_INTERRUPT;
        page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
        __raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

        /*
         * We don't really want to busy-wait like this, but there is no
         * reliable alternative for detecting completion.
         */
        while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
                 & M_DM_DSCR_BASE_INTERRUPT))
                ;
        __raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}

#endif /* CONFIG_SIBYTE_DMA_PAGEOPS */