/*	$NetBSD: cpufunc_asm_armv5_ec.S,v 1.1 2007/01/06 00:50:54 christos Exp $	*/

/*
 * Copyright (c) 2002, 2005 ARM Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * ARMv5 assembly functions for manipulating caches.
 * These routines can be used by any core that supports both the set/index
 * operations and the test and clean operations for efficiently cleaning the
 * entire DCache.  If a core does not have the test and clean operations, but
 * does have the set/index operations, use the routines in cpufunc_asm_armv5.S.
 * This source was derived from that file.
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD$");

/*
 * Function to set the MMU Translation Table Base register.
 *
 * We need to clean and flush the caches first, because they operate
 * on virtual addresses and those mappings are about to change.
 */
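/*
 * Calling convention, as I understand it from the surrounding kernel
 * (assumptions, not spelled out in this file): r0 holds the value for
 * the Translation Table Base register, i.e. the physical address of
 * the new L1 table, and the C side reaches this through the
 * cpu_functions hook, roughly cpu_setttb(new_l1_pa).
 */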
ENTRY(armv5_ec_setttb)
	/*
	 * Some other ARM ports save registers on the stack, call the
	 * idcache_wbinv_all function and then restore the registers from the
	 * stack before setting the TTB.  I observed that this caused a
	 * problem when the old and new translation table entries' buffering
	 * bits were different.  If I saved the registers in other registers
	 * or invalidated the caches when I returned from idcache_wbinv_all,
	 * it worked fine.  If not, I ended up executing at an invalid PC.
	 * For armv5_ec_setttb, the idcache_wbinv_all sequence is simple
	 * enough that I just do it inline and avoid the problem entirely.
	 */
	mcr	p15, 0, r0, c7, c5, 0	/* Invalidate ICache */
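	/*
	 * The MRC below targets r15, so the test-and-clean result lands
	 * in the CPSR flags: Z stays clear while dirty lines remain, and
	 * the bne loops until the whole DCache has been cleaned.
	 */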
1:	mrc	p15, 0, r15, c7, c14, 3	/* Test, clean and invalidate DCache */
	bne	1b			/* More to do? */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */

	mcr	p15, 0, r0, c2, c0, 0	/* load new TTB */

	mcr	p15, 0, r0, c8, c7, 0	/* invalidate I+D TLBs */
	RET

/*
 * Cache operations.  For the entire cache we use the enhanced cache
 * operations.
 */
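/*
 * A quick map of the CP15 c7 cache operations used below (standard
 * ARMv5 encodings; opc2 0 = entire cache, 1 = single entry by MVA):
 *	c7, c5		invalidate ICache
 *	c7, c6		invalidate DCache
 *	c7, c10		clean DCache (opc2 4 = drain write buffer)
 *	c7, c14		clean + invalidate DCache
 * The opc2 3 forms of c10/c14 are the "test and clean" operations
 * this file exists for.  Ranges of 0x4000 (16KB) bytes or more fall
 * back to a whole-cache operation, on the assumption that walking
 * that many lines one at a time would cost more.
 */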

ENTRY_NP(armv5_ec_icache_sync_range)
	ldr	ip, .Larmv5_ec_line_size
	cmp	r1, #0x4000
	bcs	.Larmv5_ec_icache_sync_all
	ldr	ip, [ip]
	sub	r1, r1, #1		/* Don't overrun */
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
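	/*
	 * The four instructions above round the range out to whole
	 * cache lines.  A worked example with an assumed 32-byte line:
	 *	r0 = 0x1234, r1 = 0x10	 bytes 0x1234..0x1243
	 *	r3 = 0x1f		 line mask
	 *	r2 = r0 & r3 = 0x14	 offset within the first line
	 *	r1 = (r1 - 1) + r2 = 0x23
	 *	r0 = r0 & ~r3 = 0x1220	 line-aligned start
	 * The loop then visits lines 0x1220 and 0x1240, exiting once
	 * subtracting the line size drives r1 negative.  The same
	 * preamble is repeated in every *_range function below.
	 */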
1:
	mcr	p15, 0, r0, c7, c5, 1	/* Invalidate I cache SE with VA */
	mcr	p15, 0, r0, c7, c10, 1	/* Clean D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	1b
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET
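/*
 * Typical use, assuming the usual cpu_functions wiring: after the
 * kernel writes instructions (a breakpoint, a copied-out trampoline),
 * it calls cpu_icache_sync_range(va, len) so the ICache refetches the
 * new code through the now-clean DCache.
 */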

ENTRY_NP(armv5_ec_icache_sync_all)
.Larmv5_ec_icache_sync_all:
	/*
	 * We assume that the code here can never be out of sync with the
	 * dcache, so that we can safely flush the Icache and fall through
	 * into the Dcache cleaning code.
	 */
	mcr	p15, 0, r0, c7, c5, 0	/* Flush I cache */
	/* Fall through to clean Dcache. */

.Larmv5_ec_dcache_wb:
1:
	mrc	p15, 0, r15, c7, c10, 3	/* Test and clean (don't invalidate) */
	bne	1b			/* More to do? */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET

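/*
 * Literal pool: address of arm_pdcache_line_size, the primary dcache
 * line size.  (My understanding is that the MD cache setup code fills
 * this variable in at boot from the CP15 cache type register.)
 */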
.Larmv5_ec_line_size:
	.word	_C_LABEL(arm_pdcache_line_size)

ENTRY(armv5_ec_dcache_wb_range)
	ldr	ip, .Larmv5_ec_line_size
	cmp	r1, #0x4000
	bcs	.Larmv5_ec_dcache_wb
	ldr	ip, [ip]
	sub	r1, r1, #1		/* Don't overrun */
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
1:
	mcr	p15, 0, r0, c7, c10, 1	/* Clean D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	1b
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET

ENTRY(armv5_ec_dcache_wbinv_range)
	ldr	ip, .Larmv5_ec_line_size
	cmp	r1, #0x4000
	bcs	.Larmv5_ec_dcache_wbinv_all
	ldr	ip, [ip]
	sub	r1, r1, #1		/* Don't overrun */
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
1:
	mcr	p15, 0, r0, c7, c14, 1	/* Purge D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	1b
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET

/*
 * Note, we must not blindly invalidate: lines outside the requested
 * range may still hold dirty data.  So when the range is too big to
 * walk line by line, we fall back to write-back-invalidate of the
 * entire cache rather than a pure invalidate.
 */
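/*
 * A typical (assumed) caller: invalidating a DMA receive buffer after
 * the device has written it, so the CPU reads fresh data from RAM
 * rather than stale cached lines.
 */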
ENTRY(armv5_ec_dcache_inv_range)
	ldr	ip, .Larmv5_ec_line_size
	cmp	r1, #0x4000
	bcs	.Larmv5_ec_dcache_wbinv_all
	ldr	ip, [ip]
	sub	r1, r1, #1		/* Don't overrun */
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
1:
	mcr	p15, 0, r0, c7, c6, 1	/* Invalidate D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	1b
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET

ENTRY(armv5_ec_idcache_wbinv_range)
	ldr	ip, .Larmv5_ec_line_size
	cmp	r1, #0x4000
	bcs	.Larmv5_ec_idcache_wbinv_all
	ldr	ip, [ip]
	sub	r1, r1, #1		/* Don't overrun */
	sub	r3, ip, #1
	and	r2, r0, r3
	add	r1, r1, r2
	bic	r0, r0, r3
1:
	mcr	p15, 0, r0, c7, c5, 1	/* Invalidate I cache SE with VA */
	mcr	p15, 0, r0, c7, c14, 1	/* Purge D cache SE with VA */
	add	r0, r0, ip
	subs	r1, r1, ip
	bpl	1b
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET

ENTRY_NP(armv5_ec_idcache_wbinv_all)
.Larmv5_ec_idcache_wbinv_all:
	/*
	 * We assume that the code here can never be out of sync with the
	 * dcache, so that we can safely flush the Icache and fall through
	 * into the Dcache purging code.
	 */
	mcr	p15, 0, r0, c7, c5, 0	/* Invalidate ICache */
	/* Fall through to purge Dcache. */

ENTRY(armv5_ec_dcache_wbinv_all)
.Larmv5_ec_dcache_wbinv_all:
1:	mrc	p15, 0, r15, c7, c14, 3	/* Test, clean and invalidate DCache */
	bne	1b			/* More to do? */
	mcr	p15, 0, r0, c7, c10, 4	/* drain the write buffer */
	RET