QEMU with hacks to log DMA reads & writes
jcs.org/2018/11/12/vfio
/*
 * Software MMU support
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

/*
 * Generate inline load/store functions for all MMU modes (typically
 * at least _user and _kernel) as well as _data versions, for all data
 * sizes.
 *
 * Used by target op helpers.
 *
 * The syntax for the accessors is:
 *
 * load:  cpu_ld{sign}{size}_{mmusuffix}(env, ptr)
 *        cpu_ld{sign}{size}_{mmusuffix}_ra(env, ptr, retaddr)
 *        cpu_ld{sign}{size}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
 *
 * store: cpu_st{size}_{mmusuffix}(env, ptr, val)
 *        cpu_st{size}_{mmusuffix}_ra(env, ptr, val, retaddr)
 *        cpu_st{size}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
 *
 * sign is:
 * (empty): for 32 and 64 bit sizes
 *    u    : unsigned
 *    s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx".
 * The "mmuidx" suffix carries an extra mmu_idx argument that specifies
 * the index to use; the "data" and "code" suffixes take the index from
 * cpu_mmu_index().
 */
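/*
 * Illustrative sketch (not part of this header): a target op helper
 * typically captures its return address once with GETPC() and passes
 * it to the _ra accessors so that a faulting access unwinds back to
 * the generated code.  'env', 'ptr' and 'mmu_idx' stand in for the
 * helper's own arguments.
 *
 *     uintptr_t ra = GETPC();
 *     uint32_t lo = cpu_lduw_data_ra(env, ptr, ra);
 *     cpu_stl_mmuidx_ra(env, ptr + 4, lo, mmu_idx, ra);
 */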
#ifndef CPU_LDST_H
#define CPU_LDST_H

#if defined(CONFIG_USER_ONLY)
/* sparc32plus has a 64-bit long but a 32-bit address space;
 * this can produce bad results from g2h() and h2g()
 */
#if TARGET_VIRT_ADDR_SPACE_BITS <= 32
typedef uint32_t abi_ptr;
#define TARGET_ABI_FMT_ptr "%x"
#else
typedef uint64_t abi_ptr;
#define TARGET_ABI_FMT_ptr "%"PRIx64
#endif

/* All direct uses of g2h and h2g need to go away for usermode softmmu.  */
#define g2h(x) ((void *)((unsigned long)(abi_ptr)(x) + guest_base))

#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
#define guest_addr_valid(x) (1)
#else
#define guest_addr_valid(x) ((x) <= GUEST_ADDR_MAX)
#endif
#define h2g_valid(x) guest_addr_valid((unsigned long)(x) - guest_base)

static inline int guest_range_valid(unsigned long start, unsigned long len)
{
    return len - 1 <= GUEST_ADDR_MAX && start <= GUEST_ADDR_MAX - len + 1;
}

#define h2g_nocheck(x) ({ \
    unsigned long __ret = (unsigned long)(x) - guest_base; \
    (abi_ptr)__ret; \
})

#define h2g(x) ({ \
    /* Check if given address fits target address space */ \
    assert(h2g_valid(x)); \
    h2g_nocheck(x); \
})
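/*
 * Illustrative sketch (user-mode only, not part of this header):
 * converting a hypothetical guest pointer 'guest_ptr' to a host
 * pointer and back.  h2g() asserts that the host pointer maps into
 * the guest address space; h2g_nocheck() skips that assertion.
 *
 *     void *host = g2h(guest_ptr);
 *     abi_ptr back = h2g(host);
 *     assert(back == guest_ptr);
 */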
#else
typedef target_ulong abi_ptr;
#define TARGET_ABI_FMT_ptr TARGET_ABI_FMT_lx
#endif

uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_lduw_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_data(CPUArchState *env, abi_ptr ptr);
uint64_t cpu_ldq_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_data(CPUArchState *env, abi_ptr ptr);

uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr);
uint32_t cpu_lduw_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr);
uint32_t cpu_ldl_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr);
uint64_t cpu_ldq_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr);
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr);
int cpu_ldsw_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr);

void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stw_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_data(CPUArchState *env, abi_ptr ptr, uint64_t val);

void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
                     uint32_t val, uintptr_t retaddr);
void cpu_stw_data_ra(CPUArchState *env, abi_ptr ptr,
                     uint32_t val, uintptr_t retaddr);
void cpu_stl_data_ra(CPUArchState *env, abi_ptr ptr,
                     uint32_t val, uintptr_t retaddr);
void cpu_stq_data_ra(CPUArchState *env, abi_ptr ptr,
                     uint64_t val, uintptr_t retaddr);

#if defined(CONFIG_USER_ONLY)

extern __thread uintptr_t helper_retaddr;

static inline void set_helper_retaddr(uintptr_t ra)
{
    helper_retaddr = ra;
    /*
     * Ensure that this write is visible to the SIGSEGV handler that
     * may be invoked due to a subsequent invalid memory operation.
     */
    signal_barrier();
}

static inline void clear_helper_retaddr(void)
{
    /*
     * Ensure that previous memory operations have succeeded before
     * removing the data visible to the signal handler.
     */
    signal_barrier();
    helper_retaddr = 0;
}
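
/*
 * Illustrative sketch (not part of this header): a user-only helper
 * that touches guest memory through host pointers would typically
 * bracket the accesses so the SIGSEGV handler can attribute a fault
 * to the helper.  'dest' and 'len' below are hypothetical arguments.
 *
 *     set_helper_retaddr(GETPC());
 *     memset(g2h(dest), 0, len);
 *     clear_helper_retaddr();
 */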

/*
 * Provide the same *_mmuidx_ra interface as for softmmu.
 * The mmu_idx argument is ignored.
 */

static inline uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                          int mmu_idx, uintptr_t ra)
{
    return cpu_ldub_data_ra(env, addr, ra);
}

static inline uint32_t cpu_lduw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                          int mmu_idx, uintptr_t ra)
{
    return cpu_lduw_data_ra(env, addr, ra);
}

static inline uint32_t cpu_ldl_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                         int mmu_idx, uintptr_t ra)
{
    return cpu_ldl_data_ra(env, addr, ra);
}

static inline uint64_t cpu_ldq_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                         int mmu_idx, uintptr_t ra)
{
    return cpu_ldq_data_ra(env, addr, ra);
}

static inline int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                     int mmu_idx, uintptr_t ra)
{
    return cpu_ldsb_data_ra(env, addr, ra);
}

static inline int cpu_ldsw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                     int mmu_idx, uintptr_t ra)
{
    return cpu_ldsw_data_ra(env, addr, ra);
}

static inline void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                     uint32_t val, int mmu_idx, uintptr_t ra)
{
    cpu_stb_data_ra(env, addr, val, ra);
}

static inline void cpu_stw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                     uint32_t val, int mmu_idx, uintptr_t ra)
{
    cpu_stw_data_ra(env, addr, val, ra);
}

static inline void cpu_stl_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                     uint32_t val, int mmu_idx, uintptr_t ra)
{
    cpu_stl_data_ra(env, addr, val, ra);
}

static inline void cpu_stq_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                     uint64_t val, int mmu_idx, uintptr_t ra)
{
    cpu_stq_data_ra(env, addr, val, ra);
}

#else

/* Needed for TCG_OVERSIZED_GUEST */
#include "tcg/tcg.h"

static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
{
#if TCG_OVERSIZED_GUEST
    return entry->addr_write;
#else
    return atomic_read(&entry->addr_write);
#endif
}

/* Find the TLB index corresponding to the mmu_idx + address pair.  */
static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
                                  target_ulong addr)
{
    uintptr_t size_mask = env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;

    return (addr >> TARGET_PAGE_BITS) & size_mask;
}

/* Find the TLB entry corresponding to the mmu_idx + address pair.  */
static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
                                     target_ulong addr)
{
    return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
}
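
/*
 * Illustrative sketch (not part of this header): probing the softmmu
 * TLB for a write hit before taking the slow path.  tlb_hit() is
 * declared in "exec/cpu-all.h"; 'addr' and 'mmu_idx' are hypothetical
 * inputs.
 *
 *     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
 *     if (tlb_hit(tlb_addr_write(entry), addr)) {
 *         fast path: the page is present and writable in the TLB
 *     } else {
 *         slow path, e.g. cpu_stl_mmuidx_ra(env, addr, val, mmu_idx, ra)
 *     }
 */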

uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                            int mmu_idx, uintptr_t ra);
uint32_t cpu_lduw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                            int mmu_idx, uintptr_t ra);
uint32_t cpu_ldl_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                           int mmu_idx, uintptr_t ra);
uint64_t cpu_ldq_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                           int mmu_idx, uintptr_t ra);

int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                       int mmu_idx, uintptr_t ra);
int cpu_ldsw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                       int mmu_idx, uintptr_t ra);

void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                       int mmu_idx, uintptr_t retaddr);
void cpu_stw_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                       int mmu_idx, uintptr_t retaddr);
void cpu_stl_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                       int mmu_idx, uintptr_t retaddr);
void cpu_stq_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
                       int mmu_idx, uintptr_t retaddr);

#endif /* defined(CONFIG_USER_ONLY) */

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr);

static inline int cpu_ldsb_code(CPUArchState *env, abi_ptr addr)
{
    return (int8_t)cpu_ldub_code(env, addr);
}

static inline int cpu_ldsw_code(CPUArchState *env, abi_ptr addr)
{
    return (int16_t)cpu_lduw_code(env, addr);
}

/**
 * tlb_vaddr_to_host:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index to use for lookup
 *
 * Look up the specified guest virtual address in the TCG softmmu TLB.
 * If we can translate a host virtual address suitable for direct RAM
 * access, without causing a guest exception, then return it.
 * Otherwise (TLB entry is for an I/O access, guest software
 * TLB fill required, etc) return NULL.
 */
#ifdef CONFIG_USER_ONLY
static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                                      MMUAccessType access_type, int mmu_idx)
{
    return g2h(addr);
}
#else
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                        MMUAccessType access_type, int mmu_idx);
#endif
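
/*
 * Illustrative sketch (not part of this header): a fast path for
 * direct RAM access with a fall-back to the full load path when the
 * page is not directly accessible.  'addr' and 'mmu_idx' are
 * hypothetical inputs; ldl_p() is the target-endian host-pointer load
 * from "exec/cpu-all.h".
 *
 *     void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);
 *     uint32_t val = host ? ldl_p(host)
 *                         : cpu_ldl_mmuidx_ra(env, addr, mmu_idx, GETPC());
 */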

#endif /* CPU_LDST_H */