qemu with hax to log dma reads & writes (jcs.org/2018/11/12/vfio)
/*
 *  Software MMU support
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

/*
 * Generate inline load/store functions for all MMU modes (typically
 * at least _user and _kernel) as well as _data versions, for all data
 * sizes.
 *
 * Used by target op helpers.
 *
 * The syntax for the accessors is:
 *
 * load:  cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr)
 *        cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr)
 *        cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
 *
 * store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val)
 *        cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr)
 *        cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
 *
 * sign is:
 * (empty): for 32 and 64 bit sizes
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * end is:
 * (empty): for target native endian, or for 8 bit access
 *     _be: for forced big endian
 *     _le: for forced little endian
 *
 * mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx".
 * The "mmuidx" suffix carries an extra mmu_idx argument that specifies
 * the index to use; the "data" and "code" suffixes take the index from
 * cpu_mmu_index().
 */
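/*
 * Illustrative usage sketch, not part of the original header: a target op
 * helper might combine the accessors described above as follows.  "env",
 * "addr" and the use of GETPC() for the retaddr argument are assumptions
 * made for this example only.
 *
 *     uint32_t lo = cpu_ldl_le_data(env, addr);
 *     uint32_t hi = cpu_ldl_le_data_ra(env, addr + 4, GETPC());
 *     cpu_stq_data(env, addr + 8, ((uint64_t)hi << 32) | lo);
 */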
#ifndef CPU_LDST_H
#define CPU_LDST_H

#if defined(CONFIG_USER_ONLY)
/* sparc32plus has 64bit long but 32bit space address
 * this can make bad result with g2h() and h2g()
 */
#if TARGET_VIRT_ADDR_SPACE_BITS <= 32
typedef uint32_t abi_ptr;
#define TARGET_ABI_FMT_ptr "%x"
#else
typedef uint64_t abi_ptr;
#define TARGET_ABI_FMT_ptr "%"PRIx64
#endif

/* All direct uses of g2h and h2g need to go away for usermode softmmu.  */
#define g2h(x) ((void *)((unsigned long)(abi_ptr)(x) + guest_base))

#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
#define guest_addr_valid(x) (1)
#else
#define guest_addr_valid(x) ((x) <= GUEST_ADDR_MAX)
#endif
#define h2g_valid(x) guest_addr_valid((unsigned long)(x) - guest_base)

static inline int guest_range_valid(unsigned long start, unsigned long len)
{
    return len - 1 <= GUEST_ADDR_MAX && start <= GUEST_ADDR_MAX - len + 1;
}

#define h2g_nocheck(x) ({ \
    unsigned long __ret = (unsigned long)(x) - guest_base; \
    (abi_ptr)__ret; \
})

#define h2g(x) ({ \
    /* Check if given address fits target address space */ \
    assert(h2g_valid(x)); \
    h2g_nocheck(x); \
})
#else
typedef target_ulong abi_ptr;
#define TARGET_ABI_FMT_ptr TARGET_ABI_FMT_lx
#endif

uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr);

uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr);
uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr);

uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr);
uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr);

uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);

uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);

uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);
uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra);

void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val);

void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val);

void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val);

void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
                     uint32_t val, uintptr_t ra);

void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t ra);

void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint32_t val, uintptr_t ra);
void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr,
                        uint64_t val, uintptr_t ra);

#if defined(CONFIG_USER_ONLY)

extern __thread uintptr_t helper_retaddr;

static inline void set_helper_retaddr(uintptr_t ra)
{
    helper_retaddr = ra;
    /*
     * Ensure that this write is visible to the SIGSEGV handler that
     * may be invoked due to a subsequent invalid memory operation.
     */
    signal_barrier();
}

static inline void clear_helper_retaddr(void)
{
    /*
     * Ensure that previous memory operations have succeeded before
     * removing the data visible to the signal handler.
     */
    signal_barrier();
    helper_retaddr = 0;
}
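/*
 * Illustrative usage sketch, not part of the original header: user-only
 * helpers bracket direct host accesses with set/clear_helper_retaddr() so
 * that a SIGSEGV raised by the access can be attributed to the guest
 * instruction.  "addr", the use of GETPC() and the ldl_p()/g2h() access are
 * assumptions made for this example only.
 *
 *     set_helper_retaddr(GETPC());
 *     uint32_t val = ldl_p(g2h(addr));
 *     clear_helper_retaddr();
 */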

/*
 * Provide the same *_mmuidx_ra interface as for softmmu.
 * The mmu_idx argument is ignored.
 */

static inline uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                          int mmu_idx, uintptr_t ra)
{
    return cpu_ldub_data_ra(env, addr, ra);
}

static inline int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                     int mmu_idx, uintptr_t ra)
{
    return cpu_ldsb_data_ra(env, addr, ra);
}

static inline uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                             int mmu_idx, uintptr_t ra)
{
    return cpu_lduw_be_data_ra(env, addr, ra);
}

static inline int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                        int mmu_idx, uintptr_t ra)
{
    return cpu_ldsw_be_data_ra(env, addr, ra);
}

static inline uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                            int mmu_idx, uintptr_t ra)
{
    return cpu_ldl_be_data_ra(env, addr, ra);
}

static inline uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                            int mmu_idx, uintptr_t ra)
{
    return cpu_ldq_be_data_ra(env, addr, ra);
}

static inline uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                             int mmu_idx, uintptr_t ra)
{
    return cpu_lduw_le_data_ra(env, addr, ra);
}

static inline int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                        int mmu_idx, uintptr_t ra)
{
    return cpu_ldsw_le_data_ra(env, addr, ra);
}

static inline uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                            int mmu_idx, uintptr_t ra)
{
    return cpu_ldl_le_data_ra(env, addr, ra);
}

static inline uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                            int mmu_idx, uintptr_t ra)
{
    return cpu_ldq_le_data_ra(env, addr, ra);
}

static inline void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                     uint32_t val, int mmu_idx, uintptr_t ra)
{
    cpu_stb_data_ra(env, addr, val, ra);
}

static inline void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                        uint32_t val, int mmu_idx,
                                        uintptr_t ra)
{
    cpu_stw_be_data_ra(env, addr, val, ra);
}

static inline void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                        uint32_t val, int mmu_idx,
                                        uintptr_t ra)
{
    cpu_stl_be_data_ra(env, addr, val, ra);
}

static inline void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                        uint64_t val, int mmu_idx,
                                        uintptr_t ra)
{
    cpu_stq_be_data_ra(env, addr, val, ra);
}

static inline void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                        uint32_t val, int mmu_idx,
                                        uintptr_t ra)
{
    cpu_stw_le_data_ra(env, addr, val, ra);
}

static inline void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                        uint32_t val, int mmu_idx,
                                        uintptr_t ra)
{
    cpu_stl_le_data_ra(env, addr, val, ra);
}

static inline void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                        uint64_t val, int mmu_idx,
                                        uintptr_t ra)
{
    cpu_stq_le_data_ra(env, addr, val, ra);
}

#else

/* Needed for TCG_OVERSIZED_GUEST */
#include "tcg/tcg.h"

static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
{
#if TCG_OVERSIZED_GUEST
    return entry->addr_write;
#else
    return atomic_read(&entry->addr_write);
#endif
}

/* Find the TLB index corresponding to the mmu_idx + address pair.  */
static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
                                  target_ulong addr)
{
    uintptr_t size_mask = env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;

    return (addr >> TARGET_PAGE_BITS) & size_mask;
}

/* Find the TLB entry corresponding to the mmu_idx + address pair.  */
static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
                                     target_ulong addr)
{
    return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
}
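/*
 * Illustrative usage sketch, not part of the original header: this is
 * roughly how a softmmu fast path probes the TLB for a write before
 * falling back to a TLB fill.  "mmu_idx", "addr" and the use of tlb_hit()
 * are assumptions made for this example only.
 *
 *     uintptr_t index = tlb_index(env, mmu_idx, addr);
 *     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
 *     if (tlb_hit(tlb_addr_write(entry), addr)) {
 *         void *host = (void *)((uintptr_t)addr + entry->addend);
 *         ... direct host access through "host" ...
 *     }
 */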
uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                            int mmu_idx, uintptr_t ra);
int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                       int mmu_idx, uintptr_t ra);

uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                               int mmu_idx, uintptr_t ra);
int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                          int mmu_idx, uintptr_t ra);
uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                              int mmu_idx, uintptr_t ra);
uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                              int mmu_idx, uintptr_t ra);

uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                               int mmu_idx, uintptr_t ra);
int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                          int mmu_idx, uintptr_t ra);
uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                              int mmu_idx, uintptr_t ra);
uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                              int mmu_idx, uintptr_t ra);

void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                       int mmu_idx, uintptr_t retaddr);

void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr);
void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr);
void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
                          int mmu_idx, uintptr_t retaddr);

void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr);
void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                          int mmu_idx, uintptr_t retaddr);
void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
                          int mmu_idx, uintptr_t retaddr);

#endif /* defined(CONFIG_USER_ONLY) */
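/*
 * Illustrative usage sketch, not part of the original header: a target
 * helper can pass an explicit MMU index instead of relying on
 * cpu_mmu_index() implicitly, e.g. to perform a load under a specific
 * translation regime.  "addr" and the way the index is chosen are
 * assumptions made for this example only.
 *
 *     int mmu_idx = cpu_mmu_index(env, false);
 *     uint32_t word = cpu_ldl_be_mmuidx_ra(env, addr, mmu_idx, GETPC());
 */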

#ifdef TARGET_WORDS_BIGENDIAN
# define cpu_lduw_data        cpu_lduw_be_data
# define cpu_ldsw_data        cpu_ldsw_be_data
# define cpu_ldl_data         cpu_ldl_be_data
# define cpu_ldq_data         cpu_ldq_be_data
# define cpu_lduw_data_ra     cpu_lduw_be_data_ra
# define cpu_ldsw_data_ra     cpu_ldsw_be_data_ra
# define cpu_ldl_data_ra      cpu_ldl_be_data_ra
# define cpu_ldq_data_ra      cpu_ldq_be_data_ra
# define cpu_lduw_mmuidx_ra   cpu_lduw_be_mmuidx_ra
# define cpu_ldsw_mmuidx_ra   cpu_ldsw_be_mmuidx_ra
# define cpu_ldl_mmuidx_ra    cpu_ldl_be_mmuidx_ra
# define cpu_ldq_mmuidx_ra    cpu_ldq_be_mmuidx_ra
# define cpu_stw_data         cpu_stw_be_data
# define cpu_stl_data         cpu_stl_be_data
# define cpu_stq_data         cpu_stq_be_data
# define cpu_stw_data_ra      cpu_stw_be_data_ra
# define cpu_stl_data_ra      cpu_stl_be_data_ra
# define cpu_stq_data_ra      cpu_stq_be_data_ra
# define cpu_stw_mmuidx_ra    cpu_stw_be_mmuidx_ra
# define cpu_stl_mmuidx_ra    cpu_stl_be_mmuidx_ra
# define cpu_stq_mmuidx_ra    cpu_stq_be_mmuidx_ra
#else
# define cpu_lduw_data        cpu_lduw_le_data
# define cpu_ldsw_data        cpu_ldsw_le_data
# define cpu_ldl_data         cpu_ldl_le_data
# define cpu_ldq_data         cpu_ldq_le_data
# define cpu_lduw_data_ra     cpu_lduw_le_data_ra
# define cpu_ldsw_data_ra     cpu_ldsw_le_data_ra
# define cpu_ldl_data_ra      cpu_ldl_le_data_ra
# define cpu_ldq_data_ra      cpu_ldq_le_data_ra
# define cpu_lduw_mmuidx_ra   cpu_lduw_le_mmuidx_ra
# define cpu_ldsw_mmuidx_ra   cpu_ldsw_le_mmuidx_ra
# define cpu_ldl_mmuidx_ra    cpu_ldl_le_mmuidx_ra
# define cpu_ldq_mmuidx_ra    cpu_ldq_le_mmuidx_ra
# define cpu_stw_data         cpu_stw_le_data
# define cpu_stl_data         cpu_stl_le_data
# define cpu_stq_data         cpu_stq_le_data
# define cpu_stw_data_ra      cpu_stw_le_data_ra
# define cpu_stl_data_ra      cpu_stl_le_data_ra
# define cpu_stq_data_ra      cpu_stq_le_data_ra
# define cpu_stw_mmuidx_ra    cpu_stw_le_mmuidx_ra
# define cpu_stl_mmuidx_ra    cpu_stl_le_mmuidx_ra
# define cpu_stq_mmuidx_ra    cpu_stq_le_mmuidx_ra
#endif

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr);

static inline int cpu_ldsb_code(CPUArchState *env, abi_ptr addr)
{
    return (int8_t)cpu_ldub_code(env, addr);
}

static inline int cpu_ldsw_code(CPUArchState *env, abi_ptr addr)
{
    return (int16_t)cpu_lduw_code(env, addr);
}

/**
 * tlb_vaddr_to_host:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index to use for lookup
 *
 * Look up the specified guest virtual index in the TCG softmmu TLB.
 * If we can translate a host virtual address suitable for direct RAM
 * access, without causing a guest exception, then return it.
 * Otherwise (TLB entry is for an I/O access, guest software
 * TLB fill required, etc) return NULL.
 */
#ifdef CONFIG_USER_ONLY
static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                                      MMUAccessType access_type, int mmu_idx)
{
    return g2h(addr);
}
#else
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
                        MMUAccessType access_type, int mmu_idx);
#endif

#endif /* CPU_LDST_H */
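/*
 * Illustrative usage sketch, not part of the original header: a fast path
 * built on tlb_vaddr_to_host(), falling back to the byte accessors when the
 * page is not directly addressable (I/O access, TLB fill required, etc).
 * "buf", "len", "mmu_idx" and "ra" are assumptions made for this example.
 *
 *     uint8_t *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);
 *     if (host) {
 *         memcpy(buf, host, len);
 *     } else {
 *         for (size_t i = 0; i < len; i++) {
 *             buf[i] = cpu_ldub_mmuidx_ra(env, addr + i, mmu_idx, ra);
 *         }
 *     }
 */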