QEMU with hacks to log DMA reads & writes
jcs.org/2018/11/12/vfio
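The post above patches QEMU so that every device access to guest physical memory gets logged. A minimal sketch of that kind of hack, assuming a QEMU tree of roughly this vintage where cpu_physical_memory_rw() in exec.c just forwards to address_space_rw(); the fprintf is the only added line, and the "DMA" tag is just something to grep for:

void cpu_physical_memory_rw(hwaddr addr, void *buf,
                            hwaddr len, bool is_write)
{
    /* added: log every access to guest physical memory
     * (HWADDR_PRIx / HWADDR_PRIu come from exec/hwaddr.h) */
    fprintf(stderr, "DMA %s: guest phys 0x%" HWADDR_PRIx " len %" HWADDR_PRIu "\n",
            is_write ? "write" : "read", addr, len);

    /* stock body: forward to the global guest-memory address space */
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}

Not every DMA funnels through this one wrapper; device models that call dma_memory_rw() or address_space_rw() directly would need the same line there. The declarations involved live in include/exec/cpu-common.h: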
#ifndef CPU_COMMON_H
#define CPU_COMMON_H

/* CPU interfaces that are target independent. */

#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif

/* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */
void qemu_init_cpu_list(void);
void cpu_list_lock(void);
void cpu_list_unlock(void);

void tcg_flush_softmmu_tlb(CPUState *cs);

#if !defined(CONFIG_USER_ONLY)

enum device_endian {
    DEVICE_NATIVE_ENDIAN,
    DEVICE_BIG_ENDIAN,
    DEVICE_LITTLE_ENDIAN,
};

#if defined(HOST_WORDS_BIGENDIAN)
#define DEVICE_HOST_ENDIAN DEVICE_BIG_ENDIAN
#else
#define DEVICE_HOST_ENDIAN DEVICE_LITTLE_ENDIAN
#endif

/* address in the RAM (different from a physical address) */
#if defined(CONFIG_XEN_BACKEND)
typedef uint64_t ram_addr_t;
# define RAM_ADDR_MAX UINT64_MAX
# define RAM_ADDR_FMT "%" PRIx64
#else
typedef uintptr_t ram_addr_t;
# define RAM_ADDR_MAX UINTPTR_MAX
# define RAM_ADDR_FMT "%" PRIxPTR
#endif

extern ram_addr_t ram_size;

/* memory API */

void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should not be used by devices. */
ram_addr_t qemu_ram_addr_from_host(void *ptr);
RAMBlock *qemu_ram_block_by_name(const char *name);
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *offset);
ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host);
void qemu_ram_set_idstr(RAMBlock *block, const char *name, DeviceState *dev);
void qemu_ram_unset_idstr(RAMBlock *block);
const char *qemu_ram_get_idstr(RAMBlock *rb);
void *qemu_ram_get_host_addr(RAMBlock *rb);
ram_addr_t qemu_ram_get_offset(RAMBlock *rb);
ram_addr_t qemu_ram_get_used_length(RAMBlock *rb);
bool qemu_ram_is_shared(RAMBlock *rb);
bool qemu_ram_is_uf_zeroable(RAMBlock *rb);
void qemu_ram_set_uf_zeroable(RAMBlock *rb);
bool qemu_ram_is_migratable(RAMBlock *rb);
void qemu_ram_set_migratable(RAMBlock *rb);
void qemu_ram_unset_migratable(RAMBlock *rb);

size_t qemu_ram_pagesize(RAMBlock *block);
size_t qemu_ram_pagesize_largest(void);

void cpu_physical_memory_rw(hwaddr addr, void *buf,
                            hwaddr len, bool is_write);
static inline void cpu_physical_memory_read(hwaddr addr,
                                            void *buf, hwaddr len)
{
    cpu_physical_memory_rw(addr, buf, len, false);
}
static inline void cpu_physical_memory_write(hwaddr addr,
                                             const void *buf, hwaddr len)
{
    cpu_physical_memory_rw(addr, (void *)buf, len, true);
}
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              bool is_write);
void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               bool is_write, hwaddr access_len);
void cpu_register_map_client(QEMUBH *bh);
void cpu_unregister_map_client(QEMUBH *bh);

bool cpu_physical_memory_is_io(hwaddr phys_addr);

/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free. This allows
 * batching which can make a major impact on performance when using
 * virtualization.
 */
void qemu_flush_coalesced_mmio_buffer(void);
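/* Example (illustrative, not part of this header): a device model marks a
 * range of its MMIO region as coalesced so the accelerator batches guest
 * writes and delivers them in bulk at the next flush; s, mmio_ops and the
 * offsets here are hypothetical, while the two calls are real and declared
 * in exec/memory.h:
 *
 *     memory_region_init_io(&s->mmio, OBJECT(s), &mmio_ops, s,
 *                           "mydev-mmio", 0x1000);
 *     memory_region_add_coalescing(&s->mmio, 0x100, 0x100);
 */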

void cpu_flush_icache_range(hwaddr start, hwaddr len);

typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length);
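/* Example (illustrative): walking every RAM block, e.g. to see where a
 * logged DMA address lands. Everything used here is declared above, and a
 * nonzero return from the callback stops the walk:
 *
 *     static int dump_rb(RAMBlock *rb, void *opaque)
 *     {
 *         fprintf(stderr, "%s: offset 0x" RAM_ADDR_FMT " used 0x" RAM_ADDR_FMT "\n",
 *                 qemu_ram_get_idstr(rb), qemu_ram_get_offset(rb),
 *                 qemu_ram_get_used_length(rb));
 *         return 0;
 *     }
 *
 *     qemu_ram_foreach_block(dump_rb, NULL);
 */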

#endif

#endif /* CPU_COMMON_H */
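For bulk transfers, device models often skip the copying path and use the map/unmap pair declared above, so a hack that only instruments cpu_physical_memory_rw() will miss those accesses. An illustrative helper (not from the QEMU tree; the name and the addr/buf/len/is_write parameters are hypothetical, mirroring cpu_physical_memory_rw) showing the usual pattern, including the fallback when the mapping comes back short because it hit a bounce buffer:

/* Map a guest-physical buffer for direct access; fall back to the copying
 * path on a short or failed mapping. Assumes the usual qemu/osdep.h
 * includes for memcpy. */
static void dma_access_sketch(hwaddr addr, void *buf, hwaddr len, bool is_write)
{
    hwaddr got = len;
    void *host = cpu_physical_memory_map(addr, &got, is_write);

    if (host && got == len) {
        if (is_write) {
            memcpy(host, buf, len);     /* device -> guest RAM */
        } else {
            memcpy(buf, host, len);     /* guest RAM -> device */
        }
        cpu_physical_memory_unmap(host, got, is_write, len);
    } else {
        if (host) {
            /* short mapping: hand it back untouched, access_len 0 */
            cpu_physical_memory_unmap(host, got, is_write, 0);
        }
        cpu_physical_memory_rw(addr, buf, len, is_write);
    }
}

cpu_register_map_client() fits in here too: when the bounce buffer is busy, the map call returns NULL, and the registered QEMUBH runs once the buffer frees up, which is when a caller would retry.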