/*
 * Note: QEMU with HAX can be used to log DMA reads and writes;
 * see https://jcs.org/2018/11/12/vfio
 */
/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
13
14#include "qemu/osdep.h"
15
16#include "hw/virtio/virtio-gpu.h"
17#include "migration/blocker.h"
18#include "qapi/error.h"
19#include "qemu/error-report.h"
20#include "trace.h"
21
22void
23virtio_gpu_base_reset(VirtIOGPUBase *g)
24{
25 int i;
26
27 g->enable = 0;
28 g->use_virgl_renderer = false;
29
30 for (i = 0; i < g->conf.max_outputs; i++) {
31 g->scanout[i].resource_id = 0;
32 g->scanout[i].width = 0;
33 g->scanout[i].height = 0;
34 g->scanout[i].x = 0;
35 g->scanout[i].y = 0;
36 g->scanout[i].ds = NULL;
37 }
38}
39
40void
41virtio_gpu_base_fill_display_info(VirtIOGPUBase *g,
42 struct virtio_gpu_resp_display_info *dpy_info)
43{
44 int i;
45
46 for (i = 0; i < g->conf.max_outputs; i++) {
47 if (g->enabled_output_bitmask & (1 << i)) {
48 dpy_info->pmodes[i].enabled = 1;
49 dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
50 dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
51 }
52 }
53}
54
/* GraphicHwOps .invalidate callback: intentionally a no-op for virtio-gpu. */
static void virtio_gpu_invalidate_display(void *opaque)
{
}
58
/* GraphicHwOps .gfx_update callback: intentionally a no-op for virtio-gpu. */
static void virtio_gpu_update_display(void *opaque)
{
}
62
/* GraphicHwOps .text_update callback: text mode is not supported; no-op. */
static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}
66
/*
 * Record a pending event in the device config space, then notify the
 * guest of the config change so it re-reads events_read.
 */
static void virtio_gpu_notify_event(VirtIOGPUBase *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}
72
73static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
74{
75 VirtIOGPUBase *g = opaque;
76
77 if (idx >= g->conf.max_outputs) {
78 return -1;
79 }
80
81 g->req_state[idx].x = info->xoff;
82 g->req_state[idx].y = info->yoff;
83 g->req_state[idx].width = info->width;
84 g->req_state[idx].height = info->height;
85
86 if (info->width && info->height) {
87 g->enabled_output_bitmask |= (1 << idx);
88 } else {
89 g->enabled_output_bitmask &= ~(1 << idx);
90 }
91
92 /* send event to guest */
93 virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
94 return 0;
95}
96
97static void
98virtio_gpu_gl_block(void *opaque, bool block)
99{
100 VirtIOGPUBase *g = opaque;
101 VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_GET_CLASS(g);
102
103 if (block) {
104 g->renderer_blocked++;
105 } else {
106 g->renderer_blocked--;
107 }
108 assert(g->renderer_blocked >= 0);
109
110 if (g->renderer_blocked == 0) {
111 vgc->gl_unblock(g);
112 }
113}
114
/* Console (GraphicHwOps) callbacks shared by all virtio-gpu variants. */
const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
    .gl_block = virtio_gpu_gl_block,
};
122
123bool
124virtio_gpu_base_device_realize(DeviceState *qdev,
125 VirtIOHandleOutput ctrl_cb,
126 VirtIOHandleOutput cursor_cb,
127 Error **errp)
128{
129 VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
130 VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev);
131 int i;
132
133 if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
134 error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
135 return false;
136 }
137
138 g->use_virgl_renderer = false;
139 if (virtio_gpu_virgl_enabled(g->conf)) {
140 error_setg(&g->migration_blocker, "virgl is not yet migratable");
141 if (migrate_add_blocker(g->migration_blocker, errp) < 0) {
142 error_free(g->migration_blocker);
143 return false;
144 }
145 }
146
147 g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
148 virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
149 sizeof(struct virtio_gpu_config));
150
151 if (virtio_gpu_virgl_enabled(g->conf)) {
152 /* use larger control queue in 3d mode */
153 virtio_add_queue(vdev, 256, ctrl_cb);
154 virtio_add_queue(vdev, 16, cursor_cb);
155 } else {
156 virtio_add_queue(vdev, 64, ctrl_cb);
157 virtio_add_queue(vdev, 16, cursor_cb);
158 }
159
160 g->enabled_output_bitmask = 1;
161
162 g->req_state[0].width = g->conf.xres;
163 g->req_state[0].height = g->conf.yres;
164
165 for (i = 0; i < g->conf.max_outputs; i++) {
166 g->scanout[i].con =
167 graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
168 if (i > 0) {
169 dpy_gfx_replace_surface(g->scanout[i].con, NULL);
170 }
171 }
172
173 return true;
174}
175
176static uint64_t
177virtio_gpu_base_get_features(VirtIODevice *vdev, uint64_t features,
178 Error **errp)
179{
180 VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
181
182 if (virtio_gpu_virgl_enabled(g->conf)) {
183 features |= (1 << VIRTIO_GPU_F_VIRGL);
184 }
185 if (virtio_gpu_edid_enabled(g->conf)) {
186 features |= (1 << VIRTIO_GPU_F_EDID);
187 }
188
189 return features;
190}
191
192static void
193virtio_gpu_base_set_features(VirtIODevice *vdev, uint64_t features)
194{
195 static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
196 VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
197
198 g->use_virgl_renderer = ((features & virgl) == virgl);
199 trace_virtio_gpu_features(g->use_virgl_renderer);
200}
201
202static void
203virtio_gpu_base_device_unrealize(DeviceState *qdev)
204{
205 VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev);
206
207 if (g->migration_blocker) {
208 migrate_del_blocker(g->migration_blocker);
209 error_free(g->migration_blocker);
210 }
211}
212
/*
 * Class init for the abstract base type: wire up the common virtio
 * device hooks and device-class properties shared by all variants.
 */
static void
virtio_gpu_base_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->unrealize = virtio_gpu_base_device_unrealize;
    vdc->get_features = virtio_gpu_base_get_features;
    vdc->set_features = virtio_gpu_base_set_features;

    set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
    /* virtio-gpu devices cannot be hot-plugged */
    dc->hotpluggable = false;
}
226
/* QOM type registration: abstract base; concrete variants subclass it. */
static const TypeInfo virtio_gpu_base_info = {
    .name = TYPE_VIRTIO_GPU_BASE,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPUBase),
    .class_size = sizeof(VirtIOGPUBaseClass),
    .class_init = virtio_gpu_base_class_init,
    .abstract = true
};
235
/* Register the base type with the QOM type system at module init. */
static void
virtio_register_types(void)
{
    type_register_static(&virtio_gpu_base_info);
}

type_init(virtio_register_types)
243
/*
 * Compile-time checks that the virtio-gpu wire structures have the exact
 * sizes expected on the wire (2D command/response structs first, then
 * the 3D/virgl structs).  A mismatch here means the guest-visible ABI
 * has silently changed.
 */
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);