The open source OpenXR runtime

comp: Normalize naming (fini)

Part-of: <https://gitlab.freedesktop.org/monado/monado/-/merge_requests/2380>

+38 -37
+2 -2
src/xrt/compositor/main/comp_compositor.c
··· 415 415 comp_target_destroy(&c->target); 416 416 417 417 // Only depends on vk_bundle and shaders. 418 - render_resources_close(&c->nr); 418 + render_resources_fini(&c->nr); 419 419 420 420 // As long as vk_bundle is valid it's safe to call this function. 421 - render_shaders_close(&c->shaders, vk); 421 + render_shaders_fini(&c->shaders, vk); 422 422 423 423 if (vk->device != VK_NULL_HANDLE) { 424 424 vk->vkDestroyDevice(vk->device, NULL);
+4 -4
src/xrt/compositor/main/comp_renderer.c
··· 437 437 // Renderings 438 438 if (r->buffer_count > 0 && r->rtr_array != NULL) { 439 439 for (uint32_t i = 0; i < r->buffer_count; i++) { 440 - render_gfx_target_resources_close(&r->rtr_array[i]); 440 + render_gfx_target_resources_fini(&r->rtr_array[i]); 441 441 } 442 442 443 443 // Close the render pass used for rendering to the target. 444 - render_gfx_render_pass_close(&r->target_render_pass); 444 + render_gfx_render_pass_fini(&r->target_render_pass); 445 445 446 446 free(r->rtr_array); 447 447 r->rtr_array = NULL; ··· 825 825 // Do this after the layer renderer. 826 826 for (uint32_t i = 0; i < r->c->nr.view_count; i++) { 827 827 for (uint32_t k = 0; k < COMP_SCRATCH_NUM_IMAGES; k++) { 828 - render_gfx_target_resources_close(&r->scratch.views[i].targets[k]); 828 + render_gfx_target_resources_fini(&r->scratch.views[i].targets[k]); 829 829 } 830 830 } 831 831 832 832 // Do this after the layer renderer and target resources. 833 - render_gfx_render_pass_close(&r->scratch_render_pass); 833 + render_gfx_render_pass_fini(&r->scratch_render_pass); 834 834 } 835 835 836 836
+1 -1
src/xrt/compositor/render/render_buffer.c
··· 178 178 } 179 179 180 180 void 181 - render_buffer_close(struct vk_bundle *vk, struct render_buffer *buffer) 181 + render_buffer_fini(struct vk_bundle *vk, struct render_buffer *buffer) 182 182 { 183 183 D(Buffer, buffer->buffer); 184 184 DF(Memory, buffer->memory);
+7 -7
src/xrt/compositor/render/render_distortion.c
··· 272 272 return VK_SUCCESS; 273 273 274 274 err_buffers: 275 - render_buffer_close(vk, r_buffer); 276 - render_buffer_close(vk, g_buffer); 277 - render_buffer_close(vk, b_buffer); 275 + render_buffer_fini(vk, r_buffer); 276 + render_buffer_fini(vk, g_buffer); 277 + render_buffer_fini(vk, b_buffer); 278 278 279 279 return ret; 280 280 } ··· 359 359 */ 360 360 361 361 for (uint32_t i = 0; i < RENDER_DISTORTION_IMAGES_COUNT; i++) { 362 - render_buffer_close(vk, &bufs[i]); 362 + render_buffer_fini(vk, &bufs[i]); 363 363 } 364 364 365 365 return true; ··· 376 376 D(ImageView, image_views[i]); 377 377 D(Image, images[i]); 378 378 DF(Memory, device_memories[i]); 379 - render_buffer_close(vk, &bufs[i]); 379 + render_buffer_fini(vk, &bufs[i]); 380 380 } 381 381 382 382 return false; ··· 390 390 */ 391 391 392 392 void 393 - render_distortion_images_close(struct render_resources *r) 393 + render_distortion_images_fini(struct render_resources *r) 394 394 { 395 395 struct vk_bundle *vk = r->vk; 396 396 ··· 408 408 bool pre_rotate) 409 409 { 410 410 if (r->distortion.image_views[0] == VK_NULL_HANDLE || pre_rotate != r->distortion.pre_rotated) { 411 - render_distortion_images_close(r); 411 + render_distortion_images_fini(r); 412 412 return render_distortion_buffer_init(r, vk, xdev, pre_rotate); 413 413 } 414 414
+2 -2
src/xrt/compositor/render/render_gfx.c
··· 884 884 } 885 885 886 886 void 887 - render_gfx_render_pass_close(struct render_gfx_render_pass *rgrp) 887 + render_gfx_render_pass_fini(struct render_gfx_render_pass *rgrp) 888 888 { 889 889 struct vk_bundle *vk = rgrp->r->vk; 890 890 ··· 940 940 } 941 941 942 942 void 943 - render_gfx_target_resources_close(struct render_gfx_target_resources *rtr) 943 + render_gfx_target_resources_fini(struct render_gfx_target_resources *rtr) 944 944 { 945 945 struct vk_bundle *vk = vk_from_rtr(rtr); 946 946
+8 -7
src/xrt/compositor/render/render_interface.h
··· 168 168 * Unload and cleanup shaders. 169 169 */ 170 170 void 171 - render_shaders_close(struct render_shaders *s, struct vk_bundle *vk); 171 + render_shaders_fini(struct render_shaders *s, struct vk_bundle *vk); 172 172 173 173 174 174 /* ··· 224 224 * Frees all resources that this buffer has, but does not free the buffer itself. 225 225 */ 226 226 void 227 - render_buffer_close(struct vk_bundle *vk, struct render_buffer *buffer); 227 + render_buffer_fini(struct vk_bundle *vk, struct render_buffer *buffer); 228 228 229 229 /*! 230 230 * Maps the memory, sets render_buffer::mapped to the memory. ··· 575 575 * @public @memberof render_resources 576 576 */ 577 577 void 578 - render_resources_close(struct render_resources *r); 578 + render_resources_fini(struct render_resources *r); 579 579 580 580 /*! 581 581 * Creates or recreates the compute distortion textures if necessary. 582 582 * 583 + * @see render_distortion_images_fini 583 584 * @public @memberof render_resources 584 585 */ 585 586 bool ··· 595 596 * @public @memberof render_resources 596 597 */ 597 598 void 598 - render_distortion_images_close(struct render_resources *r); 599 + render_distortion_images_fini(struct render_resources *r); 599 600 600 601 /*! 
601 602 * Returns the timestamps for when the latest GPU work started and stopped that ··· 670 671 * @public @memberof render_scratch_images 671 672 */ 672 673 void 673 - render_scratch_images_close(struct render_resources *r, struct render_scratch_images *rsi); 674 + render_scratch_images_fini(struct render_resources *r, struct render_scratch_images *rsi); 674 675 675 676 676 677 /* ··· 763 764 * @public @memberof render_gfx_render_pass 764 765 */ 765 766 void 766 - render_gfx_render_pass_close(struct render_gfx_render_pass *rgrp); 767 + render_gfx_render_pass_fini(struct render_gfx_render_pass *rgrp); 767 768 768 769 769 770 /* ··· 813 814 * @public @memberof render_gfx_target_resources 814 815 */ 815 816 void 816 - render_gfx_target_resources_close(struct render_gfx_target_resources *rtr); 817 + render_gfx_target_resources_fini(struct render_gfx_target_resources *rtr); 817 818 818 819 819 820 /*
+12 -12
src/xrt/compositor/render/render_resources.c
··· 1083 1083 } 1084 1084 1085 1085 void 1086 - render_resources_close(struct render_resources *r) 1086 + render_resources_fini(struct render_resources *r) 1087 1087 { 1088 1088 // We were never initialised or already closed, always safe to call this function. 1089 1089 if (r->vk == NULL) { ··· 1101 1101 D(Image, r->mock.color.image); 1102 1102 DF(Memory, r->mock.color.memory); 1103 1103 1104 - render_buffer_close(vk, &r->gfx.shared_ubo); 1104 + render_buffer_fini(vk, &r->gfx.shared_ubo); 1105 1105 D(DescriptorPool, r->gfx.ubo_and_src_descriptor_pool); 1106 1106 1107 1107 D(DescriptorSetLayout, r->gfx.layer.shared.descriptor_set_layout); ··· 1111 1111 D(PipelineLayout, r->mesh.pipeline_layout); 1112 1112 D(PipelineCache, r->pipeline_cache); 1113 1113 D(QueryPool, r->query_pool); 1114 - render_buffer_close(vk, &r->mesh.vbo); 1115 - render_buffer_close(vk, &r->mesh.ibo); 1114 + render_buffer_fini(vk, &r->mesh.vbo); 1115 + render_buffer_fini(vk, &r->mesh.ibo); 1116 1116 for (uint32_t i = 0; i < r->view_count; ++i) { 1117 - render_buffer_close(vk, &r->mesh.ubos[i]); 1117 + render_buffer_fini(vk, &r->mesh.ubos[i]); 1118 1118 } 1119 1119 1120 1120 D(DescriptorPool, r->compute.descriptor_pool); ··· 1131 1131 1132 1132 D(Pipeline, r->compute.clear.pipeline); 1133 1133 1134 - render_distortion_images_close(r); 1135 - render_buffer_close(vk, &r->compute.clear.ubo); 1134 + render_distortion_images_fini(r); 1135 + render_buffer_fini(vk, &r->compute.clear.ubo); 1136 1136 for (uint32_t i = 0; i < r->view_count; i++) { 1137 - render_buffer_close(vk, &r->compute.layer.ubos[i]); 1137 + render_buffer_fini(vk, &r->compute.layer.ubos[i]); 1138 1138 } 1139 - render_buffer_close(vk, &r->compute.distortion.ubo); 1139 + render_buffer_fini(vk, &r->compute.distortion.ubo); 1140 1140 1141 1141 vk_cmd_pool_destroy(vk, &r->distortion_pool); 1142 1142 D(CommandPool, r->cmd_pool); ··· 1255 1255 return true; 1256 1256 } 1257 1257 1258 - render_scratch_images_close(r, rsi); 1258 + render_scratch_images_fini(r, rsi); 1259 1259 1260 1260 for (uint32_t i = 0; i < r->view_count; i++) { 1261 1261 bret = create_scratch_image_and_view( // ··· 1268 1268 } 1269 1269 1270 1270 if (!bret) { 1271 - render_scratch_images_close(r, rsi); 1271 + render_scratch_images_fini(r, rsi); 1272 1272 return false; 1273 1273 } 1274 1274 ··· 1278 1278 } 1279 1279 1280 1280 void 1281 - render_scratch_images_close(struct render_resources *r, struct render_scratch_images *rsi) 1281 + render_scratch_images_fini(struct render_resources *r, struct render_scratch_images *rsi) 1282 1282 { 1283 1283 struct vk_bundle *vk = r->vk; 1284 1284
+2 -2
src/xrt/compositor/render/render_shaders.c
··· 61 61 &s->SHADER); /* out */ \ 62 62 if (ret != VK_SUCCESS) { \ 63 63 VK_ERROR(vk, "Failed to load shader '" #SHADER "'"); \ 64 - render_shaders_close(s, vk); \ 64 + render_shaders_fini(s, vk); \ 65 65 return false; \ 66 66 } \ 67 67 VK_NAME_SHADER_MODULE(vk, s->SHADER, #SHADER); \ ··· 126 126 } 127 127 128 128 void 129 - render_shaders_close(struct render_shaders *s, struct vk_bundle *vk) 129 + render_shaders_fini(struct render_shaders *s, struct vk_bundle *vk) 130 130 { 131 131 D(ShaderModule, s->blit_comp); 132 132 D(ShaderModule, s->clear_comp);