diff --git a/src/xrt/compositor/main/comp_compositor.c b/src/xrt/compositor/main/comp_compositor.c
index 2c0c337f49b993cbd6c923a6489a25938a3d7607..e13132820ed7bd3c3a8b6a47c4845e4dcd9bd495 100644
--- a/src/xrt/compositor/main/comp_compositor.c
+++ b/src/xrt/compositor/main/comp_compositor.c
@@ -559,7 +559,7 @@ compositor_destroy(struct xrt_compositor *xc)
 	comp_resources_close(c, &c->nr);
 
 	// As long as vk_bundle is valid it's safe to call this function.
-	comp_shaders_close(&c->vk, &c->shaders);
+	comp_shaders_close(vk, &c->shaders);
 
 	// Does NULL checking.
 	comp_target_destroy(&c->target);
@@ -645,21 +645,10 @@ compositor_check_and_prepare_xdev(struct comp_compositor *c, struct xrt_device *
  *
  */
 
-#define GET_DEV_PROC(c, name) (PFN_##name) c->vk.vkGetDeviceProcAddr(c->vk.device, #name);
-#define GET_INS_PROC(c, name) (PFN_##name) c->vk.vkGetInstanceProcAddr(c->vk.instance, #name);
-#define GET_DEV_PROC(c, name) (PFN_##name) c->vk.vkGetDeviceProcAddr(c->vk.device, #name);
-
 // NOLINTNEXTLINE // don't remove the forward decl.
 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
 vkGetInstanceProcAddr(VkInstance instance, const char *pName);
 
-static VkResult
-find_get_instance_proc_addr(struct comp_compositor *c)
-{
-	//! @todo Do any library loading here.
-	return vk_get_loader_functions(&c->vk, vkGetInstanceProcAddr);
-}
-
 
 // If any of these lists are updated, please also update the appropriate column
 // in `vulkan-extensions.md`
@@ -809,6 +798,7 @@ select_instances_extensions(struct comp_compositor *c, const char ***out_exts, u
 static VkResult
 create_instance(struct comp_compositor *c)
 {
+	struct vk_bundle *vk = &c->vk;
 	const char **instance_extensions;
 	uint32_t num_extensions;
 	VkResult ret;
@@ -833,13 +823,13 @@ create_instance(struct comp_compositor *c)
 	    .ppEnabledExtensionNames = instance_extensions,
 	};
 
-	ret = c->vk.vkCreateInstance(&instance_info, NULL, &c->vk.instance);
+	ret = vk->vkCreateInstance(&instance_info, NULL, &vk->instance);
 	if (ret != VK_SUCCESS) {
 		CVK_ERROR(c, "vkCreateInstance", "Failed to create Vulkan instance", ret);
 		return ret;
 	}
 
-	ret = vk_get_instance_functions(&c->vk);
+	ret = vk_get_instance_functions(vk);
 	if (ret != VK_SUCCESS) {
 		CVK_ERROR(c, "vk_get_instance_functions", "Failed to get Vulkan instance functions.", ret);
 		return ret;
@@ -873,13 +863,15 @@ get_device_uuid(struct vk_bundle *vk, struct comp_compositor *c, int gpu_index,
 static bool
 compositor_init_vulkan(struct comp_compositor *c)
 {
+	struct vk_bundle *vk = &c->vk;
 	VkResult ret;
 
-	c->vk.ll = c->settings.log_level;
+	vk->ll = c->settings.log_level;
 
-	ret = find_get_instance_proc_addr(c);
+	//! @todo Do any library loading here.
+	ret = vk_get_loader_functions(vk, vkGetInstanceProcAddr);
 	if (ret != VK_SUCCESS) {
-		CVK_ERROR(c, "find_get_instance_proc_addr", "Failed to get VkInstance get process address.", ret);
+		CVK_ERROR(c, "vk_get_loader_functions", "Failed to get VkInstance get process address.", ret);
 		return false;
 	}
 
@@ -904,7 +896,7 @@ compositor_init_vulkan(struct comp_compositor *c)
 	// No other way then to try to see if realtime is available.
 	for (size_t i = 0; i < ARRAY_SIZE(prios); i++) {
 		ret = vk_create_device(                //
-		    &c->vk,                            //
+		    vk,                                //
 		    c->settings.selected_gpu_index,    //
 		    prios[i],                          // global_priority
 		    required_device_extensions,        //
@@ -928,17 +920,17 @@ compositor_init_vulkan(struct comp_compositor *c)
 		return false;
 	}
 
-	ret = vk_init_mutex(&c->vk);
+	ret = vk_init_mutex(vk);
 	if (ret != VK_SUCCESS) {
 		CVK_ERROR(c, "vk_init_mutex", "Failed to init mutex.", ret);
 		return false;
 	}
 
-	c->settings.selected_gpu_index = c->vk.physical_device_index;
+	c->settings.selected_gpu_index = vk->physical_device_index;
 
 	// store physical device UUID for compositor in settings
 	if (c->settings.selected_gpu_index >= 0) {
-		if (get_device_uuid(&c->vk, c, c->settings.selected_gpu_index, c->settings.selected_gpu_deviceUUID)) {
+		if (get_device_uuid(vk, c, c->settings.selected_gpu_index, c->settings.selected_gpu_deviceUUID)) {
 			char uuid_str[XRT_GPU_UUID_SIZE * 3 + 1] = {0};
 			for (int i = 0; i < XRT_GPU_UUID_SIZE; i++) {
 				sprintf(uuid_str + i * 3, "%02x ", c->settings.selected_gpu_deviceUUID[i]);
@@ -956,7 +948,7 @@ compositor_init_vulkan(struct comp_compositor *c)
 
 	// store physical device UUID suggested to clients in settings
 	if (c->settings.client_gpu_index >= 0) {
-		if (get_device_uuid(&c->vk, c, c->settings.client_gpu_index, c->settings.client_gpu_deviceUUID)) {
+		if (get_device_uuid(vk, c, c->settings.client_gpu_index, c->settings.client_gpu_deviceUUID)) {
 			char uuid_str[XRT_GPU_UUID_SIZE * 3 + 1] = {0};
 			for (int i = 0; i < XRT_GPU_UUID_SIZE; i++) {
 				sprintf(uuid_str + i * 3, "%02x ", c->settings.client_gpu_deviceUUID[i]);
@@ -967,7 +959,7 @@ compositor_init_vulkan(struct comp_compositor *c)
 		}
 	}
 
-	ret = vk_init_cmd_pool(&c->vk);
+	ret = vk_init_cmd_pool(vk);
 	if (ret != VK_SUCCESS) {
 		CVK_ERROR(c, "vk_init_cmd_pool", "Failed to init command pool.", ret);
 		return false;
@@ -1295,7 +1287,9 @@ compositor_init_swapchain(struct comp_compositor *c)
 static bool
 compositor_init_shaders(struct comp_compositor *c)
 {
-	return comp_shaders_load(&c->vk, &c->shaders);
+	struct vk_bundle *vk = &c->vk;
+
+	return comp_shaders_load(vk, &c->shaders);
 }
 
 static bool
@@ -1312,8 +1306,10 @@ compositor_init_renderer(struct comp_compositor *c)
 bool
 comp_is_format_supported(struct comp_compositor *c, VkFormat format)
 {
+	struct vk_bundle *vk = &c->vk;
 	VkFormatProperties prop;
-	c->vk.vkGetPhysicalDeviceFormatProperties(c->vk.physical_device, format, &prop);
+
+	vk->vkGetPhysicalDeviceFormatProperties(vk->physical_device, format, &prop);
 
 	// This is a fairly crude way of checking support,
 	// but works well enough.
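The same mechanical refactor runs through every hunk in this patch: functions that used to reach through c->vk (or self->vk) on each access now cache the bundle pointer once in a local `struct vk_bundle *vk` and route every call through it. A minimal sketch of the before/after shape, reusing vkDeviceWaitIdle from the hunks above (the wait_idle_* function names are illustrative, not from the tree):

/* Before: every use spells out the full path through the compositor. */
static void
wait_idle_before(struct comp_compositor *c)
{
	c->vk.vkDeviceWaitIdle(c->vk.device);
}

/* After: cache the bundle pointer once, then go through it. */
static void
wait_idle_after(struct comp_compositor *c)
{
	struct vk_bundle *vk = &c->vk;

	vk->vkDeviceWaitIdle(vk->device);
}

Besides being shorter, this names the bundle's location exactly once per function, so a later change to where the bundle lives becomes a one-line edit in each function.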
diff --git a/src/xrt/compositor/main/comp_layer.c b/src/xrt/compositor/main/comp_layer.c
index bb57ca70f04ab0ac794724c99e1a1b203053ba93..0ea9278d40dfd75d02b0de27cc892ea6b02b5087 100644
--- a/src/xrt/compositor/main/comp_layer.c
+++ b/src/xrt/compositor/main/comp_layer.c
@@ -51,18 +51,25 @@ _update_mvp_matrix(struct comp_render_layer *self, uint32_t eye, const struct xr
 static bool
 _init_ubos(struct comp_render_layer *self)
 {
+	struct vk_bundle *vk = self->vk;
+
 	VkBufferUsageFlags usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
 	VkMemoryPropertyFlags properties = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
 
 	for (uint32_t i = 0; i < 2; i++) {
 		math_matrix_4x4_identity(&self->transformation[i].mvp);
 
-		if (!vk_buffer_init(self->vk, sizeof(struct layer_transformation), usage, properties,
-		                    &self->transformation_ubos[i].handle, &self->transformation_ubos[i].memory))
+		if (!vk_buffer_init(vk,                                    //
+		                    sizeof(struct layer_transformation),  //
+		                    usage,                                //
+		                    properties,                           //
+		                    &self->transformation_ubos[i].handle, //
+		                    &self->transformation_ubos[i].memory)) {
 			return false;
+		}
 
-		VkResult res = self->vk->vkMapMemory(self->vk->device, self->transformation_ubos[i].memory, 0,
-		                                     VK_WHOLE_SIZE, 0, &self->transformation_ubos[i].data);
+		VkResult res = vk->vkMapMemory(vk->device, self->transformation_ubos[i].memory, 0, VK_WHOLE_SIZE, 0,
+		                               &self->transformation_ubos[i].data);
 		vk_check_error("vkMapMemory", res, false);
 
 		memcpy(self->transformation_ubos[i].data, &self->transformation[i],
@@ -75,15 +82,22 @@ _init_ubos(struct comp_render_layer *self)
 static bool
 _init_equirect1_ubo(struct comp_render_layer *self)
 {
+	struct vk_bundle *vk = self->vk;
+
 	VkBufferUsageFlags usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
 	VkMemoryPropertyFlags properties = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
 
-	if (!vk_buffer_init(self->vk, sizeof(struct layer_transformation), usage, properties,
-	                    &self->equirect1_ubo.handle, &self->equirect1_ubo.memory))
+	if (!vk_buffer_init(vk,                                   //
+	                    sizeof(struct layer_transformation), //
+	                    usage,                               //
+	                    properties,                          //
+	                    &self->equirect1_ubo.handle,         //
+	                    &self->equirect1_ubo.memory)) {
 		return false;
+	}
 
-	VkResult res = self->vk->vkMapMemory(self->vk->device, self->equirect1_ubo.memory, 0, VK_WHOLE_SIZE, 0,
-	                                     &self->equirect1_ubo.data);
+	VkResult res =
+	    vk->vkMapMemory(vk->device, self->equirect1_ubo.memory, 0, VK_WHOLE_SIZE, 0, &self->equirect1_ubo.data);
 	vk_check_error("vkMapMemory", res, false);
 
 	memcpy(self->equirect1_ubo.data, &self->equirect1_data, sizeof(struct layer_equirect1_data));
@@ -95,15 +109,22 @@ _init_equirect1_ubo(struct comp_render_layer *self)
 static bool
 _init_equirect2_ubo(struct comp_render_layer *self)
 {
+	struct vk_bundle *vk = self->vk;
+
 	VkBufferUsageFlags usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
 	VkMemoryPropertyFlags properties = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
 
-	if (!vk_buffer_init(self->vk, sizeof(struct layer_transformation), usage, properties,
-	                    &self->equirect2_ubo.handle, &self->equirect2_ubo.memory))
+	if (!vk_buffer_init(vk,                                   //
+	                    sizeof(struct layer_transformation), //
+	                    usage,                               //
+	                    properties,                          //
+	                    &self->equirect2_ubo.handle,         //
+	                    &self->equirect2_ubo.memory)) {
 		return false;
+	}
 
-	VkResult res = self->vk->vkMapMemory(self->vk->device, self->equirect2_ubo.memory, 0, VK_WHOLE_SIZE, 0,
-	                                     &self->equirect2_ubo.data);
+	VkResult res =
+	    vk->vkMapMemory(vk->device, self->equirect2_ubo.memory, 0, VK_WHOLE_SIZE, 0, &self->equirect2_ubo.data);
vk_check_error("vkMapMemory", res, false); memcpy(self->equirect2_ubo.data, &self->equirect2_data, sizeof(struct layer_equirect2_data)); @@ -159,6 +180,8 @@ _update_descriptor(struct comp_render_layer *self, static void _update_descriptor_equirect(struct comp_render_layer *self, VkDescriptorSet set, VkBuffer buffer) { + struct vk_bundle *vk = self->vk; + VkWriteDescriptorSet *sets = (VkWriteDescriptorSet[]){ { .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, @@ -176,16 +199,19 @@ _update_descriptor_equirect(struct comp_render_layer *self, VkDescriptorSet set, }, }; - self->vk->vkUpdateDescriptorSets(self->vk->device, 1, sets, 0, NULL); + vk->vkUpdateDescriptorSets(vk->device, 1, sets, 0, NULL); } #endif void comp_layer_update_descriptors(struct comp_render_layer *self, VkSampler sampler, VkImageView image_view) { - for (uint32_t eye = 0; eye < 2; eye++) - _update_descriptor(self, self->vk, self->descriptor_sets[eye], self->transformation_ubos[eye].handle, - sampler, image_view); + struct vk_bundle *vk = self->vk; + + for (uint32_t eye = 0; eye < 2; eye++) { + _update_descriptor(self, vk, self->descriptor_sets[eye], self->transformation_ubos[eye].handle, sampler, + image_view); + } } #ifdef XRT_FEATURE_OPENXR_LAYER_EQUIRECT1 @@ -225,10 +251,12 @@ comp_layer_update_stereo_descriptors(struct comp_render_layer *self, VkImageView left_image_view, VkImageView right_image_view) { - _update_descriptor(self, self->vk, self->descriptor_sets[0], self->transformation_ubos[0].handle, left_sampler, + struct vk_bundle *vk = self->vk; + + _update_descriptor(self, vk, self->descriptor_sets[0], self->transformation_ubos[0].handle, left_sampler, left_image_view); - _update_descriptor(self, self->vk, self->descriptor_sets[1], self->transformation_ubos[1].handle, right_sampler, + _update_descriptor(self, vk, self->descriptor_sets[1], self->transformation_ubos[1].handle, right_sampler, right_image_view); } @@ -268,17 +296,15 @@ _init(struct comp_render_layer *self, }, }; - if (!vk_init_descriptor_pool(self->vk, pool_sizes, ARRAY_SIZE(pool_sizes), 3, &self->descriptor_pool)) + if (!vk_init_descriptor_pool(vk, pool_sizes, ARRAY_SIZE(pool_sizes), 3, &self->descriptor_pool)) return false; for (uint32_t eye = 0; eye < 2; eye++) - if (!vk_allocate_descriptor_sets(self->vk, self->descriptor_pool, 1, layout, - &self->descriptor_sets[eye])) + if (!vk_allocate_descriptor_sets(vk, self->descriptor_pool, 1, layout, &self->descriptor_sets[eye])) return false; #if defined(XRT_FEATURE_OPENXR_LAYER_EQUIRECT1) || defined(XRT_FEATURE_OPENXR_LAYER_EQUIRECT2) - if (!vk_allocate_descriptor_sets(self->vk, self->descriptor_pool, 1, layout_equirect, - &self->descriptor_equirect)) + if (!vk_allocate_descriptor_sets(vk, self->descriptor_pool, 1, layout_equirect, &self->descriptor_equirect)) return false; #endif return true; @@ -294,6 +320,8 @@ comp_layer_draw(struct comp_render_layer *self, const struct xrt_matrix_4x4 *vp_world, const struct xrt_matrix_4x4 *vp_eye) { + struct vk_bundle *vk = self->vk; + if (eye == 0 && (self->visibility & XRT_LAYER_EYE_VISIBILITY_LEFT_BIT) == 0) { return; } @@ -302,7 +330,7 @@ comp_layer_draw(struct comp_render_layer *self, return; } - self->vk->vkCmdBindPipeline(cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline); + vk->vkCmdBindPipeline(cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline); // Is this layer viewspace or not. const struct xrt_matrix_4x4 *vp = self->view_space ? 
vp_eye : vp_world; @@ -326,18 +354,18 @@ comp_layer_draw(struct comp_render_layer *self, self->descriptor_equirect, }; - self->vk->vkCmdBindDescriptorSets(cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout, 0, 2, - sets, 0, NULL); + vk->vkCmdBindDescriptorSets(cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout, 0, 2, sets, 0, + NULL); } else { - self->vk->vkCmdBindDescriptorSets(cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout, 0, 1, - &self->descriptor_sets[eye], 0, NULL); + vk->vkCmdBindDescriptorSets(cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout, 0, 1, + &self->descriptor_sets[eye], 0, NULL); } VkDeviceSize offsets[1] = {0}; - self->vk->vkCmdBindVertexBuffers(cmd_buffer, 0, 1, &vertex_buffer->handle, &offsets[0]); + vk->vkCmdBindVertexBuffers(cmd_buffer, 0, 1, &vertex_buffer->handle, &offsets[0]); - self->vk->vkCmdDraw(cmd_buffer, vertex_buffer->size, 1, 0, 0); + vk->vkCmdDraw(cmd_buffer, vertex_buffer->size, 1, 0, 0); } // clang-format off @@ -444,11 +472,17 @@ _init_cylinder_vertex_buffer(struct comp_render_layer *self) VkBufferUsageFlags usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; VkMemoryPropertyFlags properties = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; - if (!vk_buffer_init(vk, sizeof(float) * ARRAY_SIZE(cylinder_vertices), usage, properties, - &self->cylinder.vertex_buffer.handle, &self->cylinder.vertex_buffer.memory)) + if (!vk_buffer_init(vk, // + sizeof(float) * ARRAY_SIZE(cylinder_vertices), // + usage, // + properties, // + &self->cylinder.vertex_buffer.handle, // + &self->cylinder.vertex_buffer.memory)) { return false; + } self->cylinder.vertex_buffer.size = CYLINDER_VERTICES; + return true; } @@ -465,8 +499,9 @@ comp_layer_create(struct vk_bundle *vk, VkDescriptorSetLayout *layout, VkDescrip _init(q, vk, layout, layout_equirect); - if (!_init_cylinder_vertex_buffer(q)) + if (!_init_cylinder_vertex_buffer(q)) { return NULL; + } return q; } @@ -474,18 +509,21 @@ comp_layer_create(struct vk_bundle *vk, VkDescriptorSetLayout *layout, VkDescrip void comp_layer_destroy(struct comp_render_layer *self) { - for (uint32_t eye = 0; eye < 2; eye++) - vk_buffer_destroy(&self->transformation_ubos[eye], self->vk); + struct vk_bundle *vk = self->vk; + + for (uint32_t eye = 0; eye < 2; eye++) { + vk_buffer_destroy(&self->transformation_ubos[eye], vk); + } #ifdef XRT_FEATURE_OPENXR_LAYER_EQUIRECT1 - vk_buffer_destroy(&self->equirect1_ubo, self->vk); + vk_buffer_destroy(&self->equirect1_ubo, vk); #endif #ifdef XRT_FEATURE_OPENXR_LAYER_EQUIRECT2 - vk_buffer_destroy(&self->equirect2_ubo, self->vk); + vk_buffer_destroy(&self->equirect2_ubo, vk); #endif - self->vk->vkDestroyDescriptorPool(self->vk->device, self->descriptor_pool, NULL); + vk->vkDestroyDescriptorPool(vk->device, self->descriptor_pool, NULL); - vk_buffer_destroy(&self->cylinder.vertex_buffer, self->vk); + vk_buffer_destroy(&self->cylinder.vertex_buffer, vk); free(self); } diff --git a/src/xrt/compositor/main/comp_renderer.c b/src/xrt/compositor/main/comp_renderer.c index 12fce0af98a12a8a11cb7094dc6c89d95182d67c..7e5a3464fa3be8ef1bbc6df690928bbda0e5f37f 100644 --- a/src/xrt/compositor/main/comp_renderer.c +++ b/src/xrt/compositor/main/comp_renderer.c @@ -103,10 +103,11 @@ static void renderer_wait_gpu_idle(struct comp_renderer *r) { COMP_TRACE_MARKER(); + struct vk_bundle *vk = &r->c->vk; - os_mutex_lock(&r->c->vk.queue_mutex); - r->c->vk.vkDeviceWaitIdle(r->c->vk.device); - os_mutex_unlock(&r->c->vk.queue_mutex); + os_mutex_lock(&vk->queue_mutex); + 
vk->vkDeviceWaitIdle(vk->device); + os_mutex_unlock(&vk->queue_mutex); } static void @@ -453,7 +454,7 @@ renderer_create(struct comp_renderer *r, struct comp_compositor *c) struct vk_bundle *vk = &r->c->vk; - vk->vkGetDeviceQueue(vk->device, r->c->vk.queue_family_index, 0, &r->queue); + vk->vkGetDeviceQueue(vk->device, vk->queue_family_index, 0, &r->queue); renderer_init_semaphores(r); // Try to early-allocate these, in case we can. diff --git a/src/xrt/compositor/main/comp_swapchain.c b/src/xrt/compositor/main/comp_swapchain.c index 49ac6f0c169a47df60de894b5262e49ac0069ae1..0bfd7fa58b7b3ca76c0fcac0132a1b64bf607837 100644 --- a/src/xrt/compositor/main/comp_swapchain.c +++ b/src/xrt/compositor/main/comp_swapchain.c @@ -132,6 +132,7 @@ do_post_create_vulkan_setup(struct comp_compositor *c, const struct xrt_swapchain_create_info *info, struct comp_swapchain *sc) { + struct vk_bundle *vk = &c->vk; uint32_t num_images = sc->vkic.num_images; VkCommandBuffer cmd_buffer; @@ -172,9 +173,9 @@ do_post_create_vulkan_setup(struct comp_compositor *c, sc->images[i].views.no_alpha = U_TYPED_ARRAY_CALLOC(VkImageView, info->array_size); sc->images[i].array_size = info->array_size; - vk_create_sampler(&c->vk, VK_SAMPLER_ADDRESS_MODE_REPEAT, &sc->images[i].repeat_sampler); + vk_create_sampler(vk, VK_SAMPLER_ADDRESS_MODE_REPEAT, &sc->images[i].repeat_sampler); - vk_create_sampler(&c->vk, VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, &sc->images[i].sampler); + vk_create_sampler(vk, VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, &sc->images[i].sampler); for (uint32_t layer = 0; layer < info->array_size; ++layer) { @@ -186,9 +187,9 @@ do_post_create_vulkan_setup(struct comp_compositor *c, .layerCount = 1, }; - vk_create_view(&c->vk, sc->vkic.images[i].handle, (VkFormat)info->format, subresource_range, + vk_create_view(vk, sc->vkic.images[i].handle, (VkFormat)info->format, subresource_range, &sc->images[i].views.alpha[layer]); - vk_create_view_swizzle(&c->vk, sc->vkic.images[i].handle, format, subresource_range, components, + vk_create_view_swizzle(vk, sc->vkic.images[i].handle, format, subresource_range, components, &sc->images[i].views.no_alpha[layer]); } } @@ -205,7 +206,7 @@ do_post_create_vulkan_setup(struct comp_compositor *c, * */ - vk_init_cmd_buffer(&c->vk, &cmd_buffer); + vk_init_cmd_buffer(vk, &cmd_buffer); VkImageSubresourceRange subresource_range = { .aspectMask = aspect, @@ -216,12 +217,12 @@ do_post_create_vulkan_setup(struct comp_compositor *c, }; for (uint32_t i = 0; i < num_images; i++) { - vk_set_image_layout(&c->vk, cmd_buffer, sc->vkic.images[i].handle, 0, VK_ACCESS_SHADER_READ_BIT, + vk_set_image_layout(vk, cmd_buffer, sc->vkic.images[i].handle, 0, VK_ACCESS_SHADER_READ_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, subresource_range); } - vk_submit_cmd_buffer(&c->vk, cmd_buffer); + vk_submit_cmd_buffer(vk, cmd_buffer); } static void @@ -282,6 +283,7 @@ comp_swapchain_create(struct xrt_compositor *xc, struct xrt_swapchain **out_xsc) { struct comp_compositor *c = comp_compositor(xc); + struct vk_bundle *vk = &c->vk; uint32_t num_images = 3; VkResult ret; @@ -306,7 +308,7 @@ comp_swapchain_create(struct xrt_compositor *xc, vk_color_format_string(info->format)); // Use the image helper to allocate the images. 
-	ret = vk_ic_allocate(&c->vk, info, num_images, &sc->vkic);
+	ret = vk_ic_allocate(vk, info, num_images, &sc->vkic);
 	if (ret == VK_ERROR_FEATURE_NOT_PRESENT) {
 		free(sc);
 		return XRT_ERROR_SWAPCHAIN_FLAG_VALID_BUT_UNSUPPORTED;
@@ -321,7 +323,7 @@ comp_swapchain_create(struct xrt_compositor *xc,
 
 	xrt_graphics_buffer_handle_t handles[ARRAY_SIZE(sc->vkic.images)];
 
-	vk_ic_get_handles(&c->vk, &sc->vkic, ARRAY_SIZE(handles), handles);
+	vk_ic_get_handles(vk, &sc->vkic, ARRAY_SIZE(handles), handles);
 	for (uint32_t i = 0; i < sc->vkic.num_images; i++) {
 		sc->base.images[i].handle = handles[i];
 		sc->base.images[i].size = sc->vkic.images[i].size;
@@ -343,6 +345,7 @@ comp_swapchain_import(struct xrt_compositor *xc,
                       struct xrt_swapchain **out_xsc)
 {
 	struct comp_compositor *c = comp_compositor(xc);
+	struct vk_bundle *vk = &c->vk;
 	VkResult ret;
 
 	struct comp_swapchain *sc = alloc_and_set_funcs(c, num_images);
@@ -350,7 +353,7 @@ comp_swapchain_import(struct xrt_compositor *xc,
 	COMP_DEBUG(c, "CREATE FROM NATIVE %p %dx%d", (void *)sc, info->width, info->height);
 
 	// Use the image helper to get the images.
-	ret = vk_ic_from_natives(&c->vk, info, native_images, num_images, &sc->vkic);
+	ret = vk_ic_from_natives(vk, info, native_images, num_images, &sc->vkic);
 	if (ret != VK_SUCCESS) {
 		return XRT_ERROR_VULKAN;
 	}
diff --git a/src/xrt/compositor/main/comp_target_swapchain.c b/src/xrt/compositor/main/comp_target_swapchain.c
index 4999b8bd3ad6f3118e58cec4ea2a4651fa751a0c..8b87d456a4cc2e761def500b690d8861dc2080b6 100644
--- a/src/xrt/compositor/main/comp_target_swapchain.c
+++ b/src/xrt/compositor/main/comp_target_swapchain.c
@@ -586,8 +586,7 @@ static VkResult
 comp_target_swapchain_update_timings(struct comp_target *ct)
 {
 	struct comp_target_swapchain *cts = (struct comp_target_swapchain *)ct;
-	struct comp_compositor *c = ct->c;
-	struct vk_bundle *vk = &c->vk;
+	struct vk_bundle *vk = get_vk(cts);
 
 	if (!vk->has_GOOGLE_display_timing) {
 		return VK_SUCCESS;
@@ -598,21 +597,21 @@ comp_target_swapchain_update_timings(struct comp_target *ct)
 	}
 
 	uint32_t count = 0;
-	c->vk.vkGetPastPresentationTimingGOOGLE( //
-	    vk->device,                          //
-	    cts->swapchain.handle,               //
-	    &count,                              //
-	    NULL);                               //
+	vk->vkGetPastPresentationTimingGOOGLE( //
+	    vk->device,                        //
+	    cts->swapchain.handle,             //
+	    &count,                            //
+	    NULL);                             //
 	if (count <= 0) {
 		return VK_SUCCESS;
 	}
 
 	VkPastPresentationTimingGOOGLE *timings = U_TYPED_ARRAY_CALLOC(VkPastPresentationTimingGOOGLE, count);
-	c->vk.vkGetPastPresentationTimingGOOGLE( //
-	    vk->device,                          //
-	    cts->swapchain.handle,               //
-	    &count,                              //
-	    timings);                            //
+	vk->vkGetPastPresentationTimingGOOGLE( //
+	    vk->device,                        //
+	    cts->swapchain.handle,             //
+	    &count,                            //
+	    timings);                          //
 
 	for (uint32_t i = 0; i < count; i++) {
 		u_ft_info(cts->uft, //
diff --git a/src/xrt/compositor/main/comp_window_direct_nvidia.c b/src/xrt/compositor/main/comp_window_direct_nvidia.c
index b2c1eb035cb02c2aeaacfb8c7916ccdca488e077..8dd28d38e9bd0d09e91b5cc4df00d88d738184a6 100644
--- a/src/xrt/compositor/main/comp_window_direct_nvidia.c
+++ b/src/xrt/compositor/main/comp_window_direct_nvidia.c
@@ -163,9 +163,10 @@ static bool
 comp_window_direct_nvidia_init(struct comp_target *ct)
 {
 	struct comp_window_direct_nvidia *w_direct = (struct comp_window_direct_nvidia *)ct;
+	struct vk_bundle *vk = &ct->c->vk;
 
 	// Sanity check.
-	if (ct->c->vk.instance == VK_NULL_HANDLE) {
+	if (vk->instance == VK_NULL_HANDLE) {
 		COMP_ERROR(ct->c, "Vulkan not initialized before NVIDIA init!");
 		return false;
 	}
diff --git a/src/xrt/compositor/main/comp_window_vk_display.c b/src/xrt/compositor/main/comp_window_vk_display.c
index 9653b8db0055ec03b2708d64576e61b2d765ff03..fc0dcecafede830d2e283850af342c5eaf08c8d9 100644
--- a/src/xrt/compositor/main/comp_window_vk_display.c
+++ b/src/xrt/compositor/main/comp_window_vk_display.c
@@ -152,9 +152,10 @@ static bool
 comp_window_vk_display_init(struct comp_target *ct)
 {
 	struct comp_window_vk_display *w_direct = (struct comp_window_vk_display *)ct;
+	struct vk_bundle *vk = &ct->c->vk;
 
 	// Sanity check.
-	if (ct->c->vk.instance == VK_NULL_HANDLE) {
+	if (vk->instance == VK_NULL_HANDLE) {
 		COMP_ERROR(ct->c, "Vulkan not initialized before vk display init!");
 		return false;
 	}
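One non-mechanical change sits in comp_target_swapchain.c above: comp_target_swapchain_update_timings now obtains the bundle via get_vk(cts) instead of reaching through ct->c. The helper's definition is outside the hunks shown here; assuming comp_target_swapchain embeds its comp_target and can reach the owning compositor through it, it would look roughly like this sketch (the base.c member path is an assumption, not confirmed by the patch):

/* Hypothetical shape of the get_vk() helper used by
 * comp_target_swapchain_update_timings(); the real definition lives
 * elsewhere in comp_target_swapchain.c. */
static inline struct vk_bundle *
get_vk(struct comp_target_swapchain *cts)
{
	return &cts->base.c->vk; /* assumed member layout */
}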