Skip to content
Snippets Groups Projects
Commit ff176511 authored by Pete's avatar Pete
Browse files

merge master changes

parents 92dcdb1e adbafad0
No related branches found
No related tags found
No related merge requests found
Showing
with 399 additions and 275 deletions
......@@ -19,17 +19,21 @@ list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
include(CMakeDependentOption)
include(SPIR-V)
include(GNUInstallDirs)
if(NOT ${CMAKE_VERSION} VERSION_LESS 3.9)
include(CheckIPOSupported)
check_ipo_supported(RESULT HAS_IPO)
endif()
find_package(Eigen3 REQUIRED)
find_package(Vulkan REQUIRED)
find_package(EGL)
find_package(HIDAPI)
find_package(OpenHMD)
find_package(OpenCV COMPONENTS core calib3d highgui imgproc imgcodecs features2d video)
find_package(OpenCV COMPONENTS core calib3d highgui imgproc imgcodecs features2d video CONFIG)
find_package(Libusb1)
find_package(JPEG)
find_package(realsense2)
find_package(SDL2)
find_package(realsense2 CONFIG)
find_package(SDL2 CONFIG)
find_package(Threads)
find_package(ZLIB)
find_package(cJSON)
......@@ -64,6 +68,7 @@ else()
find_package(OpenGL)
endif()
cmake_dependent_option(CMAKE_INTERPROCEDURAL_OPTIMIZATION "Enable inter-procedural (link-time) optimization" OFF "HAS_IPO" OFF)
cmake_dependent_option(XRT_HAVE_WAYLAND "Enable Wayland support" ON "WAYLAND_FOUND AND WAYLAND_SCANNER_FOUND AND WAYLAND_PROTOCOLS_FOUND" OFF)
cmake_dependent_option(XRT_HAVE_XLIB "Enable xlib support" ON "X11_FOUND" OFF)
cmake_dependent_option(XRT_HAVE_XCB "Enable xcb support" ON "XCB_FOUND" OFF)
......@@ -152,6 +157,10 @@ endif()
# Default to PIC code
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
# Describe IPO setting
if(CMAKE_INTERPROCEDURAL_OPTIMIZATION)
message(STATUS "Inter-procedural optimization enabled")
endif()
###
# Descend into madness.
......
......@@ -172,7 +172,7 @@ SPDX-FileCopyrightText: 2020 Collabora, Ltd. and the Monado contributors
([!266](https://gitlab.freedesktop.org/monado/monado/merge_requests/266))
- u/json: Add bool getter function.
([!266](https://gitlab.freedesktop.org/monado/monado/merge_requests/266))
- tracking: Expose save function with none hardcode path for calibration data.
- tracking: Expose save function with non-hardcoded path for calibration data.
([!266](https://gitlab.freedesktop.org/monado/monado/merge_requests/266))
- tracking: Remove all path hardcoded calibration data loading and saving
functions.
......@@ -180,7 +180,7 @@ SPDX-FileCopyrightText: 2020 Collabora, Ltd. and the Monado contributors
- threading: New helper functions and structs for doing threaded work, these are
on a higher level than the one in os wrappers.
([!278](https://gitlab.freedesktop.org/monado/monado/merge_requests/278))
- threading: Fix missing `#pragma once` in `os/os_threading.h`.
- threading: Fix missing `#``pragma once` in `os/os_threading.h`.
([!282](https://gitlab.freedesktop.org/monado/monado/merge_requests/282))
- u/time: Temporarily disable the time skew in time state and used fixed offset
instead to fix various time issues in `st/oxr`. Will be fixed properly later.
......
build: Allow enabling inter-procedural optimization in CMake GUIs, if supported by platform and compiler.
OpenXR: More correctly verify the interactive profile binding data, including
the given interactive profile is correct and the binding point is valid.
......@@ -73,6 +73,12 @@ u_hashmap_int_erase(struct u_hashmap_int *hmi, uint64_t key)
return 0;
}
/*
 * Is the hash map empty?
 *
 * Marked extern "C" for consistency with the other functions in this
 * file (see u_hashmap_int_clear_and_call_for_each below) so the symbol
 * always gets C linkage even if the C header is not included first.
 */
extern "C" bool
u_hashmap_int_empty(const struct u_hashmap_int *hmi)
{
	return hmi->map.empty();
}
extern "C" void
u_hashmap_int_clear_and_call_for_each(struct u_hashmap_int *hmi,
u_hashmap_int_callback cb,
......
......@@ -41,6 +41,12 @@ u_hashmap_int_insert(struct u_hashmap_int *hmi, uint64_t key, void *value);
int
u_hashmap_int_erase(struct u_hashmap_int *hmi, uint64_t key);
/*!
* Is the hash map empty?
*/
bool
u_hashmap_int_empty(const struct u_hashmap_int *hmi);
/*!
* First clear the hashmap and then call the given callback with each item that
* was in the hashmap.
......
......@@ -276,7 +276,8 @@ vk_create_image_from_fd(struct vk_bundle *vk,
VkImage *out_image,
VkDeviceMemory *out_mem)
{
VkImageUsageFlags image_usage = (VkImageUsageFlags)0;
VkImageUsageFlags image_usage =
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
VkImage image = VK_NULL_HANDLE;
VkResult ret = VK_SUCCESS;
......@@ -291,9 +292,6 @@ vk_create_image_from_fd(struct vk_bundle *vk,
if ((swapchain_usage & XRT_SWAPCHAIN_USAGE_DEPTH_STENCIL) != 0) {
image_usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
}
if ((swapchain_usage & XRT_SWAPCHAIN_USAGE_UNORDERED_ACCESS) != 0) {
image_usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
}
if ((swapchain_usage & XRT_SWAPCHAIN_USAGE_TRANSFER_SRC) != 0) {
image_usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
}
......@@ -348,6 +346,38 @@ vk_create_image_from_fd(struct vk_bundle *vk,
return ret;
}
VkResult
vk_create_semaphore_from_fd(struct vk_bundle *vk, int fd, VkSemaphore *out_sem)
{
	/*
	 * Create a fresh semaphore, then import the given fd as its
	 * payload (VK_KHR_external_semaphore_fd). On success Vulkan takes
	 * ownership of the fd; on failure the fd is left for the caller.
	 */
	VkResult ret;

	VkSemaphoreCreateInfo semaphore_create_info = {
	    .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
	};

	ret = vk->vkCreateSemaphore(vk->device, &semaphore_create_info, NULL,
	                            out_sem);
	if (ret != VK_SUCCESS) {
		VK_ERROR(vk, "vkCreateSemaphore: %s", vk_result_string(ret));
		// Nothing to cleanup
		return ret;
	}

	VkImportSemaphoreFdInfoKHR import_semaphore_fd_info = {
	    .sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,
	    .semaphore = *out_sem,
	    .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
	    .fd = fd,
	};

	ret = vk->vkImportSemaphoreFdKHR(vk->device, &import_semaphore_fd_info);
	if (ret != VK_SUCCESS) {
		VK_ERROR(vk, "vkImportSemaphoreFdKHR: %s",
		         vk_result_string(ret));
		// Don't hand a dangling handle back to the caller.
		vk->vkDestroySemaphore(vk->device, *out_sem, NULL);
		*out_sem = VK_NULL_HANDLE;
		return ret;
	}

	return ret;
}
VkResult
vk_create_sampler(struct vk_bundle *vk, VkSampler *out_sampler)
{
......@@ -497,7 +527,6 @@ VkResult
vk_submit_cmd_buffer(struct vk_bundle *vk, VkCommandBuffer cmd_buffer)
{
VkResult ret = VK_SUCCESS;
VkQueue queue;
VkFence fence;
VkFenceCreateInfo fence_info = {
.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
......@@ -515,9 +544,6 @@ vk_submit_cmd_buffer(struct vk_bundle *vk, VkCommandBuffer cmd_buffer)
goto out;
}
// Get the queue.
vk->vkGetDeviceQueue(vk->device, vk->queue_family_index, 0, &queue);
// Create the fence.
ret = vk->vkCreateFence(vk->device, &fence_info, NULL, &fence);
if (ret != VK_SUCCESS) {
......@@ -526,9 +552,9 @@ vk_submit_cmd_buffer(struct vk_bundle *vk, VkCommandBuffer cmd_buffer)
}
// Do the actual submitting.
ret = vk->vkQueueSubmit(queue, 1, &submitInfo, fence);
ret = vk->vkQueueSubmit(vk->queue, 1, &submitInfo, fence);
if (ret != VK_SUCCESS) {
VK_ERROR(vk, "Error: Could not submit queue.\n");
VK_ERROR(vk, "Error: Could not submit to queue.\n");
goto out_fence;
}
......@@ -711,6 +737,8 @@ vk_get_device_functions(struct vk_bundle *vk)
vk->vkGetSwapchainImagesKHR = GET_DEV_PROC(vk, vkGetSwapchainImagesKHR);
vk->vkAcquireNextImageKHR = GET_DEV_PROC(vk, vkAcquireNextImageKHR);
vk->vkQueuePresentKHR = GET_DEV_PROC(vk, vkQueuePresentKHR);
vk->vkImportSemaphoreFdKHR = GET_DEV_PROC(vk, vkImportSemaphoreFdKHR);
vk->vkGetSemaphoreFdKHR = GET_DEV_PROC(vk, vkGetSemaphoreFdKHR);
// clang-format on
return VK_SUCCESS;
......@@ -915,6 +943,7 @@ vk_create_device(struct vk_bundle *vk, int forced_index)
if (ret != VK_SUCCESS) {
goto err_destroy;
}
vk->vkGetDeviceQueue(vk->device, vk->queue_family_index, 0, &vk->queue);
return ret;
......@@ -966,6 +995,8 @@ vk_init_from_given(struct vk_bundle *vk,
goto err_memset;
}
vk->vkGetDeviceQueue(vk->device, vk->queue_family_index, 0, &vk->queue);
// Create the pool.
ret = vk_init_cmd_pool(vk);
if (ret != VK_SUCCESS) {
......@@ -1005,6 +1036,37 @@ vk_get_access_flags(VkImageLayout layout)
return 0;
}
VkAccessFlags
vk_swapchain_access_flags(enum xrt_swapchain_usage_bits bits)
{
VkAccessFlags result = 0;
if ((bits & XRT_SWAPCHAIN_USAGE_UNORDERED_ACCESS) != 0) {
result |= VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
if ((bits & XRT_SWAPCHAIN_USAGE_COLOR) != 0) {
result |= VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
}
if ((bits & XRT_SWAPCHAIN_USAGE_DEPTH_STENCIL) != 0) {
result |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
}
}
if ((bits & XRT_SWAPCHAIN_USAGE_COLOR) != 0) {
result |= VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
}
if ((bits & XRT_SWAPCHAIN_USAGE_DEPTH_STENCIL) != 0) {
result |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
}
if ((bits & XRT_SWAPCHAIN_USAGE_TRANSFER_SRC) != 0) {
result |= VK_ACCESS_TRANSFER_READ_BIT;
}
if ((bits & XRT_SWAPCHAIN_USAGE_TRANSFER_DST) != 0) {
result |= VK_ACCESS_TRANSFER_WRITE_BIT;
}
if ((bits & XRT_SWAPCHAIN_USAGE_SAMPLED) != 0) {
result |= VK_ACCESS_SHADER_READ_BIT;
}
return result;
}
bool
vk_init_descriptor_pool(struct vk_bundle *vk,
const VkDescriptorPoolSize *pool_sizes,
......
......@@ -40,6 +40,7 @@ struct vk_bundle
VkDevice device;
uint32_t queue_family_index;
uint32_t queue_index;
VkQueue queue;
VkDebugReportCallbackEXT debug_report_cb;
......@@ -177,6 +178,9 @@ struct vk_bundle
PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR;
PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR;
PFN_vkQueuePresentKHR vkQueuePresentKHR;
PFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR;
PFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHR;
// clang-format on
};
......@@ -350,6 +354,12 @@ vk_create_image_from_fd(struct vk_bundle *vk,
VkImage *out_image,
VkDeviceMemory *out_mem);
/*!
* @ingroup aux_vk
*/
VkResult
vk_create_semaphore_from_fd(struct vk_bundle *vk, int fd, VkSemaphore *out_sem);
/*!
* @ingroup aux_vk
*/
......@@ -406,6 +416,9 @@ vk_submit_cmd_buffer(struct vk_bundle *vk, VkCommandBuffer cmd_buffer);
VkAccessFlags
vk_get_access_flags(VkImageLayout layout);
VkAccessFlags
vk_swapchain_access_flags(enum xrt_swapchain_usage_bits bits);
bool
vk_init_descriptor_pool(struct vk_bundle *vk,
const VkDescriptorPoolSize *pool_sizes,
......
......@@ -56,12 +56,13 @@ client_gl_swapchain_destroy(struct xrt_swapchain *xsc)
}
static xrt_result_t
client_gl_swapchain_acquire_image(struct xrt_swapchain *xsc, uint32_t *index)
client_gl_swapchain_acquire_image(struct xrt_swapchain *xsc,
uint32_t *out_index)
{
struct client_gl_swapchain *sc = client_gl_swapchain(xsc);
// Pipe down call into fd swapchain.
return xrt_swapchain_acquire_image(&sc->xscfd->base, index);
return xrt_swapchain_acquire_image(&sc->xscfd->base, out_index);
}
static xrt_result_t
......@@ -154,61 +155,40 @@ client_gl_compositor_layer_begin(struct xrt_compositor *xc,
}
static xrt_result_t
client_gl_compositor_layer_stereo_projection(
struct xrt_compositor *xc,
uint64_t timestamp,
client_gl_compositor_layer_stereo_projection(struct xrt_compositor *xc,
struct xrt_device *xdev,
enum xrt_input_name name,
enum xrt_layer_composition_flags layer_flags,
struct xrt_swapchain *l_sc,
uint32_t l_image_index,
struct xrt_rect *l_rect,
uint32_t l_array_index,
struct xrt_fov *l_fov,
struct xrt_pose *l_pose,
struct xrt_swapchain *r_sc,
uint32_t r_image_index,
struct xrt_rect *r_rect,
uint32_t r_array_index,
struct xrt_fov *r_fov,
struct xrt_pose *r_pose,
bool flip_y)
struct xrt_swapchain *l_xsc,
struct xrt_swapchain *r_xsc,
struct xrt_layer_data *data)
{
struct client_gl_compositor *c = client_gl_compositor(xc);
struct xrt_swapchain *l_xscfd, *r_xscfd;
l_xscfd = &client_gl_swapchain(l_sc)->xscfd->base;
r_xscfd = &client_gl_swapchain(r_sc)->xscfd->base;
assert(data->type == XRT_LAYER_STEREO_PROJECTION);
return xrt_comp_layer_stereo_projection(
&c->xcfd->base, timestamp, xdev, name, layer_flags, l_xscfd,
l_image_index, l_rect, l_array_index, l_fov, l_pose, r_xscfd,
r_image_index, r_rect, r_array_index, r_fov, r_pose, true);
l_xscfd = &client_gl_swapchain(l_xsc)->xscfd->base;
r_xscfd = &client_gl_swapchain(r_xsc)->xscfd->base;
data->flip_y = true;
return xrt_comp_layer_stereo_projection(&c->xcfd->base, xdev, l_xscfd,
r_xscfd, data);
}
static xrt_result_t
client_gl_compositor_layer_quad(struct xrt_compositor *xc,
uint64_t timestamp,
struct xrt_device *xdev,
enum xrt_input_name name,
enum xrt_layer_composition_flags layer_flags,
enum xrt_layer_eye_visibility visibility,
struct xrt_swapchain *sc,
uint32_t image_index,
struct xrt_rect *rect,
uint32_t array_index,
struct xrt_pose *pose,
struct xrt_vec2 *size,
bool flip_y)
struct xrt_swapchain *xsc,
struct xrt_layer_data *data)
{
struct client_gl_compositor *c = client_gl_compositor(xc);
struct xrt_swapchain *xscfb;
xscfb = &client_gl_swapchain(sc)->xscfd->base;
assert(data->type == XRT_LAYER_QUAD);
xscfb = &client_gl_swapchain(xsc)->xscfd->base;
data->flip_y = true;
return xrt_comp_layer_quad(&c->xcfd->base, timestamp, xdev, name,
layer_flags, visibility, xscfb, image_index,
rect, array_index, pose, size, true);
return xrt_comp_layer_quad(&c->xcfd->base, xdev, xscfb, data);
}
static xrt_result_t
......
......@@ -8,13 +8,14 @@
* @ingroup comp_client
*/
#include <stdio.h>
#include <stdlib.h>
#include "util/u_misc.h"
#include "comp_vk_client.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
/*!
* Down-cast helper.
*
......@@ -70,12 +71,32 @@ client_vk_swapchain_destroy(struct xrt_swapchain *xsc)
}
static xrt_result_t
client_vk_swapchain_acquire_image(struct xrt_swapchain *xsc, uint32_t *index)
client_vk_swapchain_acquire_image(struct xrt_swapchain *xsc,
uint32_t *out_index)
{
struct client_vk_swapchain *sc = client_vk_swapchain(xsc);
struct vk_bundle *vk = &sc->c->vk;
// Pipe down call into fd swapchain.
return xrt_swapchain_acquire_image(&sc->xscfd->base, index);
xrt_result_t xret =
xrt_swapchain_acquire_image(&sc->xscfd->base, out_index);
if (xret != XRT_SUCCESS) {
return xret;
}
// Acquire ownership and complete layout transition
VkSubmitInfo submitInfo = {
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.commandBufferCount = 1,
.pCommandBuffers = &sc->base.acquire[*out_index],
};
VkResult ret =
vk->vkQueueSubmit(vk->queue, 1, &submitInfo, VK_NULL_HANDLE);
if (ret != VK_SUCCESS) {
VK_ERROR(vk, "Error: Could not submit to queue.\n");
return XRT_ERROR_FAILED_TO_SUBMIT_VULKAN_COMMANDS;
}
return XRT_SUCCESS;
}
static xrt_result_t
......@@ -93,6 +114,20 @@ static xrt_result_t
client_vk_swapchain_release_image(struct xrt_swapchain *xsc, uint32_t index)
{
struct client_vk_swapchain *sc = client_vk_swapchain(xsc);
struct vk_bundle *vk = &sc->c->vk;
// Release ownership and begin layout transition
VkSubmitInfo submitInfo = {
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.commandBufferCount = 1,
.pCommandBuffers = &sc->base.release[index],
};
VkResult ret =
vk->vkQueueSubmit(vk->queue, 1, &submitInfo, VK_NULL_HANDLE);
if (ret != VK_SUCCESS) {
VK_ERROR(vk, "Error: Could not submit to queue.\n");
return XRT_ERROR_FAILED_TO_SUBMIT_VULKAN_COMMANDS;
}
// Pipe down call into fd swapchain.
return xrt_swapchain_release_image(&sc->xscfd->base, index);
......@@ -118,6 +153,10 @@ client_vk_compositor_destroy(struct xrt_compositor *xc)
struct client_vk_compositor *c = client_vk_compositor(xc);
if (c->vk.cmd_pool != VK_NULL_HANDLE) {
// Make sure that any of the command buffers from this command
// pool are not used here, this pleases the validation layer.
c->vk.vkDeviceWaitIdle(c->vk.device);
c->vk.vkDestroyCommandPool(c->vk.device, c->vk.cmd_pool, NULL);
c->vk.cmd_pool = VK_NULL_HANDLE;
}
......@@ -190,61 +229,40 @@ client_vk_compositor_layer_begin(struct xrt_compositor *xc,
}
static xrt_result_t
client_vk_compositor_layer_stereo_projection(
struct xrt_compositor *xc,
uint64_t timestamp,
client_vk_compositor_layer_stereo_projection(struct xrt_compositor *xc,
struct xrt_device *xdev,
enum xrt_input_name name,
enum xrt_layer_composition_flags layer_flags,
struct xrt_swapchain *l_sc,
uint32_t l_image_index,
struct xrt_rect *l_rect,
uint32_t l_array_index,
struct xrt_fov *l_fov,
struct xrt_pose *l_pose,
struct xrt_swapchain *r_sc,
uint32_t r_image_index,
struct xrt_rect *r_rect,
uint32_t r_array_index,
struct xrt_fov *r_fov,
struct xrt_pose *r_pose,
bool flip_y)
struct xrt_swapchain *l_xsc,
struct xrt_swapchain *r_xsc,
struct xrt_layer_data *data)
{
struct client_vk_compositor *c = client_vk_compositor(xc);
struct xrt_swapchain *l_xscfd, *r_xscfd;
l_xscfd = &client_vk_swapchain(l_sc)->xscfd->base;
r_xscfd = &client_vk_swapchain(r_sc)->xscfd->base;
assert(data->type == XRT_LAYER_STEREO_PROJECTION);
return xrt_comp_layer_stereo_projection(
&c->xcfd->base, timestamp, xdev, name, layer_flags, l_xscfd,
l_image_index, l_rect, l_array_index, l_fov, l_pose, r_xscfd,
r_image_index, r_rect, r_array_index, r_fov, r_pose, false);
l_xscfd = &client_vk_swapchain(l_xsc)->xscfd->base;
r_xscfd = &client_vk_swapchain(r_xsc)->xscfd->base;
data->flip_y = false;
return xrt_comp_layer_stereo_projection(&c->xcfd->base, xdev, l_xscfd,
r_xscfd, data);
}
static xrt_result_t
client_vk_compositor_layer_quad(struct xrt_compositor *xc,
uint64_t timestamp,
struct xrt_device *xdev,
enum xrt_input_name name,
enum xrt_layer_composition_flags layer_flags,
enum xrt_layer_eye_visibility visibility,
struct xrt_swapchain *sc,
uint32_t image_index,
struct xrt_rect *rect,
uint32_t array_index,
struct xrt_pose *pose,
struct xrt_vec2 *size,
bool flip_y)
struct xrt_swapchain *xsc,
struct xrt_layer_data *data)
{
struct client_vk_compositor *c = client_vk_compositor(xc);
struct xrt_swapchain *xscfb;
xscfb = &client_vk_swapchain(sc)->xscfd->base;
assert(data->type == XRT_LAYER_QUAD);
xscfb = &client_vk_swapchain(xsc)->xscfd->base;
data->flip_y = false;
return xrt_comp_layer_quad(&c->xcfd->base, timestamp, xdev, name,
layer_flags, visibility, xscfb, image_index,
rect, array_index, pose, size, false);
return xrt_comp_layer_quad(&c->xcfd->base, xdev, xscfb, data);
}
static xrt_result_t
......@@ -288,9 +306,9 @@ client_vk_swapchain_create(struct xrt_compositor *xc,
VkImageSubresourceRange subresource_range = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.levelCount = VK_REMAINING_MIP_LEVELS,
.baseArrayLayer = 0,
.layerCount = array_size,
.layerCount = VK_REMAINING_ARRAY_LAYERS,
};
struct client_vk_swapchain *sc =
......@@ -316,10 +334,15 @@ client_vk_swapchain_create(struct xrt_compositor *xc,
return NULL;
}
/*
* This is only to please the validation layer, that may or may
* not be a bug in the validation layer. That may or may not be
* fixed in the future version of the validation layer.
*/
vk_set_image_layout(&c->vk, cmd_buffer, sc->base.images[i], 0,
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
subresource_range);
}
......@@ -328,6 +351,90 @@ client_vk_swapchain_create(struct xrt_compositor *xc,
return NULL;
}
// Prerecord command buffers for swapchain image ownership/layout
// transitions
for (uint32_t i = 0; i < xsc->num_images; i++) {
ret = vk_init_cmd_buffer(&c->vk, &sc->base.acquire[i]);
if (ret != VK_SUCCESS) {
return NULL;
}
ret = vk_init_cmd_buffer(&c->vk, &sc->base.release[i]);
if (ret != VK_SUCCESS) {
return NULL;
}
VkImageSubresourceRange subresource_range = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = VK_REMAINING_MIP_LEVELS,
.baseArrayLayer = 0,
.layerCount = VK_REMAINING_ARRAY_LAYERS,
};
/*
* The biggest reason is that VK_IMAGE_LAYOUT_PRESENT_SRC_KHR is
* used here is that this is what hello_xr used to barrier to,
* and it worked on a wide variety of drivers. So it's safe.
*
* There might not be a Vulkan renderer on the other end,
* there could be an OpenGL compositor, heck there could be an X
* server even. On Linux VK_IMAGE_LAYOUT_PRESENT_SRC_KHR is what
* you use if you want to "flush" out all of the pixels to the
* memory buffer that has been shared to you from a X11 server.
*
* This is not what the spec says you should do when it comes to
* external images though. Instead we should use the queue family
* index `VK_QUEUE_FAMILY_EXTERNAL`. And use semaphores to
* synchronize.
*/
VkImageMemoryBarrier acquire = {
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.srcAccessMask = 0,
.dstAccessMask = vk_swapchain_access_flags(bits),
.oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
.srcQueueFamilyIndex = c->vk.queue_family_index,
.dstQueueFamilyIndex = c->vk.queue_family_index,
.image = sc->base.images[i],
.subresourceRange = subresource_range,
};
VkImageMemoryBarrier release = {
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.srcAccessMask = vk_swapchain_access_flags(bits),
.dstAccessMask = 0,
.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
.srcQueueFamilyIndex = c->vk.queue_family_index,
.dstQueueFamilyIndex = c->vk.queue_family_index,
.image = sc->base.images[i],
.subresourceRange = subresource_range,
};
//! @todo less conservative pipeline stage masks based on usage
c->vk.vkCmdPipelineBarrier(sc->base.acquire[i],
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
0, 0, NULL, 0, NULL, 1, &acquire);
c->vk.vkCmdPipelineBarrier(sc->base.release[i],
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
0, 0, NULL, 0, NULL, 1, &release);
ret = c->vk.vkEndCommandBuffer(sc->base.acquire[i]);
if (ret != VK_SUCCESS) {
VK_ERROR(vk, "vkEndCommandBuffer: %s",
vk_result_string(ret));
return NULL;
}
ret = c->vk.vkEndCommandBuffer(sc->base.release[i]);
if (ret != VK_SUCCESS) {
VK_ERROR(vk, "vkEndCommandBuffer: %s",
vk_result_string(ret));
return NULL;
}
}
return &sc->base.base;
}
......
......@@ -294,23 +294,10 @@ compositor_layer_begin(struct xrt_compositor *xc,
static xrt_result_t
compositor_layer_stereo_projection(struct xrt_compositor *xc,
uint64_t timestamp,
struct xrt_device *xdev,
enum xrt_input_name name,
enum xrt_layer_composition_flags layer_flags,
struct xrt_swapchain *l_sc,
uint32_t l_image_index,
struct xrt_rect *l_rect,
uint32_t l_array_index,
struct xrt_fov *l_fov,
struct xrt_pose *l_pose,
struct xrt_swapchain *r_sc,
uint32_t r_image_index,
struct xrt_rect *r_rect,
uint32_t r_array_index,
struct xrt_fov *r_fov,
struct xrt_pose *r_pose,
bool flip_y)
struct xrt_swapchain *l_xsc,
struct xrt_swapchain *r_xsc,
struct xrt_layer_data *data)
{
struct comp_compositor *c = comp_compositor(xc);
......@@ -319,16 +306,9 @@ compositor_layer_stereo_projection(struct xrt_compositor *xc,
uint32_t layer_id = c->slots[slot_id].num_layers;
struct comp_layer *layer = &c->slots[slot_id].layers[layer_id];
layer->stereo.l.sc = comp_swapchain(l_sc);
layer->stereo.l.image_index = l_image_index;
layer->stereo.l.array_index = l_array_index;
layer->stereo.r.sc = comp_swapchain(r_sc);
layer->stereo.r.image_index = r_image_index;
layer->stereo.r.array_index = r_array_index;
layer->flags = layer_flags;
layer->flip_y = flip_y;
layer->type = COMP_LAYER_STEREO_PROJECTION;
layer->scs[0] = comp_swapchain(l_xsc);
layer->scs[1] = comp_swapchain(r_xsc);
layer->data = *data;
c->slots[slot_id].num_layers++;
return XRT_SUCCESS;
......@@ -336,18 +316,9 @@ compositor_layer_stereo_projection(struct xrt_compositor *xc,
static xrt_result_t
compositor_layer_quad(struct xrt_compositor *xc,
uint64_t timestamp,
struct xrt_device *xdev,
enum xrt_input_name name,
enum xrt_layer_composition_flags layer_flags,
enum xrt_layer_eye_visibility visibility,
struct xrt_swapchain *sc,
uint32_t image_index,
struct xrt_rect *rect,
uint32_t array_index,
struct xrt_pose *pose,
struct xrt_vec2 *size,
bool flip_y)
struct xrt_swapchain *xsc,
struct xrt_layer_data *data)
{
struct comp_compositor *c = comp_compositor(xc);
......@@ -356,17 +327,9 @@ compositor_layer_quad(struct xrt_compositor *xc,
uint32_t layer_id = c->slots[slot_id].num_layers;
struct comp_layer *layer = &c->slots[slot_id].layers[layer_id];
layer->quad.sc = comp_swapchain(sc);
layer->quad.visibility = visibility;
layer->quad.image_index = image_index;
layer->quad.rect = *rect;
layer->quad.array_index = array_index;
layer->quad.pose = *pose;
layer->quad.size = *size;
layer->flags = layer_flags;
layer->flip_y = flip_y;
layer->type = COMP_LAYER_QUAD;
layer->scs[0] = comp_swapchain(xsc);
layer->scs[1] = NULL;
layer->data = *data;
c->slots[slot_id].num_layers++;
return XRT_SUCCESS;
......@@ -388,25 +351,26 @@ compositor_layer_commit(struct xrt_compositor *xc)
for (uint32_t i = 0; i < num_layers; i++) {
struct comp_layer *layer = &c->slots[slot_id].layers[i];
switch (layer->type) {
case COMP_LAYER_QUAD: {
struct comp_layer_quad *quad = &layer->quad;
struct xrt_layer_data *data = &layer->data;
switch (data->type) {
case XRT_LAYER_QUAD: {
struct xrt_layer_quad_data *quad = &layer->data.quad;
struct comp_swapchain_image *image;
image = &quad->sc->images[quad->image_index];
comp_renderer_set_quad_layer(c->r, image, &quad->pose,
&quad->size, layer->flip_y,
i, quad->array_index);
image = &layer->scs[0]->images[quad->sub.image_index];
comp_renderer_set_quad_layer(c->r, i, image, data);
} break;
case COMP_LAYER_STEREO_PROJECTION: {
struct comp_layer_stereo *stereo = &layer->stereo;
case XRT_LAYER_STEREO_PROJECTION: {
struct xrt_layer_stereo_projection_data *stereo =
&data->stereo;
struct comp_swapchain_image *right;
struct comp_swapchain_image *left;
left = &stereo->l.sc->images[stereo->l.image_index];
right = &stereo->l.sc->images[stereo->r.image_index];
left =
&layer->scs[0]->images[stereo->l.sub.image_index];
right =
&layer->scs[1]->images[stereo->r.sub.image_index];
comp_renderer_set_projection_layer(
c->r, left, right, layer->flip_y, i,
stereo->l.array_index, stereo->r.array_index);
comp_renderer_set_projection_layer(c->r, i, left, right,
data);
} break;
}
}
......
......@@ -77,66 +77,26 @@ struct comp_swapchain
*/
struct u_index_fifo fifo;
};
/*!
* Tag for distinguishing the union contents of @ref comp_layer.
*/
enum comp_layer_type
{
//! comp_layer::stereo is initialized
COMP_LAYER_STEREO_PROJECTION,
//! comp_layer::quad is initialized
COMP_LAYER_QUAD,
};
/*!
* A quad layer.
* A single layer.
*
* @ingroup comp_main
* @see comp_layer
* @see comp_layer_slot
*/
struct comp_layer_quad
struct comp_layer
{
struct comp_swapchain *sc;
enum xrt_layer_eye_visibility visibility;
uint32_t image_index;
struct xrt_rect rect;
uint32_t array_index;
struct xrt_pose pose;
struct xrt_vec2 size;
};
/*!
* A stereo projection layer.
* Up to two compositor swapchains referenced per layer.
*
* @ingroup comp_main
* @see comp_layer
* Unused elements should be set to null.
*/
struct comp_layer_stereo
{
struct
{
struct comp_swapchain *sc;
uint32_t image_index;
uint32_t array_index;
} l, r;
};
struct comp_swapchain *scs[2];
/*!
* A single layer.
*
* @ingroup comp_main
* @see comp_layer_slot
* All basic (trivially-serializable) data associated with a layer.
*/
struct comp_layer
{
int64_t timestamp;
enum xrt_layer_composition_flags flags;
enum comp_layer_type type;
bool flip_y;
union {
struct comp_layer_quad quad;
struct comp_layer_stereo stereo;
};
struct xrt_layer_data data;
};
/*!
......
......@@ -154,7 +154,7 @@ comp_layer_update_stereo_descriptors(struct comp_render_layer *self,
static bool
_init(struct comp_render_layer *self,
struct vk_bundle *vk,
enum comp_layer_type type,
enum xrt_layer_type type,
VkDescriptorSetLayout *layout)
{
self->vk = vk;
......@@ -162,6 +162,7 @@ _init(struct comp_render_layer *self,
self->type = type;
self->visible = true;
self->view_space = true;
math_matrix_4x4_identity(&self->model_matrix);
......@@ -202,7 +203,8 @@ comp_layer_draw(struct comp_render_layer *self,
VkPipelineLayout pipeline_layout,
VkCommandBuffer cmd_buffer,
const struct vk_buffer *vertex_buffer,
const struct xrt_matrix_4x4 *vp)
const struct xrt_matrix_4x4 *vp_world,
const struct xrt_matrix_4x4 *vp_eye)
{
if (!self->visible)
return;
......@@ -210,11 +212,14 @@ comp_layer_draw(struct comp_render_layer *self,
self->vk->vkCmdBindPipeline(cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
pipeline);
// Is this layer viewspace or not.
const struct xrt_matrix_4x4 *vp = self->view_space ? vp_eye : vp_world;
switch (self->type) {
case COMP_LAYER_STEREO_PROJECTION:
case XRT_LAYER_STEREO_PROJECTION:
_update_mvp_matrix(self, eye, &proj_scale);
break;
case COMP_LAYER_QUAD: _update_mvp_matrix(self, eye, vp); break;
case XRT_LAYER_QUAD: _update_mvp_matrix(self, eye, vp); break;
}
self->vk->vkCmdBindDescriptorSets(
......@@ -230,7 +235,7 @@ comp_layer_draw(struct comp_render_layer *self,
struct comp_render_layer *
comp_layer_create(struct vk_bundle *vk,
enum comp_layer_type type,
enum xrt_layer_type type,
VkDescriptorSetLayout *layout)
{
struct comp_render_layer *q = U_TYPED_CALLOC(struct comp_render_layer);
......
......@@ -23,8 +23,9 @@ struct comp_render_layer
struct vk_bundle *vk;
bool visible;
bool view_space;
enum comp_layer_type type;
enum xrt_layer_type type;
struct layer_transformation transformation[2];
struct vk_buffer transformation_ubos[2];
......@@ -37,7 +38,7 @@ struct comp_render_layer
struct comp_render_layer *
comp_layer_create(struct vk_bundle *vk,
enum comp_layer_type type,
enum xrt_layer_type type,
VkDescriptorSetLayout *layout);
void
......@@ -47,7 +48,8 @@ comp_layer_draw(struct comp_render_layer *self,
VkPipelineLayout pipeline_layout,
VkCommandBuffer cmd_buffer,
const struct vk_buffer *vertex_buffer,
const struct xrt_matrix_4x4 *vp);
const struct xrt_matrix_4x4 *vp_world,
const struct xrt_matrix_4x4 *vp_eye);
void
comp_layer_set_model_matrix(struct comp_render_layer *self,
......
......@@ -381,14 +381,18 @@ _render_eye(struct comp_layer_renderer *self,
VkCommandBuffer cmd_buffer,
VkPipelineLayout pipeline_layout)
{
struct xrt_matrix_4x4 vp;
struct xrt_matrix_4x4 vp_world;
struct xrt_matrix_4x4 vp_eye;
math_matrix_4x4_multiply(&self->mat_projection[eye],
&self->mat_view[eye], &vp);
&self->mat_world_view[eye], &vp_world);
math_matrix_4x4_multiply(&self->mat_projection[eye],
&self->mat_eye_view[eye], &vp_eye);
for (uint32_t i = 0; i < self->num_layers; i++)
for (uint32_t i = 0; i < self->num_layers; i++) {
comp_layer_draw(self->layers[i], eye, self->pipeline,
pipeline_layout, cmd_buffer,
&self->vertex_buffer, &vp);
&self->vertex_buffer, &vp_world, &vp_eye);
}
}
static bool
......@@ -451,7 +455,7 @@ comp_layer_renderer_allocate_layers(struct comp_layer_renderer *self,
for (uint32_t i = 0; i < self->num_layers; i++) {
self->layers[i] = comp_layer_create(
vk, COMP_LAYER_QUAD, &self->descriptor_set_layout);
vk, XRT_LAYER_QUAD, &self->descriptor_set_layout);
}
}
......@@ -484,7 +488,8 @@ _init(struct comp_layer_renderer *self,
for (uint32_t i = 0; i < 2; i++) {
math_matrix_4x4_identity(&self->mat_projection[i]);
math_matrix_4x4_identity(&self->mat_view[i]);
math_matrix_4x4_identity(&self->mat_world_view[i]);
math_matrix_4x4_identity(&self->mat_eye_view[i]);
}
if (!_init_render_pass(vk, format,
......@@ -649,7 +654,7 @@ comp_layer_renderer_destroy(struct comp_layer_renderer *self)
void
comp_layer_renderer_set_fov(struct comp_layer_renderer *self,
const struct xrt_fov *fov,
uint32_t view_id)
uint32_t eye)
{
const float tan_left = tanf(fov->angle_left);
const float tan_right = tanf(fov->angle_right);
......@@ -670,7 +675,7 @@ comp_layer_renderer_set_fov(struct comp_layer_renderer *self,
const float a43 = -(self->far * self->near) / (self->far - self->near);
// clang-format off
self->mat_projection[view_id] = (struct xrt_matrix_4x4) {
self->mat_projection[eye] = (struct xrt_matrix_4x4) {
.v = {
a11, 0, 0, 0,
0, a22, 0, 0,
......@@ -683,8 +688,10 @@ comp_layer_renderer_set_fov(struct comp_layer_renderer *self,
void
comp_layer_renderer_set_pose(struct comp_layer_renderer *self,
const struct xrt_pose *pose,
uint32_t view_id)
const struct xrt_pose *eye_pose,
const struct xrt_pose *world_pose,
uint32_t eye)
{
math_matrix_4x4_view_from_pose(pose, &self->mat_view[view_id]);
math_matrix_4x4_view_from_pose(eye_pose, &self->mat_eye_view[eye]);
math_matrix_4x4_view_from_pose(world_pose, &self->mat_world_view[eye]);
}
......@@ -42,7 +42,8 @@ struct comp_layer_renderer
VkPipelineLayout pipeline_layout;
VkPipelineCache pipeline_cache;
struct xrt_matrix_4x4 mat_view[2];
struct xrt_matrix_4x4 mat_world_view[2];
struct xrt_matrix_4x4 mat_eye_view[2];
struct xrt_matrix_4x4 mat_projection[2];
struct vk_buffer vertex_buffer;
......@@ -68,12 +69,13 @@ comp_layer_renderer_draw(struct comp_layer_renderer *self);
// Set the projection matrix for one eye from the given field of view.
void
comp_layer_renderer_set_fov(struct comp_layer_renderer *self,
                            const struct xrt_fov *fov,
                            uint32_t eye);

// Set the per-eye view matrices (eye-space and world-space variants).
void
comp_layer_renderer_set_pose(struct comp_layer_renderer *self,
                             const struct xrt_pose *eye_pose,
                             const struct xrt_pose *world_pose,
                             uint32_t eye);
void
comp_layer_renderer_allocate_layers(struct comp_layer_renderer *self,
......
......@@ -416,15 +416,15 @@ _get_view_projection(struct comp_renderer *r)
comp_layer_renderer_set_fov(r->lr, &fov, i);
struct xrt_pose view_pose;
struct xrt_pose eye_pose;
xrt_device_get_view_pose(r->c->xdev, &eye_relation, i,
&view_pose);
&eye_pose);
struct xrt_pose pose;
math_pose_openxr_locate(&view_pose, &relation.pose,
&base_space_pose, &pose);
struct xrt_pose world_pose;
math_pose_openxr_locate(&eye_pose, &relation.pose,
&base_space_pose, &world_pose);
comp_layer_renderer_set_pose(r->lr, &pose, i);
comp_layer_renderer_set_pose(r->lr, &eye_pose, &world_pose, i);
}
}
......@@ -474,44 +474,48 @@ renderer_init(struct comp_renderer *r)
// Configure layer slot `layer` as a quad layer.
//
// Binds the swapchain image view selected by data->quad.sub.array_index to
// the layer's descriptor set, computes the quad's model matrix from the
// pose and size carried in `data`, and records the flip-y, layer type and
// view-space flags.
//
// r:     the compositor renderer.
// layer: index into r->lr->layers to (re)configure.
// image: swapchain image providing sampler and image views.
// data:  generic layer data; only the `quad` union member is read.
void
comp_renderer_set_quad_layer(struct comp_renderer *r,
                             uint32_t layer,
                             struct comp_swapchain_image *image,
                             struct xrt_layer_data *data)
{
	comp_layer_update_descriptors(r->lr->layers[layer], image->sampler,
	                              image->views[data->quad.sub.array_index]);

	struct xrt_matrix_4x4 model_matrix;
	math_matrix_4x4_quad_model(&data->quad.pose, &data->quad.size,
	                           &model_matrix);

	comp_layer_set_model_matrix(r->lr->layers[layer], &model_matrix);
	comp_layer_set_flip_y(r->lr->layers[layer], data->flip_y);

	r->lr->layers[layer]->type = XRT_LAYER_QUAD;
	// Layers with this bit set follow the view instead of the world.
	r->lr->layers[layer]->view_space =
	    (data->flags & XRT_LAYER_COMPOSITION_VIEW_SPACE_BIT) != 0;

	// NOTE(review): full device wait after updating descriptors —
	// presumably to avoid touching descriptor sets still in use by
	// in-flight command buffers; confirm this is intentional.
	r->c->vk.vkDeviceWaitIdle(r->c->vk.device);
}
// Configure layer slot `layer` as a stereo projection layer.
//
// Binds the left/right swapchain image views (selected by the per-eye
// array indices in data->stereo) to the layer's stereo descriptor sets and
// records the flip-y, layer type and view-space flags.
//
// r:           the compositor renderer.
// layer:       index into r->lr->layers to (re)configure.
// left_image:  swapchain image for the left eye.
// right_image: swapchain image for the right eye.
// data:        generic layer data; only the `stereo` union member is read.
void
comp_renderer_set_projection_layer(struct comp_renderer *r,
                                   uint32_t layer,
                                   struct comp_swapchain_image *left_image,
                                   struct comp_swapchain_image *right_image,
                                   struct xrt_layer_data *data)
{
	uint32_t left_array_index = data->stereo.l.sub.array_index;
	uint32_t right_array_index = data->stereo.r.sub.array_index;

	comp_layer_update_stereo_descriptors(
	    r->lr->layers[layer], left_image->sampler, right_image->sampler,
	    left_image->views[left_array_index],
	    right_image->views[right_array_index]);

	comp_layer_set_flip_y(r->lr->layers[layer], data->flip_y);

	r->lr->layers[layer]->type = XRT_LAYER_STEREO_PROJECTION;
	// Layers with this bit set follow the view instead of the world.
	r->lr->layers[layer]->view_space =
	    (data->flags & XRT_LAYER_COMPOSITION_VIEW_SPACE_BIT) != 0;
}
void
......
......@@ -56,22 +56,16 @@ comp_renderer_draw(struct comp_renderer *r);
// Configure layer slot `layer` as a stereo projection layer from the
// given per-eye swapchain images and layer data.
void
comp_renderer_set_projection_layer(struct comp_renderer *r,
                                   uint32_t layer,
                                   struct comp_swapchain_image *left_image,
                                   struct comp_swapchain_image *right_image,
                                   struct xrt_layer_data *data);

// Configure layer slot `layer` as a quad layer from the given swapchain
// image and layer data.
void
comp_renderer_set_quad_layer(struct comp_renderer *r,
                             uint32_t layer,
                             struct comp_swapchain_image *image,
                             struct xrt_layer_data *data);
void
comp_renderer_allocate_layers(struct comp_renderer *self, uint32_t num_layers);
......
......@@ -27,14 +27,14 @@ swapchain_destroy(struct xrt_swapchain *xsc)
}
static xrt_result_t
swapchain_acquire_image(struct xrt_swapchain *xsc, uint32_t *index)
swapchain_acquire_image(struct xrt_swapchain *xsc, uint32_t *out_index)
{
struct comp_swapchain *sc = comp_swapchain(xsc);
COMP_SPEW(sc->c, "ACQUIRE_IMAGE");
// Returns negative on empty fifo.
int res = u_index_fifo_pop(&sc->fifo, index);
int res = u_index_fifo_pop(&sc->fifo, out_index);
if (res >= 0) {
return XRT_SUCCESS;
} else {
......
......@@ -6,7 +6,7 @@ subdir('shaders')
comp_include = include_directories('.')
# TODO: Dependency resolution and subsequent configuration could be improved
# Merged dependency list; xcb_randr is required by the compositor's
# direct-mode display code.
compositor_deps = [aux, shaders, vulkan, xrt_config_vulkan, xcb_randr]
compositor_includes = [xrt_include]
compositor_srcs = [
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment