Commit ffd1b373 authored by Pete Black's avatar Pete Black Committed by Jakob Bornecrantz
Browse files

d/mt: Squashed commit from jakob-montrack-rp-rebased branch

parent e7a933a1
......@@ -35,6 +35,7 @@ pkg_check_modules(LIBUVC libuvc)
# @TODO Turn into a find_package FFMPEG file.
pkg_check_modules(FFMPEG libavcodec)
find_package(uvbi)
if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
# Compositor backend
......@@ -52,6 +53,7 @@ cmake_dependent_option(BUILD_WITH_OPENGL "Enable OpenGL Graphics API support" ON
set(BUILD_WITH_LIBUSB TRUE)
cmake_dependent_option(BUILD_WITH_JPEG "Enable jpeg code (used for some video drivers)" ON "JPEG_FOUND" OFF)
cmake_dependent_option(BUILD_WITH_UVBI "Enable UVBI-based optical tracking driver" ON "LIBUVC_FOUND AND uvbi_FOUND AND OPENCV_FOUND" OFF)
cmake_dependent_option(BUILD_WITH_OPENCV "Enable OpenCV backend" ON "OpenCV_FOUND" OFF)
cmake_dependent_option(BUILD_WITH_LIBUVC "Enable libuvc video driver" ON "LIBUVC_FOUND" OFF)
cmake_dependent_option(BUILD_WITH_FFMPEG "Enable ffmpeg testing video driver" ON "FFMPEG_FOUND" OFF)
......@@ -116,6 +118,17 @@ if(TRUE)
set(BUILD_DRIVER_PSMV TRUE)
endif()
if(BUILD_WITH_UVBI)
add_definitions(-DXRT_HAVE_UVBI)
endif()
if(BUILD_WITH_OPENCV AND (BUILD_WITH_FFMPEG OR BUILD_WITH_JPEG))
# Condition for enabling the montrack optical tracking driver.
# JPEG required for both UVC and v4l2 backends.
add_definitions(-DXRT_BUILD_MONTRACK)
set(BUILD_DRIVER_MONTRACK TRUE)
endif()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pedantic -Wall -Wextra -Wno-unused-parameter")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Wno-unused-parameter")
......
......@@ -5,8 +5,13 @@
include_directories(
${CMAKE_CURRENT_SOURCE_DIR}/../include
${CMAKE_CURRENT_SOURCE_DIR}/../auxiliary
${CMAKE_CURRENT_SOURCE_DIR}/montrack
${CMAKE_CURRENT_SOURCE_DIR}/montrack/frameservers/common
)
if(BUILD_DRIVER_MONTRACK)
add_subdirectory(montrack)
endif()
if(BUILD_DRIVER_HDK)
set(HDK_SOURCE_FILES
......
# Core montrack optical-tracking driver sources.
set (MONTRACK_SOURCE_FILES
	mt_device.c
	mt_device.h
	mt_interface.h
	mt_prober.c
	mt_events.h
	mt_framequeue.h
	mt_framequeue.c
	)

# Sub-components are built as OBJECT libraries and folded in below.
add_subdirectory(frameservers)
add_subdirectory(filters)
add_subdirectory(optical_tracking)

# Use OBJECT to not create an archive, since it just gets in the way.
add_library(drv_montrack OBJECT ${MONTRACK_SOURCE_FILES}
	$<TARGET_OBJECTS:frameserver>
	$<TARGET_OBJECTS:filter>
	$<TARGET_OBJECTS:optical_tracking>
	)
set_property(TARGET drv_montrack PROPERTY POSITION_INDEPENDENT_CODE ON)
# Sub-component headers are treated as SYSTEM to silence their warnings.
target_include_directories(drv_montrack SYSTEM
	PRIVATE frameservers
	PRIVATE optical_tracking
	PRIVATE filters)
target_link_libraries (drv_montrack frameserver filter optical_tracking)
# Copyright 2019, Collabora, Ltd.
# SPDX-License-Identifier: BSL-1.0

include_directories(
	${CMAKE_CURRENT_SOURCE_DIR}/../include
	${CMAKE_CURRENT_SOURCE_DIR}/../auxiliary
	${CMAKE_CURRENT_SOURCE_DIR}
	)

set(FILTER_SOURCE_FILES
	common/filter.h
	common/filter.c
	filter_opencv_kalman.cpp
	filter_opencv_kalman.h
	)

# Use OBJECT to not create an archive, since it just gets in the way.
add_library(filter OBJECT ${FILTER_SOURCE_FILES})
set_property(TARGET filter PROPERTY POSITION_INDEPENDENT_CODE ON)
target_include_directories(filter SYSTEM
	# Fixed: was "${CMAKE_CURRENT_SOURCE_DIR}../" (missing path
	# separator), which expanded to the nonexistent "<dir>../".
	PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/..
	PRIVATE ${OpenCV_INCLUDE_DIRS}
	)
#include "filter.h"
#include "filter_opencv_kalman.h"
#include <string.h>
#include "util/u_misc.h"
/*!
 * Factory: instantiate a filter implementation of the requested type.
 *
 * Returns NULL for FILTER_TYPE_NONE and for any unrecognized type.
 * The caller owns the returned instance and releases it with
 * filter_destroy().
 */
struct filter_instance*
filter_create(filter_type_t t)
{
	if (t == FILTER_TYPE_OPENCV_KALMAN) {
		// The concrete type embeds filter_instance as its first
		// member, so this cast is the usual "base class" cast.
		return (struct filter_instance*)filter_opencv_kalman_create();
	}

	// FILTER_TYPE_NONE and anything unknown.
	return NULL;
}
/*!
 * Smoke test for the filter subsystem: verifies that an OpenCV Kalman
 * filter instance can be created.
 *
 * @return true if creation succeeded, false otherwise.
 */
bool
filters_test(void)
{
	// create a filter
	struct filter_instance* filter =
	    filter_create(FILTER_TYPE_OPENCV_KALMAN);
	if (!filter) {
		return false;
	}

	// Previously leaked: the instance was only needed for the
	// creation check, so release it before reporting success.
	filter_destroy(filter);
	return true;
}
#ifndef FILTER_H
#define FILTER_H

#include <xrt/xrt_defines.h>
#include "util/u_time.h"

/*!
 * @file
 * @brief Generic filter interface: a vtable-style wrapper dispatching to
 *        concrete implementations (e.g. the OpenCV Kalman filter).
 */

/*!
 * Opaque, implementation-defined configuration blob handed to
 * filter_configure(); each backend casts it to its own config struct.
 */
typedef void* filter_configuration_ptr;

struct filter_instance;

/*!
 * A filtered pose estimate, with validity flags and derivatives.
 */
struct filter_state
{
	struct xrt_pose pose;
	bool has_position;           //!< pose.position is valid
	bool has_rotation;           //!< pose.orientation is valid
	struct xrt_vec3 velocity;
	struct xrt_vec3 acceleration;
	struct xrt_quat angular_velocity;
	struct xrt_quat angular_accel;
	timepoint_ns timestamp;
};

/*! Available filter back-ends. */
typedef enum filter_type
{
	FILTER_TYPE_NONE,
	FILTER_TYPE_OPENCV_KALMAN
} filter_type_t;

typedef struct tracker_measurement tracker_measurement_t;

/*!
 * Base "class" for filters: the function pointers are filled in by the
 * concrete implementation. Call through the filter_*() inline helpers
 * below rather than through the members directly.
 */
struct filter_instance
{
	filter_type_t tracker_type;
	bool (*queue)(struct filter_instance* inst,
	              tracker_measurement_t* measurement);
	bool (*set_state)(struct filter_instance* inst,
	                  struct filter_state* state);
	bool (*get_state)(struct filter_instance* inst,
	                  struct filter_state* state);
	bool (*predict_state)(struct filter_instance* inst,
	                      struct filter_state* state,
	                      timepoint_ns time);
	bool (*configure)(struct filter_instance* inst,
	                  filter_configuration_ptr config);
	void (*destroy)(struct filter_instance* inst);
};

/*!
 * Create a filter of the given type, or NULL on failure/unknown type.
 * The caller owns the instance and must release it with filter_destroy().
 */
struct filter_instance*
filter_create(filter_type_t t);

/*! Self-test entry point for the filter module. */
bool
filters_test(void);

/*! Destroy @p inst, dispatching to the implementation's destructor. */
static inline void
filter_destroy(struct filter_instance* inst)
{
	inst->destroy(inst);
}

/*! Feed one measurement into the filter. */
static inline bool
filter_queue(struct filter_instance* inst, tracker_measurement_t* measurement)
{
	return inst->queue(inst, measurement);
}

/*! Overwrite the filter's internal state. */
static inline bool
filter_set_state(struct filter_instance* inst, struct filter_state* state)
{
	return inst->set_state(inst, state);
}

/*! Read back the filter's current state. */
static inline bool
filter_get_state(struct filter_instance* inst, struct filter_state* state)
{
	return inst->get_state(inst, state);
}

/*! Predict the state at @p time, writing the result into @p state. */
static inline bool
filter_predict_state(struct filter_instance* inst,
                     struct filter_state* state,
                     timepoint_ns time)
{
	return inst->predict_state(inst, state, time);
}

/*! Apply a backend-specific configuration. */
static inline bool
filter_configure(struct filter_instance* inst, filter_configuration_ptr config)
{
	return inst->configure(inst, config);
}

#endif // FILTER_H
#include <opencv2/opencv.hpp>
#include "../optical_tracking/common/tracker.h"
#include "filter_opencv_kalman.h"
#include "util/u_misc.h"
/*!
 * Internal state for the OpenCV-backed Kalman filter. The embedded
 * filter_instance must remain the FIRST member so a generic
 * filter_instance* can be cast back to this type.
 */
struct filter_opencv_kalman
{
	struct filter_instance base;                   //!< generic vtable base
	bool configured;                               //!< configure() was called
	opencv_filter_configuration_t configuration;   //!< noise covariances
	cv::KalmanFilter kalman_filter;
	cv::Mat observation;                           //!< 3x1 measured position
	cv::Mat prediction;                            //!< 6x1 predicted state
	cv::Mat state;
	bool running;                                  //!< at least one measurement queued
};
/*!
 * Downcast the generic filter_instance pointer to the opencv_kalman
 * internal type (valid because base is the first member).
 */
static inline struct filter_opencv_kalman*
filter_opencv_kalman(struct filter_instance* ptr)
{
	return reinterpret_cast<struct filter_opencv_kalman*>(ptr);
}
/*
 * Release the filter allocation.
 *
 * NOTE(review): the struct holds C++ members (cv::Mat, cv::KalmanFilter)
 * on a calloc'd allocation; free() here never runs their destructors, so
 * their internal buffers leak. Confirm and consider switching the
 * create/destroy pair to new/delete.
 */
static void
filter_opencv_kalman_destroy(struct filter_instance* inst)
{
	free(inst);
}
static bool
filter_opencv_kalman_queue(struct filter_instance* inst,
tracker_measurement_t* measurement)
{
struct filter_opencv_kalman* internal = filter_opencv_kalman(inst);
printf("queueing measurement in filter\n");
internal->observation.at<float>(0, 0) = measurement->pose.position.x;
internal->observation.at<float>(1, 0) = measurement->pose.position.y;
internal->observation.at<float>(2, 0) = measurement->pose.position.z;
internal->kalman_filter.correct(internal->observation);
internal->running = true;
return false;
}
/*
 * Not implemented: always reports failure.
 */
bool
filter_opencv_kalman_get_state(struct filter_instance* inst,
                               struct filter_state* state)
{
	return false;
}
/*
 * Not implemented: always reports failure.
 */
bool
filter_opencv_kalman_set_state(struct filter_instance* inst,
                               struct filter_state* state)
{
	return false;
}
bool
filter_opencv_kalman_predict_state(struct filter_instance* inst,
struct filter_state* state,
timepoint_ns time)
{
struct filter_opencv_kalman* internal = filter_opencv_kalman(inst);
// printf("getting filtered pose\n");
if (!internal->running) {
return false;
}
internal->prediction = internal->kalman_filter.predict();
state->has_position = true;
state->pose.position.x = internal->prediction.at<float>(0, 0);
state->pose.position.y = internal->prediction.at<float>(1, 0);
state->pose.position.z = internal->prediction.at<float>(2, 0);
return true;
}
bool
filter_opencv_kalman_configure(struct filter_instance* inst,
filter_configuration_ptr config_generic)
{
struct filter_opencv_kalman* internal = filter_opencv_kalman(inst);
opencv_filter_configuration_t* config =
(opencv_filter_configuration_t*)config_generic;
internal->configuration = *config;
cv::setIdentity(
internal->kalman_filter.processNoiseCov,
cv::Scalar::all(internal->configuration.process_noise_cov));
cv::setIdentity(
internal->kalman_filter.measurementNoiseCov,
cv::Scalar::all(internal->configuration.measurement_noise_cov));
internal->configured = true;
return true;
}
/*!
 * Allocate and initialize an OpenCV Kalman filter instance with a
 * constant-velocity model: 6 state variables (position + velocity per
 * axis), 3 measured (position). Returns NULL on allocation failure.
 *
 * NOTE(review): the object comes from U_TYPED_CALLOC and its cv::Mat /
 * cv::KalmanFilter members are assigned without ever being constructed;
 * assigning to a non-constructed C++ object is undefined behavior —
 * confirm and consider allocating with new instead.
 */
struct filter_opencv_kalman*
filter_opencv_kalman_create()
{
	struct filter_opencv_kalman* i =
	    U_TYPED_CALLOC(struct filter_opencv_kalman);
	if (!i) {
		return NULL;
	}

	// Wire up the generic base "vtable".
	i->base.queue = filter_opencv_kalman_queue;
	i->base.set_state = filter_opencv_kalman_set_state;
	i->base.get_state = filter_opencv_kalman_get_state;
	i->base.predict_state = filter_opencv_kalman_predict_state;
	i->base.configure = filter_opencv_kalman_configure;
	i->base.destroy = filter_opencv_kalman_destroy;

	// Fixed timestep assumed by the transition matrix below.
	float dt = 1.0;
	i->kalman_filter.init(6, 3);
	i->observation = cv::Mat(3, 1, CV_32F);
	i->prediction = cv::Mat(6, 1, CV_32F);
	// Constant-velocity transition: pos' = pos + vel * dt per axis.
	i->kalman_filter.transitionMatrix =
	    (cv::Mat_<float>(6, 6) << 1.0, 0.0, 0.0, dt, 0.0, 0.0, 0.0, 1.0,
	     0.0, 0.0, dt, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0, 0.0, 0.0, 1.0,
	     0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
	     1.0);
	cv::setIdentity(i->kalman_filter.measurementMatrix,
	                cv::Scalar::all(1.0f));
	cv::setIdentity(i->kalman_filter.errorCovPost, cv::Scalar::all(0.0f));

	// our filter parameters set the process and measurement noise
	// covariances. (Zero here until configure() is called — the calloc
	// left configuration zeroed.)
	cv::setIdentity(i->kalman_filter.processNoiseCov,
	                cv::Scalar::all(i->configuration.process_noise_cov));
	cv::setIdentity(
	    i->kalman_filter.measurementNoiseCov,
	    cv::Scalar::all(i->configuration.measurement_noise_cov));

	i->configured = false;
	i->running = false;
	return i;
}
#ifndef FILTER_OPENCV_KALMAN_H
#define FILTER_OPENCV_KALMAN_H

#include <xrt/xrt_defines.h>
#include "common/filter.h"

/*!
 * Tunable parameters for the OpenCV Kalman filter backend; passed to
 * filter_configure() as the generic configuration blob.
 */
typedef struct opencv_filter_configuration
{
	float measurement_noise_cov; //!< measurement noise covariance (diagonal)
	float process_noise_cov;     //!< process noise covariance (diagonal)
} opencv_filter_configuration_t;

typedef struct opencv_kalman_filter_state
{
	struct xrt_pose pose;
} opencv_kalman_filter_state_t;

#ifdef __cplusplus
extern "C" {
#endif

// forward declare this, as it contains C++ stuff
struct filter_opencv_kalman;

/*!
 * Create an OpenCV Kalman filter instance; returns NULL on allocation
 * failure. Release it via filter_destroy() on the embedded base.
 *
 * (Declared with (void): an empty parameter list in C means
 * "unspecified arguments", not "no arguments".)
 */
struct filter_opencv_kalman*
filter_opencv_kalman_create(void);

#ifdef __cplusplus
} // extern "C"
#endif

#endif // FILTER_OPENCV_KALMAN_H
# Copyright 2019, Collabora, Ltd.
# SPDX-License-Identifier: BSL-1.0

include_directories(
	${CMAKE_CURRENT_SOURCE_DIR}/../include
	${CMAKE_CURRENT_SOURCE_DIR}/../auxiliary
	${CMAKE_CURRENT_SOURCE_DIR}
	)

# Common frameserver interface, always built.
set(FRAMESERVER_SOURCE_FILES
	common/frameserver.c
	common/frameserver.h
	)

# Optional backends, gated on the top-level BUILD_WITH_* options.
if(BUILD_WITH_FFMPEG)
	list(APPEND FRAMESERVER_SOURCE_FILES
		ffmpeg/ffmpeg_frameserver.c
		ffmpeg/ffmpeg_frameserver.h
		)
endif()

# UVC backend needs both libuvc and JPEG decoding.
if(BUILD_WITH_LIBUVC AND BUILD_WITH_JPEG)
	list(APPEND FRAMESERVER_SOURCE_FILES
		uvc/uvc_frameserver.c
		uvc/uvc_frameserver.h
		)
endif()

# v4l2 backend only needs JPEG.
if(BUILD_WITH_JPEG)
	list(APPEND FRAMESERVER_SOURCE_FILES
		v4l2/v4l2_frameserver.c
		v4l2/v4l2_frameserver.h
		)
endif()

# Use OBJECT to not create an archive, since it just gets in the way.
add_library(frameserver OBJECT ${FRAMESERVER_SOURCE_FILES})
set_property(TARGET frameserver PROPERTY POSITION_INDEPENDENT_CODE ON)
target_include_directories(frameserver SYSTEM
	PRIVATE
	${CMAKE_CURRENT_SOURCE_DIR}/..
	)

# Backend-specific include paths, matching the source gating above.
if(BUILD_WITH_LIBUVC AND BUILD_WITH_JPEG)
	target_include_directories(frameserver SYSTEM
		PRIVATE
		${libuvc_INCLUDE_DIRS}
		${LIBUSB1_INCLUDE_DIRS}
		)
endif()
if(BUILD_WITH_JPEG)
	target_include_directories(frameserver SYSTEM
		PRIVATE
		${JPEG_INCLUDE_DIRS}
		)
endif()
// Copyright 2019, Collabora, Ltd.
// SPDX-License-Identifier: BSL-1.0
/*!
* @file
* @brief Implementation of frameserver interface and shared functions.
* @author Pete Black <pblack@collabora.com>
* @author Ryan Pavlik <ryan.pavlik@collabora.com>
*/
#include "frameserver.h"
#ifdef XRT_HAVE_FFMPEG
#include "ffmpeg/ffmpeg_frameserver.h"
#endif // XRT_HAVE_FFMPEG
#ifdef XRT_HAVE_LIBUVC
#include "uvc/uvc_frameserver.h"
#endif // XRT_HAVE_LIBUVC
#include "v4l2/v4l2_frameserver.h"
#include <stdio.h>
#include <stdlib.h>
/*!
 * Factory for frameserver backends. Only backends compiled in via
 * XRT_HAVE_FFMPEG / XRT_HAVE_LIBUVC are selectable; v4l2 is always
 * built here. Returns NULL for FRAMESERVER_TYPE_NONE, for disabled
 * backends, and for unknown types.
 */
struct frameserver*
frameserver_create(enum frameserver_type t)
{
	/*
	 * Each implementation constructor should set up the members of the
	 * frameserver instance, as well as return a pointer to itself. If it
	 * fails, it should return NULL without de-allocating the frameserver
	 * instance: that is the responsibility of this function.
	 */
	switch (t) {
#ifdef XRT_HAVE_FFMPEG
	case FRAMESERVER_TYPE_FFMPEG: return ffmpeg_frameserver_create();
#endif // XRT_HAVE_FFMPEG
#ifdef XRT_HAVE_LIBUVC
	case FRAMESERVER_TYPE_UVC: return uvc_frameserver_create();
#endif // XRT_HAVE_LIBUVC
	case FRAMESERVER_TYPE_V4L2: return v4l2_frameserver_create();
	case FRAMESERVER_TYPE_NONE:
	default: return NULL;
	}
}
/*!
 * Bytes per pixel for a given pixel format.
 *
 * @return bytes-per-pixel (fractional for planar formats, e.g. 1.5 for
 *         YUV420), or -1.0f for formats without a fixed per-pixel size
 *         (RAW, JPG, unknown).
 */
float
fs_format_bytes_per_pixel(enum fs_frame_format f)
{
	switch (f) {
	case FS_FORMAT_Y_UINT8: return 1.0f;
	case FS_FORMAT_YUV420_UINT8: return 1.5f;
	case FS_FORMAT_Y_UINT16:
	case FS_FORMAT_YUV422_UINT8:
	case FS_FORMAT_YUYV_UINT8: return 2.0f;
	case FS_FORMAT_BGR_UINT8:
	case FS_FORMAT_RGB_UINT8:
	case FS_FORMAT_YUV444_UINT8: return 3.0f;
	case FS_FORMAT_RAW:
	case FS_FORMAT_JPG:
	default:
		printf("cannot compute format bytes per pixel\n");
		return -1.0f;
	}
	// Removed: a duplicate `return -1.0f;` here was unreachable, since
	// every switch path (including default) already returns.
}
/*!
 * Total size in bytes of the pixel buffer for @p f, or -1 if it cannot
 * be computed (NULL frame, or a RAW/NONE/unknown format).
 *
 * For FS_FORMAT_JPG the value is an upper bound (uncompressed YUV444
 * size), not the actual compressed length.
 */
int32_t
fs_frame_size_in_bytes(struct fs_frame* f)
{
	if (f == NULL) {
		return -1;
	}

	int32_t frame_bytes = -1;
	// TODO: alpha formats, padding etc.
	switch (f->format) {
	case FS_FORMAT_Y_UINT8:
	case FS_FORMAT_YUV420_UINT8:
	case FS_FORMAT_Y_UINT16:
	case FS_FORMAT_YUV422_UINT8:
	case FS_FORMAT_BGR_UINT8:
	case FS_FORMAT_RGB_UINT8:
	case FS_FORMAT_YUV444_UINT8:
	case FS_FORMAT_YUYV_UINT8:
		frame_bytes = f->stride * f->height;
		break;
	case FS_FORMAT_JPG:
		// this is a maximum (assuming YUV444)
		frame_bytes = f->width * f->height * 3;
		// Fixed: a missing break here fell through into the error
		// path below, printing a spurious error for every JPG frame.
		break;
	case FS_FORMAT_RAW:
	case FS_FORMAT_NONE:
	default: printf("cannot compute frame size for this format\n");
	}
	return frame_bytes;
}
/*
 * Not implemented: prints an error and returns -1 unconditionally.
 */
int32_t
fs_frame_bytes_per_pixel(struct fs_frame* f)
{
	printf("ERROR: Not implemented\n");
	return -1;
}
/*
 * Not implemented: prints an error and returns false unconditionally.
 * Intended to split a side-by-side stereo frame into left/right halves.
 */
bool
fs_frame_split_stereo(struct fs_frame* source,
                      struct fs_frame* left,
                      struct fs_frame* right)
{
	printf("ERROR: Not implemented!\n");
	return false;
}
bool
fs_frame_extract_plane(struct fs_frame* source,
enum fs_plane plane,
struct fs_frame* out)
{
// only handle splitting Y out of YUYV for now
if (source->format != FS_FORMAT_YUYV_UINT8 && plane != FS_PLANE_Y) {
printf("ERROR: unhandled plane extraction\n");
return false;
}
if (!source->data) {
printf("ERROR: no frame data!\n");
return false;
}
uint8_t* source_ptr;
uint8_t* dest_ptr;
uint8_t source_pixel_bytes = fs_format_bytes_per_pixel(source->format);
uint32_t source_line_bytes = source->stride;
uint8_t dest_pixel_bytes = fs_format_bytes_per_pixel(out->format);
uint32_t dest_line_bytes = out->width;
if (!out->data) {
printf(
"allocating data for NULL plane - someone needs to free "
"this!\n");
out->data = malloc