diff --git a/src/xrt/auxiliary/math/m_api.h b/src/xrt/auxiliary/math/m_api.h
index 79be0eb5c45d51f5283e9aa24198f78df9a850e7..5aff6afe4e1798a36593109104f0c539ba7e3715 100644
--- a/src/xrt/auxiliary/math/m_api.h
+++ b/src/xrt/auxiliary/math/m_api.h
@@ -316,6 +316,9 @@ math_min(int a, int b);
 int
 math_max(int a, int b);
 
+float
+math_quat_magnitude(struct xrt_quat q);
+
+float
+math_distance(struct xrt_vec3 a, struct xrt_vec3 b);
 
 #ifdef __cplusplus
 }
diff --git a/src/xrt/auxiliary/math/m_eigen_interop.h b/src/xrt/auxiliary/math/m_eigen_interop.h
index 750637f95dfcb08e164d3fb5bc6b8defca862406..91dcccf452c35741c34414092755d0ae6d026f67 100644
--- a/src/xrt/auxiliary/math/m_eigen_interop.h
+++ b/src/xrt/auxiliary/math/m_eigen_interop.h
@@ -110,3 +110,16 @@ position(struct xrt_pose& pose)
 {
 	return map_vec3(pose.position);
 }
+
+/*!
+ * @brief Wrap an internal 4x4 matrix struct in an Eigen type, non-const
+ * overload.
+ *
+ * Permits zero-overhead manipulation of `xrt_matrix_4x4&` by Eigen routines as
+ * if it were an `Eigen::Matrix4f&`.
+ */
+static inline Eigen::Map<Eigen::Matrix4f>
+map_mat4x4(struct xrt_matrix_4x4& m)
+{
+	return Eigen::Map<Eigen::Matrix4f>(m.v);
+}
diff --git a/src/xrt/auxiliary/math/m_track.c b/src/xrt/auxiliary/math/m_track.c
index 055f77dfad972758b63a02a441ddfcf8ca8a006a..6fbf7cca216402e92022ad1a410d9a784b833505 100644
--- a/src/xrt/auxiliary/math/m_track.c
+++ b/src/xrt/auxiliary/math/m_track.c
@@ -19,17 +19,17 @@ void
 math_euler_to_quat(struct xrt_vec3 euler, struct xrt_quat* q)
 {
 
-        double cy = cos(euler.z* 0.5);
-        double sy = sin(euler.z * 0.5);
-        double cp = cos(euler.x * 0.5);
-        double sp = sin(euler.x * 0.5);
-        double cr = cos(euler.y * 0.5);
-        double sr = sin(euler.y * 0.5);
-
-        q->w = cy * cp * cr + sy * sp * sr;
-        q->x = cy * cp * sr - sy * sp * cr;
-        q->y = sy * cp * sr + cy * sp * cr;
-        q->z = sy * cp * cr - cy * sp * sr;
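+        //axis convention used here: euler.z carries yaw, euler.x pitch,
+        //euler.y roll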
+        double c_yaw = cos(euler.z * 0.5);
+        double s_yaw = sin(euler.z * 0.5);
+        double c_pitch = cos(euler.x * 0.5);
+        double s_pitch = sin(euler.x * 0.5);
+        double c_roll = cos(euler.y * 0.5);
+        double s_roll = sin(euler.y * 0.5);
+
+        q->w = c_yaw * c_pitch * c_roll + s_yaw * s_pitch * s_roll;
+        q->x = c_yaw * c_pitch * s_roll - s_yaw * s_pitch * c_roll;
+        q->y = s_yaw * c_pitch * s_roll + c_yaw * s_pitch * c_roll;
+        q->z = s_yaw * c_pitch * c_roll - c_yaw * s_pitch * s_roll;
 
 }
 
@@ -48,3 +48,16 @@ math_max(int a, int b) {
     }
     return b;
 }
+
+float
+math_quat_magnitude(struct xrt_quat q) {
+    return sqrt(q.x * q.x + q.y * q.y + q.z * q.z + q.w * q.w);
+}
+
+float
+math_distance(struct xrt_vec3 a, struct xrt_vec3 b) {
+    struct xrt_vec3 d;
+    d.x = a.x - b.x;
+    d.y = a.y - b.y;
+    d.z = a.z - b.z;
+    return sqrt(d.x * d.x + d.y * d.y + d.z * d.z);
+}
diff --git a/src/xrt/drivers/montrack/filters/filter_complementary.c b/src/xrt/drivers/montrack/filters/filter_complementary.c
index 7dd2613edc24dc65d9592a9f9c4c7f8d1d346154..90527d3370cd6559e70ec97fe103fd06e26a9aa4 100644
--- a/src/xrt/drivers/montrack/filters/filter_complementary.c
+++ b/src/xrt/drivers/montrack/filters/filter_complementary.c
@@ -53,22 +53,32 @@ filter_complementary_get_state(filter_instance_t* inst, filter_state_t* state)
 	tracker_measurement_t* measurement_array;
     uint32_t count = measurement_queue_get_since_timestamp(inst->measurement_queue,0,internal->last_state.timestamp,&measurement_array);
 	float one_minus_bias = 1.0f - internal->configuration.bias;
-	for (uint32_t i=0;i<count;i++)
-	{
+    int avg_max = 8;
+    for (uint32_t i = 0; i < count; i++) {
 		tracker_measurement_t* m = &measurement_array[i];
-        float dt = (m->source_timestamp - internal->last_state.timestamp) *0.00000001;
-        if (dt > 1.0f) {
-            internal->last_state.timestamp = m->source_timestamp;
-            return false; //this is our first frame, or something has gone wrong... big dt will blow up calculations.
-        }
 
-        //convert our
+        if (m->flags & (MEASUREMENT_OPTICAL | MEASUREMENT_POSITION)) {
+            //we can just stuff our position into our state for now, ignoring timestamps.
+            internal->state.pose.position = m->pose.position;
+            internal->state.pose.orientation = m->pose.orientation;
+
+            internal->state.timestamp = m->source_timestamp;
+            internal->last_state = internal->state;
+        }
+        if (m->flags & (MEASUREMENT_IMU | MEASUREMENT_RAW_ACCEL | MEASUREMENT_RAW_GYRO)) {
+            float dt = (m->source_timestamp - internal->last_state.timestamp) * 0.000001;
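+            //(the 0.000001 scale assumes source timestamps are in microseconds)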
+            //printf("dt %f\n",dt);
+            if (dt > 1.0f) {
+                internal->last_state.timestamp = m->source_timestamp;
+                return false; //this is our first frame, or something has gone wrong... big dt will blow up calculations.
+            }
 
-        float mag_accel = sqrt(m->accel.x * m->accel.x + m->accel.y * m->accel.y + m->accel.z * m->accel.z) * internal->configuration.accel_to_radian;
+        float raw_mag_accel = sqrt(m->accel.x * m->accel.x + m->accel.y * m->accel.y + m->accel.z * m->accel.z);
+        float mag_accel = raw_mag_accel * internal->configuration.accel_to_g;
 
+        //determine roll/pitch angles with gravity as a reference
         float roll = atan2(-1.0f * m->accel.x,sqrt(m->accel.y * m->accel.y + m->accel.z * m->accel.z));
-        float pitch = atan2(m->accel.y,m->accel.z);
-
+        float pitch = atan2(m->accel.y, m->accel.z);
 
         //assume that, if acceleration is only gravity, then any change in the gyro is drift and update compensation
         //if acceleration magnitude is close to 1.0f, we assume its just gravity, and can integrate our gyro reading
@@ -76,7 +86,6 @@ filter_complementary_get_state(filter_instance_t* inst, filter_state_t* state)
         //sample
 
 
-        int avg_max =16;
         if (fabs(1.0f - mag_accel) < 0.05 ) {             //looks like gravity only
             //fill up the running average count as fast as possible, but subsequently
             //only integrate measurements that are not outliers w/respect to the average
@@ -89,24 +98,29 @@ filter_complementary_get_state(filter_instance_t* inst, filter_state_t* state)
                 internal->gyro_z_bias = internal->gyro_z_bias + (m->gyro.z - internal->gyro_z_bias) / math_min(internal->avg_count, avg_max);
                 //printf("yaw correction: %f %f %f\n",internal->gyro_x_bias,internal->gyro_y_bias,internal->gyro_z_bias);
             }
-            }
-        //printf("roll %f pitch %f gbc,%f,%f,%f,axyz,%f,%f,%f,gxyz,%f,%f,%f,dt,%f\n",roll,pitch,internal->gyro_x_bias,internal->gyro_y_bias,internal->gyro_z_bias,m->accel.x,m->accel.y,m->accel.z,m->gyro.x,m->gyro.y,m->gyro.z,dt);;
-        internal->state.rotation_euler.z = internal->last_state.rotation_euler.z + (m->gyro.z - internal->gyro_z_bias) * internal->configuration.gyro_to_radian * dt;
+        }
+        //printf("roll %f pitch %f yaw 0.0f,gbc,%f,%f,%f,axyz,%f,%f,%f,gxyz,%f,%f,%f,dt,%f\n",roll,pitch,internal->gyro_x_bias,internal->gyro_y_bias,internal->gyro_z_bias,m->accel.x,m->accel.y,m->accel.z,m->gyro.x,m->gyro.y,m->gyro.z,dt);;
+        internal->state.rotation_euler.z = internal->last_state.rotation_euler.z + (m->gyro.z - internal->gyro_z_bias) * internal->configuration.gyro_to_radians_per_second * dt;
         //push back towards zero, as 'returning to 0 slowly' is better than 'just drift', probably
-        internal->state.rotation_euler.z -=internal->state.rotation_euler.z * internal->configuration.drift_z_to_zero; //0.001;
-        internal->state.rotation_euler.y = internal->last_state.rotation_euler.y + (0.99 * ((m->gyro.y - internal->gyro_y_bias) * internal->configuration.gyro_to_radian * dt)) - 0.01 * (internal->last_state.rotation_euler.y - roll);
-        internal->state.rotation_euler.x = internal->last_state.rotation_euler.x + (0.99 * ((m->gyro.x - internal->gyro_x_bias) * internal->configuration.gyro_to_radian * dt)) - 0.01 * (internal->last_state.rotation_euler.x - pitch);
-        internal->state.timestamp = m->source_timestamp;
-        internal->last_state = internal->state;
+        internal->state.rotation_euler.z -= internal->state.rotation_euler.z * internal->configuration.drift_z_to_zero * dt;
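+        //complementary blend for x/y: integrate the bias-corrected gyro,
+        //weighted by (1 - bias), while pulling toward the accel-derived
+        //roll/pitch reference, weighted by bias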
+        internal->state.rotation_euler.y = internal->last_state.rotation_euler.y + (one_minus_bias * ((m->gyro.y - internal->gyro_y_bias) * internal->configuration.gyro_to_radians_per_second * dt)) - internal->configuration.bias * (internal->last_state.rotation_euler.y - roll);
+        internal->state.rotation_euler.x = internal->last_state.rotation_euler.x + (one_minus_bias * ((m->gyro.x - internal->gyro_x_bias) * internal->configuration.gyro_to_radians_per_second * dt)) - internal->configuration.bias * (internal->last_state.rotation_euler.x - pitch);
+
+        //DISABLED FOR POS TRACKER TESTING
+        //internal->state.timestamp = m->source_timestamp;
+        //internal->last_state = internal->state;
         //printf("source tstamp: %lld\n",m->source_timestamp);
-	    }
-	//TODO: come up with a way to avoid alloc/free - use max length buffer?
-	free(measurement_array);
-	//convert to a quat for consumption as pose
+        }
+    }
+    //TODO: come up with a way to avoid alloc/free - use max length buffer?
+    free(measurement_array);
+    //convert to a quat for consumption as pose
     //printf(" integrated %d measurements after %lld X %f Y %f Z %f\n",count,internal->last_state.timestamp,internal->state.rotation_euler.x,internal->state.rotation_euler.y,internal->state.rotation_euler.z);
-    struct xrt_quat integrate_quat;
-    math_euler_to_quat(internal->state.rotation_euler,&internal->state.pose.orientation);
-	*state = internal->state;
+
+    //removed for debugging pos tracking
+    //math_euler_to_quat(internal->state.rotation_euler,&internal->state.pose.orientation);
+
+    *state = internal->state;
 	return true;
 }
 bool
diff --git a/src/xrt/drivers/montrack/filters/filter_complementary.h b/src/xrt/drivers/montrack/filters/filter_complementary.h
index 995dc6036a5734e2e27dc74f2f4126c279c619b4..043afaa1642f20b9d310b9c65fcae2b497818781 100644
--- a/src/xrt/drivers/montrack/filters/filter_complementary.h
+++ b/src/xrt/drivers/montrack/filters/filter_complementary.h
@@ -11,8 +11,8 @@ typedef struct filter_complementary_configuration
 	float bias;
 	float scale;
     uint64_t max_timestamp; //wrap timestamp here
-    float accel_to_radian;
-    float gyro_to_radian;
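+    //conversion factors from raw imu units into g and rad/s respectively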
+    float accel_to_g;
+    float gyro_to_radians_per_second;
     float drift_z_to_zero;
 } filter_complementary_configuration_t;
 
diff --git a/src/xrt/drivers/montrack/frameservers/uvc/uvc_frameserver.c b/src/xrt/drivers/montrack/frameservers/uvc/uvc_frameserver.c
index 0b2ec5bac1c21e4b7e9bb26386d6b221b9995dfa..1e1e3441e88ca3cf3176aac34827de66a75bb3ce 100644
--- a/src/xrt/drivers/montrack/frameservers/uvc/uvc_frameserver.c
+++ b/src/xrt/drivers/montrack/frameservers/uvc/uvc_frameserver.c
@@ -349,9 +349,9 @@ uvc_frameserver_stream_run(void* ptr)
 					    f.width * 3; // jpg format does not
 					                 // supply stride
 					// decode our jpg frame.
-					if (!temp_data) {
-						temp_data = malloc(
-						    frame_size_in_bytes(&f));
+					f.size_bytes = frame_size_in_bytes(&f);
+					if (!temp_data) {
+						temp_data = malloc(f.size_bytes);
 					}
 					jpeg_mem_src(&cinfo, frame->data,
 					             frame->data_bytes);
diff --git a/src/xrt/drivers/montrack/mt_device.c b/src/xrt/drivers/montrack/mt_device.c
index 803c8d2943d858cb732271d601af202cab6825b8..cecc0792bb41ecbb49b078f620c11fcfb0e96710 100644
--- a/src/xrt/drivers/montrack/mt_device.c
+++ b/src/xrt/drivers/montrack/mt_device.c
@@ -108,7 +108,7 @@ mt_device_update_inputs(struct xrt_device *xdev,
         //configure our tracker to work with our source - hardcoded to first source
         frame_t source_config = fq->source_frames[0];
         tracker_stereo_configuration_t tc;
-        snprintf(tc.configuration_filename,256,"PETE");
+        snprintf(tc.camera_configuration_filename,256,"PETE");
         tc.l_format= source_config.format;
         tc.split_left=true;
         tc.l_source_id = 0;
diff --git a/src/xrt/drivers/montrack/optical_tracking/CMakeLists.txt b/src/xrt/drivers/montrack/optical_tracking/CMakeLists.txt
index 02a6d0cc46ead61829d347b7403132b92609ca40..dbaa37ddaf743ee50c3ed840e95a55da6e4c2d35 100644
--- a/src/xrt/drivers/montrack/optical_tracking/CMakeLists.txt
+++ b/src/xrt/drivers/montrack/optical_tracking/CMakeLists.txt
@@ -7,6 +7,7 @@ include_directories(
 
 set(OPTICAL_TRACKING_SOURCE_FILES
         common/calibration.cpp
+	common/calibration_opencv.hpp
 	common/calibration.h
 	common/tracked_object.h
 	common/tracked_object.c
@@ -17,8 +18,8 @@ set(OPTICAL_TRACKING_SOURCE_FILES
 	tracker3D_sphere_stereo.h
 	tracker3D_sphere_mono.cpp
 	tracker3D_sphere_mono.h
-	track_psvr.h
-	track_psvr.cpp
+	tracker3D_psvr_stereo.h
+	tracker3D_psvr_stereo.cpp
 	)
 if(BUILD_WITH_UVBI)
 	list(APPEND OPTICAL_TRACKING_SOURCE_FILES
diff --git a/src/xrt/drivers/montrack/optical_tracking/common/calibration.cpp b/src/xrt/drivers/montrack/optical_tracking/common/calibration.cpp
index 5db5dfc0957fabfc86fed49837a909b336b26942..3df2b915e14778120951eab9ddd401911dd9639a 100644
--- a/src/xrt/drivers/montrack/optical_tracking/common/calibration.cpp
+++ b/src/xrt/drivers/montrack/optical_tracking/common/calibration.cpp
@@ -4,6 +4,7 @@
 #include <opencv2/opencv.hpp>
 
 #include "calibration.h"
+#include "calibration_opencv.hpp"
 #include "common/opencv_utils.hpp"
 
 #include <sys/stat.h>
@@ -11,9 +12,44 @@
 
 #include "util/u_misc.h"
 
-#define MAX_CALIBRATION_SAMPLES                                                \
-    23 // mo' samples, mo' calibration accuracy, at the expense of time.
-static cv::Rect2f calibration_distrib_rectilinear[9] = {};
+#define MAX_CALIBRATION_SAMPLES 15
+
+//set up our calibration rectangles; we will collect chessboard samples
+//that 'fill' these rectangular regions to get good coverage
+#define COVERAGE_X_RECTI 0.8f
+#define COVERAGE_Y_RECTI 0.8f
+#define COVERAGE_X_FISHEYE 0.6f
+#define COVERAGE_Y_FISHEYE 0.8f
+
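+//nine rects in a 3x3 grid over the covered area; during the first phase of
+//calibration the chessboard must be placed inside each rect in turn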
+static cv::Rect2f calibration_rect_rectilinear[] = {
+    cv::Rect2f((1.0f - COVERAGE_X_RECTI) / 2.0f, (1.0f - COVERAGE_Y_RECTI) / 2.0f, 0.3f, 0.3f),
+    cv::Rect2f((1.0f - COVERAGE_X_RECTI) / 2.0f + COVERAGE_X_RECTI / 3.0f, (1.0f - COVERAGE_Y_RECTI) / 2.0f, 0.3f, 0.3f),
+    cv::Rect2f((1.0f - COVERAGE_X_RECTI) / 2.0f + 2 * COVERAGE_X_RECTI / 3.0f, (1.0f - COVERAGE_Y_RECTI) / 2.0f, 0.3f, 0.3f),
+
+    cv::Rect2f((1.0f - COVERAGE_X_RECTI) / 2.0f, (1.0f - COVERAGE_Y_RECTI) / 2.0f + COVERAGE_Y_RECTI / 3.0f, 0.3f, 0.3f),
+    cv::Rect2f((1.0f - COVERAGE_X_RECTI) / 2.0f + COVERAGE_X_RECTI / 3.0f, (1.0f - COVERAGE_Y_RECTI) / 2.0f + COVERAGE_Y_RECTI / 3.0f, 0.3f, 0.3f),
+    cv::Rect2f((1.0f - COVERAGE_X_RECTI) / 2.0f + 2 * COVERAGE_X_RECTI / 3.0f, (1.0f - COVERAGE_Y_RECTI) / 2.0f + COVERAGE_Y_RECTI / 3.0f, 0.3f, 0.3f),
+
+    cv::Rect2f((1.0f - COVERAGE_X_RECTI) / 2.0f, (1.0f - COVERAGE_Y_RECTI) / 2.0f + 2 * COVERAGE_Y_RECTI / 3.0f, 0.3f, 0.3f),
+    cv::Rect2f((1.0f - COVERAGE_X_RECTI) / 2.0f + COVERAGE_X_RECTI / 3.0f, (1.0f - COVERAGE_Y_RECTI) / 2.0f + 2 * COVERAGE_Y_RECTI / 3.0f, 0.3f, 0.3f),
+    cv::Rect2f((1.0f - COVERAGE_X_RECTI) / 2.0f + 2 * COVERAGE_X_RECTI / 3.0f, (1.0f - COVERAGE_Y_RECTI) / 2.0f + 2 * COVERAGE_Y_RECTI / 3.0f, 0.3f, 0.3f),
+};
+//same grid for fisheye sources, using the fisheye coverage constants
+static cv::Rect2f calibration_rect_fisheye[] = {
+    cv::Rect2f((1.0f - COVERAGE_X_FISHEYE) / 2.0f, (1.0f - COVERAGE_Y_FISHEYE) / 2.0f, 0.3f, 0.3f),
+    cv::Rect2f((1.0f - COVERAGE_X_FISHEYE) / 2.0f + COVERAGE_X_FISHEYE / 3.0f, (1.0f - COVERAGE_Y_FISHEYE) / 2.0f, 0.3f, 0.3f),
+    cv::Rect2f((1.0f - COVERAGE_X_FISHEYE) / 2.0f + 2 * COVERAGE_X_FISHEYE / 3.0f, (1.0f - COVERAGE_Y_FISHEYE) / 2.0f, 0.3f, 0.3f),
+
+    cv::Rect2f((1.0f - COVERAGE_X_FISHEYE) / 2.0f, (1.0f - COVERAGE_Y_FISHEYE) / 2.0f + COVERAGE_Y_FISHEYE / 3.0f, 0.3f, 0.3f),
+    cv::Rect2f((1.0f - COVERAGE_X_FISHEYE) / 2.0f + COVERAGE_X_FISHEYE / 3.0f, (1.0f - COVERAGE_Y_FISHEYE) / 2.0f + COVERAGE_Y_FISHEYE / 3.0f, 0.3f, 0.3f),
+    cv::Rect2f((1.0f - COVERAGE_X_FISHEYE) / 2.0f + 2 * COVERAGE_X_FISHEYE / 3.0f, (1.0f - COVERAGE_Y_FISHEYE) / 2.0f + COVERAGE_Y_FISHEYE / 3.0f, 0.3f, 0.3f),
+
+    cv::Rect2f((1.0f - COVERAGE_X_FISHEYE) / 2.0f, (1.0f - COVERAGE_Y_FISHEYE) / 2.0f + 2 * COVERAGE_Y_FISHEYE / 3.0f, 0.3f, 0.3f),
+    cv::Rect2f((1.0f - COVERAGE_X_FISHEYE) / 2.0f + COVERAGE_X_FISHEYE / 3.0f, (1.0f - COVERAGE_Y_FISHEYE) / 2.0f + 2 * COVERAGE_Y_FISHEYE / 3.0f, 0.3f, 0.3f),
+    cv::Rect2f((1.0f - COVERAGE_X_FISHEYE) / 2.0f + 2 * COVERAGE_X_FISHEYE / 3.0f, (1.0f - COVERAGE_Y_FISHEYE) / 2.0f + 2 * COVERAGE_Y_FISHEYE / 3.0f, 0.3f, 0.3f),
+};
 
 static bool tracker3D_calibration_stereo_calibrate(tracker_instance_t* inst);
 
@@ -46,20 +82,20 @@ bool calibration_get_stereo(char* configuration_filename,bool use_fisheye, cv::M
     FILE* calib_file = fopen(path_string, "rb");
     if (calib_file) {
         // read our calibration from this file
-        read_mat(calib_file, &l_intrinsics);
-        read_mat(calib_file, &r_intrinsics);
-        read_mat(calib_file, &l_distortion);
-        read_mat(calib_file, &r_distortion);
-        read_mat(calib_file, &l_distortion_fisheye);
-        read_mat(calib_file, &r_distortion_fisheye);
-        read_mat(calib_file, &l_rotation);
-        read_mat(calib_file, &r_rotation);
-        read_mat(calib_file, &l_translation);
-        read_mat(calib_file, &r_translation);
-        read_mat(calib_file, &l_projection);
-        read_mat(calib_file, &r_projection);
-        read_mat(calib_file,disparity_to_depth); //provided by caller
-        read_mat(calib_file,&mat_image_size);
+        read_cv_mat(calib_file, &l_intrinsics);
+        read_cv_mat(calib_file, &r_intrinsics);
+        read_cv_mat(calib_file, &l_distortion);
+        read_cv_mat(calib_file, &r_distortion);
+        read_cv_mat(calib_file, &l_distortion_fisheye);
+        read_cv_mat(calib_file, &r_distortion_fisheye);
+        read_cv_mat(calib_file, &l_rotation);
+        read_cv_mat(calib_file, &r_rotation);
+        read_cv_mat(calib_file, &l_translation);
+        read_cv_mat(calib_file, &r_translation);
+        read_cv_mat(calib_file, &l_projection);
+        read_cv_mat(calib_file, &r_projection);
+        read_cv_mat(calib_file,disparity_to_depth); //provided by caller
+        read_cv_mat(calib_file,&mat_image_size);
 
         cv::Size image_size(mat_image_size.at<float>(0,0),mat_image_size.at<float>(0,1));
 
@@ -135,6 +171,8 @@ typedef struct tracker3D_calibration_stereo_instance
     bool got_left;
     bool got_right;
 
+    uint32_t calibration_count;
+
 } tracker3D_calibration_stereo_instance_t;
 
 
@@ -153,6 +191,7 @@ tracker3D_calibration_stereo_create(tracker_instance_t* inst)
 
         // alloc our debug frame here - opencv is h,w, not w,h
         i->debug_rgb = cv::Mat(480, 640, CV_8UC3, cv::Scalar(0, 0, 0));
+        i->calibration_count = 0;
         return i;
     }
     return NULL;
@@ -300,7 +339,7 @@ bool
 tracker3D_calibration_stereo_calibrate(tracker_instance_t* inst)
 {
 
-    printf("calibrating...\n");
+    //printf("calibrating...\n");
 
     // check if we have saved calibration data. if so, just use it.
     tracker3D_calibration_stereo_instance_t* internal =
@@ -324,6 +363,17 @@ tracker3D_calibration_stereo_calibrate(tracker_instance_t* inst)
         internal->debug_rgb, cv::Point2f(0, 0),
         cv::Point2f(internal->debug_rgb.cols, internal->debug_rgb.rows),
         cv::Scalar(0, 0, 0), -1, 0);
+
+    cv::Point2f bound_tl = calibration_rect_rectilinear[internal->calibration_count].tl();
+    bound_tl.x *= internal->l_frame_gray.cols;
+    bound_tl.y *= internal->l_frame_gray.rows;
+
+    cv::Point2f bound_br = calibration_rect_rectilinear[internal->calibration_count].br();
+    bound_br.x *= internal->l_frame_gray.cols;
+    bound_br.y *= internal->l_frame_gray.rows;
+
+    cv::rectangle(internal->debug_rgb, bound_tl,bound_br, cv::Scalar(0, 64, 32));
+
     cv::Mat disp8;
     cv::cvtColor(internal->r_frame_gray, disp8, CV_GRAY2BGR);
     // disp8.copyTo(internal->debug_rgb);
@@ -337,34 +387,58 @@ tracker3D_calibration_stereo_calibrate(tracker_instance_t* inst)
     bool found_right = cv::findChessboardCorners(
         internal->r_frame_gray, board_size, r_chessboard_measured);
     char message[128];
-    message[0] = 0x0;
+    char message2[128];
+
+    snprintf(message, 128, "COLLECTING SAMPLE: %d/%d",
+             internal->l_chessboards_measured.size() + 1,
+             MAX_CALIBRATION_SAMPLES);
+
+
+    std::vector<cv::Point2f> coverage;
+    for (uint32_t i = 0;
+         i < internal->l_chessboards_measured.size(); i++) {
+        cv::Rect brect = cv::boundingRect(
+            internal->l_chessboards_measured[i]);
+        cv::rectangle(internal->debug_rgb, brect.tl(),
+                      brect.br(), cv::Scalar(0, 64, 32));
+
+        coverage.push_back(cv::Point2f(brect.tl()));
+        coverage.push_back(cv::Point2f(brect.br()));
+    }
+    cv::Rect pre_rect = cv::boundingRect(coverage);
+
+    cv::rectangle(internal->debug_rgb, pre_rect.tl(),
+                  pre_rect.br(), cv::Scalar(0, 255, 0));
 
     if (found_left && found_right) {
-        std::vector<cv::Point2f> coverage;
-        for (uint32_t i = 0;
-             i < internal->l_chessboards_measured.size(); i++) {
-            cv::Rect brect = cv::boundingRect(
-                internal->l_chessboards_measured[i]);
-            cv::rectangle(internal->debug_rgb, brect.tl(),
-                          brect.br(), cv::Scalar(0, 64, 32));
-
-            coverage.push_back(cv::Point2f(brect.tl()));
-            coverage.push_back(cv::Point2f(brect.br()));
-        }
-        cv::Rect pre_rect = cv::boundingRect(coverage);
         cv::Rect brect = cv::boundingRect(l_chessboard_measured);
         coverage.push_back(cv::Point2f(brect.tl()));
         coverage.push_back(cv::Point2f(brect.br()));
         cv::Rect post_rect = cv::boundingRect(coverage);
-
         // std::cout << "COVERAGE: " << brect.area() << "\n";
 
         cv::rectangle(internal->debug_rgb, post_rect.tl(),
                       post_rect.br(), cv::Scalar(0, 255, 0));
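+        //once the guided rects are done, a sample only counts if the board
+        //fills roughly 30% x 30% of the frame and grows the covered area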
+        bool add_sample = false;
+        uint32_t coverage_threshold = internal->l_frame_gray.cols * 0.3f *
+                                      internal->l_frame_gray.rows * 0.3f;
+
+        snprintf(message2, 128, "TRY TO 'PUSH OUT EDGES' WITH LARGE BOARD IMAGES");
 
-        if (post_rect.area() > pre_rect.area() + 500) {
-            // we will use the last n samples to calculate our
-            // calibration
+        if (internal->calibration_count < 9) {
+            snprintf(message2, 128, "POSITION CHESSBOARD IN BOX");
+        }
+
+        if (internal->calibration_count < 9 && brect.tl().x >= bound_tl.x &&
+            brect.tl().y >= bound_tl.y && brect.br().x <= bound_br.x &&
+            brect.br().y <= bound_br.y) {
+            add_sample = true;
+        }
+        //printf(" THRESH %d BRECT AREA %d coverage_diff %d\n",coverage_threshold, brect.area(),post_rect.area() - pre_rect.area());
+        if (internal->calibration_count >= 9 && brect.area() > coverage_threshold &&
+            post_rect.area() > pre_rect.area() + coverage_threshold / 5) {
+            add_sample = true;
+        }
+        if (add_sample) {
 
             if (internal->l_chessboards_measured.size() >
                 MAX_CALIBRATION_SAMPLES) {
@@ -381,6 +455,7 @@ tracker3D_calibration_stereo_calibrate(tracker_instance_t* inst)
                 l_chessboard_measured);
             internal->r_chessboards_measured.push_back(
                 r_chessboard_measured);
+            internal->calibration_count++;
         }
 
         if (internal->l_chessboards_measured.size() ==
@@ -444,7 +519,7 @@ tracker3D_calibration_stereo_calibrate(tracker_instance_t* inst)
                 disparity_to_depth,
                 cv::CALIB_ZERO_DISPARITY);
 
-
+            snprintf(message2, 128, "CALIBRATION DONE RP ERROR %f",rp_error);
             char path_string[PATH_MAX];
             char file_string[PATH_MAX];
             // TODO: centralise this - use multiple env vars?
@@ -454,7 +529,7 @@ tracker3D_calibration_stereo_calibrate(tracker_instance_t* inst)
             snprintf(
                 file_string, PATH_MAX,
                 "%s/.config/monado/%s.calibration", config_path,
-                internal->configuration.configuration_filename);
+                internal->configuration.camera_configuration_filename);
 
             printf("TRY WRITING CONFIG TO %s\n", file_string);
             FILE* calib_file = fopen(file_string, "wb");
@@ -469,25 +544,25 @@ tracker3D_calibration_stereo_calibrate(tracker_instance_t* inst)
                     "%s\n",
                     file_string);
             } else {
-                write_mat(calib_file, &l_intrinsics);
-                write_mat(calib_file, &r_intrinsics);
-                write_mat(calib_file, &l_distortion);
-                write_mat(calib_file, &r_distortion);
-                write_mat(calib_file, &l_distortion_fisheye);
-                write_mat(calib_file, &r_distortion_fisheye);
-                write_mat(calib_file, &l_rotation);
-                write_mat(calib_file, &r_rotation);
-                write_mat(calib_file, &l_translation);
-                write_mat(calib_file, &r_translation);
-                write_mat(calib_file, &l_projection);
-                write_mat(calib_file, &r_projection);
-                write_mat(calib_file, &disparity_to_depth);
+                write_cv_mat(calib_file, &l_intrinsics);
+                write_cv_mat(calib_file, &r_intrinsics);
+                write_cv_mat(calib_file, &l_distortion);
+                write_cv_mat(calib_file, &r_distortion);
+                write_cv_mat(calib_file, &l_distortion_fisheye);
+                write_cv_mat(calib_file, &r_distortion_fisheye);
+                write_cv_mat(calib_file, &l_rotation);
+                write_cv_mat(calib_file, &r_rotation);
+                write_cv_mat(calib_file, &l_translation);
+                write_cv_mat(calib_file, &r_translation);
+                write_cv_mat(calib_file, &l_projection);
+                write_cv_mat(calib_file, &r_projection);
+                write_cv_mat(calib_file, &disparity_to_depth);
 
                 cv::Mat mat_image_size;
                 mat_image_size.create(1,2,CV_32F);
                 mat_image_size.at<float>(0,0) = image_size.width;
                 mat_image_size.at<float>(0,1) = image_size.height;
-                write_mat(calib_file, &mat_image_size);
+                write_cv_mat(calib_file, &mat_image_size);
 
                 fclose(calib_file);
             }
@@ -496,9 +571,11 @@ tracker3D_calibration_stereo_calibrate(tracker_instance_t* inst)
             internal->calibrated = true;
             driver_event_t e = {};
             e.type = EVENT_CALIBRATION_DONE;
-            internal->event_target_callback(
-                internal->event_target_instance, e);
-        }
+            if (internal->event_target_callback) {
+                internal->event_target_callback(
+                    internal->event_target_instance, e);
+            }
+        }
 
         snprintf(message, 128, "COLLECTING SAMPLE: %d/%d",
                  internal->l_chessboards_measured.size() + 1,
@@ -511,19 +588,22 @@ tracker3D_calibration_stereo_calibrate(tracker_instance_t* inst)
     cv::drawChessboardCorners(internal->debug_rgb, board_size,
                               r_chessboard_measured, found_right);
 
-    cv::putText(internal->debug_rgb, "CALIBRATION MODE",
-                cv::Point2i(160, 240), 0, 1.0f, cv::Scalar(192, 192, 192));
-    cv::putText(internal->debug_rgb, message, cv::Point2i(160, 460), 0,
+    cv::putText(internal->debug_rgb,
+                "CALIBRATION MODE",
+                cv::Point2i(160, 240),
+                0, 1.0f,
+                cv::Scalar(192, 192, 192) );
+
+    if (internal->calibration_count <= MAX_CALIBRATION_SAMPLES) {
+        cv::putText(internal->debug_rgb,
+                    message, cv::Point2i(160, 460),
+                    0,
+                    0.5f,
+                    cv::Scalar(192, 192, 192) );
+    }
+    cv::putText(internal->debug_rgb, message2, cv::Point2i(160, 30), 0,
                 0.5f, cv::Scalar(192, 192, 192));
 
-    // DEBUG: write out our image planes to confirm imagery is arriving as
-    // expected
-    /*cv::imwrite("/tmp/l_out_y.jpg",internal->l_frame_gray);
-    cv::imwrite("/tmp/r_out_y.jpg",internal->r_frame_gray);
-    cv::imwrite("/tmp/l_out_u.jpg",internal->l_frame_u);
-    cv::imwrite("/tmp/r_out_u.jpg",internal->r_frame_u);
-    cv::imwrite("/tmp/l_out_v.jpg",internal->l_frame_v);
-    cv::imwrite("/tmp/r_out_v.jpg",internal->r_frame_v);*/
     tracker_send_debug_frame(inst);
     return true;
 }
@@ -573,15 +653,6 @@ tracker3D_calibration_stereo_register_event_callback(
 }
 
 
-#include <opencv2/opencv.hpp>
-
-#include "tracker3D_sphere_mono.h"
-#include "common/opencv_utils.hpp"
-
-#include "util/u_misc.h"
-
-#define MAX_CALIBRATION_SAMPLES 23
-
 static bool
 tracker3D_sphere_mono_calibrate(tracker_instance_t* inst);
 
@@ -739,7 +810,7 @@ tracker3D_calibration_mono_calibrate(tracker_instance_t* inst)
             snprintf(
                 file_string, PATH_MAX,
                 "%s/.config/monado/%s.calibration", config_path,
-                internal->configuration.configuration_filename);
+                internal->configuration.camera_configuration_filename);
 
             printf("TRY WRITING CONFIG TO %s\n", file_string);
             FILE* calib_file = fopen(file_string, "wb");
@@ -757,10 +828,10 @@ tracker3D_calibration_mono_calibrate(tracker_instance_t* inst)
                 mat_image_size.create(1,2,CV_32F);
                 mat_image_size.at<float>(0,0) = image_size.width;
                 mat_image_size.at<float>(0,0) = image_size.height;
-                write_mat(calib_file, &intrinsics);
-                write_mat(calib_file, &distortion);
-                write_mat(calib_file, &distortion_fisheye);
-                write_mat(calib_file, &mat_image_size);
+                write_cv_mat(calib_file, &intrinsics);
+                write_cv_mat(calib_file, &distortion);
+                write_cv_mat(calib_file, &distortion_fisheye);
+                write_cv_mat(calib_file, &mat_image_size);
                 fclose(calib_file);
             }
 
@@ -841,3 +912,26 @@ tracker3D_calibration_register_measurement_callback(
 {
     //do nothing
 }
+
+
+//TODO: have a roomsetup type, rather than the bare matrix and scale
+bool calibration_get_roomsetup(char* configuration_filename,room_setup_t* rs)
+{
+    char path_string[PATH_MAX];
+    // TODO: use multiple env vars?
+    char* config_path = secure_getenv("HOME");
+    if (config_path == NULL) {
+        return false;
+    }
+    snprintf(path_string, PATH_MAX, "%s/.config/monado/%s.calibration",
+             config_path, configuration_filename);
+
+    printf("TRY LOADING CONFIG FROM %s\n", path_string);
+    FILE* calib_file = fopen(path_string, "rb");
+    if (calib_file) {
+        // read our calibration from this file
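+        // (16 floats for the 4x4 origin transform, then 3 floats of scale)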
+        read_xrt_matrix44(calib_file, &rs->origin_transform);
+        read_xrt_vec3(calib_file, &rs->metric_scale);
+        fclose(calib_file);
+        return true;
+    }
+    return false;
+}
diff --git a/src/xrt/drivers/montrack/optical_tracking/common/calibration.h b/src/xrt/drivers/montrack/optical_tracking/common/calibration.h
index ec7780bba3b1ba882db5d26db72844aa90014411..ef031478a2905ea27648c51ca456f854ad918370 100644
--- a/src/xrt/drivers/montrack/optical_tracking/common/calibration.h
+++ b/src/xrt/drivers/montrack/optical_tracking/common/calibration.h
@@ -9,11 +9,18 @@
 #define DISTORTION_SIZE 5
 #define DISTORTION_FISHEYE_SIZE 4
 
+typedef struct room_setup {
+    struct xrt_matrix_4x4 origin_transform;
+    struct xrt_vec3 metric_scale;
+} room_setup_t;
 \
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 // forward declare these
 typedef struct tracker3D_calibration_stereo_instance tracker3D_calibration_stereo_instance_t;
 typedef struct tracker3D_calibration_mono_instance tracker3D_calibration_mono_instance_t;
@@ -40,6 +47,9 @@ bool tracker3D_calibration_mono_queue(tracker_instance_t* inst, frame_t* frame);
 bool tracker3D_calibration_mono_configure(tracker_instance_t* inst, tracker_mono_configuration_t* config);
 void tracker3D_calibration_mono_register_event_callback( tracker_instance_t* inst, void* target_instance, event_consumer_callback_func target_func);
 
+bool calibration_get_roomsetup(char* configuration_filename,room_setup_t* rs);
+
 #ifdef __cplusplus
 } // extern "C"
 #endif
diff --git a/src/xrt/drivers/montrack/optical_tracking/common/calibration_opencv.hpp b/src/xrt/drivers/montrack/optical_tracking/common/calibration_opencv.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..6041af8ec9e3d042decb60d57e6cf4d3e837a599
--- /dev/null
+++ b/src/xrt/drivers/montrack/optical_tracking/common/calibration_opencv.hpp
@@ -0,0 +1,21 @@
+#ifndef CALIBRATION_OPENCV_HPP
+#define CALIBRATION_OPENCV_HPP
+
+//separate file to avoid polluting C code with C++/opencv classes
+
+#include <xrt/xrt_defines.h>
+#include <opencv2/opencv.hpp>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool calibration_get_stereo(char* configuration_filename, bool use_fisheye,
+                            cv::Mat* l_undistort_map_x, cv::Mat* l_undistort_map_y,
+                            cv::Mat* l_rectify_map_x, cv::Mat* l_rectify_map_y,
+                            cv::Mat* r_undistort_map_x, cv::Mat* r_undistort_map_y,
+                            cv::Mat* r_rectify_map_x, cv::Mat* r_rectify_map_y,
+                            cv::Mat* disparity_to_depth);
+bool calibration_get_mono(char* configuration_filename, bool use_fisheye,
+                          cv::Mat* undistort_map_x, cv::Mat* undistort_map_y);
+
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // CALIBRATION_OPENCV_HPP
diff --git a/src/xrt/drivers/montrack/optical_tracking/common/opencv_utils.hpp b/src/xrt/drivers/montrack/optical_tracking/common/opencv_utils.hpp
index 9a405d60a2954161cf576c96c3f619dbce2af441..ffc56db9ee18a687530dd8e002d0e2b5db7b0747 100644
--- a/src/xrt/drivers/montrack/optical_tracking/common/opencv_utils.hpp
+++ b/src/xrt/drivers/montrack/optical_tracking/common/opencv_utils.hpp
@@ -4,10 +4,10 @@
 #include <sys/stat.h>
 #include <opencv2/opencv.hpp>
 
-#include "xrt/xrt_compiler.h"
+#include <xrt/xrt_defines.h>
 
 
-static bool write_mat(FILE* f, cv::Mat* m)
+static bool write_cv_mat(FILE* f, cv::Mat* m)
 {
 	uint32_t header[3];
 	header[0] = m->elemSize();
@@ -18,7 +18,7 @@ static bool write_mat(FILE* f, cv::Mat* m)
 	return true;
 }
 
-static bool read_mat(FILE* f, cv::Mat* m)
+static bool read_cv_mat(FILE* f, cv::Mat* m)
 {
 	uint32_t header[3];
 	fread((void*)header,sizeof(uint32_t),3,f);
@@ -32,11 +32,56 @@ static bool read_mat(FILE* f, cv::Mat* m)
 	return true;
 }
 
-XRT_MAYBE_UNUSED static float dist_3d(cv::Point3f& p, cv::Point3f& q) {
+
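+//raw binary i/o helpers for the xrt math types - these read/write the
+//struct's floats directly, with no header or endianness handling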
+static bool write_xrt_matrix44(FILE* f, struct xrt_matrix_4x4* m)
+{
+    return fwrite((void*)m->v, sizeof(float), 16, f) == 16;
+}
+
+static bool read_xrt_matrix44(FILE* f, struct xrt_matrix_4x4* m)
+{
+    return fread((void*)m->v, sizeof(float), 16, f) == 16;
+}
+
+static bool write_xrt_vec3(FILE* f, struct xrt_vec3* m)
+{
+    return fwrite((void*)&m->x, sizeof(float), 3, f) == 3;
+}
+
+static bool read_xrt_vec3(FILE* f, struct xrt_vec3* m)
+{
+    return fread((void*)&m->x, sizeof(float), 3, f) == 3;
+}
+
+static bool write_xrt_quat(FILE* f, struct xrt_quat* m)
+{
+    return fwrite((void*)&m->x, sizeof(float), 4, f) == 4;
+}
+
+static bool read_xrt_quat(FILE* f, struct xrt_quat* m)
+{
+    return fread((void*)&m->x, sizeof(float), 4, f) == 4;
+}
+
+XRT_MAYBE_UNUSED static float cv_dist_3d(cv::Point3f& p, cv::Point3f& q) {
 	cv::Point3f d = p - q;
 	return cv::sqrt(d.x*d.x + d.y*d.y + d.z * d.z);
 }
 
+XRT_MAYBE_UNUSED static float cv_dist_3d(cv::Vec3f& p, cv::Vec3f& q) {
+    cv::Point3f d = p - q;
+    return cv::sqrt(d.x*d.x + d.y*d.y + d.z * d.z);
+}
+
 
 //TODO - move this as it is a generic helper
 static int mkpath(char* path) {
diff --git a/src/xrt/drivers/montrack/optical_tracking/common/tracker.c b/src/xrt/drivers/montrack/optical_tracking/common/tracker.c
index 4b1fc7d7c500a0c8a6a9130da08fdb36f09552c2..b3b32a6f1feea13f6fb94c29ea81dfbb84d3d9bf 100644
--- a/src/xrt/drivers/montrack/optical_tracking/common/tracker.c
+++ b/src/xrt/drivers/montrack/optical_tracking/common/tracker.c
@@ -1,6 +1,7 @@
 #include "tracker.h"
 #include "tracker3D_sphere_mono.h"
 #include "tracker3D_sphere_stereo.h"
+#include "tracker3D_psvr_stereo.h"
 #include "tracker3D_uvbi.h"
 
 #include "util/u_misc.h"
@@ -80,6 +81,26 @@ tracker_create(tracker_type_t t)
 			i->tracker_configure =
 			    tracker3D_sphere_stereo_configure;
 			break;
+		case TRACKER_TYPE_PSVR_STEREO:
+			i->tracker_type = t;
+			i->internal_instance =
+			    tracker3D_psvr_stereo_create(i);
+			i->tracker_get_capture_params =
+			    tracker3D_psvr_stereo_get_capture_params;
+			i->tracker_get_poses =
+			    tracker3D_psvr_stereo_get_poses;
+			i->tracker_get_debug_frame =
+			    tracker3D_psvr_stereo_get_debug_frame;
+			i->tracker_queue = tracker3D_psvr_stereo_queue;
+			i->tracker_register_measurement_callback =
+			    tracker3D_psvr_stereo_register_measurement_callback;
+			i->tracker_register_event_callback =
+			    tracker3D_psvr_stereo_register_event_callback;
+			i->tracker_has_new_poses =
+			    tracker3D_psvr_stereo_new_poses;
+			i->tracker_configure =
+			    tracker3D_psvr_stereo_configure;
+			break;
 #ifdef XRT_HAVE_UVBI
 		case TRACKER_TYPE_UVBI:
 			i->tracker_type = t;
diff --git a/src/xrt/drivers/montrack/optical_tracking/common/tracker.h b/src/xrt/drivers/montrack/optical_tracking/common/tracker.h
index 2989268adad752415466ab3e317c07003bfd529e..f14c2fa73966a61005af2e0e811d6a076375bc4c 100644
--- a/src/xrt/drivers/montrack/optical_tracking/common/tracker.h
+++ b/src/xrt/drivers/montrack/optical_tracking/common/tracker.h
@@ -40,9 +40,16 @@ typedef enum tracker_type
 	TRACKER_TYPE_SPHERE_MONO,
     TRACKER_TYPE_UVBI,
     TRACKER_TYPE_CALIBRATION_STEREO,
-    TRACKER_TYPE_CALIBRATION_MONO
+    TRACKER_TYPE_CALIBRATION_MONO,
+    TRACKER_TYPE_PSVR_STEREO,
 } tracker_type_t;
 
+typedef enum tracker_source_type {
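+    //selects between rectilinear and fisheye calibration handling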
+    TRACKER_SOURCE_TYPE_NONE,
+    TRACKER_SOURCE_TYPE_RECTILINEAR,
+    TRACKER_SOURCE_TYPE_FISHEYE
+} tracker_source_type_t;
+
 
 
 struct _tracker_instance;
@@ -56,26 +63,25 @@ typedef void (*measurement_consumer_callback_func)(
 
 typedef struct tracker_mono_configuration
 {
-	char configuration_filename[256]; // TODO: maybe too small?
-    // camera_calibration_t calibration;
-	frame_format_t format;
+    tracker_source_type_t source_type;
+    char camera_configuration_filename[256]; // TODO: maybe too small?
+    char room_setup_filename[256];
+    frame_format_t format;
 	uint64_t source_id;
 } tracker_mono_configuration_t;
 
 typedef struct tracker_stereo_configuration
 {
-	char configuration_filename[256]; // TODO: maybe too small?
-    // camera_calibration_t l_calibration;
-	// camera_calibration_t r_calibration;
-	frame_format_t l_format;
+    tracker_source_type_t source_type;
+    char camera_configuration_filename[256]; // TODO: maybe too small?
+    char room_setup_filename[256];
+    frame_format_t l_format;
 	uint64_t l_source_id;
 	frame_format_t r_format;
 	uint64_t r_source_id;
 	bool split_left; // single-frame stereo will split the left frame
 	frame_rect_t l_rect;
 	frame_rect_t r_rect;
-
-
 } tracker_stereo_configuration_t;
 
 
diff --git a/src/xrt/drivers/montrack/optical_tracking/track_psvr.cpp b/src/xrt/drivers/montrack/optical_tracking/track_psvr.cpp
index b07f72c4229f5f3a1b31b83d1316a53bd7ce641f..40e0aba948aafdea14d15a6b7329a42eb79becbf 100644
--- a/src/xrt/drivers/montrack/optical_tracking/track_psvr.cpp
+++ b/src/xrt/drivers/montrack/optical_tracking/track_psvr.cpp
@@ -137,58 +137,56 @@ psvr_disambiguate_5points(std::vector<psvr_led_t>* leds, psvr_track_data* t)
 	return true;
 }
 
-/*//TODO: we don't need to pass a TrackData* here
-bool psvr_compute_svd()
+bool psvr_compute_svd(psvr_track_data* t)
 {
-        //compute SVD for the points we have found, assuming we have at least 3
-points
-
-        uint8_t pointCount=0;
-        for (uint32_t i=0;i<MAX_POINTS;i++)
+    //compute SVD for the points we have found, assuming we have at least 3 points
+    uint8_t point_count = 0;
+    for (uint32_t i = 0; i < NUM_LEDS; i++) {
+        if (t->confidence[i] > 0) {
+            point_count++;
+        }
+    }
+
+    if (point_count > 2)
+    {
+        cv::Mat measurement(point_count, 3, cv::DataType<float>::type);
+        cv::Mat model(point_count, 3, cv::DataType<float>::type);
+        cv::Mat xCovar;
+        uint8_t c = 0;
+        for (uint32_t i = 0; i < NUM_LEDS; i++)
         {
                 if (t->confidence[i] > 0)
                 {
-                        pointCount++;
+                        measurement.at<float>(c,0) = t->positions_3d[i].x;
+                        measurement.at<float>(c,1) = t->positions_3d[i].y;
+                        measurement.at<float>(c,2) = t->positions_3d[i].z;
+                        model.at<float>(c,0) = physical_led_positions[i].x;
+                        model.at<float>(c,1) = physical_led_positions[i].y;
+                        model.at<float>(c,2) = physical_led_positions[i].z;
+                        c++;
                 }
         }
 
-        if (pointCount > 2)
-        {
-                cv::Mat measurement(pointCount, 3, cv::DataType<float>::type);
-                cv::Mat model(pointCount, 3, cv::DataType<float>::type);
-                cv::Mat xCovar;
-                uint8_t c = 0;
-                for (uint32_t i=0;i<MAX_POINTS;i++)
-                {
-                        if (t->confidence[i] > 0)
-                        {
-                                measurement.at<float>(c,0) = t->positions[i].x;
-                                measurement.at<float>(c,1) = t->positions[i].y;
-                                measurement.at<float>(c,2) = t->positions[i].z;
-                                model.at<float>(c,0) = ledPositions[i].x;
-                                model.at<float>(c,1) = ledPositions[i].y;
-                                model.at<float>(c,2) = ledPositions[i].z;
-                                c++;
-                        }
-                }
-
-                // create our cross-covariance matrix
-                cv::transpose(model,model);
-                xCovar =  model * measurement;
-                cv::Mat w,u,v,ut;
-                decomposer->compute(xCovar,w,u,v);
-                cv::transpose(u,ut);
-                //TODO: compute determinant
-                cv::Mat rot = v * ut;
-                glm::mat3 glmRot;
-                memcpy((void*)&(glmRot[0][0]),rot.data, 9 * sizeof(float));
-                glm::mat3 tRot = glm::transpose(glmRot);
-                t->rotationMatrix = glm::mat4(tRot);
-                //cout << "R = "<< endl << " "  << rotationMatrix << endl <<
-endl; return true;
-        }
-        else
-        {
-                return false;
-        }
-}*/
+        // create our cross-covariance matrix
+        cv::transpose(model, model);
+        xCovar = model * measurement;
+        cv::Mat w, u, v, ut;
+        cv::SVD::compute(xCovar, w, u, v);
+        cv::transpose(u, ut);
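+        //note: cv::SVD::compute writes V transposed into its fourth
+        //argument, so 'v' here actually holds V^T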
+        //TODO: compute determinant
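+        //(without a determinant sign check the fitted matrix can come out
+        //as a reflection rather than a proper rotation)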
+        cv::Mat rot = v * ut;
+
+        cv::transpose(rot, rot);
+        // we assume this is a 3x3 matrix
+        memcpy(t->rotation_matrix.v, rot.data, 9 * sizeof(float));
+
+        return true;
+    }
+    else
+    {
+        return false;
+    }
+}
diff --git a/src/xrt/drivers/montrack/optical_tracking/track_psvr.h b/src/xrt/drivers/montrack/optical_tracking/track_psvr.h
index f3a174b76a0b921882f6937861fc8372d21b1c8b..eb74f1396d6e3de336af7a0e3c1ba4889a103ab0 100644
--- a/src/xrt/drivers/montrack/optical_tracking/track_psvr.h
+++ b/src/xrt/drivers/montrack/optical_tracking/track_psvr.h
@@ -10,6 +10,16 @@
 static const char* LED_LABELS[] = {"LU", "RU", "C",  "LL", "RL",
                                    "LS", "RS", "LB", "RB"};
 
+// one entry per led in LED_LABELS (9 leds)
+static const xrt_vec3 physical_led_positions[] = {
+    {0.0f, 0.0f, 0.0f},
+    {0.0f, 0.0f, 0.0f},
+    {0.0f, 0.0f, 0.0f},
+    {0.0f, 0.0f, 0.0f},
+    {0.0f, 0.0f, 0.0f},
+    {0.0f, 0.0f, 0.0f},
+    {0.0f, 0.0f, 0.0f},
+    {0.0f, 0.0f, 0.0f},
+    {0.0f, 0.0f, 0.0f},
+};
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -25,7 +35,7 @@ typedef struct psvr_track_data
 	xrt_vec2 r_positions_2d[NUM_LEDS];
 	int8_t confidence[NUM_LEDS]; //-1 if point is not tracked, TODO: 0-128
 	                             // for confidence
-	xrt_matrix_4x4 rotation_matrix; // SVD-fitted head rotation matrix
+    xrt_matrix_3x3 rotation_matrix; // SVD-fitted head rotation matrix
 	xrt_vec3 translation;           // head translation
 } psvr_track_data_t;
 
@@ -56,7 +66,7 @@ typedef struct psvr_led
 bool
 psvr_disambiguate_5points(std::vector<psvr_led_t>* leds, psvr_track_data* t);
 
-// bool psvr_computeSVD();
+bool psvr_compute_svd(psvr_track_data* t);
 
 
 #ifdef __cplusplus
diff --git a/src/xrt/drivers/montrack/optical_tracking/tracker3D_psvr_stereo.cpp b/src/xrt/drivers/montrack/optical_tracking/tracker3D_psvr_stereo.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6b0cce857eaa040d9e7f3b432d429381d1f3d7c3
--- /dev/null
+++ b/src/xrt/drivers/montrack/optical_tracking/tracker3D_psvr_stereo.cpp
@@ -0,0 +1,1122 @@
+#include <opencv2/opencv.hpp>
+
+#include "tracker3D_psvr_stereo.h"
+#include "common/calibration_opencv.hpp"
+#include "common/opencv_utils.hpp"
+
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <linux/limits.h>
+
+#include "util/u_misc.h"
+
+#include <eigen3/Eigen/Core>
+#include <eigen3/Eigen/Geometry>
+
+static bool
+tracker3D_psvr_stereo_track(tracker_instance_t* inst);
+
+
+typedef struct psvr_track_data
+{
+    uint64_t timestamp;
+    struct xrt_vec3 positions_3d[NUM_LEDS]; // x,y,z position for up to (currently)
+                                     // 9 points - LU,RU,C,LL,RL,LS,RS,LB,RB
+    struct xrt_vec2 l_positions_2d[NUM_LEDS]; // 2d positions in left and right images
+    struct xrt_vec2 r_positions_2d[NUM_LEDS];
+    int8_t confidence[NUM_LEDS]; //-1 if point is not tracked, TODO: 0-128
+                                 // for confidence
+    uint8_t tracked_count; //count of leds with confidence >= 1
+    float longest_dist;
+    struct xrt_matrix_3x3 rotation_matrix; // SVD-fitted head rotation matrix
+    struct xrt_vec3 translation; // head translation
+
+} psvr_track_data_t;
+
+
+typedef struct point_dist
+{
+    uint32_t index;
+    struct xrt_vec3 translation;
+    float length;
+    float angle;
+} point_dist_t;
+
+
+typedef struct psvr_led
+{
+    uint32_t world_point_index;
+    struct xrt_vec2 l_position_2d;
+    struct xrt_vec2 r_position_2d;
+    float radius;
+    cv::Point2f distance;
+    struct xrt_vec3 position;
+    struct xrt_vec3 scaled_position;
+    std::vector<point_dist> point_dists;
+    float rms_distance;
+    float longest_distance;
+    int sign_x;
+    int sign_y;
+} psvr_led_t;
+
+
+typedef struct tracker3D_psvr_stereo_instance
+{
+    tracker_stereo_configuration_t configuration;
+    measurement_consumer_callback_func measurement_target_callback;
+    void* measurement_target_instance; // where we send our measurements
+    event_consumer_callback_func event_target_callback;
+    void* event_target_instance; // where we send our events
+
+    tracked_object_t tracked_object;
+    tracked_blob_t l_tracked_blob;
+    tracked_blob_t r_tracked_blob;
+
+    bool poses_consumed;
+    cv::SimpleBlobDetector::Params blob_params;
+    std::vector<cv::KeyPoint> l_keypoints;
+    std::vector<cv::KeyPoint> r_keypoints;
+    // these components hold no state so we can use a single instance for l
+    // and r ?
+    cv::Ptr<cv::SimpleBlobDetector> sbd;
+    cv::Ptr<cv::BackgroundSubtractorMOG2> background_subtractor;
+    cv::Mat l_frame_gray;
+    cv::Mat r_frame_gray;
+    cv::Mat l_mask_gray;
+    cv::Mat r_mask_gray;
+
+    cv::Mat l_frame_u;
+    cv::Mat l_frame_v;
+    cv::Mat r_frame_u;
+    cv::Mat r_frame_v;
+
+
+    cv::Mat debug_rgb;
+
+    cv::Mat disparity_to_depth;
+
+    cv::Mat l_undistort_map_x;
+    cv::Mat l_undistort_map_y;
+    cv::Mat r_undistort_map_x;
+    cv::Mat r_undistort_map_y;
+
+    cv::Mat l_rectify_map_x;
+    cv::Mat l_rectify_map_y;
+    cv::Mat r_rectify_map_x;
+    cv::Mat r_rectify_map_y;
+
+    bool calibrated;
+    room_setup rs;
+
+    bool l_alloced_frames;
+    bool r_alloced_frames;
+
+    bool got_left;
+    bool got_right;
+
+    psvr_track_data_t prev_track_data;
+    psvr_led_t model_leds[NUM_LEDS];
+
+} tracker3D_psvr_stereo_instance_t;
+
+static bool
+psvr_disambiguate_5points(std::vector<psvr_led_t>* leds, psvr_track_data_t* t);
+
+static bool psvr_compute_svd(psvr_track_data_t* t);
+static bool psvr_remove_outliers(std::vector<psvr_led_t>* leds);
+static bool psvr_classify_leds(tracker_instance_t* inst,std::vector<psvr_led_t>* leds,psvr_track_data_t* t);
+
+static void psvr_blob_to_led(std::vector<cv::Point3f>* world_points, psvr_led_t* led_data);
+
+
+tracker3D_psvr_stereo_instance_t*
+tracker3D_psvr_stereo_create(tracker_instance_t* inst)
+{
+    tracker3D_psvr_stereo_instance_t* i =
+        U_TYPED_CALLOC(tracker3D_psvr_stereo_instance_t);
+    if (i) {
+        i->blob_params.filterByArea = false;
+        i->blob_params.filterByConvexity = false;
+        i->blob_params.filterByInertia = false;
+        i->blob_params.filterByColor = true;
+        i->blob_params.blobColor =
+            255; // 0 or 255 - color comes from binarized image?
+        i->blob_params.minArea = 1;
+        i->blob_params.maxArea = 1000;
+        i->blob_params.maxThreshold =
+            51; // using a wide threshold span slows things down bigtime
+        i->blob_params.minThreshold = 50;
+        i->blob_params.thresholdStep = 1;
+        i->blob_params.minDistBetweenBlobs = 5;
+        i->blob_params.minRepeatability = 1; // need this to avoid
+                                             // error?
+
+        i->sbd = cv::SimpleBlobDetector::create(i->blob_params);
+        i->background_subtractor =
+            cv::createBackgroundSubtractorMOG2(32, 16, false);
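+        //MOG2 args: history of 32 frames, varThreshold 16, shadow
+        //detection off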
+
+        i->poses_consumed = false;
+        i->l_alloced_frames = false;
+        i->r_alloced_frames = false;
+        i->got_left = false;
+        i->got_right = false;
+
+        i->calibrated = false;
+
+
+        // alloc our debug frame here - opencv is h,w, not w,h
+        i->debug_rgb = cv::Mat(480, 640, CV_8UC3, cv::Scalar(0, 0, 0));
+
+        std::vector<cv::Point3f> model_points;
+        for (uint32_t j=0;j<NUM_LEDS;j++)
+        {
+            model_points.push_back(cv::Vec3f(physical_led_positions[j].x,physical_led_positions[j].y,physical_led_positions[j].z));
+
+        }
+        for (uint32_t j=0;j<NUM_LEDS;j++)
+        {
+           i->model_leds[j].world_point_index = j; //this index must be set
+           psvr_blob_to_led(&model_points,&i->model_leds[j]);
+        }
+
+
+        return i;
+    }
+    return NULL;
+}
+bool
+tracker3D_psvr_stereo_get_debug_frame(tracker_instance_t* inst,
+                                        frame_t* frame)
+{
+    tracker3D_psvr_stereo_instance_t* internal =
+        (tracker3D_psvr_stereo_instance_t*)inst->internal_instance;
+
+    // wrap a frame struct around our debug cv::Mat and return it.
+
+    frame->format = FORMAT_RGB_UINT8;
+    frame->width = internal->debug_rgb.cols;
+    frame->stride =
+        internal->debug_rgb.cols * format_bytes_per_pixel(frame->format);
+    frame->height = internal->debug_rgb.rows;
+    frame->data = internal->debug_rgb.data;
+    frame->size_bytes = frame_size_in_bytes(frame);
+    return true;
+}
+capture_parameters_t
+tracker3D_psvr_stereo_get_capture_params(tracker_instance_t* inst)
+{
+    tracker3D_psvr_stereo_instance_t* internal =
+        (tracker3D_psvr_stereo_instance_t*)inst->internal_instance;
+    capture_parameters_t cp = {};
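+    //very low exposure/gain - only the bright led blobs should survive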
+    cp.exposure = 1.0 / 2048.0;
+    cp.gain = 0.01f;
+    return cp;
+}
+
+bool
+tracker3D_psvr_stereo_queue(tracker_instance_t* inst, frame_t* frame)
+{
+    tracker3D_psvr_stereo_instance_t* internal =
+        (tracker3D_psvr_stereo_instance_t*)inst->internal_instance;
+    if (!inst->configured) {
+        printf(
+            "ERROR: you must configure this tracker before it can "
+            "accept frames\n");
+        return false;
+    }
+
+    // alloc left if required - if we have a composite stereo frame, alloc
+    // both left and right eyes.
+
+    if (frame->source_id == internal->configuration.l_source_id &&
+        !internal->l_alloced_frames) {
+        uint16_t eye_width = frame->width / 2;
+        if (internal->configuration.split_left == true) {
+            eye_width = frame->width / 2;
+            internal->r_frame_gray =
+                cv::Mat(frame->height, eye_width, CV_8UC1,
+                        cv::Scalar(0, 0, 0));
+            internal->r_frame_u =
+                cv::Mat(frame->height, eye_width, CV_8UC1,
+                        cv::Scalar(0, 0, 0));
+            internal->r_frame_v =
+                cv::Mat(frame->height, eye_width, CV_8UC1,
+                        cv::Scalar(0, 0, 0));
+            internal->r_mask_gray =
+                cv::Mat(frame->height, eye_width, CV_8UC1,
+                        cv::Scalar(0, 0, 0));
+            internal->r_alloced_frames = true;
+        }
+        internal->l_frame_gray = cv::Mat(frame->height, eye_width,
+                                         CV_8UC1, cv::Scalar(0, 0, 0));
+        internal->l_frame_u = cv::Mat(frame->height, eye_width, CV_8UC1,
+                                      cv::Scalar(0, 0, 0));
+        internal->l_frame_v = cv::Mat(frame->height, eye_width, CV_8UC1,
+                                      cv::Scalar(0, 0, 0));
+
+        internal->l_mask_gray = cv::Mat(frame->height, eye_width,
+                                        CV_8UC1, cv::Scalar(0, 0, 0));
+        internal->l_alloced_frames = true;
+    }
+
+    // if we have a right frame, alloc if required.
+
+    if (frame->source_id == internal->configuration.r_source_id &&
+        !internal->r_alloced_frames) {
+        uint16_t eye_width = frame->width / 2;
+        internal->r_frame_gray = cv::Mat(frame->height, eye_width,
+                                         CV_8UC1, cv::Scalar(0, 0, 0));
+        internal->r_frame_u = cv::Mat(frame->height, eye_width, CV_8UC1,
+                                      cv::Scalar(0, 0, 0));
+        internal->r_frame_v = cv::Mat(frame->height, eye_width, CV_8UC1,
+                                      cv::Scalar(0, 0, 0));
+
+        internal->r_mask_gray = cv::Mat(frame->height, eye_width,
+                                        CV_8UC1, cv::Scalar(0, 0, 0));
+        internal->r_alloced_frames = true;
+    }
+
+    // copy our data from our video buffer into our cv::Mats
+    // TODO: initialise once
+    cv::Mat l_chans[3];
+    l_chans[0] = internal->l_frame_gray;
+    l_chans[1] = internal->l_frame_u;
+    l_chans[2] = internal->l_frame_v;
+
+    cv::Mat r_chans[3];
+    r_chans[0] = internal->r_frame_gray;
+    r_chans[1] = internal->r_frame_u;
+    r_chans[2] = internal->r_frame_v;
+
+
+    if (frame->source_id == internal->configuration.l_source_id) {
+
+        if (internal->configuration.split_left == true) {
+            internal->got_left = true;
+            internal->got_right = true;
+            cv::Mat tmp(frame->height, frame->width, CV_8UC3,
+                        frame->data);
+            // cv::Rect takes (x, y, width, height), so convert from the
+            // top-left/bottom-right corner pairs.
+            cv::Rect lr(internal->configuration.l_rect.tl.x,
+                        internal->configuration.l_rect.tl.y,
+                        internal->configuration.l_rect.br.x -
+                            internal->configuration.l_rect.tl.x,
+                        internal->configuration.l_rect.br.y -
+                            internal->configuration.l_rect.tl.y);
+            cv::Rect rr(internal->configuration.r_rect.tl.x,
+                        internal->configuration.r_rect.tl.y,
+                        internal->configuration.r_rect.br.x -
+                            internal->configuration.r_rect.tl.x,
+                        internal->configuration.r_rect.br.y -
+                            internal->configuration.r_rect.tl.y);
+            cv::split(tmp(lr), l_chans);
+            cv::split(tmp(rr), r_chans);
+        } else {
+            internal->got_left = true;
+            cv::Mat tmp(frame->height, frame->width, CV_8UC3,
+                        frame->data);
+            cv::split(tmp, l_chans);
+        }
+    }
+    if (frame->source_id == internal->configuration.r_source_id &&
+        internal->configuration.split_left == false) {
+        internal->got_right = true;
+        cv::Mat tmp(frame->height, frame->width, CV_8UC3, frame->data);
+        cv::split(tmp, r_chans);
+    }
+
+    // we have our pair of frames, now we can process them - this should
+    // eventually happen asynchronously, rather than in the queue call
+
+    if (internal->got_left && internal->got_right) {
+        return tracker3D_psvr_stereo_track(inst);
+    }
+    return true;
+}
+
+bool
+tracker3D_psvr_stereo_track(tracker_instance_t* inst)
+{
+
+    tracker3D_psvr_stereo_instance_t* internal =
+        (tracker3D_psvr_stereo_instance_t*)inst->internal_instance;
+    internal->l_keypoints.clear();
+    internal->r_keypoints.clear();
+
+
+    // DEBUG: dump all our planes out for inspection
+   /* cv::imwrite("/tmp/l_out_y.jpg",internal->l_frame_gray);
+    cv::imwrite("/tmp/r_out_y.jpg",internal->r_frame_gray);
+    cv::imwrite("/tmp/l_out_u.jpg",internal->l_frame_u);
+    cv::imwrite("/tmp/r_out_u.jpg",internal->r_frame_u);
+    cv::imwrite("/tmp/l_out_v.jpg",internal->l_frame_v);
+    cv::imwrite("/tmp/r_out_v.jpg",internal->r_frame_v);*/
+
+
+    // channel combining is disabled in favour of using the v plane
+    // directly - the y plane is 2x the resolution, so we do want to use
+    // that eventually.
+
+    // combine our yuv channels to isolate blue leds - the y channel would
+    // be all we use from then on:
+    // cv::subtract(internal->l_frame_u,internal->l_frame_v,internal->l_frame_u);
+    // cv::subtract(internal->r_frame_u,internal->r_frame_v,internal->r_frame_u);
+
+    // cv::subtract(internal->l_frame_u,internal->l_frame_gray,internal->l_frame_gray);
+    // cv::subtract(internal->r_frame_u,internal->r_frame_gray,internal->r_frame_gray);
+
+    // just use the v plane directly, inverted so the leds are bright
+    cv::bitwise_not(internal->l_frame_v, internal->l_frame_v);
+    cv::bitwise_not(internal->r_frame_v, internal->r_frame_v);
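+    // (blue leds map to low values in the V / red-difference plane, so
+    // the inversion makes them the brightest pixels for thresholding.)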
+
+    cv::threshold(internal->l_frame_v, internal->l_frame_gray, 150.0, 255.0,
+                  cv::THRESH_BINARY);
+    cv::threshold(internal->r_frame_v, internal->r_frame_gray, 150.0, 255.0,
+                  cv::THRESH_BINARY);
+
+
+    cv::Mat l_frame_undist;
+    cv::Mat r_frame_undist;
+
+    // undistort the whole image
+    cv::remap(internal->l_frame_gray, l_frame_undist,
+              internal->l_undistort_map_x, internal->l_undistort_map_y,
+              cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));
+    cv::remap(internal->r_frame_gray, r_frame_undist,
+              internal->r_undistort_map_x, internal->r_undistort_map_y,
+              cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));
+
+    // rectify the whole image
+    cv::remap(l_frame_undist, internal->l_frame_gray,
+              internal->l_rectify_map_x, internal->l_rectify_map_y,
+              cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));
+    cv::remap(r_frame_undist, internal->r_frame_gray,
+              internal->r_rectify_map_x, internal->r_rectify_map_y,
+              cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));
+
+    // block-match for disparity calculation - disabled
+
+    cv::Mat disp;
+    cv::Ptr<cv::StereoBM> sbm = cv::StereoBM::create(128, 5);
+    sbm->setNumDisparities(64);
+    sbm->setBlockSize(5);
+    sbm->setPreFilterCap(61);
+    sbm->setPreFilterSize(15);
+    sbm->setTextureThreshold(32);
+    sbm->setSpeckleWindowSize(100);
+    sbm->setSpeckleRange(32);
+    sbm->setMinDisparity(2);
+    sbm->setUniquenessRatio(10);
+    sbm->setDisp12MaxDiff(0);
+    // sbm->compute(internal->l_frame_gray, internal->r_frame_gray, disp);
+    // cv::normalize(disp, disp8, 0.1, 255, CV_MINMAX, CV_8UC1);
+
+
+    // disabled background subtraction for now
+
+    // internal->background_subtractor->apply(internal->l_frame_gray,internal->l_mask_gray);
+    // internal->background_subtractor->apply(internal->r_frame_gray,internal->r_mask_gray);
+
+    xrt_vec2 lastPos = internal->l_tracked_blob.center;
+    float offset = ROI_OFFSET;
+    if (internal->l_tracked_blob.diameter > ROI_OFFSET) {
+        offset = internal->l_tracked_blob.diameter;
+    }
+
+    // cv::rectangle(internal->l_mask_gray,
+    // cv::Point2f(lastPos.x-offset,lastPos.y-offset),cv::Point2f(lastPos.x+offset,lastPos.y+offset),cv::Scalar(
+    // 255 ),-1,0); lastPos = internal->r_tracked_blob.center;
+    // cv::rectangle(internal->r_mask_gray,
+    // cv::Point2f(lastPos.x-offset,lastPos.y-offset),cv::Point2f(lastPos.x+offset,lastPos.y+offset),cv::Scalar(
+    // 255 ),-1,0);
+
+    // cv::rectangle(internal->debug_rgb,
+    // cv::Point2f(0,0),cv::Point2f(internal->debug_rgb.cols,internal->debug_rgb.rows),cv::Scalar(
+    // 0,0,0 ),-1,0);
+
+    cv::threshold(internal->l_frame_gray, internal->l_frame_gray, 32.0,
+                  255.0, cv::THRESH_BINARY);
+    cv::threshold(internal->r_frame_gray, internal->r_frame_gray, 32.0,
+                  255.0, cv::THRESH_BINARY);
+
+    // TODO: handle source images larger than debug_rgb
+    cv::Mat debug_img;
+    cv::cvtColor(internal->l_frame_gray, debug_img, CV_GRAY2BGR);
+    int32_t cropped_rows = debug_img.rows;
+    int32_t cropped_cols = debug_img.cols;
+
+    if (debug_img.rows > internal->debug_rgb.rows ||
+        debug_img.cols > internal->debug_rgb.cols) {
+        cropped_rows = internal->debug_rgb.rows;
+        cropped_cols = internal->debug_rgb.cols;
+    }
+    cv::Mat dst_roi =
+        internal->debug_rgb(cv::Rect(0, 0, cropped_cols, cropped_rows));
+    debug_img.copyTo(dst_roi);
+
+
+    tracker_measurement_t m = {};
+
+    // do blob detection with our masks
+    internal->sbd->detect(internal->l_frame_gray,
+                          internal->l_keypoints); //,internal->l_mask_gray);
+    internal->sbd->detect(internal->r_frame_gray,
+                          internal->r_keypoints); //,internal->r_mask_gray);
+    // do some basic matching to come up with likely disparity-pairs
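+    // the frames were rectified above, so corresponding blobs should lie
+    // on (nearly) the same scanline; we accept the closest candidate
+    // within +/- 3 pixels vertically.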
+    std::vector<cv::KeyPoint> l_blobs, r_blobs;
+    for (uint32_t i = 0; i < internal->l_keypoints.size(); i++) {
+        cv::KeyPoint l_blob = internal->l_keypoints[i];
+        int l_index = -1;
+        int r_index = -1;
+        // must live outside the inner loop so the closest match wins
+        float lowest_dist = 128;
+        for (uint32_t j = 0; j < internal->r_keypoints.size(); j++) {
+            cv::KeyPoint r_blob = internal->r_keypoints[j];
+            // find closest point on same-ish scanline
+            if ((l_blob.pt.y < r_blob.pt.y + 3) &&
+                (l_blob.pt.y > r_blob.pt.y - 3) &&
+                ((r_blob.pt.x - l_blob.pt.x) < lowest_dist)) {
+                lowest_dist = r_blob.pt.x - l_blob.pt.x;
+                r_index = j;
+                l_index = i;
+            }
+        }
+        if (l_index > -1 && r_index > -1) {
+            l_blobs.push_back(internal->l_keypoints.at(l_index));
+            r_blobs.push_back(internal->r_keypoints.at(r_index));
+        }
+    }
+
+    // draw our disparity markers into our debug frame
+
+    for (uint32_t i = 0; i < l_blobs.size(); i++) {
+        cv::line(internal->debug_rgb, l_blobs[i].pt, r_blobs[i].pt,
+                 cv::Scalar(255, 0, 0));
+    }
+
+    // convert our 2d point + disparities into 3d points.
+
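+    // reprojection: with Q = disparity_to_depth (the 4x4 matrix produced
+    // by stereo rectification), [X Y Z W]^T = Q * [x y d 1]^T and the
+    // world point is (X/W, Y/W, Z/W).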
+    std::vector<cv::Point3f> world_points;
+    if (l_blobs.size() > 0) {
+        for (uint32_t i = 0; i < l_blobs.size(); i++) {
+            float disp = r_blobs[i].pt.x - l_blobs[i].pt.x;
+            cv::Vec4d xydw(l_blobs[i].pt.x, l_blobs[i].pt.y, disp,
+                           1.0f);
+            // Transform
+            cv::Vec4d h_world = (cv::Matx44d)internal->disparity_to_depth * xydw;
+            // Divide by scale to get 3D vector from homogeneous
+            // coordinate. invert x while we are at it.
+            world_points.push_back(cv::Point3f(
+                -h_world[0] / h_world[3], h_world[1] / h_world[3],
+                h_world[2] / h_world[3]));
+        }
+    }
+
+    // initial implementation simply tracks a single blob.
+    // keep the last position of the blob, and use the closest
+    // 'new' blob so noise or random reflections etc. have
+    // less chance of throwing tracking off.
+
+    int tracked_index = -1;
+    float lowest_dist = 65535.0f;
+    xrt_vec3 position = internal->tracked_object.pose.position;
+    cv::Point3f last_point(position.x, position.y, position.z);
+
+    for (uint32_t i = 0; i < world_points.size(); i++) {
+        cv::Point3f world_point = world_points[i];
+        // show our tracked world points (just x,y) in our debug output
+        cv::Point2f img_point;
+        img_point.x = world_point.x * internal->debug_rgb.cols / 2 +
+                      internal->debug_rgb.cols / 2;
+        img_point.y = world_point.y * internal->debug_rgb.rows / 2 +
+                      internal->debug_rgb.rows / 2;
+        cv::circle(internal->debug_rgb, img_point, 3,
+                   cv::Scalar(0, 255, 0));
+
+        float dist = cv_dist_3d(world_point, last_point);
+        if (dist < lowest_dist) {
+            tracked_index = i;
+            lowest_dist = dist;
+        }
+    }
+
+    std::vector<psvr_led_t> led_data;
+    psvr_track_data_t track_data = {}; // zero confidence, counts, etc.
+
+    // longest length gives us the highest distance between any 2 points.
+    // scaling by this value gives us a normalised distance.
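+    // this makes each led's distance set scale-invariant, so it can be
+    // compared against the normalised physical model regardless of how
+    // far away the headset is.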
+    for (uint32_t i = 0; i < world_points.size(); i++) {
+        cv::Point3f point_a = world_points[i];
+        // value-initialise so sign_x/sign_y/rms_distance start at zero
+        // (rms_distance is only accumulated in psvr_blob_to_led)
+        psvr_led_t l = {};
+        l.l_position_2d.x = l_blobs[i].pt.x;
+        l.l_position_2d.y = l_blobs[i].pt.y;
+        l.r_position_2d.x = r_blobs[i].pt.x;
+        l.r_position_2d.y = r_blobs[i].pt.y;
+
+        l.position.x = point_a.x;
+        l.position.y = point_a.y;
+        l.position.z = point_a.z;
+
+        l.world_point_index = i;
+
+        // fill in our led struct with distances etc.
+        psvr_blob_to_led(&world_points, &l);
+
+        l.rms_distance = sqrt(l.rms_distance);
+        // printf("longest distance %f\n", l.longest_distance);
+        led_data.push_back(l);
+    }
+
+    psvr_remove_outliers(&led_data);
+
+    psvr_classify_leds(inst, &led_data, &track_data);
+
+    // draw our classified points in the debug output
+    for (uint32_t i = 0; i < NUM_LEDS; i++) {
+        // skip leds we did not identify this frame
+        if (track_data.confidence[i] == 0) {
+            continue;
+        }
+        cv::Point2f pos(track_data.l_positions_2d[i].x,
+                        track_data.l_positions_2d[i].y);
+        cv::circle(internal->debug_rgb, pos, 3, cv::Scalar(96, 128, 192));
+        char label[128];
+        snprintf(label, 128, "LED %d %f %f %f", i,
+                 track_data.positions_3d[i].x, track_data.positions_3d[i].y,
+                 track_data.positions_3d[i].z);
+        cv::putText(internal->debug_rgb, label, pos, 0, 0.3f,
+                    cv::Scalar(255, 192, 0));
+    }
+
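+    // with at least 3 classified leds we can estimate orientation.
+    // note: Eigen's eulerAngles(0, 1, 2) decomposes R as
+    // Rx(a) * Ry(b) * Rz(c) with angles in radians; it is only used for
+    // the debug print below.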
+    if (track_data.tracked_count > 2) {
+        psvr_compute_svd(&track_data);
+        Eigen::Matrix3f rot_mat = Eigen::Map<Eigen::Matrix<float, 3, 3>>(
+            track_data.rotation_matrix.v);
+        Eigen::Quaternionf q(rot_mat);
+        Eigen::Vector3f ea = rot_mat.eulerAngles(0, 1, 2);
+
+        float rad2deg = 180.0f / EIGEN_PI;
+
+        std::cout << "QUAT: " << q.vec() << q.w() << "\n";
+        std::cout << "ANGLE: " << ea.x() * rad2deg << ","
+                  << ea.y() * rad2deg << "," << ea.z() * rad2deg << "\n";
+        m.pose.orientation.x = q.x();
+        m.pose.orientation.y = q.y();
+        m.pose.orientation.z = q.z();
+        m.pose.orientation.w = q.w();
+    }
+
+
+    if (tracked_index != -1) {
+        cv::Point3f world_point = world_points[tracked_index];
+
+        // create our measurement for the filter
+        m.flags = (tracker_measurement_flags_t)(MEASUREMENT_POSITION | MEASUREMENT_OPTICAL);
+        struct timeval tp;
+        gettimeofday(&tp,NULL);
+        m.source_timestamp = tp.tv_sec * 1000000 + tp.tv_usec;
+
+        // apply our room setup transforms
+        Eigen::Vector3f p = Eigen::Map<Eigen::Vector3f>(&track_data.translation.x);
+        Eigen::Vector4f pt;
+        pt.x() = p.x();
+        pt.y() = p.y();
+        pt.z() = p.z();
+        pt.w() = 1.0f;
+
+        // this is a glm mat4 written out 'flat'
+        Eigen::Matrix4f mat = Eigen::Map<Eigen::Matrix<float,4,4>>(internal->rs.origin_transform.v);
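+        // both glm and Eigen store matrices column-major by default, so
+        // mapping the flat float array preserves the transform as-is.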
+        pt = mat * pt;
+
+        m.pose.position.x = pt.x();
+        m.pose.position.y = pt.y();
+        m.pose.position.z = pt.z();
+
+        // update internal state
+        cv::KeyPoint l_kp = l_blobs[tracked_index];
+        cv::KeyPoint r_kp = r_blobs[tracked_index];
+
+        internal->l_tracked_blob.center.x = l_kp.pt.x;
+        internal->l_tracked_blob.center.y = l_kp.pt.y;
+        internal->l_tracked_blob.diameter = l_kp.size;
+
+        internal->r_tracked_blob.center.x = r_kp.pt.x;
+        internal->r_tracked_blob.center.y = r_kp.pt.y;
+        internal->r_tracked_blob.diameter = r_kp.size;
+
+        internal->tracked_object.pose.position.x = world_point.x;
+        internal->tracked_object.pose.position.y = world_point.y;
+        internal->tracked_object.pose.position.z = world_point.z;
+        internal->tracked_object.tracking_id = 1;
+
+        char message[128];
+        snprintf(message, 128, "X: %f Y: %f Z: %f", world_point.x,
+                 world_point.y, world_point.z);
+
+        cv::putText(internal->debug_rgb, message, cv::Point2i(10, 50),
+                    0, 0.5f, cv::Scalar(96, 128, 192));
+        if (internal->measurement_target_callback) {
+            //printf("STTR queueing timestamp %lld\n",m.source_timestamp);
+              //printf("X: %f Y: %f Z: %f mx: %f my: %f mz: %f\n", world_point.x,
+              //       world_point.y, world_point.z,m.pose.position.x,m.pose.position.y,m.pose.position.z);
+            internal->measurement_target_callback(
+                internal->measurement_target_instance, &m);
+        }
+    }
+
+    tracker_send_debug_frame(inst); // publish our debug frame
+
+
+    return true;
+}
+
+bool
+tracker3D_psvr_stereo_get_poses(tracker_instance_t* inst,
+                                  tracked_object_t* objects,
+                                  uint32_t* count)
+{
+    if (objects == NULL) {
+        *count = TRACKED_POINTS; // tracking a single object
+        return true;
+    }
+
+    tracker3D_psvr_stereo_instance_t* internal =
+        (tracker3D_psvr_stereo_instance_t*)inst->internal_instance;
+    // we only track a single object currently
+    objects[0] = internal->tracked_object;
+    *count = 1;
+    internal->poses_consumed = true;
+    return true;
+}
+
+bool
+tracker3D_psvr_stereo_new_poses(tracker_instance_t* inst)
+{
+    tracker3D_psvr_stereo_instance_t* internal =
+        (tracker3D_psvr_stereo_instance_t*)inst->internal_instance;
+    return internal->poses_consumed;
+}
+
+bool
+tracker3D_psvr_stereo_configure(tracker_instance_t* inst,
+                                  tracker_stereo_configuration_t* config)
+{
+    tracker3D_psvr_stereo_instance_t* internal =
+        (tracker3D_psvr_stereo_instance_t*)inst->internal_instance;
+    // return false if we cannot handle this config
+
+    if (config->l_format != FORMAT_YUV444_UINT8) {
+        inst->configured = false;
+        return false;
+    }
+
+    // load calibration data
+
+    if (!internal->calibrated) {
+        if (calibration_get_stereo(config->camera_configuration_filename,
+                                   true,
+                                   &internal->l_undistort_map_x,
+                                   &internal->l_undistort_map_y,
+                                   &internal->l_rectify_map_x,
+                                   &internal->l_rectify_map_y,
+                                   &internal->r_undistort_map_x,
+                                   &internal->r_undistort_map_y,
+                                   &internal->r_rectify_map_x,
+                                   &internal->r_rectify_map_y,
+                                   &internal->disparity_to_depth)) {
+            printf("loaded calibration for camera!\n");
+        }
+        if (!calibration_get_roomsetup(config->room_setup_filename,
+                                       &internal->rs)) {
+            printf("Could not load room setup. Tracking frame will be "
+                   "wrong.\n");
+        }
+
+        // TODO: we mark ourselves calibrated even if loading failed.
+        internal->calibrated = true;
+    }
+
+    internal->configuration = *config;
+    inst->configured = true;
+    return true;
+}
+
+void
+tracker3D_psvr_stereo_register_measurement_callback(
+    tracker_instance_t* inst,
+    void* target_instance,
+    measurement_consumer_callback_func target_func)
+{
+    tracker3D_psvr_stereo_instance_t* internal =
+        (tracker3D_psvr_stereo_instance_t*)inst->internal_instance;
+    internal->measurement_target_instance = target_instance;
+    internal->measurement_target_callback = target_func;
+}
+
+void
+tracker3D_psvr_stereo_register_event_callback(
+    tracker_instance_t* inst,
+    void* target_instance,
+    event_consumer_callback_func target_func)
+{
+    tracker3D_psvr_stereo_instance_t* internal =
+        (tracker3D_psvr_stereo_instance_t*)inst->internal_instance;
+    internal->event_target_instance = target_instance;
+    internal->event_target_callback = target_func;
+}
+
+
+bool psvr_remove_outliers(std::vector<psvr_led_t>* leds)
+{
+    // TODO: outlier rejection is not implemented yet
+    (void)leds;
+    return false;
+}
+
+
+void psvr_blob_to_led(std::vector<cv::Point3f>* world_points, psvr_led_t* led_data)
+{
+    printf("world points: %d\n",world_points->size());
+    cv::Point3f point_a = world_points->at(led_data->world_point_index);
+    led_data->longest_distance = 0.0f;
+
+    cv::Point3f centroid(0.0f, 0.0f, 0.0f);
+    for (uint32_t i = 0; i < world_points->size(); i++) {
+        centroid.x += world_points->at(i).x;
+        centroid.y += world_points->at(i).y;
+        centroid.z += world_points->at(i).z;
+    }
+    centroid.x /= world_points->size();
+    centroid.y /= world_points->size();
+    centroid.z /= world_points->size();
+
+    printf("CENTROID: %f %f %f\n", centroid.x, centroid.y, centroid.z);
+
+    for (uint32_t j = 0; j < world_points->size(); j++) {
+        if (led_data->world_point_index != j) {
+            cv::Point3f point_b = world_points->at(j);
+            point_dist pd;
+            pd.index = j;
+            pd.translation.x = point_b.x - point_a.x;
+            pd.translation.y = point_b.y - point_a.y;
+            pd.translation.z = point_b.z - point_a.z;
+            pd.length = cv_dist_3d(point_b, point_a);
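+            // angle between the two centroid-relative directions:
+            // atan2(|pa x pb|, pa . pb) is numerically more robust than
+            // acos(pa . pb) for nearly-parallel vectors, but the result
+            // is unsigned (range [0, pi]).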
+            cv::Vec3f pa, pb, cr;
+            pa = cv::normalize((cv::Vec3f)(point_a - centroid));
+            pb = cv::normalize((cv::Vec3f)(point_b - centroid));
+            cr = pa.cross(pb);
+            float s = cv::norm(cr);
+            float c = pa.dot(pb);
+            pd.angle = atan2(s, c);
+
+            // signed-angle variant, disabled:
+            // pd.angle = acos(pa.dot(pb));
+            // if (pa.cross(pb) < 0) {
+            //     pd.angle *= -1;
+            // }
+
+            std::cout << "raw a: " << point_a << " raw b: " << point_b
+                      << " pA: " << pa << " pB: " << pb << " NORM: " << cr
+                      << " ANGLE: " << pd.angle << "\n";
+
+
+            led_data->rms_distance += pd.length * pd.length;
+            // accumulate the signs of the x and y translations
+            // so we can determine the 'center' led
+            led_data->sign_x += copysignf(1.0f, pd.translation.x);
+            led_data->sign_y += copysignf(1.0f, pd.translation.y);
+            float absl = fabsf(pd.length);
+            // determine the longest distance - used to normalise
+            // distances
+            if (absl > led_data->longest_distance) {
+                led_data->longest_distance = absl;
+            }
+            led_data->point_dists.push_back(pd);
+        }
+    }
+}
+
+bool psvr_classify_leds(tracker_instance_t* inst,
+                        std::vector<psvr_led_t>* leds,
+                        psvr_track_data_t* t)
+{
+    tracker3D_psvr_stereo_instance_t* internal =
+        (tracker3D_psvr_stereo_instance_t*)inst->internal_instance;
+    printf("CLASSIFY LEDS ----\n");
+    for (uint32_t i = 0; i < leds->size(); i++) {
+        psvr_led_t l = leds->at(i);
+        // printf("led %f %d %d\n", l.rms_distance, l.sign_x, l.sign_y);
+        float confidence[NUM_LEDS];
+        memset(confidence, 0, sizeof(float) * NUM_LEDS);
+        for (uint32_t j = 0; j < l.point_dists.size(); j++) {
+            point_dist_t measured_pd = l.point_dists[j];
+            printf("measured angle %f \n", -measured_pd.angle);
+            for (uint32_t k = 0; k < NUM_LEDS; k++) {
+                for (uint32_t m = 0;
+                     m < internal->model_leds[k].point_dists.size();
+                     m++) {
+                    point_dist_t model_pd =
+                        internal->model_leds[k].point_dists[m];
+                    printf("%d %d model angle: %f\n", k, m,
+                           model_pd.angle);
+                    float diff = -measured_pd.angle - model_pd.angle;
+                    confidence[k] += diff;
+                }
+            }
+        }
+        printf("confidence %f %f %f %f %f\n", confidence[0], confidence[1],
+               confidence[2], confidence[3], confidence[4]);
+    }
+    printf("----\n");
+
+
+
+    // if we have 5 leds visible, we can use our geometric approach
+    if (leds->size() == 5) {
+        psvr_disambiguate_5points(leds, t);
+    } else {
+        // we don't have all 5 - at least assume each led is the same as
+        // the closest led in the previous frame
+
+        // if we don't have previous track data, we cannot continue.
+        if (internal->prev_track_data.tracked_count == 0) {
+            // return false;
+        }
+        t->longest_dist = 0.0f;
+        for (uint32_t i = 0; i < leds->size(); i++) {
+            psvr_led_t l = leds->at(i);
+            float lowest_dist = 65535.0f;
+            int32_t closest_index = -1;
+            for (int32_t j = 0; j < NUM_LEDS; j++) {
+                if (internal->prev_track_data.confidence[j] > 0) {
+                    float d = math_distance(
+                        internal->prev_track_data.positions_3d[j],
+                        l.position);
+                    // TODO: do not hardcode 0.5f distance threshold
+                    if (d < lowest_dist && d < 0.5f) {
+                        lowest_dist = d;
+                        closest_index = j;
+                    }
+                }
+            }
+            // no plausible predecessor - skip this led rather than
+            // indexing the arrays with -1
+            if (closest_index < 0) {
+                continue;
+            }
+
+            t->positions_3d[closest_index] = l.position;
+            t->l_positions_2d[closest_index] = l.l_position_2d;
+            t->r_positions_2d[closest_index] = l.r_position_2d;
+            t->confidence[closest_index] = 1;
+
+            // update our longest distance
+            if (t->longest_dist < l.longest_distance) {
+                t->longest_dist = l.longest_distance;
+            }
+        }
+        // if we don't have a center led, just use the old position
+        t->translation = internal->prev_track_data.translation;
+        if (t->confidence[2] > 0) {
+            t->translation = t->positions_3d[2];
+        }
+        t->tracked_count = 0;
+        for (uint32_t i = 0; i < NUM_LEDS; i++) {
+            if (t->confidence[i] > 0) {
+                t->tracked_count++;
+            }
+        }
+    }
+    internal->prev_track_data = *t;
+    return true;
+}
+
+bool
+psvr_disambiguate_5points(std::vector<psvr_led_t>* leds, psvr_track_data_t* t)
+{
+    // create a list of the corners, ignoring the center
+    bool found_center = false;
+    t->longest_dist = 0.0f;
+    std::vector<uint32_t> corner_indices;
+    for (uint32_t i = 0; i < leds->size(); i++) {
+        psvr_led_t p = leds->at(i);
+        if (p.sign_x == 0 && p.sign_y == 0) {
+            // this is our center
+            t->translation = p.position;
+            t->confidence[2] = 1;
+            t->positions_3d[2] = p.position;
+            t->l_positions_2d[2] = p.l_position_2d;
+            t->r_positions_2d[2] = p.r_position_2d;
+            found_center = true;
+        } else {
+            // everything else is a corner
+            corner_indices.push_back(i);
+        }
+        if (t->longest_dist < p.longest_distance) {
+            t->longest_dist = p.longest_distance;
+        }
+    }
+    if (!found_center) {
+        return false;
+    }
+    t->tracked_count = 5;
+
+    // find the leftmost and rightmost points - these will belong to our
+    // left and right side edges.
+
+    float lowest_x = 65535.0f;
+    float highest_x = -65535.0f;
+    uint32_t lowest_x_index = 0;
+    uint32_t highest_x_index = 0;
+    for (uint32_t i = 0; i < corner_indices.size(); i++) {
+        psvr_led_t p = leds->at(corner_indices[i]);
+        if (p.position.x < lowest_x) {
+            lowest_x = p.position.x;
+            lowest_x_index = p.world_point_index;
+        }
+        if (p.position.x > highest_x) {
+            highest_x = p.position.x;
+            highest_x_index = p.world_point_index;
+        }
+    }
+
+    // find the corresponding (closest) point on the 'short side' for the
+    // left and right extremities.
+
+    float lowest_l_x_distance = 65535.0f;
+    float lowest_h_x_distance = 65535.0f;
+    // always assigned below - with a found center there are 4 corners,
+    // so each extremity has at least one partner
+    uint32_t lowest_x_pair_index = 0;
+    uint32_t highest_x_pair_index = 0;
+
+    psvr_led_t lcA = leds->at(lowest_x_index);
+    for (uint32_t i = 0; i < corner_indices.size(); i++) {
+        psvr_led_t lcB = leds->at(corner_indices[i]);
+        if (corner_indices[i] != lowest_x_index) {
+            float dist_l_x = math_distance(lcA.position, lcB.position);
+            if (dist_l_x < lowest_l_x_distance) {
+                lowest_x_pair_index = lcB.world_point_index;
+                lowest_l_x_distance = dist_l_x;
+            }
+        }
+    }
+    psvr_led_t hcA = leds->at(highest_x_index);
+    for (uint32_t i = 0; i < corner_indices.size(); i++) {
+        psvr_led_t hcB = leds->at(corner_indices[i]);
+        if (corner_indices[i] != highest_x_index) {
+            float dist_h_x = math_distance(hcA.position, hcB.position);
+            if (dist_h_x < lowest_h_x_distance) {
+                highest_x_pair_index = hcB.world_point_index;
+                lowest_h_x_distance = dist_h_x;
+            }
+        }
+    }
+
+    // now we have 4 points, and can know which 2 are left and which 2 are
+    // right.
+
+    psvr_led_t lA = leds->at(lowest_x_index);
+    psvr_led_t lB = leds->at(lowest_x_pair_index);
+    if (lA.position.y < lB.position.y) {
+        // lA is upper left and lB is lower left
+        t->positions_3d[0] = lA.position;
+        t->l_positions_2d[0] = lA.l_position_2d;
+        t->r_positions_2d[0] = lA.r_position_2d;
+        t->confidence[0] = 1;
+
+        t->positions_3d[3] = lB.position;
+        t->l_positions_2d[3] = lB.l_position_2d;
+        t->r_positions_2d[3] = lB.r_position_2d;
+        t->confidence[3] = 1;
+    } else {
+        // lA is lower left and lB is upper left
+        t->positions_3d[0] = lB.position;
+        t->l_positions_2d[0] = lB.l_position_2d;
+        t->r_positions_2d[0] = lB.r_position_2d;
+        t->confidence[0] = 1;
+
+        t->positions_3d[3] = lA.position;
+        t->l_positions_2d[3] = lA.l_position_2d;
+        t->r_positions_2d[3] = lA.r_position_2d;
+        t->confidence[3] = 1;
+    }
+
+    psvr_led_t hA = leds->at(highest_x_index);
+    psvr_led_t hB = leds->at(highest_x_pair_index);
+    if (hA.position.y < hB.position.y) {
+        // hA is upper right and hB is lower right
+        t->positions_3d[1] = hA.position;
+        t->l_positions_2d[1] = hA.l_position_2d;
+        t->r_positions_2d[1] = hA.r_position_2d;
+        t->confidence[1] = 1;
+
+        t->positions_3d[4] = hB.position;
+        t->l_positions_2d[4] = hB.l_position_2d;
+        t->r_positions_2d[4] = hB.r_position_2d;
+        t->confidence[4] = 1;
+    } else {
+        // hA is lower right and hB is upper right
+        t->positions_3d[1] = hB.position;
+        t->l_positions_2d[1] = hB.l_position_2d;
+        t->r_positions_2d[1] = hB.r_position_2d;
+        t->confidence[1] = 1;
+
+        t->positions_3d[4] = hA.position;
+        t->l_positions_2d[4] = hA.l_position_2d;
+        t->r_positions_2d[4] = hA.r_position_2d;
+        t->confidence[4] = 1;
+    }
+    return true;
+}
+
+bool psvr_compute_svd(psvr_track_data_t* t)
+{
+    // compute SVD for the points we have found, assuming we have at
+    // least 3 points
+    if (t->tracked_count > 2) {
+        cv::Mat measurement(t->tracked_count, 3,
+                            cv::DataType<float>::type);
+        cv::Mat model(t->tracked_count, 3, cv::DataType<float>::type);
+        cv::Mat xCovar;
+        uint8_t c = 0;
+        for (uint32_t i = 0; i < NUM_LEDS; i++) {
+            if (t->confidence[i] > 0) {
+                // normalised, and offset from center (represented as
+                // translation)
+                measurement.at<float>(c, 0) =
+                    (t->translation.x - t->positions_3d[i].x) /
+                    t->longest_dist;
+                measurement.at<float>(c, 1) =
+                    (t->translation.y - t->positions_3d[i].y) /
+                    t->longest_dist;
+                measurement.at<float>(c, 2) =
+                    (t->translation.z - t->positions_3d[i].z) /
+                    t->longest_dist;
+                // printf("MEASURED POINT %d %f %f %f %f %f %f\n", i,
+                //        t->translation.x, t->translation.y,
+                //        t->translation.z, measurement.at<float>(c, 0),
+                //        measurement.at<float>(c, 1),
+                //        measurement.at<float>(c, 2));
+                model.at<float>(c, 0) = physical_led_positions[i].x;
+                model.at<float>(c, 1) = physical_led_positions[i].y;
+                model.at<float>(c, 2) = physical_led_positions[i].z;
+                c++;
+            }
+        }
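+        // Kabsch-style absolute orientation: with cross-covariance
+        // H = model^T * measurement and SVD H = U * S * V^T, the
+        // least-squares rotation aligning model to measurement is
+        // R = V * U^T. note: cv::SVD::compute returns V^T (not V) in its
+        // fourth argument, so the `v * ut` product below is worth
+        // double-checking against the intended R.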
+
+        // create our cross-covariance matrix
+        cv::transpose(model, model);
+        xCovar = model * measurement;
+        cv::Mat w, u, v, ut;
+        cv::SVD::compute(xCovar, w, u, v);
+        cv::transpose(u, ut);
+
+        cv::Mat rot = v * ut;
+
+        // if the determinant is negative we have a reflection rather
+        // than a proper rotation - flip the last column to fix it
+        if (cv::determinant(rot) < 0) {
+            rot.at<float>(0, 2) *= -1;
+            rot.at<float>(1, 2) *= -1;
+            rot.at<float>(2, 2) *= -1;
+        }
+        std::cout << rot << "det: " << cv::determinant(rot) << "\n";
+
+        cv::transpose(rot, rot);
+        // rot is a 3x3 CV_32F matrix, row-major as all cv::Mats are
+        memcpy(t->rotation_matrix.v, rot.data, 9 * sizeof(float));
+
+        return true;
+    } else {
+        return false;
+    }
+}
diff --git a/src/xrt/drivers/montrack/optical_tracking/tracker3D_psvr_stereo.h b/src/xrt/drivers/montrack/optical_tracking/tracker3D_psvr_stereo.h
new file mode 100644
index 0000000000000000000000000000000000000000..8f4daded479e6cccecc3d0456cd92a1d7585b5e2
--- /dev/null
+++ b/src/xrt/drivers/montrack/optical_tracking/tracker3D_psvr_stereo.h
@@ -0,0 +1,84 @@
+#pragma once
+#ifndef TRACKER3D_PSVR_STEREO_H
+#define TRACKER3D_PSVR_STEREO_H
+
+#include <xrt/xrt_defines.h>
+#include <common/calibration.h>
+#include "common/tracked_object.h"
+#include "common/tracker.h"
+
+#define NUM_LEDS 5
+#define TRACKED_POINTS 1
+#define ROI_OFFSET 32.0f
+
+static const char* LED_LABELS[] = {"LU", "RU", "C",  "LL", "RL",
+                                   "LS", "RS", "LB", "RB"};
+
+// simplified model, normalised to longest dist between pair of leds
+static const struct xrt_vec3 physical_led_positions[] = {
+    {-0.413f, -0.28f, -0.18f},
+    {0.413f, -0.28f, -0.18f},
+    {0.0f, 0.0f, 0.0f},
+    {-0.413f, 0.28f, -0.18f},
+    {0.413f, 0.28f, -0.18f},
+};
+/*    {0.0f,0.0f,0.0f},
+    {0.0f,0.0f,0.0f},
+    {0.0f,0.0f,0.0f},
+    {0.0f,0.0f,0.0f},
+
+};*/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// forward declare this
+typedef struct tracker3D_psvr_stereo_instance
+    tracker3D_psvr_stereo_instance_t;
+
+
+tracker3D_psvr_stereo_instance_t*
+tracker3D_psvr_stereo_create(tracker_instance_t* inst);
+bool
+tracker3D_psvr_stereo_destroy(tracker_instance_t* inst);
+
+capture_parameters_t
+tracker3D_psvr_stereo_get_capture_params(tracker_instance_t* inst);
+
+bool
+tracker3D_psvr_stereo_get_debug_frame(tracker_instance_t* inst,
+                                        frame_t* frame);
+bool
+tracker3D_psvr_stereo_queue(tracker_instance_t* inst, frame_t* frame);
+bool
+tracker3D_psvr_stereo_get_poses(tracker_instance_t* inst,
+                                  tracked_object_t* objects,
+                                  uint32_t* count);
+bool
+tracker3D_psvr_stereo_new_poses(tracker_instance_t* inst);
+bool
+tracker3D_psvr_stereo_configure(tracker_instance_t* inst,
+                                  tracker_stereo_configuration_t* config);
+void
+tracker3D_psvr_stereo_register_measurement_callback(
+    tracker_instance_t* inst,
+    void* target_instance,
+    measurement_consumer_callback_func target_func);
+void
+tracker3D_psvr_stereo_register_event_callback(
+    tracker_instance_t* inst,
+    void* target_instance,
+    event_consumer_callback_func target_func);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+
+#endif // TRACKER3D_PSVR_STEREO_H
diff --git a/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_mono.cpp b/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_mono.cpp
index 8d6794a536c14994391ee260f875b9e29dfd1b52..d851e9f2b845c876734baaaae3b619ea653f376d 100644
--- a/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_mono.cpp
+++ b/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_mono.cpp
@@ -9,8 +9,6 @@
 
 static bool
 tracker3D_sphere_mono_track(tracker_instance_t* inst);
-static bool
-tracker3D_sphere_mono_calibrate(tracker_instance_t* inst);
 
 typedef struct tracker3D_sphere_mono_instance
 {
@@ -242,176 +240,6 @@ tracker3D_sphere_mono_track(tracker_instance_t* inst)
 
 	return true;
 }
-/*
-bool
-tracker3D_sphere_mono_calibrate(tracker_instance_t* inst)
-{
-	printf("calibrating...\n");
-	tracker3D_sphere_mono_instance_t* internal =
-	    (tracker3D_sphere_mono_instance_t*)inst->internal_instance;
-	cv::Size image_size(internal->frame_gray.cols,
-	                    internal->frame_gray.rows);
-
-	bool ret = cv::imwrite("/tmp/out.jpg", internal->frame_gray);
-
-
-	// TODO: use multiple env vars? - centralise this
-	char path_string[1024];
-	char* config_path = secure_getenv("HOME");
-	snprintf(path_string, 1024, "%s/.config/monado/%s.calibration",
-	         config_path, internal->configuration.configuration_filename);
-
-	printf("TRY LOADING CONFIG FROM %s\n", path_string);
-	FILE* calib_file = fopen(path_string, "rb");
-	if (calib_file) {
-		// read our calibration from this file
-		read_mat(calib_file, &internal->intrinsics);
-		read_mat(calib_file, &internal->distortion);
-		read_mat(calib_file, &internal->distortion_fisheye);
-
-		// TODO: save data indicating calibration image size
-		// and multiply intrinsics accordingly
-
-		cv::initUndistortRectifyMap(
-		    internal->intrinsics, internal->distortion, cv::noArray(),
-		    internal->intrinsics, image_size, CV_32FC1,
-		    internal->undistort_map_x, internal->undistort_map_y);
-
-		printf("calibrated cameras! setting tracking mode\n");
-		internal->calibrated = true;
-
-		internal->configuration.calibration_mode =
-		    CALIBRATION_MODE_NONE;
-		// send an event to notify our driver of the switch into
-		// tracking mode.
-		driver_event_t e = {};
-		e.type = EVENT_TRACKER_RECONFIGURED;
-		internal->event_target_callback(internal->event_target_instance,
-		                                e);
-		return true;
-	}
-
-	// no saved file - perform interactive calibration.
-	// try and find a chessboard in the image, and run the calibration.
-	// TODO: we need to define some mechanism for UI/user interaction.
-
-	// TODO: initialise this on construction and move this to internal state
-	cv::Size board_size(8, 6);
-	std::vector<cv::Point3f> chessboard_model;
-
-	for (uint32_t i = 0; i < board_size.width * board_size.height; i++) {
-		cv::Point3f p(i / board_size.width, i % board_size.width, 0.0f);
-		chessboard_model.push_back(p);
-	}
-
-	cv::Mat chessboard_measured;
-
-	// clear our debug image
-	cv::rectangle(
-	    internal->debug_rgb, cv::Point2f(0, 0),
-	    cv::Point2f(internal->debug_rgb.cols, internal->debug_rgb.rows),
-	    cv::Scalar(0, 0, 0), -1, 0);
-
-	// we will collect samples continuously - the user should be able to
-	// wave a chessboard around randomly while the system calibrates..
-
-	// TODO: we need a coverage measurement and an accuracy measurement,
-	// so we can converge to something that is as complete and correct as
-	// possible.
-
-	bool found_board = cv::findChessboardCorners(
-	    internal->frame_gray, board_size, chessboard_measured);
-	char message[128];
-	message[0] = 0x0;
-
-	if (found_board) {
-		// we will use the last n samples to calculate our calibration
-		if (internal->chessboards_measured.size() >
-		    MAX_CALIBRATION_SAMPLES) {
-			internal->chessboards_measured.erase(
-			    internal->chessboards_measured.begin());
-		} else {
-			internal->chessboards_model.push_back(chessboard_model);
-		}
-
-		internal->chessboards_measured.push_back(chessboard_measured);
-
-		if (internal->chessboards_measured.size() ==
-		    MAX_CALIBRATION_SAMPLES) {
-			// TODO - run this if coverage test passes
-			cv::Mat rvecs, tvecs;
-
-			float rp_error = cv::calibrateCamera(
-			    internal->chessboards_model,
-			    internal->chessboards_measured, image_size,
-			    internal->intrinsics, internal->distortion, rvecs,
-			    tvecs);
-
-			cv::initUndistortRectifyMap(
-			    internal->intrinsics, internal->distortion,
-			    cv::noArray(), internal->intrinsics, image_size,
-			    CV_32FC1, internal->undistort_map_x,
-			    internal->undistort_map_y);
-
-			char path_string[PATH_MAX];
-			char file_string[PATH_MAX];
-			// TODO: use multiple env vars?
-			char* config_path = secure_getenv("HOME");
-			snprintf(path_string, PATH_MAX, "%s/.config/monado",
-			         config_path);
-			snprintf(
-			    file_string, PATH_MAX,
-			    "%s/.config/monado/%s.calibration", config_path,
-			    internal->configuration.configuration_filename);
-
-			printf("TRY WRITING CONFIG TO %s\n", file_string);
-			FILE* calib_file = fopen(file_string, "wb");
-			if (!calib_file) {
-				mkpath(path_string);
-			}
-			calib_file = fopen(file_string, "wb");
-			if (!calib_file) {
-				printf(
-				    "ERROR. could not create calibration file "
-				    "%s\n",
-				    file_string);
-			} else {
-				write_mat(calib_file, &internal->intrinsics);
-				write_mat(calib_file, &internal->distortion);
-				write_mat(calib_file,
-				          &internal->distortion_fisheye);
-				fclose(calib_file);
-			}
-
-			printf("calibrated cameras! setting tracking mode\n");
-			internal->calibrated = true;
-			internal->configuration.calibration_mode =
-			    CALIBRATION_MODE_NONE;
-			// send an event to notify our driver of the switch into
-			// tracking mode.
-			driver_event_t e = {};
-			e.type = EVENT_TRACKER_RECONFIGURED;
-			internal->event_target_callback(
-			    internal->event_target_instance, e);
-		} else {
-			snprintf(message, 128, "COLLECTING SAMPLE: %d/%d",
-			         internal->chessboards_measured.size() + 1,
-			         MAX_CALIBRATION_SAMPLES);
-		}
-	}
-
-	cv::drawChessboardCorners(internal->debug_rgb, board_size,
-	                          chessboard_measured, found_board);
-
-	cv::putText(internal->debug_rgb, "CALIBRATION MODE",
-	            cv::Point2i(160, 240), 0, 1.0f, cv::Scalar(192, 192, 192));
-	cv::putText(internal->debug_rgb, message, cv::Point2i(160, 460), 0,
-	            0.5f, cv::Scalar(192, 192, 192));
-
-	tracker_send_debug_frame(inst);
-
-	return true;
-}*/
 
 bool
 tracker3D_sphere_mono_get_poses(tracker_instance_t* inst,
@@ -464,16 +292,16 @@ tracker3D_sphere_mono_configure(tracker_instance_t* inst,
         char path_string[1024];
         char* config_path = secure_getenv("HOME");
         snprintf(path_string, 1024, "%s/.config/monado/%s.calibration",
-                 config_path, internal->configuration.configuration_filename);
+                 config_path, internal->configuration.camera_configuration_filename);
 
         printf("TRY LOADING CONFIG FROM %s\n", path_string);
         FILE* calib_file = fopen(path_string, "rb");
         if (calib_file) {
             // read our calibration from this file
-            read_mat(calib_file, &intrinsics);
-            read_mat(calib_file, &distortion);
-            read_mat(calib_file, &distortion_fisheye);
-            read_mat(calib_file, &mat_image_size);
+            read_cv_mat(calib_file, &intrinsics);
+            read_cv_mat(calib_file, &distortion);
+            read_cv_mat(calib_file, &distortion_fisheye);
+            read_cv_mat(calib_file, &mat_image_size);
 
             cv::Size image_size(mat_image_size.at<float>(0,0),mat_image_size.at<float>(0,1));
 
diff --git a/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_stereo.cpp b/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_stereo.cpp
index f7294ab16b420f0ce1a934982649951880aeb167..8f8b9415b5d213e0eaa027a075fb2757842c0993 100644
--- a/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_stereo.cpp
+++ b/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_stereo.cpp
@@ -1,18 +1,19 @@
 #include <opencv2/opencv.hpp>
 
 #include "tracker3D_sphere_stereo.h"
+#include "common/calibration_opencv.hpp"
 #include "common/opencv_utils.hpp"
 
 #include "track_psvr.h"
 
 #include <sys/stat.h>
+#include <sys/time.h>
 #include <linux/limits.h>
 
 #include "util/u_misc.h"
 
-#define MAX_CALIBRATION_SAMPLES                                                \
-	23 // mo' samples, mo' calibration accuracy, at the expense of time.
-
+#include <eigen3/Eigen/Core>
+#include <eigen3/Eigen/Geometry>
 
 static bool
 tracker3D_sphere_stereo_track(tracker_instance_t* inst);
@@ -65,13 +66,8 @@ typedef struct tracker3D_sphere_stereo_instance
 	cv::Mat r_rectify_map_x;
 	cv::Mat r_rectify_map_y;
 
-
-	// calibration data structures
-	std::vector<std::vector<cv::Point3f>> chessboards_model;
-	std::vector<std::vector<cv::Point2f>> l_chessboards_measured;
-	std::vector<std::vector<cv::Point2f>> r_chessboards_measured;
-
 	bool calibrated;
+    room_setup rs;
 
 	bool l_alloced_frames;
 	bool r_alloced_frames;
@@ -280,7 +276,7 @@ tracker3D_sphere_stereo_track(tracker_instance_t* inst)
 
 
 	// DEBUG: dump all our planes out for inspection
-    /*cv::imwrite("/tmp/l_out_y.jpg",internal->l_frame_gray);
+   /* cv::imwrite("/tmp/l_out_y.jpg",internal->l_frame_gray);
 	cv::imwrite("/tmp/r_out_y.jpg",internal->r_frame_gray);
 	cv::imwrite("/tmp/l_out_u.jpg",internal->l_frame_u);
 	cv::imwrite("/tmp/r_out_u.jpg",internal->r_frame_u);
@@ -384,7 +380,7 @@ tracker3D_sphere_stereo_track(tracker_instance_t* inst)
     if (debug_img.rows > internal->debug_rgb.rows || debug_img.cols > internal->debug_rgb.cols)
     {
         cropped_rows = internal->debug_rgb.rows;
-        cropped_cols = internal->debug_rgb.rows;
+        cropped_cols = internal->debug_rgb.cols;
 
     }
     cv::Mat dst_roi =
@@ -441,9 +437,9 @@ tracker3D_sphere_stereo_track(tracker_instance_t* inst)
 			// Transform
             cv::Vec4d h_world = (cv::Matx44d)internal->disparity_to_depth * xydw;
 			// Divide by scale to get 3D vector from homogeneous
-			// coordinate
+            // coordinate. invert x while we are here.
 			world_points.push_back(cv::Point3f(
-			    h_world[0] / h_world[3], h_world[1] / h_world[3],
+                -h_world[0] / h_world[3], h_world[1] / h_world[3],
 			    h_world[2] / h_world[3]));
 		}
 	}
@@ -466,7 +462,7 @@ tracker3D_sphere_stereo_track(tracker_instance_t* inst)
 		cv::circle(internal->debug_rgb, img_point, 3,
 		           cv::Scalar(0, 255, 0));
 
-		float dist = dist_3d(world_point, last_point);
+        float dist = cv_dist_3d(world_point, last_point);
 		if (dist < lowest_dist) {
 			tracked_index = i;
 			lowest_dist = dist;
@@ -478,9 +474,27 @@ tracker3D_sphere_stereo_track(tracker_instance_t* inst)
 
 		// create our measurement for the filter
 		m.flags = (tracker_measurement_flags_t)(MEASUREMENT_POSITION | MEASUREMENT_OPTICAL);
-		m.pose.position.x = world_point.x;
-		m.pose.position.y = world_point.y;
-		m.pose.position.z = world_point.z;
+        struct timeval tp;
+        gettimeofday(&tp,NULL);
+        m.source_timestamp = tp.tv_sec * 1000000 + tp.tv_usec;
+
+
+        //apply our room setup transform
+        Eigen::Vector3f p = Eigen::Map<Eigen::Vector3f>(&world_point.x);
+        Eigen::Vector4f pt;
+        pt.x() = p.x();
+        pt.y() = p.y();
+        pt.z() = p.z();
+        pt.w() = 1.0f;
+
+        //this is a glm mat4 written out 'flat'
+        Eigen::Matrix4f mat = Eigen::Map<Eigen::Matrix<float,4,4>>(internal->rs.origin_transform.v);
+        pt = mat * pt;
+
+        m.pose.position.x = pt.x();
+        m.pose.position.y = pt.y();
+        m.pose.position.z = pt.z();
+
 
 		// update internal state
 		cv::KeyPoint l_kp = l_blobs[tracked_index];
@@ -506,7 +520,10 @@ tracker3D_sphere_stereo_track(tracker_instance_t* inst)
 		cv::putText(internal->debug_rgb, message, cv::Point2i(10, 50),
 		            0, 0.5f, cv::Scalar(96, 128, 192));
 		if (internal->measurement_target_callback) {
-			internal->measurement_target_callback(
+            //printf("STTR queueing timestamp %lld\n",m.source_timestamp);
+              //printf("X: %f Y: %f Z: %f mx: %f my: %f mz: %f\n", world_point.x,
+              //       world_point.y, world_point.z,m.pose.position.x,m.pose.position.y,m.pose.position.z);
+            internal->measurement_target_callback(
 			    internal->measurement_target_instance, &m);
 		}
 	}
@@ -561,78 +578,31 @@ tracker3D_sphere_stereo_configure(tracker_instance_t* inst,
 
     //load calibration data
 
+
+    if (!internal->calibrated) {
+        if (calibration_get_stereo(config->camera_configuration_filename,
+                                   true,
+                                   &internal->l_undistort_map_x,
+                                   &internal->l_undistort_map_y,
+                                   &internal->l_rectify_map_x,
+                                   &internal->l_rectify_map_y,
+                                   &internal->r_undistort_map_x,
+                                   &internal->r_undistort_map_y,
+                                   &internal->r_rectify_map_x,
+                                   &internal->r_rectify_map_y,
+                                   &internal->disparity_to_depth)) {
+            printf("loaded calibration for camera!\n");
+        }
+        if (!calibration_get_roomsetup(config->room_setup_filename,
+                                       &internal->rs)) {
+            printf("Could not load room setup. Tracking frame will be "
+                   "wrong.\n");
+        }
+
+        internal->calibrated = true;
+
 
-        cv::Mat l_intrinsics;
-        cv::Mat l_distortion;
-        cv::Mat l_distortion_fisheye;
-        cv::Mat l_translation;
-        cv::Mat l_rotation;
-        cv::Mat l_projection;
-        cv::Mat r_intrinsics;
-        cv::Mat r_distortion;
-        cv::Mat r_distortion_fisheye;
-        cv::Mat r_translation;
-        cv::Mat r_rotation;
-        cv::Mat r_projection;
-        cv::Mat mat_image_size;
-
-        cv::Mat zero_distortion =
-                cv::Mat(DISTORTION_SIZE, 1, CV_32F, cv::Scalar(0.0f));;
-
-        char path_string[256]; // TODO: 256 maybe not enough
-        // TODO: use multiple env vars?
-        char* config_path = secure_getenv("HOME");
-        snprintf(path_string, 256, "%s/.config/monado/%s.calibration",
-             config_path,
-             config->configuration_filename); // TODO: hardcoded 256
-
-        printf("TRY LOADING CONFIG FROM %s\n", path_string);
-        FILE* calib_file = fopen(path_string, "rb");
-        if (calib_file) {
-            // read our calibration from this file
-            read_mat(calib_file, &l_intrinsics);
-            read_mat(calib_file, &r_intrinsics);
-            read_mat(calib_file, &l_distortion);
-            read_mat(calib_file, &r_distortion);
-            read_mat(calib_file, &l_distortion_fisheye);
-            read_mat(calib_file, &r_distortion_fisheye);
-            read_mat(calib_file, &l_rotation);
-            read_mat(calib_file, &r_rotation);
-            read_mat(calib_file, &l_translation);
-            read_mat(calib_file, &r_translation);
-            read_mat(calib_file, &l_projection);
-            read_mat(calib_file, &r_projection);
-            read_mat(calib_file, &internal->disparity_to_depth);
-            read_mat(calib_file, &mat_image_size);
-
-            cv::Size image_size(mat_image_size.at<float>(0,0),
-                            mat_image_size.at<float>(0,1));
-
-        cv::fisheye::initUndistortRectifyMap(
-            l_intrinsics, l_distortion_fisheye,
-            cv::noArray(), l_intrinsics, image_size, CV_32FC1,
-            internal->l_undistort_map_x, internal->l_undistort_map_y);
-        cv::fisheye::initUndistortRectifyMap(
-            r_intrinsics, r_distortion_fisheye,
-            cv::noArray(), r_intrinsics, image_size, CV_32FC1,
-            internal->r_undistort_map_x, internal->r_undistort_map_y);
-
-        cv::initUndistortRectifyMap(
-            l_intrinsics, zero_distortion,
-            l_rotation, l_projection, image_size,
-            CV_32FC1, internal->l_rectify_map_x,
-            internal->l_rectify_map_y);
-        cv::initUndistortRectifyMap(
-            r_intrinsics, zero_distortion,
-            r_rotation, r_projection, image_size,
-            CV_32FC1, internal->r_rectify_map_x,
-            internal->r_rectify_map_y);
-
-        printf("loaded calibration for camera!\n");
-        internal->calibrated = true;
     }
-}
+
     internal->configuration = *config;
     inst->configured=true;
 	return true;
diff --git a/src/xrt/drivers/psmv/psmv_driver.c b/src/xrt/drivers/psmv/psmv_driver.c
index 551581987507163dbd65d5baa5e0439f007fd7e0..00ecb77a87180bd21a8c0fc4b13b19ec6adc3e30 100644
--- a/src/xrt/drivers/psmv/psmv_driver.c
+++ b/src/xrt/drivers/psmv/psmv_driver.c
@@ -14,6 +14,7 @@
 #include <wchar.h>
 
 #include "xrt/xrt_prober.h"
+#include "xrt/xrt_defines.h"
 
 #include "util/u_time.h"
 #include "util/u_misc.h"
@@ -22,7 +23,7 @@
 
 #include "psmv_interface.h"
 #include <unistd.h>
-
+#include <optical_tracking/common/calibration.h>
 #include <optical_tracking/common/tracker.h>
 #include <filters/filter_complementary.h>
 
@@ -208,7 +209,6 @@ struct psmv_device
 	uint64_t last_frame_seq;
     pthread_t* tracking_thread;
     int64_t start_us;
-    float accel_scale;
 
 
 	bool print_spew;
@@ -288,9 +288,9 @@ psmv_read_hid(struct psmv_device *psmv)
 		if (psmv->filter) {
 			tracker_measurement_t m = {0};
 			m.flags = (tracker_measurement_flags_t)(MEASUREMENT_IMU | MEASUREMENT_RAW_ACCEL | MEASUREMENT_RAW_GYRO);
-            m.accel.x = input.frame[1].accel.x * psmv->accel_scale;
-            m.accel.y = input.frame[1].accel.y * psmv->accel_scale;
-            m.accel.z = input.frame[1].accel.z * psmv->accel_scale;
+            m.accel.x = input.frame[1].accel.x;
+            m.accel.y = input.frame[1].accel.y;
+            m.accel.z = input.frame[1].accel.z;
 			m.gyro.x = input.frame[1].gyro.x;
 			m.gyro.y = input.frame[1].gyro.y;
 			m.gyro.z = input.frame[1].gyro.z;
@@ -298,11 +298,13 @@ psmv_read_hid(struct psmv_device *psmv)
 			m.source_sequence = input.seq_no;
 
             //m.source_timestamp = input.timestamp;
+            struct timeval tp;
+            gettimeofday(&tp,NULL);
+            psmv->start_us += (diff * 10 );
 
-
-            psmv->start_us += (diff*10000);
+           // psmv->start_us += (diff);
             m.source_timestamp = psmv->start_us;
-            //printf("queueing timestamp %lld %d %d %d\n",m.source_timestamp,input.timestamp,diff,input.seq_no);
+            //printf("PSMV queueing timestamp %lld\n",m.source_timestamp);
 			psmv->filter->filter_queue(psmv->filter,&m);
 		}
 
@@ -449,16 +451,25 @@ psmv_device_get_tracked_pose(struct xrt_device *xdev,
 {
 	struct psmv_device *psmv = psmv_device(xdev);
 
+    out_relation->relation_flags = (enum xrt_space_relation_flags)(
+        XRT_SPACE_RELATION_ORIENTATION_VALID_BIT |
+        XRT_SPACE_RELATION_ORIENTATION_TRACKED_BIT | XRT_SPACE_RELATION_POSITION_TRACKED_BIT | XRT_SPACE_RELATION_POSITION_VALID_BIT);
+
 	psmv_read_hid(psmv);
 	filter_state_t fs;
 	psmv->filter->filter_get_state(psmv->filter,&fs);
-    struct xrt_quat temp = fs.pose.orientation;
-    fs.pose.orientation.x=temp.y;
-
-    fs.pose.orientation.y=temp.z;
-    fs.pose.orientation.z=-temp.x;
 
     out_relation->pose = fs.pose;
+    //change quaternion axes to align with xrt coordinate system.
+    // TODO: use a matrix transform here.
+    out_relation->pose.orientation.x = -fs.pose.orientation.x;
+    out_relation->pose.orientation.y = fs.pose.orientation.z;
+    out_relation->pose.orientation.z = -fs.pose.orientation.y;
+
+    //translate and rotate according to calibration
+
 }
 
 static void
@@ -544,7 +555,7 @@ psmv_found(struct xrt_prober *xp,
 	// We only have one output.
 	psmv->base.outputs[0].name = XRT_OUTPUT_NAME_PSMV_RUMBLE_VIBRATION;
 
-	static int hack = 0;
+    static int hack = 2;
 	switch (hack++ % 3) {
 	case 0: psmv->state.red = 0xff; break;
 	case 1:
@@ -554,8 +565,6 @@ psmv_found(struct xrt_prober *xp,
 	case 2: psmv->state.blue = 0xff; break;
 	}
 
-    psmv->accel_scale = 1.0f; //-8<->+8g (1G = ~4200 raw reading)
-
 	// Send the first update package
 	psmv_led_and_trigger_update(psmv, 1);
 
@@ -565,27 +574,28 @@ psmv_found(struct xrt_prober *xp,
     // create our tracker and determine tracked object count
     // we will defer full configuration until a frame is available
 
-
     psmv->tracker = tracker_create(TRACKER_TYPE_SPHERE_STEREO);
+
+
+    // if we want to generate a calibration, we can use this calibration tracker,
+    // instead of the above. this will move to a standalone application.
+    // TODO: note the config in the tracker run thread will determine the filename,
+    // this should be passed through from the frameserver.
+    //psmv->tracker = tracker_create(TRACKER_TYPE_CALIBRATION_STEREO);
+
 	psmv->filter = filter_create(FILTER_TYPE_COMPLEMENTARY);
 
     filter_complementary_configuration_t config;
-    config.accel_to_radian = 0.000244f;
-    config.gyro_to_radian = 0.0001f;
+    config.bias = 0.02;
+    config.accel_to_g = 0.000244f;
+    config.gyro_to_radians_per_second = 0.00125f;
     config.drift_z_to_zero = 0.001f;
-    config.max_timestamp=65535; //ps move timestamps are 16 bit
-
     psmv->filter->filter_configure(psmv->filter,&config);
 
 	//send our optical measurements to the filter
 	psmv->tracker->tracker_register_measurement_callback(psmv->tracker,psmv->filter,psmv->filter->filter_queue);
 
-    // if we want to generate a calibration, we can use this calibration tracker,
-    // instead of the above. this will move to a standalone application.
-    // TODO: note the config in the tracker run thread will determine the filename,
-    // this should be passed through from the frameserver.
 
-    // psmv->tracker = tracker_create(TRACKER_TYPE_CALIBRATION_STEREO);
 
     // initialise some info about how many objects are tracked, and the initial frame seq.
     psmv->tracker->tracker_get_poses(psmv->tracker,NULL,&psmv->tracked_objects);
@@ -621,8 +631,8 @@ void* psmv_tracking_run(void* arg) {
                    frame_t* source = &fq->source_frames[0];
                    tracker_stereo_configuration_t tc;
                    //TODO: pass this through the framequeue
-                   snprintf(tc.configuration_filename,256,"PETE");
-
+                   snprintf(tc.camera_configuration_filename,256,"PETE");
+                   snprintf(tc.room_setup_filename,256,"PSMV");
                    // set up our side-by-side split.
                    // TODO: this should probably be camera-specific.
                    tc.l_format = source->format;
@@ -648,7 +658,7 @@ void* psmv_tracking_run(void* arg) {
             //printf("psmv ref %d\n",f.source_sequence);
             if (f.source_sequence > psmv->last_frame_seq)
             {
-                //printf("psmv tracking\n");
+                printf("psmv tracking\n");
                 psmv->tracker->tracker_queue(psmv->tracker,&f);
                 psmv->last_frame_seq = f.source_sequence;
                 //printf("unrefing tracked %d\n",f.source_sequence);
diff --git a/src/xrt/drivers/psvr/psvr_device.c b/src/xrt/drivers/psvr/psvr_device.c
index 87bd2378692084e05999279d4bb92e3eb2a425ce..df0f4a390dd021c5a8729cb4fe5868729a63bac3 100644
--- a/src/xrt/drivers/psvr/psvr_device.c
+++ b/src/xrt/drivers/psvr/psvr_device.c
@@ -63,6 +63,16 @@ struct psvr_device
 
 	bool print_spew;
 	bool print_debug;
+
+	tracker_instance_t* tracker;
+	uint32_t tracked_objects;
+	tracked_object_t* tracked_object_array;
+
+	filter_instance_t* filter;
+
+	uint64_t last_frame_seq;
+	pthread_t tracking_thread;
+	int64_t start_us;
 };
 
 
@@ -93,6 +103,8 @@ enum psvr_leds
 	PSVR_LED_BACK = PSVR_LED_H | PSVR_LED_I,
 
 	PSVR_LED_ALL = PSVR_LED_FRONT | PSVR_LED_BACK,
+	PSVR_LED_FRONT_5 =
+	    PSVR_LED_A | PSVR_LED_B | PSVR_LED_C | PSVR_LED_D | PSVR_LED_E,
+	PSVR_LED_CENTER = PSVR_LED_E,
 };
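PSVR_LED_FRONT_5 groups the five front-panel LEDs so they can be driven as one mask; for example (a sketch mirroring the control_leds call used later in this file):

/* Light only the front-center LED at full power. */
ret = control_leds(psvr, PSVR_LED_CENTER, PSVR_LED_POWER_MAX,
                   (enum psvr_leds)0);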
 
 
@@ -102,6 +114,9 @@ enum psvr_leds
  *
  */
 
+
+static void *
+psvr_tracking_run(void *arg);
+
 static inline struct psvr_device *
 psvr_device(struct xrt_device *p)
 {
@@ -613,12 +628,18 @@ psvr_device_get_tracked_pose(struct xrt_device *xdev,
 	//! @todo adjust for latency here
 	*out_timestamp = now;
 
-	out_relation->pose.orientation.w = 1.0f;
+	filter_state_t fs;
+	psvr->filter->filter_get_state(psvr->filter, &fs);
+
+	PSVR_SPEW(psvr, "setting pose orientation: %f %f %f %f",
+	          fs.pose.orientation.x, fs.pose.orientation.y,
+	          fs.pose.orientation.z, fs.pose.orientation.w);
+	out_relation->pose = fs.pose;
+
+	// TODO: translate and rotate according to calibration.
 
-	//! @todo assuming that orientation is actually currently tracked.
+	//! @todo assuming that orientation and position are actually tracked.
 	out_relation->relation_flags = (enum xrt_space_relation_flags)(
 	    XRT_SPACE_RELATION_ORIENTATION_VALID_BIT |
-	    XRT_SPACE_RELATION_ORIENTATION_TRACKED_BIT);
+	    XRT_SPACE_RELATION_ORIENTATION_TRACKED_BIT |
+	    XRT_SPACE_RELATION_POSITION_VALID_BIT |
+	    XRT_SPACE_RELATION_POSITION_TRACKED_BIT);
 
 	PSVR_SPEW(psvr, "\n\taccel = %f %f %f\n\tgyro = %f %f %f",
 	          psvr->raw.accel.x, psvr->raw.accel.y, psvr->raw.accel.z,
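For the latency todo in this function, a first-order prediction sketch: integrate the latest angular velocity over the latency interval, q(t+dt) ≈ normalize(q + (dt/2)·ω⊗q), with ω the gyro rate as a pure quaternion. This is illustrative only; the filter may grow a better prediction hook.

#include <math.h>

static struct xrt_quat
quat_integrate(struct xrt_quat q, struct xrt_vec3 w, float dt)
{
	float h = 0.5f * dt;
	struct xrt_quat r = {
	    .x = q.x + h * (w.x * q.w + w.y * q.z - w.z * q.y),
	    .y = q.y + h * (-w.x * q.z + w.y * q.w + w.z * q.x),
	    .z = q.z + h * (w.x * q.y - w.y * q.x + w.z * q.w),
	    .w = q.w + h * (-w.x * q.x - w.y * q.y - w.z * q.z),
	};
	/* Renormalize to keep the result a unit quaternion. */
	float mag = sqrtf(r.x * r.x + r.y * r.y + r.z * r.z + r.w * r.w);
	r.x /= mag;
	r.y /= mag;
	r.z /= mag;
	r.w /= mag;
	return r;
}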
@@ -705,7 +726,9 @@ psvr_device_create(struct hid_device_info *hmd_handle_info,
 	if (debug_get_bool_option_psvr_disco()) {
 		ret = disco_leds(psvr);
 	} else {
-		ret = control_leds(psvr, PSVR_LED_ALL, PSVR_LED_POWER_MAX,
+        //ret = control_leds(psvr, PSVR_LED_ALL, PSVR_LED_POWER_OFF,
+        //                   (enum psvr_leds)0);
+        ret = control_leds(psvr, PSVR_LED_FRONT_5, PSVR_LED_POWER_MAX,
 		                   (enum psvr_leds)0);
 	}
 	if (ret < 0) {
@@ -742,6 +765,39 @@ psvr_device_create(struct hid_device_info *hmd_handle_info,
 		u_device_dump_config(&psvr->base, __func__, "Sony PSVR");
 	}
 
+    psvr->tracker = tracker_create(TRACKER_TYPE_PSVR_STEREO);
+
+    // If we want to generate a calibration, we can use the calibration
+    // tracker instead of the one above; this will move to a standalone
+    // application.
+    // TODO: the config in the tracker run thread determines the filename;
+    // this should be passed through from the frameserver.
+    //psvr->tracker = tracker_create(TRACKER_TYPE_CALIBRATION_STEREO);
+
+    psvr->filter = filter_create(FILTER_TYPE_COMPLEMENTARY);
+
+    filter_complementary_configuration_t config;
+    config.bias = 0.02f;
+    config.accel_to_g = 0.000244f;
+    config.gyro_to_radians_per_second = 0.00125f;
+    config.drift_z_to_zero = 0.001f;
+    psvr->filter->filter_configure(psvr->filter,&config);
+
+    //send our optical measurements to the filter
+    psvr->tracker->tracker_register_measurement_callback(psvr->tracker,psvr->filter,psvr->filter->filter_queue);
+
+    // Initialise how many objects are tracked, and the initial frame sequence.
+    psvr->tracker->tracker_get_poses(psvr->tracker,NULL,&psvr->tracked_objects);
+    psvr->last_frame_seq = 0;
+    struct timeval tp;
+    gettimeofday(&tp,NULL);
+    psvr->start_us = tp.tv_sec * 1000000 + tp.tv_usec;
+    pthread_create(&psvr->tracking_thread,NULL,psvr_tracking_run,psvr);
+
 	PSVR_DEBUG(psvr, "YES!");
 
 	return &psvr->base;
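The epoch above is taken from gettimeofday(), which is wall-clock time and can jump under NTP or manual adjustment; for a tracking timebase a monotonic clock is the safer choice. A sketch, assuming POSIX clock_gettime is available:

#include <stdint.h>
#include <time.h>

/* Microseconds from a clock that never jumps backwards. */
static int64_t
monotonic_now_us(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}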
@@ -755,3 +811,67 @@ cleanup:
 
 	return NULL;
 }
+
+
+static void *
+psvr_tracking_run(void *arg)
+{
+    struct psvr_device* psvr = (struct psvr_device*) arg;
+    frame_queue_t* fq = frame_queue_instance();
+    frame_t f;
+    //TODO: orderly shutdown
+    while(1) {
+
+        if (frame_queue_ref_latest(fq,0,&f)) {
+
+            if (!psvr->tracker->configured) {
+                // We need to set up our tracker. Currently we hardcode the
+                // source id to 0, assuming only one camera is running.
+
+                // We also assume our calibration will be available in the
+                // specified filename; this should come from the frameserver
+                // source descriptor, somehow.
+                if (fq->source_frames[0].format == FORMAT_YUV444_UINT8) {
+                   frame_t* source = &fq->source_frames[0];
+                   tracker_stereo_configuration_t tc;
+                   //TODO: pass this through the framequeue
+                   snprintf(tc.camera_configuration_filename,256,"PETE");
+                   snprintf(tc.room_setup_filename,256,"PSMV");
+                   // set up our side-by-side split.
+                   // TODO: this should probably be camera-specific.
+                   tc.l_format = source->format;
+                   tc.r_format = FORMAT_NONE;
+                   tc.split_left = true;
+                   tc.l_source_id = source->source_id;
+                   tc.l_rect.tl.x = 0.0f;
+                   tc.l_rect.tl.y = 0.0f;
+                   tc.l_rect.br.x = source->width / 2.0f;
+                   tc.l_rect.br.y = source->height;
+                   tc.r_rect.tl.x = source->width / 2.0f;
+                   tc.r_rect.tl.y = 0.0f;
+                   tc.r_rect.br.x = source->width;
+                   tc.r_rect.br.y = source->height;
+                   // TODO: if this fails, we will just keep trying; not ideal.
+                   psvr->tracker->tracker_configure(psvr->tracker,&tc);
+               }
+            }
+
+            //printf("psvr ref %d\n",f.source_sequence);
+            // Queue only frames whose sequence has advanced; either way,
+            // drop our reference afterwards.
+            if (f.source_sequence > psvr->last_frame_seq) {
+                //printf("psvr tracking\n");
+                psvr->tracker->tracker_queue(psvr->tracker, &f);
+                psvr->last_frame_seq = f.source_sequence;
+            }
+            frame_queue_unref(fq, &f);
+        }
+        // Sleep even when no frame was available, so an empty queue does
+        // not busy-spin this thread.
+        usleep(1000);
+    }
+}
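A sketch for the orderly-shutdown TODO above: gate the loop on a flag the destroy path clears, then join the thread. `run_tracking` is a hypothetical field, not currently in struct psvr_device:

#include <stdatomic.h>

/* In struct psvr_device:   atomic_bool run_tracking;
 * In psvr_tracking_run:    while (atomic_load(&psvr->run_tracking)) { ... }
 * Called from the destroy path: */
static void
psvr_stop_tracking(struct psvr_device *psvr)
{
	atomic_store(&psvr->run_tracking, false);
	pthread_join(psvr->tracking_thread, NULL);
}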
diff --git a/src/xrt/drivers/psvr/psvr_device.h b/src/xrt/drivers/psvr/psvr_device.h
index c17eab2026b0f4c778840c36a1314b6e90bdc7b2..deefb8a165a2c8ad8447ea1a663497c04e977b45 100644
--- a/src/xrt/drivers/psvr/psvr_device.h
+++ b/src/xrt/drivers/psvr/psvr_device.h
@@ -16,6 +16,15 @@
 
 #include <hidapi.h>
 
+#include <optical_tracking/common/calibration.h>
+#include <optical_tracking/common/tracker.h>
+#include <filters/filter_complementary.h>
+
+#include <mt_framequeue.h>
+#include <pthread.h>
+#include <sys/time.h>
+
 
 #ifdef __cplusplus
 extern "C" {
diff --git a/src/xrt/drivers/psvr/psvr_prober.c b/src/xrt/drivers/psvr/psvr_prober.c
index cd912c3efc982e57bdc2aacf78e75ff26693322e..c34dd91073598d4b5e992b0d968e0a085aa407a3 100644
--- a/src/xrt/drivers/psvr/psvr_prober.c
+++ b/src/xrt/drivers/psvr/psvr_prober.c
@@ -29,7 +29,7 @@
  */
 
 // Should the experimental PSVR driver be enabled.
-DEBUG_GET_ONCE_BOOL_OPTION(psvr_enable, "PSVR_ENABLE", false)
+DEBUG_GET_ONCE_BOOL_OPTION(psvr_enable, "PSVR_ENABLE", true)
 DEBUG_GET_ONCE_BOOL_OPTION(psvr_spew, "PSVR_PRINT_SPEW", false)
 DEBUG_GET_ONCE_BOOL_OPTION(psvr_debug, "PSVR_PRINT_DEBUG", false)
 
diff --git a/src/xrt/include/xrt/xrt_defines.h b/src/xrt/include/xrt/xrt_defines.h
index 34f6bc37ab16bb304983776522079802f94581d0..012071399913a49f9e1bbd728f179da4111e97f7 100644
--- a/src/xrt/include/xrt/xrt_defines.h
+++ b/src/xrt/include/xrt/xrt_defines.h
@@ -89,10 +89,23 @@ struct xrt_vec3
 	float z;
 };
 
+/*!
+ * A 4 element vector with single floats.
+ *
+ * @ingroup xrt_iface math
+ */
+struct xrt_vec4
+{
+	float x;
+	float y;
+	float z;
+	float w;
+};
+
 /*!
  * A pose composed of a position and orientation.
  *
- * @see xrt_qaut
+ * @see xrt_quat
  * @see xrt_vec3
  * @ingroup xrt_iface math
  */
@@ -128,6 +141,19 @@ struct xrt_matrix_2x2
 	};
 };
 
+/*!
+ * A tightly packed 3x3 matrix of floats.
+ *
+ * @ingroup xrt_iface math
+ */
+struct xrt_matrix_3x3
+{
+	union {
+		float v[9];
+		struct xrt_vec3 vecs[3];
+	};
+};
+
 /*!
  * A tightly packed 4x4 matrix of floats.
  *
@@ -135,7 +161,10 @@ struct xrt_matrix_2x2
  */
 struct xrt_matrix_4x4
 {
-	float v[16];
+	union {
+		float v[16];
+		struct xrt_vec4 vecs[4];
+	};
 };
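With the union in place, a row or column of the 4x4 matrix (depending on which major-order convention the math code settles on) can be addressed as an xrt_vec4 without casting, e.g.:

struct xrt_matrix_4x4 m = {0};

/* Set the last vec of the matrix in one assignment. */
m.vecs[3] = (struct xrt_vec4){.x = 0.0f, .y = 0.0f, .z = 0.0f, .w = 1.0f};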
 
 /*!
diff --git a/src/xrt/state_trackers/oxr/oxr_input.c b/src/xrt/state_trackers/oxr/oxr_input.c
index 2159f5c88e78461ce5a7fad1658660383fe83f88..3f37a90626e041e534d0d08a26dd76ad8dc57407 100644
--- a/src/xrt/state_trackers/oxr/oxr_input.c
+++ b/src/xrt/state_trackers/oxr/oxr_input.c
@@ -312,6 +312,12 @@ MEGA_HACK_get_binding(struct oxr_logger* log,
 	} else if (strcmp(act->name, "hand_pose") == 0) {
 		oxr_xdev_find_input(xdev, XRT_INPUT_PSMV_BODY_CENTER_POSE,
 		                    &input);
+    } else if (strcmp(act->name, "left_pose") == 0) {
+        oxr_xdev_find_input(xdev, XRT_INPUT_PSMV_BODY_CENTER_POSE,
+                            &input);
+    } else if (strcmp(act->name, "right_pose") == 0) {
+        oxr_xdev_find_input(xdev, XRT_INPUT_PSMV_BODY_CENTER_POSE,
+                            &input);
 	} else if (strcmp(act->name, "vibrate_hand") == 0) {
 		oxr_xdev_find_output(
 		    xdev, XRT_OUTPUT_NAME_PSMV_RUMBLE_VIBRATION, &output);
diff --git a/src/xrt/state_trackers/oxr/oxr_space.c b/src/xrt/state_trackers/oxr/oxr_space.c
index ba4774d96034bb4a29675e2219c9c93d88649878..3e81154850f549df02df99ad9474623d6ba6cd34 100644
--- a/src/xrt/state_trackers/oxr/oxr_space.c
+++ b/src/xrt/state_trackers/oxr/oxr_space.c
@@ -77,6 +77,15 @@ oxr_space_action_create(struct oxr_logger *log,
 	spc->sub_paths = sub_paths;
 	memcpy(&spc->pose, &createInfo->poseInActionSpace, sizeof(spc->pose));
 
+	// Use an identity quaternion if we are passed an uninitialised one.
+	if (math_quat_magnitude(spc->pose.orientation) == 0.0f) {
+		spc->pose.orientation.x = 0.0f;
+		spc->pose.orientation.y = 0.0f;
+		spc->pose.orientation.z = 0.0f;
+		spc->pose.orientation.w = 1.0f;
+	}
+
 	*out_space = spc;
 
 	return XR_SUCCESS;
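The zero-magnitude test above only catches an all-zeros (zero-initialised) orientation; if apps may pass garbage rather than zeros, a tolerance check on the magnitude is a touch safer. A sketch, with an illustrative threshold:

#include <math.h>
#include <stdbool.h>

/* Accept only orientations reasonably close to unit length. */
static bool
quat_is_usable(struct xrt_quat q)
{
	return fabsf(math_quat_magnitude(q) - 1.0f) < 0.01f;
}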
diff --git a/src/xrt/state_trackers/oxr/oxr_xdev.c b/src/xrt/state_trackers/oxr/oxr_xdev.c
index 6e1589d0ede5fcdb9084fc90276699bc2d3f3741..c7236e392ea0818ab0795a445858d4ab095803f9 100644
--- a/src/xrt/state_trackers/oxr/oxr_xdev.c
+++ b/src/xrt/state_trackers/oxr/oxr_xdev.c
@@ -77,7 +77,7 @@ oxr_xdev_get_pose_at(struct oxr_logger *log,
 
 	// clang-format off
 	bool valid_pos = (relation.relation_flags & XRT_SPACE_RELATION_POSITION_VALID_BIT) != 0;
-    bool valid_ori = true; //(relation.relation_flags & XRT_SPACE_RELATION_ORIENTATION_VALID_BIT) != 0;
+	bool valid_ori = (relation.relation_flags & XRT_SPACE_RELATION_ORIENTATION_VALID_BIT) != 0;
 	// clang-format on
 
 	if (valid_ori) {
diff --git a/src/xrt/state_trackers/prober/p_prober.c b/src/xrt/state_trackers/prober/p_prober.c
index 9a4c180c39addefdfffd072f33b3df4d7ba30d80..fd979fc2b84f228a16ed3738e0b2457fcd818e1a 100644
--- a/src/xrt/state_trackers/prober/p_prober.c
+++ b/src/xrt/state_trackers/prober/p_prober.c
@@ -16,6 +16,7 @@
 
 #include <frameservers/common/frameserver.h>
 #include <frameservers/v4l2/v4l2_frameserver.h>
+#include <frameservers/uvc/uvc_frameserver.h>
 
 
 /*
@@ -474,26 +475,49 @@ probe(struct xrt_prober* xp)
 		return -1;
 	}
 
-	printf("we are done with our probe, now start up a tracking camera");
-	frameserver_instance_t* fs = frameserver_create(FRAMESERVER_TYPE_V4L2);
-	//get our count of source descriptors
-	uint32_t source_count;
-	fs->frameserver_enumerate_sources(fs,NULL,&source_count);
-	v4l2_source_descriptor_t* source_descriptor_array = malloc(sizeof(v4l2_source_descriptor_t) * source_count);
-	fs->frameserver_enumerate_sources(fs,source_descriptor_array,&source_count);
-
-	for (uint32_t i=0;i<source_count;i++){
-		v4l2_source_descriptor_t source = source_descriptor_array[i];
-		//just use whatever
-		printf("source width: %d source height %d source rate %d\n",source.width,source.height,source.rate);
-            if (strcmp(source.model,"USB Camera-OV580: USB Camera-OV") == 0 && source.format == FORMAT_YUV444_UINT8) {
-                if (source.width == 1748 && source.height == 408 && source.rate == 166666) {
+    printf("we are done with our probe, now start up a tracking camera\n");
+
+    // Start up the PS4 camera (currently disabled).
+
+    //frameserver_instance_t* fs = frameserver_create(FRAMESERVER_TYPE_V4L2);
+    //uint32_t source_count;
+    //fs->frameserver_enumerate_sources(fs,NULL,&source_count);
+    //v4l2_source_descriptor_t* source_descriptor_array = U_TYPED_ARRAY_CALLOC(v4l2_source_descriptor_t,source_count);
+    //fs->frameserver_enumerate_sources(fs,source_descriptor_array,&source_count);
+
+    //for (uint32_t i=0;i<source_count;i++){
+        //v4l2_source_descriptor_t source = source_descriptor_array[i];
+        //if (strcmp(source.model,"USB Camera-OV580: USB Camera-OV") == 0 && source.format == FORMAT_YUV444_UINT8) {
+        //        if (source.width == 1748 && source.height == 408 && source.rate == 166666) {
+        //            fs->frameserver_stream_start(fs,&source);
+        //        }
+        //    }
+    //}
+    //free (source_descriptor_array);
+
+
+    // Start up the ELP camera.
+
+    frameserver_instance_t* fs = frameserver_create(FRAMESERVER_TYPE_UVC);
+    uint32_t source_count;
+    fs->frameserver_enumerate_sources(fs,NULL,&source_count);
+    uvc_source_descriptor_t* source_descriptor_array = U_TYPED_ARRAY_CALLOC(uvc_source_descriptor_t,source_count);
+    fs->frameserver_enumerate_sources(fs,source_descriptor_array,&source_count);
+
+    for (uint32_t i = 0; i < source_count; i++) {
+        uvc_source_descriptor_t source = source_descriptor_array[i];
+        if (source.product_id == 0x9750 && source.vendor_id == 0x05a3 &&
+            source.format == FORMAT_YUV444_UINT8) {
+                if (source.width == 1280 && source.height == 480 &&
+                    source.rate == 166666) {
                     fs->frameserver_stream_start(fs,&source);
                 }
             }
-	}
-	free (source_descriptor_array);
-	return 0;
+    }
+    free(source_descriptor_array);
+
+    return 0;
 }
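The ELP match above hardcodes USB IDs and a mode inline; a sketch of pulling it into a small table, with fields mirroring uvc_source_descriptor_t (names here are illustrative):

#include <stdbool.h>
#include <stdint.h>

struct camera_match {
	uint16_t vendor_id;
	uint16_t product_id;
	uint32_t width, height, rate;
};

static const struct camera_match elp_stereo_match = {
    .vendor_id = 0x05a3, .product_id = 0x9750,
    .width = 1280, .height = 480, .rate = 166666,
};

static bool
source_matches(const uvc_source_descriptor_t* s, const struct camera_match* m)
{
	return s->vendor_id == m->vendor_id && s->product_id == m->product_id &&
	       s->width == m->width && s->height == m->height &&
	       s->rate == m->rate && s->format == FORMAT_YUV444_UINT8;
}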
 
 static int