diff --git a/src/xrt/drivers/montrack/filters/common/filter.c b/src/xrt/drivers/montrack/filters/common/filter.c
index 1810a19062163de56f6b53506b0eab2ad031a313..32dd3e42f629be5a78d1cdf430efba9ae1901b21 100644
--- a/src/xrt/drivers/montrack/filters/common/filter.c
+++ b/src/xrt/drivers/montrack/filters/common/filter.c
@@ -2,23 +2,26 @@
 #include "filter_opencv_kalman.h"
 #include <string.h>
 
-filter_instance_t* filter_create(filter_type_t t) {
-	filter_instance_t* i = calloc(1,sizeof(filter_instance_t));
+filter_instance_t*
+filter_create(filter_type_t t)
+{
+	filter_instance_t* i = calloc(1, sizeof(filter_instance_t));
 	if (i) {
 		switch (t) {
-		    case FILTER_TYPE_OPENCV_KALMAN:
-			    i->tracker_type = t;
-				i->filter_configure  = filter_opencv_kalman_configure;
-				i->filter_get_state = filter_opencv_kalman_get_state;
-				i->filter_predict_state = filter_opencv_kalman_predict_state;
-				i->filter_set_state = filter_opencv_kalman_set_state;
-				i->filter_queue = filter_opencv_kalman_queue;
-				i->internal_instance = filter_opencv_kalman_create(i);
-			    break;
-		    case FILTER_TYPE_NONE:
-		    default:
-			    free(i);
-			    return NULL;
+		case FILTER_TYPE_OPENCV_KALMAN:
+			i->tracker_type = t;
+			i->filter_configure = filter_opencv_kalman_configure;
+			i->filter_get_state = filter_opencv_kalman_get_state;
+			i->filter_predict_state =
+			    filter_opencv_kalman_predict_state;
+			i->filter_set_state = filter_opencv_kalman_set_state;
+			i->filter_queue = filter_opencv_kalman_queue;
+			i->internal_instance = filter_opencv_kalman_create(i);
+			break;
+		case FILTER_TYPE_NONE:
+		default:
+			free(i);
+			return NULL;
 			break;
 		}
 		return i;
@@ -26,12 +29,13 @@ filter_instance_t* filter_create(filter_type_t t) {
 	return NULL;
 }
 
-bool filters_test(){
+bool
+filters_test()
+{
 
-	//create a filter
+	// create a filter
 	filter_instance_t* filter = filter_create(FILTER_TYPE_OPENCV_KALMAN);
-	if (! filter)
-	{
+	if (!filter) {
 		return false;
 	}
 
diff --git a/src/xrt/drivers/montrack/filters/common/filter.h b/src/xrt/drivers/montrack/filters/common/filter.h
index 3b8a5107cbf10623575fcd34bceb606d0178173d..34ae5e6ee0ebf777fd515938d45163abaf5cd6f5 100644
--- a/src/xrt/drivers/montrack/filters/common/filter.h
+++ b/src/xrt/drivers/montrack/filters/common/filter.h
@@ -9,7 +9,8 @@ typedef void* filter_internal_instance_ptr;
 typedef void* filter_configuration_ptr;
 typedef void* filter_state_ptr;
 
-typedef struct filter_state {
+typedef struct filter_state
+{
 	struct xrt_pose pose;
 	bool has_position;
 	bool has_rotation;
@@ -21,23 +22,34 @@ typedef struct filter_state {
 } filter_state_t;
 
 
-typedef enum filter_type {
+typedef enum filter_type
+{
 	FILTER_TYPE_NONE,
 	FILTER_TYPE_OPENCV_KALMAN
 } filter_type_t;
 
-typedef struct _filter_instance {
-	 filter_type_t tracker_type;
-	 bool (*filter_queue)(filter_instance_ptr inst,tracker_measurement_t* measurement);
-	 bool (*filter_set_state)(filter_instance_ptr inst,filter_state_ptr state);
-	 bool (*filter_get_state)(filter_instance_ptr inst,filter_state_ptr state);
-	 bool (*filter_predict_state)(filter_instance_ptr inst, filter_state_t* state, timepoint_ns time);
-	 bool (*filter_configure)(filter_instance_ptr inst, filter_configuration_ptr config);
-	 filter_internal_instance_ptr internal_instance;
+typedef struct _filter_instance
+{
+	filter_type_t tracker_type;
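+	// per-backend entry points, bound by filter_create()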
+	bool (*filter_queue)(filter_instance_ptr inst,
+	                     tracker_measurement_t* measurement);
+	bool (*filter_set_state)(filter_instance_ptr inst,
+	                         filter_state_ptr state);
+	bool (*filter_get_state)(filter_instance_ptr inst,
+	                         filter_state_ptr state);
+	bool (*filter_predict_state)(filter_instance_ptr inst,
+	                             filter_state_t* state,
+	                             timepoint_ns time);
+	bool (*filter_configure)(filter_instance_ptr inst,
+	                         filter_configuration_ptr config);
+	filter_internal_instance_ptr internal_instance;
 } filter_instance_t;
 
-filter_instance_t* filter_create(filter_type_t t);
-bool filter_destroy(filter_instance_t* inst);
-bool filters_test();
+filter_instance_t*
+filter_create(filter_type_t t);
+bool
+filter_destroy(filter_instance_t* inst);
+bool
+filters_test();
 
-#endif //FILTER_H
+#endif // FILTER_H
diff --git a/src/xrt/drivers/montrack/filters/filter_opencv_kalman.cpp b/src/xrt/drivers/montrack/filters/filter_opencv_kalman.cpp
index a4d09caa019bf77678baf0dba142306dc1911b8e..39008ec1ac33eb35b83389f3e56a5646b4630298 100644
--- a/src/xrt/drivers/montrack/filters/filter_opencv_kalman.cpp
+++ b/src/xrt/drivers/montrack/filters/filter_opencv_kalman.cpp
@@ -2,7 +2,8 @@
 #include "opencv4/opencv2/opencv.hpp"
 
 
-typedef struct filter_opencv_kalman_instance {
+typedef struct filter_opencv_kalman_instance
+{
 	bool configured;
 	opencv_filter_configuration_t configuration;
 	cv::KalmanFilter kalman_filter;
@@ -14,72 +15,105 @@ typedef struct filter_opencv_kalman_instance {
 } filter_opencv_kalman_instance_t;
 
 
-bool filter_opencv_kalman__destroy(filter_instance_t* inst) {
-	//do nothing
+bool
+filter_opencv_kalman__destroy(filter_instance_t* inst)
+{
+	// do nothing
 	return false;
 }
 
-bool filter_opencv_kalman_queue(filter_instance_t* inst,tracker_measurement_t* measurement) {
-	filter_opencv_kalman_instance_t* internal = (filter_opencv_kalman_instance_t*)inst->internal_instance;
+bool
+filter_opencv_kalman_queue(filter_instance_t* inst,
+                           tracker_measurement_t* measurement)
+{
+	filter_opencv_kalman_instance_t* internal =
+	    (filter_opencv_kalman_instance_t*)inst->internal_instance;
 	printf("queueing measurement in filter\n");
-	internal->observation.at<float>(0,0) = measurement->pose.position.x;
-	internal->observation.at<float>(1,0) = measurement->pose.position.y;
-	internal->observation.at<float>(2,0) = measurement->pose.position.z;
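+	// the observation vector is position-only (3x1); orientation is not
+	// filtered by this 6-state position/velocity filter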
+	internal->observation.at<float>(0, 0) = measurement->pose.position.x;
+	internal->observation.at<float>(1, 0) = measurement->pose.position.y;
+	internal->observation.at<float>(2, 0) = measurement->pose.position.z;
 	internal->kalman_filter.correct(internal->observation);
 	internal->running = true;
 	return false;
 }
-bool filter_opencv_kalman_get_state(filter_instance_t* inst,filter_state_t* state) {
+bool
+filter_opencv_kalman_get_state(filter_instance_t* inst, filter_state_t* state)
+{
 	return false;
 }
-bool filter_opencv_kalman_set_state(filter_instance_t* inst,filter_state_t* state) {
+bool
+filter_opencv_kalman_set_state(filter_instance_t* inst, filter_state_t* state)
+{
 	return false;
 }
-bool filter_opencv_kalman_predict_state(filter_instance_t* inst, filter_state_t* state, timepoint_ns time) {
-	filter_opencv_kalman_instance_t* internal = (filter_opencv_kalman_instance_t*)inst->internal_instance;
-	//printf("getting filtered pose\n");
-	if (! internal->running) {
+bool
+filter_opencv_kalman_predict_state(filter_instance_t* inst,
+                                   filter_state_t* state,
+                                   timepoint_ns time)
+{
+	filter_opencv_kalman_instance_t* internal =
+	    (filter_opencv_kalman_instance_t*)inst->internal_instance;
+	// printf("getting filtered pose\n");
+	if (!internal->running) {
 		return false;
 	}
 	internal->prediction = internal->kalman_filter.predict();
 	state->has_position = true;
-	state->pose.position.x = internal->prediction.at<float>(0,0);
-	state->pose.position.y = internal->prediction.at<float>(1,0);
-	state->pose.position.z = internal->prediction.at<float>(2,0);
+	state->pose.position.x = internal->prediction.at<float>(0, 0);
+	state->pose.position.y = internal->prediction.at<float>(1, 0);
+	state->pose.position.z = internal->prediction.at<float>(2, 0);
 	return true;
 }
-bool filter_opencv_kalman_configure(filter_instance_t* inst, opencv_filter_configuration_t* config) {
-	filter_opencv_kalman_instance_t* internal = (filter_opencv_kalman_instance_t*)inst->internal_instance;
+bool
+filter_opencv_kalman_configure(filter_instance_t* inst,
+                               opencv_filter_configuration_t* config)
+{
+	filter_opencv_kalman_instance_t* internal =
+	    (filter_opencv_kalman_instance_t*)inst->internal_instance;
 	internal->configuration = *config;
-	cv::setIdentity(internal->kalman_filter.processNoiseCov, cv::Scalar::all(internal->configuration.process_noise_cov));
-	cv::setIdentity(internal->kalman_filter.measurementNoiseCov, cv::Scalar::all(internal->configuration.measurement_noise_cov));
+	cv::setIdentity(
+	    internal->kalman_filter.processNoiseCov,
+	    cv::Scalar::all(internal->configuration.process_noise_cov));
+	cv::setIdentity(
+	    internal->kalman_filter.measurementNoiseCov,
+	    cv::Scalar::all(internal->configuration.measurement_noise_cov));
 	internal->configured = true;
 	return true;
 }
 
 
 
-filter_opencv_kalman_instance_t* filter_opencv_kalman_create(filter_instance_t* inst) {
-	filter_opencv_kalman_instance_t* i = (filter_opencv_kalman_instance_t*)calloc(1,sizeof(filter_opencv_kalman_instance_t));
+filter_opencv_kalman_instance_t*
+filter_opencv_kalman_create(filter_instance_t* inst)
+{
+	filter_opencv_kalman_instance_t* i =
+	    (filter_opencv_kalman_instance_t*)calloc(
+	        1, sizeof(filter_opencv_kalman_instance_t));
 	if (i) {
-		float dt=1.0;
-		i->kalman_filter.init(6,3);
-		i->observation = cv::Mat(3,1,CV_32F);
-		i->prediction = cv::Mat(6,1,CV_32F);
-		i->kalman_filter.transitionMatrix = (cv::Mat_<float>(6, 6) <<  1.0, 0.0, 0.0, dt, 0.0, 0.0,
-		                                                           0.0, 1.0, 0.0, 0.0, dt, 0.0,
-		                                                           0.0, 0.0, 1.0, 0.0, 0.0, dt,
-		                                                           0.0, 0.0, 0.0, 1.0, 0.0, 0.0,
-		                                                           0.0, 0.0, 0.0, 0.0, 1.0, 0.0,
-		                                                           0.0, 0.0, 0.0, 0.0, 0.0, 1.0 );
+		float dt = 1.0;
+		i->kalman_filter.init(6, 3);
+		i->observation = cv::Mat(3, 1, CV_32F);
+		i->prediction = cv::Mat(6, 1, CV_32F);
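+		// constant-velocity model: the state is [x, y, z, vx, vy, vz],
+		// so each predict step adds dt * velocity to the position and
+		// leaves the velocity unchanged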
+		i->kalman_filter.transitionMatrix =
+		    (cv::Mat_<float>(6, 6) << 1.0, 0.0, 0.0, dt, 0.0, 0.0, 0.0,
+		     1.0, 0.0, 0.0, dt, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, dt, 0.0,
+		     0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0,
+		     0.0, 0.0, 0.0, 0.0, 1.0);
 
-		cv::setIdentity(i->kalman_filter.measurementMatrix,cv::Scalar::all(1.0f));
-		cv::setIdentity(i->kalman_filter.errorCovPost, cv::Scalar::all(0.0f));
+		cv::setIdentity(i->kalman_filter.measurementMatrix,
+		                cv::Scalar::all(1.0f));
+		cv::setIdentity(i->kalman_filter.errorCovPost,
+		                cv::Scalar::all(0.0f));
 
-		// our filter parameters set the process and measurement noise covariances.
+		// our filter parameters set the process and measurement noise
+		// covariances.
 
-		cv::setIdentity(i->kalman_filter.processNoiseCov, cv::Scalar::all(i->configuration.process_noise_cov));
-		cv::setIdentity(i->kalman_filter.measurementNoiseCov, cv::Scalar::all(i->configuration.measurement_noise_cov));
+		cv::setIdentity(
+		    i->kalman_filter.processNoiseCov,
+		    cv::Scalar::all(i->configuration.process_noise_cov));
+		cv::setIdentity(
+		    i->kalman_filter.measurementNoiseCov,
+		    cv::Scalar::all(i->configuration.measurement_noise_cov));
 
 		i->configured = false;
 		i->running = false;
@@ -87,5 +121,3 @@ filter_opencv_kalman_instance_t* filter_opencv_kalman_create(filter_instance_t*
 	}
 	return NULL;
 }
-
-
diff --git a/src/xrt/drivers/montrack/filters/filter_opencv_kalman.h b/src/xrt/drivers/montrack/filters/filter_opencv_kalman.h
index 251e46dc78cc9454465def00a3b75b1ce77e9bf8..4f4b3af5be8fa8a41a8f5159cf374a1872f6d28d 100644
--- a/src/xrt/drivers/montrack/filters/filter_opencv_kalman.h
+++ b/src/xrt/drivers/montrack/filters/filter_opencv_kalman.h
@@ -4,12 +4,14 @@
 #include <xrt/xrt_defines.h>
 #include "common/filter.h"
 
-typedef struct opencv_filter_configuration {
+typedef struct opencv_filter_configuration
+{
 	float measurement_noise_cov;
 	float process_noise_cov;
-}opencv_filter_configuration_t;
+} opencv_filter_configuration_t;
 
-typedef struct opencv_kalman_filter_state {
+typedef struct opencv_kalman_filter_state
+{
 	struct xrt_pose pose;
 } opencv_kalman_filter_state_t;
 
@@ -17,21 +19,32 @@ typedef struct opencv_kalman_filter_state {
 extern "C" {
 #endif
 
-//forward declare this, as it contains C++ stuff
+// forward declare this, as it contains C++ stuff
 typedef struct filter_opencv_kalman_instance filter_opencv_kalman_instance_t;
 
 
-filter_opencv_kalman_instance_t* filter_opencv_kalman_create(filter_instance_t* inst);
-bool filter_opencv_kalman__destroy(filter_instance_t* inst);
-
-bool filter_opencv_kalman_queue(filter_instance_t* inst,tracker_measurement_t* measurement);
-bool filter_opencv_kalman_get_state(filter_instance_t* inst,filter_state_t* state);
-bool filter_opencv_kalman_set_state(filter_instance_t* inst,filter_state_t* state);
-bool filter_opencv_kalman_predict_state(filter_instance_t* inst, filter_state_t*, timepoint_ns time);
-bool filter_opencv_kalman_configure(filter_instance_t* inst, opencv_filter_configuration_t* config);
+filter_opencv_kalman_instance_t*
+filter_opencv_kalman_create(filter_instance_t* inst);
+bool
+filter_opencv_kalman__destroy(filter_instance_t* inst);
+
+bool
+filter_opencv_kalman_queue(filter_instance_t* inst,
+                           tracker_measurement_t* measurement);
+bool
+filter_opencv_kalman_get_state(filter_instance_t* inst, filter_state_t* state);
+bool
+filter_opencv_kalman_set_state(filter_instance_t* inst, filter_state_t* state);
+bool
+filter_opencv_kalman_predict_state(filter_instance_t* inst,
+                                   filter_state_t*,
+                                   timepoint_ns time);
+bool
+filter_opencv_kalman_configure(filter_instance_t* inst,
+                               opencv_filter_configuration_t* config);
 
 #ifdef __cplusplus
-} //extern "C"
+} // extern "C"
 #endif
 
-#endif //FILTER_OPENCV_KALMAN_H
+#endif // FILTER_OPENCV_KALMAN_H
diff --git a/src/xrt/drivers/montrack/frameservers/common/frameserver.c b/src/xrt/drivers/montrack/frameservers/common/frameserver.c
index cb2689c7f139e3c980b922d1f7686ab274bb2664..82fd318e80d392808ee3e536639aeee261f0f150 100644
--- a/src/xrt/drivers/montrack/frameservers/common/frameserver.c
+++ b/src/xrt/drivers/montrack/frameservers/common/frameserver.c
@@ -4,20 +4,18 @@
 #include "uvc/uvc_frameserver.h";
 
 
-float format_bytes_per_pixel(frame_format_t f){
-	switch (f){
-	case FORMAT_Y_UINT8:
-		return 1.0f;
-	case FORMAT_YUV420_UINT8:
-		return 1.5f;
+float
+format_bytes_per_pixel(frame_format_t f)
+{
+	switch (f) {
+	case FORMAT_Y_UINT8: return 1.0f;
+	case FORMAT_YUV420_UINT8: return 1.5f;
 	case FORMAT_Y_UINT16:
 	case FORMAT_YUV422_UINT8:
-	case FORMAT_YUYV_UINT8:
-		return 2.0f;
+	case FORMAT_YUYV_UINT8: return 2.0f;
 	case FORMAT_BGR_UINT8:
 	case FORMAT_RGB_UINT8:
-	case FORMAT_YUV444_UINT8:
-		return 3.0f;
+	case FORMAT_YUV444_UINT8: return 3.0f;
 	case FORMAT_RAW:
 	case FORMAT_JPG:
 	default:
@@ -28,11 +26,13 @@ float format_bytes_per_pixel(frame_format_t f){
 }
 
 
-int32_t frame_size_in_bytes(frame_t* f) {
+int32_t
+frame_size_in_bytes(frame_t* f)
+{
 	if (f) {
 		int32_t frame_bytes = -1;
-		//TODO: alpha formats, padding etc.
-		switch (f->format){
+		// TODO: alpha formats, padding etc.
+		switch (f->format) {
 		case FORMAT_Y_UINT8:
 		case FORMAT_YUV420_UINT8:
 		case FORMAT_Y_UINT16:
@@ -44,37 +44,41 @@ int32_t frame_size_in_bytes(frame_t* f) {
 			frame_bytes = f->stride * f->height;
 			break;
 		case FORMAT_JPG:
-			//this is a maximum (assuming YUV444)
+			// this is a maximum (assuming YUV444)
 			frame_bytes = f->width * f->height * 3;
+			break;
 		case FORMAT_RAW:
 		case FORMAT_NONE:
-		default:
-			printf("cannot compute frame size for this format\n");
+		default: printf("cannot compute frame size for this format\n");
 		}
-	return frame_bytes;
+		return frame_bytes;
 	}
 	return -1;
 }
 
-int32_t frame_bytes_per_pixel(frame_t* f){
+int32_t
+frame_bytes_per_pixel(frame_t* f)
+{
 	printf("ERROR: Not implemented\n");
 	return -1;
 }
 
 
-bool frame_split_stereo(frame_t* source, frame_t* left,  frame_t* right){
+bool
+frame_split_stereo(frame_t* source, frame_t* left, frame_t* right)
+{
 	printf("ERROR: Not implemented!\n");
 	return false;
 }
 
-bool frame_extract_plane(frame_t* source, plane_t plane, frame_t* out) {
-	//only handle splitting Y out of YUYV for now
-	if (source->format != FORMAT_YUYV_UINT8 && plane != PLANE_Y){
+bool
+frame_extract_plane(frame_t* source, plane_t plane, frame_t* out)
+{
+	// only handle extracting the Y plane from packed YUV formats for now
+	if ((source->format != FORMAT_YUYV_UINT8 &&
+	     source->format != FORMAT_YUV444_UINT8) ||
+	    plane != PLANE_Y) {
 		printf("ERROR: unhandled plane extraction\n");
 		return false;
 	}
-	if (! source->data)
-	{
+	if (!source->data) {
 		printf("ERROR: no frame data!\n");
 		return false;
 	}
@@ -83,140 +87,175 @@ bool frame_extract_plane(frame_t* source, plane_t plane, frame_t* out) {
 	uint8_t* dest_ptr;
 	uint8_t source_pixel_bytes = format_bytes_per_pixel(source->format);
 	uint32_t source_line_bytes = source->stride;
-    uint8_t dest_pixel_bytes= format_bytes_per_pixel(out->format);
-    uint32_t dest_line_bytes = out->width;
+	uint8_t dest_pixel_bytes = format_bytes_per_pixel(out->format);
+	uint32_t dest_line_bytes = out->width;
 
-	if (! out->data)
-	{
-		printf("allocating data for NULL plane - someone needs to free this!\n");
-        out->data = malloc(frame_size_in_bytes(out));
+	if (!out->data) {
+		printf(
+		    "allocating data for NULL plane - someone needs to free "
+		    "this!\n");
+		out->data = malloc(frame_size_in_bytes(out));
 	}
 
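+	// copying the first byte of every source pixel (Y in both packed
+	// layouts handled below) yields a tightly-packed Y plane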
 	switch (source->format) {
-	    case FORMAT_YUYV_UINT8:
-        case FORMAT_YUV444_UINT8:
-		    for (uint32_t i=0;i< source->height;i++) {
-				for (uint32_t j=0;j<source->width;j++) {
-					source_ptr = source->data + (j * source_pixel_bytes) + (i * source_line_bytes);
-					dest_ptr = out->data + (j * dest_pixel_bytes) + (i * dest_line_bytes);
-					*dest_ptr = *source_ptr;
-				}
+	case FORMAT_YUYV_UINT8:
+	case FORMAT_YUV444_UINT8:
+		for (uint32_t i = 0; i < source->height; i++) {
+			for (uint32_t j = 0; j < source->width; j++) {
+				source_ptr = source->data +
+				             (j * source_pixel_bytes) +
+				             (i * source_line_bytes);
+				dest_ptr = out->data + (j * dest_pixel_bytes) +
+				           (i * dest_line_bytes);
+				*dest_ptr = *source_ptr;
 			}
-		    break;
-	    default:
-		    return false;
+		}
+		break;
+	default: return false;
 	}
 	return true;
 }
 
-bool frame_resample(frame_t* source, frame_t* out) {
-    //TODO: more complete resampling.
-    if (source->format != FORMAT_YUYV_UINT8 && out->format != FORMAT_YUV444_UINT8){
-        printf("ERROR: unhandled resample operation\n");
-        return false;
-    }
-
-    if (! source->data)
-    {
-        printf("ERROR: no frame data!\n");
-        return false;
-    }
-
-    uint8_t* source_ptr;
-    uint8_t* dest_ptr;
-    uint8_t source_pixel_bytes = format_bytes_per_pixel(source->format);
-    uint32_t source_line_bytes = source->stride;
-	uint8_t dest_pixel_bytes= format_bytes_per_pixel(out->format);
+bool
+frame_resample(frame_t* source, frame_t* out)
+{
+	// TODO: more complete resampling.
+	if (source->format != FORMAT_YUYV_UINT8 ||
+	    out->format != FORMAT_YUV444_UINT8) {
+		printf("ERROR: unhandled resample operation\n");
+		return false;
+	}
+
+	if (!source->data) {
+		printf("ERROR: no frame data!\n");
+		return false;
+	}
+
+	uint8_t* source_ptr;
+	uint8_t* dest_ptr;
+	uint8_t source_pixel_bytes = format_bytes_per_pixel(source->format);
+	uint32_t source_line_bytes = source->stride;
+	uint8_t dest_pixel_bytes = format_bytes_per_pixel(out->format);
 	uint32_t dest_line_bytes = out->stride;
 
-    if (! out->data)
-    {
-        printf("allocating data for NULL plane - someone needs to free this!\n");
-        out->data = malloc(frame_size_in_bytes(out));
-    }
-
-    switch (source->format) {
-        uint8_t lastU = 0;
-        case FORMAT_YUYV_UINT8:
-            for (uint32_t i=0;i< source->height;i++) {
-                for (uint32_t j=0;j<source->width;j++) {
-                    source_ptr = source->data + (j * source_pixel_bytes) + (i * source_line_bytes);
-                    dest_ptr = out->data + (j * dest_pixel_bytes) + (i * dest_line_bytes);
-                    *dest_ptr = *source_ptr; //Y
-                    if (j %2 == 0) {
-                        *(dest_ptr+1) = *(source_ptr+1); //U
-                        *(dest_ptr+2) = *(source_ptr+3); //V from next source pixel
-                        lastU = *(dest_ptr+1);
-                    } else {
-                        *(dest_ptr+1) = lastU;
-                        *(dest_ptr+2) = *(source_ptr+1);
-                    }
-                }
-            }
-            return true;
-            break;
-        default:
-            return false;
-    }
-    return false;
+	if (!out->data) {
+		printf(
+		    "allocating data for NULL plane - someone needs to free "
+		    "this!\n");
+		out->data = malloc(frame_size_in_bytes(out));
+	}
+
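+	// YUYV stores a [Y0 U Y1 V] quad per pixel pair: even output pixels
+	// take U/V straight from the quad, odd pixels reuse the cached U
+	// (lastU) and read V relative to their own Y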
+	uint8_t lastU = 0;
+	switch (source->format) {
+	case FORMAT_YUYV_UINT8:
+		for (uint32_t i = 0; i < source->height; i++) {
+			for (uint32_t j = 0; j < source->width; j++) {
+				source_ptr = source->data +
+				             (j * source_pixel_bytes) +
+				             (i * source_line_bytes);
+				dest_ptr = out->data + (j * dest_pixel_bytes) +
+				           (i * dest_line_bytes);
+				*dest_ptr = *source_ptr; // Y
+				if (j % 2 == 0) {
+					*(dest_ptr + 1) = *(source_ptr + 1); // U
+					*(dest_ptr + 2) =
+					    *(source_ptr +
+					      3); // V from next source pixel
+					lastU = *(dest_ptr + 1);
+				} else {
+					*(dest_ptr + 1) = lastU;
+					*(dest_ptr + 2) = *(source_ptr + 1);
+				}
+			}
+		}
+		return true;
+		break;
+	default: return false;
+	}
+	return false;
 }
 
 
-frameserver_instance_t* frameserver_create(frameserver_type_t t) {
-    frameserver_instance_t* i = calloc(1,sizeof(frameserver_instance_t));
-    if (i) {
-        switch (t) {
-		    case FRAMESERVER_TYPE_FFMPEG:
-                i->frameserver_enumerate_sources = ffmpeg_frameserver_enumerate_sources;
-				i->frameserver_configure_capture = ffmpeg_frameserver_configure_capture;
-				i->frameserver_frame_get = ffmpeg_frameserver_get;
-				i->frameserver_is_running = ffmpeg_frameserver_is_running;
-				i->frameserver_register_frame_callback = ffmpeg_frameserver_register_frame_callback;
-				i->frameserver_register_event_callback = ffmpeg_frameserver_register_event_callback;
-				i->frameserver_seek = ffmpeg_frameserver_seek;
-				i->frameserver_stream_stop =ffmpeg_frameserver_stream_stop;
-				i->frameserver_stream_start=ffmpeg_frameserver_stream_start;
-                i->internal_instance = (void*) ffmpeg_frameserver_create(i);
-                break;
-		    case FRAMESERVER_TYPE_UVC:
-                i->frameserver_enumerate_sources = uvc_frameserver_enumerate_sources;
-				i->frameserver_configure_capture = uvc_frameserver_configure_capture;
-				i->frameserver_frame_get = uvc_frameserver_get;
-				i->frameserver_is_running = uvc_frameserver_is_running;
-				i->frameserver_register_frame_callback = uvc_frameserver_register_frame_callback;
-				i->frameserver_register_event_callback = uvc_frameserver_register_event_callback;
-				i->frameserver_seek = uvc_frameserver_seek;
-				i->frameserver_stream_stop =uvc_frameserver_stream_stop;
-				i->frameserver_stream_start=uvc_frameserver_stream_start;
-                i->internal_instance = (void*) uvc_frameserver_create(i);
-                break;
-		    case FRAMESERVER_TYPE_V4L2:
-                i->frameserver_enumerate_sources = v4l2_frameserver_enumerate_sources;
-				i->frameserver_configure_capture = v4l2_frameserver_configure_capture;
-				i->frameserver_frame_get = v4l2_frameserver_get;
-				i->frameserver_is_running = v4l2_frameserver_is_running;
-				i->frameserver_register_frame_callback = v4l2_frameserver_register_frame_callback;
-				i->frameserver_register_event_callback = v4l2_frameserver_register_event_callback;
-				i->frameserver_seek = v4l2_frameserver_seek;
-				i->frameserver_stream_stop =v4l2_frameserver_stream_stop;
-				i->frameserver_stream_start=v4l2_frameserver_stream_start;
-                i->internal_instance = (void*) v4l2_frameserver_create(i);
-                break;
-		    case FRAMESERVER_TYPE_NONE:
-            default:
-                free(i);
-                return NULL;
-            break;
+frameserver_instance_t*
+frameserver_create(frameserver_type_t t)
+{
+	frameserver_instance_t* i = calloc(1, sizeof(frameserver_instance_t));
+	if (i) {
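+		// bind the selected backend's entry points to the generic
+		// frameserver interface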
+		switch (t) {
+		case FRAMESERVER_TYPE_FFMPEG:
+			i->frameserver_enumerate_sources =
+			    ffmpeg_frameserver_enumerate_sources;
+			i->frameserver_configure_capture =
+			    ffmpeg_frameserver_configure_capture;
+			i->frameserver_frame_get = ffmpeg_frameserver_get;
+			i->frameserver_is_running =
+			    ffmpeg_frameserver_is_running;
+			i->frameserver_register_frame_callback =
+			    ffmpeg_frameserver_register_frame_callback;
+			i->frameserver_register_event_callback =
+			    ffmpeg_frameserver_register_event_callback;
+			i->frameserver_seek = ffmpeg_frameserver_seek;
+			i->frameserver_stream_stop =
+			    ffmpeg_frameserver_stream_stop;
+			i->frameserver_stream_start =
+			    ffmpeg_frameserver_stream_start;
+			i->internal_instance =
+			    (void*)ffmpeg_frameserver_create(i);
+			break;
+		case FRAMESERVER_TYPE_UVC:
+			i->frameserver_enumerate_sources =
+			    uvc_frameserver_enumerate_sources;
+			i->frameserver_configure_capture =
+			    uvc_frameserver_configure_capture;
+			i->frameserver_frame_get = uvc_frameserver_get;
+			i->frameserver_is_running = uvc_frameserver_is_running;
+			i->frameserver_register_frame_callback =
+			    uvc_frameserver_register_frame_callback;
+			i->frameserver_register_event_callback =
+			    uvc_frameserver_register_event_callback;
+			i->frameserver_seek = uvc_frameserver_seek;
+			i->frameserver_stream_stop =
+			    uvc_frameserver_stream_stop;
+			i->frameserver_stream_start =
+			    uvc_frameserver_stream_start;
+			i->internal_instance = (void*)uvc_frameserver_create(i);
+			break;
+		case FRAMESERVER_TYPE_V4L2:
+			i->frameserver_enumerate_sources =
+			    v4l2_frameserver_enumerate_sources;
+			i->frameserver_configure_capture =
+			    v4l2_frameserver_configure_capture;
+			i->frameserver_frame_get = v4l2_frameserver_get;
+			i->frameserver_is_running = v4l2_frameserver_is_running;
+			i->frameserver_register_frame_callback =
+			    v4l2_frameserver_register_frame_callback;
+			i->frameserver_register_event_callback =
+			    v4l2_frameserver_register_event_callback;
+			i->frameserver_seek = v4l2_frameserver_seek;
+			i->frameserver_stream_stop =
+			    v4l2_frameserver_stream_stop;
+			i->frameserver_stream_start =
+			    v4l2_frameserver_stream_start;
+			i->internal_instance =
+			    (void*)v4l2_frameserver_create(i);
+			break;
+		case FRAMESERVER_TYPE_NONE:
+		default:
+			free(i);
+			return NULL;
+			break;
 		}
-        return i;
-    }
-    return NULL;
+		return i;
+	}
+	return NULL;
 }
 
-bool frameservers_test() {
+bool
+frameservers_test()
+{
 
-    ffmpeg_frameserver_test();
-	//uvc_frameserver_test();
-   // v4l2_frameserver_test();
-    return true;
+	ffmpeg_frameserver_test();
+	// uvc_frameserver_test();
+	// v4l2_frameserver_test();
+	return true;
 }
diff --git a/src/xrt/drivers/montrack/frameservers/common/frameserver.h b/src/xrt/drivers/montrack/frameservers/common/frameserver.h
index 70a82c978366ea32b176db12dce7b34b7fad4d31..eff618eddc119197d46253d019af3b9aa91a8b1d 100644
--- a/src/xrt/drivers/montrack/frameservers/common/frameserver.h
+++ b/src/xrt/drivers/montrack/frameservers/common/frameserver.h
@@ -12,24 +12,68 @@
 extern "C" {
 #endif
 
-#define MAX_PLANES 3 //this is what we see currently in e.g. RGB,YUV
-
-//frame
-typedef enum frame_format {FORMAT_NONE,FORMAT_RAW,FORMAT_Y_UINT8,FORMAT_Y_UINT16,FORMAT_RGB_UINT8,FORMAT_BGR_UINT8,FORMAT_YUYV_UINT8,FORMAT_YUV444_UINT8,FORMAT_YUV422_UINT8,FORMAT_YUV420_UINT8,FORMAT_JPG} frame_format_t;
-typedef enum stereo_format {STEREO_NONE,STEREO_SBS,STEREO_OAU} stereo_format_t;
-typedef enum plane {PLANE_NONE,PLANE_R,PLANE_G,PLANE_B,PLANE_Y,PLANE_U,PLANE_V} plane_t;
-typedef enum chroma_sampling {CHROMA_SAMP_NONE,CHROMA_SAMP_444,CHROMA_SAMP_422,CHROMA_SAMP_411} chroma_sampling_t;
-typedef enum plane_layout {PLANE_LAYOUT_COMPOSITE,PLANE_LAYOUT_SEPARATE} plane_layout_t;
-typedef enum sampling {SAMPLING_NONE,SAMPLING_UPSAMPLED, SAMPLING_DOWNSAMPLED} sampling_t;
-
-//unnormalised pixel coordinates for clipping ROIs
-typedef struct frame_rect {
-    struct xrt_vec2 tl;
-    struct xrt_vec2 br;
+#define MAX_PLANES 3 // this is what we see currently in e.g. RGB,YUV
+
+// frame
+typedef enum frame_format
+{
+	FORMAT_NONE,
+	FORMAT_RAW,
+	FORMAT_Y_UINT8,
+	FORMAT_Y_UINT16,
+	FORMAT_RGB_UINT8,
+	FORMAT_BGR_UINT8,
+	FORMAT_YUYV_UINT8,
+	FORMAT_YUV444_UINT8,
+	FORMAT_YUV422_UINT8,
+	FORMAT_YUV420_UINT8,
+	FORMAT_JPG
+} frame_format_t;
+typedef enum stereo_format
+{
+	STEREO_NONE,
+	STEREO_SBS,
+	STEREO_OAU
+} stereo_format_t;
+typedef enum plane
+{
+	PLANE_NONE,
+	PLANE_R,
+	PLANE_G,
+	PLANE_B,
+	PLANE_Y,
+	PLANE_U,
+	PLANE_V
+} plane_t;
+typedef enum chroma_sampling
+{
+	CHROMA_SAMP_NONE,
+	CHROMA_SAMP_444,
+	CHROMA_SAMP_422,
+	CHROMA_SAMP_411
+} chroma_sampling_t;
+typedef enum plane_layout
+{
+	PLANE_LAYOUT_COMPOSITE,
+	PLANE_LAYOUT_SEPARATE
+} plane_layout_t;
+typedef enum sampling
+{
+	SAMPLING_NONE,
+	SAMPLING_UPSAMPLED,
+	SAMPLING_DOWNSAMPLED
+} sampling_t;
+
+// unnormalised pixel coordinates for clipping ROIs
+typedef struct frame_rect
+{
+	struct xrt_vec2 tl;
+	struct xrt_vec2 br;
 } frame_rect_t;
 
-//basic frame data structure - holds a pointer to buffer.
-typedef struct frame {
+// basic frame data structure - holds a pointer to buffer.
+typedef struct frame
+{
 	uint16_t width;
 	uint16_t height;
 	uint16_t stride;
@@ -37,18 +81,22 @@ typedef struct frame {
 	stereo_format_t stereo_format;
 	uint32_t size_bytes;
 	uint8_t* data;
-	chroma_sampling_t chroma_sampling; //unused
-	plane_layout_t plane_layout; //unused
-	uint8_t* u_data; //unused
-	uint8_t* v_data; //unused
+	chroma_sampling_t chroma_sampling; // unused
+	plane_layout_t plane_layout;       // unused
+	uint8_t* u_data;                   // unused
+	uint8_t* v_data;                   // unused
 	uint64_t timestamp;
 	uint64_t source_timestamp;
-	uint64_t source_id; //used to tag frames with the source they originated from
+	uint64_t source_id; // used to tag frames with the source they
+	                    // originated from
 } frame_t;
 
-typedef struct capture_parameters{
-	// used to configure cameras. since there is no guarantee every frameserver will support any/all of these
-	// params, a 'best effort' should be made to apply them. all numeric values are normalised floats for broad applicability
+typedef struct capture_parameters
+{
+	// used to configure cameras. since there is no guarantee every
+	// frameserver will support any/all of these params, a 'best effort'
+	// should be made to apply them. all numeric values are normalised
+	// floats for broad applicability
 	float gain;
 	float exposure;
 } capture_parameters_t;
@@ -56,51 +104,81 @@ typedef struct capture_parameters{
 
 // frameserver
 
-typedef enum frameserver_type { FRAMESERVER_TYPE_NONE,FRAMESERVER_TYPE_FFMPEG,FRAMESERVER_TYPE_UVC,FRAMESERVER_TYPE_V4L2 } frameserver_type_t;
+typedef enum frameserver_type
+{
+	FRAMESERVER_TYPE_NONE,
+	FRAMESERVER_TYPE_FFMPEG,
+	FRAMESERVER_TYPE_UVC,
+	FRAMESERVER_TYPE_V4L2
+} frameserver_type_t;
 
-//Interface types
+// Interface types
 typedef void* frameserver_internal_instance_ptr;
 typedef void* frameserver_source_descriptor_ptr;
 typedef void* frameserver_instance_ptr;
 
 
 
-
 typedef void (*frame_consumer_callback_func)(void* instance, frame_t* frame);
 
 
 // Frameserver API
 
-typedef struct _frameserver_instance {
-    frameserver_type_t frameserver_type;
-	bool (*frameserver_enumerate_sources)(frameserver_instance_ptr inst, frameserver_source_descriptor_ptr sources, uint32_t* count);
-	bool (*frameserver_configure_capture)(frameserver_instance_ptr,capture_parameters_t cp);
-	bool (*frameserver_frame_get)(frameserver_instance_ptr inst,frame_t* _frame);
-	void (*frameserver_register_frame_callback)(frameserver_instance_ptr inst, void* target_instance, frame_consumer_callback_func target_func);
-	void (*frameserver_register_event_callback)(frameserver_instance_ptr inst, void* target_instance, event_consumer_callback_func target_func);
-	bool (*frameserver_seek)(frameserver_instance_ptr inst, uint64_t timestamp);
-	bool (*frameserver_stream_start)(frameserver_instance_ptr inst, frameserver_source_descriptor_ptr source);
+typedef struct _frameserver_instance
+{
+	frameserver_type_t frameserver_type;
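+	// when 'sources' is NULL, implementations report the number of
+	// available sources via 'count' instead of filling the array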
+	bool (*frameserver_enumerate_sources)(
+	    frameserver_instance_ptr inst,
+	    frameserver_source_descriptor_ptr sources,
+	    uint32_t* count);
+	bool (*frameserver_configure_capture)(frameserver_instance_ptr,
+	                                      capture_parameters_t cp);
+	bool (*frameserver_frame_get)(frameserver_instance_ptr inst,
+	                              frame_t* _frame);
+	void (*frameserver_register_frame_callback)(
+	    frameserver_instance_ptr inst,
+	    void* target_instance,
+	    frame_consumer_callback_func target_func);
+	void (*frameserver_register_event_callback)(
+	    frameserver_instance_ptr inst,
+	    void* target_instance,
+	    event_consumer_callback_func target_func);
+	bool (*frameserver_seek)(frameserver_instance_ptr inst,
+	                         uint64_t timestamp);
+	bool (*frameserver_stream_start)(
+	    frameserver_instance_ptr inst,
+	    frameserver_source_descriptor_ptr source);
 	bool (*frameserver_stream_stop)(frameserver_instance_ptr inst);
 	bool (*frameserver_is_running)(frameserver_instance_ptr inst);
 	frameserver_internal_instance_ptr internal_instance;
-    } frameserver_instance_t;
-
-frameserver_instance_t* frameserver_create(frameserver_type_t t);
-bool frameserver_destroy(frameserver_instance_t* inst);
-
-//bool frame_data_alloc(frame_t*);
-//bool frame_data_free(frame_t*);
-int32_t frame_size_in_bytes(frame_t* f);
-int32_t frame_bytes_per_pixel(frame_t* f);
-float format_bytes_per_pixel(frame_format_t f); //this is a float to support e.g. YUV420
-bool frame_split_stereo(frame_t* source, frame_t* left, frame_t* right);
-bool frame_extract_plane(frame_t* source,plane_t plane,frame_t* out);
-bool frame_resample(frame_t* source, frame_t* out);
-
-bool frameservers_test();
+} frameserver_instance_t;
+
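+// typical use, as sketched by ffmpeg_frameserver_test():
+//
+//   frameserver_instance_t* fs = frameserver_create(FRAMESERVER_TYPE_UVC);
+//   uint32_t count = 0;
+//   fs->frameserver_enumerate_sources(fs, NULL, &count); // query count
+//   /* allocate 'count' descriptors, enumerate again, pick one, register
+//      callbacks, then call frameserver_stream_start() */
+//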
+frameserver_instance_t*
+frameserver_create(frameserver_type_t t);
+bool
+frameserver_destroy(frameserver_instance_t* inst);
+
+// bool frame_data_alloc(frame_t*);
+// bool frame_data_free(frame_t*);
+int32_t
+frame_size_in_bytes(frame_t* f);
+int32_t
+frame_bytes_per_pixel(frame_t* f);
+float
+format_bytes_per_pixel(
+    frame_format_t f); // this is a float to support e.g. YUV420
+bool
+frame_split_stereo(frame_t* source, frame_t* left, frame_t* right);
+bool
+frame_extract_plane(frame_t* source, plane_t plane, frame_t* out);
+bool
+frame_resample(frame_t* source, frame_t* out);
+
+bool
+frameservers_test();
 
 #ifdef __cplusplus
 }
 #endif
 
-#endif //FRAMESERVER_H
+#endif // FRAMESERVER_H
diff --git a/src/xrt/drivers/montrack/frameservers/ffmpeg/ffmpeg_frameserver.c b/src/xrt/drivers/montrack/frameservers/ffmpeg/ffmpeg_frameserver.c
index a41afd8068704efaa12fb6e3486be4cad249d016..23eafa1ea13ab5c1b2bff07ac277193b0ecc61b8 100644
--- a/src/xrt/drivers/montrack/frameservers/ffmpeg/ffmpeg_frameserver.c
+++ b/src/xrt/drivers/montrack/frameservers/ffmpeg/ffmpeg_frameserver.c
@@ -5,236 +5,302 @@
 
 #define DUMMY_FILE "/home/pblack/tracker_test.avi"
 
-bool ffmpeg_source_create(ffmpeg_source_descriptor_t* desc)
+bool
+ffmpeg_source_create(ffmpeg_source_descriptor_t* desc)
 {
-   // do nothing right now
-    return true;
+	// do nothing right now
+	return true;
 }
 
-bool ffmpeg_source_destroy(ffmpeg_source_descriptor_t* desc)
+bool
+ffmpeg_source_destroy(ffmpeg_source_descriptor_t* desc)
 {
-   // do nothing right now
-    return true;
+	// do nothing right now
+	return true;
 }
 
-ffmpeg_frameserver_instance_t* ffmpeg_frameserver_create(frameserver_instance_t* inst) {
-    //TODO use macro here
-    ffmpeg_frameserver_instance_t* i = calloc(1,sizeof(ffmpeg_frameserver_instance_t));
-    if (i) {
-		i->is_running=false;
-        return i;
-    }
-    return NULL;
+ffmpeg_frameserver_instance_t*
+ffmpeg_frameserver_create(frameserver_instance_t* inst)
+{
+	// TODO use macro here
+	ffmpeg_frameserver_instance_t* i =
+	    calloc(1, sizeof(ffmpeg_frameserver_instance_t));
+	if (i) {
+		i->is_running = false;
+		return i;
+	}
+	return NULL;
 }
 
-bool ffmpeg_frameserver_configure_capture(frameserver_instance_t* inst,capture_parameters_t cp) {
+bool
+ffmpeg_frameserver_configure_capture(frameserver_instance_t* inst,
+                                     capture_parameters_t cp)
+{
 	printf("ffmpeg is file-only, no capture params supported\n");
 	return true;
 }
 
 
-bool ffmpeg_frameserver_destroy(ffmpeg_frameserver_instance_t* inst) {
-    //TODO: cleanup
-    free(inst);
-    return true;
+bool
+ffmpeg_frameserver_destroy(ffmpeg_frameserver_instance_t* inst)
+{
+	// TODO: cleanup
+	free(inst);
+	return true;
 }
-bool ffmpeg_frameserver_enumerate_sources(frameserver_instance_t* inst, ffmpeg_source_descriptor_t* sources, uint32_t* count) {
-	//TODO: this is hardcoded, we need to query the source for its properties
-	if (sources == NULL)
-	{
-		*count=2; //we advertise support for YUV420 or just a Y frame
+bool
+ffmpeg_frameserver_enumerate_sources(frameserver_instance_t* inst,
+                                     ffmpeg_source_descriptor_t* sources,
+                                     uint32_t* count)
+{
+	// TODO: this is hardcoded, we need to query the source for its
+	// properties
+	if (sources == NULL) {
+		*count = 2; // we advertise support for YUV420 or just a Y frame
 		return true;
 	}
-	char* filepath= DUMMY_FILE;
+	char* filepath = DUMMY_FILE;
 	char* source_name = "FFMPEG Test Source";
-	uint32_t source_id=666;
-
-	//this is all hardcoded but in a more developed implementation we would extend and tidy this up.
-	sources[0].current_frame=0;
-	sources[0].filepath=calloc(1,strlen(filepath)+1); //TODO: free this up somehow
-	memcpy(sources[0].filepath,filepath,strlen(filepath)+1);
-	sources[0].frame_count=99;
-	memcpy(sources[0].name,source_name,strlen(source_name)+1);
-	sources[0].name[127]=0; //unecessary in this context, but why not?
+	uint32_t source_id = 666;
+
+	// this is all hardcoded but in a more developed implementation we would
+	// extend and tidy this up.
+	sources[0].current_frame = 0;
+	sources[0].filepath =
+	    calloc(1, strlen(filepath) + 1); // TODO: free this up somehow
+	memcpy(sources[0].filepath, filepath, strlen(filepath) + 1);
+	sources[0].frame_count = 99;
+	memcpy(sources[0].name, source_name, strlen(source_name) + 1);
+	sources[0].name[127] = 0; // unnecessary in this context, but why not?
 	sources[0].format = FORMAT_YUV420_UINT8;
 
-	sources[1].current_frame=0;
-	sources[1].filepath=calloc(1,strlen(filepath)+1);
-	memcpy(sources[1].filepath,filepath,strlen(filepath)+1);
-	sources[1].frame_count=99;
-	memcpy(sources[1].name,source_name,strlen(source_name)+1);
-	sources[1].name[127]=0; //unecessary in this context, but why not?
+	sources[1].current_frame = 0;
+	sources[1].filepath = calloc(1, strlen(filepath) + 1);
+	memcpy(sources[1].filepath, filepath, strlen(filepath) + 1);
+	sources[1].frame_count = 99;
+	memcpy(sources[1].name, source_name, strlen(source_name) + 1);
+	sources[1].name[127] = 0; // unnecessary in this context, but why not?
 	sources[1].format = FORMAT_Y_UINT8;
 	return true;
 }
 
-bool ffmpeg_frameserver_get(frameserver_instance_t* inst, frame_t* _frame) {
+bool
+ffmpeg_frameserver_get(frameserver_instance_t* inst, frame_t* _frame)
+{
 	return false;
 }
-void ffmpeg_frameserver_register_frame_callback(frameserver_instance_t* inst, void* target_instance, frame_consumer_callback_func target_func) {
+void
+ffmpeg_frameserver_register_frame_callback(
+    frameserver_instance_t* inst,
+    void* target_instance,
+    frame_consumer_callback_func target_func)
+{
 	ffmpeg_frameserver_instance_t* internal = inst->internal_instance;
 	internal->frame_target_instance = target_instance;
 	internal->frame_target_callback = target_func;
 }
 
-void ffmpeg_frameserver_register_event_callback(frameserver_instance_t* inst, void* target_instance, event_consumer_callback_func target_func) {
+void
+ffmpeg_frameserver_register_event_callback(
+    frameserver_instance_t* inst,
+    void* target_instance,
+    event_consumer_callback_func target_func)
+{
 	ffmpeg_frameserver_instance_t* internal = inst->internal_instance;
 	internal->event_target_instance = target_instance;
 	internal->event_target_callback = target_func;
 }
-bool ffmpeg_frameserver_seek(frameserver_instance_t* inst, uint64_t timestamp) {
+bool
+ffmpeg_frameserver_seek(frameserver_instance_t* inst, uint64_t timestamp)
+{
 	return false;
 }
-bool ffmpeg_frameserver_stream_start(frameserver_instance_t* inst, ffmpeg_source_descriptor_t* source) {
+bool
+ffmpeg_frameserver_stream_start(frameserver_instance_t* inst,
+                                ffmpeg_source_descriptor_t* source)
+{
 	ffmpeg_frameserver_instance_t* internal = inst->internal_instance;
-	if(pthread_create(&internal->stream_thread, NULL, ffmpeg_stream_run, inst)) {
-	printf("ERROR: could not createv thread\n");
-	return false;
+	if (pthread_create(&internal->stream_thread, NULL, ffmpeg_stream_run,
+	                   inst)) {
+		printf("ERROR: could not createv thread\n");
+		return false;
 	}
 	internal->source_descriptor = *source;
 	internal->is_running = true;
-	//we're off to the races!
+	// we're off to the races!
 	return true;
 }
 
-bool ffmpeg_frameserver_stream_stop(frameserver_instance_t* inst) {
+bool
+ffmpeg_frameserver_stream_stop(frameserver_instance_t* inst)
+{
 	ffmpeg_frameserver_instance_t* internal = inst->internal_instance;
-	//TODO: signal shutdown to thread
+	// TODO: signal shutdown to thread
-	pthread_join(&internal->stream_thread);
+	pthread_join(internal->stream_thread, NULL);
 	return true;
 }
-bool ffmpeg_frameserver_is_running(frameserver_instance_t* inst) {
+bool
+ffmpeg_frameserver_is_running(frameserver_instance_t* inst)
+{
 	ffmpeg_frameserver_instance_t* internal = inst->internal_instance;
 	return internal->is_running;
-
 }
 
-bool ffmpeg_frameserver_test() {
-    printf("Running FFMPEG Frameserver Test\n");
-	frameserver_instance_t* ffm_server = frameserver_create(FRAMESERVER_TYPE_FFMPEG);
-    if (! ffm_server)
-    {
-        printf("FAILURE: Could not init FFMPEG frameserver.\n");
-        return false;
-    }
-    uint32_t source_count =0;
-    if (! ffm_server->frameserver_enumerate_sources(ffm_server,NULL,&source_count)) {
-        printf("FAILURE: Could not get source count.\n");
-        return false;
-    }
-    ffmpeg_source_descriptor_t* source_list = calloc(source_count,sizeof(ffmpeg_source_descriptor_t));
-    if (! ffm_server->frameserver_enumerate_sources(ffm_server, source_list,&source_count)) {
-        printf("FAILURE: Could not get source descriptors\n");
-        return false;
-    }
-    for (uint32_t i=0;i<source_count;i++)
-    {
-        printf("%d FFMPEG source name: %s\n",i,source_list[i].name);
-    }
-    return true;
+bool
+ffmpeg_frameserver_test()
+{
+	printf("Running FFMPEG Frameserver Test\n");
+	frameserver_instance_t* ffm_server =
+	    frameserver_create(FRAMESERVER_TYPE_FFMPEG);
+	if (!ffm_server) {
+		printf("FAILURE: Could not init FFMPEG frameserver.\n");
+		return false;
+	}
+	uint32_t source_count = 0;
+	if (!ffm_server->frameserver_enumerate_sources(ffm_server, NULL,
+	                                               &source_count)) {
+		printf("FAILURE: Could not get source count.\n");
+		return false;
+	}
+	ffmpeg_source_descriptor_t* source_list =
+	    calloc(source_count, sizeof(ffmpeg_source_descriptor_t));
+	if (!ffm_server->frameserver_enumerate_sources(ffm_server, source_list,
+	                                               &source_count)) {
+		printf("FAILURE: Could not get source descriptors\n");
+		return false;
+	}
+	for (uint32_t i = 0; i < source_count; i++) {
+		printf("%d FFMPEG source name: %s\n", i, source_list[i].name);
+	}
+	return true;
 }
 
 
-void ffmpeg_stream_run(frameserver_instance_t* inst)
+void
+ffmpeg_stream_run(frameserver_instance_t* inst)
 {
 	int ret;
 	ffmpeg_frameserver_instance_t* internal = inst->internal_instance;
-	internal->av_video_streamid=-1;
+	internal->av_video_streamid = -1;
 	av_register_all();
 	internal->av_format_context = avformat_alloc_context();
-	//TODO: check file exists - avformat_open_input just crashes if it does not exist
-	ret = avformat_open_input(&(internal->av_format_context),internal->source_descriptor.filepath,NULL,NULL);
+	// TODO: check file exists - avformat_open_input just crashes if it does
+	// not exist
+	ret = avformat_open_input(&(internal->av_format_context),
+	                          internal->source_descriptor.filepath, NULL,
+	                          NULL);
 	if (ret < 0) {
-		printf("ERROR: could not open file! %s",internal->source_descriptor.filepath);
+		printf("ERROR: could not open file! %s",
+		       internal->source_descriptor.filepath);
 		return;
 	}
-	ret = avformat_find_stream_info(internal->av_format_context,NULL);
+	ret = avformat_find_stream_info(internal->av_format_context, NULL);
 	if (ret < 0) {
-		printf("ERROR: could find stream info! %s",internal->source_descriptor.filepath);
+		printf("ERROR: could find stream info! %s",
+		       internal->source_descriptor.filepath);
 		return;
 	}
 
-	//find our video stream id
-	for (uint32_t i=0;i<internal->av_format_context->nb_streams;i++){
-		if (internal->av_format_context->streams[i]->codecpar->codec_type==AVMEDIA_TYPE_VIDEO) {
+	// find our video stream id
+	for (uint32_t i = 0; i < internal->av_format_context->nb_streams; i++) {
+		if (internal->av_format_context->streams[i]
+		        ->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
 			internal->av_video_streamid = i;
 		}
 	}
 
-	if (internal->av_video_streamid == -1){
-		printf("ERROR: could find video stream in source! %s",internal->source_descriptor.filepath);
+	if (internal->av_video_streamid == -1) {
+		printf("ERROR: could find video stream in source! %s",
+		       internal->source_descriptor.filepath);
 		return;
 	}
 
 	internal->av_video_codec = NULL;
 	internal->av_codec_context = avcodec_alloc_context3(NULL);
-	avcodec_parameters_to_context(internal->av_codec_context,internal->av_format_context->streams[internal->av_video_streamid]->codecpar);
-	//TODO: deprecated - there is probably a better way to do this
-	av_codec_set_pkt_timebase(internal->av_codec_context,internal->av_format_context->streams[internal->av_video_streamid]->time_base);
-	internal->av_video_codec = avcodec_find_decoder(internal->av_codec_context->codec_id);
+	avcodec_parameters_to_context(
+	    internal->av_codec_context,
+	    internal->av_format_context->streams[internal->av_video_streamid]
+	        ->codecpar);
+	// TODO: deprecated - there is probably a better way to do this
+	av_codec_set_pkt_timebase(
+	    internal->av_codec_context,
+	    internal->av_format_context->streams[internal->av_video_streamid]
+	        ->time_base);
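+	// (newer FFmpeg versions expose pkt_timebase as a public
+	// AVCodecContext field that can be assigned directly)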
+	internal->av_video_codec =
+	    avcodec_find_decoder(internal->av_codec_context->codec_id);
 
-	if (! internal->av_video_codec) {
-		printf("ERROR: unsupported video codec %d\n",internal->av_codec_context->codec_id);
+	if (!internal->av_video_codec) {
+		printf("ERROR: unsupported video codec %d\n",
+		       internal->av_codec_context->codec_id);
 		return;
 	}
 
-	ret = avcodec_open2(internal->av_codec_context,internal->av_video_codec,NULL);
+	ret = avcodec_open2(internal->av_codec_context,
+	                    internal->av_video_codec, NULL);
 	if (ret < 0) {
-		printf("ERROR: could not open codec %d\n",internal->av_codec_context->codec_id);
+		printf("ERROR: could not open codec %d\n",
+		       internal->av_codec_context->codec_id);
 		return;
 	}
 
-	while(1)
-	{
+	while (1) {
 		AVPacket packet;
 		av_init_packet(&packet);
-		if (! internal->av_current_frame) {
-			internal->av_current_frame=av_frame_alloc();
+		if (!internal->av_current_frame) {
+			internal->av_current_frame = av_frame_alloc();
 		}
-		//we have no guarantees the next packet will be a video packet, and
+		// we have no guarantees the next packet will be a video
+		// packet, and
 		// we may need to read multiple packets to get a valid frame
-		int video_frame_finished=0;
-		while(1){
-			while(1){
-				if (av_read_frame(internal->av_format_context,&packet ) >=0 ) {
-					if (packet.stream_index == internal->av_video_streamid){
-					break;
-				}
+		int video_frame_finished = 0;
+		while (1) {
+			while (1) {
+				if (av_read_frame(internal->av_format_context,
+				                  &packet) >= 0) {
+					if (packet.stream_index ==
+					    internal->av_video_streamid) {
+						break;
+					}
 				} else {
-					packet=empty_packet;
+					packet = empty_packet;
 					break;
 				}
 			}
-			ret = avcodec_decode_video2(internal->av_codec_context,internal->av_current_frame,&video_frame_finished,&packet);
-			if (ret < 0)
-			{
+			ret = avcodec_decode_video2(internal->av_codec_context,
+			                            internal->av_current_frame,
+			                            &video_frame_finished,
+			                            &packet);
+			if (ret < 0) {
 				printf("ERROR: decode problem!\n");
 				return;
 			}
 			if (video_frame_finished > 0) {
-				//we have our frame! w00t!
-				//printf("got frame!\n");
-				//now we need to invoke our callback with a frame.
+				// we have our frame! w00t!
+				// printf("got frame!\n");
+				// now we need to invoke our callback with a
+				// frame.
 				frame_t f;
-				f.source_id = internal->source_descriptor.source_id;
+				f.source_id =
+				    internal->source_descriptor.source_id;
 				f.format = internal->source_descriptor.format;
 				f.width = internal->av_current_frame->width;
 				f.height = internal->av_current_frame->height;
-				f.stride=internal->av_current_frame->linesize[0];
+				f.stride =
+				    internal->av_current_frame->linesize[0];
 				f.size_bytes = frame_size_in_bytes(&f);
 
-				// since we are just PoCing, we can just pass the
-				// Y plane.
+				// since we are just PoCing, we can just pass
+				// the Y plane.
 				f.data = internal->av_current_frame->data[0];
-				internal->frame_target_callback(internal->frame_target_instance,&f);
-				driver_event_t e ={};
+				internal->frame_target_callback(
+				    internal->frame_target_instance, &f);
+				driver_event_t e = {};
 				e.type = EVENT_FRAMESERVER_GOTFRAME;
-				if (internal->event_target_callback){
-					internal->event_target_callback(internal->event_target_instance,e);
+				if (internal->event_target_callback) {
+					internal->event_target_callback(
+					    internal->event_target_instance, e);
 				}
 			}
-
 		}
-
 	}
 }
diff --git a/src/xrt/drivers/montrack/frameservers/ffmpeg/ffmpeg_frameserver.h b/src/xrt/drivers/montrack/frameservers/ffmpeg/ffmpeg_frameserver.h
index 48c7ae217466937097f4939f90023c7a8314ca1a..11710daa5f3756b37794e7ab1fe608430fe122b9 100644
--- a/src/xrt/drivers/montrack/frameservers/ffmpeg/ffmpeg_frameserver.h
+++ b/src/xrt/drivers/montrack/frameservers/ffmpeg/ffmpeg_frameserver.h
@@ -4,7 +4,7 @@
 /* Almost all of the ground covered here would be covered
  * by the v4l2 frameserver on linux, but uvc may be the
  * simplest approach for cross-platform e.g. OS X
-*/
+ */
 
 #include <stdint.h>
 #include <stdio.h>
@@ -18,7 +18,8 @@
 
 static AVPacket empty_packet;
 
-typedef struct ffmpeg_source_descriptor {
+typedef struct ffmpeg_source_descriptor
+{
 	char name[128];
 	char* filepath;
 	uint64_t source_id;
@@ -29,8 +30,9 @@ typedef struct ffmpeg_source_descriptor {
 	uint32_t height;
 } ffmpeg_source_descriptor_t;
 
-typedef struct ffmpeg_frameserver_instance {
-    int64_t videoCodecTimebase;
+typedef struct ffmpeg_frameserver_instance
+{
+	int64_t videoCodecTimebase;
 	int32_t av_video_streamid;
 	AVFormatContext* av_format_context;
 	AVCodecContext* av_codec_context;
@@ -38,8 +40,8 @@ typedef struct ffmpeg_frameserver_instance {
 	AVFrame* av_current_frame;
 	frame_consumer_callback_func frame_target_callback;
 	event_consumer_callback_func event_target_callback;
-	void* frame_target_instance; //where we send our frames
-	void* event_target_instance; //where we send our events
+	void* frame_target_instance; // where we send our frames
+	void* event_target_instance; // where we send our events
 	pthread_t stream_thread;
 	bool is_running;
 	ffmpeg_source_descriptor_t source_descriptor;
@@ -48,26 +50,49 @@ typedef struct ffmpeg_frameserver_instance {
 
 
 
-
-
-ffmpeg_frameserver_instance_t* ffmpeg_frameserver_create(frameserver_instance_t* inst);
-bool ffmpeg_frameserver_destroy(ffmpeg_frameserver_instance_t* inst);
-
-bool ffmpeg_source_create(ffmpeg_source_descriptor_t* desc);
-bool ffmpeg_source_destroy(ffmpeg_source_descriptor_t* desc);
-
-bool ffmpeg_frameserver_configure_capture(frameserver_instance_t* inst, capture_parameters_t cp);
-bool ffmpeg_frameserver_enumerate_sources(frameserver_instance_t* inst, ffmpeg_source_descriptor_t* sources, uint32_t* count);
-bool ffmpeg_frameserver_get(frameserver_instance_t* inst, frame_t* _frame);
-void ffmpeg_frameserver_register_frame_callback(frameserver_instance_t* inst, void* target_instance,frame_consumer_callback_func target_func);
-void ffmpeg_frameserver_register_event_callback(frameserver_instance_t* inst, void* target_instance,event_consumer_callback_func target_func);
-bool ffmpeg_frameserver_seek(frameserver_instance_t* inst, uint64_t timestamp);
-bool ffmpeg_frameserver_stream_start(frameserver_instance_t* inst, ffmpeg_source_descriptor_t* source);
-bool ffmpeg_frameserver_stream_stop(frameserver_instance_t* inst);
-bool ffmpeg_frameserver_is_running(frameserver_instance_t* inst);
-bool ffmpeg_frameserver_test();
-
-static void ffmpeg_stream_run(frameserver_instance_t* inst);  //streaming thread entrypoint
-
-
-#endif //UVC_FRAMESERVER_H
+ffmpeg_frameserver_instance_t*
+ffmpeg_frameserver_create(frameserver_instance_t* inst);
+bool
+ffmpeg_frameserver_destroy(ffmpeg_frameserver_instance_t* inst);
+
+bool
+ffmpeg_source_create(ffmpeg_source_descriptor_t* desc);
+bool
+ffmpeg_source_destroy(ffmpeg_source_descriptor_t* desc);
+
+bool
+ffmpeg_frameserver_configure_capture(frameserver_instance_t* inst,
+                                     capture_parameters_t cp);
+bool
+ffmpeg_frameserver_enumerate_sources(frameserver_instance_t* inst,
+                                     ffmpeg_source_descriptor_t* sources,
+                                     uint32_t* count);
+bool
+ffmpeg_frameserver_get(frameserver_instance_t* inst, frame_t* _frame);
+void
+ffmpeg_frameserver_register_frame_callback(
+    frameserver_instance_t* inst,
+    void* target_instance,
+    frame_consumer_callback_func target_func);
+void
+ffmpeg_frameserver_register_event_callback(
+    frameserver_instance_t* inst,
+    void* target_instance,
+    event_consumer_callback_func target_func);
+bool
+ffmpeg_frameserver_seek(frameserver_instance_t* inst, uint64_t timestamp);
+bool
+ffmpeg_frameserver_stream_start(frameserver_instance_t* inst,
+                                ffmpeg_source_descriptor_t* source);
+bool
+ffmpeg_frameserver_stream_stop(frameserver_instance_t* inst);
+bool
+ffmpeg_frameserver_is_running(frameserver_instance_t* inst);
+bool
+ffmpeg_frameserver_test();
+
+static void
+ffmpeg_stream_run(frameserver_instance_t* inst); // streaming thread entrypoint
+
+
+#endif // UVC_FRAMESERVER_H
diff --git a/src/xrt/drivers/montrack/frameservers/uvc/uvc_frameserver.c b/src/xrt/drivers/montrack/frameservers/uvc/uvc_frameserver.c
index 27eb48bb61e72d5c3748c8a674bc6afcc06b58b6..d0bf4686731141b78d0fc41e0432fe2466365247 100644
--- a/src/xrt/drivers/montrack/frameservers/uvc/uvc_frameserver.c
+++ b/src/xrt/drivers/montrack/frameservers/uvc/uvc_frameserver.c
@@ -9,524 +9,726 @@
 
 static uvc_error_t res;
 
-bool uvc_source_create(uvc_source_descriptor_t* desc)
+bool
+uvc_source_create(uvc_source_descriptor_t* desc)
 {
-   // do nothing right now
-    return true;
+	// do nothing right now
+	return true;
 }
 
-bool uvc_source_destroy(uvc_source_descriptor_t* desc)
+bool
+uvc_source_destroy(uvc_source_descriptor_t* desc)
 {
-   // do nothing right now
-    return true;
+	// do nothing right now
+	return true;
 }
 
 
-uvc_frameserver_instance_t* uvc_frameserver_create(frameserver_instance_t* inst) {
-    //TODO: calloc macro
+uvc_frameserver_instance_t*
+uvc_frameserver_create(frameserver_instance_t* inst)
+{
+	// TODO: calloc macro
 	printf("creating uvc frameserver\n");
-	uvc_frameserver_instance_t* i = calloc(1,sizeof(uvc_frameserver_instance_t));
-    if (i) {
-        i->device_list =NULL;
-        res = uvc_init(&(i->context), NULL);
-        if (res < 0)
-        {
-            uvc_perror(res, "UVC Context init failed");
-            return NULL;
-        }
-
-        return i;
-    }
-
-    return NULL;
+	uvc_frameserver_instance_t* i =
+	    calloc(1, sizeof(uvc_frameserver_instance_t));
+	if (i) {
+		i->device_list = NULL;
+		res = uvc_init(&(i->context), NULL);
+		if (res < 0) {
+			uvc_perror(res, "UVC Context init failed");
+			return NULL;
+		}
+
+		return i;
+	}
+
+	return NULL;
 }
 
-bool uvc_frameserver_enumerate_sources(frameserver_instance_t* inst, uvc_source_descriptor_t* cameras, uint32_t* count)
+bool
+uvc_frameserver_enumerate_sources(frameserver_instance_t* inst,
+                                  uvc_source_descriptor_t* cameras,
+                                  uint32_t* count)
 {
 	uvc_error_t res;
 	uvc_frameserver_instance_t* internal = inst->internal_instance;
-	//if (internal->device_list != NULL) {
+	// if (internal->device_list != NULL) {
 	//	uvc_free_device_list(internal->device_list,0);
 	//}
 	uint32_t device_count = 0;
 	res = uvc_get_device_list(internal->context, &(internal->device_list));
-	if (res < 0)
-	{
-		printf("ERROR: %s\n",uvc_strerror(res));
+	if (res < 0) {
+		printf("ERROR: %s\n", uvc_strerror(res));
 		return false;
 	}
-	while (1)
-	{
+	while (1) {
 		uvc_device_t* uvc_device = internal->device_list[device_count];
-		if (uvc_device == NULL)
-		{
+		if (uvc_device == NULL) {
 			break;
 		}
 		device_count++;
 	}
 
-	uint32_t source_count=0;
+	uint32_t source_count = 0;
 
-	if (cameras == NULL)
-	{
+	if (cameras == NULL) {
 		printf("counting formats\n");
-		for (uint32_t i=0;i<device_count;i++){
-		uvc_source_descriptor_t* temp_sds_count= NULL;
-        // we need to free the source descriptors, even though we only use the count
-		uint32_t c = uvc_frameserver_get_source_descriptors(&temp_sds_count,internal->device_list[i],i);
-		source_count += c;
-		free(temp_sds_count);
+		for (uint32_t i = 0; i < device_count; i++) {
+			uvc_source_descriptor_t* temp_sds_count = NULL;
+			// we need to free the source descriptors, even though
+			// we only use the count
+			uint32_t c = uvc_frameserver_get_source_descriptors(
+			    &temp_sds_count, internal->device_list[i], i);
+			source_count += c;
+			free(temp_sds_count);
 		}
 
 		*count = source_count;
-		//uvc_free_device_list(internal->device_list,1);
-		//internal->device_list = NULL;
+		// uvc_free_device_list(internal->device_list,1);
+		// internal->device_list = NULL;
 		return true;
 	}
 
 	printf("returning formats\n");
 
-	//if we were passed an array of camera descriptors, fill them in
-	uvc_source_descriptor_t* temp_sds=NULL;
+	// if we were passed an array of camera descriptors, fill them in
+	uvc_source_descriptor_t* temp_sds = NULL;
 
-	uint32_t cameras_offset=0;
-	for (uint32_t i=0;i<device_count;i++)
-	{
-        // last parameter will end up in source_id
-        uint32_t c = uvc_frameserver_get_source_descriptors(&temp_sds,internal->device_list[i],i);
-		printf("Got %d sources\n",c);
+	uint32_t cameras_offset = 0;
+	for (uint32_t i = 0; i < device_count; i++) {
+		// last parameter will end up in source_id
+		uint32_t c = uvc_frameserver_get_source_descriptors(
+		    &temp_sds, internal->device_list[i], i);
+		printf("Got %d sources\n", c);
 		if (c > 0) {
-            source_count +=c;
-            memcpy(cameras+cameras_offset,temp_sds,c * sizeof(uvc_source_descriptor_t));
-            cameras_offset+=c;
-        }
+			source_count += c;
+			memcpy(cameras + cameras_offset, temp_sds,
+			       c * sizeof(uvc_source_descriptor_t));
+			cameras_offset += c;
+		}
 	}
 
-	if (source_count==0)
-	{
+	if (source_count == 0) {
 		return false;
 	}
 
-    //free(temp_sds);
-	//uvc_free_device_list(internal->device_list,1);
-	//internal->device_list = NULL;
+	// free(temp_sds);
+	// uvc_free_device_list(internal->device_list,1);
+	// internal->device_list = NULL;
 	return true;
 }
 
 
-bool uvc_frameserver_configure_capture(frameserver_instance_t* inst, capture_parameters_t cp) {
+bool
+uvc_frameserver_configure_capture(frameserver_instance_t* inst,
+                                  capture_parameters_t cp)
+{
 	uvc_frameserver_instance_t* internal = inst->internal_instance;
 	internal->capture_params = cp;
 	internal->is_configured = false;
 	return true;
 }
 
-void uvc_frameserver_register_frame_callback(frameserver_instance_t* inst, void* target_instance, frame_consumer_callback_func target_func) {
+void
+uvc_frameserver_register_frame_callback(
+    frameserver_instance_t* inst,
+    void* target_instance,
+    frame_consumer_callback_func target_func)
+{
 	uvc_frameserver_instance_t* internal = inst->internal_instance;
 	internal->frame_target_instance = target_instance;
 	internal->frame_target_callback = target_func;
 }
 
-void uvc_frameserver_register_event_callback(frameserver_instance_t* inst, void* target_instance, event_consumer_callback_func target_func) {
-	//do nothing
+void
+uvc_frameserver_register_event_callback(
+    frameserver_instance_t* inst,
+    void* target_instance,
+    event_consumer_callback_func target_func)
+{
+	// do nothing
 }
 
-bool uvc_frameserver_get(frameserver_instance_t* inst, frame_t* _frame) {
+bool
+uvc_frameserver_get(frameserver_instance_t* inst, frame_t* _frame)
+{
 	return false;
 }
-bool uvc_frameserver_seek(frameserver_instance_t* inst, uint64_t timestamp) {
+bool
+uvc_frameserver_seek(frameserver_instance_t* inst, uint64_t timestamp)
+{
 	return false;
 }
-bool uvc_frameserver_stream_start(frameserver_instance_t* inst,uvc_source_descriptor_t* source) {
+bool
+uvc_frameserver_stream_start(frameserver_instance_t* inst,
+                             uvc_source_descriptor_t* source)
+{
 	uvc_frameserver_instance_t* internal = inst->internal_instance;
 	internal->source_descriptor = *source;
 	internal->is_running = true;
-	if(pthread_create(&internal->stream_thread, NULL, uvc_frameserver_stream_run, inst)) {
-	printf("ERROR: could not create thread\n");
-	return false;
+	if (pthread_create(&internal->stream_thread, NULL,
+	                   uvc_frameserver_stream_run, inst)) {
+		printf("ERROR: could not create thread\n");
+		return false;
 	}
-	//we're off to the races!
+	// we're off to the races!
 	return true;
 }
 
-bool uvc_frameserver_stream_stop(frameserver_instance_t* inst) {
-    //TODO: stop the stream, join the thread, cleanup.
-    return false;
+bool
+uvc_frameserver_stream_stop(frameserver_instance_t* inst)
+{
+	// TODO: stop the stream, join the thread, cleanup.
+	return false;
 }
 
-bool uvc_frameserver_is_running(frameserver_instance_t* inst) {
+bool
+uvc_frameserver_is_running(frameserver_instance_t* inst)
+{
 	return false;
 }
 
-void uvc_frameserver_stream_run(frameserver_instance_t* inst)
+void
+uvc_frameserver_stream_run(frameserver_instance_t* inst)
 {
 	uvc_error_t res;
 	uvc_frameserver_instance_t* internal = inst->internal_instance;
 	bool split_planes;
-	plane_t planes[MAX_PLANES] ={};
+	plane_t planes[MAX_PLANES] = {};
 
-	//clear our kill_handler_thread flag, likely set when closing
-	//devices during format enumeration
+	// clear our kill_handler_thread flag, likely set when closing
+	// devices during format enumeration
 	internal->context->kill_handler_thread = 0;
 
-	//our jpeg decoder stuff
+	// our jpeg decoder stuff
 	struct jpeg_decompress_struct cinfo;
 	struct jpeg_error_mgr jerr;
 
 
-	res = uvc_open(internal->device_list[internal->source_descriptor.uvc_device_index],&internal->device_handle);
-	if (res < 0)
-	{
-		printf("ERROR: %s open %s\n",&internal->source_descriptor.name,uvc_strerror(res));
+	res = uvc_open(
+	    internal->device_list[internal->source_descriptor.uvc_device_index],
+	    &internal->device_handle);
+	if (res < 0) {
+		printf("ERROR: %s open %s\n", &internal->source_descriptor.name,
+		       uvc_strerror(res));
 		return;
 	}
-	int fps = 1.0 / (internal->source_descriptor.rate/10000000.0f);
-	res = uvc_get_stream_ctrl_format_size(internal->device_handle, &internal->stream_ctrl,internal->source_descriptor.stream_format,internal->source_descriptor.width, internal->source_descriptor.height,fps);
-	if (res < 0)
-	{
-		printf("ERROR: %s get_stream_ctrl_format %s\n",&internal->source_descriptor.name,uvc_strerror(res));
+	int fps = 1.0 / (internal->source_descriptor.rate / 10000000.0f);
+	res = uvc_get_stream_ctrl_format_size(
+	    internal->device_handle, &internal->stream_ctrl,
+	    internal->source_descriptor.stream_format,
+	    internal->source_descriptor.width,
+	    internal->source_descriptor.height, fps);
+	if (res < 0) {
+		printf("ERROR: %s get_stream_ctrl_format %s\n",
+		       &internal->source_descriptor.name, uvc_strerror(res));
 		return;
 	}
 
 	uvc_print_stream_ctrl(&internal->stream_ctrl, stdout);
 
-	res = uvc_stream_open_ctrl(internal->device_handle, &internal->stream_handle, &internal->stream_ctrl);
-	if (res < 0)
-	{
-		printf("ERROR: stream_open_ctrl %s\n",uvc_strerror(res));
+	res = uvc_stream_open_ctrl(internal->device_handle,
+	                           &internal->stream_handle,
+	                           &internal->stream_ctrl);
+	if (res < 0) {
+		printf("ERROR: stream_open_ctrl %s\n", uvc_strerror(res));
 		return;
 	}
 
-	res = uvc_stream_start(internal->stream_handle,NULL,NULL,0);
-	if (res < 0)
-	{
-		printf("ERROR: stream_start %s\n",uvc_strerror(res));
+	res = uvc_stream_start(internal->stream_handle, NULL, NULL, 0);
+	if (res < 0) {
+		printf("ERROR: stream_start %s\n", uvc_strerror(res));
 		return;
 	}
 
-	frame_t f = {}; //our buffer
+	frame_t f = {}; // our buffer
 	f.source_id = internal->source_descriptor.source_id;
 	switch (internal->source_descriptor.stream_format) {
-	    case UVC_FRAME_FORMAT_YUYV:
-		    f.format = FORMAT_YUYV_UINT8;
-		    break;
-	    case UVC_FRAME_FORMAT_MJPEG:
-		    f.format = FORMAT_JPG; //this will get reset to YUV444
-			cinfo.err = jpeg_std_error(&jerr);
-			jpeg_create_decompress(&cinfo);
-		    break;
-	    default:
-		    printf("ERROR: unhandled format!\n");
+	case UVC_FRAME_FORMAT_YUYV: f.format = FORMAT_YUYV_UINT8; break;
+	case UVC_FRAME_FORMAT_MJPEG:
+		f.format = FORMAT_JPG; // this will get reset to YUV444
+		cinfo.err = jpeg_std_error(&jerr);
+		jpeg_create_decompress(&cinfo);
+		break;
+	default: printf("ERROR: unhandled format!\n");
 	}
 
-    frame_t sampled_frame = {};
+	frame_t sampled_frame = {};
 
-    //replaced by sampled_frame but may be useful for planar output.
-    frame_t plane_frame = {};
+	// replaced by sampled_frame but may be useful for planar output.
+	frame_t plane_frame = {};
 	uint8_t* plane_data[MAX_PLANES];
 
-    uint8_t* temp_data = NULL;
+	uint8_t* temp_data = NULL;
 	uint8_t* data_ptr = NULL;
 
-	uvc_frame_t* frame = uvc_allocate_frame(internal->stream_ctrl.dwMaxVideoFrameSize);
+	uvc_frame_t* frame =
+	    uvc_allocate_frame(internal->stream_ctrl.dwMaxVideoFrameSize);
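+	// the single buffer above is reused for every capture; it is
+	// sized for the largest payload the negotiated stream allows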
 	while (internal->is_running) {
 
-		//if our config is invalidated at runtime, reconfigure
-		if (! internal->is_configured) {
-			//defaults - auto-anything off
+		// if our config is invalidated at runtime, reconfigure
+		if (!internal->is_configured) {
+			// defaults - auto-anything off
 			uvc_set_ae_mode(internal->device_handle, 1);
-			uvc_set_ae_priority(internal->device_handle,0);
-			//we may need to enumerate the control range..
-			uint32_t exp_time = internal->capture_params.exposure * 2048;
-			uvc_set_exposure_abs(internal->device_handle,50);
-			uvc_set_gain(internal->device_handle,internal->capture_params.gain * 10);
+			uvc_set_ae_priority(internal->device_handle, 0);
+			// we may need to enumerate the control range..
+			uint32_t exp_time =
+			    internal->capture_params.exposure * 2048;
+			uvc_set_exposure_abs(internal->device_handle, 50);
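+			// TODO: exp_time above is computed but never
+			// used; exposure is currently hardcoded to 50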
+			uvc_set_gain(internal->device_handle,
+			             internal->capture_params.gain * 10);
 
 			internal->is_configured = true;
 		}
 
-		res =  uvc_stream_get_frame	(internal->stream_handle, &frame,0);
-        if (res < 0) {
-			printf("ERROR: stream_get_frame %s\n",uvc_strerror(res));
+		res = uvc_stream_get_frame(internal->stream_handle, &frame, 0);
+		if (res < 0) {
+			printf("ERROR: stream_get_frame %s\n",
+			       uvc_strerror(res));
 		} else {
 			if (frame) {
-				//printf("got frame\n");
+				// printf("got frame\n");
 
 				f.width = frame->width;
 				f.height = frame->height;
-				f.stride= frame->step;
+				f.stride = frame->step;
 				f.size_bytes = frame->data_bytes;
 				f.data = frame->data;
 
-                switch (internal->source_descriptor.stream_format) {
-                    case UVC_FRAME_FORMAT_MJPEG:
-                        //immediately set this to YUV444 as this is what we decode to.
-                        f.format = FORMAT_YUV444_UINT8;
-                        f.stride = f.width * 3; //jpg format does not supply stride
-                        //decode our jpg frame.
-                        if (! temp_data) {
-                            temp_data=malloc(frame_size_in_bytes(&f));
-                        }
-                        jpeg_mem_src(&cinfo,frame->data,frame->data_bytes);
-                        jpeg_read_header(&cinfo, TRUE);
-                        //we will bypass colour conversion as we want YUV
-                        cinfo.out_color_space = cinfo.jpeg_color_space;
-                        jpeg_start_decompress(&cinfo);
-                        uint32_t scanlines_read = 0;
-                        data_ptr=temp_data;
-                        while (scanlines_read < cinfo.image_height ) {
-                             int read_count = jpeg_read_scanlines(&cinfo,&data_ptr,16);
-                            data_ptr += read_count * frame->width*3;
-                            scanlines_read += read_count;
-                        }
-                        f.data = temp_data;
-                        jpeg_finish_decompress(&cinfo);
-
-                        switch (internal->source_descriptor.format) {
-                            case FORMAT_Y_UINT8:
-                                //split our Y plane out
-                                sampled_frame = f; //copy our buffer frames attributes
-                                sampled_frame.format = FORMAT_Y_UINT8;
-                                sampled_frame.stride = f.width;
-                                sampled_frame.size_bytes = frame_size_in_bytes(&sampled_frame);
-
-                                if (! sampled_frame.data) {
-                                    sampled_frame.data = malloc(sampled_frame.size_bytes);
-                                }
-
-                                frame_extract_plane(&f,PLANE_Y,&sampled_frame);
-
-                                if (internal->frame_target_callback){
-                                    internal->frame_target_callback(internal->frame_target_instance,&sampled_frame);
-                                }
-                                break;
-                            default:
-                                //supply our YUV444 directly
-                                if (internal->frame_target_callback){
-                                    internal->frame_target_callback(internal->frame_target_instance,&f);
-                                }
-                        }
-                        break;
-                    case UVC_FRAME_FORMAT_YUYV:
-                        switch (internal->source_descriptor.format) {
-                            case FORMAT_Y_UINT8:
-                                //split our Y plane out
-                                sampled_frame = f; //copy our buffer frames attributes
-                                sampled_frame.format = FORMAT_Y_UINT8;
-                                sampled_frame.stride = f.width;
-                                sampled_frame.size_bytes = frame_size_in_bytes(&sampled_frame);
-
-                                if (! sampled_frame.data) {
-                                    sampled_frame.data = malloc(sampled_frame.size_bytes);
-                                }
-
-                                frame_extract_plane(&f,PLANE_Y,&sampled_frame);
-
-                                if (internal->frame_target_callback){
-                                    internal->frame_target_callback(internal->frame_target_instance,&sampled_frame);
-                                }
-                                break;
-                            case FORMAT_YUV444_UINT8:
-                                //upsample our YUYV to YUV444
-                                sampled_frame = f; //copy our buffer frames attributes
-                                sampled_frame.format = FORMAT_YUV444_UINT8;
-                                sampled_frame.stride = f.width * 3;
-                                sampled_frame.size_bytes = frame_size_in_bytes(&sampled_frame);
-                                //allocate on first access
-                                if (! sampled_frame.data) {
-                                    sampled_frame.data = malloc(sampled_frame.size_bytes);
-                                }
-                                if (frame_resample(&f,&sampled_frame)) {
-                                    if (internal->frame_target_callback) {
-                                        internal->frame_target_callback(internal->frame_target_instance,&sampled_frame);
-                                    }
-                                break;
-                                }
-                                printf("ERROR: could not resample frame from %d to %d\n",f.format,sampled_frame.format);
-                                break;
-                            default:
-                                //supply our YUYV directly
-                                if (internal->frame_target_callback){
-                                    internal->frame_target_callback(internal->frame_target_instance,&f);
-                                }
-                            }
-                            break;
-                        default:
-                            printf("ERROR: Unknown stream format\n");
-                    }
-				driver_event_t e ={};
-				e.type =EVENT_FRAMESERVER_GOTFRAME;
-                if (internal->event_target_callback){
-					internal->event_target_callback(internal->event_target_instance,e);
-                }
-            }
+				switch (
+				    internal->source_descriptor.stream_format) {
+				case UVC_FRAME_FORMAT_MJPEG:
+					// immediately set this to YUV444 as
+					// this is what we decode to.
+					f.format = FORMAT_YUV444_UINT8;
+					f.stride =
+					    f.width * 3; // jpg format does not
+					                 // supply stride
+					// decode our jpg frame.
+					if (!temp_data) {
+						temp_data = malloc(
+						    frame_size_in_bytes(&f));
+					}
+					jpeg_mem_src(&cinfo, frame->data,
+					             frame->data_bytes);
+					jpeg_read_header(&cinfo, TRUE);
+					// we will bypass colour conversion as
+					// we want YUV
+					cinfo.out_color_space =
+					    cinfo.jpeg_color_space;
+					jpeg_start_decompress(&cinfo);
+					uint32_t scanlines_read = 0;
+					data_ptr = temp_data;
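+					// NOTE: only one row pointer is
+					// passed, so requesting 16 scanlines
+					// assumes libjpeg returns only a few
+					// rows per call; a one-row-per-call
+					// loop would be safer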
+					while (scanlines_read <
+					       cinfo.image_height) {
+						int read_count =
+						    jpeg_read_scanlines(
+						        &cinfo, &data_ptr, 16);
+						data_ptr += read_count *
+						            frame->width * 3;
+						scanlines_read += read_count;
+					}
+					f.data = temp_data;
+					jpeg_finish_decompress(&cinfo);
+
+					switch (internal->source_descriptor
+					            .format) {
+					case FORMAT_Y_UINT8:
+						// split our Y plane out
+						sampled_frame =
+						    f; // copy our buffer
+						       // frame's attributes
+						sampled_frame.format =
+						    FORMAT_Y_UINT8;
+						sampled_frame.stride = f.width;
+						sampled_frame.size_bytes =
+						    frame_size_in_bytes(
+						        &sampled_frame);
+
+						if (!sampled_frame.data) {
+							sampled_frame
+							    .data = malloc(
+							    sampled_frame
+							        .size_bytes);
+						}
+
+						frame_extract_plane(
+						    &f, PLANE_Y,
+						    &sampled_frame);
+
+						if (internal
+						        ->frame_target_callback) {
+							internal->frame_target_callback(
+							    internal
+							        ->frame_target_instance,
+							    &sampled_frame);
+						}
+						break;
+					default:
+						// supply our YUV444 directly
+						if (internal
+						        ->frame_target_callback) {
+							internal->frame_target_callback(
+							    internal
+							        ->frame_target_instance,
+							    &f);
+						}
+					}
+					break;
+				case UVC_FRAME_FORMAT_YUYV:
+					switch (internal->source_descriptor
+					            .format) {
+					case FORMAT_Y_UINT8:
+						// split our Y plane out
+						sampled_frame =
+						    f; // copy our buffer
+						       // frame's attributes
+						sampled_frame.format =
+						    FORMAT_Y_UINT8;
+						sampled_frame.stride = f.width;
+						sampled_frame.size_bytes =
+						    frame_size_in_bytes(
+						        &sampled_frame);
+
+						if (!sampled_frame.data) {
+							sampled_frame
+							    .data = malloc(
+							    sampled_frame
+							        .size_bytes);
+						}
+
+						frame_extract_plane(
+						    &f, PLANE_Y,
+						    &sampled_frame);
+
+						if (internal
+						        ->frame_target_callback) {
+							internal->frame_target_callback(
+							    internal
+							        ->frame_target_instance,
+							    &sampled_frame);
+						}
+						break;
+					case FORMAT_YUV444_UINT8:
+						// upsample our YUYV to YUV444
+						sampled_frame =
+						    f; // copy our buffer
+						       // frame's attributes
+						sampled_frame.format =
+						    FORMAT_YUV444_UINT8;
+						sampled_frame.stride =
+						    f.width * 3;
+						sampled_frame.size_bytes =
+						    frame_size_in_bytes(
+						        &sampled_frame);
+						// allocate on first access
+						if (!sampled_frame.data) {
+							sampled_frame
+							    .data = malloc(
+							    sampled_frame
+							        .size_bytes);
+						}
+						if (frame_resample(
+						        &f, &sampled_frame)) {
+							if (internal
+							        ->frame_target_callback) {
+								internal->frame_target_callback(
+								    internal
+								        ->frame_target_instance,
+								    &sampled_frame);
+							}
+							break;
+						}
+						printf(
+						    "ERROR: could not resample "
+						    "frame from %d to %d\n",
+						    f.format,
+						    sampled_frame.format);
+						break;
+					default:
+						// supply our YUYV directly
+						if (internal
+						        ->frame_target_callback) {
+							internal->frame_target_callback(
+							    internal
+							        ->frame_target_instance,
+							    &f);
+						}
+					}
+					break;
+				default:
+					printf(
+					    "ERROR: Unknown stream format\n");
+				}
+				driver_event_t e = {};
+				e.type = EVENT_FRAMESERVER_GOTFRAME;
+				if (internal->event_target_callback) {
+					internal->event_target_callback(
+					    internal->event_target_instance, e);
+				}
+			}
 		}
 	}
 	uvc_free_frame(frame);
-	if (temp_data){
+	if (temp_data) {
 		free(temp_data);
-		temp_data=NULL;
+		temp_data = NULL;
 	}
-    if (sampled_frame.data) {
-        free (sampled_frame.data);
+	if (sampled_frame.data) {
+		free(sampled_frame.data);
 	}
 	return;
 }
 
 
-bool uvc_frameserver_test(){
+bool
+uvc_frameserver_test()
+{
 	printf("Running UVC Frameserver Test\n");
-	frameserver_instance_t* uvc_frameserver = frameserver_create(FRAMESERVER_TYPE_UVC);
-	if (!uvc_frameserver )
-	{
+	frameserver_instance_t* uvc_frameserver =
+	    frameserver_create(FRAMESERVER_TYPE_UVC);
+	if (!uvc_frameserver) {
 		printf("FAILURE: Could not create frameserver.\n");
 		return false;
 	}
 	uint32_t source_count = 0;
-	if (! uvc_frameserver->frameserver_enumerate_sources(uvc_frameserver,NULL,&source_count)) {
+	if (!uvc_frameserver->frameserver_enumerate_sources(
+	        uvc_frameserver, NULL, &source_count)) {
 		printf("FAILURE: Could not get source count.\n");
 		return false;
 	}
-	uvc_source_descriptor_t* source_list = calloc(source_count,sizeof(uvc_source_descriptor_t));
-	if (! uvc_frameserver->frameserver_enumerate_sources(uvc_frameserver, source_list,&source_count)) {
+	uvc_source_descriptor_t* source_list =
+	    calloc(source_count, sizeof(uvc_source_descriptor_t));
+	if (!uvc_frameserver->frameserver_enumerate_sources(
+	        uvc_frameserver, source_list, &source_count)) {
 		printf("FAILURE: Could not get source descriptors\n");
 		return false;
 	}
-	for (uint32_t i=0;i<source_count;i++)
-	{
-		printf("%d source name: %s\n",i,source_list[i].name);
+	for (uint32_t i = 0; i < source_count; i++) {
+		printf("%d source name: %s\n", i, source_list[i].name);
 	}
-    free(source_list);
-    return true;
+	free(source_list);
+	return true;
 }
 
-//TODO: fix this so we dont need to alloc?
-uint32_t uvc_frameserver_get_source_descriptors(uvc_source_descriptor_t** sds,uvc_device_t* uvc_device, uint32_t device_index) {
+// TODO: fix this so we don't need to alloc?
+uint32_t
+uvc_frameserver_get_source_descriptors(uvc_source_descriptor_t** sds,
+                                       uvc_device_t* uvc_device,
+                                       uint32_t device_index)
+{
 
-	uint32_t sd_count=0;
+	uint32_t sd_count = 0;
 	uvc_device_descriptor_t* uvc_device_descriptor;
 	res = uvc_get_device_descriptor(uvc_device, &uvc_device_descriptor);
 	if (res < 0) {
-		printf("ERROR: %s\n",uvc_strerror(res));
+		printf("ERROR: %s\n", uvc_strerror(res));
 	}
 	uvc_device_handle_t* temp_handle;
-	res = uvc_open(uvc_device,&temp_handle);
-	if (res == UVC_SUCCESS)
-	{
-		const uvc_format_desc_t* format_desc = uvc_get_format_descs(temp_handle);
+	res = uvc_open(uvc_device, &temp_handle);
+	if (res == UVC_SUCCESS) {
+		const uvc_format_desc_t* format_desc =
+		    uvc_get_format_descs(temp_handle);
 		uvc_source_descriptor_t* desc = *sds;
-        uvc_source_descriptor_t* temp_alloc = calloc(1,sizeof(uvc_device_descriptor_t));
-        while(format_desc  != NULL)
-		{
-			printf("Found format: %d FOURCC %c%c%c%c\n",format_desc->bFormatIndex,format_desc->fourccFormat[0],format_desc->fourccFormat[1],format_desc->fourccFormat[2],format_desc->fourccFormat[3]);
+		// scratch pointer for the reallocs below; the old calloc
+		// here leaked and sized the wrong struct type
+		uvc_source_descriptor_t* temp_alloc = NULL;
+		while (format_desc != NULL) {
+			printf("Found format: %d FOURCC %c%c%c%c\n",
+			       format_desc->bFormatIndex,
+			       format_desc->fourccFormat[0],
+			       format_desc->fourccFormat[1],
+			       format_desc->fourccFormat[2],
+			       format_desc->fourccFormat[3]);
 			uvc_frame_desc_t* frame_desc = format_desc->frame_descs;
-            while (frame_desc != NULL)
-			{
-				printf("W %d H %d\n",frame_desc->wWidth,frame_desc->wHeight);
-				uint32_t* frame_duration = frame_desc->intervals;
+			while (frame_desc != NULL) {
+				printf("W %d H %d\n", frame_desc->wWidth,
+				       frame_desc->wHeight);
+				uint32_t* frame_duration =
+				    frame_desc->intervals;
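+				// intervals is a zero-terminated
+				// array of frame intervals in
+				// 100 ns units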
 				while (*frame_duration != 0) {
-					printf("rate: %d %f\n",*frame_duration,1.0/(*frame_duration / 10000000.0f));
-					if (*frame_duration < 400000) { //anything quicker than 25fps
-						// if we are a YUV mode, write out a descriptor + the Y-only descriptor
-						if (format_desc->fourccFormat[0] == 'Y') {
-
-                            temp_alloc = realloc(*sds,(sd_count + 3) * sizeof(uvc_source_descriptor_t));
-							if (! temp_alloc) {
-							printf("ERROR: could not allocate memory\n");
-							exit(1);
+					printf("rate: %d %f\n", *frame_duration,
+					       1.0 / (*frame_duration /
+					              10000000.0f));
+					if (*frame_duration <
+					    400000) { // anything quicker than
+						      // 25fps
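+						// (400000 * 100 ns = 40 ms
+						// per frame = 25 fps)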
+						// if we are a YUV mode, write
+						// out a descriptor + the Y-only
+						// descriptor
+						if (format_desc
+						        ->fourccFormat[0] ==
+						    'Y') {
+
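+							// a YUYV mode yields
+							// three descriptors:
+							// native YUYV,
+							// upsampled YUV444,
+							// and a Y-only plane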
+							temp_alloc = realloc(
+							    *sds,
+							    (sd_count + 3) *
+							        sizeof(
+							            uvc_source_descriptor_t));
+							if (!temp_alloc) {
+								printf(
+								    "ERROR: "
+								    "could not "
+								    "allocate "
+								    "memory\n");
+								exit(1);
 							}
 
 							*sds = temp_alloc;
-							desc = temp_alloc + sd_count;
-
-							desc->uvc_device_index=device_index;
-							desc->rate = *frame_duration;
-							source_descriptor_from_uvc_descriptor(desc,uvc_device_descriptor,frame_desc);
-                            desc->stream_format = UVC_FRAME_FORMAT_YUYV;
-							desc->format = FORMAT_YUYV_UINT8;
-                            desc->sampling = SAMPLING_NONE;
+							desc = temp_alloc +
+							       sd_count;
+
+							desc->uvc_device_index =
+							    device_index;
+							desc->rate =
+							    *frame_duration;
+							source_descriptor_from_uvc_descriptor(
+							    desc,
+							    uvc_device_descriptor,
+							    frame_desc);
+							desc->stream_format =
+							    UVC_FRAME_FORMAT_YUYV;
+							desc->format =
+							    FORMAT_YUYV_UINT8;
+							desc->sampling =
+							    SAMPLING_NONE;
+							sd_count++;
+							desc++;
+
+							// YUV444 format
+							desc->uvc_device_index =
+							    device_index;
+							desc->rate =
+							    *frame_duration;
+							source_descriptor_from_uvc_descriptor(
+							    desc,
+							    uvc_device_descriptor,
+							    frame_desc);
+							desc->stream_format =
+							    UVC_FRAME_FORMAT_YUYV;
+							desc->format =
+							    FORMAT_YUV444_UINT8;
+							desc->sampling =
+							    SAMPLING_UPSAMPLED;
 							sd_count++;
 							desc++;
 
-                            //YUV444 format
-                            desc->uvc_device_index=device_index;
-                            desc->rate = *frame_duration;
-                            source_descriptor_from_uvc_descriptor(desc,uvc_device_descriptor,frame_desc);
-                            desc->stream_format = UVC_FRAME_FORMAT_YUYV;
-                            desc->format = FORMAT_YUV444_UINT8;
-                            desc->sampling=SAMPLING_UPSAMPLED;
-                            sd_count++;
-                            desc++;
-
-							//also output our 'one plane Y' format
-							desc->uvc_device_index=device_index;
-							desc->rate = *frame_duration;
-							source_descriptor_from_uvc_descriptor(desc,uvc_device_descriptor,frame_desc);
-							desc->stream_format = UVC_FRAME_FORMAT_YUYV;
-							desc->format = FORMAT_Y_UINT8;
-                            desc->sampling=SAMPLING_DOWNSAMPLED;
-                            sd_count++;
-                            desc++;
-						} else if (format_desc->fourccFormat[0] == 'M') {
-							//MJPG, most likely - TODO: check more than the first letter
-
-							temp_alloc = realloc(*sds,(sd_count + 2) * sizeof(uvc_source_descriptor_t));
-							if (! temp_alloc) {
-								printf("ERROR: could not allocate memory\n");
+							// also output our 'one
+							// plane Y' format
+							desc->uvc_device_index =
+							    device_index;
+							desc->rate =
+							    *frame_duration;
+							source_descriptor_from_uvc_descriptor(
+							    desc,
+							    uvc_device_descriptor,
+							    frame_desc);
+							desc->stream_format =
+							    UVC_FRAME_FORMAT_YUYV;
+							desc->format =
+							    FORMAT_Y_UINT8;
+							desc->sampling =
+							    SAMPLING_DOWNSAMPLED;
+							sd_count++;
+							desc++;
+						} else if (format_desc
+						               ->fourccFormat
+						                   [0] == 'M') {
+							// MJPG, most likely -
+							// TODO: check more than
+							// the first letter
+
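+							// an MJPG mode yields
+							// two descriptors:
+							// decoded YUV444 and
+							// a Y-only plane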
+							temp_alloc = realloc(
+							    *sds,
+							    (sd_count + 2) *
+							        sizeof(
+							            uvc_source_descriptor_t));
+							if (!temp_alloc) {
+								printf(
+								    "ERROR: "
+								    "could not "
+								    "allocate "
+								    "memory\n");
 								exit(1);
 							}
 							*sds = temp_alloc;
-							desc = temp_alloc + sd_count;
-
-							desc->uvc_device_index=device_index;
-							desc->rate = *frame_duration;
-							source_descriptor_from_uvc_descriptor(desc,uvc_device_descriptor,frame_desc);
-							desc->stream_format = UVC_FRAME_FORMAT_MJPEG;
-							desc->format = FORMAT_YUV444_UINT8;
-                            desc->sampling = SAMPLING_UPSAMPLED;
+							desc = temp_alloc +
+							       sd_count;
+
+							desc->uvc_device_index =
+							    device_index;
+							desc->rate =
+							    *frame_duration;
+							source_descriptor_from_uvc_descriptor(
+							    desc,
+							    uvc_device_descriptor,
+							    frame_desc);
+							desc->stream_format =
+							    UVC_FRAME_FORMAT_MJPEG;
+							desc->format =
+							    FORMAT_YUV444_UINT8;
+							desc->sampling =
+							    SAMPLING_UPSAMPLED;
 							sd_count++;
 							desc++;
 
-							desc->uvc_device_index=device_index;
-							desc->rate = *frame_duration;
-							source_descriptor_from_uvc_descriptor(desc,uvc_device_descriptor,frame_desc);
-							desc->stream_format = UVC_FRAME_FORMAT_MJPEG;
-							desc->format = FORMAT_Y_UINT8;
-                            desc->sampling = SAMPLING_DOWNSAMPLED;
-                            sd_count++;
-                            desc++;
+							desc->uvc_device_index =
+							    device_index;
+							desc->rate =
+							    *frame_duration;
+							source_descriptor_from_uvc_descriptor(
+							    desc,
+							    uvc_device_descriptor,
+							    frame_desc);
+							desc->stream_format =
+							    UVC_FRAME_FORMAT_MJPEG;
+							desc->format =
+							    FORMAT_Y_UINT8;
+							desc->sampling =
+							    SAMPLING_DOWNSAMPLED;
+							sd_count++;
+							desc++;
 						}
-
-
 					}
-					frame_duration++; //incrementing pointer
+					frame_duration++; // next interval in
+					                  // the list
 				}
-				frame_desc=frame_desc->next;
+				frame_desc = frame_desc->next;
 			}
-			format_desc=format_desc->next;
+			format_desc = format_desc->next;
 		}
 		uvc_close(temp_handle);
 	}
-	//this crashes - i guess we only care about closing if we have started streaming
+	// this crashes - I guess we only care about closing if we have
+	// started streaming
 	//
-	//uvc_free_device_descriptor(uvc_device_descriptor);
-	printf("RETURNING %d\n",sd_count);
+	// uvc_free_device_descriptor(uvc_device_descriptor);
+	printf("RETURNING %d\n", sd_count);
 	return sd_count;
 }
 
-bool source_descriptor_from_uvc_descriptor(uvc_source_descriptor_t* source_descriptor, uvc_device_descriptor_t* uvc_device_descriptor,uvc_frame_desc_t* uvc_frame_descriptor){
-	snprintf(source_descriptor->name,128,"%s %s %s %04x:%04x",uvc_device_descriptor->manufacturer,uvc_device_descriptor->product,uvc_device_descriptor->serialNumber,uvc_device_descriptor->idProduct,uvc_device_descriptor->idVendor);
-	source_descriptor->name[127]=0;
+bool
+source_descriptor_from_uvc_descriptor(
+    uvc_source_descriptor_t* source_descriptor,
+    uvc_device_descriptor_t* uvc_device_descriptor,
+    uvc_frame_desc_t* uvc_frame_descriptor)
+{
+	snprintf(
+	    source_descriptor->name, 128, "%s %s %s %04x:%04x",
+	    uvc_device_descriptor->manufacturer, uvc_device_descriptor->product,
+	    uvc_device_descriptor->serialNumber,
+	    uvc_device_descriptor->idProduct, uvc_device_descriptor->idVendor);
+	source_descriptor->name[127] = 0;
 	source_descriptor->product_id = uvc_device_descriptor->idProduct;
 	source_descriptor->vendor_id = uvc_device_descriptor->idVendor;
-	//TODO check lengths
-	if (uvc_device_descriptor->serialNumber){
-		memcpy(source_descriptor->serial,uvc_device_descriptor->serialNumber,strlen(uvc_device_descriptor->serialNumber)+1);
+	// the serial field is 128 bytes; use a bounded copy and
+	// terminate explicitly below
+	if (uvc_device_descriptor->serialNumber) {
+		snprintf(source_descriptor->serial, 128, "%s",
+		         uvc_device_descriptor->serialNumber);
 	} else {
-		sprintf(source_descriptor->serial,"NONE");
+		sprintf(source_descriptor->serial, "NONE");
 	}
-	source_descriptor->serial[127]=0;
+	source_descriptor->serial[127] = 0;
 	source_descriptor->width = uvc_frame_descriptor->wWidth;
 	source_descriptor->height = uvc_frame_descriptor->wHeight;
-
+	// declared bool, so report success explicitly
+	return true;
 }
-
diff --git a/src/xrt/drivers/montrack/frameservers/uvc/uvc_frameserver.h b/src/xrt/drivers/montrack/frameservers/uvc/uvc_frameserver.h
index 8f6379ce9d73023076d56994312fedba720383e2..10c978b445f4635dc5636a3bfe5df5c4fa538d1b 100644
--- a/src/xrt/drivers/montrack/frameservers/uvc/uvc_frameserver.h
+++ b/src/xrt/drivers/montrack/frameservers/uvc/uvc_frameserver.h
@@ -4,7 +4,7 @@
 /* Almost all of the ground covered here would be covered
  * by the v4l2 frameserver on linux, but uvc may be the
  * simplest approach for cross-platform e.g. OS X
-*/
+ */
 
 #include <stdint.h>
 #include <stdio.h>
@@ -13,20 +13,22 @@
 #include <libuvc/libuvc.h>
 #include <pthread.h>
 
-//we need this to do a bit of hackery with multiple opens/closes
-struct uvc_context {
-  /** Underlying context for USB communication */
-  struct libusb_context *usb_ctx;
-  /** True iff libuvc initialized the underlying USB context */
-  uint8_t own_usb_ctx;
-  /** List of open devices in this context */
-  uvc_device_handle_t *open_devices;
-  pthread_t handler_thread;
-  int kill_handler_thread;
+// we need this to do a bit of hackery with multiple opens/closes
+struct uvc_context
+{
+	/** Underlying context for USB communication */
+	struct libusb_context* usb_ctx;
+	/** True iff libuvc initialized the underlying USB context */
+	uint8_t own_usb_ctx;
+	/** List of open devices in this context */
+	uvc_device_handle_t* open_devices;
+	pthread_t handler_thread;
+	int kill_handler_thread;
 };
 
-//TODO: unify device descriptors across apis
-typedef struct uvc_source_descriptor {
+// TODO: unify device descriptors across apis
+typedef struct uvc_source_descriptor
+{
 	char name[128];
 	uint16_t vendor_id;
 	uint16_t product_id;
@@ -35,23 +37,24 @@ typedef struct uvc_source_descriptor {
 	uint32_t uvc_device_index;
 	enum uvc_frame_format stream_format;
 	frame_format_t format;
-    sampling_t sampling;
+	sampling_t sampling;
 	uint32_t width;
 	uint32_t height;
 	uint32_t rate;
 } uvc_source_descriptor_t;
 
-typedef struct uvc_frameserver_instance {
-    uvc_device_t** device_list;
-    uvc_context_t* context;
+typedef struct uvc_frameserver_instance
+{
+	uvc_device_t** device_list;
+	uvc_context_t* context;
 	uvc_device_t* device;
 	uvc_device_handle_t* device_handle;
 	uvc_stream_handle_t* stream_handle;
 	uvc_stream_ctrl_t stream_ctrl;
 	frame_consumer_callback_func frame_target_callback;
 	event_consumer_callback_func event_target_callback;
-	void* frame_target_instance; //where we send our frames
-	void* event_target_instance; //where we send our events
+	void* frame_target_instance; // where we send our frames
+	void* event_target_instance; // where we send our events
 	uvc_source_descriptor_t source_descriptor;
 	pthread_t stream_thread;
 	capture_parameters_t capture_params;
@@ -61,22 +64,55 @@ typedef struct uvc_frameserver_instance {
 
 
 
-uvc_frameserver_instance_t* uvc_frameserver_create(frameserver_instance_t* inst);
-bool uvc_frameserver_destroy(frameserver_instance_t* inst);
-bool uvc_source_alloc(uvc_source_descriptor_t* desc);
-bool uvc_source_destroy(uvc_source_descriptor_t* desc);
-bool uvc_frameserver_configure_capture(frameserver_instance_t* inst, capture_parameters_t cp);
-bool uvc_frameserver_enumerate_sources(frameserver_instance_t*, uvc_source_descriptor_t* sources, uint32_t* count);
-bool uvc_frameserver_get(frameserver_instance_t* inst, frame_t* _frame);
-void uvc_frameserver_register_event_callback(frameserver_instance_t* inst, void* target_instance,event_consumer_callback_func target_func);
-void uvc_frameserver_register_frame_callback(frameserver_instance_t* inst, void* target_instance,frame_consumer_callback_func target_func);
-bool uvc_frameserver_seek(frameserver_instance_t* inst, uint64_t timestamp);
-bool uvc_frameserver_stream_start(frameserver_instance_t* inst,uvc_source_descriptor_t* source);
-bool uvc_frameserver_stream_stop(frameserver_instance_t* inst);
-bool uvc_frameserver_is_running(frameserver_instance_t* inst);
-bool uvc_frameserver_test();
+uvc_frameserver_instance_t*
+uvc_frameserver_create(frameserver_instance_t* inst);
+bool
+uvc_frameserver_destroy(frameserver_instance_t* inst);
+bool
+uvc_source_alloc(uvc_source_descriptor_t* desc);
+bool
+uvc_source_destroy(uvc_source_descriptor_t* desc);
+bool
+uvc_frameserver_configure_capture(frameserver_instance_t* inst,
+                                  capture_parameters_t cp);
+bool
+uvc_frameserver_enumerate_sources(frameserver_instance_t*,
+                                  uvc_source_descriptor_t* sources,
+                                  uint32_t* count);
+bool
+uvc_frameserver_get(frameserver_instance_t* inst, frame_t* _frame);
+void
+uvc_frameserver_register_event_callback(
+    frameserver_instance_t* inst,
+    void* target_instance,
+    event_consumer_callback_func target_func);
+void
+uvc_frameserver_register_frame_callback(
+    frameserver_instance_t* inst,
+    void* target_instance,
+    frame_consumer_callback_func target_func);
+bool
+uvc_frameserver_seek(frameserver_instance_t* inst, uint64_t timestamp);
+bool
+uvc_frameserver_stream_start(frameserver_instance_t* inst,
+                             uvc_source_descriptor_t* source);
+bool
+uvc_frameserver_stream_stop(frameserver_instance_t* inst);
+bool
+uvc_frameserver_is_running(frameserver_instance_t* inst);
+bool
+uvc_frameserver_test();
 
-static void uvc_frameserver_stream_run(frameserver_instance_t* inst);  //streaming thread entrypoint
-static uint32_t  uvc_frameserver_get_source_descriptors(uvc_source_descriptor_t** sds,uvc_device_t* device,uint32_t uvc_device_index);
-static bool source_descriptor_from_uvc_descriptor(uvc_source_descriptor_t* source_descriptor, uvc_device_descriptor_t* uvc_device_descriptor, uvc_frame_desc_t* uvc_frame_descriptor);
-#endif //UVC_FRAMESERVER_H
+static void
+uvc_frameserver_stream_run(
+    frameserver_instance_t* inst); // streaming thread entrypoint
+static uint32_t
+uvc_frameserver_get_source_descriptors(uvc_source_descriptor_t** sds,
+                                       uvc_device_t* device,
+                                       uint32_t uvc_device_index);
+static bool
+source_descriptor_from_uvc_descriptor(
+    uvc_source_descriptor_t* source_descriptor,
+    uvc_device_descriptor_t* uvc_device_descriptor,
+    uvc_frame_desc_t* uvc_frame_descriptor);
+#endif // UVC_FRAMESERVER_H
diff --git a/src/xrt/drivers/montrack/frameservers/v4l2/v4l2_frameserver.c b/src/xrt/drivers/montrack/frameservers/v4l2/v4l2_frameserver.c
index 33abf77584fcb78f0899314e7f1c86fef105ef38..785a78802830ae3b020037e3bc22571e25b08b74 100644
--- a/src/xrt/drivers/montrack/frameservers/v4l2/v4l2_frameserver.c
+++ b/src/xrt/drivers/montrack/frameservers/v4l2/v4l2_frameserver.c
@@ -12,79 +12,102 @@
 #include "unistd.h"
 
 
-uint32_t v4l2_frameserver_get_source_descriptors(v4l2_source_descriptor_t** sds,char* v4l2_device, uint32_t device_index);
+uint32_t
+v4l2_frameserver_get_source_descriptors(v4l2_source_descriptor_t** sds,
+                                        char* v4l2_device,
+                                        uint32_t device_index);
 
-bool v4l2_source_create(v4l2_source_descriptor_t* desc)
+bool
+v4l2_source_create(v4l2_source_descriptor_t* desc)
 {
-   // do nothing right now
-    return true;
+	// do nothing right now
+	return true;
 }
 
-bool v4l2_source_destroy(v4l2_source_descriptor_t* desc) {
-    if (desc->device_path) {
-        free (desc->device_path);
-    }
-    return true;
+bool
+v4l2_source_destroy(v4l2_source_descriptor_t* desc)
+{
+	if (desc->device_path) {
+		free(desc->device_path);
+	}
+	return true;
 }
 
-v4l2_frameserver_instance_t* v4l2_frameserver_create(frameserver_instance_t* inst) {
-    v4l2_frameserver_instance_t* i = calloc(sizeof(v4l2_frameserver_instance_t),1);
-    if (i) {
-        return i;
-    }
-    return false;
+v4l2_frameserver_instance_t*
+v4l2_frameserver_create(frameserver_instance_t* inst)
+{
+	v4l2_frameserver_instance_t* i =
+	    calloc(1, sizeof(v4l2_frameserver_instance_t));
+	if (i) {
+		return i;
+	}
+	return NULL; // pointer return, not bool
 }
-bool v4l2_frameserver_destroy(frameserver_instance_t* inst) {
-	if (inst->internal_instance){
+bool
+v4l2_frameserver_destroy(frameserver_instance_t* inst)
+{
+	if (inst->internal_instance) {
 		free(inst->internal_instance);
 		return true;
 	}
 	return false;
 }
 
-bool v4l2_frameserver_enumerate_sources(frameserver_instance_t* inst, v4l2_source_descriptor_t* sources, uint32_t* count)
+bool
+v4l2_frameserver_enumerate_sources(frameserver_instance_t* inst,
+                                   v4l2_source_descriptor_t* sources,
+                                   uint32_t* count)
 {
 	v4l2_frameserver_instance_t* internal = inst->internal_instance;
 
-	char device_files[64][256]; // max of 64 video4linux devices supported TODO: maybe 256 too small
-	char* base_path="/dev/v4l/by-id"; //TODO: does this path work everywhere?
+	char device_files[64][256]; // max of 64 video4linux devices supported
+	                            // TODO: 256 may be too small
+	char* base_path =
+	    "/dev/v4l/by-id"; // TODO: does this path work everywhere?
 	DIR* dir;
 	struct dirent* dentry;
 
-	dir = opendir (base_path);
-	if (! dir) {
-	printf("ERROR: could not open %s\n",base_path);
-	return false;
+	dir = opendir(base_path);
+	if (!dir) {
+		printf("ERROR: could not open %s\n", base_path);
+		return false;
 	}
 
-	uint32_t device_count =0;
-	while ((dentry = readdir (dir)) != NULL ) {
-		if(strcmp(dentry->d_name,".") !=0 && strcmp(dentry->d_name,"..") !=0){
-		snprintf (device_files[device_count],256,"%s/%s", base_path, dentry->d_name); //TODO: hardcoded 256
-		device_count++;
+	uint32_t device_count = 0;
+	while ((dentry = readdir(dir)) != NULL) {
+		if (strcmp(dentry->d_name, ".") != 0 &&
+		    strcmp(dentry->d_name, "..") != 0) {
+			snprintf(device_files[device_count], 256, "%s/%s",
+			         base_path,
+			         dentry->d_name); // TODO: hardcoded 256
+			device_count++;
 		}
 	}
-	closedir (dir);
+	closedir(dir);
 
 	uint32_t source_count = 0;
 
-	if (sources == NULL)
-	{
-		for (uint32_t i=0;i<device_count;i++){
-			v4l2_source_descriptor_t* temp_sds_count= NULL;
-			uint32_t c = v4l2_frameserver_get_source_descriptors(&temp_sds_count,device_files[i],i);
-			source_count+=c;
+	if (sources == NULL) {
+		for (uint32_t i = 0; i < device_count; i++) {
+			v4l2_source_descriptor_t* temp_sds_count = NULL;
+			uint32_t c = v4l2_frameserver_get_source_descriptors(
+			    &temp_sds_count, device_files[i], i);
+			source_count += c;
 		}
 		*count = source_count;
-		printf("counting available source descriptors - %d\n",source_count);
+		printf("counting available source descriptors - %d\n",
+		       source_count);
 		return true;
 	}
 
-	//our caller should now have alloced the array of source descriptors, fill them out
+	// our caller should now have allocated the array of source
+	// descriptors, fill them out
 
-	for (uint32_t i=0;i<device_count;i++){
-		v4l2_source_descriptor_t* device_sources = sources+source_count;
-		uint32_t c = v4l2_frameserver_get_source_descriptors(&device_sources,device_files[i],i);
+	for (uint32_t i = 0; i < device_count; i++) {
+		v4l2_source_descriptor_t* device_sources =
+		    sources + source_count;
+		uint32_t c = v4l2_frameserver_get_source_descriptors(
+		    &device_sources, device_files[i], i);
 		source_count += c;
 	}
 	*count = source_count;
@@ -92,17 +115,28 @@ bool v4l2_frameserver_enumerate_sources(frameserver_instance_t* inst, v4l2_sourc
 	return true;
 }
 
-bool v4l2_frameserver_configure_capture(frameserver_instance_t* inst, capture_parameters_t cp) {
+bool
+v4l2_frameserver_configure_capture(frameserver_instance_t* inst,
+                                   capture_parameters_t cp)
+{
 	return true;
 }
 
-void v4l2_frameserver_register_event_callback(frameserver_instance_t* inst, void* target_instance,event_consumer_callback_func target_func)
+void
+v4l2_frameserver_register_event_callback(
+    frameserver_instance_t* inst,
+    void* target_instance,
+    event_consumer_callback_func target_func)
 {
-	//do nothing
+	// do nothing
 }
 
 
-void v4l2_frameserver_register_frame_callback(frameserver_instance_t* inst, void* target_instance,frame_consumer_callback_func target_func)
+void
+v4l2_frameserver_register_frame_callback(
+    frameserver_instance_t* inst,
+    void* target_instance,
+    frame_consumer_callback_func target_func)
 {
 	v4l2_frameserver_instance_t* internal = inst->internal_instance;
 	internal->frame_target_instance = target_instance;
@@ -110,156 +144,167 @@ void v4l2_frameserver_register_frame_callback(frameserver_instance_t* inst, void
 }
 
 
-bool v4l2_frameserver_get(frameserver_instance_t* inst, frame_t* frame) {
+bool
+v4l2_frameserver_get(frameserver_instance_t* inst, frame_t* frame)
+{
 	return false;
 }
 
-bool v4l2_frameserver_seek(frameserver_instance_t* inst, uint64_t timestamp) {
-//do nothing
-return false;
+bool
+v4l2_frameserver_seek(frameserver_instance_t* inst, uint64_t timestamp)
+{
+	// do nothing
+	return false;
 }
 
-bool v4l2_frameserver_stream_start(frameserver_instance_t* inst, v4l2_source_descriptor_t* source){
+bool
+v4l2_frameserver_stream_start(frameserver_instance_t* inst,
+                              v4l2_source_descriptor_t* source)
+{
 	v4l2_frameserver_instance_t* internal = inst->internal_instance;
 	internal->source_descriptor = *source;
 	internal->is_running = true;
-	if(pthread_create(&internal->stream_thread, NULL, v4l2_frameserver_stream_run, inst)) {
-	printf("ERROR: could not create thread\n");
-	return false;
+	if (pthread_create(&internal->stream_thread, NULL,
+	                   v4l2_frameserver_stream_run, inst)) {
+		printf("ERROR: could not create thread\n");
+		return false;
 	}
-	//we're off to the races!
+	// we're off to the races!
 	return true;
 }
 
 
-void v4l2_frameserver_stream_run(frameserver_instance_t* inst) {
+void
+v4l2_frameserver_stream_run(frameserver_instance_t* inst)
+{
 	v4l2_frameserver_instance_t* internal = inst->internal_instance;
-	//our jpeg decoder stuff
+	// our jpeg decoder stuff
 	struct jpeg_decompress_struct cinfo;
 	struct jpeg_error_mgr jerr;
 
 	int fd = open(internal->source_descriptor.device_path, O_RDWR, 0);
-	if (fd== -1) {
-		printf("ERROR Cannot open '%s %d %s\n",internal->source_descriptor.device_path,errno, strerror(errno));
+	if (fd == -1) {
+		printf("ERROR Cannot open '%s %d %s\n",
+		       internal->source_descriptor.device_path, errno,
+		       strerror(errno));
 		return;
 	}
 
-	//set up our capture format
+	// set up our capture format
 
 	struct v4l2_format v_format;
-	memset(&v_format,0,sizeof(v_format));
+	memset(&v_format, 0, sizeof(v_format));
 	v_format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 	v_format.fmt.pix.width = internal->source_descriptor.width;
 	v_format.fmt.pix.height = internal->source_descriptor.height;
-	v_format.fmt.pix.pixelformat = internal->source_descriptor.stream_format;
+	v_format.fmt.pix.pixelformat =
+	    internal->source_descriptor.stream_format;
 	v_format.fmt.pix.field = V4L2_FIELD_ANY;
 	if (internal->source_descriptor.extended_format > 0) {
 		v_format.fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
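+		// the magic value signals to the kernel that the
+		// extended v4l2_pix_format fields are in use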
 	}
 
 
-	if (ioctl(fd,VIDIOC_S_FMT,&v_format) < 0)
-	{
+	if (ioctl(fd, VIDIOC_S_FMT, &v_format) < 0) {
 		printf("ERROR: could not set up format\n");
 		return;
 	}
 
-	//set up our buffers - prefer userptr (client alloc) vs mmap (kernel alloc)
-	//TODO: using buffer caps may be better than 'fallthrough to mmap'
+	// set up our buffers - prefer userptr (client alloc) vs mmap (kernel
+	// alloc)
+	// TODO: using buffer caps may be better than 'fallthrough to mmap'
 
 	bool capture_userptr = true;
 
 	struct v4l2_requestbuffers v_bufrequest;
-	memset(&v_bufrequest,0,sizeof(v_bufrequest));
+	memset(&v_bufrequest, 0, sizeof(v_bufrequest));
 	v_bufrequest.count = NUM_V4L2_BUFFERS;
 	v_bufrequest.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 	v_bufrequest.memory = V4L2_MEMORY_USERPTR;
 
-	if (ioctl(fd,VIDIOC_REQBUFS,&v_bufrequest) < 0)
-	{
+	if (ioctl(fd, VIDIOC_REQBUFS, &v_bufrequest) < 0) {
 		printf("INFO: driver does not handle userptr buffers\n");
 		v_bufrequest.memory = V4L2_MEMORY_MMAP;
-		capture_userptr=false;
-		if (ioctl(fd,VIDIOC_REQBUFS,&v_bufrequest) < 0)
-		{
+		capture_userptr = false;
+		if (ioctl(fd, VIDIOC_REQBUFS, &v_bufrequest) < 0) {
 			printf("ERROR: driver does not handle mmap buffers\n");
 			return;
 		}
 	}
 
-	//set up our buffers
+	// set up our buffers
 
 	void* mem[NUM_V4L2_BUFFERS];
 
 	struct v4l2_buffer v_buf;
-	    for (uint32_t i=0;i<NUM_V4L2_BUFFERS;i++) {
-			v_buf.index =i;
-			v_buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-			v_buf.memory = V4L2_MEMORY_USERPTR;
-			if (! capture_userptr) {
-				v_buf.memory = V4L2_MEMORY_MMAP;
-			}
-			if (ioctl(fd,VIDIOC_QUERYBUF,&v_buf) < 0)
-			{
-				printf("ERROR: could not query buffers!\n");
-				return;
-			}
+	for (uint32_t i = 0; i < NUM_V4L2_BUFFERS; i++) {
+		v_buf.index = i;
+		v_buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+		v_buf.memory = V4L2_MEMORY_USERPTR;
+		if (!capture_userptr) {
+			v_buf.memory = V4L2_MEMORY_MMAP;
+		}
+		if (ioctl(fd, VIDIOC_QUERYBUF, &v_buf) < 0) {
+			printf("ERROR: could not query buffers!\n");
+			return;
+		}
 
-			if (capture_userptr)
-			{
-				mem[i] = aligned_alloc(getpagesize(),v_buf.length); //align this to a memory page, v4l2 likes it that way
-				//mem[i] = malloc(v_buf.length);
-				if (! mem[i]) {
-					printf("ERROR: could not alloc page-aligned memory\n");
-					return;
-				}
-				v_buf.m.userptr = mem[i];
-			}
-			else
-			{
-				mem[i] = mmap(0,v_buf.length,PROT_READ,MAP_SHARED,fd,v_buf.m.offset);
-				if (mem[i] == MAP_FAILED) {
-					printf("ERROR: mmap failed!\n");
-					return;
-				}
+		if (capture_userptr) {
+			mem[i] = aligned_alloc(
+			    getpagesize(),
+			    v_buf.length); // align this to a memory page, v4l2
+			                   // likes it that way
+			// mem[i] = malloc(v_buf.length);
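+			// NOTE: C11 aligned_alloc expects the size to
+			// be a multiple of the alignment; glibc is
+			// lenient, but posix_memalign would be the
+			// portable choice here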
+			if (!mem[i]) {
+				printf(
+				    "ERROR: could not alloc page-aligned "
+				    "memory\n");
+				return;
 			}
-
-			///queue this buffer
-			if (ioctl(fd,VIDIOC_QBUF,&v_buf)< 0)
-			{
-				printf("ERROR: queueing buffer failed!\n");
+			v_buf.m.userptr = (unsigned long)mem[i];
+		} else {
+			mem[i] = mmap(0, v_buf.length, PROT_READ, MAP_SHARED,
+			              fd, v_buf.m.offset);
+			if (mem[i] == MAP_FAILED) {
+				printf("ERROR: mmap failed!\n");
 				return;
 			}
-
 		}
-		int start_capture = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-		if (ioctl(fd,VIDIOC_STREAMON,&start_capture) < 0) {
-			printf("ERROR: could not start capture!\n");
+
+		// queue this buffer
+		if (ioctl(fd, VIDIOC_QBUF, &v_buf) < 0) {
+			printf("ERROR: queueing buffer failed!\n");
 			return;
 		}
+	}
+	int start_capture = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	if (ioctl(fd, VIDIOC_STREAMON, &start_capture) < 0) {
+		printf("ERROR: could not start capture!\n");
+		return;
+	}
 
 	uint8_t* cropped_buffer = NULL;
-	frame_t f = {}; //we dequeue buffers into this frame in our main loop
+	frame_t f = {}; // we dequeue buffers into this frame in our main loop
 	if (internal->source_descriptor.crop_scanline_bytes_start > 0) {
-		uint32_t alloc_size = internal->source_descriptor.crop_width * internal->source_descriptor.height * format_bytes_per_pixel(internal->source_descriptor.format);
+		uint32_t alloc_size =
+		    internal->source_descriptor.crop_width *
+		    internal->source_descriptor.height *
+		    format_bytes_per_pixel(internal->source_descriptor.format);
 		cropped_buffer = malloc(alloc_size);
-		if (! cropped_buffer){
+		if (!cropped_buffer) {
 			printf("ERROR: could not alloc memory!");
 			exit(0);
 		}
 	}
 	f.source_id = internal->source_descriptor.source_id;
 	switch (internal->source_descriptor.stream_format) {
-	    case V4L2_PIX_FMT_YUYV:
-		    f.format = FORMAT_YUYV_UINT8;
-		    break;
-	    case V4L2_PIX_FMT_JPEG:
-		    f.format = FORMAT_JPG; //this will get reset to YUV444
-			cinfo.err = jpeg_std_error(&jerr);
-			jpeg_create_decompress(&cinfo);
-		    break;
-	    default:
-		    printf("ERROR: unhandled format!\n");
+	case V4L2_PIX_FMT_YUYV: f.format = FORMAT_YUYV_UINT8; break;
+	case V4L2_PIX_FMT_JPEG:
+		f.format = FORMAT_JPG; // this will get reset to YUV444
+		cinfo.err = jpeg_std_error(&jerr);
+		jpeg_create_decompress(&cinfo);
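+		// the decompressor is created once here and reused for every
+		// frame; jpeg_mem_src points it at each dequeued buffer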
+		break;
+	default: printf("ERROR: unhandled format!\n");
 	}
 
-	frame_t sampled_frame;
+	frame_t sampled_frame = {};
+	uint8_t* plane_buffer = NULL; // lazily-allocated conversion buffer,
+	                              // reused across frames - the stream
+	                              // format is fixed, so its size is too
@@ -269,381 +314,542 @@ void v4l2_frameserver_stream_run(frameserver_instance_t* inst) {
 
 	while (internal->is_running) {
 
-		//if our config is invalidated at runtime, reconfigure
-		if (! internal->is_configured) {
-			//defaults - auto-anything off
-			//uvc_set_ae_mode(internal->device_handle, 1);
-			//uvc_set_ae_priority(internal->device_handle,0);
-			//we may need to enumerate the control range..
-			//uvc_set_exposure_abs(internal->device_handle,internal->capture_params.exposure * 2048);
-			//uvc_set_gain(internal->device_handle,internal->capture_params.gain * 10);
+		// if our config is invalidated at runtime, reconfigure
+		if (!internal->is_configured) {
+			// defaults - auto-anything off
+			// uvc_set_ae_mode(internal->device_handle, 1);
+			// uvc_set_ae_priority(internal->device_handle,0);
+			// we may need to enumerate the control range..
+			// uvc_set_exposure_abs(internal->device_handle,
+			//     internal->capture_params.exposure * 2048);
+			// uvc_set_gain(internal->device_handle,
+			//     internal->capture_params.gain * 10);
 			internal->is_configured = true;
 		}
-		//dequeue our frame, process it, requeue it.
+		// dequeue our frame, process it, requeue it.
 
-		if (ioctl(fd,VIDIOC_DQBUF,&v_buf) < 0) {
+		if (ioctl(fd, VIDIOC_DQBUF, &v_buf) < 0) {
 			printf("dequeue failed\n");
 		} else {
-			//printf("dequeue succeeded %d used %d of %d \n",v_buf.index,v_buf.bytesused,v_buf.length);
-			if (internal->source_descriptor.crop_scanline_bytes_start > 0) {
-				//we need to crop our stream frame into a new buffer
+			// printf("dequeue succeeded %d used %d of %d\n",
+			//     v_buf.index, v_buf.bytesused, v_buf.length);
+			if (internal->source_descriptor
+			        .crop_scanline_bytes_start > 0) {
+				// we need to crop our stream frame into a new
+				// buffer
 				uint32_t stream_bytes_per_pixel = 0;
-				switch (internal->source_descriptor.stream_format) {
-				    case V4L2_PIX_FMT_YUYV:
-					    stream_bytes_per_pixel = 2;
-					    break;
-				    default:
-					    printf("ERROR: No crop support for non-YUYV stream formats\n");
-						exit(0);
+				switch (
+				    internal->source_descriptor.stream_format) {
+				case V4L2_PIX_FMT_YUYV:
+					stream_bytes_per_pixel = 2;
+					break;
+				default:
+					printf(
+					    "ERROR: No crop support for "
+					    "non-YUYV stream formats\n");
+					exit(1);
 				}
 
-				uint32_t raw_stride = internal->source_descriptor.width * stream_bytes_per_pixel;
-				uint32_t cropped_stride = internal->source_descriptor.crop_width * stream_bytes_per_pixel;
+				uint32_t raw_stride =
+				    internal->source_descriptor.width *
+				    stream_bytes_per_pixel;
+				uint32_t cropped_stride =
+				    internal->source_descriptor.crop_width *
+				    stream_bytes_per_pixel;
 				uint8_t* bufptr = mem[v_buf.index];
-				for (uint32_t i=0;i < internal->source_descriptor.height;i++) {
-					uint8_t* dstptr = cropped_buffer + (i*cropped_stride);
-					uint8_t* srcptr = bufptr + (i* raw_stride) + internal->source_descriptor.crop_scanline_bytes_start;
-					//printf("dstptr %d srcptr %d\n ",(i*cropped_stride),(i* raw_stride) + internal->source_descriptor.crop_scanline_bytes_start);
-					memcpy(dstptr,srcptr,cropped_stride);
+				for (uint32_t i = 0;
+				     i < internal->source_descriptor.height;
+				     i++) {
+					uint8_t* dstptr = cropped_buffer +
+					                  (i * cropped_stride);
+					uint8_t* srcptr =
+					    bufptr + (i * raw_stride) +
+					    internal->source_descriptor
+					        .crop_scanline_bytes_start;
+					// printf("dstptr %d srcptr %d\n",
+					//     (i * cropped_stride),
+					//     (i * raw_stride) +
+					//     internal->source_descriptor
+					//         .crop_scanline_bytes_start);
+					memcpy(dstptr, srcptr, cropped_stride);
 				}
-				//fix up the frame we supply to the consumer
-				f.width = internal->source_descriptor.crop_width;
+				// fix up the frame we supply to the consumer
+				f.width =
+				    internal->source_descriptor.crop_width;
 				f.height = internal->source_descriptor.height;
-				//reasonable default - will get reset
-				f.stride = internal->source_descriptor.crop_width * format_bytes_per_pixel(internal->source_descriptor.format);
+				// reasonable default - will get reset
+				f.stride =
+				    internal->source_descriptor.crop_width *
+				    format_bytes_per_pixel(
+				        internal->source_descriptor.format);
 				f.size_bytes = cropped_stride * f.height;
-				f.data=cropped_buffer;
+				f.data = cropped_buffer;
 			} else {
-				//process frame
+				// process frame
 				f.width = internal->source_descriptor.width;
 				f.height = internal->source_descriptor.height;
-				//reasonable default - will get reset
-				f.stride= internal->source_descriptor.width * format_bytes_per_pixel(internal->source_descriptor.format);
+				// reasonable default - will get reset
+				f.stride =
+				    internal->source_descriptor.width *
+				    format_bytes_per_pixel(
+				        internal->source_descriptor.format);
 				f.size_bytes = v_buf.bytesused;
 				f.data = mem[v_buf.index];
 			}
 
 			switch (internal->source_descriptor.stream_format) {
-			    case V4L2_PIX_FMT_JPEG:
-				    //immediately set this to YUV444 as this is what we decode to.
-				    f.format = FORMAT_YUV444_UINT8;
-					f.stride = f.width * 3; //jpg format does not supply stride
-					//decode our jpg frame.
-					if (! temp_data) {
-						temp_data=malloc(frame_size_in_bytes(&f));
+			case V4L2_PIX_FMT_JPEG:
+				// immediately set this to YUV444 as this is
+				// what we decode to.
+				f.format = FORMAT_YUV444_UINT8;
+				f.stride =
+				    f.width *
+				    3; // jpg format does not supply stride
+				// decode our jpg frame.
+				if (!temp_data) {
+					temp_data =
+					    malloc(frame_size_in_bytes(&f));
+				}
+				jpeg_mem_src(&cinfo, mem[v_buf.index],
+				             v_buf.bytesused);
+				jpeg_read_header(&cinfo, TRUE);
+				// we will bypass colour conversion as we want
+				// YUV
+				cinfo.out_color_space = cinfo.jpeg_color_space;
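+				// keeping out_color_space at the stored colour
+				// space makes libjpeg skip YCbCr->RGB, so we
+				// get interleaved Y/Cb/Cr scanlines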
+				jpeg_start_decompress(&cinfo);
+				uint32_t scanlines_read = 0;
+				data_ptr = temp_data;
+				while (scanlines_read < cinfo.image_height) {
+					// we pass a single row pointer, so
+					// max_lines must be 1
+					int read_count = jpeg_read_scanlines(
+					    &cinfo, &data_ptr, 1);
+					data_ptr +=
+					    read_count *
+					    internal->source_descriptor.width *
+					    3;
+					scanlines_read += read_count;
+				}
+				f.data = temp_data;
+				jpeg_finish_decompress(&cinfo);
+
+				switch (internal->source_descriptor.format) {
+				case FORMAT_Y_UINT8:
+					// split our Y plane out
+					sampled_frame = f; // copy our buffer
+					                   // frame's attributes
+					// reuse the plane buffer allocated on
+					// an earlier frame instead of leaking
+					// one malloc per frame
+					sampled_frame.data = plane_buffer;
+					sampled_frame.format = FORMAT_Y_UINT8;
+					sampled_frame.stride = f.width;
+					sampled_frame.size_bytes =
+					    frame_size_in_bytes(&sampled_frame);
+
+					if (!sampled_frame.data) {
+						sampled_frame.data = malloc(
+						    sampled_frame.size_bytes);
+						plane_buffer =
+						    sampled_frame.data;
+					}
+
+					frame_extract_plane(&f, PLANE_Y,
+					                    &sampled_frame);
+
+					if (internal->frame_target_callback) {
+						internal->frame_target_callback(
+						    internal
+						        ->frame_target_instance,
+						    &sampled_frame);
 					}
-					jpeg_mem_src(&cinfo,mem[v_buf.index],v_buf.bytesused);
-					jpeg_read_header(&cinfo, TRUE);
-					//we will bypass colour conversion as we want YUV
-					cinfo.out_color_space = cinfo.jpeg_color_space;
-					jpeg_start_decompress(&cinfo);
-					uint32_t scanlines_read = 0;
-					data_ptr=temp_data;
-					while (scanlines_read < cinfo.image_height ) {
-						 int read_count = jpeg_read_scanlines(&cinfo,&data_ptr,16);
-						data_ptr += read_count * internal->source_descriptor.width*3;
-						scanlines_read += read_count;
+					break;
+				default:
+					// supply our YUV444 directly
+					if (internal->frame_target_callback) {
+						internal->frame_target_callback(
+						    internal
+						        ->frame_target_instance,
+						    &f);
 					}
-					f.data = temp_data;
-					jpeg_finish_decompress(&cinfo);
-
-					switch (internal->source_descriptor.format) {
-					    case FORMAT_Y_UINT8:
-						    //split our Y plane out
-						    sampled_frame = f; //copy our buffer frames attributes
-							sampled_frame.data = NULL;
-							sampled_frame.format = FORMAT_Y_UINT8;
-							sampled_frame.stride = f.width;
-							sampled_frame.size_bytes = frame_size_in_bytes(&sampled_frame);
-
-							if (! sampled_frame.data) {
-								sampled_frame.data = malloc(sampled_frame.size_bytes);
-							}
-
-							frame_extract_plane(&f,PLANE_Y,&sampled_frame);
-
-							if (internal->frame_target_callback){
-								internal->frame_target_callback(internal->frame_target_instance,&sampled_frame);
-							}
-						    break;
-					    default:
-						    //supply our YUV444 directly
-						    if (internal->frame_target_callback){
-								internal->frame_target_callback(internal->frame_target_instance,&f);
-							}
+				}
+				break;
+			case V4L2_PIX_FMT_YUYV:
+				f.stride =
+				    f.width * 2; // 2 bytes per pixel for yuyv
+				switch (internal->source_descriptor.format) {
+				case FORMAT_Y_UINT8:
+					// split our Y plane out
+					sampled_frame = f; // copy our buffer
+					                   // frame's attributes
+					// reuse the plane buffer allocated on
+					// an earlier frame instead of leaking
+					// one malloc per frame
+					sampled_frame.data = plane_buffer;
+					sampled_frame.format = FORMAT_Y_UINT8;
+					sampled_frame.stride = f.width;
+					sampled_frame.size_bytes =
+					    frame_size_in_bytes(&sampled_frame);
+
+					if (!sampled_frame.data) {
+						sampled_frame.data = malloc(
+						    sampled_frame.size_bytes);
+						plane_buffer =
+						    sampled_frame.data;
 					}
-				    break;
-			    case V4L2_PIX_FMT_YUYV:
-				    f.stride = f.width *2; //2 bytes per pixel for yuyv
-					switch (internal->source_descriptor.format) {
-					    case FORMAT_Y_UINT8:
-						    //split our Y plane out
-						    sampled_frame = f; //copy our buffer frames attributes
-							sampled_frame.data = NULL;
-							sampled_frame.format = FORMAT_Y_UINT8;
-							sampled_frame.stride = f.width;
-							sampled_frame.size_bytes = frame_size_in_bytes(&sampled_frame);
-
-							if (! sampled_frame.data) {
-								sampled_frame.data = malloc(sampled_frame.size_bytes);
-							}
-
-							frame_extract_plane(&f,PLANE_Y,&sampled_frame);
-
-							if (internal->frame_target_callback){
-								internal->frame_target_callback(internal->frame_target_instance,&sampled_frame);
-							}
-						    break;
-					    case FORMAT_YUV444_UINT8:
-						    //upsample our YUYV to YUV444
-						    sampled_frame = f; //copy our buffer frames attributes
-							sampled_frame.data = NULL;
-							sampled_frame.format = FORMAT_YUV444_UINT8;
-							sampled_frame.stride = f.width * 3;
-							sampled_frame.size_bytes = frame_size_in_bytes(&sampled_frame);
-							//allocate on first access
-							if (! sampled_frame.data) {
-								sampled_frame.data = malloc(sampled_frame.size_bytes);
-							}
-							if (frame_resample(&f,&sampled_frame)) {
-								if (internal->frame_target_callback) {
-									internal->frame_target_callback(internal->frame_target_instance,&sampled_frame);
-								}
-							break;
-							}
-							printf("ERROR: could not resample frame from %d to %d\n",f.format,sampled_frame.format);
-						    break;
-					    default:
-						    //supply our YUYV directly
-						    if (internal->frame_target_callback){
-								internal->frame_target_callback(internal->frame_target_instance,&f);
-							}
-					    }
-				        break;
-			        default:
-				        printf("ERROR: Unknown stream format\n");
-			    }
-			driver_event_t e ={};
-			e.type =EVENT_FRAMESERVER_GOTFRAME;
-			if (internal->event_target_callback){
-				internal->event_target_callback(internal->event_target_instance,e);
+
+					frame_extract_plane(&f, PLANE_Y,
+					                    &sampled_frame);
+
+					if (internal->frame_target_callback) {
+						internal->frame_target_callback(
+						    internal
+						        ->frame_target_instance,
+						    &sampled_frame);
+					}
+					break;
+				case FORMAT_YUV444_UINT8:
+					// upsample our YUYV to YUV444
+					sampled_frame = f; // copy our buffer
+					                   // frame's attributes
+					// reuse the buffer from earlier frames
+					sampled_frame.data = plane_buffer;
+					sampled_frame.format =
+					    FORMAT_YUV444_UINT8;
+					sampled_frame.stride = f.width * 3;
+					sampled_frame.size_bytes =
+					    frame_size_in_bytes(&sampled_frame);
+					// allocate on first access
+					if (!sampled_frame.data) {
+						sampled_frame.data = malloc(
+						    sampled_frame.size_bytes);
+						plane_buffer =
+						    sampled_frame.data;
+					}
+					if (frame_resample(&f,
+					                   &sampled_frame)) {
+						if (internal
+						        ->frame_target_callback) {
+							internal->frame_target_callback(
+							    internal
+							        ->frame_target_instance,
+							    &sampled_frame);
+						}
+						break;
+					}
+					printf(
+					    "ERROR: could not resample frame "
+					    "from %d to %d\n",
+					    f.format, sampled_frame.format);
+					break;
+				default:
+					// supply our YUYV directly
+					if (internal->frame_target_callback) {
+						internal->frame_target_callback(
+						    internal
+						        ->frame_target_instance,
+						    &f);
+					}
+				}
+				break;
+			default: printf("ERROR: Unknown stream format\n");
+			}
+			driver_event_t e = {};
+			e.type = EVENT_FRAMESERVER_GOTFRAME;
+			if (internal->event_target_callback) {
+				internal->event_target_callback(
+				    internal->event_target_instance, e);
 			}
 
 
-			if (ioctl(fd,VIDIOC_QBUF,&v_buf) < 0) {
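+			// hand the buffer back to the driver so it can be
+			// refilled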
+			if (ioctl(fd, VIDIOC_QBUF, &v_buf) < 0) {
 				printf("requeue failed\n");
 			}
 		}
 	}
-	/*	res =  uvc_stream_get_frame	(internal->stream_handle, &frame,0);
-		if (res < 0) {
-			printf("ERROR: stream_get_frame %s\n",uvc_strerror(res));
-		} else {
-			if (frame) {
-				//printf("got frame\n");
+	/*	res = uvc_stream_get_frame(internal->stream_handle, &frame, 0);
+	        if (res < 0) {
+	                printf("ERROR: stream_get_frame %s\n",
+	                       uvc_strerror(res));
+	        } else {
+	                if (frame) {
+	                        //printf("got frame\n");
 
 
-			}
-		}
+	                }
+	        }
 	}
 	uvc_free_frame(frame);
 	if (temp_data){
-		free(temp_data);
-		temp_data=NULL;
+	        free(temp_data);
+	        temp_data=NULL;
 	}
 	if (sampled_frame.data) {
-		free (sampled_frame.data);
+	        free (sampled_frame.data);
 	}
 	return;*/
-
 }
 
-bool v4l2_frameserver_stream_stop(frameserver_instance_t* inst){
+bool
+v4l2_frameserver_stream_stop(frameserver_instance_t* inst)
+{
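+	// not implemented yet - this should signal the capture thread to exit
+	// and issue VIDIOC_STREAMOFF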
 	return false;
 }
 
 
-bool v4l2_frameserver_is_running(frameserver_instance_t* inst) {
-//do nothing
-return false;
+bool
+v4l2_frameserver_is_running(frameserver_instance_t* inst)
+{
+	// do nothing
+	return false;
 }
 
-uint32_t v4l2_frameserver_get_source_descriptors(v4l2_source_descriptor_t** sds,char* v4l2_device, uint32_t device_index) {
+uint32_t
+v4l2_frameserver_get_source_descriptors(v4l2_source_descriptor_t** sds,
+                                        char* v4l2_device,
+                                        uint32_t device_index)
+{
 
-	uint32_t sd_count=0;
+	uint32_t sd_count = 0;
 	struct v4l2_capability cap;
-	struct v4l2_format fmt;
-	//open the device, and check if is a video source
+	// open the device, and check if is a video source
 
 	int fd = open(v4l2_device, O_RDWR, 0);
-	if (fd== -1) {
-		printf("ERROR Cannot open '%s %d %s\n",v4l2_device,errno, strerror(errno));
+	if (fd == -1) {
+		printf("ERROR: Cannot open '%s': %d %s\n", v4l2_device, errno,
+		       strerror(errno));
 		return 0;
 	}
 
 	v4l2_source_descriptor_t* descriptor = *sds;
-	if (ioctl(fd, VIDIOC_QUERYCAP, &cap) ==0) {
+	if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0) {
 		if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
-			return 0; //not a video device
+			close(fd);
+			return 0; // not a video device
 		}
 		if (!(cap.capabilities & V4L2_CAP_STREAMING)) {
-			return 0; //cannot stream
+			close(fd);
+			return 0; // cannot stream
 		}
-		if (!(cap.capabilities & V4L2_CAP_TIMEPERFRAME)) {
-			printf("WARNING: device does not support setting frame intervals\n");
-		}
+		// V4L2_CAP_TIMEPERFRAME is a capture-parameter capability,
+		// not a device capability - query it via VIDIOC_G_PARM
+		struct v4l2_streamparm parm;
+		memset(&parm, 0, sizeof(parm));
+		parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+		if (ioctl(fd, VIDIOC_G_PARM, &parm) == 0 &&
+		    !(parm.parm.capture.capability & V4L2_CAP_TIMEPERFRAME)) {
+			printf(
+			    "WARNING: device does not support setting frame "
+			    "intervals\n");
+		}
-		if (*sds) { //skip this if we are just counting, descriptor will be NULL
+		if (*sds) { // skip this if we are just counting, descriptor
+			    // will be NULL
 			if ((cap.capabilities & V4L2_CAP_EXT_PIX_FORMAT)) {
-				descriptor->extended_format=1; //need to query for extended format info
+				descriptor->extended_format =
+				    1; // need to query for extended format info
 			}
 		}
 	}
 	struct v4l2_fmtdesc desc;
-	memset(&desc,0,sizeof(desc));
+	memset(&desc, 0, sizeof(desc));
 	desc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	
-	while (ioctl(fd,VIDIOC_ENUM_FMT,&desc) == 0) {
-		printf("FORMAT: %s %04x %d\n", desc.description,desc.pixelformat, desc.type);
+
+	while (ioctl(fd, VIDIOC_ENUM_FMT, &desc) == 0) {
+		printf("FORMAT: %s %04x %d\n", desc.description,
+		       desc.pixelformat, desc.type);
 		struct v4l2_frmsizeenum frame_size;
 		struct v4l2_frmivalenum frame_interval;
-		memset(&frame_size,0,sizeof(frame_size));
-		memset(&frame_size,0,sizeof(frame_interval));
+		memset(&frame_size, 0, sizeof(frame_size));
+		memset(&frame_interval, 0, sizeof(frame_interval));
 		frame_size.pixel_format = desc.pixelformat;
 		frame_size.index = 0;
 		while (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &frame_size) >= 0) {
 			if (frame_size.type == V4L2_FRMSIZE_TYPE_DISCRETE) {
-				    printf("%dx%d\n", frame_size.discrete.width,frame_size.discrete.height);
-				    frame_interval.pixel_format = frame_size.pixel_format;
-					frame_interval.width = frame_size.discrete.width;
-					frame_interval.height = frame_size.discrete.height;
-					frame_interval.index=0;
-					while(ioctl(fd,VIDIOC_ENUM_FRAMEINTERVALS,&frame_interval) >=0) {
-						float fps = frame_interval.discrete.denominator / frame_interval.discrete.numerator;
-						uint32_t rate = (frame_interval.discrete.numerator /  (float)frame_interval.discrete.denominator ) * 10000000;
-						printf("FPS: %f %d\n", fps,rate);
-
-						if (*sds) {
-							//only fill in this struct if we were not passed NULL
-
-							switch(desc.pixelformat){
-							case V4L2_PIX_FMT_YUYV:
-								descriptor->format = FORMAT_YUYV_UINT8;
-								descriptor->width = frame_interval.width;
-								descriptor->height = frame_interval.height;
-								descriptor->rate = rate;
-								descriptor->sampling = SAMPLING_NONE;
-								source_descriptor_from_v4l2(descriptor,v4l2_device,&cap,&desc);
-								descriptor++;
-
-								descriptor->format = FORMAT_YUV444_UINT8;
-								descriptor->width = frame_interval.width;
-								descriptor->height = frame_interval.height;
-								descriptor->rate = rate;
-								descriptor->sampling = SAMPLING_UPSAMPLED;
-								source_descriptor_from_v4l2(descriptor,v4l2_device,&cap,&desc);
-								descriptor++;
-
-								descriptor->format = FORMAT_Y_UINT8;
-								descriptor->width = frame_interval.width;
-								descriptor->height = frame_interval.height;
-								descriptor->rate = rate;
-								descriptor->sampling = SAMPLING_DOWNSAMPLED;
-								source_descriptor_from_v4l2(descriptor,v4l2_device,&cap,&desc);
-								descriptor++;
-								sd_count += 3;
-								break;
-							case V4L2_PIX_FMT_JPEG: //MJPEG stream format
-								descriptor->format = FORMAT_YUV444_UINT8;
-								descriptor->width = frame_interval.width;
-								descriptor->height = frame_interval.height;
-								descriptor->rate = rate;
-								descriptor->sampling = SAMPLING_UPSAMPLED;
-								source_descriptor_from_v4l2(descriptor,v4l2_device,&cap,&desc);
-								descriptor++;
-
-								descriptor->format = FORMAT_Y_UINT8;
-								descriptor->width = frame_interval.width;
-								descriptor->height = frame_interval.height;
-								descriptor->rate = rate;
-								descriptor->sampling = SAMPLING_DOWNSAMPLED;
-								source_descriptor_from_v4l2(descriptor,v4l2_device,&cap,&desc);
-								descriptor++;
-								sd_count +=2;
-								break;
-							default:
-								printf("ERROR: unknown pixelformat encountered\n");
-							}
-						} else {
-							//we just need the count of the sources we would create
-							switch(desc.pixelformat){
-							case 0x56595559: //YUYV stream format
-								sd_count += 3; //YUYV, YUV444, Y
-								break;
-							case 0x47504a4d: //MJPEG  stream format
-								sd_count += 2; //YUV444,Y
-								break;
-							default:
-								printf("ERROR: unknown pixelformat encountered\n");
-							}
+				printf("%dx%d\n", frame_size.discrete.width,
+				       frame_size.discrete.height);
+				frame_interval.pixel_format =
+				    frame_size.pixel_format;
+				frame_interval.width =
+				    frame_size.discrete.width;
+				frame_interval.height =
+				    frame_size.discrete.height;
+				frame_interval.index = 0;
+				while (ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS,
+				             &frame_interval) >= 0) {
+					float fps =
+					    (float)frame_interval.discrete
+					        .denominator /
+					    frame_interval.discrete.numerator;
+					uint32_t rate =
+					    (frame_interval.discrete.numerator /
+					     (float)frame_interval.discrete
+					         .denominator) *
+					    10000000;
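+					// rate is the frame interval in 100ns
+					// units (166666 ~= 60fps)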
+					printf("FPS: %f %d\n", fps, rate);
+
+					if (*sds) {
+						// only fill in this struct if
+						// we were not passed NULL
+
+						switch (desc.pixelformat) {
+						case V4L2_PIX_FMT_YUYV:
+							descriptor->format =
+							    FORMAT_YUYV_UINT8;
+							descriptor->width =
+							    frame_interval
+							        .width;
+							descriptor->height =
+							    frame_interval
+							        .height;
+							descriptor->rate = rate;
+							descriptor->sampling =
+							    SAMPLING_NONE;
+							source_descriptor_from_v4l2(
+							    descriptor,
+							    v4l2_device, &cap,
+							    &desc);
+							descriptor++;
+
+							descriptor->format =
+							    FORMAT_YUV444_UINT8;
+							descriptor->width =
+							    frame_interval
+							        .width;
+							descriptor->height =
+							    frame_interval
+							        .height;
+							descriptor->rate = rate;
+							descriptor->sampling =
+							    SAMPLING_UPSAMPLED;
+							source_descriptor_from_v4l2(
+							    descriptor,
+							    v4l2_device, &cap,
+							    &desc);
+							descriptor++;
+
+							descriptor->format =
+							    FORMAT_Y_UINT8;
+							descriptor->width =
+							    frame_interval
+							        .width;
+							descriptor->height =
+							    frame_interval
+							        .height;
+							descriptor->rate = rate;
+							descriptor->sampling =
+							    SAMPLING_DOWNSAMPLED;
+							source_descriptor_from_v4l2(
+							    descriptor,
+							    v4l2_device, &cap,
+							    &desc);
+							descriptor++;
+							sd_count += 3;
+							break;
+						case V4L2_PIX_FMT_JPEG: // MJPEG
+						                        // stream
+						                        // format
+							descriptor->format =
+							    FORMAT_YUV444_UINT8;
+							descriptor->width =
+							    frame_interval
+							        .width;
+							descriptor->height =
+							    frame_interval
+							        .height;
+							descriptor->rate = rate;
+							descriptor->sampling =
+							    SAMPLING_UPSAMPLED;
+							source_descriptor_from_v4l2(
+							    descriptor,
+							    v4l2_device, &cap,
+							    &desc);
+							descriptor++;
+
+							descriptor->format =
+							    FORMAT_Y_UINT8;
+							descriptor->width =
+							    frame_interval
+							        .width;
+							descriptor->height =
+							    frame_interval
+							        .height;
+							descriptor->rate = rate;
+							descriptor->sampling =
+							    SAMPLING_DOWNSAMPLED;
+							source_descriptor_from_v4l2(
+							    descriptor,
+							    v4l2_device, &cap,
+							    &desc);
+							descriptor++;
+							sd_count += 2;
+							break;
+						default:
+							printf(
+							    "ERROR: unknown "
+							    "pixelformat "
+							    "encountered\n");
+						}
+					} else {
+						// we just need the count of the
+						// sources we would create
+						switch (desc.pixelformat) {
+						case V4L2_PIX_FMT_YUYV:
+							sd_count +=
+							    3; // YUYV, YUV444,
+							       // Y
+							break;
+						case V4L2_PIX_FMT_MJPEG:
+							sd_count +=
+							    2; // YUV444, Y
+							break;
+						default:
+							printf(
+							    "ERROR: unknown "
+							    "pixelformat "
+							    "encountered\n");
 						}
-						frame_interval.index++;
 					}
-			    }
+					frame_interval.index++;
+				}
+			}
 			frame_size.index++;
-		    }
+		}
 		desc.index++;
-	    }
+	}
 
 	close(fd);
 	return sd_count;
 }
 
-bool v4l2_frameserver_test(){
+bool
+v4l2_frameserver_test()
+{
 	printf("Running V4L2 Frameserver Test\n");
-    v4l2_frameserver_instance_t instance;
-	if (! v4l2_frameserver_init(&instance))
-	{
+	v4l2_frameserver_instance_t instance;
+	if (!v4l2_frameserver_init(&instance)) {
 		printf("FAILURE: Could not init frameserver.\n");
 		return false;
 	}
-	uint32_t camera_count =0;
-	if (! v4l2_frameserver_enumerate_devices(&instance,NULL,&camera_count)) {
+	uint32_t camera_count = 0;
+	if (!v4l2_frameserver_enumerate_devices(&instance, NULL,
+	                                        &camera_count)) {
 		printf("FAILURE: Could not get camera count.\n");
 		return false;
 	}
-    v4l2_source_descriptor_t* camera_list = calloc(camera_count,sizeof(v4l2_source_descriptor_t));
-	if (! v4l2_frameserver_enumerate_devices(&instance, camera_list,&camera_count)) {
+	v4l2_source_descriptor_t* camera_list =
+	    calloc(camera_count, sizeof(v4l2_source_descriptor_t));
+	if (!v4l2_frameserver_enumerate_devices(&instance, camera_list,
+	                                        &camera_count)) {
 		printf("FAILURE: Could not get camera descriptors\n");
 		return false;
 	}
-	for (uint32_t i=0;i<camera_count;i++)
-	{
-		printf("%d camera name: %s\n",i,camera_list[i].name);
+	for (uint32_t i = 0; i < camera_count; i++) {
+		printf("%d camera name: %s\n", i, camera_list[i].name);
 	}
-	return true;
+	free(camera_list);
+	return true;
 }
 
-static bool source_descriptor_from_v4l2(v4l2_source_descriptor_t* descriptor, char* v4l2_device, struct v4l2_capability* cap,struct v4l2_fmtdesc* desc) {
+static bool
+source_descriptor_from_v4l2(v4l2_source_descriptor_t* descriptor,
+                            char* v4l2_device,
+                            struct v4l2_capability* cap,
+                            struct v4l2_fmtdesc* desc)
+{
 
-	strncpy(descriptor->device_path,v4l2_device,256); //TODO: hardcoded 256
-	descriptor->device_path[255]=0x0; //TODO: hardcoded 256
-	strncpy(descriptor->name,cap->driver,32);
-	descriptor->name[127]=0x0;
-	strncpy(descriptor->model,cap->card,32);
-	descriptor->model[127]=0x0;
+	strncpy(descriptor->device_path, v4l2_device,
+	        sizeof(descriptor->device_path) - 1); // TODO: might not be
+	                                              // long enough
+	descriptor->device_path[sizeof(descriptor->device_path) - 1] = 0;
+	strncpy(descriptor->name, (char*)cap->driver,
+	        sizeof(descriptor->name) - 1);
+	descriptor->name[sizeof(descriptor->name) - 1] = 0;
+	strncpy(descriptor->model, (char*)cap->card,
+	        sizeof(descriptor->model) - 1);
+	descriptor->model[sizeof(descriptor->model) - 1] = 0;
 	descriptor->stream_format = desc->pixelformat;
 
-	//special-case the PS4 Eye camera  - need to crop the main stereo image out
-	//of the composite (header+audio + main + interlaced) frame the driver
-	//produces
-	if (strcmp(cap->card,"USB Camera-OV580: USB Camera-OV") == 0) {
-		    descriptor->crop_scanline_bytes_start = 96;
-			descriptor->crop_width=2560; //assume highest res
-			if (descriptor->width < 900) {
-				descriptor->crop_width=640;
-			}
-			else if (descriptor->width < 2000) {
-				descriptor->crop_width = 1280;
-			}
-	    }
-    }
+	// special-case the PS4 Eye camera - we need to crop the main stereo
+	// image out of the composite (header + audio + main + interlaced)
+	// frame the driver produces
+	if (strcmp(cap->card, "USB Camera-OV580: USB Camera-OV") == 0) {
+		descriptor->crop_scanline_bytes_start = 96;
+		descriptor->crop_width = 2560; // assume highest res
+		if (descriptor->width < 900) {
+			descriptor->crop_width = 640;
+		} else if (descriptor->width < 2000) {
+			descriptor->crop_width = 1280;
+		}
+	}
+	return true;
+}
diff --git a/src/xrt/drivers/montrack/frameservers/v4l2/v4l2_frameserver.h b/src/xrt/drivers/montrack/frameservers/v4l2/v4l2_frameserver.h
index 7ee30b0cdc246876c4ab67dde15d5350a4e84a0d..c2c97c321cb4386359bdf5ccea7bb350edffe439 100644
--- a/src/xrt/drivers/montrack/frameservers/v4l2/v4l2_frameserver.h
+++ b/src/xrt/drivers/montrack/frameservers/v4l2/v4l2_frameserver.h
@@ -13,9 +13,10 @@
 
 #include <pthread.h>
 
-//TODO: unify device descriptors across apis
-typedef struct v4l2_source_descriptor {
-	char device_path[256]; //TODO: might not be enough
+// TODO: unify device descriptors across apis
+typedef struct v4l2_source_descriptor
+{
+	char device_path[256]; // TODO: might not be enough
 	char name[128];
 	char model[128];
 	uint64_t source_id;
@@ -26,43 +27,74 @@ typedef struct v4l2_source_descriptor {
 	uint32_t height;
 	uint32_t rate;
 	uint8_t extended_format;
-	uint32_t crop_scanline_bytes_start; //byte offset - special case for ps4 camera
+	uint32_t crop_scanline_bytes_start; // byte offset - special case for
+	                                    // ps4 camera
 	uint32_t crop_width; // pixels - special case for ps4 camera
 } v4l2_source_descriptor_t;
 
-typedef struct v4l2_frameserver_instance {
+typedef struct v4l2_frameserver_instance
+{
 	frame_consumer_callback_func frame_target_callback;
 	event_consumer_callback_func event_target_callback;
-	void* frame_target_instance; //where we send our frames
-	void* event_target_instance; //where we send our events
+	void* frame_target_instance; // where we send our frames
+	void* event_target_instance; // where we send our events
 	v4l2_source_descriptor_t source_descriptor;
 	pthread_t stream_thread;
 	capture_parameters_t capture_params;
 	bool is_configured;
 	bool is_running;
 
-    } v4l2_frameserver_instance_t;
+} v4l2_frameserver_instance_t;
 
 
 
+v4l2_frameserver_instance_t*
+v4l2_frameserver_create(frameserver_instance_t* inst);
+bool
+v4l2_frameserver_destroy(frameserver_instance_t* inst);
+bool
+v4l2_frameserver_source_create(v4l2_source_descriptor_t*);
+bool
+v4l2_frameserver_source_destroy(v4l2_source_descriptor_t*);
+bool
+v4l2_frameserver_configure_capture(frameserver_instance_t* inst,
+                                   capture_parameters_t cp);
+bool
+v4l2_frameserver_enumerate_sources(frameserver_instance_t* inst,
+                                   v4l2_source_descriptor_t* sources,
+                                   uint32_t* count);
+bool
+v4l2_frameserver_get(frameserver_instance_t* inst, frame_t* frame);
+void
+v4l2_frameserver_register_event_callback(
+    frameserver_instance_t* inst,
+    void* target_instance,
+    event_consumer_callback_func target_func);
+void
+v4l2_frameserver_register_frame_callback(
+    frameserver_instance_t* inst,
+    void* target_instance,
+    frame_consumer_callback_func target_func);
+bool
+v4l2_frameserver_seek(frameserver_instance_t* inst, uint64_t timestamp);
+bool
+v4l2_frameserver_stream_start(frameserver_instance_t* inst,
+                              v4l2_source_descriptor_t* source);
+bool
+v4l2_frameserver_stream_stop(frameserver_instance_t* inst);
+bool
+v4l2_frameserver_is_running(frameserver_instance_t* inst);
+bool
+v4l2_frameserver_test();
 
-v4l2_frameserver_instance_t* v4l2_frameserver_create(frameserver_instance_t* inst);
-bool v4l2_frameserver_destroy(frameserver_instance_t* inst);
-bool v4l2_frameserver_source_create(v4l2_source_descriptor_t*);
-bool v4l2_frameserver_source_destroy(v4l2_source_descriptor_t*);
-bool v4l2_frameserver_configure_capture(frameserver_instance_t* inst, capture_parameters_t cp);
-bool v4l2_frameserver_enumerate_sources(frameserver_instance_t* inst, v4l2_source_descriptor_t* sources, uint32_t* count);
-bool v4l2_frameserver_get(frameserver_instance_t* inst, frame_t* frame);
-void v4l2_frameserver_register_event_callback(frameserver_instance_t* inst, void* target_instance,event_consumer_callback_func target_func);
-void v4l2_frameserver_register_frame_callback(frameserver_instance_t* inst, void* target_instance,frame_consumer_callback_func target_func);
-bool v4l2_frameserver_seek(frameserver_instance_t* inst, uint64_t timestamp);
-bool v4l2_frameserver_stream_start(frameserver_instance_t* inst, v4l2_source_descriptor_t* source);
-bool v4l2_frameserver_stream_stop(frameserver_instance_t* inst);
-bool v4l2_frameserver_is_running(frameserver_instance_t* inst);
-bool v4l2_frameserver_test();
+static void
+v4l2_frameserver_stream_run(
+    frameserver_instance_t* inst); // streaming thread entrypoint
+static bool
+source_descriptor_from_v4l2(v4l2_source_descriptor_t* source_descriptor,
+                            char* v4l2_device,
+                            struct v4l2_capability* cap,
+                            struct v4l2_fmtdesc* desc);
 
-static void v4l2_frameserver_stream_run(frameserver_instance_t* inst);  //streaming thread entrypoint
-static bool source_descriptor_from_v4l2(v4l2_source_descriptor_t* source_descriptor, char* v4l2_device, struct v4l2_capability* cap,struct v4l2_fmtdesc* desc);
 
-
-#endif //V4L2_FRAMESERVER_H
+#endif // V4L2_FRAMESERVER_H
diff --git a/src/xrt/drivers/montrack/mt_device.c b/src/xrt/drivers/montrack/mt_device.c
index d612695c4dd59ea175c71ad448197272a130dd1c..f5dbe0eb368728d4f99df4c174701fdabb50de78 100644
--- a/src/xrt/drivers/montrack/mt_device.c
+++ b/src/xrt/drivers/montrack/mt_device.c
@@ -33,7 +33,7 @@
 
 
 static void
-mt_device_destroy(struct xrt_device *xdev)
+mt_device_destroy(struct xrt_device* xdev)
 {
 	mt_device_t* md = mt_device(xdev);
 
@@ -42,52 +42,51 @@ mt_device_destroy(struct xrt_device *xdev)
 }
 
 static void
-mt_device_get_tracked_pose(struct xrt_device *xdev,
-                           struct time_state *timekeeping,
-                           int64_t *out_timestamp,
-                           struct xrt_space_relation *out_relation)
+mt_device_get_tracked_pose(struct xrt_device* xdev,
+                           struct time_state* timekeeping,
+                           int64_t* out_timestamp,
+                           struct xrt_space_relation* out_relation)
 {
 	mt_device_t* md = mt_device(xdev);
-	struct xrt_pose pose;
 	filter_state_t filtered;
 	switch (md->tracker->tracker_type) {
-	    case TRACKER_TYPE_SPHERE_MONO:
-		    out_relation->relation_flags = (enum xrt_space_relation_flags)(
+	case TRACKER_TYPE_SPHERE_MONO:
+		out_relation->relation_flags = (enum xrt_space_relation_flags)(
 		    XRT_SPACE_RELATION_POSITION_VALID_BIT |
 		    XRT_SPACE_RELATION_POSITION_TRACKED_BIT);
-			md->filter->filter_predict_state(md->filter,&filtered,0);
-			out_relation->pose = filtered.pose;
-		    break;
-	    case TRACKER_TYPE_SPHERE_STEREO:
-		    out_relation->relation_flags = (enum xrt_space_relation_flags)(
-			XRT_SPACE_RELATION_POSITION_VALID_BIT |
-			XRT_SPACE_RELATION_POSITION_TRACKED_BIT);
-			md->filter->filter_predict_state(md->filter,&filtered,0);
-			out_relation->pose = filtered.pose;
-		    break;
-	    case TRACKER_TYPE_OSVR_UVBI:
-		    out_relation->relation_flags = (enum xrt_space_relation_flags)(
-			XRT_SPACE_RELATION_POSITION_VALID_BIT |
-			XRT_SPACE_RELATION_POSITION_TRACKED_BIT);
-			//TODO: get pose from osvr tracker
-			//out_relation->pose = filtered.pose;
+		md->filter->filter_predict_state(md->filter, &filtered, 0);
+		out_relation->pose = filtered.pose;
+		break;
+	case TRACKER_TYPE_SPHERE_STEREO:
+		out_relation->relation_flags = (enum xrt_space_relation_flags)(
+		    XRT_SPACE_RELATION_POSITION_VALID_BIT |
+		    XRT_SPACE_RELATION_POSITION_TRACKED_BIT);
+		md->filter->filter_predict_state(md->filter, &filtered, 0);
+		out_relation->pose = filtered.pose;
+		break;
+	case TRACKER_TYPE_OSVR_UVBI:
+		out_relation->relation_flags = (enum xrt_space_relation_flags)(
+		    XRT_SPACE_RELATION_POSITION_VALID_BIT |
+		    XRT_SPACE_RELATION_POSITION_TRACKED_BIT);
+		// TODO: get pose from osvr tracker
+		// out_relation->pose = filtered.pose;
 		break;
 
-	    default:
-		    printf("ERROR: Unknown tracker type\n");
+	default: printf("ERROR: Unknown tracker type\n");
 	}
 
 
 	// Update state within driver
-	//md->last_update = *out_timestamp;
-	//md->last_relation = *out_relation;
+	// md->last_update = *out_timestamp;
+	// md->last_relation = *out_relation;
 }
 
 static void
-mt_device_get_view_pose(struct xrt_device *xdev,
-                        struct xrt_vec3 *eye_relation,
+mt_device_get_view_pose(struct xrt_device* xdev,
+                        struct xrt_vec3* eye_relation,
                         uint32_t view_index,
-                        struct xrt_pose *out_pose)
+                        struct xrt_pose* out_pose)
 {
 	struct xrt_pose pose = {{0.0f, 0.0f, 0.0f, 1.0f}, {0.0f, 0.0f, 0.0f}};
 	*out_pose = pose;
@@ -96,33 +95,34 @@ mt_device_get_view_pose(struct xrt_device *xdev,
 
 
 mt_device_t*
-mt_device_create(char* device_name,bool log_verbose, bool log_debug) {
+mt_device_create(char* device_name, bool log_verbose, bool log_debug)
+{
 	mt_device_t* md = U_TYPED_CALLOC(mt_device_t);
 
 	dummy_init_mt_device(md);
 
-	if (strcmp(device_name,"MONO_PS3EYE") == 0) {
+	if (strcmp(device_name, "MONO_PS3EYE") == 0) {
 		if (mt_create_mono_ps3eye(md)) {
 			return md;
 		}
 	}
-	if (strcmp(device_name,"MONO_LOGITECH_C270") == 0) {
+	if (strcmp(device_name, "MONO_LOGITECH_C270") == 0) {
 		if (mt_create_mono_c270(md)) {
 			return md;
 		}
 	}
-	if (strcmp(device_name,"STEREO_ELP_60FPS") == 0) {
+	if (strcmp(device_name, "STEREO_ELP_60FPS") == 0) {
 		if (mt_create_stereo_elp(md)) {
 			return md;
 		}
 	}
-	if (strcmp(device_name,"OSVR_ELP_60FPS") == 0) {
+	if (strcmp(device_name, "OSVR_ELP_60FPS") == 0) {
 		if (mt_create_osvr_elp(md)) {
 			return md;
 		}
 	}
 
-	if (strcmp(device_name,"STEREO_PS4_60FPS") == 0) {
+	if (strcmp(device_name, "STEREO_PS4_60FPS") == 0) {
 		if (mt_create_stereo_ps4(md)) {
 			return md;
 		}
@@ -130,428 +130,562 @@ mt_device_create(char* device_name,bool log_verbose, bool log_debug) {
 
 
-	return NULL;
+	// no device matched; free the partially-initialized device
+	free(md);
+	return NULL;
-
-
-
 }
 
-bool mt_create_mono_ps3eye(mt_device_t* md) {
-	md->frameserver_count=1; // this driver uses a single camera source
+bool
+mt_create_mono_ps3eye(mt_device_t* md)
+{
+	md->frameserver_count = 1; // this driver uses a single camera source
 	md->frameservers[0] = frameserver_create(FRAMESERVER_TYPE_V4L2);
 	// ask our frameserver for available sources - note this will return a
-	// type-specific struct that we need to deal with e.g. UVC-specific, FFMPEG-specific.
-	uint32_t source_count=0;
-	md->frameservers[0]->frameserver_enumerate_sources(md->frameservers[0],NULL,&source_count);
-	if (source_count == 0){
-		//we have no sources, we cannot continue
+	// type-specific struct that we need to deal with e.g. UVC-specific,
+	// FFMPEG-specific.
+	uint32_t source_count = 0;
+	md->frameservers[0]->frameserver_enumerate_sources(md->frameservers[0],
+	                                                   NULL, &source_count);
+	if (source_count == 0) {
+		// we have no sources, we cannot continue
 		return false;
 	}
-	v4l2_source_descriptor_t* descriptors = calloc(source_count,sizeof(v4l2_source_descriptor_t));
-	md->frameservers[0]->frameserver_enumerate_sources(md->frameservers[0],descriptors,&source_count);
-	// defer further configuration and stream start until the rest of our chain is set up.
+	v4l2_source_descriptor_t* descriptors =
+	    calloc(source_count, sizeof(v4l2_source_descriptor_t));
+	md->frameservers[0]->frameserver_enumerate_sources(
+	    md->frameservers[0], descriptors, &source_count);
+	// defer further configuration and stream start until the rest of our
+	// chain is set up.
 
 	md->tracker = tracker_create(TRACKER_TYPE_SPHERE_MONO);
 	tracker_mono_configuration_t tracker_config = {};
 
-	//start in calibration mode
+	// start in calibration mode
 	tracker_config.calibration_mode = CALIBRATION_MODE_CHESSBOARD;
 
 
 	// configure our ps3 eye when we find it during enumeration
-	uint32_t source_index; //our frameserver config descriptor index
-	for (uint32_t i=0; i< source_count;i++){
+	uint32_t source_index = UINT32_MAX; // our frameserver config
+	                                    // descriptor index
+	for (uint32_t i = 0; i < source_count; i++) {
-		v4l2_source_descriptor_t temp = descriptors[i];
-		if (strcmp(descriptors[i].name,"ov534") == 0  && descriptors[i].format == FORMAT_Y_UINT8) {
-			if (descriptors[i].width == 640 && descriptors[i].height == 480 && descriptors[i].rate == 166666) {
+		if (strcmp(descriptors[i].name, "ov534") == 0 &&
+		    descriptors[i].format == FORMAT_Y_UINT8) {
+			if (descriptors[i].width == 640 &&
+			    descriptors[i].height == 480 &&
+			    descriptors[i].rate == 166666) {
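+				// 166666 * 100ns = 16.6ms, i.e. 60fps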
 				tracker_config.format = descriptors[i].format;
-				tracker_config.source_id =descriptors[i].source_id;
-				source_index =i;
+				tracker_config.source_id =
+				    descriptors[i].source_id;
+				source_index = i;
 			}
 		}
-
 	}
+	if (source_index == UINT32_MAX) {
+		// enumeration did not find a matching camera mode
+		return false;
+	}
-	snprintf(tracker_config.configuration_filename,128,"PS3eye_mono");
+	snprintf(tracker_config.configuration_filename, 128, "PS3eye_mono");
 
 	// configure our tracker for this frame source
 	bool configured = false;
-	configured = md->tracker->tracker_configure(md->tracker,&tracker_config);
+	configured =
+	    md->tracker->tracker_configure(md->tracker, &tracker_config);
 
-	if (! configured) {
+	if (!configured) {
 		printf("ERROR: tracker rejected frameserver configuration!\n");
 		return false;
 	}
 
 	// tracker is happy - connect our frameserver to our tracker
-	md->frameservers[0]->frameserver_register_frame_callback(md->frameservers[0],md->tracker,md->tracker->tracker_queue);
+	md->frameservers[0]->frameserver_register_frame_callback(
+	    md->frameservers[0], md->tracker, md->tracker->tracker_queue);
 
-	//create a filter for the trackers output
+	// create a filter for the trackers output
 	opencv_filter_configuration_t filter_config = {};
-	filter_config.measurement_noise_cov =0.1f;
-	filter_config.process_noise_cov =0.1f;
+	filter_config.measurement_noise_cov = 0.1f;
+	filter_config.process_noise_cov = 0.1f;
 
 	md->filter = filter_create(FILTER_TYPE_OPENCV_KALMAN);
-	md->filter->filter_configure(md->filter,&filter_config);
-	//connect our tracker to our filter
-	md->tracker->tracker_register_measurement_callback(md->tracker,md->filter,md->filter->filter_queue);
-	//and our driver to tracker events
-	md->tracker->tracker_register_event_callback(md->tracker,md,mt_handle_event);
+	md->filter->filter_configure(md->filter, &filter_config);
+	// connect our tracker to our filter
+	md->tracker->tracker_register_measurement_callback(
+	    md->tracker, md->filter, md->filter->filter_queue);
+	// and our driver to tracker events
+	md->tracker->tracker_register_event_callback(md->tracker, md,
+	                                             mt_handle_event);
 
 	// now we can configure our frameserver and start the stream
 
-	printf("INFO: frame source path: %s %d x %d %d interval: %d\n",descriptors[source_index].name, descriptors[source_index].width,descriptors[source_index].height,descriptors[source_index].format,descriptors[source_index].rate);
-	md->frameservers[0]->frameserver_configure_capture(md->frameservers[0],md->tracker->tracker_get_capture_params(md->tracker));
-	md->frameservers[0]->frameserver_stream_start(md->frameservers[0],&(descriptors[source_index]));
-
+	printf("INFO: frame source path: %s %d x %d %d interval: %d\n",
+	       descriptors[source_index].name, descriptors[source_index].width,
+	       descriptors[source_index].height,
+	       descriptors[source_index].format,
+	       descriptors[source_index].rate);
+	md->frameservers[0]->frameserver_configure_capture(
+	    md->frameservers[0],
+	    md->tracker->tracker_get_capture_params(md->tracker));
+	md->frameservers[0]->frameserver_stream_start(
+	    md->frameservers[0], &(descriptors[source_index]));
+
+	return true;
 }
 
-bool mt_create_mono_c270(mt_device_t* md) {
+bool
+mt_create_mono_c270(mt_device_t* md)
+{
 	// TODO - add IMU input source -> filter
 
-	md->frameserver_count=1; // this driver uses a single camera source
+	md->frameserver_count = 1; // this driver uses a single camera source
 	md->frameservers[0] = frameserver_create(FRAMESERVER_TYPE_UVC);
 	// ask our frameserver for available sources - note this will return a
-	// type-specific struct that we need to deal with e.g. UVC-specific, FFMPEG-specific.
-	uint32_t source_count=0;
-	md->frameservers[0]->frameserver_enumerate_sources(md->frameservers[0],NULL,&source_count);
-	if (source_count == 0){
-		//we have no sources, we cannot continue
+	// type-specific struct that we need to deal with e.g. UVC-specific,
+	// FFMPEG-specific.
+	uint32_t source_count = 0;
+	md->frameservers[0]->frameserver_enumerate_sources(md->frameservers[0],
+	                                                   NULL, &source_count);
+	if (source_count == 0) {
+		// we have no sources, we cannot continue
 		return false;
 	}
-	uvc_source_descriptor_t* descriptors = calloc(source_count,sizeof(uvc_source_descriptor_t));
-	md->frameservers[0]->frameserver_enumerate_sources(md->frameservers[0],descriptors,&source_count);
-	// defer further configuration and stream start until the rest of our chain is set up.
+	uvc_source_descriptor_t* descriptors =
+	    calloc(source_count, sizeof(uvc_source_descriptor_t));
+	md->frameservers[0]->frameserver_enumerate_sources(
+	    md->frameservers[0], descriptors, &source_count);
+	// defer further configuration and stream start until the rest of our
+	// chain is set up.
 
 	md->tracker = tracker_create(TRACKER_TYPE_SPHERE_MONO);
 	tracker_mono_configuration_t tracker_config = {};
 
-	//start in calibration mode
+	// start in calibration mode
 	tracker_config.calibration_mode = CALIBRATION_MODE_CHESSBOARD;
 
 
 	// configure our logitech c270 when we find it during enumeration
-	uint32_t source_index; //our frameserver config descriptor index
-	for (uint32_t i=0; i< source_count;i++){
-		if (descriptors[i].product_id == 0x0825 && descriptors[i].vendor_id == 0x046d && descriptors[i].format == FORMAT_Y_UINT8) {
-			if (descriptors[i].width == 640 && descriptors[i].height == 480 && descriptors[i].rate == 333333) {
+	uint32_t source_index = UINT32_MAX; // our frameserver config
+	                                    // descriptor index
+	for (uint32_t i = 0; i < source_count; i++) {
+		if (descriptors[i].product_id == 0x0825 &&
+		    descriptors[i].vendor_id == 0x046d &&
+		    descriptors[i].format == FORMAT_Y_UINT8) {
+			if (descriptors[i].width == 640 &&
+			    descriptors[i].height == 480 &&
+			    descriptors[i].rate == 333333) {
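+				// 333333 * 100ns = 33.3ms, i.e. 30fps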
 				tracker_config.format = descriptors[i].format;
-				tracker_config.source_id =descriptors[i].source_id;
-				source_index =i;
+				tracker_config.source_id =
+				    descriptors[i].source_id;
+				source_index = i;
 			}
 		}
-
 	}
+	if (source_index == UINT32_MAX) {
+		// enumeration did not find a matching camera mode
+		return false;
+	}
-	snprintf(tracker_config.configuration_filename,128,"C270_mono_%s",descriptors[source_index].serial);
+	snprintf(tracker_config.configuration_filename, 128, "C270_mono_%s",
+	         descriptors[source_index].serial);
 
 	// configure our tracker for this frame source
 	bool configured = false;
-	configured = md->tracker->tracker_configure(md->tracker,&tracker_config);
+	configured =
+	    md->tracker->tracker_configure(md->tracker, &tracker_config);
 
-	if (! configured) {
+	if (!configured) {
 		printf("ERROR: tracker rejected frameserver configuration!\n");
 		return false;
 	}
 
 	// tracker is happy - connect our frameserver to our tracker
-	md->frameservers[0]->frameserver_register_frame_callback(md->frameservers[0],md->tracker,md->tracker->tracker_queue);
+	md->frameservers[0]->frameserver_register_frame_callback(
+	    md->frameservers[0], md->tracker, md->tracker->tracker_queue);
 
-	//create a filter for the trackers output
+	// create a filter for the trackers output
 	opencv_filter_configuration_t filter_config = {};
-	filter_config.measurement_noise_cov =0.1f;
-	filter_config.process_noise_cov =0.1f;
+	filter_config.measurement_noise_cov = 0.1f;
+	filter_config.process_noise_cov = 0.1f;
 
 	md->filter = filter_create(FILTER_TYPE_OPENCV_KALMAN);
-	md->filter->filter_configure(md->filter,&filter_config);
-	//connect our tracker to our filter
-	md->tracker->tracker_register_measurement_callback(md->tracker,md->filter,md->filter->filter_queue);
-	//and our driver to tracker events
-	md->tracker->tracker_register_event_callback(md->tracker,md,mt_handle_event);
+	md->filter->filter_configure(md->filter, &filter_config);
+	// connect our tracker to our filter
+	md->tracker->tracker_register_measurement_callback(
+	    md->tracker, md->filter, md->filter->filter_queue);
+	// and our driver to tracker events
+	md->tracker->tracker_register_event_callback(md->tracker, md,
+	                                             mt_handle_event);
 
 	// now we can configure our frameserver and start the stream
 
-	printf("INFO: frame source path: %s %d x %d interval: %d\n",&(descriptors[source_index].name), descriptors[source_index].width,descriptors[source_index].height,descriptors[source_index].format,descriptors[source_index].rate);
-	md->frameservers[0]->frameserver_configure_capture(md->frameservers[0],md->tracker->tracker_get_capture_params(md->tracker));
-	md->frameservers[0]->frameserver_stream_start(md->frameservers[0],&(descriptors[source_index]));
+	printf(
+	    "INFO: frame source path: %s %d x %d %d interval: %d\n",
+	    descriptors[source_index].name, descriptors[source_index].width,
+	    descriptors[source_index].height, descriptors[source_index].format,
+	    descriptors[source_index].rate);
+	md->frameservers[0]->frameserver_configure_capture(
+	    md->frameservers[0],
+	    md->tracker->tracker_get_capture_params(md->tracker));
+	md->frameservers[0]->frameserver_stream_start(
+	    md->frameservers[0], &(descriptors[source_index]));
 
 	return true;
 }
 
-bool mt_create_stereo_elp(mt_device_t* md) {
+bool
+mt_create_stereo_elp(mt_device_t* md)
+{
 
 	// TODO - add IMU input source -> filter
 
-	md->frameserver_count=1; // this driver uses a single, composite stereo, camera source
+	md->frameserver_count =
+	    1; // this driver uses a single, composite stereo, camera source
 	md->frameservers[0] = frameserver_create(FRAMESERVER_TYPE_UVC);
 	// ask our frameserver for available sources - note this will return a
-	// type-specific struct that we need to deal with e.g. UVC-specific, FFMPEG-specific.
-	uint32_t source_count=0;
-	md->frameservers[0]->frameserver_enumerate_sources(md->frameservers[0],NULL,&source_count);
-	if (source_count == 0){
-		//we have no sources, we cannot continue
+	// type-specific struct that we need to deal with e.g. UVC-specific,
+	// FFMPEG-specific.
+	uint32_t source_count = 0;
+	md->frameservers[0]->frameserver_enumerate_sources(md->frameservers[0],
+	                                                   NULL, &source_count);
+	if (source_count == 0) {
+		// we have no sources, we cannot continue
 		return false;
 	}
-	uvc_source_descriptor_t* descriptors = calloc(source_count,sizeof(uvc_source_descriptor_t));
-	md->frameservers[0]->frameserver_enumerate_sources(md->frameservers[0],descriptors,&source_count);
-	// defer further configuration and stream start until the rest of our chain is set up.
+	uvc_source_descriptor_t* descriptors =
+	    calloc(source_count, sizeof(uvc_source_descriptor_t));
+	md->frameservers[0]->frameserver_enumerate_sources(
+	    md->frameservers[0], descriptors, &source_count);
+	// defer further configuration and stream start until the rest of our
+	// chain is set up.
 
 	md->tracker = tracker_create(TRACKER_TYPE_SPHERE_STEREO);
 	tracker_stereo_configuration_t tracker_config = {};
 
 	// configure our ELP camera when we find it during enumeration
-	uint32_t source_index; // our frameserver config descriptor index - we would have an array for multiple devices
-	for (uint32_t i=0; i< source_count;i++){
+	uint32_t source_index = UINT32_MAX; // our frameserver config
+	                                    // descriptor index - we would
+	                                    // have an array for multiple
+	                                    // devices
+	for (uint32_t i = 0; i < source_count; i++) {
-		uvc_source_descriptor_t s = descriptors[i];
-		if (descriptors[i].product_id == 0x9750 && descriptors[i].vendor_id == 0x05a3 && descriptors[i].format == FORMAT_YUV444_UINT8) {
-			if (descriptors[i].width == 1280 && descriptors[i].height == 480 && descriptors[i].rate == 166666) {
+		if (descriptors[i].product_id == 0x9750 &&
+		    descriptors[i].vendor_id == 0x05a3 &&
+		    descriptors[i].format == FORMAT_YUV444_UINT8) {
+			if (descriptors[i].width == 1280 &&
+			    descriptors[i].height == 480 &&
+			    descriptors[i].rate == 166666) {
 				tracker_config.l_format = descriptors[i].format;
-				tracker_config.l_source_id =descriptors[i].source_id;
-				snprintf(tracker_config.configuration_filename,128,"ELP_60FPS_stereo_%s",descriptors[i].serial);
+				tracker_config.l_source_id =
+				    descriptors[i].source_id;
+				snprintf(tracker_config.configuration_filename,
+				         128, "ELP_60FPS_stereo_%s",
+				         descriptors[i].serial);
 
 
-                //start in calibration mode
+				// start in calibration mode
 
-				tracker_config.calibration_mode = CALIBRATION_MODE_CHESSBOARD;
+				tracker_config.calibration_mode =
+				    CALIBRATION_MODE_CHESSBOARD;
 
-				// set up 50/50 horizontal stereo split - may need to put this in calibration data
+				// set up 50/50 horizontal stereo split - may
+				// need to put this in calibration data
 
-				struct xrt_vec2 ltl = {0.0f,0.0f};
-				struct xrt_vec2 lbr = {descriptors[i].width / 2.0f,descriptors[i].height};
-				struct xrt_vec2 rtl = {descriptors[i].width / 2.0f,0.0f};
-				struct xrt_vec2 rbr = {descriptors[i].width ,descriptors[i].height};
+				struct xrt_vec2 ltl = {0.0f, 0.0f};
+				struct xrt_vec2 lbr = {descriptors[i].width /
+				                           2.0f,
+				                       descriptors[i].height};
+				struct xrt_vec2 rtl = {
+				    descriptors[i].width / 2.0f, 0.0f};
+				struct xrt_vec2 rbr = {descriptors[i].width,
+				                       descriptors[i].height};
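+				// the composite frame is side-by-side: left
+				// eye pixels in [0, w/2), right in [w/2, w)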
 
-				tracker_config.l_rect.tl=ltl;
-				tracker_config.l_rect.br=lbr;
-				tracker_config.r_rect.tl=rtl;
-				tracker_config.r_rect.br=rbr;
+				tracker_config.l_rect.tl = ltl;
+				tracker_config.l_rect.br = lbr;
+				tracker_config.r_rect.tl = rtl;
+				tracker_config.r_rect.br = rbr;
 
 				tracker_config.split_left = true;
 
-				source_index =i;
+				source_index = i;
 			}
 		}
-
 	}
+	if (source_index == UINT32_MAX) {
+		// enumeration did not find a matching camera mode
+		return false;
+	}
 	// configure our tracker for this frame source
-	bool configured = md->tracker->tracker_configure(md->tracker,&tracker_config);
+	bool configured =
+	    md->tracker->tracker_configure(md->tracker, &tracker_config);
 
-	if (! configured) {
+	if (!configured) {
 		printf("ERROR: tracker rejected frameserver configuration!\n");
 		return false;
 	}
 
 	// tracker is happy - connect our frameserver to our tracker
-	md->frameservers[0]->frameserver_register_frame_callback(md->frameservers[0],md->tracker,md->tracker->tracker_queue);
+	md->frameservers[0]->frameserver_register_frame_callback(
+	    md->frameservers[0], md->tracker, md->tracker->tracker_queue);
 
-	//create a filter for the trackers output
+	// create a filter for the trackers output
 	opencv_filter_configuration_t filter_config = {};
-	filter_config.measurement_noise_cov =0.1f;
-	filter_config.process_noise_cov =0.1f;
+	filter_config.measurement_noise_cov = 0.1f;
+	filter_config.process_noise_cov = 0.1f;
 
 	md->filter = filter_create(FILTER_TYPE_OPENCV_KALMAN);
-	md->filter->filter_configure(md->filter,&filter_config);
-
-	//connect our tracker to our filter
-	md->tracker->tracker_register_measurement_callback(md->tracker,md->filter,md->filter->filter_queue);
-	md->tracker->tracker_register_event_callback(md->tracker,md,mt_handle_event);
-
-	//nw our chain is setup up we can start streaming data through it
-	printf("INFO: frame source path: %s %d x %d interval: %d\n",&(descriptors[source_index].name), descriptors[source_index].width,descriptors[source_index].height,descriptors[source_index].format,descriptors[source_index].rate);
-	md->frameservers[0]->frameserver_configure_capture(md->frameservers[0],md->tracker->tracker_get_capture_params(md->tracker));
-	md->frameservers[0]->frameserver_stream_start(md->frameservers[0],&(descriptors[source_index]));
+	md->filter->filter_configure(md->filter, &filter_config);
+
+	// connect our tracker to our filter
+	md->tracker->tracker_register_measurement_callback(
+	    md->tracker, md->filter, md->filter->filter_queue);
+	md->tracker->tracker_register_event_callback(md->tracker, md,
+	                                             mt_handle_event);
+
+	// now that our chain is set up, we can start streaming data through it
+	printf(
+	    "INFO: frame source path: %s %d x %d format: %d interval: %d\n",
+	    descriptors[source_index].name, descriptors[source_index].width,
+	    descriptors[source_index].height, descriptors[source_index].format,
+	    descriptors[source_index].rate);
+	md->frameservers[0]->frameserver_configure_capture(
+	    md->frameservers[0],
+	    md->tracker->tracker_get_capture_params(md->tracker));
+	md->frameservers[0]->frameserver_stream_start(
+	    md->frameservers[0], &(descriptors[source_index]));
 
 	return true;
 }
 
 
-bool mt_create_osvr_elp(mt_device_t* md) {
+bool
+mt_create_osvr_elp(mt_device_t* md)
+{
 
 	// TODO - add IMU input source -> filter
 
-	md->frameserver_count=1; // this driver uses a single, mono camera source
+	md->frameserver_count =
+	    1; // this driver uses a single mono camera source
 	md->frameservers[0] = frameserver_create(FRAMESERVER_TYPE_UVC);
 	// ask our frameserver for available sources - note this will return a
-	// type-specific struct that we need to deal with e.g. UVC-specific, FFMPEG-specific.
-	uint32_t source_count=0;
-	md->frameservers[0]->frameserver_enumerate_sources(md->frameservers[0],NULL,&source_count);
-	if (source_count == 0){
-		//we have no sources, we cannot continue
+	// type-specific struct that we need to deal with e.g. UVC-specific,
+	// FFMPEG-specific.
+	uint32_t source_count = 0;
+	md->frameservers[0]->frameserver_enumerate_sources(md->frameservers[0],
+	                                                   NULL, &source_count);
+	if (source_count == 0) {
+		// we have no sources, so we cannot continue
 		return false;
 	}
-	uvc_source_descriptor_t* descriptors = calloc(source_count,sizeof(uvc_source_descriptor_t));
-	md->frameservers[0]->frameserver_enumerate_sources(md->frameservers[0],descriptors,&source_count);
-	// defer further configuration and stream start until the rest of our chain is set up.
+	uvc_source_descriptor_t* descriptors =
+	    calloc(source_count, sizeof(uvc_source_descriptor_t));
+	md->frameservers[0]->frameserver_enumerate_sources(
+	    md->frameservers[0], descriptors, &source_count);
+	// defer further configuration and stream start until the rest of our
+	// chain is set up.
 
 	md->tracker = tracker_create(TRACKER_TYPE_OSVR_UVBI);
 	tracker_mono_configuration_t tracker_config = {};
 
 	// configure our ELP camera when we find it during enumeration
-	uint32_t source_index; // our frameserver config descriptor index - we would have an array for multiple devices
-	for (uint32_t i=0; i< source_count;i++){
+	uint32_t source_index; // our frameserver config descriptor index - we
+	                       // would have an array for multiple devices
+	for (uint32_t i = 0; i < source_count; i++) {
 		uvc_source_descriptor_t s = descriptors[i];
-		if (descriptors[i].product_id == 0x9750 && descriptors[i].vendor_id == 0x05a3 && descriptors[i].format == FORMAT_Y_UINT8) {
-			if (descriptors[i].width == 1280 && descriptors[i].height == 480 && descriptors[i].rate == 166666) {
+		if (descriptors[i].product_id == 0x9750 &&
+		    descriptors[i].vendor_id == 0x05a3 &&
+		    descriptors[i].format == FORMAT_Y_UINT8) {
+			if (descriptors[i].width == 1280 &&
+			    descriptors[i].height == 480 &&
+			    descriptors[i].rate == 166666) {
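+				// rate appears to be the UVC frame interval
+				// in 100ns units: 166666 * 100ns ~= 16.7ms,
+				// i.e. the 60fps mode named below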
 				tracker_config.format = descriptors[i].format;
-				tracker_config.source_id =descriptors[i].source_id;
-				snprintf(tracker_config.configuration_filename,128,"ELP_60FPS_osvr_%s",descriptors[i].serial);
-				source_index =i;
+				tracker_config.source_id =
+				    descriptors[i].source_id;
+				snprintf(tracker_config.configuration_filename,
+				         128, "ELP_60FPS_osvr_%s",
+				         descriptors[i].serial);
+				source_index = i;
 			}
 		}
-
 	}
 	// configure our tracker for this frame source
-	bool configured = md->tracker->tracker_configure(md->tracker,&tracker_config);
+	bool configured =
+	    md->tracker->tracker_configure(md->tracker, &tracker_config);
 
-	if (! configured) {
+	if (!configured) {
 		printf("ERROR: tracker rejected frameserver configuration!\n");
 		return false;
 	}
 
 	// tracker is happy - connect our frameserver to our tracker
-	md->frameservers[0]->frameserver_register_frame_callback(md->frameservers[0],md->tracker,md->tracker->tracker_queue);
-
-	//now our chain is setup up we can start streaming data through it
-	printf("INFO: frame source path: %s %d x %d interval: %d\n",&(descriptors[source_index].name), descriptors[source_index].width,descriptors[source_index].height,descriptors[source_index].format,descriptors[source_index].rate);
-	md->frameservers[0]->frameserver_configure_capture(md->frameservers[0],md->tracker->tracker_get_capture_params(md->tracker));
-	md->frameservers[0]->frameserver_stream_start(md->frameservers[0],&(descriptors[source_index]));
+	md->frameservers[0]->frameserver_register_frame_callback(
+	    md->frameservers[0], md->tracker, md->tracker->tracker_queue);
+
+	// now that our chain is set up, we can start streaming data through it
+	printf(
+	    "INFO: frame source path: %s %d x %d format: %d interval: %d\n",
+	    descriptors[source_index].name, descriptors[source_index].width,
+	    descriptors[source_index].height, descriptors[source_index].format,
+	    descriptors[source_index].rate);
+	md->frameservers[0]->frameserver_configure_capture(
+	    md->frameservers[0],
+	    md->tracker->tracker_get_capture_params(md->tracker));
+	md->frameservers[0]->frameserver_stream_start(
+	    md->frameservers[0], &(descriptors[source_index]));
 
 	return true;
 }
 
 
-bool mt_create_stereo_ps4(mt_device_t* md) {
+bool
+mt_create_stereo_ps4(mt_device_t* md)
+{
 
 	// TODO - add IMU input source -> filter
 
-	md->frameserver_count=1; // this driver uses a single, composite stereo, camera source
+	md->frameserver_count =
+	    1; // this driver uses a single composite stereo camera source
 	md->frameservers[0] = frameserver_create(FRAMESERVER_TYPE_V4L2);
 	// ask our frameserver for available sources - note this will return a
-	// type-specific struct that we need to deal with e.g. UVC-specific, FFMPEG-specific.
-	uint32_t source_count=0;
-	md->frameservers[0]->frameserver_enumerate_sources(md->frameservers[0],NULL,&source_count);
-	if (source_count == 0){
-		//we have no sources, we cannot continue
+	// type-specific struct that we need to deal with e.g. UVC-specific,
+	// FFMPEG-specific.
+	uint32_t source_count = 0;
+	md->frameservers[0]->frameserver_enumerate_sources(md->frameservers[0],
+	                                                   NULL, &source_count);
+	if (source_count == 0) {
+		// we have no sources, so we cannot continue
 		return false;
 	}
-	v4l2_source_descriptor_t* descriptors = calloc(source_count,sizeof(v4l2_source_descriptor_t));
-	md->frameservers[0]->frameserver_enumerate_sources(md->frameservers[0],descriptors,&source_count);
-	// defer further configuration and stream start until the rest of our chain is set up.
+	v4l2_source_descriptor_t* descriptors =
+	    calloc(source_count, sizeof(v4l2_source_descriptor_t));
+	md->frameservers[0]->frameserver_enumerate_sources(
+	    md->frameservers[0], descriptors, &source_count);
+	// defer further configuration and stream start until the rest of our
+	// chain is set up.
 
 	md->tracker = tracker_create(TRACKER_TYPE_SPHERE_STEREO);
 	tracker_stereo_configuration_t tracker_config = {};
 
 	// configure our PS4 camera when we find it during enumeration
-	uint32_t source_index; // our frameserver config descriptor index - we would have an array for multiple devices
-	for (uint32_t i=0; i< source_count;i++){
+	uint32_t source_index; // our frameserver config descriptor index - we
+	                       // would have an array for multiple devices
+	for (uint32_t i = 0; i < source_count; i++) {
 		v4l2_source_descriptor_t s = descriptors[i];
-		if (strcmp(descriptors[i].model,"USB Camera-OV580: USB Camera-OV���������������������������������������������������������������������") == 0 && descriptors[i].format == FORMAT_YUV444_UINT8) {
-		    if (descriptors[i].width == 1748 && descriptors[i].height == 408 && descriptors[i].rate == 166666) {
-		        tracker_config.l_format = descriptors[i].format;
-		        tracker_config.l_source_id =descriptors[i].source_id;
-		        snprintf(tracker_config.configuration_filename,128,"PS4_60FPS_stereo_%s",descriptors[i].model);
-
-
-		        //start in calibration mode
-
-		        tracker_config.calibration_mode = CALIBRATION_MODE_CHESSBOARD;
-
-		        // set up 50/50 horizontal stereo split - may need to put this in calibration data
-		        uint32_t effective_width = descriptors[i].width;
-		        if (descriptors[i].crop_width >0) {
-		           effective_width=descriptors[i].crop_width;
-	            }
-
-		        struct xrt_vec2 ltl = { 0.0f,0.0f };
-		        struct xrt_vec2 lbr = { effective_width / 2.0f, descriptors[i].height };
-		        struct xrt_vec2 rtl = { effective_width / 2.0f,0.0f };
-		        struct xrt_vec2 rbr = { effective_width ,descriptors[i].height};
-
-		        tracker_config.l_rect.tl=ltl;
-		        tracker_config.l_rect.br=lbr;
-		        tracker_config.r_rect.tl=rtl;
-		        tracker_config.r_rect.br=rbr;
-
-		        tracker_config.split_left = true;
+		if (strcmp(descriptors[i].model,
+		           "USB Camera-OV580: USB "
+		           "Camera-OV������������������������������������������"
+		           "���������������������������") == 0 &&
+		    descriptors[i].format == FORMAT_YUV444_UINT8) {
+			if (descriptors[i].width == 1748 &&
+			    descriptors[i].height == 408 &&
+			    descriptors[i].rate == 166666) {
+				tracker_config.l_format = descriptors[i].format;
+				tracker_config.l_source_id =
+				    descriptors[i].source_id;
+				snprintf(tracker_config.configuration_filename,
+				         128, "PS4_60FPS_stereo_%s",
+				         descriptors[i].model);
+
+
+				// start in calibration mode
+
+				tracker_config.calibration_mode =
+				    CALIBRATION_MODE_CHESSBOARD;
+
+				// set up 50/50 horizontal stereo split - may
+				// need to put this in calibration data
+				uint32_t effective_width = descriptors[i].width;
+				if (descriptors[i].crop_width > 0) {
+					effective_width =
+					    descriptors[i].crop_width;
+				}
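+				// if the driver reports a cropped width,
+				// split on that so the halves line up with
+				// the visible image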
+
+				struct xrt_vec2 ltl = {0.0f, 0.0f};
+				struct xrt_vec2 lbr = {effective_width / 2.0f,
+				                       descriptors[i].height};
+				struct xrt_vec2 rtl = {effective_width / 2.0f,
+				                       0.0f};
+				struct xrt_vec2 rbr = {effective_width,
+				                       descriptors[i].height};
+
+				tracker_config.l_rect.tl = ltl;
+				tracker_config.l_rect.br = lbr;
+				tracker_config.r_rect.tl = rtl;
+				tracker_config.r_rect.br = rbr;
 
-		        source_index =i;
-	        }
-	    }
+				tracker_config.split_left = true;
 
+				source_index = i;
+			}
+		}
 	}
 	// configure our tracker for this frame source
-	bool configured = md->tracker->tracker_configure(md->tracker,&tracker_config);
+	bool configured =
+	    md->tracker->tracker_configure(md->tracker, &tracker_config);
 
-	if (! configured) {
+	if (!configured) {
 		printf("ERROR: tracker rejected frameserver configuration!\n");
 		return false;
 	}
 
 	// tracker is happy - connect our frameserver to our tracker
-	md->frameservers[0]->frameserver_register_frame_callback(md->frameservers[0],md->tracker,md->tracker->tracker_queue);
+	md->frameservers[0]->frameserver_register_frame_callback(
+	    md->frameservers[0], md->tracker, md->tracker->tracker_queue);
 
-	//create a filter for the trackers output
+	// create a filter for the tracker's output
 	opencv_filter_configuration_t filter_config = {};
-	filter_config.measurement_noise_cov =0.1f;
-	filter_config.process_noise_cov =0.1f;
+	filter_config.measurement_noise_cov = 0.1f;
+	filter_config.process_noise_cov = 0.1f;
 
 	md->filter = filter_create(FILTER_TYPE_OPENCV_KALMAN);
-	md->filter->filter_configure(md->filter,&filter_config);
-
-	//connect our tracker to our filter
-	md->tracker->tracker_register_measurement_callback(md->tracker,md->filter,md->filter->filter_queue);
-	md->tracker->tracker_register_event_callback(md->tracker,md,mt_handle_event);
-
-	//nw our chain is setup up we can start streaming data through it
-	printf("INFO: frame source path: %s %d x %d interval: %d\n",&(descriptors[source_index].name), descriptors[source_index].width,descriptors[source_index].height,descriptors[source_index].format,descriptors[source_index].rate);
-	md->frameservers[0]->frameserver_configure_capture(md->frameservers[0],md->tracker->tracker_get_capture_params(md->tracker));
-	md->frameservers[0]->frameserver_stream_start(md->frameservers[0],&(descriptors[source_index]));
+	md->filter->filter_configure(md->filter, &filter_config);
+
+	// connect our tracker to our filter
+	md->tracker->tracker_register_measurement_callback(
+	    md->tracker, md->filter, md->filter->filter_queue);
+	md->tracker->tracker_register_event_callback(md->tracker, md,
+	                                             mt_handle_event);
+
+	// now that our chain is set up, we can start streaming data through it
+	printf(
+	    "INFO: frame source path: %s %d x %d format: %d interval: %d\n",
+	    descriptors[source_index].name, descriptors[source_index].width,
+	    descriptors[source_index].height, descriptors[source_index].format,
+	    descriptors[source_index].rate);
+	md->frameservers[0]->frameserver_configure_capture(
+	    md->frameservers[0],
+	    md->tracker->tracker_get_capture_params(md->tracker));
+	md->frameservers[0]->frameserver_stream_start(
+	    md->frameservers[0], &(descriptors[source_index]));
 
 	return true;
 }
 
-void mt_handle_event(mt_device_t* md, driver_event_t e){
-	switch (e.type){
+void
+mt_handle_event(mt_device_t* md, driver_event_t e)
+{
+	switch (e.type) {
 	case EVENT_TRACKER_RECONFIGURED:
-		switch (md->tracker->tracker_type){
-		    case TRACKER_TYPE_SPHERE_STEREO:
-		    case TRACKER_TYPE_SPHERE_MONO:
-			for (uint32_t i = 0; i < md->frameserver_count;i++) {
-				md->frameservers[i]->frameserver_configure_capture(md->frameservers[i],md->tracker->tracker_get_capture_params(md->tracker));
+		switch (md->tracker->tracker_type) {
+		case TRACKER_TYPE_SPHERE_STEREO:
+		case TRACKER_TYPE_SPHERE_MONO:
+			for (uint32_t i = 0; i < md->frameserver_count; i++) {
+				md->frameservers[i]
+				    ->frameserver_configure_capture(
+				        md->frameservers[i],
+				        md->tracker->tracker_get_capture_params(
+				            md->tracker));
 			}
-			    break;
-		    default:
-			    break;
-
+			break;
+		default: break;
 		}
 		break;
-	default:
-		break;
+	default: break;
 	}
 }
 
-void dummy_init_mt_device(mt_device_t* md){
-	//if this stuff isn't filled in we crash.
+void
+dummy_init_mt_device(mt_device_t* md)
+{
+	// if this stuff isn't filled in, we crash.
 
 	md->base.destroy = mt_device_destroy;
 	md->base.get_view_pose = mt_device_get_view_pose;
 	md->base.get_tracked_pose = mt_device_get_tracked_pose;
-	md->base.blend_mode=XRT_BLEND_MODE_OPAQUE;
+	md->base.blend_mode = XRT_BLEND_MODE_OPAQUE;
 	md->base.screens[0].w_pixels = 512;
 	md->base.screens[0].h_pixels = 256;
 	md->base.screens[0].nominal_frame_interval_ns = 11000;
 	md->base.views[0].viewport.w_pixels = 256;
 	md->base.views[0].viewport.h_pixels = 256;
-	md->base.views[0].viewport.x_pixels=0;
-	md->base.views[0].viewport.x_pixels=0;
+	md->base.views[0].viewport.x_pixels = 0;
+	md->base.views[0].viewport.y_pixels = 0;
 	md->base.views[1].viewport.w_pixels = 256;
 	md->base.views[1].viewport.h_pixels = 256;
-	md->base.views[1].viewport.x_pixels= 256;
-	md->base.views[1].viewport.y_pixels= 0;
+	md->base.views[1].viewport.x_pixels = 256;
+	md->base.views[1].viewport.y_pixels = 0;
 	md->base.views[0].display.w_pixels = 256;
 	md->base.views[0].display.w_meters = 0.1f;
-	md->base.views[0].display.h_pixels=256;
-	md->base.views[0].display.h_meters=0.1f;
+	md->base.views[0].display.h_pixels = 256;
+	md->base.views[0].display.h_meters = 0.1f;
 	md->base.views[1].display.w_pixels = 256;
 	md->base.views[1].display.w_meters = 0.1f;
-	md->base.views[1].display.h_pixels=256;
-	md->base.views[1].display.h_meters=0.1f;
+	md->base.views[1].display.h_pixels = 256;
+	md->base.views[1].display.h_meters = 0.1f;
 }
-
diff --git a/src/xrt/drivers/montrack/mt_device.h b/src/xrt/drivers/montrack/mt_device.h
index 655be757509791bcbac14031062bcf3ff70438b9..e36e813ebb90b4f97ae88deaa6a16f656bbafd09 100644
--- a/src/xrt/drivers/montrack/mt_device.h
+++ b/src/xrt/drivers/montrack/mt_device.h
@@ -37,23 +37,33 @@ typedef struct mt_device
 } mt_device_t;
 
 static inline mt_device_t*
-mt_device(struct xrt_device *xdev)
+mt_device(struct xrt_device* xdev)
 {
-	return (mt_device_t*) xdev;
+	return (mt_device_t*)xdev;
 }
 
-mt_device_t *
-mt_device_create(char* device_name,bool log_verbose, bool log_debug);
+mt_device_t*
+mt_device_create(char* device_name, bool log_verbose, bool log_debug);
 
-bool mt_create_mono_ps3eye(mt_device_t* md); //mono blob tracker, ps3 60fps camera
-bool mt_create_mono_c270(mt_device_t* md); //mono blob tracker, logitech 30fps c270 camera
-bool mt_create_stereo_elp(mt_device_t* md); //stereo tracker, ELP 60fps stereo camera
-bool mt_create_osvr_elp(mt_device_t* md); //osvr tracker, ELP 60fps stereo camera
-bool mt_create_stereo_ps4(mt_device_t* md); //stereo tracker, PS4 60fps stereo camera
+bool
+mt_create_mono_ps3eye(mt_device_t* md); // mono blob tracker, ps3 60fps camera
+bool
+mt_create_mono_c270(
+    mt_device_t* md); // mono blob tracker, logitech 30fps c270 camera
+bool
+mt_create_stereo_elp(
+    mt_device_t* md); // stereo tracker, ELP 60fps stereo camera
+bool
+mt_create_osvr_elp(mt_device_t* md); // osvr tracker, ELP 60fps stereo camera
+bool
+mt_create_stereo_ps4(
+    mt_device_t* md); // stereo tracker, PS4 60fps stereo camera
 
-void mt_handle_event(mt_device_t* md, driver_event_t e);
+void
+mt_handle_event(mt_device_t* md, driver_event_t e);
 
-void dummy_init_mt_device(mt_device_t* md);
+void
+dummy_init_mt_device(mt_device_t* md);
 
 
 #ifdef __cplusplus
diff --git a/src/xrt/drivers/montrack/mt_events.h b/src/xrt/drivers/montrack/mt_events.h
index 13c4edebb33e6ae2c6757729873c9031437f9a7b..115c82c878ad9d35bfe5f6119ba1a72865ce85ff 100644
--- a/src/xrt/drivers/montrack/mt_events.h
+++ b/src/xrt/drivers/montrack/mt_events.h
@@ -1,10 +1,17 @@
 
-typedef enum driver_event_type {EVENT_NONE, EVENT_FRAMESERVER_GOTFRAME,EVENT_TRACKER_RECONFIGURED} driver_event_type_t;
+typedef enum driver_event_type
+{
+	EVENT_NONE,
+	EVENT_FRAMESERVER_GOTFRAME,
+	EVENT_TRACKER_RECONFIGURED
+} driver_event_type_t;
 
-typedef struct driver_event {
+typedef struct driver_event
+{
 	driver_event_type_t type;
 	// extra data to go along with events
 	// can be added here
 } driver_event_t;
 
-typedef void (*event_consumer_callback_func)(void* instance, driver_event_t event);
+typedef void (*event_consumer_callback_func)(void* instance,
+                                             driver_event_t event);
diff --git a/src/xrt/drivers/montrack/mt_prober.c b/src/xrt/drivers/montrack/mt_prober.c
index 6c2589ccfe1e5c7785897f38566776f523110172..ebe0b9bef64e13a87832e7e2d9e95f1476e77b64 100644
--- a/src/xrt/drivers/montrack/mt_prober.c
+++ b/src/xrt/drivers/montrack/mt_prober.c
@@ -46,15 +46,16 @@ mt_prober_autoprobe(struct xrt_prober* p)
 	struct mt_prober* mp = mt_prober(p);
 
 	// here we would call functions to consult our config, check devices
-	// are present etc. - for now we will attempt to create a mono blob tracker, with any
-	// uvc camera we can use
+	// are present, etc. - for now we will attempt to create a mono blob
+	// tracker with any UVC camera we can use
 
-	//mt_device_t* mtd = mt_device_create("MONO_LOGITECH_C270",true,true);
-	//mt_device_t* mtd = mt_device_create("STEREO_ELP_60FPS",true,true);
-	//mt_device_t* mtd = mt_device_create("MONO_PS3EYE",true,true);
+	// mt_device_t* mtd = mt_device_create("MONO_LOGITECH_C270",true,true);
+	// mt_device_t* mtd = mt_device_create("STEREO_ELP_60FPS",true,true);
+	// mt_device_t* mtd = mt_device_create("MONO_PS3EYE",true,true);
 
-	//mt_device_t* mtd = mt_device_create("STEREO_LOGITECH_C270",true,true);
-	mt_device_t* mtd = mt_device_create("STEREO_PS4_60FPS",true,true);
+	// mt_device_t* mtd =
+	// mt_device_create("STEREO_LOGITECH_C270",true,true);
+	mt_device_t* mtd = mt_device_create("STEREO_PS4_60FPS", true, true);
 
 
 	return &mtd->base;
diff --git a/src/xrt/drivers/montrack/optical_tracking/common/calibration.h b/src/xrt/drivers/montrack/optical_tracking/common/calibration.h
index 7c95d57a9a697b0bbc26af31932a5109cf0ba397..1d4bab5c5df9cf5756cc78dd9378fdcd428cd41f 100644
--- a/src/xrt/drivers/montrack/optical_tracking/common/calibration.h
+++ b/src/xrt/drivers/montrack/optical_tracking/common/calibration.h
@@ -8,4 +8,4 @@
 #define DISTORTION_SIZE 5
 #define DISTORTION_FISHEYE_SIZE 4
 
-#endif //CALIBRATION_H
+#endif // CALIBRATION_H
diff --git a/src/xrt/drivers/montrack/optical_tracking/common/tracked_object.h b/src/xrt/drivers/montrack/optical_tracking/common/tracked_object.h
index 923b0557bf0cace0354eee28dafab09a83ae379b..3ed7423a7fccc26ce21d3b6a39f83ff57d3c8f17 100644
--- a/src/xrt/drivers/montrack/optical_tracking/common/tracked_object.h
+++ b/src/xrt/drivers/montrack/optical_tracking/common/tracked_object.h
@@ -3,21 +3,24 @@
 #include <xrt/xrt_defines.h>
 #include "../auxiliary/util/u_time.h"
 
-typedef struct tracked_blob {
+typedef struct tracked_blob
+{
 	struct xrt_vec2 center;
 	float diameter;
 	struct xrt_vec2 velocity;
 } tracked_blob_t;
 
-typedef struct tracked_object {
-    struct xrt_pose pose;
-    time_t pose_time;
-    time_t frame_time;
-	uint32_t tracking_tag; // a tracker may assign an opaque tag to denote object type
-	uint32_t tracking_id; // a tracker may assign an opaque id to facilitate interframe correlation
+typedef struct tracked_object
+{
+	struct xrt_pose pose;
+	time_t pose_time;
+	time_t frame_time;
+	uint32_t tracking_tag; // a tracker may assign an opaque tag to denote
+	                       // object type
+	uint32_t tracking_id; // a tracker may assign an opaque id to facilitate
+	                      // interframe correlation
 } tracked_object_t;
 
 
 
-
-#endif //TRACKEDOBJECT_H
+#endif // TRACKEDOBJECT_H
diff --git a/src/xrt/drivers/montrack/optical_tracking/common/tracker.c b/src/xrt/drivers/montrack/optical_tracking/common/tracker.c
index de3130db1f0c96c7f5616433bd9e5692da80ab62..73bd507624f6ca9d6b0b65555c6fd8c7b410eb83 100644
--- a/src/xrt/drivers/montrack/optical_tracking/common/tracker.c
+++ b/src/xrt/drivers/montrack/optical_tracking/common/tracker.c
@@ -8,123 +8,148 @@
 #include <sys/signal.h>
 #include <errno.h>
 
-tracker_instance_t* tracker_create(tracker_type_t t) {
-	tracker_instance_t* i = calloc(1,sizeof(tracker_instance_t));
+tracker_instance_t*
+tracker_create(tracker_type_t t)
+{
+	tracker_instance_t* i = calloc(1, sizeof(tracker_instance_t));
 	if (i) {
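+		// vtable-style dispatch: every tracker type fills in the same
+		// function pointers, so callers can drive any tracker through
+		// the generic tracker_instance_t interface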
 		switch (t) {
-		    case TRACKER_TYPE_SPHERE_MONO:
-			    i->tracker_type = t;
-				i->internal_instance = tracker3D_sphere_mono_create(i);
-				i->tracker_get_capture_params = tracker3D_sphere_mono_get_capture_params;
-				i->tracker_get_poses = tracker3D_sphere_mono_get_poses;
-				i->tracker_get_debug_frame= tracker3D_sphere_mono_get_debug_frame;
-				i->tracker_queue = tracker3D_sphere_mono_queue;
-				i->tracker_register_measurement_callback = tracker3D_sphere_mono_register_measurement_callback;
-				i->tracker_register_event_callback = tracker3D_sphere_mono_register_event_callback;
-				i->tracker_has_new_poses = tracker3D_sphere_mono_new_poses;
-				i->tracker_configure = tracker3D_sphere_mono_configure;
-			    break;
-		    case TRACKER_TYPE_SPHERE_STEREO:
-			    i->tracker_type = t;
-				i->internal_instance = tracker3D_sphere_stereo_create(i);
-				i->tracker_get_capture_params = tracker3D_sphere_stereo_get_capture_params;
-				i->tracker_get_poses = tracker3D_sphere_stereo_get_poses;
-				i->tracker_get_debug_frame= tracker3D_sphere_stereo_get_debug_frame;
-				i->tracker_queue = tracker3D_sphere_stereo_queue;
-				i->tracker_register_measurement_callback = tracker3D_sphere_stereo_register_measurement_callback;
-				i->tracker_register_event_callback = tracker3D_sphere_stereo_register_event_callback;
-				i->tracker_has_new_poses = tracker3D_sphere_stereo_new_poses;
-				i->tracker_configure = tracker3D_sphere_stereo_configure;
-			    break;
-		    case TRACKER_TYPE_OSVR_UVBI:
-			    i->tracker_type = t;
-				i->internal_instance = tracker3D_osvr_uvbi_create(i);
-				i->tracker_get_capture_params = tracker3D_osvr_uvbi_get_capture_params;
-				i->tracker_get_poses = tracker3D_osvr_uvbi_get_poses;
-				i->tracker_get_debug_frame= tracker3D_osvr_uvbi_get_debug_frame;
-				i->tracker_queue = tracker3D_osvr_uvbi_queue;
-				i->tracker_register_measurement_callback = tracker3D_osvr_uvbi_register_measurement_callback;
-				i->tracker_register_event_callback = tracker3D_osvr_uvbi_register_event_callback;
-				i->tracker_has_new_poses = tracker3D_osvr_uvbi_new_poses;
-				i->tracker_configure = tracker3D_osvr_uvbi_configure;
+		case TRACKER_TYPE_SPHERE_MONO:
+			i->tracker_type = t;
+			i->internal_instance = tracker3D_sphere_mono_create(i);
+			i->tracker_get_capture_params =
+			    tracker3D_sphere_mono_get_capture_params;
+			i->tracker_get_poses = tracker3D_sphere_mono_get_poses;
+			i->tracker_get_debug_frame =
+			    tracker3D_sphere_mono_get_debug_frame;
+			i->tracker_queue = tracker3D_sphere_mono_queue;
+			i->tracker_register_measurement_callback =
+			    tracker3D_sphere_mono_register_measurement_callback;
+			i->tracker_register_event_callback =
+			    tracker3D_sphere_mono_register_event_callback;
+			i->tracker_has_new_poses =
+			    tracker3D_sphere_mono_new_poses;
+			i->tracker_configure = tracker3D_sphere_mono_configure;
+			break;
+		case TRACKER_TYPE_SPHERE_STEREO:
+			i->tracker_type = t;
+			i->internal_instance =
+			    tracker3D_sphere_stereo_create(i);
+			i->tracker_get_capture_params =
+			    tracker3D_sphere_stereo_get_capture_params;
+			i->tracker_get_poses =
+			    tracker3D_sphere_stereo_get_poses;
+			i->tracker_get_debug_frame =
+			    tracker3D_sphere_stereo_get_debug_frame;
+			i->tracker_queue = tracker3D_sphere_stereo_queue;
+			i->tracker_register_measurement_callback =
+			    tracker3D_sphere_stereo_register_measurement_callback;
+			i->tracker_register_event_callback =
+			    tracker3D_sphere_stereo_register_event_callback;
+			i->tracker_has_new_poses =
+			    tracker3D_sphere_stereo_new_poses;
+			i->tracker_configure =
+			    tracker3D_sphere_stereo_configure;
+			break;
+		case TRACKER_TYPE_OSVR_UVBI:
+			i->tracker_type = t;
+			i->internal_instance = tracker3D_osvr_uvbi_create(i);
+			i->tracker_get_capture_params =
+			    tracker3D_osvr_uvbi_get_capture_params;
+			i->tracker_get_poses = tracker3D_osvr_uvbi_get_poses;
+			i->tracker_get_debug_frame =
+			    tracker3D_osvr_uvbi_get_debug_frame;
+			i->tracker_queue = tracker3D_osvr_uvbi_queue;
+			i->tracker_register_measurement_callback =
+			    tracker3D_osvr_uvbi_register_measurement_callback;
+			i->tracker_register_event_callback =
+			    tracker3D_osvr_uvbi_register_event_callback;
+			i->tracker_has_new_poses =
+			    tracker3D_osvr_uvbi_new_poses;
+			i->tracker_configure = tracker3D_osvr_uvbi_configure;
 			break;
-		    case TRACKER_TYPE_NONE:
-		    default:
-			    free(i);
-			    return NULL;
+		case TRACKER_TYPE_NONE:
+		default:
+			free(i);
+			return NULL;
 			break;
 		}
-		//TODO: make this optional - we should use pipewire or similar here
+		// TODO: make this optional - we should use pipewire or similar
+		// here
 		// Create debug socket file descriptor
-        if ((i->debug_fd = socket(AF_INET, SOCK_STREAM, 0)) == 0) {
-            printf("ERROR: socket creation failed\n");
-            return NULL;
-        }
-        int opt = 1;
-        if (setsockopt(i->debug_fd, SOL_SOCKET, SO_REUSEADDR | SO_REUSEPORT,
-                                                          &opt, sizeof(opt))) {
-            printf("ERROR: socket option setting failed\n");
-            return NULL;
-        }
-        i->debug_address.sin_family = AF_INET;
-        i->debug_address.sin_addr.s_addr = INADDR_ANY;
-        i->debug_address.sin_port = htons( 6666 );
-        if (bind(i->debug_fd, (struct sockaddr *)&i->debug_address,
-                                     sizeof(i->debug_address))<0) {
-            printf("ERROR: socket option setting failed\n");
-            return NULL;
-        }
-        if (listen(i->debug_fd, 3) < 0)
-            {
-            printf("ERROR: socket listen failed\n");
-            return NULL;
-        }
+		if ((i->debug_fd = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
+			printf("ERROR: socket creation failed\n");
+			return NULL;
+		}
+		int opt = 1;
+		if (setsockopt(i->debug_fd, SOL_SOCKET,
+		               SO_REUSEADDR | SO_REUSEPORT, &opt,
+		               sizeof(opt))) {
+			printf("ERROR: socket option setting failed\n");
+			return NULL;
+		}
+		i->debug_address.sin_family = AF_INET;
+		i->debug_address.sin_addr.s_addr = INADDR_ANY;
+		i->debug_address.sin_port = htons(6666);
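+		// raw debug frames are streamed to whichever client connects
+		// on this hard-coded port; for illustration, something like
+		// 'nc localhost 6666 > debug.raw' could capture the stream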
+		if (bind(i->debug_fd, (struct sockaddr*)&i->debug_address,
+		         sizeof(i->debug_address)) < 0) {
+			printf("ERROR: socket option setting failed\n");
+			return NULL;
+		}
+		if (listen(i->debug_fd, 3) < 0) {
+			printf("ERROR: socket listen failed\n");
+			return NULL;
+		}
 
-        if (ioctl(i->debug_fd, FIONBIO, (char *)&opt) < 0) {
-             printf("ERROR: non-blocking ioctl failed");
-             close(i->debug_fd);
-             return NULL;
-          }
-		signal(SIGPIPE, SIG_IGN); //ignore sigpipe.
-        i->client_connected = false;
+		if (ioctl(i->debug_fd, FIONBIO, (char*)&opt) < 0) {
+			printf("ERROR: non-blocking ioctl failed");
+			close(i->debug_fd);
+			return NULL;
+		}
+		signal(SIGPIPE, SIG_IGN); // ignore sigpipe.
+		i->client_connected = false;
 		return i;
 	}
 	return NULL;
 }
 
-bool tracker_send_debug_frame(tracker_instance_t* inst)
+bool
+tracker_send_debug_frame(tracker_instance_t* inst)
 {
 	frame_t f = {};
-    if (inst->tracker_get_debug_frame(inst,&f))
-    {
-        if (! inst->client_connected) {
-            inst->debug_client_fd = accept(inst->debug_fd, NULL, NULL);
+	if (inst->tracker_get_debug_frame(inst, &f)) {
+		if (!inst->client_connected) {
+			inst->debug_client_fd =
+			    accept(inst->debug_fd, NULL, NULL);
 
-            if (inst->debug_client_fd == -1) {
-                if (errno == EWOULDBLOCK) {
-                    return false;
+			if (inst->debug_client_fd == -1) {
+				if (errno == EWOULDBLOCK) {
+					return false;
 				} else {
-					//some other socket problem, assume we cannot continue
+					// some other socket problem, assume we
+					// cannot continue
 					close(inst->debug_client_fd);
 					inst->client_connected = false;
 					return false;
-                }
-            }
-            inst->client_connected = true;
-        }
+				}
+			}
+			inst->client_connected = true;
+		}
 
 		if (inst->client_connected) {
-			int ret = recv(inst->debug_client_fd,NULL,0,MSG_DONTWAIT);
-			if ( ret == 0) {
-				//we are disconnected
+			int ret =
+			    recv(inst->debug_client_fd, NULL, 0, MSG_DONTWAIT);
+			if (ret == 0) {
+				// we are disconnected
 				close(inst->debug_client_fd);
 				inst->client_connected = false;
 				return false;
 			}
-			//just firehose the data at the client, and hope it can handle it.
+			// just firehose the data at the client, and hope it can
+			// handle it.
 			send(inst->debug_client_fd, f.data, f.size_bytes, 0);
 			return true;
 		}
-    }
+	}
 	return false;
 }
diff --git a/src/xrt/drivers/montrack/optical_tracking/common/tracker.h b/src/xrt/drivers/montrack/optical_tracking/common/tracker.h
index ae29ab85b0d04af57b39728a75293398a983f5b0..2671b01230b61366f6d0bfd16709e9daa0e0874b 100644
--- a/src/xrt/drivers/montrack/optical_tracking/common/tracker.h
+++ b/src/xrt/drivers/montrack/optical_tracking/common/tracker.h
@@ -6,30 +6,42 @@
 #include "tracked_object.h"
 #include <sys/socket.h>
 #include <netinet/in.h>
-#define MAX_FRAMESERVERS 8 //maximum number of cameras/sources that can be bound to a tracker
+#define MAX_FRAMESERVERS                                                       \
+	8 // maximum number of cameras/sources that can be bound to a tracker
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-typedef enum tracker_calibration_mode {CALIBRATION_MODE_NONE,CALIBRATION_MODE_CHESSBOARD} tracker_calibration_mode_t;
-typedef enum tracker_event_desc {TRACKER_EVENT_NONE,TRACKER_EVENT_RECONFIGURED} tracker_event_desc_t;
-
-typedef struct tracker_measurement {
+typedef enum tracker_calibration_mode
+{
+	CALIBRATION_MODE_NONE,
+	CALIBRATION_MODE_CHESSBOARD
+} tracker_calibration_mode_t;
+typedef enum tracker_event_desc
+{
+	TRACKER_EVENT_NONE,
+	TRACKER_EVENT_RECONFIGURED
+} tracker_event_desc_t;
+
+typedef struct tracker_measurement
+{
 	struct xrt_pose pose;
 	bool has_position;
 	bool has_rotation;
 	timepoint_ns timestamp;
 } tracker_measurement_t;
 
-typedef enum tracker_type {
+typedef enum tracker_type
+{
 	TRACKER_TYPE_NONE,
 	TRACKER_TYPE_SPHERE_STEREO,
 	TRACKER_TYPE_SPHERE_MONO,
 	TRACKER_TYPE_OSVR_UVBI
 } tracker_type_t;
 
-typedef struct tracker_event {
+typedef struct tracker_event
+{
 	tracker_type_t type;
 	tracker_event_desc_t event;
 } tracker_event_t;
@@ -38,61 +50,80 @@ typedef void* tracker_instance_ptr;
 typedef void* tracker_internal_instance_ptr;
 typedef void* tracker_configuration_ptr;
 
-typedef void (*measurement_consumer_callback_func)(void* instance, tracker_measurement_t* measurement);
+typedef void (*measurement_consumer_callback_func)(
+    void* instance, tracker_measurement_t* measurement);
 
-typedef struct tracker_mono_configuration {
-	char configuration_filename[256]; //TODO: maybe too small?
+typedef struct tracker_mono_configuration
+{
+	char configuration_filename[256]; // TODO: maybe too small?
 	tracker_calibration_mode_t calibration_mode;
-	//camera_calibration_t calibration;
+	// camera_calibration_t calibration;
 	frame_format_t format;
 	uint64_t source_id;
-    } tracker_mono_configuration_t;
+} tracker_mono_configuration_t;
 
-typedef struct tracker_stereo_configuration {
-	char configuration_filename[256]; //TODO: maybe too small?
+typedef struct tracker_stereo_configuration
+{
+	char configuration_filename[256]; // TODO: maybe too small?
 	tracker_calibration_mode_t calibration_mode;
-	//camera_calibration_t l_calibration;
-	//camera_calibration_t r_calibration;
+	// camera_calibration_t l_calibration;
+	// camera_calibration_t r_calibration;
 	frame_format_t l_format;
 	uint64_t l_source_id;
 	frame_format_t r_format;
 	uint64_t r_source_id;
-    bool split_left; // single-frame stereo will split the left frame
-    frame_rect_t l_rect;
-    frame_rect_t r_rect;
+	bool split_left; // single-frame stereo will split the left frame
+	frame_rect_t l_rect;
+	frame_rect_t r_rect;
 
 
 } tracker_stereo_configuration_t;
 
 
 
-//tracker interface
-
-typedef struct _tracker_instance {
-     tracker_type_t tracker_type;
-	 capture_parameters_t (*tracker_get_capture_params)(tracker_instance_ptr inst);
-	 bool (*tracker_queue)(tracker_instance_ptr inst,frame_t* frame);
-	 bool (*tracker_get_debug_frame)(tracker_instance_ptr inst,frame_t* frame);
-	 bool (*tracker_get_poses)(tracker_instance_ptr inst,tracked_object_t* tracked_objects,uint32_t* count);
-	 bool (*tracker_has_new_poses)(tracker_instance_ptr inst);
-	 void (*tracker_register_measurement_callback)(tracker_instance_ptr inst, void* target_instance, measurement_consumer_callback_func target_func);
-	 void (*tracker_register_event_callback)(tracker_instance_ptr inst, void* target_instance, event_consumer_callback_func target_func);
-	 bool (*tracker_configure)(tracker_instance_ptr inst, tracker_configuration_ptr config);
-	 tracker_internal_instance_ptr internal_instance;
-     int debug_fd, debug_socket, socket_read;
-     int debug_client_fd;
-     bool client_connected;
-     struct sockaddr_in debug_address;
+// tracker interface
+
+typedef struct _tracker_instance
+{
+	tracker_type_t tracker_type;
+	capture_parameters_t (*tracker_get_capture_params)(
+	    tracker_instance_ptr inst);
+	bool (*tracker_queue)(tracker_instance_ptr inst, frame_t* frame);
+	bool (*tracker_get_debug_frame)(tracker_instance_ptr inst,
+	                                frame_t* frame);
+	bool (*tracker_get_poses)(tracker_instance_ptr inst,
+	                          tracked_object_t* tracked_objects,
+	                          uint32_t* count);
+	bool (*tracker_has_new_poses)(tracker_instance_ptr inst);
+	void (*tracker_register_measurement_callback)(
+	    tracker_instance_ptr inst,
+	    void* target_instance,
+	    measurement_consumer_callback_func target_func);
+	void (*tracker_register_event_callback)(
+	    tracker_instance_ptr inst,
+	    void* target_instance,
+	    event_consumer_callback_func target_func);
+	bool (*tracker_configure)(tracker_instance_ptr inst,
+	                          tracker_configuration_ptr config);
+	tracker_internal_instance_ptr internal_instance;
+	int debug_fd, debug_socket, socket_read;
+	int debug_client_fd;
+	bool client_connected;
+	struct sockaddr_in debug_address;
 } tracker_instance_t;
 
-tracker_instance_t* tracker_create(tracker_type_t t);
-bool tracker_destroy(tracker_instance_t* inst);
-bool tracker_send_debug_frame(tracker_instance_t* inst);
+tracker_instance_t*
+tracker_create(tracker_type_t t);
+bool
+tracker_destroy(tracker_instance_t* inst);
+bool
+tracker_send_debug_frame(tracker_instance_t* inst);
 
-bool trackers_test();
+bool
+trackers_test();
 
 #ifdef __cplusplus
 }
 #endif
 
-#endif //TRACKER_H
+#endif // TRACKER_H
diff --git a/src/xrt/drivers/montrack/optical_tracking/track_psvr.cpp b/src/xrt/drivers/montrack/optical_tracking/track_psvr.cpp
index 73c0b3fe837ab7ce01b1c806c29ad69f67ff49aa..5e79b0e37b9e48d6d6261d059282d894708fb607 100644
--- a/src/xrt/drivers/montrack/optical_tracking/track_psvr.cpp
+++ b/src/xrt/drivers/montrack/optical_tracking/track_psvr.cpp
@@ -1,53 +1,54 @@
 #include "track_psvr.h"
 
-static float dist_3d(xrt_vec3 a, xrt_vec3 b)
+static float
+dist_3d(xrt_vec3 a, xrt_vec3 b)
 {
 	xrt_vec3 d;
 	d.x = a.x - b.x;
 	d.y = a.y - b.y;
 	d.z = a.z - b.z;
-	return sqrt(d.x*d.x + d.y*d.y + d.z * d.z);
+	return sqrt(d.x * d.x + d.y * d.y + d.z * d.z);
 }
 
-bool psvr_disambiguate_5points(std::vector<psvr_led_t>* leds, psvr_track_data* t){
-	//create a list of the corners, ignoring the center
+bool
+psvr_disambiguate_5points(std::vector<psvr_led_t>* leds, psvr_track_data* t)
+{
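+	// index convention used below (matches LED_LABELS in track_psvr.h):
+	// 0 = LU (upper left), 1 = RU (upper right), 2 = C (center),
+	// 3 = LL (lower left), 4 = RL (lower right)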
+	// create a list of the corners, ignoring the center
 	std::vector<uint32_t> corner_indices;
-	for (uint32_t i=0;i<leds->size();i++)
-	{
+	for (uint32_t i = 0; i < leds->size(); i++) {
 		psvr_led_t p = leds->at(i);
-	  if (p.sign_x == 0 && p.sign_y == 0)
-	  {
-		  t->translation = p.position;
-		  t->confidence[2] = i;
-		  t->positions_3d[2] = p.position;
-		  t->l_positions_2d[2] = p.l_position_2d;
-		  t->r_positions_2d[2] = p.r_position_2d;
-	  }
-	  else
-	  {corner_indices.push_back(i);}
+		if (p.sign_x == 0 && p.sign_y == 0) {
+			t->translation = p.position;
+			t->confidence[2] = i;
+			t->positions_3d[2] = p.position;
+			t->l_positions_2d[2] = p.l_position_2d;
+			t->r_positions_2d[2] = p.r_position_2d;
+		} else {
+			corner_indices.push_back(i);
+		}
 	}
 
-	//find the leftmost and rightmost points - these will belong to our left and right side edges.
+	// find the leftmost and rightmost points - these will belong to our
+	// left and right side edges.
 
-	float lowest_x=65535.0;
-	float highest_x=-65535.0;
-	uint32_t lowest_x_index,highest_x_index;
-	for (uint32_t i=0;i< corner_indices.size();i++)
-	{
+	float lowest_x = 65535.0;
+	float highest_x = -65535.0;
+	uint32_t lowest_x_index, highest_x_index;
+	for (uint32_t i = 0; i < corner_indices.size(); i++) {
 		psvr_led_t p = leds->at(corner_indices[i]);
-		if (p.position.x < lowest_x)
-		{
-			lowest_x=p.position.x;
-			lowest_x_index=corner_indices[i];
+		if (p.position.x < lowest_x) {
+			lowest_x = p.position.x;
+			lowest_x_index = corner_indices[i];
 		}
-		if (p.position.x > highest_x)
-		{
-			highest_x=p.position.x;
-			highest_x_index=corner_indices[i];
+		if (p.position.x > highest_x) {
+			highest_x = p.position.x;
+			highest_x_index = corner_indices[i];
 		}
 	}
-	//printf("lowestX %f lowestXIndex %d highestX %f highestXIndex %d\n",lowestX,lowestXIndex,highestX,highestXIndex);
-	//find the corresponding (closest) point on the 'short side' for the left and right extremities
+	// printf("lowestX %f lowestXIndex %d highestX %f highestXIndex
+	// %d\n",lowestX,lowestXIndex,highestX,highestXIndex); find the
+	// corresponding (closest) point on the 'short side' for the left and
+	// right extremities
 
 	float lowest_l_x_distance = 65535.0f;
 	float lowest_h_x_distance = 65535.0f;
@@ -55,93 +56,82 @@ bool psvr_disambiguate_5points(std::vector<psvr_led_t>* leds, psvr_track_data* t
 	uint32_t highest_x_pair_index;
 
 	psvr_led_t lcA = leds->at(lowest_x_index);
-	for (uint32_t i=0; i < leds->size();i++)
-	{
+	for (uint32_t i = 0; i < corner_indices.size(); i++) {
 		psvr_led_t lcB = leds->at(corner_indices[i]);
-		if  (corner_indices[i] != lowest_x_index)
-		{
-			float dist_l_x = dist_3d(lcA.position,lcB.position);
-			if ( dist_l_x < lowest_l_x_distance)
-			{
+		if (corner_indices[i] != lowest_x_index) {
+			float dist_l_x = dist_3d(lcA.position, lcB.position);
+			if (dist_l_x < lowest_l_x_distance) {
 				lowest_x_pair_index = corner_indices[i];
 				lowest_l_x_distance = dist_l_x;
 			}
 		}
-	 }
+	}
 	psvr_led_t hcA = leds->at(highest_x_index);
-	for (uint32_t i=0; i < corner_indices.size();i++)
-	{
+	for (uint32_t i = 0; i < corner_indices.size(); i++) {
 		psvr_led_t hcB = leds->at(corner_indices[i]);
-		if  (corner_indices[i] != highest_x_index)
-		{
-			float dist_h_x = dist_3d(hcA.position,hcB.position);
-			if (dist_h_x < lowest_h_x_distance)
-			{
+		if (corner_indices[i] != highest_x_index) {
+			float dist_h_x = dist_3d(hcA.position, hcB.position);
+			if (dist_h_x < lowest_h_x_distance) {
 				highest_x_pair_index = corner_indices[i];
-				lowest_h_x_distance=dist_h_x;
+				lowest_h_x_distance = dist_h_x;
 			}
 		}
-
 	}
-	//printf("lowestLXDistance %f lowestXPairIndex %d lowestHXDistance %f highestXPairIndex %d\n",lowestLXDistance,lowestXPairIndex,lowestHXDistance,highestXPairIndex);
+	// printf("lowestLXDistance %f lowestXPairIndex %d lowestHXDistance %f
+	// highestXPairIndex
+	// %d\n",lowestLXDistance,lowestXPairIndex,lowestHXDistance,highestXPairIndex);
 
-	//now we have 4 points, and can know which 2 are left and which 2 are right.
+	// now we have 4 points, and can know which 2 are left and which 2 are
+	// right.
 
 	psvr_led_t lA = leds->at(lowest_x_index);
 	psvr_led_t lB = leds->at(lowest_x_pair_index);
-	if (lA.position.y < lB.position.y)
-	{
-		//lA is upper left and lB is lower left
+	if (lA.position.y < lB.position.y) {
+		// lA is upper left and lB is lower left
 		t->positions_3d[0] = lA.position;
-		t->l_positions_2d[0]=lA.l_position_2d;
-		t->r_positions_2d[0]=lA.r_position_2d;
+		t->l_positions_2d[0] = lA.l_position_2d;
+		t->r_positions_2d[0] = lA.r_position_2d;
 		t->confidence[0] = 1;
 		t->positions_3d[3] = lB.position;
-		t->l_positions_2d[3]=lB.l_position_2d;
-		t->r_positions_2d[3]=lB.r_position_2d;
+		t->l_positions_2d[3] = lB.l_position_2d;
+		t->r_positions_2d[3] = lB.r_position_2d;
 		t->confidence[3] = 1;
 
-	}
-	else
-	{
-		//lA is lower left and lB is upper left
+	} else {
+		// lA is lower left and lB is upper left
 		t->positions_3d[0] = lB.position;
-		t->l_positions_2d[0]=lB.l_position_2d;
-		t->r_positions_2d[0]=lB.r_position_2d;
+		t->l_positions_2d[0] = lB.l_position_2d;
+		t->r_positions_2d[0] = lB.r_position_2d;
 		t->confidence[0] = 1;
 
 		t->positions_3d[3] = lA.position;
-		t->l_positions_2d[3]=lA.l_position_2d;
-		t->r_positions_2d[3]=lA.r_position_2d;
+		t->l_positions_2d[3] = lA.l_position_2d;
+		t->r_positions_2d[3] = lA.r_position_2d;
 		t->confidence[3] = 1;
-
 	}
 
 	psvr_led_t hA = leds->at(highest_x_index);
 	psvr_led_t hB = leds->at(highest_x_pair_index);
-	if (hA.position.y < hB.position.y)
-	{
-		//hA is upper right and rB is lower right
+	if (hA.position.y < hB.position.y) {
+		// hA is upper right and hB is lower right
 		t->positions_3d[1] = hA.position;
-		t->l_positions_2d[1]=hA.l_position_2d;
-		t->r_positions_2d[1]=hA.r_position_2d;
+		t->l_positions_2d[1] = hA.l_position_2d;
+		t->r_positions_2d[1] = hA.r_position_2d;
 
 		t->confidence[1] = 1;
 		t->positions_3d[4] = hB.position;
-		t->l_positions_2d[4]=hB.l_position_2d;
-		t->r_positions_2d[4]=hB.r_position_2d;
+		t->l_positions_2d[4] = hB.l_position_2d;
+		t->r_positions_2d[4] = hB.r_position_2d;
 		t->confidence[4] = 1;
-	}
-	else
-	{
-		//hA is lower right and hB is upper right
+	} else {
+		// hA is lower right and hB is upper right
 		t->positions_3d[1] = hB.position;
-		t->l_positions_2d[1]=hB.l_position_2d;
-		t->r_positions_2d[1]=hB.r_position_2d;
+		t->l_positions_2d[1] = hB.l_position_2d;
+		t->r_positions_2d[1] = hB.r_position_2d;
 		t->confidence[1] = 1;
 		t->positions_3d[4] = hA.position;
-		t->l_positions_2d[4]=hA.l_position_2d;
-		t->r_positions_2d[4]=hA.l_position_2d;
+		t->l_positions_2d[4] = hA.l_position_2d;
+		t->r_positions_2d[4] = hA.r_position_2d;
 		t->confidence[4] = 1;
 	}
 	return true;
@@ -150,55 +140,55 @@ bool psvr_disambiguate_5points(std::vector<psvr_led_t>* leds, psvr_track_data* t
 /*//TODO: we dont need to pass a TrackData* here
 bool psvr_compute_svd()
 {
-	//compute SVD for the points we have found, assuming we have at least 3 points
-
-	uint8_t pointCount=0;
-	for (uint32_t i=0;i<MAX_POINTS;i++)
-	{
-		if (t->confidence[i] > 0)
-		{
-			pointCount++;
-		}
-	}
-
-	if (pointCount > 2)
-	{
-		cv::Mat measurement(pointCount, 3, cv::DataType<float>::type);
-		cv::Mat model(pointCount, 3, cv::DataType<float>::type);
-		cv::Mat xCovar;
-		uint8_t c = 0;
-		for (uint32_t i=0;i<MAX_POINTS;i++)
-		{
-			if (t->confidence[i] > 0)
-			{
-				measurement.at<float>(c,0) = t->positions[i].x;
-				measurement.at<float>(c,1) = t->positions[i].y;
-				measurement.at<float>(c,2) = t->positions[i].z;
-				model.at<float>(c,0) = ledPositions[i].x;
-				model.at<float>(c,1) = ledPositions[i].y;
-				model.at<float>(c,2) = ledPositions[i].z;
-				c++;
-			}
-		}
-
-		// create our cross-covariance matrix
-		cv::transpose(model,model);
-		xCovar =  model * measurement;
-		cv::Mat w,u,v,ut;
-		decomposer->compute(xCovar,w,u,v);
-		cv::transpose(u,ut);
-		//TODO: compute determinant
-		cv::Mat rot = v * ut;
-		glm::mat3 glmRot;
-		memcpy((void*)&(glmRot[0][0]),rot.data, 9 * sizeof(float));
-		glm::mat3 tRot = glm::transpose(glmRot);
-		t->rotationMatrix = glm::mat4(tRot);
-		//cout << "R = "<< endl << " "  << rotationMatrix << endl << endl;
-		return true;
-	}
-	else
-	{
-		return false;
-	}
+        // compute SVD for the points we have found, assuming we have at
+        // least 3 points
+
+        uint8_t pointCount=0;
+        for (uint32_t i=0;i<MAX_POINTS;i++)
+        {
+                if (t->confidence[i] > 0)
+                {
+                        pointCount++;
+                }
+        }
+
+        if (pointCount > 2)
+        {
+                cv::Mat measurement(pointCount, 3, cv::DataType<float>::type);
+                cv::Mat model(pointCount, 3, cv::DataType<float>::type);
+                cv::Mat xCovar;
+                uint8_t c = 0;
+                for (uint32_t i=0;i<MAX_POINTS;i++)
+                {
+                        if (t->confidence[i] > 0)
+                        {
+                                measurement.at<float>(c,0) = t->positions[i].x;
+                                measurement.at<float>(c,1) = t->positions[i].y;
+                                measurement.at<float>(c,2) = t->positions[i].z;
+                                model.at<float>(c,0) = ledPositions[i].x;
+                                model.at<float>(c,1) = ledPositions[i].y;
+                                model.at<float>(c,2) = ledPositions[i].z;
+                                c++;
+                        }
+                }
+
+                // create our cross-covariance matrix
+                cv::transpose(model,model);
+                xCovar =  model * measurement;
+                cv::Mat w,u,v,ut;
+                decomposer->compute(xCovar,w,u,v);
+                cv::transpose(u,ut);
+                //TODO: compute determinant
+                cv::Mat rot = v * ut;
+                glm::mat3 glmRot;
+                memcpy((void*)&(glmRot[0][0]),rot.data, 9 * sizeof(float));
+                glm::mat3 tRot = glm::transpose(glmRot);
+                t->rotationMatrix = glm::mat4(tRot);
+                //cout << "R = "<< endl << " "  << rotationMatrix << endl <<
+endl; return true;
+        }
+        else
+        {
+                return false;
+        }
 }*/
-
diff --git a/src/xrt/drivers/montrack/optical_tracking/track_psvr.h b/src/xrt/drivers/montrack/optical_tracking/track_psvr.h
index ac94dda7e078616fcff881b48fc88f9c2df990d2..b0102169160c6350bc494b666e887076dc1423b9 100644
--- a/src/xrt/drivers/montrack/optical_tracking/track_psvr.h
+++ b/src/xrt/drivers/montrack/optical_tracking/track_psvr.h
@@ -8,7 +8,8 @@
 
 #define NUM_LEDS 9
 
-static const char* LED_LABELS[]={"LU","RU","C","LL","RL","LS","RS","LB","RB"};
+static const char* LED_LABELS[] = {"LU", "RU", "C",  "LL", "RL",
+                                   "LS", "RS", "LB", "RB"};
 
 #ifdef __cplusplus
 extern "C" {
@@ -18,12 +19,15 @@ extern "C" {
 typedef struct psvr_track_data
 {
 	uint64_t timestamp;
-	xrt_vec3 positions_3d[NUM_LEDS]; //x,y,z position for up to (currently) 9 points - LU,RU,C,LL,RL,LS,RS,LB,RB
-	xrt_vec2 l_positions_2d[NUM_LEDS]; //2d positions in left and right images
+	xrt_vec3 positions_3d[NUM_LEDS]; // x,y,z position for up to (currently)
+	                                 // 9 points - LU,RU,C,LL,RL,LS,RS,LB,RB
+	xrt_vec2
+	    l_positions_2d[NUM_LEDS]; // 2d positions in left and right images
 	xrt_vec2 r_positions_2d[NUM_LEDS];
-	int8_t confidence[NUM_LEDS]; //-1 if point is not tracked, TODO: 0-128 for confidence
+	int8_t confidence[NUM_LEDS]; //-1 if point is not tracked, TODO: 0-128
+	                             //for confidence
 	xrt_matrix_4x4 rotation_matrix; // SVD-fitted head rotation matrix
-	xrt_vec3 translation; // head translation
+	xrt_vec3 translation;           // head translation
 } psvr_track_data_t;
 
 
@@ -50,13 +54,14 @@ typedef struct psvr_led
 	int sign_y;
 } psvr_led_t;
 
-bool psvr_disambiguate_5points(std::vector<psvr_led_t>* leds, psvr_track_data* t);
+bool
+psvr_disambiguate_5points(std::vector<psvr_led_t>* leds, psvr_track_data* t);
 
-//bool psvr_computeSVD();
+// bool psvr_computeSVD();
 
 
 #ifdef __cplusplus
 }
 #endif
 
-#endif //TRACKPSVR_H
+#endif // TRACKPSVR_H
diff --git a/src/xrt/drivers/montrack/optical_tracking/tracker3D_osvr_uvbi.cpp b/src/xrt/drivers/montrack/optical_tracking/tracker3D_osvr_uvbi.cpp
index eef55a3da6ab48aedb6e8dac81a5df2f3ca3facc..a9919c546f3c2a6721fa4af41d30f409a05d3863 100644
--- a/src/xrt/drivers/montrack/optical_tracking/tracker3D_osvr_uvbi.cpp
+++ b/src/xrt/drivers/montrack/optical_tracking/tracker3D_osvr_uvbi.cpp
@@ -3,12 +3,13 @@
 #include "TrackingDebugDisplay.h"
 #include "opencv4/opencv2/opencv.hpp"
 
-typedef struct tracker3D_osvr_uvbi_instance {
+typedef struct tracker3D_osvr_uvbi_instance
+{
 	bool configured;
 	measurement_consumer_callback_func measurement_target_callback;
-	void* measurement_target_instance; //where we send our measurements
+	void* measurement_target_instance; // where we send our measurements
 	event_consumer_callback_func event_target_callback;
-	void* event_target_instance; //where we send our measurements
+	void* event_target_instance; // where we send our events
 	tracker_mono_configuration_t configuration;
 	osvr::vbtracker::TrackingSystem* system;
 	osvr::vbtracker::TrackedBody* hmd;
@@ -22,22 +23,29 @@ typedef struct tracker3D_osvr_uvbi_instance {
 
 
 
-capture_parameters_t tracker3D_osvr_uvbi_get_capture_params(tracker_instance_t* inst) {
-	capture_parameters_t cp ={};
+capture_parameters_t
+tracker3D_osvr_uvbi_get_capture_params(tracker_instance_t* inst)
+{
+	capture_parameters_t cp = {};
 	cp.exposure = 0.5f;
 	cp.gain = 0.5f;
 	return cp;
 }
 
-tracker3D_osvr_uvbi_instance_t* tracker3D_osvr_uvbi_create(tracker_instance_t* inst) {
+tracker3D_osvr_uvbi_instance_t*
+tracker3D_osvr_uvbi_create(tracker_instance_t* inst)
+{
 
-	tracker3D_osvr_uvbi_instance_t* i = (tracker3D_osvr_uvbi_instance_t*)calloc(1,sizeof(tracker3D_osvr_uvbi_instance_t));
+	tracker3D_osvr_uvbi_instance_t* i =
+	    (tracker3D_osvr_uvbi_instance_t*)calloc(
+	        1, sizeof(tracker3D_osvr_uvbi_instance_t));
 	if (i) {
 		osvr::vbtracker::ConfigParams cp;
-		i->system= new osvr::vbtracker::TrackingSystem(cp);
+		i->system = new osvr::vbtracker::TrackingSystem(cp);
 		i->debug = new osvr::vbtracker::TrackingDebugDisplay(cp);
-		i->debug_rgb = cv::Mat(480,1280,CV_8UC3,cv::Scalar(0,0,0));
-		i->camera_params = osvr::vbtracker::getSimulatedHDKCameraParameters();
+		i->debug_rgb = cv::Mat(480, 1280, CV_8UC3, cv::Scalar(0, 0, 0));
+		i->camera_params =
+		    osvr::vbtracker::getSimulatedHDKCameraParameters();
 		i->hmd = i->system->createTrackedBody();
 		i->alloced_frames = false;
 		return i;
@@ -46,62 +54,89 @@ tracker3D_osvr_uvbi_instance_t* tracker3D_osvr_uvbi_create(tracker_instance_t* i
 }
 
 
-bool tracker3D_osvr_uvbi_get_debug_frame(tracker_instance_t* inst,frame_t* frame){
-	tracker3D_osvr_uvbi_instance_t* internal = (tracker3D_osvr_uvbi_instance_t*)inst->internal_instance;
+bool
+tracker3D_osvr_uvbi_get_debug_frame(tracker_instance_t* inst, frame_t* frame)
+{
+	tracker3D_osvr_uvbi_instance_t* internal =
+	    (tracker3D_osvr_uvbi_instance_t*)inst->internal_instance;
 	cv::Mat rgbFrame;
-	cv::cvtColor(internal->frame_gray,rgbFrame,CV_GRAY2BGR);
-	cv::Mat osvr_debug = internal->debug->createStatusImage(*(internal->system),internal->camera_params,rgbFrame);
+	cv::cvtColor(internal->frame_gray, rgbFrame, CV_GRAY2BGR);
+	cv::Mat osvr_debug = internal->debug->createStatusImage(
+	    *(internal->system), internal->camera_params, rgbFrame);
 
 	osvr_debug.copyTo(internal->debug_rgb);
 
 
 	frame->format = FORMAT_RGB_UINT8;
 	frame->width = internal->debug_rgb.cols;
-	frame->stride = internal->debug_rgb.cols * format_bytes_per_pixel(frame->format);
+	frame->stride =
+	    internal->debug_rgb.cols * format_bytes_per_pixel(frame->format);
 	frame->height = internal->debug_rgb.rows;
 	frame->data = internal->debug_rgb.data;
 	frame->size_bytes = frame_size_in_bytes(frame);
 
 	return true;
 }
-bool tracker3D_osvr_uvbi_queue(tracker_instance_t* inst,frame_t* frame){
-	tracker3D_osvr_uvbi_instance_t* internal = (tracker3D_osvr_uvbi_instance_t*)inst->internal_instance;
+bool
+tracker3D_osvr_uvbi_queue(tracker_instance_t* inst, frame_t* frame)
+{
+	tracker3D_osvr_uvbi_instance_t* internal =
+	    (tracker3D_osvr_uvbi_instance_t*)inst->internal_instance;
 	printf("received frame, tracking!\n");
-	if (!internal->alloced_frames)
-	{
-		internal->frame_gray = cv::Mat(frame->height,frame->stride,CV_8UC1,cv::Scalar(0,0,0));
-		internal->debug_rgb = cv::Mat(frame->height,frame->width,CV_8UC3,cv::Scalar(0,0,0));
-		internal->alloced_frames =true;
+	if (!internal->alloced_frames) {
+		internal->frame_gray = cv::Mat(frame->height, frame->stride,
+		                               CV_8UC1, cv::Scalar(0, 0, 0));
+		internal->debug_rgb = cv::Mat(frame->height, frame->width,
+		                              CV_8UC3, cv::Scalar(0, 0, 0));
+		internal->alloced_frames = true;
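+		// note: frame_gray uses stride (bytes per row) as its column
+		// count, which only lines up for the 1-byte-per-pixel Y8
+		// frames this tracker accepts in its configure function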
 	}
-	//we will just 'do the work' here.
-	//TODO: asynchronous tracker thread
-
-	memcpy(internal->frame_gray.data,frame->data,frame->size_bytes);
-	internal->system->processFrame(internal->current_time,internal->frame_gray,internal->frame_gray,internal->camera_params);
-	tracker_send_debug_frame(inst); //publish our debug frame
+	// we will just 'do the work' here.
+	// TODO: asynchronous tracker thread
+
+	memcpy(internal->frame_gray.data, frame->data, frame->size_bytes);
+	internal->system->processFrame(
+	    internal->current_time, internal->frame_gray, internal->frame_gray,
+	    internal->camera_params);
+	tracker_send_debug_frame(inst); // publish our debug frame
 	return true;
 }
-bool tracker3D_osvr_uvbi_get_poses(tracker_instance_t* inst,tracked_object_t* objects,uint32_t* count){
+bool
+tracker3D_osvr_uvbi_get_poses(tracker_instance_t* inst,
+                              tracked_object_t* objects,
+                              uint32_t* count)
+{
 	return false;
 }
-bool tracker3D_osvr_uvbi_new_poses(tracker_instance_t* inst){
+bool
+tracker3D_osvr_uvbi_new_poses(tracker_instance_t* inst)
+{
 	return false;
 }
-bool tracker3D_osvr_uvbi_configure(tracker_instance_t* inst, tracker_mono_configuration_t* config){
-	tracker3D_osvr_uvbi_instance_t*  internal = (tracker3D_osvr_uvbi_instance_t*)inst->internal_instance;
-	//return false if we cannot handle this config
+bool
+tracker3D_osvr_uvbi_configure(tracker_instance_t* inst,
+                              tracker_mono_configuration_t* config)
+{
+	tracker3D_osvr_uvbi_instance_t* internal =
+	    (tracker3D_osvr_uvbi_instance_t*)inst->internal_instance;
+	// return false if we cannot handle this config
 
 	if (config->format != FORMAT_Y_UINT8) {
-		internal->configured=false;
+		internal->configured = false;
 		return false;
 	}
 	internal->configuration = *config;
-	internal->configured=true;
+	internal->configured = true;
 	return true;
 }
-void tracker3D_osvr_uvbi_register_measurement_callback (tracker_instance_t* inst, void* target_instance, measurement_consumer_callback_func target_func){
-
-}
-void tracker3D_osvr_uvbi_register_event_callback (tracker_instance_t* inst, void* target_instance, event_consumer_callback_func target_func){
-
-}
+void
+tracker3D_osvr_uvbi_register_measurement_callback(
+    tracker_instance_t* inst,
+    void* target_instance,
+    measurement_consumer_callback_func target_func)
+{}
+void
+tracker3D_osvr_uvbi_register_event_callback(
+    tracker_instance_t* inst,
+    void* target_instance,
+    event_consumer_callback_func target_func)
+{}
diff --git a/src/xrt/drivers/montrack/optical_tracking/tracker3D_osvr_uvbi.h b/src/xrt/drivers/montrack/optical_tracking/tracker3D_osvr_uvbi.h
index 32b473af2ab8a7a20ba8eead7fda2c70865776d9..f285be6d2217d40623a77a7e6cfdc0301aba0424 100644
--- a/src/xrt/drivers/montrack/optical_tracking/tracker3D_osvr_uvbi.h
+++ b/src/xrt/drivers/montrack/optical_tracking/tracker3D_osvr_uvbi.h
@@ -11,23 +11,41 @@ extern "C" {
 #endif
 
 
-//forward declare this
+// forward declare this
 typedef struct tracker3D_osvr_uvbi_instance tracker3D_osvr_uvbi_instance_t;
 
 
-tracker3D_osvr_uvbi_instance_t* tracker3D_osvr_uvbi_create(tracker_instance_t* inst);
-bool tracker3D_osvr_uvbi_destroy(tracker_instance_t* inst);
-
-capture_parameters_t tracker3D_osvr_uvbi_get_capture_params(tracker_instance_t* inst);
-
-bool tracker3D_osvr_uvbi_get_debug_frame(tracker_instance_t* inst,frame_t* frame);
-bool tracker3D_osvr_uvbi_queue(tracker_instance_t* inst,frame_t* frame);
-bool tracker3D_osvr_uvbi_get_poses(tracker_instance_t* inst,tracked_object_t* objects,uint32_t* count);
-bool tracker3D_osvr_uvbi_new_poses(tracker_instance_t* inst);
-bool tracker3D_osvr_uvbi_configure(tracker_instance_t* inst, tracker_mono_configuration_t* config);
-void tracker3D_osvr_uvbi_register_measurement_callback (tracker_instance_t* inst, void* target_instance, measurement_consumer_callback_func target_func);
-void tracker3D_osvr_uvbi_register_event_callback (tracker_instance_t* inst, void* target_instance, event_consumer_callback_func target_func);
-
+tracker3D_osvr_uvbi_instance_t*
+tracker3D_osvr_uvbi_create(tracker_instance_t* inst);
+bool
+tracker3D_osvr_uvbi_destroy(tracker_instance_t* inst);
+
+capture_parameters_t
+tracker3D_osvr_uvbi_get_capture_params(tracker_instance_t* inst);
+
+bool
+tracker3D_osvr_uvbi_get_debug_frame(tracker_instance_t* inst, frame_t* frame);
+bool
+tracker3D_osvr_uvbi_queue(tracker_instance_t* inst, frame_t* frame);
+bool
+tracker3D_osvr_uvbi_get_poses(tracker_instance_t* inst,
+                              tracked_object_t* objects,
+                              uint32_t* count);
+bool
+tracker3D_osvr_uvbi_new_poses(tracker_instance_t* inst);
+bool
+tracker3D_osvr_uvbi_configure(tracker_instance_t* inst,
+                              tracker_mono_configuration_t* config);
+void
+tracker3D_osvr_uvbi_register_measurement_callback(
+    tracker_instance_t* inst,
+    void* target_instance,
+    measurement_consumer_callback_func target_func);
+void
+tracker3D_osvr_uvbi_register_event_callback(
+    tracker_instance_t* inst,
+    void* target_instance,
+    event_consumer_callback_func target_func);
 
 
 
diff --git a/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_mono.cpp b/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_mono.cpp
index 328cb698fc6dc6d70f25bda74f9a82e9379c0904..529a4a5d22ff6895aeefc45c76142a1268c65baf 100644
--- a/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_mono.cpp
+++ b/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_mono.cpp
@@ -5,12 +5,13 @@
 
 #define MAX_CALIBRATION_SAMPLES 23
 
-typedef struct tracker3D_sphere_mono_instance {
+typedef struct tracker3D_sphere_mono_instance
+{
 	bool configured;
 	measurement_consumer_callback_func measurement_target_callback;
-	void* measurement_target_instance; //where we send our measurements
+	void* measurement_target_instance; // where we send our measurements
 	event_consumer_callback_func event_target_callback;
-	void* event_target_instance; //where we send our measurements
+	void* event_target_instance; // where we send our measurements
 	tracker_mono_configuration_t configuration;
 	tracked_object_t tracked_object;
 	tracked_blob_t tracked_blob;
@@ -29,7 +30,7 @@ typedef struct tracker3D_sphere_mono_instance {
 	cv::Mat undistort_map_x;
 	cv::Mat undistort_map_y;
 
-	//calibration data structures
+	// calibration data structures
 	std::vector<std::vector<cv::Point3f>> chessboards_model;
 	std::vector<std::vector<cv::Point2f>> chessboards_measured;
 
@@ -37,351 +38,451 @@ typedef struct tracker3D_sphere_mono_instance {
 	bool alloced_frames;
 } tracker3D_sphere_mono_instance_t;
 
-tracker3D_sphere_mono_instance_t* tracker3D_sphere_mono_create(tracker_instance_t* inst) {
-	tracker3D_sphere_mono_instance_t* i = (tracker3D_sphere_mono_instance_t*)calloc(1,sizeof(tracker3D_sphere_mono_instance_t));
+tracker3D_sphere_mono_instance_t*
+tracker3D_sphere_mono_create(tracker_instance_t* inst)
+{
+	tracker3D_sphere_mono_instance_t* i =
+	    (tracker3D_sphere_mono_instance_t*)calloc(
+	        1, sizeof(tracker3D_sphere_mono_instance_t));
 	if (i) {
-		i->params.filterByArea=false;
-		i->params.filterByConvexity=false;
-		i->params.filterByInertia=false;
-		i->params.filterByColor=true;
-		i->params.blobColor=255; //0 or 255 - color comes from binarized image?
-		i->params.minArea=1;
-		i->params.maxArea=1000;
-		i->params.maxThreshold=51; //using a wide threshold span slows things down bigtime
-		i->params.minThreshold=50;
-		i->params.thresholdStep=1;
-		i->params.minDistBetweenBlobs=5;
-		i->params.minRepeatability=1; //need this to avoid error?
+		i->params.filterByArea = false;
+		i->params.filterByConvexity = false;
+		i->params.filterByInertia = false;
+		i->params.filterByColor = true;
+		i->params.blobColor =
+		    255; // 0 or 255 - color comes from binarized image?
+		i->params.minArea = 1;
+		i->params.maxArea = 1000;
+		i->params.maxThreshold =
+		    51; // using a wide threshold span slows things down big time
+		i->params.minThreshold = 50;
+		i->params.thresholdStep = 1;
+		i->params.minDistBetweenBlobs = 5;
+		i->params.minRepeatability = 1; // need this to avoid error?
 
 		i->sbd = cv::SimpleBlobDetector::create(i->params);
-        i->background_subtractor = cv::createBackgroundSubtractorMOG2(32,16,false);
+		i->background_subtractor =
+		    cv::createBackgroundSubtractorMOG2(32, 16, false);
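+		// MOG2 args: history of 32 frames, variance threshold 16,
+		// shadow detection disabled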
 
-		i->poses_consumed=false;
-		i->configured=false;
-		i->alloced_frames=false;
+		i->poses_consumed = false;
+		i->configured = false;
+		i->alloced_frames = false;
 		int intrinsics_dim = sqrt(INTRINSICS_SIZE);
-		i->intrinsics = cv::Mat(intrinsics_dim,intrinsics_dim,CV_32F);
-		i->distortion = cv::Mat(DISTORTION_SIZE,1,CV_32F);
+		i->intrinsics = cv::Mat(intrinsics_dim, intrinsics_dim, CV_32F);
+		i->distortion = cv::Mat(DISTORTION_SIZE, 1, CV_32F);
 
 		return i;
 	}
 	return NULL;
 }
-bool tracker3D_sphere_mono_get_debug_frame(tracker_instance_t* inst,frame_t* frame){
-    tracker3D_sphere_mono_instance_t* internal = (tracker3D_sphere_mono_instance_t*)inst->internal_instance;
-    frame->format = FORMAT_RGB_UINT8;
-    frame->width = internal->debug_rgb.cols;
-    frame->stride = internal->debug_rgb.cols * format_bytes_per_pixel(frame->format);
-    frame->height = internal->debug_rgb.rows;
-    frame->data = internal->debug_rgb.data;
-    frame->size_bytes = frame_size_in_bytes(frame);
-    return true;
+bool
+tracker3D_sphere_mono_get_debug_frame(tracker_instance_t* inst, frame_t* frame)
+{
+	tracker3D_sphere_mono_instance_t* internal =
+	    (tracker3D_sphere_mono_instance_t*)inst->internal_instance;
+	frame->format = FORMAT_RGB_UINT8;
+	frame->width = internal->debug_rgb.cols;
+	frame->stride =
+	    internal->debug_rgb.cols * format_bytes_per_pixel(frame->format);
+	frame->height = internal->debug_rgb.rows;
+	frame->data = internal->debug_rgb.data;
+	frame->size_bytes = frame_size_in_bytes(frame);
+	return true;
 }
-capture_parameters_t tracker3D_sphere_mono_get_capture_params(tracker_instance_t* inst) {
-	capture_parameters_t cp={};
-    cp.exposure = 0.5f;
-    cp.gain=0.1f;
+capture_parameters_t
+tracker3D_sphere_mono_get_capture_params(tracker_instance_t* inst)
+{
+	capture_parameters_t cp = {};
+	cp.exposure = 0.5f;
+	cp.gain = 0.1f;
 	return cp;
 }
 
-bool tracker3D_sphere_mono_queue(tracker_instance_t* inst,frame_t* frame) {
-	tracker3D_sphere_mono_instance_t* internal = (tracker3D_sphere_mono_instance_t*)inst->internal_instance;
-	if (! internal->configured){
-		printf("ERROR: you must configure this tracker before it can accept frames\n");
+bool
+tracker3D_sphere_mono_queue(tracker_instance_t* inst, frame_t* frame)
+{
+	tracker3D_sphere_mono_instance_t* internal =
+	    (tracker3D_sphere_mono_instance_t*)inst->internal_instance;
+	if (!internal->configured) {
+		printf(
+		    "ERROR: you must configure this tracker before it can "
+		    "accept frames\n");
 		return false;
 	}
 	printf("received frame, tracking!\n");
-	if (!internal->alloced_frames)
-	{
-        internal->frame_gray = cv::Mat(frame->height,frame->stride,CV_8UC1,cv::Scalar(0,0,0));
-        internal->mask_gray = cv::Mat(frame->height,frame->stride,CV_8UC1,cv::Scalar(0,0,0));
-		internal->debug_rgb = cv::Mat(frame->height,frame->width,CV_8UC3,cv::Scalar(0,0,0));
-		internal->alloced_frames =true;
+	if (!internal->alloced_frames) {
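+		// gray/mask mats use the source stride as their width so the
+		// raw memcpy below copies whole padded rows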
+		internal->frame_gray = cv::Mat(frame->height, frame->stride,
+		                               CV_8UC1, cv::Scalar(0, 0, 0));
+		internal->mask_gray = cv::Mat(frame->height, frame->stride,
+		                              CV_8UC1, cv::Scalar(0, 0, 0));
+		internal->debug_rgb = cv::Mat(frame->height, frame->width,
+		                              CV_8UC3, cv::Scalar(0, 0, 0));
+		internal->alloced_frames = true;
 	}
-	//we will just 'do the work' here.
-	//TODO: asynchronous tracker thread
+	// we will just 'do the work' here.
+	// TODO: asynchronous tracker thread
 
-	memcpy(internal->frame_gray.data,frame->data,frame->size_bytes);
+	memcpy(internal->frame_gray.data, frame->data, frame->size_bytes);
 
 	switch (internal->configuration.calibration_mode) {
-	        case CALIBRATION_MODE_NONE:
-		        return tracker3D_sphere_mono_track(inst);
-		        break;
-	        case CALIBRATION_MODE_CHESSBOARD:
-		        return tracker3D_sphere_mono_calibrate(inst);
-		        break;
-	        default:
-		        printf("ERROR: unrecognised calibration mode\n");
-		        return false;
-	    }
+	case CALIBRATION_MODE_NONE:
+		return tracker3D_sphere_mono_track(inst);
+		break;
+	case CALIBRATION_MODE_CHESSBOARD:
+		return tracker3D_sphere_mono_calibrate(inst);
+		break;
+	default:
+		printf("ERROR: unrecognised calibration mode\n");
+		return false;
+	}
 	return true;
 }
 
 
 
-
-bool tracker3D_sphere_mono_track(tracker_instance_t* inst)
+bool
+tracker3D_sphere_mono_track(tracker_instance_t* inst)
 {
 	printf("tracking...\n");
-	tracker3D_sphere_mono_instance_t* internal = (tracker3D_sphere_mono_instance_t*)inst->internal_instance;
+	tracker3D_sphere_mono_instance_t* internal =
+	    (tracker3D_sphere_mono_instance_t*)inst->internal_instance;
 
 	internal->keypoints.clear();
-	cv::Size image_size(internal->frame_gray.cols,internal->frame_gray.rows);
+	cv::Size image_size(internal->frame_gray.cols,
+	                    internal->frame_gray.rows);
 	// TODO: save data indicating calibration image size
 	// and multiply intrinsics accordingly
 
 
-	//add this frame to the background average mask generator
-	internal->background_subtractor->apply(internal->frame_gray,internal->mask_gray);
-	//we always want to be able to track small motions, so write white blocks into the masks that encompass the last seen positions of the blobs
+	// add this frame to the background average mask generator
+	internal->background_subtractor->apply(internal->frame_gray,
+	                                       internal->mask_gray);
+	// we always want to be able to track small motions, so write white
+	// blocks into the masks that encompass the last seen positions of the
+	// blobs
 	xrt_vec2 lastPos = internal->tracked_blob.center;
 	float offset = ROI_OFFSET;
 	if (internal->tracked_blob.diameter > ROI_OFFSET) {
 		offset = internal->tracked_blob.diameter;
 	}
-	//ensure we dont mask out our blob
-	cv::rectangle(internal->mask_gray, cv::Point2f(lastPos.x-offset,lastPos.y-offset),cv::Point2f(lastPos.x+offset,lastPos.y+offset),cv::Scalar( 255 ),-1,0);
-
-	//write something into our debug image
-	cv::rectangle(internal->debug_rgb, cv::Point2f(0,0),cv::Point2f(internal->debug_rgb.cols,internal->debug_rgb.rows),cv::Scalar( 0,0,0 ),-1,0);
-	cv::rectangle(internal->debug_rgb, cv::Point2f(lastPos.x-offset,lastPos.y-offset),cv::Point2f(lastPos.x+offset,lastPos.y+offset),cv::Scalar( 0,0,255 ),-1,0);
-
-	//do blob detection with our mask
-	internal->sbd->detect(internal->frame_gray, internal->keypoints,internal->mask_gray);
-	bool ret = cv::imwrite("/tmp/out.jpg",internal->frame_gray);
-	//ret = cv::imwrite("/tmp/mask.jpg",internal->mask_gray);
+	// ensure we don't mask out our blob
+	cv::rectangle(internal->mask_gray,
+	              cv::Point2f(lastPos.x - offset, lastPos.y - offset),
+	              cv::Point2f(lastPos.x + offset, lastPos.y + offset),
+	              cv::Scalar(255), -1, 0);
+
+	// write something into our debug image
+	cv::rectangle(
+	    internal->debug_rgb, cv::Point2f(0, 0),
+	    cv::Point2f(internal->debug_rgb.cols, internal->debug_rgb.rows),
+	    cv::Scalar(0, 0, 0), -1, 0);
+	cv::rectangle(internal->debug_rgb,
+	              cv::Point2f(lastPos.x - offset, lastPos.y - offset),
+	              cv::Point2f(lastPos.x + offset, lastPos.y + offset),
+	              cv::Scalar(0, 0, 255), -1, 0);
+
+	// do blob detection with our mask
+	internal->sbd->detect(internal->frame_gray, internal->keypoints,
+	                      internal->mask_gray);
+	bool ret = cv::imwrite("/tmp/out.jpg", internal->frame_gray);
+	// ret = cv::imwrite("/tmp/mask.jpg",internal->mask_gray);
 
 	cv::KeyPoint blob;
 	tracker_measurement_t m = {};
-	//we can just grab the last blob in our list.
+	// we can just grab the last blob in our list.
 
-	//TODO: select the most likely blob here
-	for (uint32_t i=0;i<internal->keypoints.size();i++)
-	{
+	// TODO: select the most likely blob here
+	for (uint32_t i = 0; i < internal->keypoints.size(); i++) {
 		blob = internal->keypoints.at(i);
-		printf ("2D blob X: %f Y: %f D:%f\n",blob.pt.x,blob.pt.y,blob.size);
+		printf("2D blob X: %f Y: %f D:%f\n", blob.pt.x, blob.pt.y,
+		       blob.size);
 	}
-	cv::drawKeypoints(internal->frame_gray,internal->keypoints,internal->debug_rgb,cv::Scalar(128,255,18),cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
+	cv::drawKeypoints(internal->frame_gray, internal->keypoints,
+	                  internal->debug_rgb, cv::Scalar(128, 255, 18),
+	                  cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
 
 	if (internal->keypoints.size() > 0) {
-		internal->tracked_blob.center = {blob.pt.x,blob.pt.y};
-		internal->tracked_blob.diameter=blob.size;
+		internal->tracked_blob.center = {blob.pt.x, blob.pt.y};
+		internal->tracked_blob.diameter = blob.size;
 
-		float cx = internal->intrinsics.at<double>(0,2);
-		float cy = internal->intrinsics.at<double>(1,2);
-		float focalx=internal->intrinsics.at<double>(0,0);
-		float focaly=internal->intrinsics.at<double>(1,1);
+		float cx = internal->intrinsics.at<double>(0, 2);
+		float cy = internal->intrinsics.at<double>(1, 2);
+		float focalx = internal->intrinsics.at<double>(0, 0);
+		float focaly = internal->intrinsics.at<double>(1, 1);
 
-		// we can just undistort our tracked blob-center, rather than undistorting
-		// every pixel in the frame
+		// we can just undistort our tracked blob-center, rather than
+		// undistorting every pixel in the frame
 		std::vector<cv::Point2f> src;
 		cv::Mat dst;
 
-		src.push_back(cv::Point2f(internal->tracked_blob.center.x,internal->tracked_blob.center.y));
+		src.push_back(cv::Point2f(internal->tracked_blob.center.x,
+		                          internal->tracked_blob.center.y));
 
-		cv::undistortPoints(src,dst,internal->intrinsics,internal->distortion,cv::noArray(),cv::noArray());
-		float pixelConstant =1.0f;
+		cv::undistortPoints(src, dst, internal->intrinsics,
+		                    internal->distortion, cv::noArray(),
+		                    cv::noArray());
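+		// undistortPoints with an empty new-projection matrix returns
+		// normalized coordinates - that is why focal/center are
+		// re-applied when drawing the debug circle below. z is only a
+		// crude heuristic: apparent blob diameter times an
+		// uncalibrated constant, not a true metric depth.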
+		float pixelConstant = 1.0f;
 		float z = internal->tracked_blob.diameter * pixelConstant;
-		float x = dst.at<float>(0,0);
-		float y = dst.at<float>(0,1);
+		float x = dst.at<float>(0, 0);
+		float y = dst.at<float>(0, 1);
 
-		cv::circle(internal->debug_rgb,cv::Point2f(x*focalx + cx,y *focaly +cy),3,cv::Scalar(32,32,192));
+		cv::circle(internal->debug_rgb,
+		           cv::Point2f(x * focalx + cx, y * focaly + cy), 3,
+		           cv::Scalar(32, 32, 192));
 
-		//printf("%f %f %f\n",x,y,z);
+		// printf("%f %f %f\n",x,y,z);
 
 		m.has_position = true;
-		m.timestamp =0;
+		m.timestamp = 0;
 		m.pose.position.x = x;
 		m.pose.position.y = y;
 		m.pose.position.z = z;
 
 
-		if (internal->measurement_target_callback){
-			internal->measurement_target_callback(internal->measurement_target_instance,&m);
+		if (internal->measurement_target_callback) {
+			internal->measurement_target_callback(
+			    internal->measurement_target_instance, &m);
 		}
 	}
 
-	//publish our debug frame
-	tracker_send_debug_frame(inst); //publish our debug frame
+	// publish our debug frame
+	tracker_send_debug_frame(inst);
 
 	return true;
 }
-bool tracker3D_sphere_mono_calibrate(tracker_instance_t* inst)
+bool
+tracker3D_sphere_mono_calibrate(tracker_instance_t* inst)
 {
 	printf("calibrating...\n");
-	tracker3D_sphere_mono_instance_t* internal = (tracker3D_sphere_mono_instance_t*)inst->internal_instance;
-	cv::Size image_size(internal->frame_gray.cols,internal->frame_gray.rows);
+	tracker3D_sphere_mono_instance_t* internal =
+	    (tracker3D_sphere_mono_instance_t*)inst->internal_instance;
+	cv::Size image_size(internal->frame_gray.cols,
+	                    internal->frame_gray.rows);
 
-	bool ret = cv::imwrite("/tmp/out.jpg",internal->frame_gray);
+	bool ret = cv::imwrite("/tmp/out.jpg", internal->frame_gray);
 
 
-	//TODO: use multiple env vars? - centralise this
+	// TODO: use multiple env vars? - centralise this
 	char path_string[1024];
 	char* config_path = secure_getenv("HOME");
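+	// secure_getenv may return NULL if HOME is unset - bail out rather
+	// than passing NULL to snprintf
+	if (config_path == NULL) {
+		return false;
+	}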
-	snprintf(path_string,1024,"%s/.config/monado/%s.calibration",config_path,internal->configuration.configuration_filename);
+	snprintf(path_string, 1024, "%s/.config/monado/%s.calibration",
+	         config_path, internal->configuration.configuration_filename);
 
-	printf("TRY LOADING CONFIG FROM %s\n",path_string);
-	FILE* calib_file = fopen(path_string,"rb");
+	printf("TRY LOADING CONFIG FROM %s\n", path_string);
+	FILE* calib_file = fopen(path_string, "rb");
 	if (calib_file) {
-		//read our calibration from this file
-		read_mat(calib_file,&internal->intrinsics);
-		read_mat(calib_file,&internal->distortion);
-		read_mat(calib_file,&internal->distortion_fisheye);
+		// read our calibration from this file
+		read_mat(calib_file, &internal->intrinsics);
+		read_mat(calib_file, &internal->distortion);
+		read_mat(calib_file, &internal->distortion_fisheye);
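+		// file layout is intrinsics, distortion, fisheye distortion -
+		// the same order write_mat uses when saving below
+		fclose(calib_file); // close the handle we just read from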
 
 		// TODO: save data indicating calibration image size
 		// and multiply intrinsics accordingly
 
-		cv::initUndistortRectifyMap(internal->intrinsics, internal->distortion, cv::noArray(), internal->intrinsics, image_size, CV_32FC1, internal->undistort_map_x, internal->undistort_map_y);
+		cv::initUndistortRectifyMap(
+		    internal->intrinsics, internal->distortion, cv::noArray(),
+		    internal->intrinsics, image_size, CV_32FC1,
+		    internal->undistort_map_x, internal->undistort_map_y);
 
 		printf("calibrated cameras! setting tracking mode\n");
-		internal->calibrated=true;
+		internal->calibrated = true;
 
-		internal->configuration.calibration_mode = CALIBRATION_MODE_NONE;
-		//send an event to notify our driver of the switch into tracking mode.
-		driver_event_t e ={};
+		internal->configuration.calibration_mode =
+		    CALIBRATION_MODE_NONE;
+		// send an event to notify our driver of the switch into
+		// tracking mode.
+		driver_event_t e = {};
 		e.type = EVENT_TRACKER_RECONFIGURED;
-		internal->event_target_callback(internal->event_target_instance,e);
+		// guard against an unregistered event callback
+		if (internal->event_target_callback) {
+			internal->event_target_callback(
+			    internal->event_target_instance, e);
+		}
 		return true;
 	}
 
-	//no saved file - perform interactive calibration.
-	//try and find a chessboard in the image, and run the calibration.
-	//TODO: we need to define some mechanism for UI/user interaction.
+	// no saved file - perform interactive calibration.
+	// try and find a chessboard in the image, and run the calibration.
+	// TODO: we need to define some mechanism for UI/user interaction.
 
 	// TODO: initialise this on construction and move this to internal state
-	cv::Size board_size(8,6);
+	cv::Size board_size(8, 6);
 	std::vector<cv::Point3f> chessboard_model;
 
-	for (uint32_t i=0;i< board_size.width * board_size.height;i++) {
-		cv::Point3f p(i/board_size.width,i % board_size.width,0.0f);
+	for (uint32_t i = 0; i < board_size.width * board_size.height; i++) {
+		cv::Point3f p(i / board_size.width, i % board_size.width, 0.0f);
 		chessboard_model.push_back(p);
 	}
 
 	cv::Mat chessboard_measured;
 
-	//clear our debug image
-	cv::rectangle(internal->debug_rgb, cv::Point2f(0,0),cv::Point2f(internal->debug_rgb.cols,internal->debug_rgb.rows),cv::Scalar( 0,0,0 ),-1,0);
+	// clear our debug image
+	cv::rectangle(
+	    internal->debug_rgb, cv::Point2f(0, 0),
+	    cv::Point2f(internal->debug_rgb.cols, internal->debug_rgb.rows),
+	    cv::Scalar(0, 0, 0), -1, 0);
 
-	//we will collect samples continuously - the user should be able to wave a chessboard around randomly
-	//while the system calibrates..
+	// we will collect samples continuously - the user should be able to
+	// wave a chessboard around randomly while the system calibrates.
 
-	//TODO: we need a coverage measurement and an accuracy measurement,
-	// so we can converge to something that is as complete and correct as possible.
+	// TODO: we need a coverage measurement and an accuracy measurement,
+	// so we can converge to something that is as complete and correct as
+	// possible.
 
-	bool found_board = cv::findChessboardCorners(internal->frame_gray,board_size,chessboard_measured);
+	bool found_board = cv::findChessboardCorners(
+	    internal->frame_gray, board_size, chessboard_measured);
 	char message[128];
-	message[0]=0x0;
-
-	if ( found_board ){
-		//we will use the last n samples to calculate our calibration
-		if (internal->chessboards_measured.size() > MAX_CALIBRATION_SAMPLES)
-		{
-			internal->chessboards_measured.erase(internal->chessboards_measured.begin());
-		}
-		else
-		{
+	message[0] = 0x0;
+
+	if (found_board) {
+		// we will use the last n samples to calculate our calibration
+		if (internal->chessboards_measured.size() >
+		    MAX_CALIBRATION_SAMPLES) {
+			internal->chessboards_measured.erase(
+			    internal->chessboards_measured.begin());
+		} else {
 			internal->chessboards_model.push_back(chessboard_model);
 		}
 
 		internal->chessboards_measured.push_back(chessboard_measured);
 
-		if (internal->chessboards_measured.size() == MAX_CALIBRATION_SAMPLES)
-		{
-			//TODO - run this if coverage test passes
-			cv::Mat rvecs,tvecs;
+		if (internal->chessboards_measured.size() ==
+		    MAX_CALIBRATION_SAMPLES) {
+			// TODO - run this if coverage test passes
+			cv::Mat rvecs, tvecs;
 
-			float rp_error = cv::calibrateCamera(internal->chessboards_model,internal->chessboards_measured,image_size,internal->intrinsics,internal->distortion,rvecs,tvecs);
+			float rp_error = cv::calibrateCamera(
+			    internal->chessboards_model,
+			    internal->chessboards_measured, image_size,
+			    internal->intrinsics, internal->distortion, rvecs,
+			    tvecs);
 
-			cv::initUndistortRectifyMap(internal->intrinsics, internal->distortion, cv::noArray(), internal->intrinsics, image_size, CV_32FC1, internal->undistort_map_x, internal->undistort_map_y);
+			cv::initUndistortRectifyMap(
+			    internal->intrinsics, internal->distortion,
+			    cv::noArray(), internal->intrinsics, image_size,
+			    CV_32FC1, internal->undistort_map_x,
+			    internal->undistort_map_y);
 
 			char path_string[PATH_MAX];
 			char file_string[PATH_MAX];
-			//TODO: use multiple env vars?
+			// TODO: use multiple env vars?
 			char* config_path = secure_getenv("HOME");
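+			// as above, guard against HOME being unset
+			if (config_path == NULL) {
+				return false;
+			}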
-			snprintf(path_string,PATH_MAX,"%s/.config/monado",config_path);
-			snprintf(file_string,PATH_MAX,"%s/.config/monado/%s.calibration",config_path,internal->configuration.configuration_filename);
-
-			printf("TRY WRITING CONFIG TO %s\n",file_string);
-			FILE* calib_file = fopen(file_string,"wb");
-			if (! calib_file) {
+			snprintf(path_string, PATH_MAX, "%s/.config/monado",
+			         config_path);
+			snprintf(
+			    file_string, PATH_MAX,
+			    "%s/.config/monado/%s.calibration", config_path,
+			    internal->configuration.configuration_filename);
+
+			printf("TRY WRITING CONFIG TO %s\n", file_string);
+			FILE* calib_file = fopen(file_string, "wb");
+			if (!calib_file) {
 				mkpath(path_string);
 			}
-			calib_file = fopen(file_string,"wb");
-			if (! calib_file) {
-				printf("ERROR. could not create calibration file %s\n",file_string);
+			calib_file = fopen(file_string, "wb");
+			if (!calib_file) {
+				printf(
+				    "ERROR: could not create calibration file "
+				    "%s\n",
+				    file_string);
 			} else {
-				write_mat(calib_file,&internal->intrinsics);
-				write_mat(calib_file,&internal->distortion);
-				write_mat(calib_file,&internal->distortion_fisheye);
+				write_mat(calib_file, &internal->intrinsics);
+				write_mat(calib_file, &internal->distortion);
+				write_mat(calib_file,
+				          &internal->distortion_fisheye);
 				fclose(calib_file);
 			}
 
 			printf("calibrated cameras! setting tracking mode\n");
-			internal->calibrated=true;
-			internal->configuration.calibration_mode = CALIBRATION_MODE_NONE;
-			//send an event to notify our driver of the switch into tracking mode.
-			driver_event_t e ={};
+			internal->calibrated = true;
+			internal->configuration.calibration_mode =
+			    CALIBRATION_MODE_NONE;
+			// send an event to notify our driver of the switch into
+			// tracking mode.
+			driver_event_t e = {};
 			e.type = EVENT_TRACKER_RECONFIGURED;
-			internal->event_target_callback(internal->event_target_instance,e);
+			// guard against an unregistered event callback
+			if (internal->event_target_callback) {
+				internal->event_target_callback(
+				    internal->event_target_instance, e);
+			}
 		} else {
-			snprintf(message,128,"COLLECTING SAMPLE: %d/%d",internal->chessboards_measured.size() +1,MAX_CALIBRATION_SAMPLES);
+			snprintf(message, 128, "COLLECTING SAMPLE: %d/%d",
+			         (int)internal->chessboards_measured.size() + 1,
+			         MAX_CALIBRATION_SAMPLES);
 		}
 	}
 
-	cv::drawChessboardCorners(internal->debug_rgb,board_size,chessboard_measured,found_board);
+	cv::drawChessboardCorners(internal->debug_rgb, board_size,
+	                          chessboard_measured, found_board);
 
-	cv::putText(internal->debug_rgb,"CALIBRATION MODE",cv::Point2i(160,240),0,1.0f,cv::Scalar(192,192,192));
-	cv::putText(internal->debug_rgb,message,cv::Point2i(160,460),0,0.5f,cv::Scalar(192,192,192));
+	cv::putText(internal->debug_rgb, "CALIBRATION MODE",
+	            cv::Point2i(160, 240), 0, 1.0f, cv::Scalar(192, 192, 192));
+	cv::putText(internal->debug_rgb, message, cv::Point2i(160, 460), 0,
+	            0.5f, cv::Scalar(192, 192, 192));
 
 	tracker_send_debug_frame(inst);
 
 	return true;
 }
 
-bool tracker3D_sphere_mono_get_poses(tracker_instance_t* inst,tracked_object_t* objects, uint32_t* count) {
-	if (objects == NULL)
-    {
-		*count = TRACKED_POINTS; //tracking a single object
-        return true;
-    }
+bool
+tracker3D_sphere_mono_get_poses(tracker_instance_t* inst,
+                                tracked_object_t* objects,
+                                uint32_t* count)
+{
+	if (objects == NULL) {
+		*count = TRACKED_POINTS; // tracking a single object
+		return true;
+	}
 
-	tracker3D_sphere_mono_instance_t* internal = (tracker3D_sphere_mono_instance_t*)inst->internal_instance;
-	for (uint32_t i = 0;i< 1;i++) {
+	tracker3D_sphere_mono_instance_t* internal =
+	    (tracker3D_sphere_mono_instance_t*)inst->internal_instance;
+	for (uint32_t i = 0; i < 1; i++) {
 
 		objects[i] = internal->tracked_object;
-    }
-	*count=1;
-	internal->poses_consumed=true;
+	}
+	*count = 1;
+	internal->poses_consumed = true;
 	return true;
 }
 
-bool tracker3D_sphere_mono_new_poses(tracker_instance_t* inst)
+bool
+tracker3D_sphere_mono_new_poses(tracker_instance_t* inst)
 {
-	tracker3D_sphere_mono_instance_t*  internal = (tracker3D_sphere_mono_instance_t*)inst->internal_instance;
+	tracker3D_sphere_mono_instance_t* internal =
+	    (tracker3D_sphere_mono_instance_t*)inst->internal_instance;
 	return internal->poses_consumed;
 }
 
-bool tracker3D_sphere_mono_configure(tracker_instance_t* inst,tracker_mono_configuration_t* config)
+bool
+tracker3D_sphere_mono_configure(tracker_instance_t* inst,
+                                tracker_mono_configuration_t* config)
 {
-	tracker3D_sphere_mono_instance_t*  internal = (tracker3D_sphere_mono_instance_t*)inst->internal_instance;
-	//return false if we cannot handle this config
+	tracker3D_sphere_mono_instance_t* internal =
+	    (tracker3D_sphere_mono_instance_t*)inst->internal_instance;
+	// return false if we cannot handle this config
 
 	if (config->format != FORMAT_Y_UINT8) {
-		internal->configured=false;
+		internal->configured = false;
 		return false;
 	}
 	internal->configuration = *config;
-	internal->configured=true;
+	internal->configured = true;
 	return true;
 }
 
-void tracker3D_sphere_mono_register_measurement_callback (tracker_instance_t* inst, void* target_instance, measurement_consumer_callback_func target_func) {
-	tracker3D_sphere_mono_instance_t* internal = (tracker3D_sphere_mono_instance_t*)inst->internal_instance;
+void
+tracker3D_sphere_mono_register_measurement_callback(
+    tracker_instance_t* inst,
+    void* target_instance,
+    measurement_consumer_callback_func target_func)
+{
+	tracker3D_sphere_mono_instance_t* internal =
+	    (tracker3D_sphere_mono_instance_t*)inst->internal_instance;
 	internal->measurement_target_instance = target_instance;
 	internal->measurement_target_callback = target_func;
 }
-void tracker3D_sphere_mono_register_event_callback (tracker_instance_t* inst, void* target_instance, event_consumer_callback_func target_func) {
-	tracker3D_sphere_mono_instance_t* internal = (tracker3D_sphere_mono_instance_t*)inst->internal_instance;
+void
+tracker3D_sphere_mono_register_event_callback(
+    tracker_instance_t* inst,
+    void* target_instance,
+    event_consumer_callback_func target_func)
+{
+	tracker3D_sphere_mono_instance_t* internal =
+	    (tracker3D_sphere_mono_instance_t*)inst->internal_instance;
 	internal->event_target_instance = target_instance;
 	internal->event_target_callback = target_func;
 }
diff --git a/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_mono.h b/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_mono.h
index 71d3dc72c382843640fee551727c565baef23b86..b31223e9674e0821245daca1d9156f038874d022 100644
--- a/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_mono.h
+++ b/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_mono.h
@@ -14,28 +14,49 @@
 extern "C" {
 #endif
 
-//forward declare this
+// forward declare this
 typedef struct tracker3D_sphere_mono_instance tracker3D_sphere_mono_instance_t;
 
 
-tracker3D_sphere_mono_instance_t* tracker3D_sphere_mono_create(tracker_instance_t* inst);
-bool tracker3D_sphere_mono_destroy(tracker_instance_t* inst);
-
-capture_parameters_t tracker3D_sphere_mono_get_capture_params(tracker_instance_t* inst);
-
-bool tracker3D_sphere_mono_get_debug_frame(tracker_instance_t* inst,frame_t* frame);
-bool tracker3D_sphere_mono_queue(tracker_instance_t* inst,frame_t* frame);
-bool tracker3D_sphere_mono_get_poses(tracker_instance_t* inst,tracked_object_t* objects,uint32_t* count);
-bool tracker3D_sphere_mono_new_poses(tracker_instance_t* inst);
-bool tracker3D_sphere_mono_configure(tracker_instance_t* inst, tracker_mono_configuration_t* config);
-void tracker3D_sphere_mono_register_measurement_callback (tracker_instance_t* inst, void* target_instance, measurement_consumer_callback_func target_func);
-void tracker3D_sphere_mono_register_event_callback (tracker_instance_t* inst, void* target_instance, event_consumer_callback_func target_func);
-
-static bool tracker3D_sphere_mono_track(tracker_instance_t* inst);
-static bool tracker3D_sphere_mono_calibrate(tracker_instance_t* inst);
+tracker3D_sphere_mono_instance_t*
+tracker3D_sphere_mono_create(tracker_instance_t* inst);
+bool
+tracker3D_sphere_mono_destroy(tracker_instance_t* inst);
+
+capture_parameters_t
+tracker3D_sphere_mono_get_capture_params(tracker_instance_t* inst);
+
+bool
+tracker3D_sphere_mono_get_debug_frame(tracker_instance_t* inst, frame_t* frame);
+bool
+tracker3D_sphere_mono_queue(tracker_instance_t* inst, frame_t* frame);
+bool
+tracker3D_sphere_mono_get_poses(tracker_instance_t* inst,
+                                tracked_object_t* objects,
+                                uint32_t* count);
+bool
+tracker3D_sphere_mono_new_poses(tracker_instance_t* inst);
+bool
+tracker3D_sphere_mono_configure(tracker_instance_t* inst,
+                                tracker_mono_configuration_t* config);
+void
+tracker3D_sphere_mono_register_measurement_callback(
+    tracker_instance_t* inst,
+    void* target_instance,
+    measurement_consumer_callback_func target_func);
+void
+tracker3D_sphere_mono_register_event_callback(
+    tracker_instance_t* inst,
+    void* target_instance,
+    event_consumer_callback_func target_func);
+
+static bool
+tracker3D_sphere_mono_track(tracker_instance_t* inst);
+static bool
+tracker3D_sphere_mono_calibrate(tracker_instance_t* inst);
 
 #ifdef __cplusplus
-} //extern "C"
+} // extern "C"
 #endif
 
-#endif //TRACKER3D_SPHERE_MONO_H
+#endif // TRACKER3D_SPHERE_MONO_H
diff --git a/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_stereo.cpp b/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_stereo.cpp
index 97b2c01abc3d92316c13dbed8c8170597b2e2c91..9b3d46a02c11834f977949c526249a83939eda43 100644
--- a/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_stereo.cpp
+++ b/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_stereo.cpp
@@ -7,15 +7,17 @@
 #include <sys/stat.h>
 #include <linux/limits.h>
 
-#define MAX_CALIBRATION_SAMPLES 23 // mo' samples, mo' calibration accuracy, at the expense of time.
+#define MAX_CALIBRATION_SAMPLES                                                \
+	23 // mo' samples, mo' calibration accuracy, at the expense of time.
 
-typedef struct tracker3D_sphere_stereo_instance {
+typedef struct tracker3D_sphere_stereo_instance
+{
 	bool configured;
 	tracker_stereo_configuration_t configuration;
 	measurement_consumer_callback_func measurement_target_callback;
-	void* measurement_target_instance; //where we send our measurements
+	void* measurement_target_instance; // where we send our measurements
 	event_consumer_callback_func event_target_callback;
-	void* event_target_instance; //where we send our events
+	void* event_target_instance; // where we send our events
 
 	tracked_object_t tracked_object;
 	tracked_blob_t l_tracked_blob;
@@ -25,7 +27,8 @@ typedef struct tracker3D_sphere_stereo_instance {
 	cv::SimpleBlobDetector::Params blob_params;
 	std::vector<cv::KeyPoint> l_keypoints;
 	std::vector<cv::KeyPoint> r_keypoints;
-	//these components hold no state so we can use a single instance for l and r ?
+	// these components hold no state, so we can use a single instance for
+	// l and r?
 	cv::Ptr<cv::SimpleBlobDetector> sbd;
 	cv::Ptr<cv::BackgroundSubtractorMOG2> background_subtractor;
 	cv::Mat l_frame_gray;
@@ -69,7 +72,7 @@ typedef struct tracker3D_sphere_stereo_instance {
 	cv::Mat r_rectify_map_y;
 
 
-	//calibration data structures
+	// calibration data structures
 	std::vector<std::vector<cv::Point3f>> chessboards_model;
 	std::vector<std::vector<cv::Point2f>> l_chessboards_measured;
 	std::vector<std::vector<cv::Point2f>> r_chessboards_measured;
@@ -84,116 +87,156 @@ typedef struct tracker3D_sphere_stereo_instance {
 
 } tracker3D_sphere_stereo_instance_t;
 
-tracker3D_sphere_stereo_instance_t* tracker3D_sphere_stereo_create(tracker_instance_t* inst) {
-	tracker3D_sphere_stereo_instance_t* i = (tracker3D_sphere_stereo_instance_t*)calloc(1,sizeof(tracker3D_sphere_stereo_instance_t));
+tracker3D_sphere_stereo_instance_t*
+tracker3D_sphere_stereo_create(tracker_instance_t* inst)
+{
+	tracker3D_sphere_stereo_instance_t* i =
+	    (tracker3D_sphere_stereo_instance_t*)calloc(
+	        1, sizeof(tracker3D_sphere_stereo_instance_t));
 	if (i) {
-		i->blob_params.filterByArea=false;
-		i->blob_params.filterByConvexity=false;
-		i->blob_params.filterByInertia=false;
-		i->blob_params.filterByColor=true;
-		i->blob_params.blobColor=255; //0 or 255 - color comes from binarized image?
-		i->blob_params.minArea=1;
-		i->blob_params.maxArea=1000;
-		i->blob_params.maxThreshold=51; //using a wide threshold span slows things down bigtime
-		i->blob_params.minThreshold=50;
-		i->blob_params.thresholdStep=1;
-		i->blob_params.minDistBetweenBlobs=5;
-		i->blob_params.minRepeatability=1; //need this to avoid error?
+		i->blob_params.filterByArea = false;
+		i->blob_params.filterByConvexity = false;
+		i->blob_params.filterByInertia = false;
+		i->blob_params.filterByColor = true;
+		i->blob_params.blobColor =
+		    255; // 0 or 255 - color comes from binarized image?
+		i->blob_params.minArea = 1;
+		i->blob_params.maxArea = 1000;
+		i->blob_params.maxThreshold =
+		    51; // using a wide threshold span slows things down big time
+		i->blob_params.minThreshold = 50;
+		i->blob_params.thresholdStep = 1;
+		i->blob_params.minDistBetweenBlobs = 5;
+		i->blob_params.minRepeatability = 1; // need this to avoid
+		                                     // error?
 
 		i->sbd = cv::SimpleBlobDetector::create(i->blob_params);
-		i->background_subtractor = cv::createBackgroundSubtractorMOG2(32,16,false);
+		i->background_subtractor =
+		    cv::createBackgroundSubtractorMOG2(32, 16, false);
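+		// MOG2 args: history of 32 frames, variance threshold 16,
+		// shadow detection disabled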
 
-		i->poses_consumed=false;
-		i->configured=false;
-		i->l_alloced_frames=false;
-		i->r_alloced_frames=false;
+		i->poses_consumed = false;
+		i->configured = false;
+		i->l_alloced_frames = false;
+		i->r_alloced_frames = false;
 		i->got_left = false;
 		i->got_right = false;
 
 
-		//alloc our debug frame here - opencv is h,w, not w,h
-		i->debug_rgb = cv::Mat(480,640,CV_8UC3,cv::Scalar(0,0,0));
+		// alloc our debug frame here - opencv is h,w, not w,h
+		i->debug_rgb = cv::Mat(480, 640, CV_8UC3, cv::Scalar(0, 0, 0));
 
-		i->zero_distortion = cv::Mat(DISTORTION_SIZE,1,CV_32F,cv::Scalar(0.0f));
-		i->zero_distortion_fisheye = cv::Mat(DISTORTION_FISHEYE_SIZE,1,CV_32F,cv::Scalar(0.0f));
+		i->zero_distortion =
+		    cv::Mat(DISTORTION_SIZE, 1, CV_32F, cv::Scalar(0.0f));
+		i->zero_distortion_fisheye = cv::Mat(DISTORTION_FISHEYE_SIZE, 1,
+		                                     CV_32F, cv::Scalar(0.0f));
 
 		return i;
 	}
 	return NULL;
 }
-bool tracker3D_sphere_stereo_get_debug_frame(tracker_instance_t* inst,frame_t* frame){
-	tracker3D_sphere_stereo_instance_t* internal = (tracker3D_sphere_stereo_instance_t*)inst->internal_instance;
+bool
+tracker3D_sphere_stereo_get_debug_frame(tracker_instance_t* inst,
+                                        frame_t* frame)
+{
+	tracker3D_sphere_stereo_instance_t* internal =
+	    (tracker3D_sphere_stereo_instance_t*)inst->internal_instance;
 
-	//wrap a frame struct around our debug cv::Mat and return it.
+	// wrap a frame struct around our debug cv::Mat and return it.
 
 	frame->format = FORMAT_RGB_UINT8;
 	frame->width = internal->debug_rgb.cols;
-	frame->stride = internal->debug_rgb.cols * format_bytes_per_pixel(frame->format);
+	frame->stride =
+	    internal->debug_rgb.cols * format_bytes_per_pixel(frame->format);
 	frame->height = internal->debug_rgb.rows;
 	frame->data = internal->debug_rgb.data;
 	frame->size_bytes = frame_size_in_bytes(frame);
 	return true;
 }
-capture_parameters_t tracker3D_sphere_stereo_get_capture_params(tracker_instance_t* inst) {
-	tracker3D_sphere_stereo_instance_t* internal = (tracker3D_sphere_stereo_instance_t*)inst->internal_instance;
-	capture_parameters_t cp={};
+capture_parameters_t
+tracker3D_sphere_stereo_get_capture_params(tracker_instance_t* inst)
+{
+	tracker3D_sphere_stereo_instance_t* internal =
+	    (tracker3D_sphere_stereo_instance_t*)inst->internal_instance;
+	capture_parameters_t cp = {};
 	switch (internal->configuration.calibration_mode) {
-	    case CALIBRATION_MODE_CHESSBOARD:
-		    cp.exposure = 0.3f;
-			cp.gain=0.01f;
-		    break;
-	    default:
-		    cp.exposure = 1.0/2048.0;
-			cp.gain=0.01f;
-		    break;
+	case CALIBRATION_MODE_CHESSBOARD:
+		cp.exposure = 0.3f;
+		cp.gain = 0.01f;
+		break;
+	default:
+		cp.exposure = 1.0 / 2048.0;
+		cp.gain = 0.01f;
+		break;
 	}
 
 	return cp;
 }
 
-bool tracker3D_sphere_stereo_queue(tracker_instance_t* inst,frame_t* frame) {
-	tracker3D_sphere_stereo_instance_t* internal = (tracker3D_sphere_stereo_instance_t*)inst->internal_instance;
-	if (! internal->configured){
-		printf("ERROR: you must configure this tracker before it can accept frames\n");
+bool
+tracker3D_sphere_stereo_queue(tracker_instance_t* inst, frame_t* frame)
+{
+	tracker3D_sphere_stereo_instance_t* internal =
+	    (tracker3D_sphere_stereo_instance_t*)inst->internal_instance;
+	if (!internal->configured) {
+		printf(
+		    "ERROR: you must configure this tracker before it can "
+		    "accept frames\n");
 		return false;
 	}
 
-	//alloc left if required - if we have a composite stereo frame, alloc both left and right eyes.
+	// alloc left if required - if we have a composite stereo frame, alloc
+	// both left and right eyes.
 
-	if (frame->source_id == internal->configuration.l_source_id &&  !internal->l_alloced_frames)
-	{
-		uint16_t eye_width = frame->width/2;
+	if (frame->source_id == internal->configuration.l_source_id &&
+	    !internal->l_alloced_frames) {
+		uint16_t eye_width = frame->width / 2;
 		if (internal->configuration.split_left == true) {
-			eye_width = frame->width /2;
-			internal->r_frame_gray = cv::Mat(frame->height,eye_width,CV_8UC1,cv::Scalar(0,0,0));
-			internal->r_frame_u = cv::Mat(frame->height,eye_width,CV_8UC1,cv::Scalar(0,0,0));
-			internal->r_frame_v = cv::Mat(frame->height,eye_width,CV_8UC1,cv::Scalar(0,0,0));
-			internal->r_mask_gray = cv::Mat(frame->height,eye_width,CV_8UC1,cv::Scalar(0,0,0));
-			internal->r_alloced_frames =true;
+			internal->r_frame_gray =
+			    cv::Mat(frame->height, eye_width, CV_8UC1,
+			            cv::Scalar(0, 0, 0));
+			internal->r_frame_u =
+			    cv::Mat(frame->height, eye_width, CV_8UC1,
+			            cv::Scalar(0, 0, 0));
+			internal->r_frame_v =
+			    cv::Mat(frame->height, eye_width, CV_8UC1,
+			            cv::Scalar(0, 0, 0));
+			internal->r_mask_gray =
+			    cv::Mat(frame->height, eye_width, CV_8UC1,
+			            cv::Scalar(0, 0, 0));
+			internal->r_alloced_frames = true;
 		}
-		internal->l_frame_gray = cv::Mat(frame->height,eye_width,CV_8UC1,cv::Scalar(0,0,0));
-		internal->l_frame_u = cv::Mat(frame->height,eye_width,CV_8UC1,cv::Scalar(0,0,0));
-		internal->l_frame_v = cv::Mat(frame->height,eye_width,CV_8UC1,cv::Scalar(0,0,0));
-
-		internal->l_mask_gray = cv::Mat(frame->height,eye_width,CV_8UC1,cv::Scalar(0,0,0));
-		internal->l_alloced_frames =true;
+		internal->l_frame_gray = cv::Mat(frame->height, eye_width,
+		                                 CV_8UC1, cv::Scalar(0, 0, 0));
+		internal->l_frame_u = cv::Mat(frame->height, eye_width, CV_8UC1,
+		                              cv::Scalar(0, 0, 0));
+		internal->l_frame_v = cv::Mat(frame->height, eye_width, CV_8UC1,
+		                              cv::Scalar(0, 0, 0));
+
+		internal->l_mask_gray = cv::Mat(frame->height, eye_width,
+		                                CV_8UC1, cv::Scalar(0, 0, 0));
+		internal->l_alloced_frames = true;
 	}
 
-	//if we have a right frame, alloc if required.
-
-	if (frame->source_id == internal->configuration.r_source_id &&  !internal->r_alloced_frames)
-	{
-		uint16_t eye_width = frame->width/2;
-		internal->r_frame_gray = cv::Mat(frame->height,eye_width,CV_8UC1,cv::Scalar(0,0,0));
-		internal->r_frame_u = cv::Mat(frame->height,eye_width,CV_8UC1,cv::Scalar(0,0,0));
-		internal->r_frame_v = cv::Mat(frame->height,eye_width,CV_8UC1,cv::Scalar(0,0,0));
-
-		internal->r_mask_gray = cv::Mat(frame->height,eye_width,CV_8UC1,cv::Scalar(0,0,0));
-		internal->r_alloced_frames =true;
+	// if we have a right frame, alloc if required.
+
+	if (frame->source_id == internal->configuration.r_source_id &&
+	    !internal->r_alloced_frames) {
+		uint16_t eye_width = frame->width / 2;
+		internal->r_frame_gray = cv::Mat(frame->height, eye_width,
+		                                 CV_8UC1, cv::Scalar(0, 0, 0));
+		internal->r_frame_u = cv::Mat(frame->height, eye_width, CV_8UC1,
+		                              cv::Scalar(0, 0, 0));
+		internal->r_frame_v = cv::Mat(frame->height, eye_width, CV_8UC1,
+		                              cv::Scalar(0, 0, 0));
+
+		internal->r_mask_gray = cv::Mat(frame->height, eye_width,
+		                                CV_8UC1, cv::Scalar(0, 0, 0));
+		internal->r_alloced_frames = true;
 	}
 
-	//copy our data from our video buffer into our cv::Mats
-	//TODO: initialise once
+	// copy our data from our video buffer into our cv::Mats
+	// TODO: initialise once
 	cv::Mat l_chans[3];
 	l_chans[0] = internal->l_frame_gray;
 	l_chans[1] = internal->l_frame_u;
@@ -208,57 +251,65 @@ bool tracker3D_sphere_stereo_queue(tracker_instance_t* inst,frame_t* frame) {
 	if (frame->source_id == internal->configuration.l_source_id) {
 
 		if (internal->configuration.split_left == true) {
-			internal->got_left=true;
-			internal->got_right=true;
-			cv::Mat tmp(frame->height, frame->width, CV_8UC3, frame->data);
-			cv::Rect lr(internal->configuration.l_rect.tl.x,internal->configuration.l_rect.tl.y,internal->configuration.l_rect.br.x,internal->configuration.l_rect.br.y);
-			cv::Rect rr(internal->configuration.r_rect.tl.x,internal->configuration.r_rect.tl.y,internal->configuration.r_rect.br.x - internal->configuration.r_rect.tl.x,internal->configuration.r_rect.br.y);
-			cv::split(tmp(lr),l_chans);
-			cv::split(tmp(rr),r_chans);
-	}
-		else
-		{
-			internal->got_left=true;
-			cv::Mat tmp(frame->height, frame->width, CV_8UC3, frame->data);
-			cv::split(tmp,l_chans);
+			internal->got_left = true;
+			internal->got_right = true;
+			cv::Mat tmp(frame->height, frame->width, CV_8UC3,
+			            frame->data);
+			// cv::Rect takes (x, y, width, height), so convert
+			// both br corners into extents - previously lr passed
+			// br as a size and rr only converted its width
+			cv::Rect lr(internal->configuration.l_rect.tl.x,
+			            internal->configuration.l_rect.tl.y,
+			            internal->configuration.l_rect.br.x -
+			                internal->configuration.l_rect.tl.x,
+			            internal->configuration.l_rect.br.y -
+			                internal->configuration.l_rect.tl.y);
+			cv::Rect rr(internal->configuration.r_rect.tl.x,
+			            internal->configuration.r_rect.tl.y,
+			            internal->configuration.r_rect.br.x -
+			                internal->configuration.r_rect.tl.x,
+			            internal->configuration.r_rect.br.y -
+			                internal->configuration.r_rect.tl.y);
+			cv::split(tmp(lr), l_chans);
+			cv::split(tmp(rr), r_chans);
+		} else {
+			internal->got_left = true;
+			cv::Mat tmp(frame->height, frame->width, CV_8UC3,
+			            frame->data);
+			cv::split(tmp, l_chans);
 		}
-
-
 	}
-	if (frame->source_id == internal->configuration.r_source_id && internal->configuration.split_left ==false) {
-		internal->got_right=true;
+	if (frame->source_id == internal->configuration.r_source_id &&
+	    internal->configuration.split_left == false) {
+		internal->got_right = true;
 		cv::Mat tmp(frame->height, frame->width, CV_8UC3, frame->data);
-		cv::split(tmp,r_chans);
+		cv::split(tmp, r_chans);
 	}
 
-	//we have our pair of frames, now we can process them - we should do this async, rather than in queue
+	// we have our pair of frames, now we can process them - we should do
+	// this async, rather than in queue
 
-	if (internal->got_left && internal->got_right)
-	{
+	if (internal->got_left && internal->got_right) {
 		switch (internal->configuration.calibration_mode) {
-		    case CALIBRATION_MODE_NONE:
-			    return tracker3D_sphere_stereo_track(inst);
-			    break;
-		    case CALIBRATION_MODE_CHESSBOARD:
-                return tracker3D_sphere_stereo_calibrate(inst);
-			    break;
-		    default:
-			    printf("ERROR: unrecognised calibration mode\n");
-			    return false;
+		case CALIBRATION_MODE_NONE:
+			return tracker3D_sphere_stereo_track(inst);
+			break;
+		case CALIBRATION_MODE_CHESSBOARD:
+			return tracker3D_sphere_stereo_calibrate(inst);
+			break;
+		default:
+			printf("ERROR: unrecognised calibration mode\n");
+			return false;
 		}
-
 	}
 	return true;
 }
 
-bool tracker3D_sphere_stereo_track(tracker_instance_t* inst){
+bool
+tracker3D_sphere_stereo_track(tracker_instance_t* inst)
+{
 
-	//printf("tracking...\n");
-	tracker3D_sphere_stereo_instance_t* internal = (tracker3D_sphere_stereo_instance_t*)inst->internal_instance;
+	// printf("tracking...\n");
+	tracker3D_sphere_stereo_instance_t* internal =
+	    (tracker3D_sphere_stereo_instance_t*)inst->internal_instance;
 	internal->l_keypoints.clear();
 	internal->r_keypoints.clear();
 
-	//DEBUG: dump all our planes out for inspection
+	// DEBUG: dump all our planes out for inspection
 	/*cv::imwrite("/tmp/l_out_y.jpg",internal->l_frame_gray);
 	cv::imwrite("/tmp/r_out_y.jpg",internal->r_frame_gray);
 	cv::imwrite("/tmp/l_out_u.jpg",internal->l_frame_u);
@@ -267,58 +318,71 @@ bool tracker3D_sphere_stereo_track(tracker_instance_t* inst){
 	cv::imwrite("/tmp/r_out_v.jpg",internal->r_frame_v);
 */
 
-	//disabled channel combining in favour of using v plane directly - Y is 2x resolution
+	// disabled channel combining in favour of using v plane directly - Y is
+	// 2x resolution
 	// so we do want to use that eventually
 
-	//combine our yuv channels to isolate blue leds - y channel is all we will use from now on
-	//cv::subtract(internal->l_frame_u,internal->l_frame_v,internal->l_frame_u);
-	//cv::subtract(internal->r_frame_u,internal->r_frame_v,internal->r_frame_u);
+	// combine our yuv channels to isolate blue leds - y channel is all we
+	// will use from now on
+	// cv::subtract(internal->l_frame_u,internal->l_frame_v,internal->l_frame_u);
+	// cv::subtract(internal->r_frame_u,internal->r_frame_v,internal->r_frame_u);
 
-	//cv::subtract(internal->l_frame_u,internal->l_frame_gray,internal->l_frame_gray);
-	//cv::subtract(internal->r_frame_u,internal->r_frame_gray,internal->r_frame_gray);
+	// cv::subtract(internal->l_frame_u,internal->l_frame_gray,internal->l_frame_gray);
+	// cv::subtract(internal->r_frame_u,internal->r_frame_gray,internal->r_frame_gray);
 
-	//just use the u plane directly
-	cv::bitwise_not(internal->l_frame_v,internal->l_frame_v);
-	cv::bitwise_not(internal->r_frame_v,internal->r_frame_v);
+	// just use the v plane directly
+	cv::bitwise_not(internal->l_frame_v, internal->l_frame_v);
+	cv::bitwise_not(internal->r_frame_v, internal->r_frame_v);
 
-	cv::threshold(internal->l_frame_v,internal->l_frame_gray,150.0,255.0,0);
-	cv::threshold(internal->r_frame_v,internal->r_frame_gray,150.0,255.0,0);
+	cv::threshold(internal->l_frame_v, internal->l_frame_gray, 150.0, 255.0,
+	              0);
+	cv::threshold(internal->r_frame_v, internal->r_frame_gray, 150.0, 255.0,
+	              0);
 
 
 	cv::Mat l_frame_undist;
 	cv::Mat r_frame_undist;
 
-	//undistort the whole image
-	cv::remap( internal->l_frame_gray,l_frame_undist, internal->l_undistort_map_x, internal->l_undistort_map_y, cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0) );
-	cv::remap( internal->r_frame_gray,r_frame_undist, internal->r_undistort_map_x, internal->r_undistort_map_y, cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0) );
+	// undistort the whole image
+	cv::remap(internal->l_frame_gray, l_frame_undist,
+	          internal->l_undistort_map_x, internal->l_undistort_map_y,
+	          cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));
+	cv::remap(internal->r_frame_gray, r_frame_undist,
+	          internal->r_undistort_map_x, internal->r_undistort_map_y,
+	          cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));
 
-	//rectify the whole image
-	cv::remap( l_frame_undist,internal->l_frame_gray, internal->l_rectify_map_x, internal->l_rectify_map_y, cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0) );
-	cv::remap( r_frame_undist,internal->r_frame_gray, internal->r_rectify_map_x, internal->r_rectify_map_y, cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0) );
+	// rectify the whole image
+	cv::remap(l_frame_undist, internal->l_frame_gray,
+	          internal->l_rectify_map_x, internal->l_rectify_map_y,
+	          cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));
+	cv::remap(r_frame_undist, internal->r_frame_gray,
+	          internal->r_rectify_map_x, internal->r_rectify_map_y,
+	          cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));
 
 
-	//block-match for disparity calculation - disabled
+	// block-match for disparity calculation - disabled
 
 	cv::Mat disp;
-	cv::Ptr<cv::StereoBM> sbm = cv::StereoBM::create(128,5);
+	cv::Ptr<cv::StereoBM> sbm = cv::StereoBM::create(128, 5);
 	sbm->setNumDisparities(64);
-	   sbm->setBlockSize(5);
-	   sbm->setPreFilterCap(61);
-	   sbm->setPreFilterSize(15);
-	   sbm->setTextureThreshold(32);
-	   sbm->setSpeckleWindowSize(100);
-	   sbm->setSpeckleRange(32);
-	   sbm->setMinDisparity(2);
-	   sbm->setUniquenessRatio(10);
-	   sbm->setDisp12MaxDiff(0);
-//	sbm->compute(internal->l_frame_gray, internal->r_frame_gray,disp);
-//	cv::normalize(disp, disp8, 0.1, 255, CV_MINMAX, CV_8UC1);
+	sbm->setBlockSize(5);
+	sbm->setPreFilterCap(61);
+	sbm->setPreFilterSize(15);
+	sbm->setTextureThreshold(32);
+	sbm->setSpeckleWindowSize(100);
+	sbm->setSpeckleRange(32);
+	sbm->setMinDisparity(2);
+	sbm->setUniquenessRatio(10);
+	sbm->setDisp12MaxDiff(0);
+	// sbm->compute(internal->l_frame_gray, internal->r_frame_gray, disp);
+	// cv::normalize(disp, disp8, 0.1, 255, CV_MINMAX, CV_8UC1);
 
 
-	//disabled background subtraction for now
+	// disabled background subtraction for now
 
-	//internal->background_subtractor->apply(internal->l_frame_gray,internal->l_mask_gray);
-	//internal->background_subtractor->apply(internal->r_frame_gray,internal->r_mask_gray);
+	// internal->background_subtractor->apply(internal->l_frame_gray,internal->l_mask_gray);
+	// internal->background_subtractor->apply(internal->r_frame_gray,internal->r_mask_gray);
 
 	xrt_vec2 lastPos = internal->l_tracked_blob.center;
 	float offset = ROI_OFFSET;
@@ -326,196 +390,240 @@ bool tracker3D_sphere_stereo_track(tracker_instance_t* inst){
 		offset = internal->l_tracked_blob.diameter;
 	}
 
-	//cv::rectangle(internal->l_mask_gray, cv::Point2f(lastPos.x-offset,lastPos.y-offset),cv::Point2f(lastPos.x+offset,lastPos.y+offset),cv::Scalar( 255 ),-1,0);
-	//lastPos = internal->r_tracked_blob.center;
-	//cv::rectangle(internal->r_mask_gray, cv::Point2f(lastPos.x-offset,lastPos.y-offset),cv::Point2f(lastPos.x+offset,lastPos.y+offset),cv::Scalar( 255 ),-1,0);
+	// cv::rectangle(internal->l_mask_gray,
+	//     cv::Point2f(lastPos.x - offset, lastPos.y - offset),
+	//     cv::Point2f(lastPos.x + offset, lastPos.y + offset),
+	//     cv::Scalar(255), -1, 0);
+	// lastPos = internal->r_tracked_blob.center;
+	// cv::rectangle(internal->r_mask_gray,
+	//     cv::Point2f(lastPos.x - offset, lastPos.y - offset),
+	//     cv::Point2f(lastPos.x + offset, lastPos.y + offset),
+	//     cv::Scalar(255), -1, 0);
 
-	//cv::rectangle(internal->debug_rgb, cv::Point2f(0,0),cv::Point2f(internal->debug_rgb.cols,internal->debug_rgb.rows),cv::Scalar( 0,0,0 ),-1,0);
+	// cv::rectangle(internal->debug_rgb, cv::Point2f(0, 0),
+	//     cv::Point2f(internal->debug_rgb.cols, internal->debug_rgb.rows),
+	//     cv::Scalar(0, 0, 0), -1, 0);
 
-	cv::threshold(internal->l_frame_gray,internal->l_frame_gray,32.0,255.0,0);
-	cv::threshold(internal->r_frame_gray,internal->r_frame_gray,32.0,255.0,0);
+	cv::threshold(internal->l_frame_gray, internal->l_frame_gray, 32.0,
+	              255.0, 0);
+	cv::threshold(internal->r_frame_gray, internal->r_frame_gray, 32.0,
+	              255.0, 0);
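+	// threshold type 0 is cv::THRESH_BINARY: pixels above 32 saturate to
+	// 255 and everything else goes to 0 before blob detection.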
 
-	//TODO: handle source images larger than debug_rgb
+	// TODO: handle source images larger than debug_rgb
 	cv::Mat debug_img;
-	cv::cvtColor(internal->l_frame_gray,debug_img,CV_GRAY2BGR);
-	cv::Mat dst_roi = internal->debug_rgb(cv::Rect(0, 0, debug_img.cols, debug_img.rows));
+	cv::cvtColor(internal->l_frame_gray, debug_img, CV_GRAY2BGR);
+	cv::Mat dst_roi =
+	    internal->debug_rgb(cv::Rect(0, 0, debug_img.cols, debug_img.rows));
 	debug_img.copyTo(dst_roi);
 
 
 	tracker_measurement_t m = {};
 
-	//do blob detection with our masks
-	internal->sbd->detect(internal->l_frame_gray, internal->l_keypoints);//,internal->l_mask_gray);
-	internal->sbd->detect(internal->r_frame_gray, internal->r_keypoints);//,internal->r_mask_gray);
+	// do blob detection with our masks
+	internal->sbd->detect(internal->l_frame_gray,
+	                      internal->l_keypoints); //,internal->l_mask_gray);
+	internal->sbd->detect(internal->r_frame_gray,
+	                      internal->r_keypoints); //,internal->r_mask_gray);
 
-	//do some basic matching to come up with likely disparity-pairs
-    std::vector<cv::KeyPoint> l_blobs,r_blobs;
-	for (uint32_t i=0;i<internal->l_keypoints.size();i++)
-	{
+	// do some basic matching to come up with likely disparity-pairs
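+	// rectified correspondences should share a scanline, hence the +/-3
+	// pixel vertical band below; lowest_dist starts at 128, which also
+	// acts as a maximum-disparity cutoff in pixels.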
+	std::vector<cv::KeyPoint> l_blobs, r_blobs;
+	for (uint32_t i = 0; i < internal->l_keypoints.size(); i++) {
 		cv::KeyPoint l_blob = internal->l_keypoints[i];
-		int l_index=-1;
-		int r_index=-1;
-		for (uint32_t j=0;j<internal->r_keypoints.size();j++)
-		{
+		int l_index = -1;
+		int r_index = -1;
+		float lowest_dist = 128;
+		for (uint32_t j = 0; j < internal->r_keypoints.size(); j++) {
+			cv::KeyPoint r_blob = internal->r_keypoints[j];
-			//find closest point on same-ish scanline
-			if ((l_blob.pt.y < r_blob.pt.y+3) && (l_blob.pt.y > r_blob.pt.y-3 ) && ((r_blob.pt.x - l_blob.pt.x) < lowest_dist)  ){
+			// find closest point on same-ish scanline
+			if ((l_blob.pt.y < r_blob.pt.y + 3) &&
+			    (l_blob.pt.y > r_blob.pt.y - 3) &&
+			    ((r_blob.pt.x - l_blob.pt.x) < lowest_dist)) {
 				lowest_dist = r_blob.pt.x - l_blob.pt.x;
-				r_index=j;
-				l_index=i;
+				r_index = j;
+				l_index = i;
 			}
 		}
-		if (l_index > -1 && r_index > -1)
-		{
-            l_blobs.push_back(internal->l_keypoints.at(l_index));
-            r_blobs.push_back(internal->r_keypoints.at(r_index));
+		if (l_index > -1 && r_index > -1) {
+			l_blobs.push_back(internal->l_keypoints.at(l_index));
+			r_blobs.push_back(internal->r_keypoints.at(r_index));
 		}
 	}
 
-    //draw our disparity markers into our debug frame
+	// draw our disparity markers into our debug frame
 
-    for (uint32_t i=0;i<l_blobs.size();i++){
-		cv::line(internal->debug_rgb,l_blobs[i].pt,r_blobs[i].pt,cv::Scalar(255,0,0));
-    }
+	for (uint32_t i = 0; i < l_blobs.size(); i++) {
+		cv::line(internal->debug_rgb, l_blobs[i].pt, r_blobs[i].pt,
+		         cv::Scalar(255, 0, 0));
+	}
 
-	//convert our 2d point + disparities into 3d points.
+	// convert our 2d point + disparities into 3d points.
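+	// reprojection: [X Y Z W]^T = Q * [x y d 1]^T, where Q is the 4x4
+	// disparity-to-depth matrix from cv::stereoRectify; the world point
+	// is (X/W, Y/W, Z/W).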
 
 	std::vector<cv::Point3f> world_points;
 	if (l_blobs.size() > 0) {
-		for (uint32_t i=0;i < l_blobs.size();i++) {
+		for (uint32_t i = 0; i < l_blobs.size(); i++) {
 			float disp = r_blobs[i].pt.x - l_blobs[i].pt.x;
-			cv::Scalar xydw(l_blobs[i].pt.x,l_blobs[i].pt.y,disp,1.0f);
-			cv::Mat h_world =  internal->disparity_to_depth * xydw;
-			world_points.push_back(cv::Point3f(h_world.at<double>(0,0)/h_world.at<double>(3,0),h_world.at<double>(1,0)/h_world.at<double>(3,0),h_world.at<double>(2,0)/h_world.at<double>(3,0)));
+			cv::Scalar xydw(l_blobs[i].pt.x, l_blobs[i].pt.y, disp,
+			                1.0f);
+			cv::Mat h_world = internal->disparity_to_depth * xydw;
+			world_points.push_back(cv::Point3f(
+			    h_world.at<double>(0, 0) / h_world.at<double>(3, 0),
+			    h_world.at<double>(1, 0) / h_world.at<double>(3, 0),
+			    h_world.at<double>(2, 0) /
+			        h_world.at<double>(3, 0)));
 		}
-    }
+	}
 
-    int tracked_index=-1;
-    float lowest_dist=65535.0f;
-    xrt_vec3 position = internal->tracked_object.pose.position;
-    cv::Point3f last_point (position.x,position.y,position.z);
+	int tracked_index = -1;
+	float lowest_dist = 65535.0f;
+	xrt_vec3 position = internal->tracked_object.pose.position;
+	cv::Point3f last_point(position.x, position.y, position.z);
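+	// data association: pick the candidate nearest in 3D to the previous
+	// tracked position; 65535.0f is just a large sentinel distance.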
 
-	for (uint32_t i=0; i < world_points.size(); i++) {
+	for (uint32_t i = 0; i < world_points.size(); i++) {
 
 		cv::Point3f world_point = world_points[i];
 
-        //show our tracked world points (just x,y) in our debug output
-        cv::Point2f img_point;
-		img_point.x = world_point.x * internal->debug_rgb.cols/2 + internal->debug_rgb.cols/2;
-		img_point.y = world_point.y * internal->debug_rgb.rows/2 + internal->debug_rgb.rows/2;
-		cv::circle(internal->debug_rgb,img_point,3,cv::Scalar(0,255,0));
-
-        float dist = dist_3d(world_point,last_point);
-        if ( dist < lowest_dist) {
-            tracked_index=i;
-            lowest_dist = dist;
-        }
-
+		// show our tracked world points (just x,y) in our debug output
+		cv::Point2f img_point;
+		img_point.x = world_point.x * internal->debug_rgb.cols / 2 +
+		              internal->debug_rgb.cols / 2;
+		img_point.y = world_point.y * internal->debug_rgb.rows / 2 +
+		              internal->debug_rgb.rows / 2;
+		cv::circle(internal->debug_rgb, img_point, 3,
+		           cv::Scalar(0, 255, 0));
+
+		float dist = dist_3d(world_point, last_point);
+		if (dist < lowest_dist) {
+			tracked_index = i;
+			lowest_dist = dist;
+		}
 	}
 
-    if (tracked_index != -1) {
+	if (tracked_index != -1) {
 		cv::Point3f world_point = world_points[tracked_index];
 
-        //create our measurement for the filter
-        m.has_position = true;
-        m.has_rotation = false;
-        m.pose.position.x = world_point.x;
-        m.pose.position.y = world_point.y;
-        m.pose.position.z = world_point.z;
-
-        //update internal state
-        cv::KeyPoint l_kp = l_blobs[tracked_index];
-        cv::KeyPoint r_kp = l_blobs[tracked_index];
-
-        internal->l_tracked_blob.center.x = l_kp.pt.x;
-        internal->l_tracked_blob.center.y = l_kp.pt.y;
-        internal->l_tracked_blob.diameter = l_kp.size;
-
-        internal->r_tracked_blob.center.x = r_kp.pt.x;
-        internal->r_tracked_blob.center.y = r_kp.pt.y;
-        internal->r_tracked_blob.diameter = r_kp.size;
-
-        internal->tracked_object.pose.position.x = world_point.x;
-        internal->tracked_object.pose.position.y = world_point.y;
-        internal->tracked_object.pose.position.z = world_point.z;
-        internal->tracked_object.tracking_id =1;
-
-        char message[128];
-        snprintf(message,128,"X: %f Y: %f Z: %f",world_point.x,world_point.y,world_point.z);
-
-        cv::putText(internal->debug_rgb,message,cv::Point2i(10,50),0,0.5f,cv::Scalar(96,128,192));
-        if (internal->measurement_target_callback){
-            internal->measurement_target_callback(internal->measurement_target_instance,&m);
-        }
-    }
+		// create our measurement for the filter
+		m.has_position = true;
+		m.has_rotation = false;
+		m.pose.position.x = world_point.x;
+		m.pose.position.y = world_point.y;
+		m.pose.position.z = world_point.z;
+
+		// update internal state
+		cv::KeyPoint l_kp = l_blobs[tracked_index];
+		cv::KeyPoint r_kp = r_blobs[tracked_index];
+
+		internal->l_tracked_blob.center.x = l_kp.pt.x;
+		internal->l_tracked_blob.center.y = l_kp.pt.y;
+		internal->l_tracked_blob.diameter = l_kp.size;
+
+		internal->r_tracked_blob.center.x = r_kp.pt.x;
+		internal->r_tracked_blob.center.y = r_kp.pt.y;
+		internal->r_tracked_blob.diameter = r_kp.size;
+
+		internal->tracked_object.pose.position.x = world_point.x;
+		internal->tracked_object.pose.position.y = world_point.y;
+		internal->tracked_object.pose.position.z = world_point.z;
+		internal->tracked_object.tracking_id = 1;
+
+		char message[128];
+		snprintf(message, 128, "X: %f Y: %f Z: %f", world_point.x,
+		         world_point.y, world_point.z);
+
+		cv::putText(internal->debug_rgb, message, cv::Point2i(10, 50),
+		            0, 0.5f, cv::Scalar(96, 128, 192));
+		if (internal->measurement_target_callback) {
+			internal->measurement_target_callback(
+			    internal->measurement_target_instance, &m);
+		}
+	}
 
-	tracker_send_debug_frame(inst); //publish our debug frame
+	tracker_send_debug_frame(inst); // publish our debug frame
 
 
 	return true;
 }
 
-bool tracker3D_sphere_stereo_calibrate(tracker_instance_t* inst){
+bool
+tracker3D_sphere_stereo_calibrate(tracker_instance_t* inst)
+{
 
 	printf("calibrating...\n");
 
-	//check if we have saved calibration data. if so, just use it.
-    tracker3D_sphere_stereo_instance_t* internal = (tracker3D_sphere_stereo_instance_t*)inst->internal_instance;
+	// check if we have saved calibration data. if so, just use it.
+	tracker3D_sphere_stereo_instance_t* internal =
+	    (tracker3D_sphere_stereo_instance_t*)inst->internal_instance;
 
 
-	char path_string[256]; //TODO: 256 maybe not enough
-	//TODO: use multiple env vars?
-    char* config_path = secure_getenv("HOME");
-	snprintf(path_string,256,"%s/.config/monado/%s.calibration",config_path,internal->configuration.configuration_filename); //TODO: hardcoded 256
+	char path_string[256]; // TODO: 256 maybe not enough
+	// TODO: use multiple env vars?
+	const char* config_path = secure_getenv("HOME");
+	if (config_path == NULL) {
+		config_path = ""; // HOME may be unset; %s with NULL is UB
+	}
+	snprintf(path_string, 256, "%s/.config/monado/%s.calibration",
+	         config_path,
+	         internal->configuration
+	             .configuration_filename); // TODO: hardcoded 256
 
-    printf("TRY LOADING CONFIG FROM %s\n",path_string);
-	FILE* calib_file = fopen(path_string,"rb");
+	printf("TRY LOADING CONFIG FROM %s\n", path_string);
+	FILE* calib_file = fopen(path_string, "rb");
 	if (calib_file) {
-		//read our calibration from this file
-		read_mat(calib_file,&internal->l_intrinsics);
-		read_mat(calib_file,&internal->r_intrinsics);
-		read_mat(calib_file,&internal->l_distortion);
-		read_mat(calib_file,&internal->r_distortion);
-		read_mat(calib_file,&internal->l_distortion_fisheye);
-		read_mat(calib_file,&internal->r_distortion_fisheye);
-		read_mat(calib_file,&internal->l_rotation);
-		read_mat(calib_file,&internal->r_rotation);
-		read_mat(calib_file,&internal->l_translation);
-		read_mat(calib_file,&internal->r_translation);
-		read_mat(calib_file,&internal->l_projection);
-		read_mat(calib_file,&internal->r_projection);
-		read_mat(calib_file,&internal->disparity_to_depth);
-		cv::Size image_size(internal->l_frame_gray.cols,internal->l_frame_gray.rows);
+		// read our calibration from this file
+		read_mat(calib_file, &internal->l_intrinsics);
+		read_mat(calib_file, &internal->r_intrinsics);
+		read_mat(calib_file, &internal->l_distortion);
+		read_mat(calib_file, &internal->r_distortion);
+		read_mat(calib_file, &internal->l_distortion_fisheye);
+		read_mat(calib_file, &internal->r_distortion_fisheye);
+		read_mat(calib_file, &internal->l_rotation);
+		read_mat(calib_file, &internal->r_rotation);
+		read_mat(calib_file, &internal->l_translation);
+		read_mat(calib_file, &internal->r_translation);
+		read_mat(calib_file, &internal->l_projection);
+		read_mat(calib_file, &internal->r_projection);
+		read_mat(calib_file, &internal->disparity_to_depth);
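+		// the calibration file is a flat sequence of matrices; this
+		// read order must stay in sync with the write order in the
+		// interactive calibration path below.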
+		cv::Size image_size(internal->l_frame_gray.cols,
+		                    internal->l_frame_gray.rows);
 		// TODO: save data indicating calibration image size
 		// and multiply intrinsics accordingly
 
-		cv::fisheye::initUndistortRectifyMap(internal->l_intrinsics, internal->l_distortion_fisheye, cv::noArray(), internal->l_intrinsics, image_size, CV_32FC1, internal->l_undistort_map_x, internal->l_undistort_map_y);
-		cv::fisheye::initUndistortRectifyMap(internal->r_intrinsics, internal->r_distortion_fisheye, cv::noArray(), internal->r_intrinsics, image_size, CV_32FC1, internal->r_undistort_map_x, internal->r_undistort_map_y);
-
-		cv::initUndistortRectifyMap(internal->l_intrinsics,internal->zero_distortion,internal->l_rotation,internal->l_projection,image_size,CV_32FC1,internal->l_rectify_map_x,internal->l_rectify_map_y);
-		cv::initUndistortRectifyMap(internal->r_intrinsics,internal->zero_distortion,internal->r_rotation,internal->r_projection,image_size,CV_32FC1,internal->r_rectify_map_x,internal->r_rectify_map_y);
+		cv::fisheye::initUndistortRectifyMap(
+		    internal->l_intrinsics, internal->l_distortion_fisheye,
+		    cv::noArray(), internal->l_intrinsics, image_size, CV_32FC1,
+		    internal->l_undistort_map_x, internal->l_undistort_map_y);
+		cv::fisheye::initUndistortRectifyMap(
+		    internal->r_intrinsics, internal->r_distortion_fisheye,
+		    cv::noArray(), internal->r_intrinsics, image_size, CV_32FC1,
+		    internal->r_undistort_map_x, internal->r_undistort_map_y);
+
+		cv::initUndistortRectifyMap(
+		    internal->l_intrinsics, internal->zero_distortion,
+		    internal->l_rotation, internal->l_projection, image_size,
+		    CV_32FC1, internal->l_rectify_map_x,
+		    internal->l_rectify_map_y);
+		cv::initUndistortRectifyMap(
+		    internal->r_intrinsics, internal->zero_distortion,
+		    internal->r_rotation, internal->r_projection, image_size,
+		    CV_32FC1, internal->r_rectify_map_x,
+		    internal->r_rectify_map_y);
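+		// zero_distortion is used for the rectify maps because the
+		// fisheye undistort maps above already remove the lens model;
+		// rectification then only applies rotation and projection.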
 
 		printf("calibrated cameras! setting tracking mode\n");
-		internal->calibrated=true;
-		internal->configuration.calibration_mode = CALIBRATION_MODE_NONE;
-		//send an event to notify our driver of the switch into tracking mode.
-		driver_event_t e ={};
+		internal->calibrated = true;
+		internal->configuration.calibration_mode =
+		    CALIBRATION_MODE_NONE;
+		// send an event to notify our driver of the switch into
+		// tracking mode.
+		driver_event_t e = {};
 		e.type = EVENT_TRACKER_RECONFIGURED;
-		internal->event_target_callback(internal->event_target_instance,e);
+		internal->event_target_callback(internal->event_target_instance,
+		                                e);
 		return true;
 	}
 
-	//no saved file - perform interactive calibration.
-	//try and find a chessboard in both images, and run the calibration.
+	// no saved file - perform interactive calibration.
+	// try and find a chessboard in both images, and run the calibration.
 	// - we need to define some mechanism for UI/user interaction.
 
 	// TODO: initialise this on construction and move this to internal state
-	cv::Size board_size(8,6);
+	cv::Size board_size(8, 6);
 	std::vector<cv::Point3f> chessboard_model;
 
-	for (uint32_t i=0;i< board_size.width * board_size.height;i++) {
-		cv::Point3f p(i/board_size.width,i % board_size.width,0.0f);
+	for (uint32_t i = 0; i < board_size.width * board_size.height; i++) {
+		cv::Point3f p(i / board_size.width, i % board_size.width, 0.0f);
 		chessboard_model.push_back(p);
 	}
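+	// note: the model is laid out in chessboard-square units (no physical
+	// square size is applied), so any recovered translation comes out in
+	// squares rather than metres.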
 
@@ -526,139 +634,209 @@ bool tracker3D_sphere_stereo_calibrate(tracker_instance_t* inst){
 	cv::Mat camera_essential;
 	cv::Mat camera_fundamental;
 
-	//clear our debug image
-	cv::rectangle(internal->debug_rgb, cv::Point2f(0,0),cv::Point2f(internal->debug_rgb.cols,internal->debug_rgb.rows),cv::Scalar( 0,0,0 ),-1,0);
+	// clear our debug image
+	cv::rectangle(
+	    internal->debug_rgb, cv::Point2f(0, 0),
+	    cv::Point2f(internal->debug_rgb.cols, internal->debug_rgb.rows),
+	    cv::Scalar(0, 0, 0), -1, 0);
 	cv::Mat disp8;
-	cv::cvtColor(internal->r_frame_gray,disp8,CV_GRAY2BGR);
-	//disp8.copyTo(internal->debug_rgb);
-	// we will collect samples continuously - the user should be able to wave a
-	// chessboard around randomly while the system calibrates.. we only add a
-	// sample when it increases the coverage area substantially, to give the solver
-	// a decent dataset.
-
-	bool found_left = cv::findChessboardCorners(internal->l_frame_gray,board_size,l_chessboard_measured);
-	bool found_right = cv::findChessboardCorners(internal->r_frame_gray,board_size,r_chessboard_measured);
+	cv::cvtColor(internal->r_frame_gray, disp8, CV_GRAY2BGR);
+	// disp8.copyTo(internal->debug_rgb);
+	// we will collect samples continuously - the user should be able to
+	// wave a chessboard around randomly while the system calibrates. we
+	// only add a sample when it increases the coverage area substantially,
+	// to give the solver a decent dataset.
+
+	bool found_left = cv::findChessboardCorners(
+	    internal->l_frame_gray, board_size, l_chessboard_measured);
+	bool found_right = cv::findChessboardCorners(
+	    internal->r_frame_gray, board_size, r_chessboard_measured);
 	char message[128];
-	message[0]=0x0;
+	message[0] = 0x0;
 
-	if ( found_left && found_right ){
+	if (found_left && found_right) {
 		std::vector<cv::Point2f> coverage;
-		for (uint32_t i = 0; i < internal->l_chessboards_measured.size();i++)
-		{
-			cv::Rect brect = cv::boundingRect(internal->l_chessboards_measured[i]);
-			cv::rectangle(internal->debug_rgb,brect.tl(),brect.br(),cv::Scalar(0,64,32));
+		for (uint32_t i = 0;
+		     i < internal->l_chessboards_measured.size(); i++) {
+			cv::Rect brect = cv::boundingRect(
+			    internal->l_chessboards_measured[i]);
+			cv::rectangle(internal->debug_rgb, brect.tl(),
+			              brect.br(), cv::Scalar(0, 64, 32));
 
 			coverage.push_back(cv::Point2f(brect.tl()));
 			coverage.push_back(cv::Point2f(brect.br()));
 		}
-		cv::Rect  pre_rect = cv::boundingRect(coverage);
+		cv::Rect pre_rect = cv::boundingRect(coverage);
 		cv::Rect brect = cv::boundingRect(l_chessboard_measured);
 		coverage.push_back(cv::Point2f(brect.tl()));
 		coverage.push_back(cv::Point2f(brect.br()));
 		cv::Rect post_rect = cv::boundingRect(coverage);
 
-		//std::cout << "COVERAGE: " << brect.area() << "\n";
+		// std::cout << "COVERAGE: " << brect.area() << "\n";
 
-		cv::rectangle(internal->debug_rgb,post_rect.tl(),post_rect.br(),cv::Scalar(0,255,0));
+		cv::rectangle(internal->debug_rgb, post_rect.tl(),
+		              post_rect.br(), cv::Scalar(0, 255, 0));
 
 		if (post_rect.area() > pre_rect.area() + 500) {
-		//we will use the last n samples to calculate our calibration
-
-			if (internal->l_chessboards_measured.size() > MAX_CALIBRATION_SAMPLES)
-		{
-			internal->l_chessboards_measured.erase(internal->l_chessboards_measured.begin());
-			internal->r_chessboards_measured.erase(internal->r_chessboards_measured.begin());
-		}
-		else
-		{
-			internal->chessboards_model.push_back(chessboard_model);
-		}
-
-			internal->l_chessboards_measured.push_back(l_chessboard_measured);
-			internal->r_chessboards_measured.push_back(r_chessboard_measured);
+			// we will use the last n samples to calculate our
+			// calibration
+
+			if (internal->l_chessboards_measured.size() >
+			    MAX_CALIBRATION_SAMPLES) {
+				internal->l_chessboards_measured.erase(
+				    internal->l_chessboards_measured.begin());
+				internal->r_chessboards_measured.erase(
+				    internal->r_chessboards_measured.begin());
+			} else {
+				internal->chessboards_model.push_back(
+				    chessboard_model);
+			}
 
+			internal->l_chessboards_measured.push_back(
+			    l_chessboard_measured);
+			internal->r_chessboards_measured.push_back(
+			    r_chessboard_measured);
 		}
 
-		if (internal->l_chessboards_measured.size() == MAX_CALIBRATION_SAMPLES )
-		{
-			cv::Size image_size(internal->l_frame_gray.cols,internal->l_frame_gray.rows);
+		if (internal->l_chessboards_measured.size() ==
+		    MAX_CALIBRATION_SAMPLES) {
+			cv::Size image_size(internal->l_frame_gray.cols,
+			                    internal->l_frame_gray.rows);
 			cv::Mat errors;
 
-			//float rp_error = cv::stereoCalibrate(internal->chessboards_model,internal->l_chessboards_measured,internal->r_chessboards_measured,internal->l_intrinsics,internal->l_distortion,internal->r_intrinsics,internal->r_distortion,image_size,camera_rotation,camera_translation,camera_essential,camera_fundamental,errors,0);
+			// float rp_error = cv::stereoCalibrate(
+			//     internal->chessboards_model,
+			//     internal->l_chessboards_measured,
+			//     internal->r_chessboards_measured,
+			//     internal->l_intrinsics, internal->l_distortion,
+			//     internal->r_intrinsics, internal->r_distortion,
+			//     image_size, camera_rotation, camera_translation,
+			//     camera_essential, camera_fundamental, errors, 0);
 
-			float rp_error = cv::fisheye::stereoCalibrate(internal->chessboards_model,internal->l_chessboards_measured,internal->r_chessboards_measured,internal->l_intrinsics,internal->l_distortion_fisheye,internal->r_intrinsics,internal->r_distortion_fisheye,image_size,camera_rotation,camera_translation,cv::fisheye::CALIB_RECOMPUTE_EXTRINSIC);
+			float rp_error = cv::fisheye::stereoCalibrate(
+			    internal->chessboards_model,
+			    internal->l_chessboards_measured,
+			    internal->r_chessboards_measured,
+			    internal->l_intrinsics,
+			    internal->l_distortion_fisheye,
+			    internal->r_intrinsics,
+			    internal->r_distortion_fisheye, image_size,
+			    camera_rotation, camera_translation,
+			    cv::fisheye::CALIB_RECOMPUTE_EXTRINSIC);
 
-			//we will generate undistort and rectify mappings separately
+			// we will generate undistort and rectify mappings
+			// separately
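+			// rp_error is the RMS reprojection error in pixels;
+			// values around a pixel or below usually indicate a
+			// usable solve.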
 
 
 			std::cout << "calibration rp_error" << rp_error << "\n";
-			std::cout << "calibration camera_translation" << camera_translation << "\n";
-
-			//TODO: handle both fisheye and normal cameras -right now I only have the fisheye
-
-			cv::fisheye::initUndistortRectifyMap(internal->l_intrinsics, internal->l_distortion_fisheye, cv::noArray(), internal->l_intrinsics, image_size, CV_32FC1, internal->l_undistort_map_x, internal->l_undistort_map_y);
-			cv::fisheye::initUndistortRectifyMap(internal->r_intrinsics, internal->r_distortion_fisheye, cv::noArray(), internal->r_intrinsics, image_size, CV_32FC1, internal->r_undistort_map_x, internal->r_undistort_map_y);
-
-			cv::stereoRectify(internal->l_intrinsics,internal->zero_distortion,internal->r_intrinsics,internal->zero_distortion,image_size,camera_rotation,camera_translation,internal->l_rotation,internal->r_rotation,internal->l_projection,internal->r_projection,internal->disparity_to_depth,cv::CALIB_ZERO_DISPARITY);
-
-			cv::initUndistortRectifyMap(internal->l_intrinsics,internal->zero_distortion,internal->l_rotation,internal->l_projection,image_size,CV_32FC1,internal->l_rectify_map_x,internal->l_rectify_map_y);
-			cv::initUndistortRectifyMap(internal->r_intrinsics,internal->zero_distortion,internal->r_rotation,internal->r_projection,image_size,CV_32FC1,internal->r_rectify_map_x,internal->r_rectify_map_y);
+			std::cout << "calibration camera_translation"
+			          << camera_translation << "\n";
+
+			// TODO: handle both fisheye and normal cameras -
+			// right now I only have the fisheye
+
+			cv::fisheye::initUndistortRectifyMap(
+			    internal->l_intrinsics,
+			    internal->l_distortion_fisheye, cv::noArray(),
+			    internal->l_intrinsics, image_size, CV_32FC1,
+			    internal->l_undistort_map_x,
+			    internal->l_undistort_map_y);
+			cv::fisheye::initUndistortRectifyMap(
+			    internal->r_intrinsics,
+			    internal->r_distortion_fisheye, cv::noArray(),
+			    internal->r_intrinsics, image_size, CV_32FC1,
+			    internal->r_undistort_map_x,
+			    internal->r_undistort_map_y);
+
+			cv::stereoRectify(
+			    internal->l_intrinsics, internal->zero_distortion,
+			    internal->r_intrinsics, internal->zero_distortion,
+			    image_size, camera_rotation, camera_translation,
+			    internal->l_rotation, internal->r_rotation,
+			    internal->l_projection, internal->r_projection,
+			    internal->disparity_to_depth,
+			    cv::CALIB_ZERO_DISPARITY);
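+			// stereoRectify also fills disparity_to_depth (the
+			// Q matrix) that the tracking path uses to reproject
+			// blob disparities into 3D.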
+
+			cv::initUndistortRectifyMap(
+			    internal->l_intrinsics, internal->zero_distortion,
+			    internal->l_rotation, internal->l_projection,
+			    image_size, CV_32FC1, internal->l_rectify_map_x,
+			    internal->l_rectify_map_y);
+			cv::initUndistortRectifyMap(
+			    internal->r_intrinsics, internal->zero_distortion,
+			    internal->r_rotation, internal->r_projection,
+			    image_size, CV_32FC1, internal->r_rectify_map_x,
+			    internal->r_rectify_map_y);
 
 			char path_string[PATH_MAX];
 			char file_string[PATH_MAX];
-			//TODO: centralise this - use multiple env vars?
+			// TODO: centralise this - use multiple env vars?
-			char* config_path = secure_getenv("HOME");
+			const char* config_path = secure_getenv("HOME");
+			if (config_path == NULL) {
+				config_path = ""; // HOME may be unset
+			}
-			snprintf(path_string,PATH_MAX,"%s/.config/monado",config_path);
-			snprintf(file_string,PATH_MAX,"%s/.config/monado/%s.calibration",config_path,internal->configuration.configuration_filename);
-
-			printf("TRY WRITING CONFIG TO %s\n",file_string);
-			FILE* calib_file = fopen(file_string,"wb");
-			if (! calib_file) {
-				//try creating it
+			snprintf(path_string, PATH_MAX, "%s/.config/monado",
+			         config_path);
+			snprintf(
+			    file_string, PATH_MAX,
+			    "%s/.config/monado/%s.calibration", config_path,
+			    internal->configuration.configuration_filename);
+
+			printf("TRY WRITING CONFIG TO %s\n", file_string);
+			FILE* calib_file = fopen(file_string, "wb");
+			if (!calib_file) {
+				// try creating it
 				mkpath(path_string);
 			}
-			calib_file = fopen(file_string,"wb");
-			if (! calib_file) {
-				printf("ERROR. could not create calibration file %s\n",file_string);
+			calib_file = fopen(file_string, "wb");
+			if (!calib_file) {
+				printf(
+				    "ERROR. could not create calibration file "
+				    "%s\n",
+				    file_string);
 			} else {
-				write_mat(calib_file,&internal->l_intrinsics);
-				write_mat(calib_file,&internal->r_intrinsics);
-				write_mat(calib_file,&internal->l_distortion);
-				write_mat(calib_file,&internal->r_distortion);
-				write_mat(calib_file,&internal->l_distortion_fisheye);
-				write_mat(calib_file,&internal->r_distortion_fisheye);
-				write_mat(calib_file,&internal->l_rotation);
-				write_mat(calib_file,&internal->r_rotation);
-				write_mat(calib_file,&internal->l_translation);
-				write_mat(calib_file,&internal->r_translation);
-				write_mat(calib_file,&internal->l_projection);
-				write_mat(calib_file,&internal->r_projection);
-				write_mat(calib_file,&internal->disparity_to_depth);
+				write_mat(calib_file, &internal->l_intrinsics);
+				write_mat(calib_file, &internal->r_intrinsics);
+				write_mat(calib_file, &internal->l_distortion);
+				write_mat(calib_file, &internal->r_distortion);
+				write_mat(calib_file,
+				          &internal->l_distortion_fisheye);
+				write_mat(calib_file,
+				          &internal->r_distortion_fisheye);
+				write_mat(calib_file, &internal->l_rotation);
+				write_mat(calib_file, &internal->r_rotation);
+				write_mat(calib_file, &internal->l_translation);
+				write_mat(calib_file, &internal->r_translation);
+				write_mat(calib_file, &internal->l_projection);
+				write_mat(calib_file, &internal->r_projection);
+				write_mat(calib_file,
+				          &internal->disparity_to_depth);
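+				// keep this write order in sync with the
+				// read_mat() sequence in the load path above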
 
 
 				fclose(calib_file);
 			}
 
 			printf("calibrated cameras! setting tracking mode\n");
-			internal->calibrated=true;
-			internal->configuration.calibration_mode = CALIBRATION_MODE_NONE;
-			//send an event to notify our driver of the switch into tracking mode.
-			driver_event_t e ={};
+			internal->calibrated = true;
+			internal->configuration.calibration_mode =
+			    CALIBRATION_MODE_NONE;
+			// send an event to notify our driver of the switch into
+			// tracking mode.
+			driver_event_t e = {};
 			e.type = EVENT_TRACKER_RECONFIGURED;
-			internal->event_target_callback(internal->event_target_instance,e);
+			internal->event_target_callback(
+			    internal->event_target_instance, e);
 		}
 
-		    snprintf(message,128,"COLLECTING SAMPLE: %d/%d",internal->l_chessboards_measured.size() +1,MAX_CALIBRATION_SAMPLES);
+		snprintf(message, 128, "COLLECTING SAMPLE: %d/%d",
+		         (int)(internal->l_chessboards_measured.size() + 1),
+		         MAX_CALIBRATION_SAMPLES);
 	}
 
 
-	cv::drawChessboardCorners(internal->debug_rgb,board_size,l_chessboard_measured,found_left);
-	cv::drawChessboardCorners(internal->debug_rgb,board_size,r_chessboard_measured,found_right);
+	cv::drawChessboardCorners(internal->debug_rgb, board_size,
+	                          l_chessboard_measured, found_left);
+	cv::drawChessboardCorners(internal->debug_rgb, board_size,
+	                          r_chessboard_measured, found_right);
 
-	cv::putText(internal->debug_rgb,"CALIBRATION MODE",cv::Point2i(160,240),0,1.0f,cv::Scalar(192,192,192));
-	cv::putText(internal->debug_rgb,message,cv::Point2i(160,460),0,0.5f,cv::Scalar(192,192,192));
+	cv::putText(internal->debug_rgb, "CALIBRATION MODE",
+	            cv::Point2i(160, 240), 0, 1.0f, cv::Scalar(192, 192, 192));
+	cv::putText(internal->debug_rgb, message, cv::Point2i(160, 460), 0,
+	            0.5f, cv::Scalar(192, 192, 192));
 
-	//DEBUG: write out our image planes to confirm imagery is arriving as expected
+	// DEBUG: write out our image planes to confirm imagery is arriving as
+	// expected
 	/*cv::imwrite("/tmp/l_out_y.jpg",internal->l_frame_gray);
 	cv::imwrite("/tmp/r_out_y.jpg",internal->r_frame_gray);
 	cv::imwrite("/tmp/l_out_u.jpg",internal->l_frame_u);
@@ -672,54 +850,72 @@ bool tracker3D_sphere_stereo_calibrate(tracker_instance_t* inst){
 }
 
 
-bool tracker3D_sphere_stereo_get_poses(tracker_instance_t* inst,tracked_object_t* objects, uint32_t* count) {
-	if (objects == NULL)
-	{
-		*count = TRACKED_POINTS; //tracking a single object
+bool
+tracker3D_sphere_stereo_get_poses(tracker_instance_t* inst,
+                                  tracked_object_t* objects,
+                                  uint32_t* count)
+{
+	if (objects == NULL) {
+		*count = TRACKED_POINTS; // tracking a single object
 		return true;
 	}
 
-	tracker3D_sphere_stereo_instance_t* internal = (tracker3D_sphere_stereo_instance_t*)inst->internal_instance;
-	for (uint32_t i = 0;i< 1;i++) {
+	tracker3D_sphere_stereo_instance_t* internal =
+	    (tracker3D_sphere_stereo_instance_t*)inst->internal_instance;
+	for (uint32_t i = 0; i < 1; i++) {
 
 		objects[i] = internal->tracked_object;
 	}
-	*count=1;
-	internal->poses_consumed=true;
+	*count = 1;
+	internal->poses_consumed = true;
 	return true;
 }
 
-bool tracker3D_sphere_stereo_new_poses(tracker_instance_t* inst)
+bool
+tracker3D_sphere_stereo_new_poses(tracker_instance_t* inst)
 {
-	tracker3D_sphere_stereo_instance_t*  internal = (tracker3D_sphere_stereo_instance_t*)inst->internal_instance;
+	tracker3D_sphere_stereo_instance_t* internal =
+	    (tracker3D_sphere_stereo_instance_t*)inst->internal_instance;
 	return internal->poses_consumed;
 }
 
-bool tracker3D_sphere_stereo_configure(tracker_instance_t* inst,tracker_stereo_configuration_t* config)
+bool
+tracker3D_sphere_stereo_configure(tracker_instance_t* inst,
+                                  tracker_stereo_configuration_t* config)
 {
-	tracker3D_sphere_stereo_instance_t*  internal = (tracker3D_sphere_stereo_instance_t*)inst->internal_instance;
-	//return false if we cannot handle this config
+	tracker3D_sphere_stereo_instance_t* internal =
+	    (tracker3D_sphere_stereo_instance_t*)inst->internal_instance;
+	// return false if we cannot handle this config
 
 	if (config->l_format != FORMAT_YUV444_UINT8) {
-		internal->configured=false;
+		internal->configured = false;
 		return false;
 	}
 	internal->configuration = *config;
-	internal->configured=true;
+	internal->configured = true;
 	return true;
 }
 
-void tracker3D_sphere_stereo_register_measurement_callback (tracker_instance_t* inst, void* target_instance, measurement_consumer_callback_func target_func) {
-	tracker3D_sphere_stereo_instance_t* internal = (tracker3D_sphere_stereo_instance_t*)inst->internal_instance;
+void
+tracker3D_sphere_stereo_register_measurement_callback(
+    tracker_instance_t* inst,
+    void* target_instance,
+    measurement_consumer_callback_func target_func)
+{
+	tracker3D_sphere_stereo_instance_t* internal =
+	    (tracker3D_sphere_stereo_instance_t*)inst->internal_instance;
 	internal->measurement_target_instance = target_instance;
 	internal->measurement_target_callback = target_func;
 }
 
-void tracker3D_sphere_stereo_register_event_callback (tracker_instance_t* inst, void* target_instance, event_consumer_callback_func target_func) {
-	tracker3D_sphere_stereo_instance_t* internal = (tracker3D_sphere_stereo_instance_t*)inst->internal_instance;
+void
+tracker3D_sphere_stereo_register_event_callback(
+    tracker_instance_t* inst,
+    void* target_instance,
+    event_consumer_callback_func target_func)
+{
+	tracker3D_sphere_stereo_instance_t* internal =
+	    (tracker3D_sphere_stereo_instance_t*)inst->internal_instance;
 	internal->event_target_instance = target_instance;
 	internal->event_target_callback = target_func;
 }
-
-
-
diff --git a/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_stereo.h b/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_stereo.h
index 2e87a170a362788297209d2818fc38c11db35e7b..2ce519b65902b656aa92d3ed6b5516500f2cf7b0 100644
--- a/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_stereo.h
+++ b/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_stereo.h
@@ -15,30 +15,51 @@
 extern "C" {
 #endif
 
-//forward declare this
-typedef struct tracker3D_sphere_stereo_instance tracker3D_sphere_stereo_instance_t;
+// forward declare this
+typedef struct tracker3D_sphere_stereo_instance
+    tracker3D_sphere_stereo_instance_t;
 
 
-tracker3D_sphere_stereo_instance_t* tracker3D_sphere_stereo_create(tracker_instance_t* inst);
-bool tracker3D_sphere_stereo_destroy(tracker_instance_t* inst);
+tracker3D_sphere_stereo_instance_t*
+tracker3D_sphere_stereo_create(tracker_instance_t* inst);
+bool
+tracker3D_sphere_stereo_destroy(tracker_instance_t* inst);
 
-capture_parameters_t tracker3D_sphere_stereo_get_capture_params(tracker_instance_t* inst);
+capture_parameters_t
+tracker3D_sphere_stereo_get_capture_params(tracker_instance_t* inst);
 
-bool tracker3D_sphere_stereo_get_debug_frame(tracker_instance_t* inst,frame_t* frame);
-bool tracker3D_sphere_stereo_queue(tracker_instance_t* inst,frame_t* frame);
-bool tracker3D_sphere_stereo_get_poses(tracker_instance_t* inst,tracked_object_t* objects,uint32_t* count);
-bool tracker3D_sphere_stereo_new_poses(tracker_instance_t* inst);
-bool tracker3D_sphere_stereo_configure(tracker_instance_t* inst, tracker_stereo_configuration_t* config);
-void tracker3D_sphere_stereo_register_measurement_callback (tracker_instance_t* inst, void* target_instance, measurement_consumer_callback_func target_func);
-void tracker3D_sphere_stereo_register_event_callback (tracker_instance_t* inst, void* target_instance, event_consumer_callback_func target_func);
+bool
+tracker3D_sphere_stereo_get_debug_frame(tracker_instance_t* inst,
+                                        frame_t* frame);
+bool
+tracker3D_sphere_stereo_queue(tracker_instance_t* inst, frame_t* frame);
+bool
+tracker3D_sphere_stereo_get_poses(tracker_instance_t* inst,
+                                  tracked_object_t* objects,
+                                  uint32_t* count);
+bool
+tracker3D_sphere_stereo_new_poses(tracker_instance_t* inst);
+bool
+tracker3D_sphere_stereo_configure(tracker_instance_t* inst,
+                                  tracker_stereo_configuration_t* config);
+void
+tracker3D_sphere_stereo_register_measurement_callback(
+    tracker_instance_t* inst,
+    void* target_instance,
+    measurement_consumer_callback_func target_func);
+void
+tracker3D_sphere_stereo_register_event_callback(
+    tracker_instance_t* inst,
+    void* target_instance,
+    event_consumer_callback_func target_func);
 
-static bool tracker3D_sphere_stereo_track(tracker_instance_t* inst);
-static bool tracker3D_sphere_stereo_calibrate(tracker_instance_t* inst);
+static bool
+tracker3D_sphere_stereo_track(tracker_instance_t* inst);
+static bool
+tracker3D_sphere_stereo_calibrate(tracker_instance_t* inst);
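+// note: static function declarations in a shared header are questionable -
+// every including translation unit gets its own internal-linkage declaration;
+// these arguably belong in the .cpp file instead.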
 
 #ifdef __cplusplus
-} //extern "C"
+} // extern "C"
 #endif
 
-#endif //TRACKER3D_SPHERE_STEREO_H
-
-
+#endif // TRACKER3D_SPHERE_STEREO_H