diff --git a/src/xrt/drivers/montrack/frameservers/common/frameserver.c b/src/xrt/drivers/montrack/frameservers/common/frameserver.c
index 27da59da44f0ef35a01abf401349c1847ef6eebe..cb2689c7f139e3c980b922d1f7686ab274bb2664 100644
--- a/src/xrt/drivers/montrack/frameservers/common/frameserver.c
+++ b/src/xrt/drivers/montrack/frameservers/common/frameserver.c
@@ -126,8 +126,8 @@ bool frame_resample(frame_t* source, frame_t* out) {
     uint8_t* dest_ptr;
     uint8_t source_pixel_bytes = format_bytes_per_pixel(source->format);
     uint32_t source_line_bytes = source->stride;
-    uint8_t dest_pixel_bytes= format_bytes_per_pixel(out->format);
-    uint32_t dest_line_bytes = out->width;
+	uint8_t dest_pixel_bytes= format_bytes_per_pixel(out->format);
+	uint32_t dest_line_bytes = out->stride;
 
     if (! out->data)
     {
diff --git a/src/xrt/drivers/montrack/frameservers/v4l2/v4l2_frameserver.c b/src/xrt/drivers/montrack/frameservers/v4l2/v4l2_frameserver.c
index b38ea23c6ad3e4e43dad75279094397fedec1e21..33abf77584fcb78f0899314e7f1c86fef105ef38 100644
--- a/src/xrt/drivers/montrack/frameservers/v4l2/v4l2_frameserver.c
+++ b/src/xrt/drivers/montrack/frameservers/v4l2/v4l2_frameserver.c
@@ -153,6 +153,10 @@ void v4l2_frameserver_stream_run(frameserver_instance_t* inst) {
 	v_format.fmt.pix.height = internal->source_descriptor.height;
 	v_format.fmt.pix.pixelformat = internal->source_descriptor.stream_format;
 	v_format.fmt.pix.field = V4L2_FIELD_ANY;
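+	//per the V4L2 spec, drivers reporting V4L2_CAP_EXT_PIX_FORMAT expect priv
+	//to be set to V4L2_PIX_FMT_PRIV_MAGIC before S_FMT, to signal that the
+	//extended v4l2_pix_format fields are initialised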
+	if (internal->source_descriptor.extended_format > 0) {
+		v_format.fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+	}
 
 	if (ioctl(fd,VIDIOC_S_FMT,&v_format) < 0)
 	{
@@ -204,6 +208,7 @@ void v4l2_frameserver_stream_run(frameserver_instance_t* inst) {
 			if (capture_userptr)
 			{
 				mem[i] = aligned_alloc(getpagesize(),v_buf.length); //align this to a memory page, v4l2 likes it that way
+				//mem[i] = malloc(v_buf.length);
 				if (! mem[i]) {
 					printf("ERROR: could not alloc page-aligned memory\n");
 					return;
@@ -229,12 +234,20 @@ void v4l2_frameserver_stream_run(frameserver_instance_t* inst) {
 		}
 		int start_capture = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 		if (ioctl(fd,VIDIOC_STREAMON,&start_capture) < 0) {
-			printf("could not start capture!\n");
+			printf("ERROR: could not start capture!\n");
 			return;
 		}
 
-
+	uint8_t* cropped_buffer = NULL;
 	frame_t f = {}; //we dequeue buffers into this frame in our main loop
+	if (internal->source_descriptor.crop_scanline_bytes_start > 0) {
+		//the buffer must hold the cropped YUYV stream data (2 bytes per pixel),
+		//regardless of the (possibly smaller) consumer-facing format
+		uint32_t alloc_size = internal->source_descriptor.crop_width * internal->source_descriptor.height * 2;
+		cropped_buffer = malloc(alloc_size);
+		if (! cropped_buffer){
+			printf("ERROR: could not alloc memory!\n");
+			exit(1);
+		}
+	}
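+	//NOTE: cropped_buffer is reused for every dequeued frame and is never
+	//freed - TODO: release it when streaming stops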
 	f.source_id = internal->source_descriptor.source_id;
 	switch (internal->source_descriptor.stream_format) {
 	    case V4L2_PIX_FMT_YUYV:
@@ -271,15 +284,44 @@ void v4l2_frameserver_stream_run(frameserver_instance_t* inst) {
 		if (ioctl(fd,VIDIOC_DQBUF,&v_buf) < 0) {
 			printf("dequeue failed\n");
 		} else {
-			printf("dequeue succeeded %d\n",v_buf.index);
+			//printf("dequeue succeeded %d used %d of %d \n",v_buf.index,v_buf.bytesused,v_buf.length);
+			if (internal->source_descriptor.crop_scanline_bytes_start > 0) {
+				//we need to crop our stream frame into a new buffer
+				uint32_t stream_bytes_per_pixel = 0;
+				switch (internal->source_descriptor.stream_format) {
+				    case V4L2_PIX_FMT_YUYV:
+					    stream_bytes_per_pixel = 2;
+					    break;
+				    default:
+					    printf("ERROR: No crop support for non-YUYV stream formats\n");
+					    exit(1);
+				}
 
-			//process frame
-			f.width = internal->source_descriptor.width;
-			f.height = internal->source_descriptor.height;
-			//reasonable default
-			f.stride= internal->source_descriptor.width * format_bytes_per_pixel(internal->source_descriptor.format);
-			f.size_bytes = v_buf.bytesused;
-			f.data = mem[v_buf.index];
+				uint32_t raw_stride = internal->source_descriptor.width * stream_bytes_per_pixel;
+				uint32_t cropped_stride = internal->source_descriptor.crop_width * stream_bytes_per_pixel;
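+				//e.g. for the 1748x408 PS4 mode: raw_stride = 1748*2 = 3496
+				//bytes; we skip the 96 header/audio bytes at the start of each
+				//scanline and keep crop_width (1280) * 2 = 2560 bytes of image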
+				uint8_t* bufptr = mem[v_buf.index];
+				for (uint32_t i=0;i < internal->source_descriptor.height;i++) {
+					uint8_t* dstptr = cropped_buffer + (i*cropped_stride);
+					uint8_t* srcptr = bufptr + (i* raw_stride) + internal->source_descriptor.crop_scanline_bytes_start;
+					//printf("dstptr %d srcptr %d\n ",(i*cropped_stride),(i* raw_stride) + internal->source_descriptor.crop_scanline_bytes_start);
+					memcpy(dstptr,srcptr,cropped_stride);
+				}
+				//fix up the frame we supply to the consumer
+				f.width = internal->source_descriptor.crop_width;
+				f.height = internal->source_descriptor.height;
+				//reasonable default - will get reset
+				f.stride = internal->source_descriptor.crop_width * format_bytes_per_pixel(internal->source_descriptor.format);
+				f.size_bytes = cropped_stride * f.height;
+				f.data=cropped_buffer;
+			} else {
+				//process frame
+				f.width = internal->source_descriptor.width;
+				f.height = internal->source_descriptor.height;
+				//reasonable default - will get reset
+				f.stride= internal->source_descriptor.width * format_bytes_per_pixel(internal->source_descriptor.format);
+				f.size_bytes = v_buf.bytesused;
+				f.data = mem[v_buf.index];
+			}
 
 			switch (internal->source_descriptor.stream_format) {
 			    case V4L2_PIX_FMT_JPEG:
@@ -309,6 +351,7 @@ void v4l2_frameserver_stream_run(frameserver_instance_t* inst) {
 					    case FORMAT_Y_UINT8:
 						    //split our Y plane out
 						    sampled_frame = f; //copy our buffer frames attributes
+							sampled_frame.data = NULL;
 							sampled_frame.format = FORMAT_Y_UINT8;
 							sampled_frame.stride = f.width;
 							sampled_frame.size_bytes = frame_size_in_bytes(&sampled_frame);
@@ -336,6 +379,7 @@ void v4l2_frameserver_stream_run(frameserver_instance_t* inst) {
 					    case FORMAT_Y_UINT8:
 						    //split our Y plane out
 						    sampled_frame = f; //copy our buffer frames attributes
+							sampled_frame.data = NULL;
 							sampled_frame.format = FORMAT_Y_UINT8;
 							sampled_frame.stride = f.width;
 							sampled_frame.size_bytes = frame_size_in_bytes(&sampled_frame);
@@ -353,6 +397,7 @@ void v4l2_frameserver_stream_run(frameserver_instance_t* inst) {
 					    case FORMAT_YUV444_UINT8:
 						    //upsample our YUYV to YUV444
 						    sampled_frame = f; //copy our buffer frames attributes
+							sampled_frame.data = NULL;
 							sampled_frame.format = FORMAT_YUV444_UINT8;
 							sampled_frame.stride = f.width * 3;
 							sampled_frame.size_bytes = frame_size_in_bytes(&sampled_frame);
@@ -436,7 +481,8 @@ uint32_t v4l2_frameserver_get_source_descriptors(v4l2_source_descriptor_t** sds,
 		return 0;
 	}
 
-	if (ioctl(fd, VIDIOC_QUERYCAP, &cap)) {
+	v4l2_source_descriptor_t* descriptor = *sds;
+	if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0) {
 		if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
 			return 0; //not a video device
 		}
@@ -444,16 +490,20 @@ uint32_t v4l2_frameserver_get_source_descriptors(v4l2_source_descriptor_t** sds,
 			return 0; //cannot stream
 		}
 		if (!(cap.capabilities & V4L2_CAP_TIMEPERFRAME)) {
-			printf("WARNING: device does not select setting frame intervals\n");
+			printf("WARNING: device does not support setting frame intervals\n");
+		}
+		if (*sds) { //skip this if we are just counting, descriptor will be NULL
+			if ((cap.capabilities & V4L2_CAP_EXT_PIX_FORMAT)) {
+				descriptor->extended_format=1; //need to query for extended format info
+			}
 		}
 	}
-
 	struct v4l2_fmtdesc desc;
 	memset(&desc,0,sizeof(desc));
 	desc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	v4l2_source_descriptor_t* descriptor = *sds;
 	while (ioctl(fd,VIDIOC_ENUM_FMT,&desc) == 0) {
-		//printf("FORMAT: %s %04x\n", desc.description,desc.pixelformat);
+		printf("FORMAT: %s %04x %d\n", desc.description,desc.pixelformat, desc.type);
 		struct v4l2_frmsizeenum frame_size;
 		struct v4l2_frmivalenum frame_interval;
 		memset(&frame_size,0,sizeof(frame_size));
@@ -462,7 +512,7 @@ uint32_t v4l2_frameserver_get_source_descriptors(v4l2_source_descriptor_t** sds,
 		frame_size.index = 0;
 		while (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &frame_size) >= 0) {
 			if (frame_size.type == V4L2_FRMSIZE_TYPE_DISCRETE) {
-				    //printf("%dx%d\n", frame_size.discrete.width,frame_size.discrete.height);
+				    printf("%dx%d\n", frame_size.discrete.width,frame_size.discrete.height);
 				    frame_interval.pixel_format = frame_size.pixel_format;
 					frame_interval.width = frame_size.discrete.width;
 					frame_interval.height = frame_size.discrete.height;
@@ -478,65 +528,45 @@ uint32_t v4l2_frameserver_get_source_descriptors(v4l2_source_descriptor_t** sds,
 							switch(desc.pixelformat){
 							case V4L2_PIX_FMT_YUYV:
 								descriptor->format = FORMAT_YUYV_UINT8;
-								strncpy(descriptor->device_path,v4l2_device,256); //TODO: hardcoded 256
-								descriptor->device_path[255]=0x0; //TODO: hardcoded 256
-								strncpy(descriptor->name,cap.driver,128);
-								descriptor->device_path[127]=0x0;
 								descriptor->width = frame_interval.width;
 								descriptor->height = frame_interval.height;
-								descriptor->stream_format = desc.pixelformat;
 								descriptor->rate = rate;
 								descriptor->sampling = SAMPLING_NONE;
+								source_descriptor_from_v4l2(descriptor,v4l2_device,&cap,&desc);
 								descriptor++;
 
 								descriptor->format = FORMAT_YUV444_UINT8;
-								strncpy(descriptor->device_path,v4l2_device,256); //TODO: hardcoded 256
-								descriptor->device_path[255]=0x0; //TODO: hardcoded 256
-								strncpy(descriptor->name,cap.driver,128);
-								descriptor->device_path[127]=0x0;
 								descriptor->width = frame_interval.width;
 								descriptor->height = frame_interval.height;
-								descriptor->stream_format = desc.pixelformat;
 								descriptor->rate = rate;
 								descriptor->sampling = SAMPLING_UPSAMPLED;
+								source_descriptor_from_v4l2(descriptor,v4l2_device,&cap,&desc);
 								descriptor++;
 
 								descriptor->format = FORMAT_Y_UINT8;
-								strncpy(descriptor->device_path,v4l2_device,256); //TODO: hardcoded 256
-								descriptor->device_path[255]=0x0; //TODO: hardcoded 256
-								strncpy(descriptor->name,cap.driver,128);
-								descriptor->device_path[127]=0x0;
 								descriptor->width = frame_interval.width;
 								descriptor->height = frame_interval.height;
-								descriptor->stream_format = desc.pixelformat;
 								descriptor->rate = rate;
 								descriptor->sampling = SAMPLING_DOWNSAMPLED;
+								source_descriptor_from_v4l2(descriptor,v4l2_device,&cap,&desc);
 								descriptor++;
 								sd_count += 3;
 								break;
 							case V4L2_PIX_FMT_JPEG: //MJPEG stream format
 								descriptor->format = FORMAT_YUV444_UINT8;
-								strncpy(descriptor->device_path,v4l2_device,256); //TODO: hardcoded 256
-								descriptor->device_path[255]=0x0; //TODO: hardcoded 256
-								strncpy(descriptor->name,cap.driver,128);
-								descriptor->device_path[127]=0x0;
 								descriptor->width = frame_interval.width;
 								descriptor->height = frame_interval.height;
-								descriptor->stream_format = desc.pixelformat;
 								descriptor->rate = rate;
 								descriptor->sampling = SAMPLING_UPSAMPLED;
+								source_descriptor_from_v4l2(descriptor,v4l2_device,&cap,&desc);
 								descriptor++;
 
 								descriptor->format = FORMAT_Y_UINT8;
-								strncpy(descriptor->device_path,v4l2_device,256); //TODO: hardcoded 256
-								descriptor->device_path[255]=0x0; //TODO: hardcoded 256
-								strncpy(descriptor->name,cap.driver,128);
-								descriptor->device_path[127]=0x0;
 								descriptor->width = frame_interval.width;
 								descriptor->height = frame_interval.height;
-								descriptor->stream_format = desc.pixelformat;
 								descriptor->rate = rate;
 								descriptor->sampling = SAMPLING_DOWNSAMPLED;
+								source_descriptor_from_v4l2(descriptor,v4l2_device,&cap,&desc);
 								descriptor++;
 								sd_count +=2;
 								break;
@@ -592,3 +622,28 @@ bool v4l2_frameserver_test(){
 	}
 	return true;
 }
+
+static bool source_descriptor_from_v4l2(v4l2_source_descriptor_t* descriptor, char* v4l2_device, struct v4l2_capability* cap,struct v4l2_fmtdesc* desc) {
+
+	strncpy(descriptor->device_path,v4l2_device,256); //TODO: hardcoded 256
+	descriptor->device_path[255]=0x0; //TODO: hardcoded 256
+	strncpy(descriptor->name,(char*)cap->driver,32);
+	descriptor->name[31]=0x0; //terminate at the copied length, not the buffer end
+	strncpy(descriptor->model,(char*)cap->card,32);
+	descriptor->model[31]=0x0;
+	descriptor->stream_format = desc->pixelformat;
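+	//also record extended-format support here so every descriptor carries the
+	//flag, not only the first one tagged in the VIDIOC_QUERYCAP block above
+	if (cap->capabilities & V4L2_CAP_EXT_PIX_FORMAT) {
+		descriptor->extended_format = 1;
+	}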
+
+	//special-case the PS4 Eye camera  - need to crop the main stereo image out
+	//of the composite (header+audio + main + interlaced) frame the driver
+	//produces
+	if (strcmp((char*)cap->card,"USB Camera-OV580: USB Camera-OV") == 0) {
+		descriptor->crop_scanline_bytes_start = 96;
+		descriptor->crop_width = 2560; //assume highest res
+		if (descriptor->width < 900) {
+			descriptor->crop_width = 640;
+		}
+		else if (descriptor->width < 2000) {
+			descriptor->crop_width = 1280;
+		}
+	}
+	return true;
+}
diff --git a/src/xrt/drivers/montrack/frameservers/v4l2/v4l2_frameserver.h b/src/xrt/drivers/montrack/frameservers/v4l2/v4l2_frameserver.h
index 5c6b56bd73ffeacb21ba0044e458b1fa39b8a09d..7ee30b0cdc246876c4ab67dde15d5350a4e84a0d 100644
--- a/src/xrt/drivers/montrack/frameservers/v4l2/v4l2_frameserver.h
+++ b/src/xrt/drivers/montrack/frameservers/v4l2/v4l2_frameserver.h
@@ -17,6 +17,7 @@
 typedef struct v4l2_source_descriptor {
 	char device_path[256]; //TODO: might not be enough
 	char name[128];
+	char model[128];
 	uint64_t source_id;
 	frame_format_t format;
 	uint32_t stream_format;
@@ -24,6 +25,9 @@ typedef struct v4l2_source_descriptor {
 	uint32_t width;
 	uint32_t height;
 	uint32_t rate;
+	uint8_t extended_format;
+	uint32_t crop_scanline_bytes_start; //byte offset - special case for ps4 camera
+	uint32_t crop_width; // pixels - special case for ps4 camera
 } v4l2_source_descriptor_t;
 
 typedef struct v4l2_frameserver_instance {
@@ -58,6 +62,7 @@ bool v4l2_frameserver_is_running(frameserver_instance_t* inst);
 bool v4l2_frameserver_test();
 
 static void v4l2_frameserver_stream_run(frameserver_instance_t* inst);  //streaming thread entrypoint
+static bool source_descriptor_from_v4l2(v4l2_source_descriptor_t* source_descriptor, char* v4l2_device, struct v4l2_capability* cap,struct v4l2_fmtdesc* desc);
 
 
 #endif //V4L2_FRAMESERVER_H
diff --git a/src/xrt/drivers/montrack/mt_device.c b/src/xrt/drivers/montrack/mt_device.c
index 7b35cdfff70122847f052b4c45e596f4cd008fe4..d612695c4dd59ea175c71ad448197272a130dd1c 100644
--- a/src/xrt/drivers/montrack/mt_device.c
+++ b/src/xrt/drivers/montrack/mt_device.c
@@ -65,6 +65,13 @@ mt_device_get_tracked_pose(struct xrt_device *xdev,
 			md->filter->filter_predict_state(md->filter,&filtered,0);
 			out_relation->pose = filtered.pose;
 		    break;
+	    case TRACKER_TYPE_OSVR_UVBI:
+		    out_relation->relation_flags = (enum xrt_space_relation_flags)(
+		        XRT_SPACE_RELATION_POSITION_VALID_BIT |
+		        XRT_SPACE_RELATION_POSITION_TRACKED_BIT);
+		    //TODO: get pose from osvr tracker
+		    //out_relation->pose = filtered.pose;
+		    break;
 
 	    default:
 		    printf("ERROR: Unknown tracker type\n");
@@ -109,6 +116,18 @@ mt_device_create(char* device_name,bool log_verbose, bool log_debug) {
 			return md;
 		}
 	}
+	if (strcmp(device_name,"OSVR_ELP_60FPS") == 0) {
+		if (mt_create_osvr_elp(md)) {
+			return md;
+		}
+	}
+
+	if (strcmp(device_name,"STEREO_PS4_60FPS") == 0) {
+		if (mt_create_stereo_ps4(md)) {
+			return md;
+		}
+	}
+
 
 	return NULL;
 
@@ -343,6 +362,151 @@ bool mt_create_stereo_elp(mt_device_t* md) {
 }
 
 
+bool mt_create_osvr_elp(mt_device_t* md) {
+
+	// TODO - add IMU input source -> filter
+
+	md->frameserver_count=1; // this driver uses a single, mono camera source
+	md->frameservers[0] = frameserver_create(FRAMESERVER_TYPE_UVC);
+	// ask our frameserver for available sources - note this will return a
+	// type-specific struct that we need to deal with e.g. UVC-specific, FFMPEG-specific.
+	uint32_t source_count=0;
+	md->frameservers[0]->frameserver_enumerate_sources(md->frameservers[0],NULL,&source_count);
+	if (source_count == 0){
+		//we have no sources, we cannot continue
+		return false;
+	}
+	uvc_source_descriptor_t* descriptors = calloc(source_count,sizeof(uvc_source_descriptor_t));
+	md->frameservers[0]->frameserver_enumerate_sources(md->frameservers[0],descriptors,&source_count);
+	// defer further configuration and stream start until the rest of our chain is set up.
+
+	md->tracker = tracker_create(TRACKER_TYPE_OSVR_UVBI);
+	tracker_mono_configuration_t tracker_config = {};
+
+	// configure our ELP camera when we find it during enumeration
+	uint32_t source_index = 0; // our frameserver config descriptor index - we would have an array for multiple devices
+	for (uint32_t i=0; i< source_count;i++){
+		uvc_source_descriptor_t s = descriptors[i];
+		if (descriptors[i].product_id == 0x9750 && descriptors[i].vendor_id == 0x05a3 && descriptors[i].format == FORMAT_Y_UINT8) {
+			if (descriptors[i].width == 1280 && descriptors[i].height == 480 && descriptors[i].rate == 166666) {
+				tracker_config.format = descriptors[i].format;
+				tracker_config.source_id =descriptors[i].source_id;
+				snprintf(tracker_config.configuration_filename,128,"ELP_60FPS_osvr_%s",descriptors[i].serial);
+				source_index =i;
+			}
+		}
+
+	}
+	// configure our tracker for this frame source
+	bool configured = md->tracker->tracker_configure(md->tracker,&tracker_config);
+
+	if (! configured) {
+		printf("ERROR: tracker rejected frameserver configuration!\n");
+		return false;
+	}
+
+	// tracker is happy - connect our frameserver to our tracker
+	md->frameservers[0]->frameserver_register_frame_callback(md->frameservers[0],md->tracker,md->tracker->tracker_queue);
+
+	//now our chain is set up we can start streaming data through it
+	printf("INFO: frame source path: %s %d x %d format: %d interval: %d\n",descriptors[source_index].name,descriptors[source_index].width,descriptors[source_index].height,descriptors[source_index].format,descriptors[source_index].rate);
+	md->frameservers[0]->frameserver_configure_capture(md->frameservers[0],md->tracker->tracker_get_capture_params(md->tracker));
+	md->frameservers[0]->frameserver_stream_start(md->frameservers[0],&(descriptors[source_index]));
+
+	return true;
+}
+
+
+bool mt_create_stereo_ps4(mt_device_t* md) {
+
+	// TODO - add IMU input source -> filter
+
+	md->frameserver_count=1; // this driver uses a single composite stereo camera source
+	md->frameservers[0] = frameserver_create(FRAMESERVER_TYPE_V4L2);
+	// ask our frameserver for available sources - note this will return a
+	// type-specific struct that we need to deal with e.g. UVC-specific, FFMPEG-specific.
+	uint32_t source_count=0;
+	md->frameservers[0]->frameserver_enumerate_sources(md->frameservers[0],NULL,&source_count);
+	if (source_count == 0){
+		//we have no sources, we cannot continue
+		return false;
+	}
+	v4l2_source_descriptor_t* descriptors = calloc(source_count,sizeof(v4l2_source_descriptor_t));
+	md->frameservers[0]->frameserver_enumerate_sources(md->frameservers[0],descriptors,&source_count);
+	// defer further configuration and stream start until the rest of our chain is set up.
+
+	md->tracker = tracker_create(TRACKER_TYPE_SPHERE_STEREO);
+	tracker_stereo_configuration_t tracker_config = {};
+
+	// configure our PS4 camera when we find it during enumeration
+	uint32_t source_index = 0; // our frameserver config descriptor index - we would have an array for multiple devices
+	for (uint32_t i=0; i< source_count;i++){
+		v4l2_source_descriptor_t s = descriptors[i];
+		if (strcmp(descriptors[i].model,"USB Camera-OV580: USB Camera-OV") == 0 && descriptors[i].format == FORMAT_YUV444_UINT8) {
+			if (descriptors[i].width == 1748 && descriptors[i].height == 408 && descriptors[i].rate == 166666) {
+				tracker_config.l_format = descriptors[i].format;
+				tracker_config.l_source_id = descriptors[i].source_id;
+				snprintf(tracker_config.configuration_filename,128,"PS4_60FPS_stereo_%s",descriptors[i].model);
+
+				//start in calibration mode
+				tracker_config.calibration_mode = CALIBRATION_MODE_CHESSBOARD;
+
+				// set up 50/50 horizontal stereo split - may need to put this in calibration data
+				uint32_t effective_width = descriptors[i].width;
+				if (descriptors[i].crop_width > 0) {
+					effective_width = descriptors[i].crop_width;
+				}
+
+				struct xrt_vec2 ltl = { 0.0f,0.0f };
+				struct xrt_vec2 lbr = { effective_width / 2.0f, descriptors[i].height };
+				struct xrt_vec2 rtl = { effective_width / 2.0f,0.0f };
+				struct xrt_vec2 rbr = { effective_width ,descriptors[i].height};
+
+				tracker_config.l_rect.tl = ltl;
+				tracker_config.l_rect.br = lbr;
+				tracker_config.r_rect.tl = rtl;
+				tracker_config.r_rect.br = rbr;
+
+				tracker_config.split_left = true;
+
+				source_index = i;
+			}
+		}
+
+	}
+	// configure our tracker for this frame source
+	bool configured = md->tracker->tracker_configure(md->tracker,&tracker_config);
+
+	if (! configured) {
+		printf("ERROR: tracker rejected frameserver configuration!\n");
+		return false;
+	}
+
+	// tracker is happy - connect our frameserver to our tracker
+	md->frameservers[0]->frameserver_register_frame_callback(md->frameservers[0],md->tracker,md->tracker->tracker_queue);
+
+	//create a filter for the trackers output
+	opencv_filter_configuration_t filter_config = {};
+	filter_config.measurement_noise_cov =0.1f;
+	filter_config.process_noise_cov =0.1f;
+
+	md->filter = filter_create(FILTER_TYPE_OPENCV_KALMAN);
+	md->filter->filter_configure(md->filter,&filter_config);
+
+	//connect our tracker to our filter
+	md->tracker->tracker_register_measurement_callback(md->tracker,md->filter,md->filter->filter_queue);
+	md->tracker->tracker_register_event_callback(md->tracker,md,mt_handle_event);
+
+	//now our chain is set up we can start streaming data through it
+	printf("INFO: frame source path: %s %d x %d format: %d interval: %d\n",descriptors[source_index].name,descriptors[source_index].width,descriptors[source_index].height,descriptors[source_index].format,descriptors[source_index].rate);
+	md->frameservers[0]->frameserver_configure_capture(md->frameservers[0],md->tracker->tracker_get_capture_params(md->tracker));
+	md->frameservers[0]->frameserver_stream_start(md->frameservers[0],&(descriptors[source_index]));
+
+	return true;
+}
+
 void mt_handle_event(mt_device_t* md, driver_event_t e){
 	switch (e.type){
 	case EVENT_TRACKER_RECONFIGURED:
diff --git a/src/xrt/drivers/montrack/mt_device.h b/src/xrt/drivers/montrack/mt_device.h
index 1d7e46cb6a1b348e8706dcc1df78c2303df2fefb..655be757509791bcbac14031062bcf3ff70438b9 100644
--- a/src/xrt/drivers/montrack/mt_device.h
+++ b/src/xrt/drivers/montrack/mt_device.h
@@ -48,6 +48,8 @@ mt_device_create(char* device_name,bool log_verbose, bool log_debug);
 bool mt_create_mono_ps3eye(mt_device_t* md); //mono blob tracker, ps3 60fps camera
 bool mt_create_mono_c270(mt_device_t* md); //mono blob tracker, logitech 30fps c270 camera
 bool mt_create_stereo_elp(mt_device_t* md); //stereo tracker, ELP 60fps stereo camera
+bool mt_create_osvr_elp(mt_device_t* md); //osvr tracker, ELP 60fps stereo camera
+bool mt_create_stereo_ps4(mt_device_t* md); //stereo tracker, PS4 60fps stereo camera
 
 void mt_handle_event(mt_device_t* md, driver_event_t e);
 
diff --git a/src/xrt/drivers/montrack/mt_prober.c b/src/xrt/drivers/montrack/mt_prober.c
index 673c994445f3679ea8e7a038dcea528e8a8f2379..6c2589ccfe1e5c7785897f38566776f523110172 100644
--- a/src/xrt/drivers/montrack/mt_prober.c
+++ b/src/xrt/drivers/montrack/mt_prober.c
@@ -50,10 +50,11 @@ mt_prober_autoprobe(struct xrt_prober* p)
 	// uvc camera we can use
 
 	//mt_device_t* mtd = mt_device_create("MONO_LOGITECH_C270",true,true);
-	mt_device_t* mtd = mt_device_create("STEREO_ELP_60FPS",true,true);
+	//mt_device_t* mtd = mt_device_create("STEREO_ELP_60FPS",true,true);
 	//mt_device_t* mtd = mt_device_create("MONO_PS3EYE",true,true);
 
 	//mt_device_t* mtd = mt_device_create("STEREO_LOGITECH_C270",true,true);
+	mt_device_t* mtd = mt_device_create("STEREO_PS4_60FPS",true,true);
 
 
 	return &mtd->base;
diff --git a/src/xrt/drivers/montrack/optical_tracking/CMakeLists.txt b/src/xrt/drivers/montrack/optical_tracking/CMakeLists.txt
index bf00ca36bce3b15c76a6bff670d5fd4f335c93dd..f665d6540aa16d079b337d8722909a2d46ca844c 100644
--- a/src/xrt/drivers/montrack/optical_tracking/CMakeLists.txt
+++ b/src/xrt/drivers/montrack/optical_tracking/CMakeLists.txt
@@ -5,6 +5,12 @@ include_directories(
 	${CMAKE_CURRENT_SOURCE_DIR}/../include
 	${CMAKE_CURRENT_SOURCE_DIR}/../auxiliary
 	${CMAKE_CURRENT_SOURCE_DIR}
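+	# TODO: hardcoded developer paths - these should come from a cache variable
+	# or find_package() so the tree builds outside this machine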
+	/home/pblack/Documents/gitlab/UVBI-and-KalmanFramework-Standalone/plugins/unifiedvideoinertialtracker
+	/home/pblack/Documents/gitlab/UVBI-and-KalmanFramework-Standalone/plugins/videotrackershared
+	/home/pblack/Documents/gitlab/UVBI-and-KalmanFramework-Standalone/inc
+	/home/pblack/Documents/gitlab/UVBI-and-KalmanFramework-Standalone/build/src
+	/home/pblack/Documents/gitlab/UVBI-and-KalmanFramework-Standalone/vendor/util-headers
+	/home/pblack/Documents/gitlab/UVBI-and-KalmanFramework-Standalone/vendor/optional-lite-3.2.0/include
 	)
 
 set(OPTICAL_TRACKING_SOURCE_FILES
@@ -19,6 +25,12 @@ set(OPTICAL_TRACKING_SOURCE_FILES
 	tracker3D_sphere_stereo.h
 	tracker3D_sphere_mono.cpp
 	tracker3D_sphere_mono.h
+	tracker3D_osvr_uvbi.cpp
+	tracker3D_osvr_uvbi.h
+	track_psvr.h
+	track_psvr.cpp
 	)
 
 # Use OBJECT to not create a archive, since it just gets in the way.
diff --git a/src/xrt/drivers/montrack/optical_tracking/common/tracker.c b/src/xrt/drivers/montrack/optical_tracking/common/tracker.c
index 06cc657668948a25307b10bb82b14d1a2cd94f9a..de3130db1f0c96c7f5616433bd9e5692da80ab62 100644
--- a/src/xrt/drivers/montrack/optical_tracking/common/tracker.c
+++ b/src/xrt/drivers/montrack/optical_tracking/common/tracker.c
@@ -1,7 +1,8 @@
 #include "tracker.h"
 #include "tracker3D_sphere_mono.h"
 #include "tracker3D_sphere_stereo.h"
-#include "../frameservers/uvc/uvc_frameserver.h"
+#include "tracker3D_osvr_uvbi.h"
+
 #include <string.h>
 #include <sys/ioctl.h>
 #include <sys/signal.h>
@@ -35,6 +36,18 @@ tracker_instance_t* tracker_create(tracker_type_t t) {
 				i->tracker_has_new_poses = tracker3D_sphere_stereo_new_poses;
 				i->tracker_configure = tracker3D_sphere_stereo_configure;
 			    break;
+		    case TRACKER_TYPE_OSVR_UVBI:
+			    i->tracker_type = t;
+				i->internal_instance = tracker3D_osvr_uvbi_create(i);
+				i->tracker_get_capture_params = tracker3D_osvr_uvbi_get_capture_params;
+				i->tracker_get_poses = tracker3D_osvr_uvbi_get_poses;
+				i->tracker_get_debug_frame= tracker3D_osvr_uvbi_get_debug_frame;
+				i->tracker_queue = tracker3D_osvr_uvbi_queue;
+				i->tracker_register_measurement_callback = tracker3D_osvr_uvbi_register_measurement_callback;
+				i->tracker_register_event_callback = tracker3D_osvr_uvbi_register_event_callback;
+				i->tracker_has_new_poses = tracker3D_osvr_uvbi_new_poses;
+				i->tracker_configure = tracker3D_osvr_uvbi_configure;
+			break;
 		    case TRACKER_TYPE_NONE:
 		    default:
 			    free(i);
diff --git a/src/xrt/drivers/montrack/optical_tracking/common/tracker.h b/src/xrt/drivers/montrack/optical_tracking/common/tracker.h
index 0174d9f94db3b3d73d7f88ac23160229151a035f..ae29ab85b0d04af57b39728a75293398a983f5b0 100644
--- a/src/xrt/drivers/montrack/optical_tracking/common/tracker.h
+++ b/src/xrt/drivers/montrack/optical_tracking/common/tracker.h
@@ -25,7 +25,8 @@ typedef struct tracker_measurement {
 typedef enum tracker_type {
 	TRACKER_TYPE_NONE,
 	TRACKER_TYPE_SPHERE_STEREO,
-	TRACKER_TYPE_SPHERE_MONO
+	TRACKER_TYPE_SPHERE_MONO,
+	TRACKER_TYPE_OSVR_UVBI
 } tracker_type_t;
 
 typedef struct tracker_event {
diff --git a/src/xrt/drivers/montrack/optical_tracking/track_psvr.cpp b/src/xrt/drivers/montrack/optical_tracking/track_psvr.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..73c0b3fe837ab7ce01b1c806c29ad69f67ff49aa
--- /dev/null
+++ b/src/xrt/drivers/montrack/optical_tracking/track_psvr.cpp
@@ -0,0 +1,204 @@
+#include "track_psvr.h"
+
+static float dist_3d(xrt_vec3 a, xrt_vec3 b)
+{
+	xrt_vec3 d;
+	d.x = a.x - b.x;
+	d.y = a.y - b.y;
+	d.z = a.z - b.z;
+	return sqrtf(d.x*d.x + d.y*d.y + d.z * d.z);
+}
+
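+//Given the five tracked LEDs (four corners plus the center), fill the slots of
+//a psvr_track_data: the center is the LED with zero x/y sign; the leftmost and
+//rightmost corners anchor the left and right edges, and pairing each with its
+//nearest remaining corner lets upper/lower be resolved by comparing y.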
+bool psvr_disambiguate_5points(std::vector<psvr_led_t>* leds, psvr_track_data* t){
+	//create a list of the corners, ignoring the center
+	std::vector<uint32_t> corner_indices;
+	for (uint32_t i=0;i<leds->size();i++)
+	{
+		psvr_led_t p = leds->at(i);
+		if (p.sign_x == 0 && p.sign_y == 0)
+		{
+			t->translation = p.position;
+			t->confidence[2] = 1;
+			t->positions_3d[2] = p.position;
+			t->l_positions_2d[2] = p.l_position_2d;
+			t->r_positions_2d[2] = p.r_position_2d;
+		}
+		else
+		{
+			corner_indices.push_back(i);
+		}
+	}
+
+	//find the leftmost and rightmost points - these will belong to our left and right side edges.
+
+	float lowest_x=65535.0;
+	float highest_x=-65535.0;
+	uint32_t lowest_x_index=0,highest_x_index=0;
+	for (uint32_t i=0;i< corner_indices.size();i++)
+	{
+		psvr_led_t p = leds->at(corner_indices[i]);
+		if (p.position.x < lowest_x)
+		{
+			lowest_x=p.position.x;
+			lowest_x_index=corner_indices[i];
+		}
+		if (p.position.x > highest_x)
+		{
+			highest_x=p.position.x;
+			highest_x_index=corner_indices[i];
+		}
+	}
+	//printf("lowestX %f lowestXIndex %d highestX %f highestXIndex %d\n",lowestX,lowestXIndex,highestX,highestXIndex);
+	//find the corresponding (closest) point on the 'short side' for the left and right extremities
+
+	float lowest_l_x_distance = 65535.0f;
+	float lowest_h_x_distance = 65535.0f;
+	uint32_t lowest_x_pair_index = 0;
+	uint32_t highest_x_pair_index = 0;
+
+	psvr_led_t lcA = leds->at(lowest_x_index);
+	for (uint32_t i=0; i < corner_indices.size();i++)
+	{
+		psvr_led_t lcB = leds->at(corner_indices[i]);
+		if  (corner_indices[i] != lowest_x_index)
+		{
+			float dist_l_x = dist_3d(lcA.position,lcB.position);
+			if ( dist_l_x < lowest_l_x_distance)
+			{
+				lowest_x_pair_index = corner_indices[i];
+				lowest_l_x_distance = dist_l_x;
+			}
+		}
+	}
+	psvr_led_t hcA = leds->at(highest_x_index);
+	for (uint32_t i=0; i < corner_indices.size();i++)
+	{
+		psvr_led_t hcB = leds->at(corner_indices[i]);
+		if  (corner_indices[i] != highest_x_index)
+		{
+			float dist_h_x = dist_3d(hcA.position,hcB.position);
+			if (dist_h_x < lowest_h_x_distance)
+			{
+				highest_x_pair_index = corner_indices[i];
+				lowest_h_x_distance=dist_h_x;
+			}
+		}
+
+	}
+	//printf("lowestLXDistance %f lowestXPairIndex %d lowestHXDistance %f highestXPairIndex %d\n",lowestLXDistance,lowestXPairIndex,lowestHXDistance,highestXPairIndex);
+
+	//now we have 4 points, and can know which 2 are left and which 2 are right.
+
+	psvr_led_t lA = leds->at(lowest_x_index);
+	psvr_led_t lB = leds->at(lowest_x_pair_index);
+	if (lA.position.y < lB.position.y)
+	{
+		//lA is upper left and lB is lower left
+		t->positions_3d[0] = lA.position;
+		t->l_positions_2d[0]=lA.l_position_2d;
+		t->r_positions_2d[0]=lA.r_position_2d;
+		t->confidence[0] = 1;
+		t->positions_3d[3] = lB.position;
+		t->l_positions_2d[3]=lB.l_position_2d;
+		t->r_positions_2d[3]=lB.r_position_2d;
+		t->confidence[3] = 1;
+
+	}
+	else
+	{
+		//lA is lower left and lB is upper left
+		t->positions_3d[0] = lB.position;
+		t->l_positions_2d[0]=lB.l_position_2d;
+		t->r_positions_2d[0]=lB.r_position_2d;
+		t->confidence[0] = 1;
+
+		t->positions_3d[3] = lA.position;
+		t->l_positions_2d[3]=lA.l_position_2d;
+		t->r_positions_2d[3]=lA.r_position_2d;
+		t->confidence[3] = 1;
+
+	}
+
+	psvr_led_t hA = leds->at(highest_x_index);
+	psvr_led_t hB = leds->at(highest_x_pair_index);
+	if (hA.position.y < hB.position.y)
+	{
+		//hA is upper right and rB is lower right
+		//hA is upper right and hB is lower right
+		t->l_positions_2d[1]=hA.l_position_2d;
+		t->r_positions_2d[1]=hA.r_position_2d;
+
+		t->confidence[1] = 1;
+		t->positions_3d[4] = hB.position;
+		t->l_positions_2d[4]=hB.l_position_2d;
+		t->r_positions_2d[4]=hB.r_position_2d;
+		t->confidence[4] = 1;
+	}
+	else
+	{
+		//hA is lower right and hB is upper right
+		t->positions_3d[1] = hB.position;
+		t->l_positions_2d[1]=hB.l_position_2d;
+		t->r_positions_2d[1]=hB.r_position_2d;
+		t->confidence[1] = 1;
+		t->positions_3d[4] = hA.position;
+		t->l_positions_2d[4]=hA.l_position_2d;
+		t->r_positions_2d[4]=hA.r_position_2d;
+		t->confidence[4] = 1;
+	}
+	return true;
+}
+
+/*//TODO: we don't need to pass a TrackData* here
+bool psvr_compute_svd()
+{
+	//compute SVD for the points we have found, assuming we have at least 3 points
+
+	uint8_t pointCount=0;
+	for (uint32_t i=0;i<MAX_POINTS;i++)
+	{
+		if (t->confidence[i] > 0)
+		{
+			pointCount++;
+		}
+	}
+
+	if (pointCount > 2)
+	{
+		cv::Mat measurement(pointCount, 3, cv::DataType<float>::type);
+		cv::Mat model(pointCount, 3, cv::DataType<float>::type);
+		cv::Mat xCovar;
+		uint8_t c = 0;
+		for (uint32_t i=0;i<MAX_POINTS;i++)
+		{
+			if (t->confidence[i] > 0)
+			{
+				measurement.at<float>(c,0) = t->positions[i].x;
+				measurement.at<float>(c,1) = t->positions[i].y;
+				measurement.at<float>(c,2) = t->positions[i].z;
+				model.at<float>(c,0) = ledPositions[i].x;
+				model.at<float>(c,1) = ledPositions[i].y;
+				model.at<float>(c,2) = ledPositions[i].z;
+				c++;
+			}
+		}
+
+		// create our cross-covariance matrix
+		cv::transpose(model,model);
+		xCovar =  model * measurement;
+		cv::Mat w,u,v,ut;
+		decomposer->compute(xCovar,w,u,v);
+		cv::transpose(u,ut);
+		//TODO: compute determinant
+		cv::Mat rot = v * ut;
+		glm::mat3 glmRot;
+		memcpy((void*)&(glmRot[0][0]),rot.data, 9 * sizeof(float));
+		glm::mat3 tRot = glm::transpose(glmRot);
+		t->rotationMatrix = glm::mat4(tRot);
+		//cout << "R = "<< endl << " "  << rotationMatrix << endl << endl;
+		return true;
+	}
+	else
+	{
+		return false;
+	}
+}*/
+
diff --git a/src/xrt/drivers/montrack/optical_tracking/track_psvr.h b/src/xrt/drivers/montrack/optical_tracking/track_psvr.h
new file mode 100644
index 0000000000000000000000000000000000000000..ac94dda7e078616fcff881b48fc88f9c2df990d2
--- /dev/null
+++ b/src/xrt/drivers/montrack/optical_tracking/track_psvr.h
@@ -0,0 +1,62 @@
+#ifndef TRACK_PSVR_H
+#define TRACK_PSVR_H
+
+#include <xrt/xrt_defines.h>
+#include "common/tracked_object.h"
+#include "common/tracker.h"
+#include "opencv2/opencv.hpp"
+
+#define NUM_LEDS 9
+
+static const char* LED_LABELS[]={"LU","RU","C","LL","RL","LS","RS","LB","RB"};
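+//label order matches the positions_3d slots below - presumably left/right
+//upper, center, left/right lower, left/right side, left/right back LEDs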
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+typedef struct psvr_track_data
+{
+	uint64_t timestamp;
+	xrt_vec3 positions_3d[NUM_LEDS]; //x,y,z position for up to (currently) 9 points - LU,RU,C,LL,RL,LS,RS,LB,RB
+	xrt_vec2 l_positions_2d[NUM_LEDS]; //2d positions in left and right images
+	xrt_vec2 r_positions_2d[NUM_LEDS];
+	int8_t confidence[NUM_LEDS]; //-1 if point is not tracked, TODO: 0-128 for confidence
+	xrt_matrix_4x4 rotation_matrix; // SVD-fitted head rotation matrix
+	xrt_vec3 translation; // head translation
+} psvr_track_data_t;
+
+
+typedef struct point_dist
+{
+	uint32_t index;
+	xrt_vec3 translation;
+	float length;
+	xrt_vec3 scaled_translation;
+	float scaled_length;
+} point_dist_t;
+
+typedef struct psvr_led
+{
+	xrt_vec2 l_position_2d;
+	xrt_vec2 r_position_2d;
+	float radius;
+	cv::Point2f distance;
+	xrt_vec3 position;
+	xrt_vec3 scaled_position;
+	std::vector<point_dist> point_dists;
+	float rms_distance;
+	int sign_x;
+	int sign_y;
+} psvr_led_t;
+
+bool psvr_disambiguate_5points(std::vector<psvr_led_t>* leds, psvr_track_data* t);
+
+//bool psvr_compute_svd();
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //TRACK_PSVR_H
diff --git a/src/xrt/drivers/montrack/optical_tracking/tracker3D_osvr_uvbi.cpp b/src/xrt/drivers/montrack/optical_tracking/tracker3D_osvr_uvbi.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..eef55a3da6ab48aedb6e8dac81a5df2f3ca3facc
--- /dev/null
+++ b/src/xrt/drivers/montrack/optical_tracking/tracker3D_osvr_uvbi.cpp
@@ -0,0 +1,107 @@
+#include "tracker3D_osvr_uvbi.h"
+#include "TrackingSystem.h"
+#include "TrackingDebugDisplay.h"
+#include "opencv4/opencv2/opencv.hpp"
+
+typedef struct tracker3D_osvr_uvbi_instance {
+	bool configured;
+	measurement_consumer_callback_func measurement_target_callback;
+	void* measurement_target_instance; //where we send our measurements
+	event_consumer_callback_func event_target_callback;
+	void* event_target_instance; //where we send our events
+	tracker_mono_configuration_t configuration;
+	osvr::vbtracker::TrackingSystem* system;
+	osvr::vbtracker::TrackedBody* hmd;
+	osvr::vbtracker::TrackingDebugDisplay* debug;
+	cv::Mat frame_gray;
+	cv::Mat debug_rgb;
+	bool alloced_frames;
+	osvr::vbtracker::CameraParameters camera_params;
+	osvr::util::time::TimeValue current_time;
+} tracker3D_osvr_uvbi_instance_t;
+
+
+
+capture_parameters_t tracker3D_osvr_uvbi_get_capture_params(tracker_instance_t* inst) {
+	capture_parameters_t cp ={};
+	cp.exposure = 0.5f;
+	cp.gain = 0.5f;
+	return cp;
+}
+
+tracker3D_osvr_uvbi_instance_t* tracker3D_osvr_uvbi_create(tracker_instance_t* inst) {
+
+	tracker3D_osvr_uvbi_instance_t* i = (tracker3D_osvr_uvbi_instance_t*)calloc(1,sizeof(tracker3D_osvr_uvbi_instance_t));
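+	//NB: this struct contains C++ members (cv::Mat etc.) - calloc skips their
+	//constructors and relies on zero-filled memory behaving as an empty Mat;
+	//plain new would be safer here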
+	if (i) {
+		osvr::vbtracker::ConfigParams cp;
+		i->system= new osvr::vbtracker::TrackingSystem(cp);
+		i->debug = new osvr::vbtracker::TrackingDebugDisplay(cp);
+		i->debug_rgb = cv::Mat(480,1280,CV_8UC3,cv::Scalar(0,0,0));
+		i->camera_params = osvr::vbtracker::getSimulatedHDKCameraParameters();
+		i->hmd = i->system->createTrackedBody();
+		i->alloced_frames = false;
+		return i;
+	}
+	return NULL;
+}
+
+
+bool tracker3D_osvr_uvbi_get_debug_frame(tracker_instance_t* inst,frame_t* frame){
+	tracker3D_osvr_uvbi_instance_t* internal = (tracker3D_osvr_uvbi_instance_t*)inst->internal_instance;
+	cv::Mat rgbFrame;
+	cv::cvtColor(internal->frame_gray,rgbFrame,CV_GRAY2BGR);
+	cv::Mat osvr_debug = internal->debug->createStatusImage(*(internal->system),internal->camera_params,rgbFrame);
+
+	osvr_debug.copyTo(internal->debug_rgb);
+
+
+	frame->format = FORMAT_RGB_UINT8;
+	frame->width = internal->debug_rgb.cols;
+	frame->stride = internal->debug_rgb.cols * format_bytes_per_pixel(frame->format);
+	frame->height = internal->debug_rgb.rows;
+	frame->data = internal->debug_rgb.data;
+	frame->size_bytes = frame_size_in_bytes(frame);
+
+	return true;
+}
+bool tracker3D_osvr_uvbi_queue(tracker_instance_t* inst,frame_t* frame){
+	tracker3D_osvr_uvbi_instance_t* internal = (tracker3D_osvr_uvbi_instance_t*)inst->internal_instance;
+	printf("received frame, tracking!\n");
+	if (!internal->alloced_frames)
+	{
+		internal->frame_gray = cv::Mat(frame->height,frame->stride,CV_8UC1,cv::Scalar(0,0,0));
+		internal->debug_rgb = cv::Mat(frame->height,frame->width,CV_8UC3,cv::Scalar(0,0,0));
+		internal->alloced_frames =true;
+	}
+	//we will just 'do the work' here.
+	//TODO: asynchronous tracker thread
+
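+	//TODO: current_time is never advanced from its zero-initialised value -
+	//it should be stamped from the incoming frame before calling processFrame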
+	memcpy(internal->frame_gray.data,frame->data,frame->size_bytes);
+	internal->system->processFrame(internal->current_time,internal->frame_gray,internal->frame_gray,internal->camera_params);
+	tracker_send_debug_frame(inst); //publish our debug frame
+	return true;
+}
+bool tracker3D_osvr_uvbi_get_poses(tracker_instance_t* inst,tracked_object_t* objects,uint32_t* count){
+	return false;
+}
+bool tracker3D_osvr_uvbi_new_poses(tracker_instance_t* inst){
+	return false;
+}
+bool tracker3D_osvr_uvbi_configure(tracker_instance_t* inst, tracker_mono_configuration_t* config){
+	tracker3D_osvr_uvbi_instance_t*  internal = (tracker3D_osvr_uvbi_instance_t*)inst->internal_instance;
+	//return false if we cannot handle this config
+
+	if (config->format != FORMAT_Y_UINT8) {
+		internal->configured=false;
+		return false;
+	}
+	internal->configuration = *config;
+	internal->configured=true;
+	return true;
+}
+void tracker3D_osvr_uvbi_register_measurement_callback (tracker_instance_t* inst, void* target_instance, measurement_consumer_callback_func target_func){
+
+}
+void tracker3D_osvr_uvbi_register_event_callback (tracker_instance_t* inst, void* target_instance, event_consumer_callback_func target_func){
+
+}
diff --git a/src/xrt/drivers/montrack/optical_tracking/tracker3D_osvr_uvbi.h b/src/xrt/drivers/montrack/optical_tracking/tracker3D_osvr_uvbi.h
new file mode 100644
index 0000000000000000000000000000000000000000..32b473af2ab8a7a20ba8eead7fda2c70865776d9
--- /dev/null
+++ b/src/xrt/drivers/montrack/optical_tracking/tracker3D_osvr_uvbi.h
@@ -0,0 +1,39 @@
+#ifndef TRACKER3D_OSVR_UVBI_H
+#define TRACKER3D_OSVR_UVBI_H
+
+#include <xrt/xrt_defines.h>
+#include "common/tracked_object.h"
+#include "common/tracker.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+//forward declare this
+typedef struct tracker3D_osvr_uvbi_instance tracker3D_osvr_uvbi_instance_t;
+
+
+tracker3D_osvr_uvbi_instance_t* tracker3D_osvr_uvbi_create(tracker_instance_t* inst);
+bool tracker3D_osvr_uvbi_destroy(tracker_instance_t* inst);
+
+capture_parameters_t tracker3D_osvr_uvbi_get_capture_params(tracker_instance_t* inst);
+
+bool tracker3D_osvr_uvbi_get_debug_frame(tracker_instance_t* inst,frame_t* frame);
+bool tracker3D_osvr_uvbi_queue(tracker_instance_t* inst,frame_t* frame);
+bool tracker3D_osvr_uvbi_get_poses(tracker_instance_t* inst,tracked_object_t* objects,uint32_t* count);
+bool tracker3D_osvr_uvbi_new_poses(tracker_instance_t* inst);
+bool tracker3D_osvr_uvbi_configure(tracker_instance_t* inst, tracker_mono_configuration_t* config);
+void tracker3D_osvr_uvbi_register_measurement_callback (tracker_instance_t* inst, void* target_instance, measurement_consumer_callback_func target_func);
+void tracker3D_osvr_uvbi_register_event_callback (tracker_instance_t* inst, void* target_instance, event_consumer_callback_func target_func);
+
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif // TRACKER3D_OSVR_UVBI_H
diff --git a/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_stereo.cpp b/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_stereo.cpp
index ac8043cfc37eb8abac839e8b71dd08c32e7c7b5c..97b2c01abc3d92316c13dbed8c8170597b2e2c91 100644
--- a/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_stereo.cpp
+++ b/src/xrt/drivers/montrack/optical_tracking/tracker3D_sphere_stereo.cpp
@@ -2,6 +2,8 @@
 #include "opencv4/opencv2/opencv.hpp"
 #include "common/opencv_utils.hpp"
 
+#include "track_psvr.h"
+
 #include <sys/stat.h>
 #include <linux/limits.h>
 
@@ -60,14 +62,11 @@ typedef struct tracker3D_sphere_stereo_instance {
 	cv::Mat l_undistort_map_y;
 	cv::Mat r_undistort_map_x;
 	cv::Mat r_undistort_map_y;
+
 	cv::Mat l_rectify_map_x;
 	cv::Mat l_rectify_map_y;
 	cv::Mat r_rectify_map_x;
 	cv::Mat r_rectify_map_y;
-	cv::Mat l_rev_rectify_map_x;
-	cv::Mat l_rev_rectify_map_y;
-	cv::Mat r_rev_rectify_map_x;
-	cv::Mat r_rev_rectify_map_y;
 
 
 	//calibration data structures
@@ -254,12 +253,12 @@ bool tracker3D_sphere_stereo_queue(tracker_instance_t* inst,frame_t* frame) {
 
 bool tracker3D_sphere_stereo_track(tracker_instance_t* inst){
 
-	printf("tracking...\n");
+	//printf("tracking...\n");
 	tracker3D_sphere_stereo_instance_t* internal = (tracker3D_sphere_stereo_instance_t*)inst->internal_instance;
 	internal->l_keypoints.clear();
 	internal->r_keypoints.clear();
 
-
+	//DEBUG: dump all our planes out for inspection
 	/*cv::imwrite("/tmp/l_out_y.jpg",internal->l_frame_gray);
 	cv::imwrite("/tmp/r_out_y.jpg",internal->r_frame_gray);
 	cv::imwrite("/tmp/l_out_u.jpg",internal->l_frame_u);
@@ -268,6 +267,9 @@ bool tracker3D_sphere_stereo_track(tracker_instance_t* inst){
 	cv::imwrite("/tmp/r_out_v.jpg",internal->r_frame_v);
 */
 
+	//disabled channel combining in favour of using the v plane directly - Y is
+	//2x the resolution, so we will want to switch back to it eventually
+
 	//combine our yuv channels to isolate blue leds - y channel is all we will use from now on
 	//cv::subtract(internal->l_frame_u,internal->l_frame_v,internal->l_frame_u);
 	//cv::subtract(internal->r_frame_u,internal->r_frame_v,internal->r_frame_u);
@@ -295,14 +297,9 @@ bool tracker3D_sphere_stereo_track(tracker_instance_t* inst){
 	cv::remap( r_frame_undist,internal->r_frame_gray, internal->r_rectify_map_x, internal->r_rectify_map_y, cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0) );
 
 
-	//DEBUG: output channels to disk
-	//cv::imwrite("/tmp/l_out_y.jpg",internal->l_frame_gray);
-	//cv::imwrite("/tmp/r_out_y.jpg",internal->r_frame_gray);
+	//block-match for disparity calculation - disabled
 
-
-	//block-match for disparity calculation
-
-	cv::Mat disp,disp8;
+	cv::Mat disp;
 	cv::Ptr<cv::StereoBM> sbm = cv::StereoBM::create(128,5);
 	sbm->setNumDisparities(64);
 	   sbm->setBlockSize(5);
@@ -318,6 +315,8 @@ bool tracker3D_sphere_stereo_track(tracker_instance_t* inst){
 //	cv::normalize(disp, disp8, 0.1, 255, CV_MINMAX, CV_8UC1);
 
 
+	//disabled background subtraction for now
+
 	//internal->background_subtractor->apply(internal->l_frame_gray,internal->l_mask_gray);
 	//internal->background_subtractor->apply(internal->r_frame_gray,internal->r_mask_gray);
 
@@ -336,8 +335,11 @@ bool tracker3D_sphere_stereo_track(tracker_instance_t* inst){
 	cv::threshold(internal->l_frame_gray,internal->l_frame_gray,32.0,255.0,0);
 	cv::threshold(internal->r_frame_gray,internal->r_frame_gray,32.0,255.0,0);
 
-	cv::cvtColor(internal->l_frame_gray,disp8,CV_GRAY2BGR);
-	disp8.copyTo(internal->debug_rgb);
+	//TODO: handle source images larger than debug_rgb
+	cv::Mat debug_img;
+	cv::cvtColor(internal->l_frame_gray,debug_img,CV_GRAY2BGR);
+	cv::Mat dst_roi = internal->debug_rgb(cv::Rect(0, 0, debug_img.cols, debug_img.rows));
+	debug_img.copyTo(dst_roi);
 
 
 	tracker_measurement_t m = {};
@@ -527,8 +529,8 @@ bool tracker3D_sphere_stereo_calibrate(tracker_instance_t* inst){
 	//clear our debug image
 	cv::rectangle(internal->debug_rgb, cv::Point2f(0,0),cv::Point2f(internal->debug_rgb.cols,internal->debug_rgb.rows),cv::Scalar( 0,0,0 ),-1,0);
 	cv::Mat disp8;
-	cv::cvtColor(internal->l_frame_gray,disp8,CV_GRAY2BGR);
-	disp8.copyTo(internal->debug_rgb);
+	cv::cvtColor(internal->r_frame_gray,disp8,CV_GRAY2BGR);
+	//disp8.copyTo(internal->debug_rgb);
 	// we will collect samples continuously - the user should be able to wave a
 	// chessboard around randomly while the system calibrates.. we only add a
 	// sample when it increases the coverage area substantially, to give the solver
@@ -656,6 +658,14 @@ bool tracker3D_sphere_stereo_calibrate(tracker_instance_t* inst){
 	cv::putText(internal->debug_rgb,"CALIBRATION MODE",cv::Point2i(160,240),0,1.0f,cv::Scalar(192,192,192));
 	cv::putText(internal->debug_rgb,message,cv::Point2i(160,460),0,0.5f,cv::Scalar(192,192,192));
 
+	//DEBUG: write out our image planes to confirm imagery is arriving as expected
+	/*cv::imwrite("/tmp/l_out_y.jpg",internal->l_frame_gray);
+	cv::imwrite("/tmp/r_out_y.jpg",internal->r_frame_gray);
+	cv::imwrite("/tmp/l_out_u.jpg",internal->l_frame_u);
+	cv::imwrite("/tmp/r_out_u.jpg",internal->r_frame_u);
+	cv::imwrite("/tmp/l_out_v.jpg",internal->l_frame_v);
+	cv::imwrite("/tmp/r_out_v.jpg",internal->r_frame_v);
+*/
 	tracker_send_debug_frame(inst);
 	printf("calibrating f end\n");
 	return true;
diff --git a/src/xrt/targets/openxr/CMakeLists.txt b/src/xrt/targets/openxr/CMakeLists.txt
index e9abe9f7e8c34e065f66fd143c72432f0cc3547a..d6d7d357b5aaf5489e9921484289403c839f12b7 100644
--- a/src/xrt/targets/openxr/CMakeLists.txt
+++ b/src/xrt/targets/openxr/CMakeLists.txt
@@ -93,6 +93,9 @@ target_link_libraries(${RUNTIME_TARGET}
 	/usr/local/lib64/libopencv_features2d.so
 	/usr/local/lib64/libopencv_video.so
 	libjpeg.so
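+	# TODO: hardcoded developer paths to the UVBI libraries - make these
+	# configurable so the target links outside this machine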
+	/home/pblack/Documents/gitlab/build-UVBI-and-KalmanFramework-Standalone-Desktop-Debug/plugins/unifiedvideoinertialtracker/libuvbi-core.so
+	/home/pblack/Documents/gitlab/build-UVBI-and-KalmanFramework-Standalone-Desktop-Debug/plugins/unifiedvideoinertialtracker/libuvbi_plugin_parts.so
+
 	)
 
 target_compile_definitions(${RUNTIME_TARGET} PRIVATE XRT_HAVE_OHMD)