diff --git a/client/CMakeLists.txt b/client/CMakeLists.txt index 9f0eba4ac2634759521b4c0422487caced477f2d..e128188207f0bedce8a628c49da120b62d963140 100644 --- a/client/CMakeLists.txt +++ b/client/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 2.8) +cmake_minimum_required(VERSION 3.1) project (blender_client) @@ -33,6 +33,7 @@ option(WITH_CLIENT_CUDA_CPU_STAT2_LB "Enable build for CUDA CPU STAT" OF option(WITH_CLIENT_CUDA_CPU_STAT3 "Enable build for CUDA CPU STAT" OFF) option(WITH_SOCKET_UDP "Enable UDP" OFF) +option(WITH_SOCKET_ONLY_DATA "Enable SOCKET_ONLY_DATA" OFF) option(WITH_CLIENT_MPI_SOCKET "Enable MPI_SOCKET" OFF) option(WITH_CLIENT_MPI "Enable MPI" OFF) option(WITH_CLIENT_FILE "Enable FILE" OFF) @@ -40,7 +41,8 @@ option(WITH_CLIENT_FILE_MMAP "Enable FILE" OFF) option(WITH_CLIENT_MPI_FILE "Enable FILE+MPI" OFF) option(WITH_CLIENT_RENDERENGINE "Enable RENDERENGINE" OFF) option(WITH_CLIENT_RENDERENGINE_VR "Enable RENDERENGINE_VR" OFF) -option(WITH_CLIENT_RENDERENGINE_VRCLIENT "Enable RENDERENGINE_VRCLIENT" OFF) +#option(WITH_CLIENT_RENDERENGINE_VRCLIENT "Enable RENDERENGINE_VRCLIENT" OFF) +option(WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB "Enable RENDERENGINE_ULTRAGRID" OFF) option(WITH_CLIENT_RENDERENGINE_EMULATE "Enable RENDERENGINE" OFF) option(WITH_CLIENT_UNIMEM "Enable build for UNIMEM" OFF) @@ -100,6 +102,7 @@ option(WITH_VRCLIENT_VRGSTREAM "Enable build for VRCLIENT_VRGSTREAM" OFF) option(WITH_VRCLIENT "Enable build for VRCLIENT" OFF) option(WITH_OPENXR_SIMPLE "Enable build for OPENXR" OFF) option(WITH_VRCLIENT_OPENVR "Enable build for OPENVR" OFF) +option(WITH_VRCLIENT_GLSTEREO "Enable build for GLSTEREO" OFF) set(WITH_OPENMP ON) set(CLIENT_SUFFIX "") @@ -161,11 +164,6 @@ if(ENABLE_LOAD_BALANCEv3) set(ENABLE_LOAD_BALANCE ON) endif() -if(WITH_CLIENT_RENDERENGINE_VRCLIENT) - set(WITH_CLIENT_RENDERENGINE ON) - set(WITH_CLIENT_RENDERENGINE_VR ON) -endif() - #message(${CMAKE_CXX_COMPILER_ID}) if(CMAKE_CXX_COMPILER_ID MATCHES "Intel") @@ -299,11 +297,16 @@ endif() # Subdirectories if (WITH_CLIENT_CUDA) - #add_subdirectory(cycles_cuda) + add_subdirectory(cycles_cuda) endif() if (WITH_CLIENT_RENDERENGINE) - add_subdirectory(renderengine) + #add_subdirectory(glew) + add_subdirectory(glew-mx) + set(GLEW_INCLUDE_DIR "" CACHE PATH "") + set(GLEW_LIBRARIES "" CACHE FILEPATH "") + add_subdirectory(renderengine) + endif() if (WITH_CLIENT_VRGSTREAM) diff --git a/client/cycles/CMakeLists.txt b/client/cycles/CMakeLists.txt index 4332728b867323aae4b1971efb2e2d389ab2f53c..47df016e54fb723c6a186f5804d20681bdff69ec 100644 --- a/client/cycles/CMakeLists.txt +++ b/client/cycles/CMakeLists.txt @@ -254,6 +254,10 @@ if(WITH_SOCKET_UDP) add_definitions(-DWITH_SOCKET_UDP) endif() +if(WITH_SOCKET_ONLY_DATA) + add_definitions(-DWITH_SOCKET_ONLY_DATA) +endif() + if(CLIENT_MPI_LOAD_BALANCING_SAMPLES) add_definitions(-DCLIENT_MPI_LOAD_BALANCING_SAMPLES) endif() diff --git a/client/cycles/cycles_file.cpp b/client/cycles/cycles_file.cpp index 699b20b23063524698ad47c0f5f178c55c8ad78a..fb1eb46000bebebddfeeda8f39dec1dedf0c5eec 100644 --- a/client/cycles/cycles_file.cpp +++ b/client/cycles/cycles_file.cpp @@ -90,7 +90,7 @@ bool displayFPS(int type) double currentTime = omp_get_wtime(); g_frameCount[type]++; - if (currentTime - g_previousTime[type] >= 1.0) { + if (currentTime - g_previousTime[type] >= 3.0) { # pragma omp critical printf("Sending: FPS: %.2f \n", @@ -588,8 +588,10 @@ bool file_cam_change(client_kernel_struct &data) bool file_image_size_change(client_kernel_struct &data) { int *size = 
file_get_image_size(); + int num_samples = file_get_image_sample(); return data.client_path_trace_data.tile_w != size[0] || - data.client_path_trace_data.tile_h != size[1]; + data.client_path_trace_data.tile_h != size[1] || + data.client_path_trace_data.num_samples != num_samples; } #endif @@ -1912,9 +1914,15 @@ void file_path_trace_socket(client_kernel_struct &data) while (true) { if (buf_new) { if (data.world_rank == 0) { - file_socket_step_cam(data); - file_socket_step_cam_ack(data, -1); - file_cam_change(data); + # pragma omp parallel num_threads(2) + { + int tid = omp_get_thread_num(); + if(tid==0) { + file_socket_step_cam(data); + file_socket_step_cam_ack(data, -1); + file_cam_change(data); + } + } data.client_path_trace_data.num_samples = file_get_image_sample(); @@ -1986,15 +1994,21 @@ void file_path_trace_socket(client_kernel_struct &data) int flag_exit = 0; int last_state = 0; - int pix_state[5]; - pix_state[0] = 2; // state buff A - pix_state[1] = 2; // state buff B + int pix_state[10]; + pix_state[0] = 0; // state buff A + pix_state[1] = 0; // state buff B pix_state[2] = 0; // buf_reset pix_state[3] = data.client_path_trace_data.start_sample; pix_state[4] = 0; // buf_new, change resolution omp_set_nested(1); + omp_lock_t *lock0 = (omp_lock_t*)&pix_state[5]; + omp_lock_t *lock1 = (omp_lock_t*)&pix_state[7]; + + omp_init_lock(lock0); + omp_init_lock(lock1); + bool do_exit = false; # pragma omp parallel num_threads(2) @@ -2029,9 +2043,13 @@ void file_path_trace_socket(client_kernel_struct &data) if (tid == 0) { if (data.world_rank == 0) { + int used_buffer = 0; while (!do_exit) { - if (pix_state[0] == 2) { + if (used_buffer == 0) { + omp_set_lock(lock0); + + //printf("send1: pix_state[0] = 2, %f\n", omp_get_wtime()); fflush(0); // pix_state[0] = 1; // memcpy(&pixels[0], pixels_node1, pix_size); pixels = pixels_node1; @@ -2040,9 +2058,12 @@ void file_path_trace_socket(client_kernel_struct &data) t = omp_get_wtime(); // pix_state[1] = 0; -# pragma omp flush +//# pragma omp flush } - else if (pix_state[1] == 2) { + else if (used_buffer == 1) { + omp_set_lock(lock1); + + //printf("send1: pix_state[1] = 2, %f\n", omp_get_wtime()); fflush(0); // pix_state[1] = 1; // memcpy(&pixels[0], pixels_node2, pix_size); pixels = pixels_node2; @@ -2051,7 +2072,7 @@ void file_path_trace_socket(client_kernel_struct &data) t = omp_get_wtime(); // pix_state[0] = 0; -# pragma omp flush +//# pragma omp flush } else { if (!do_exit) { @@ -2180,7 +2201,7 @@ void file_path_trace_socket(client_kernel_struct &data) if (cesnet_is_required_exit()) { pix_state[4] = 1; -# pragma omp flush +//# pragma omp flush break; } } @@ -2228,23 +2249,31 @@ void file_path_trace_socket(client_kernel_struct &data) int tile_w = data.client_path_trace_data.tile_w; int tile_h = data.client_path_trace_data.tile_h; -# ifdef WITH_CLIENT_RENDERENGINE_VRCLIENT - kernel::socket_send_data_data(pixels, tile_w * tile_h, false); - kernel::socket_send_data_data(pixels + pix_size / 2, tile_w* tile_h); -# else - kernel::socket_send_data_data(pixels, tile_w * tile_h); +# ifdef WITH_CLIENT_RENDERENGINE_VR + tile_w *= 2; # endif + + int *ps = (int*)pixels; + ps[0] = samples; + + kernel::socket_send_data_data(pixels, tile_h * tile_w + tile_h * tile_w / 2); + double t4 = omp_get_wtime(); CLIENT_DEBUG_PRINTF2("send: pix:%f, \n", t4 - t3); # else double t3 = omp_get_wtime(); -# ifdef WITH_CLIENT_RENDERENGINE_VRCLIENT - kernel::socket_send_data_data(pixels, pix_size / 2, false); - kernel::socket_send_data_data(pixels + pix_size / 2, pix_size / 2); -# 
else +//# ifdef WITH_CLIENT_RENDERENGINE_VRCLIENT +// //kernel::socket_send_data_data(pixels, pix_size / 2, false); +// //kernel::socket_send_data_data(pixels + pix_size / 2, pix_size / 2); +// kernel::socket_send_data_data(pixels, pix_size); +// +//# else + int *ps = (int*)pixels; + ps[0] = samples; + kernel::socket_send_data_data(pixels, pix_size); -# endif +//# endif double t4 = omp_get_wtime(); CLIENT_DEBUG_PRINTF2("send: pix:%f, \n", t4 - t3); # endif @@ -2276,14 +2305,13 @@ void file_path_trace_socket(client_kernel_struct &data) if (buf_reset) { file_socket_step_data(data); pix_state[2] = 1; -# pragma omp flush +//# pragma omp flush } buf_new = file_image_size_change(data); if (buf_new) { pix_state[4] = 1; -# pragma omp flush - +//# pragma omp flush file_socket_step_cam_ack(data, 0); break; @@ -2291,12 +2319,19 @@ void file_path_trace_socket(client_kernel_struct &data) file_socket_step_cam_ack(data, -1); - if (pix_state[0] == 2) { - pix_state[0] = 0; + if (used_buffer == 0/*pix_state[0] == 2*/) { + //pix_state[0] = 0; + //printf("send2: pix_state[0] = 0, %f\n", omp_get_wtime()); fflush(0); + omp_unset_lock(lock0); } - else if (pix_state[1] == 2) { - pix_state[1] = 0; + else if (used_buffer==1/*pix_state[1] == 2*/) { + //pix_state[1] = 0; + //printf("send2: pix_state[1] = 0, %f\n", omp_get_wtime()); fflush(0); + omp_unset_lock(lock1); } + used_buffer++; + if(used_buffer>1) + used_buffer=0; } } } @@ -2308,10 +2343,17 @@ void file_path_trace_socket(client_kernel_struct &data) kernel::cuda_host_free("pixels_node", NULL, pixels_node, 2); # endif + omp_destroy_lock(lock0); + omp_destroy_lock(lock1); + if (!buf_new) break; + + } + } + } #endif diff --git a/client/cycles/kernel_cuda_stream.cpp b/client/cycles/kernel_cuda_stream.cpp index 08d93046f7c8ecb45bcb563a5c4a339277d0424b..fea00fbc9f44f55b632b5e4c4cb27f4ce730c6d9 100644 --- a/client/cycles/kernel_cuda_stream.cpp +++ b/client/cycles/kernel_cuda_stream.cpp @@ -238,9 +238,9 @@ const char *cuewErrorString(CUresult result) void check_exit() { -# ifndef _WIN32 +//# ifndef _WIN32 exit(-1); -# endif +//# endif } // exit(-1); @@ -3336,6 +3336,9 @@ void cuda_path_trace_internal(int numDevice, //#endif int *pix_state = (int *)signal_value; + omp_lock_t *lock0 = (omp_lock_t*)&pix_state[5]; + omp_lock_t *lock1 = (omp_lock_t*)&pix_state[7]; + //#if defined(WITH_CLIENT_ULTRAGRID) // //pixels_node2 = (char *)pixels_node2 + pix_size; // stride *= 2; @@ -3457,7 +3460,8 @@ void cuda_path_trace_internal(int numDevice, wtile.w = tile_w; wtile.offset = offset; wtile.stride = stride; -#ifdef WITH_CLIENT_ULTRAGRID +# if defined(WITH_CLIENT_RENDERENGINE_VR) || \ + (defined(WITH_CLIENT_ULTRAGRID) && !defined(WITH_CLIENT_RENDERENGINE)) wtile.stride*=2; if (id < devices_left_eye) { }else{ @@ -3555,28 +3559,28 @@ void cuda_path_trace_internal(int numDevice, dev_buffer = (map_buffer_bin != 0) ? (float *)scope.get().cuda_mem_map[map_buffer_bin].device_pointer : 0; -#ifdef WITH_CLIENT_ULTRAGRID +//#ifdef WITH_CLIENT_ULTRAGRID dev_buffer_size = scope.get().cuda_mem_map[map_buffer_bin].size; -#else - dev_buffer_size = scope.get().cuda_mem_map[map_buffer_bin].size / 2; -#endif +//#else +// dev_buffer_size = scope.get().cuda_mem_map[map_buffer_bin].size / 2; +//#endif cuda_const_copy_internal("__data", &cuda_kernel_data[0], cuda_kernel_data.size(), scope); } else { // dev_buffer_size = scope.get().cuda_mem_map[map_buffer_bin].size / 2; -#ifdef WITH_CLIENT_ULTRAGRID +//#ifdef WITH_CLIENT_ULTRAGRID dev_buffer = (map_buffer_bin != 0) ? 
(float *)scope.get().cuda_mem_map[map_buffer_bin].device_pointer : 0; dev_buffer_size = scope.get().cuda_mem_map[map_buffer_bin].size; -#else - dev_buffer = (float *)((char *)scope.get().cuda_mem_map[map_buffer_bin].device_pointer + - scope.get().cuda_mem_map[map_buffer_bin].size / 2); - - dev_buffer_size = scope.get().cuda_mem_map[map_buffer_bin].size / 2; -#endif +//#else +// dev_buffer = (float *)((char *)scope.get().cuda_mem_map[map_buffer_bin].device_pointer + +// scope.get().cuda_mem_map[map_buffer_bin].size / 2); +// +// dev_buffer_size = scope.get().cuda_mem_map[map_buffer_bin].size / 2; +//#endif // dev_buffer = (map_buffer_bin != 0) ? // (float *)scope.get().cuda_mem_map[map_buffer_bin].device_pointer : // 0; @@ -3596,14 +3600,49 @@ void cuda_path_trace_internal(int numDevice, cuda_const_copy_internal("__data", &cuda_kernel_data[0], cuda_kernel_data.size(), scope); # endif /////////////////////////////////////////////// + int used_buffer = 0; while (true) { if (pix_state[4] == 1) { break; } - else if (pix_state[0] == 0) { + else if (/*(pix_state[0] == 0*/used_buffer == 0) { + +#pragma omp master + { + omp_set_lock(lock0); + } + //printf("rend1: pix_state[0] = 0, %f\n", omp_get_wtime()); fflush(0); + // pix_state[1] = 1; + //# pragma omp flush + + dev_pixels_node = (DEVICE_PTR)dev_pixels_node1; +# ifdef WITH_OPTIX_DENOISER + dev_pixels_node_denoised = (DEVICE_PTR)dev_pixels_node1_denoised; +# endif + pixels_node = pixels_node1; + stream_path = scope.get().stream[STREAM_PATH1]; + stream_memcpy = scope.get().stream[STREAM_PATH1_MEMCPY]; + stream_memcpy_id = STREAM_PATH1_MEMCPY; + // host_fn_memcpy_status_flag = memcpy_status_flag1; + + event_path_start = scope.get().event[STREAM_PATH1]; + event_path_stop = scope.get().event[STREAM_PATH1 + STREAM_COUNT]; + + event_memcpy_start = scope.get().event[STREAM_PATH1_MEMCPY]; + event_memcpy_stop = scope.get().event[STREAM_PATH1_MEMCPY + STREAM_COUNT]; + + time_path = &scope.get().running_time[STREAM_PATH1]; + time_memcpy = &scope.get().running_time[STREAM_PATH1_MEMCPY]; + } + else if (/*pix_state[1] == 0*/ used_buffer == 1) { + //printf("rend1: pix_state[1] = 0, %f\n", omp_get_wtime()); fflush(0); //pix_state[0] = 1; //# pragma omp flush + #pragma omp master + { + omp_set_lock(lock1); + } dev_pixels_node = (DEVICE_PTR)dev_pixels_node2; # ifdef WITH_OPTIX_DENOISER @@ -3624,31 +3663,9 @@ void cuda_path_trace_internal(int numDevice, time_path = &scope.get().running_time[STREAM_PATH2]; time_memcpy = &scope.get().running_time[STREAM_PATH2_MEMCPY]; } - else if (pix_state[1] == 0) { - //pix_state[1] = 1; -//# pragma omp flush - - dev_pixels_node = (DEVICE_PTR)dev_pixels_node1; -# ifdef WITH_OPTIX_DENOISER - dev_pixels_node_denoised = (DEVICE_PTR)dev_pixels_node1_denoised; -# endif - pixels_node = pixels_node1; - stream_path = scope.get().stream[STREAM_PATH1]; - stream_memcpy = scope.get().stream[STREAM_PATH1_MEMCPY]; - stream_memcpy_id = STREAM_PATH1_MEMCPY; - // host_fn_memcpy_status_flag = memcpy_status_flag1; - - event_path_start = scope.get().event[STREAM_PATH1]; - event_path_stop = scope.get().event[STREAM_PATH1 + STREAM_COUNT]; - - event_memcpy_start = scope.get().event[STREAM_PATH1_MEMCPY]; - event_memcpy_stop = scope.get().event[STREAM_PATH1_MEMCPY + STREAM_COUNT]; - - time_path = &scope.get().running_time[STREAM_PATH1]; - time_memcpy = &scope.get().running_time[STREAM_PATH1_MEMCPY]; - } else { - //usleep(100); + usleep(100); + # pragma omp flush continue; } @@ -3661,8 +3678,8 @@ void cuda_path_trace_internal(int numDevice, 
&cuda_kernel_data[0], cuda_kernel_data.size(), cudaMemcpyHostToDevice)); - - cuda_assert(cudaMemset((CU_DEVICE_PTR)dev_buffer, 0, dev_buffer_size)); + if (!scope.get().cuda_mem_map[map_buffer_bin].uni_mem || id == 0) + cuda_assert(cudaMemset((CU_DEVICE_PTR)dev_buffer, 0, dev_buffer_size)); } else { cuda_assert(cudaMemcpy((CU_DEVICE_PTR)scope.get().kerneldata, @@ -3670,7 +3687,8 @@ void cuda_path_trace_internal(int numDevice, cuda_kernel_data_right.size(), cudaMemcpyHostToDevice)); - cuda_assert(cudaMemset((CU_DEVICE_PTR)dev_buffer, 0, dev_buffer_size)); + if (!scope.get().cuda_mem_map[map_buffer_bin].uni_mem || id == 0) + cuda_assert(cudaMemset((CU_DEVICE_PTR)dev_buffer, 0, dev_buffer_size)); } # else cuda_assert(cudaMemcpy((CU_DEVICE_PTR)scope.get().kerneldata, @@ -3678,11 +3696,14 @@ void cuda_path_trace_internal(int numDevice, cuda_kernel_data.size(), cudaMemcpyHostToDevice)); - cuda_assert(cudaMemset((CU_DEVICE_PTR)dev_buffer, 0, dev_buffer_size)); + if (!scope.get().cuda_mem_map[map_buffer_bin].uni_mem || id == 0) + cuda_assert(cudaMemset((CU_DEVICE_PTR)dev_buffer, 0, dev_buffer_size)); # endif - if (id == 0) { + //if (id == 0) +# pragma omp master + { pix_state[2] = 0; -# pragma omp flush +//# pragma omp flush start_sample = start_sample2; end_sample = end_sample2; } @@ -3735,15 +3756,16 @@ void cuda_path_trace_internal(int numDevice, //# endif ///////////////////////////////////////////////////////////////////////////////////////////// -#if defined(WITH_CLIENT_RENDERENGINE_VR) //|| (defined(WITH_CLIENT_ULTRAGRID) && !defined(WITH_CLIENT_RENDERENGINE)) - if (id < devices_left_eye) { - // pixels_node = pixels_node; - } - else { - pixels_node = (char *)pixels_node + pix_size; - dev_pixels_node = (DEVICE_PTR)((char*)dev_pixels_node + pix_size); - } -# endif +//#if defined(WITH_CLIENT_RENDERENGINE_VR) //|| (defined(WITH_CLIENT_ULTRAGRID) && !defined(WITH_CLIENT_RENDERENGINE)) +// if (id < devices_left_eye) { +// // pixels_node = pixels_node; +// } +// else { +// pixels_node = (char *)pixels_node + pix_size; +// dev_pixels_node = (DEVICE_PTR)((char*)dev_pixels_node + pix_size); +// } +//# endif +# pragma omp barrier ///////////////////////////////////////////////////////////////////////////////////////////// /* Launch kernel. 
*/ @@ -3799,7 +3821,7 @@ void cuda_path_trace_internal(int numDevice, //#ifndef WITH_CUDA_BUFFER_MANAGED // // transfer to CPU -// cuda_assert(cudaEventRecord(event_memcpy_start, stream_path)); + //cuda_assert(cudaEventRecord(event_memcpy_start, stream_path)); //#endif # ifdef WITH_OPTIX_DENOISER @@ -3934,7 +3956,7 @@ void cuda_path_trace_internal(int numDevice, # endif - // cuda_assert(cudaStreamSynchronize(stream_path)); + //cuda_assert(cudaStreamSynchronize(stream_path)); # ifdef WITH_CUDA_STAT if (scope.get().path_stat_is_done == 0) { @@ -4018,22 +4040,16 @@ void cuda_path_trace_internal(int numDevice, # pragma omp barrier /////////////////////////////////////////////////////////////////////// - if (id == 0) { + //if (id == 0) +# pragma omp master + { //cuda_displayFPS(0); g_report_time += (omp_get_wtime() - t_id); g_report_time_count++; - if (dev_pixels_node == (DEVICE_PTR)dev_pixels_node1) { - pix_state[0] = 2; -# pragma omp flush - } - else { - pix_state[1] = 2; -# pragma omp flush - } # if defined(ENABLE_LOAD_BALANCE) || defined(ENABLE_LOAD_BALANCEv2) || \ !defined(WITH_CLIENT_PATHTRACER2) - if (scope.get().path_stat_is_done == 0 || g_report_time >= 1.0) { + if (scope.get().path_stat_is_done == 0 || g_report_time >= 3.0) { # else if (scope.get().path_stat_is_done > 0) { # endif @@ -4050,12 +4066,14 @@ void cuda_path_trace_internal(int numDevice, min_time = cuda_devices[id1].running_time[STREAM_PATH1]; } avg_time += cuda_devices[id1].running_time[STREAM_PATH1]; +#if 0 printf("%d: Time = %f [ms], %d-%d, %d\n", id1, cuda_devices[id1].running_time[STREAM_PATH1], cuda_devices[id1].wtile.y, cuda_devices[id1].wtile.h, end_sample); +#endif } else { if (max_time < cuda_devices[id1].running_time[STREAM_PATH2]) { @@ -4065,25 +4083,29 @@ void cuda_path_trace_internal(int numDevice, min_time = cuda_devices[id1].running_time[STREAM_PATH2]; } avg_time += cuda_devices[id1].running_time[STREAM_PATH2]; +#if 0 printf("%d: Time = %f [ms], %d-%d, %d\n", id1, cuda_devices[id1].running_time[STREAM_PATH2], cuda_devices[id1].wtile.y, cuda_devices[id1].wtile.h, end_sample); +#endif } } - printf("Rendering: FPS: %.2f, MaxTime = %f [ms], AvgTime = %f [ms], Samples: %d\n", + printf("Rendering: FPS: %.2f, MaxTime = %f [ms], AvgTime = %f [ms], Samples: %d, buf: %d\n", 1000.0f / max_time, max_time, avg_time / (double)devices_size, - end_sample); + end_sample, + (dev_pixels_node == (DEVICE_PTR)dev_pixels_node1) ? 
0 : 1); +#if 0 printf("OMP: FPS: %.2f: Time = %f [ms], %f [ms]\n", 1.0f / (omp_get_wtime() - t_id), (omp_get_wtime() - t_id) * 1000.0f, (t_id2 - t_id) * 1000.0f); - +#endif g_report_time = 0; g_report_time_count = 0; @@ -4096,7 +4118,7 @@ void cuda_path_trace_internal(int numDevice, } pix_state[3] = end_sample; -# pragma omp flush +//# pragma omp flush ///////////////////////SAMPLES//////////////////////// # ifdef ENABLE_INC_SAMPLES @@ -4109,7 +4131,7 @@ void cuda_path_trace_internal(int numDevice, ///////////////////////LB//////////////////////// # if defined(ENABLE_LOAD_BALANCE) || defined(ENABLE_LOAD_BALANCEv2) - if (scope.get().path_stat_is_done > 1) { + if (scope.get().path_stat_is_done > 0) { # ifdef ENABLE_LOAD_BALANCEv3 for (int id1 = 0; id1 < devices_size - 1; id1++) { float time1 = 0; @@ -4141,7 +4163,7 @@ void cuda_path_trace_internal(int numDevice, # else for (int id1 = 0; id1 < devices_size - 1; id1++) { -#ifdef WITH_CLIENT_ULTRAGRID +#if defined(WITH_CLIENT_RENDERENGINE_VR) || (defined(WITH_CLIENT_ULTRAGRID) && !defined(WITH_CLIENT_RENDERENGINE)) if (id1==devices_left_eye-1) continue; #endif @@ -4156,12 +4178,12 @@ void cuda_path_trace_internal(int numDevice, time2 = cuda_devices[id1 + 1].running_time[STREAM_PATH2]; } - if (time1 < time2) { + if (time1 < time2 && cuda_devices[id1 + 1].wtile.h > 2) { cuda_devices[id1].wtile.h++; cuda_devices[id1 + 1].wtile.y++; cuda_devices[id1 + 1].wtile.h--; } - else if (time1 > time2) { + else if (time1 > time2 && cuda_devices[id1].wtile.h > 2) { cuda_devices[id1].wtile.h--; cuda_devices[id1 + 1].wtile.y--; cuda_devices[id1 + 1].wtile.h++; @@ -4253,6 +4275,35 @@ void cuda_path_trace_internal(int numDevice, */ scope.get().path_stat_is_done++; +//# pragma omp master + { + if (/*pix_state[0] == 0*/ used_buffer == 0) { +// pix_state[0] = 2; +// pix_state[1] = 0; +// //printf("rend2: pix_state[0] = 2, %f\n", omp_get_wtime()); fflush(0); +//# pragma omp flush + #pragma omp master + { + omp_unset_lock(lock0); + //omp_unset_nest_lock(lock0); + } + } + else if (/*pix_state[1] == 0*/used_buffer == 1) { +// pix_state[1] = 2; +// pix_state[0] = 0; +// //printf("rend2: pix_state[1] = 2, %f\n", omp_get_wtime()); fflush(0); +//# pragma omp flush + #pragma omp master + { + omp_unset_lock(lock1); + } + } + + used_buffer++; + if (used_buffer>1) + used_buffer = 0; + } + # pragma omp barrier // if (scope.get().path_stat_is_done > 10) // break; @@ -4557,7 +4608,9 @@ void cuda_path_trace_lb(int numDevice, # pragma omp barrier /////////////////////////////////////////////////////////////////////// - if (id == 0) { + //if (id == 0) +# pragma omp master + { std::string client_tiles = ""; double total_time = 0; double avg_time = 0; diff --git a/client/glew-mx/CMakeLists.txt b/client/glew-mx/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..90f82f9c64d62197468dd496a1787608625e2a51 --- /dev/null +++ b/client/glew-mx/CMakeLists.txt @@ -0,0 +1,44 @@ +# ***** BEGIN GPL LICENSE BLOCK ***** +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +# The Original Code is Copyright (C) 2014, Blender Foundation +# All rights reserved. +# ***** END GPL LICENSE BLOCK ***** + +set(INC + . + ../../intern/glew-mx/intern + ../../intern/glew-mx/ + ${GLEW_INCLUDE_DIR} +) + +set(SRC + ../../intern/glew-mx/intern/glew-mx.c + ../../intern/glew-mx/glew-mx.h + ../../intern/glew-mx/intern/gl-deprecated.h + ../../intern/glew-mx/intern/symbol-binding.h +) + +include_directories(${INC}) + +add_definitions(-DWITH_LEGACY_OPENGL) +add_definitions(-DGLEW_STATIC) +add_definitions(-DGLEW_NO_ES) +add_definitions(-DGLEW_NO_GLU) + +add_library(glew_mx STATIC ${SRC}) + +install (TARGETS glew_mx DESTINATION lib) diff --git a/client/glew/CMakeLists.txt b/client/glew/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..6b1e14a0c52aba45899d1f47c94d9fad90e04d19 --- /dev/null +++ b/client/glew/CMakeLists.txt @@ -0,0 +1,49 @@ +set(INC + . + ../../extern/glew/include +) + +set(SRC + ../../extern/glew/src/glew.c + + ../../extern/glew/include/GL/eglew.h + ../../extern/glew/include/GL/glew.h + ../../extern/glew/include/GL/glxew.h + ../../extern/glew/include/GL/wglew.h +) + +set(SRC_HEADERS +) + +if(WITH_OPENMP) + add_definitions(-DWITH_OPENMP) +endif() + + list(APPEND GL_DEFINITIONS -DGLEW_STATIC) + + # This won't affect the non-experimental glew library, + # but is used for conditional compilation elsewhere. + list(APPEND GL_DEFINITIONS -DGLEW_NO_ES) +list(APPEND GL_DEFINITIONS -DGLEW_NO_GLU) + +add_definitions(${GL_DEFINITIONS}) + +# avoid noisy warnings +if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_C_COMPILER_ID MATCHES "Clang") + + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-strict-prototypes") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-strict-prototypes") + +endif() + +if(MSVC) + + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /Os") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Os") +endif() + +include_directories(${INC}) + +add_library(extern_glew STATIC ${SRC} ${SRC_HEADERS}) + +install (TARGETS extern_glew DESTINATION lib) diff --git a/client/renderengine/addon/cyclesphi/__init__.py b/client/renderengine/addon/cyclesphi/__init__.py index 975e6760f876fd7a76d3498b778f059248ec7484..5af1b3d9bee2e154163fb4ccf455fbfb7db376f1 100644 --- a/client/renderengine/addon/cyclesphi/__init__.py +++ b/client/renderengine/addon/cyclesphi/__init__.py @@ -13,17 +13,17 @@ bl_info = { "name" : "CyclesPhi", - "author" : "Milan Jaros", + "author" : "Milan Jaros, Petr Strakos", "description" : "", "blender" : (2, 80, 0), - "version" : (0, 0, 1), + "version" : (0, 0, 2), "location" : "", "warning" : "", "category" : "Render" } import bpy -import bgl +#import bgl import time import sys @@ -47,7 +47,7 @@ from ctypes import cdll from dataclasses import dataclass -from ctypes import Array, cdll, c_void_p, c_char, c_char_p, c_int, c_int32, c_float, c_bool, c_ulong, POINTER +from ctypes import Array, cdll, c_void_p, c_char, c_char_p, c_int, c_int32, c_uint32, c_float, c_bool, c_ulong, POINTER if sys.platform == 'linux': gl = cdll.LoadLibrary('libGL.so') @@ -57,6 +57,8 @@ elif sys.platform == 'darwin': else: gl = ctypes.windll.opengl32 +gl.glViewport.argtypes = [c_int32, c_int32, c_uint32, c_uint32] + # platform specific library loading if sys.platform == 'linux': @@ -67,9 +69,17 @@ elif sys.platform == 'darwin': else: _cyclesphi_dll = 
cdll.LoadLibrary('cyclesphi_renderengine.dll') -# CYCLESPHI_EXPORT_DLL void CYCLESPHI_EXPORT_STD resize(unsigned width, unsigned height); + +render_callback_type = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int) + +_cyclesphi_dll.register_render_callback.argtypes = [c_void_p] + +# CYCLESPHI_EXPORT_DLL void CYCLESPHI_EXPORT_STD resize(unsigned width, unsigned height) _cyclesphi_dll.resize.argtypes = [c_int32, c_int32] +_cyclesphi_dll.cuda_gl_map_buffer.argtypes = [c_uint32] +_cyclesphi_dll.cuda_gl_unmap_buffer.argtypes = [c_uint32] + # CYCLESPHI_EXPORT_DLL int CYCLESPHI_EXPORT_STD main_loop(); #_cyclesphi_dll.main_loop.restype = c_int32 # CYCLESPHI_EXPORT_DLL int CYCLESPHI_EXPORT_STD recv_pixels_data(); @@ -80,6 +90,8 @@ _cyclesphi_dll.send_cam_data.restype = c_int32 # CYCLESPHI_EXPORT_DLL void CYCLESPHI_EXPORT_STD client_init(const char *server, int port_cam, int port_data, int w, int h, int step_samples); _cyclesphi_dll.client_init.argtypes = [c_char_p, c_int32, c_int32, c_int32, c_int32, c_int32] +_cyclesphi_dll.get_DWIDTH.restype = c_int32 + # CYCLESPHI_EXPORT_DLL void CYCLESPHI_EXPORT_STD client_close(); # CYCLESPHI_EXPORT_DLL void CYCLESPHI_EXPORT_STD get_camera(float *cameratoworld); @@ -99,6 +111,10 @@ _cyclesphi_dll.client_init.argtypes = [c_char_p, c_int32, c_int32, c_int32, c_in _cyclesphi_dll.set_camera.argtypes = [c_void_p, c_float, c_float, c_float, c_float, c_float, c_int, c_float, c_float, c_float, c_int, c_float, c_float] _cyclesphi_dll.set_camera_right.argtypes = [c_void_p, c_float, c_float, c_float, c_float, c_float, c_int, c_float, c_float, c_float, c_int, c_float, c_float] +_cyclesphi_dll.set_camera_ug.argtypes = [c_float,c_float,c_float,c_float,c_float,c_float,c_float,c_float,c_float,c_float,c_float] + +_cyclesphi_dll.set_camera_right_ug.argtypes = [c_float,c_float,c_float,c_float,c_float,c_float,c_float,c_float,c_float,c_float,c_float] + # CYCLESPHI_EXPORT_DLL int CYCLESPHI_EXPORT_STD get_samples(); _cyclesphi_dll.get_samples.restype = c_int32 _cyclesphi_dll.get_current_samples.restype = c_int32 @@ -110,7 +126,7 @@ _cyclesphi_dll.get_current_samples.restype = c_int32 _cyclesphi_dll.get_pixels.argtypes = [c_void_p] _cyclesphi_dll.get_pixels_right.argtypes = [c_void_p] -_cyclesphi_dll.is_vr.restype = c_int32 +_cyclesphi_dll.get_renderengine_type.restype = c_int32 #------------------------------------------------------------------- import bpy @@ -123,161 +139,17 @@ from bpy.props import IntProperty, CollectionProperty, BoolProperty, StringPrope from bpy.types import Panel, UIList, Operator from mathutils import Vector, Matrix from math import pi, sqrt, sin, cos, pow, atan, tan -from bgl import * -import openvr - -class OpenVrUserProp: - vr_system = None - pre_handle_3d = None - post_handle_3d = None - compositor = None - poses = None - textureL = None - textureR = None - - openVrGlRenderer = None - areaL = None - areaR = None - - camSet = False - -class OpenVrFramebuffer: - "Framebuffer for rendering one eye" - - def __init__(self): - self.fb = 0 - self.depth_buffer = 0 - self.texture_id = 0 - #self.camobj = None - - def init_gl(self, context): - # Set render textures - texture_ids = Buffer(GL_INT,1) - glGenTextures(1, texture_ids) - self.texture_id = int(texture_ids[0]) - glBindTexture(GL_TEXTURE_2D, self.texture_id) - - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR) - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR) - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE) - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, 
GL_CLAMP_TO_EDGE) - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0) - - self.w, self.h = context.scene.openvr_user_prop.vr_system.getRecommendedRenderTargetSize() - self.channels = 4 - - bgl.glTexImage2D( - bgl.GL_TEXTURE_2D, 0, bgl.GL_RGBA, - self.w, self.h, 0, - bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, - bgl.Buffer(bgl.GL_BYTE, [self.w, self.h, self.channels]) - ) - - glBindTexture(GL_TEXTURE_2D, 0) - - # OpenVR texture data - self.texture = openvr.Texture_t() - self.texture.handle = self.texture_id - self.texture.eType = openvr.TextureType_OpenGL - self.texture.eColorSpace = openvr.ColorSpace_Gamma - - def getWidth(self): - return self.w - - def getHeight(self): - return self.h - - def submit(self, eye, im: np.array): - glBindTexture(GL_TEXTURE_2D, self.texture_id) - #glCopyTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 0, 0, self.getWidth(), self.getHeight(), 0) - gl.glTexSubImage2D( - bgl.GL_TEXTURE_2D, 0, - 0, 0, self.w, self.h, - bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, - ctypes.c_void_p(im.ctypes.data) - ) - #Submit_LensDistortionAlreadyApplied = ENUM_VALUE_TYPE(1) - - #err = openvr.VRCompositor().submit(eye, self.texture, None, openvr.Submit_GlRenderBuffer) - err = openvr.VRCompositor().submit(eye, self.texture) - #if not err == 0: # and not err == 108: - # print('submit:', err) - -class OpenVrGlRenderer: - "Renders to virtual reality headset using OpenVR and OpenGL APIs" - - def __init__(self): - self.left_fb = None - self.right_fb = None - #self.renderAction = 0 #0 - pose, 1 - submit left, 2 - submit right - - def init_gl(self, context): - "allocate OpenGL resources" - self.left_fb = OpenVrFramebuffer() - self.right_fb = OpenVrFramebuffer() - self.left_fb.init_gl(context) - self.right_fb.init_gl(context) - - def post_render_scene_left(self, im: np.array): - #if self.renderAction == 1: - self.left_fb.submit(openvr.Eye_Left, im) - #self.renderAction = 2 - - def post_render_scene_right(self, im: np.array): - #if self.renderAction == 2: - self.right_fb.submit(openvr.Eye_Right, im) - #self.renderAction = 0 - - def getWidth(self): - return self.left_fb.getWidth() - - def getHeight(self): - return self.left_fb.getHeight() - - -def hmdMatrix34_t2Bmatrix(mat): - return Matrix(((-mat.m[0][0], -mat.m[0][1], -mat.m[0][2], -mat.m[0][3]), - (mat.m[2][0], mat.m[2][1], mat.m[2][2], mat.m[2][3]), - (mat.m[1][0], mat.m[1][1], mat.m[1][2], mat.m[1][3]), - (0.0, 0.0, 0.0, 1.0))) - -def hmdMatrix342Bmatrix(mat): - return Matrix(((mat.m[0][0], mat.m[0][1], mat.m[0][2], mat.m[0][3]), - (mat.m[1][0], mat.m[1][1], mat.m[1][2], mat.m[1][3]), - (mat.m[2][0], mat.m[2][1], mat.m[2][2], mat.m[2][3]), - (0.0, 0.0, 0.0, 1.0))) - -def hmdMatrix442Bmatrix(mat): - return Matrix(((mat.m[0][0], mat.m[0][1], mat.m[0][2], mat.m[0][3]), - (mat.m[1][0], mat.m[1][1], mat.m[1][2], mat.m[1][3]), - (mat.m[2][0], mat.m[2][1], mat.m[2][2], mat.m[2][3]), - (mat.m[3][0], mat.m[3][1], mat.m[3][2], mat.m[3][3]))) - -def hmdMatrix44_t2Bmatrix(mat): - return Matrix(((-mat.m[0][0], -mat.m[0][1], -mat.m[0][2], -mat.m[0][3]), - (mat.m[2][0], mat.m[2][1], mat.m[2][2], mat.m[2][3]), - (mat.m[1][0], mat.m[1][1], mat.m[1][2], mat.m[1][3]), - (mat.m[3][0], mat.m[3][1], mat.m[3][2], mat.m[3][3]))) - -def addPositionAndScale(context, matrix): - #matrix[0][3] = matrix[0][3] * context.scene.openvr_position_scale_x + context.scene.openvr_position_x - #matrix[1][3] = matrix[1][3] * context.scene.openvr_position_scale_y + context.scene.openvr_position_y - #matrix[2][3] = matrix[2][3] * context.scene.openvr_position_scale_z + context.scene.openvr_position_z 
- - return matrix - -def addScale(context, matrix): - #matrix[0][3] = matrix[0][3] * context.scene.openvr_positions[1].x #+ context.scene.openvr_positions[0].x - #matrix[1][3] = matrix[1][3] * context.scene.openvr_positions[1].y #+ context.scene.openvr_positions[0].y - #matrix[2][3] = matrix[2][3] * context.scene.openvr_positions[1].z #+ context.scene.openvr_positions[0].z - - return matrix -#------------------------------------------------------------------- - +#from bgl import * # def get_user_settings(): # return bpy.context.preferences.addons['cyclesphi'].preferences.settings +def check_gl_error(): + error = bgl.glGetError() + if error != bgl.GL_NO_ERROR: + raise Exception(error) +############################################# + class CyclesPhiServerSettings(bpy.types.PropertyGroup): port_cam: bpy.props.IntProperty( name="Port Cam", @@ -310,42 +182,43 @@ class CyclesPhiRenderSettings(bpy.types.PropertyGroup): server_settings: bpy.props.PointerProperty(type=CyclesPhiServerSettings) sampling_settings: bpy.props.PointerProperty(type=CyclesPhiSamplingSettings) -class GLTexture: - channels = 4 +# class GLTexture: +# channels = 4 - def __init__(self, width, height): - self.width = width - self.height = height +# def __init__(self, width, height): +# self.width = width +# self.height = height - textures = bgl.Buffer(bgl.GL_INT, [1,]) - bgl.glGenTextures(1, textures) - self.texture_id = textures[0] +# textures = bgl.Buffer(bgl.GL_INT, [1,]) +# bgl.glGenTextures(1, textures) +# self.texture_id = textures[0] - bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.texture_id) - bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MIN_FILTER, bgl.GL_LINEAR) - bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MAG_FILTER, bgl.GL_LINEAR) - bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_WRAP_S, bgl.GL_REPEAT) - bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_WRAP_T, bgl.GL_REPEAT) +# bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.texture_id) +# bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MIN_FILTER, bgl.GL_LINEAR) +# bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MAG_FILTER, bgl.GL_LINEAR) +# bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_WRAP_S, bgl.GL_REPEAT) +# bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_WRAP_T, bgl.GL_REPEAT) - bgl.glTexImage2D( - bgl.GL_TEXTURE_2D, 0, bgl.GL_RGBA, - self.width, self.height, 0, - bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, - bgl.Buffer(bgl.GL_BYTE, [self.width, self.height, self.channels]) - ) +# bgl.glTexImage2D( +# bgl.GL_TEXTURE_2D, 0, bgl.GL_RGBA, +# self.width, self.height, 0, +# bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, +# bgl.Buffer(bgl.GL_BYTE, [self.width, self.height, self.channels]) +# ) + +# def __del__(self): +# textures = bgl.Buffer(bgl.GL_INT, [1, ], [self.texture_id, ]) +# bgl.glDeleteTextures(1, textures) + +# def set_image(self, im: np.array): +# bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.texture_id) +# gl.glTexSubImage2D( +# bgl.GL_TEXTURE_2D, 0, +# 0, 0, self.width, self.height, +# bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, +# ctypes.c_void_p(im.ctypes.data) +# ) - def __del__(self): - textures = bgl.Buffer(bgl.GL_INT, [1, ], [self.texture_id, ]) - bgl.glDeleteTextures(1, textures) - - def set_image(self, im: np.array): - bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.texture_id) - gl.glTexSubImage2D( - bgl.GL_TEXTURE_2D, 0, - 0, 0, self.width, self.height, - bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, - ctypes.c_void_p(im.ctypes.data) - ) class CyclesPhiContext: channels = 4 @@ -364,20 +237,6 @@ class CyclesPhiContext: def init(self, context, 
server, port_cam, port_data, width, height, step_samples): - if _cyclesphi_dll.is_vr() == True: - if context.scene.openvr_user_prop.vr_system is None: - context.scene.openvr_user_prop.vr_system = openvr.init(openvr.VRApplication_Scene) - context.scene.openvr_user_prop.compositor = openvr.VRCompositor() - - context.scene.openvr_user_prop.openVrGlRenderer = OpenVrGlRenderer() - context.scene.openvr_user_prop.openVrGlRenderer.init_gl(context) - - poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount - context.scene.openvr_user_prop.poses = poses_t() - - height = context.scene.openvr_user_prop.openVrGlRenderer.getHeight() - width = context.scene.openvr_user_prop.openVrGlRenderer.getWidth() - self.server = server self.port_cam = port_cam self.port_data = port_data @@ -393,34 +252,36 @@ class CyclesPhiContext: def client_init(self): _cyclesphi_dll.client_init(self.server.encode(), self.port_cam, self.port_data, self.width, self.height, self.step_samples) - def close(self): - if _cyclesphi_dll.is_vr() == True: - openvr.shutdown() - context.scene.openvr_user_prop.vr_system = None - _cyclesphi_dll.client_close() - + self.g_width = self.width + self.g_height = self.height + self.DWIDTH = _cyclesphi_dll.get_DWIDTH() + + #GLuint g_bufferIds[3]; # IDs of PBO + self.g_bufferIds = bgl.Buffer(bgl.GL_INT, 3) + #GLuint g_textureIds[3]; # ID of texture + self.g_textureIds = bgl.Buffer(bgl.GL_INT, 3) + + self.setupTexture(2) + + check_gl_error() def client_close(self): _cyclesphi_dll.client_close() - def resize(self, width, height): - #self.init(self.server, self.port_cam, self.port_data, width, height, self.step_samples) - pass + # def resize(self, width, height): + # #self.init(self.server, self.port_cam, self.port_data, width, height, self.step_samples) + # pass + # + + def register_render_callback(self, rc): + _cyclesphi_dll.register_render_callback(rc) def render(self, restart=False, tile=None): - #send_cam_data_result = _cyclesphi_dll.send_cam_data() - #if send_cam_data_result == 0: - # return 0 - _cyclesphi_dll.recv_pixels_data() return 1 - - # def send_cam_data(self): - # _cyclesphi_dll.send_cam_data() - def set_camera(self, camera_dataL, camera_dataR): transformL = np.array(camera_dataL.transform, dtype=np.float32) transformR = np.array(camera_dataR.transform, dtype=np.float32) @@ -453,6 +314,42 @@ class CyclesPhiContext: camera_dataR.shift_x, camera_dataR.shift_y) + ss = camera_dataL.sensor_size[0] + if camera_dataL.use_view_camera == 0: + ss = -ss + + _cyclesphi_dll.set_camera_ug( + float(camera_dataL.focal_length), + float(ss), + float(camera_dataL.view_camera_zoom), + float(camera_dataL.shift_x), + float(camera_dataL.pos[0]), + float(camera_dataL.pos[1]), + float(camera_dataL.pos[2]), + float(camera_dataL.quat[1]), + float(camera_dataL.quat[2]), + float(camera_dataL.quat[3]), + float(camera_dataL.quat[0]) + ) + + ss = camera_dataR.sensor_size[0] + if camera_dataR.use_view_camera == 0: + ss = -ss + + _cyclesphi_dll.set_camera_right_ug( + float(camera_dataR.focal_length), + float(ss), + float(camera_dataR.view_camera_zoom), + float(camera_dataR.shift_x), + float(camera_dataR.pos[0]), + float(camera_dataR.pos[1]), + float(camera_dataR.pos[2]), + float(camera_dataR.quat[1]), + float(camera_dataR.quat[2]), + float(camera_dataR.quat[3]), + float(camera_dataR.quat[0]) + ) + # cam_thread = threading.Thread(target=self.send_cam_data) # cam_thread.start() @@ -467,29 +364,297 @@ class CyclesPhiContext: def get_current_samples(self): return _cyclesphi_dll.get_current_samples() - # def 
get_data(self): - # #FrameBufferGetInfo(self, FRAMEBUFFER_DATA, self.size(), ffi.cast('float*', data.ctypes.data), ffi.NULL) - # _cyclesphi_dll.get_pixels(ctypes.c_void_p(self.data.ctypes.data)) - # return self.data + def toOrtho(self, eye, width, height): + # set viewport to be the entire window + gl.glViewport(0, 0, int(width), int(height)) + + return + + # set orthographic viewing frustum + gl.glMatrixMode(0x1701) #gl.GL_PROJECTION) + gl.glLoadIdentity() + + if eye == 2: + gl.glOrtho(0, 1, 0, 1, -1, 1) + + if eye == 0: + gl.glOrtho(0, 0.5, 0, 1, -1, 1) + + if eye == 1: + gl.glOrtho(0.5, 1, 0, 1, -1, 1) + + # switch to modelview matrix in order to set scene + gl.glMatrixMode(0x1700) #bgl.GL_MODELVIEW) + gl.glLoadIdentity() + + def freeTexture(self, eye): + bgl.glDeleteTextures(1, self.g_textureIds[eye]) + if eye == 0 or eye == 1: + bgl.glDeleteBuffers(1, self.g_bufferIds[eye]) + + if eye == 2: + #cuda_assert(cudaGLUnmapBufferObject(g_bufferIds[eye])) + #cuda_assert(cudaGLUnregisterBufferObject(g_bufferIds[eye])) + _cyclesphi_dll.cuda_gl_umap_buffer(self.g_bufferIds[eye]) + + bgl.glDeleteFramebuffers(1, self.g_bufferIds[eye]) + + + # Setup Texture + def setupTexture(self, eye): + #if 1 + #bgl.GLuint pboIds[1] # IDs of PBO + pboIds = bgl.Buffer(bgl.GL_INT, [1,]) + #bgl.GLuint textureIds[1] # ID of texture + textureIds = bgl.Buffer(bgl.GL_INT, [1,]) + + # init 2 texture objects + bgl.glGenTextures(1, textureIds) + self.g_textureIds[eye] = textureIds[0] + + bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.g_textureIds[eye]) + bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MIN_FILTER, bgl.GL_NEAREST) + bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MAG_FILTER, bgl.GL_NEAREST) + bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_WRAP_S, bgl.GL_REPEAT) + bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_WRAP_T, bgl.GL_REPEAT) + + #ifdef WITH_CLIENT_YUV + #glTexImage2D( + # GL_TEXTURE_2D, 0, GL_LUMINANCE8, DWIDTH, g_height, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL) + #else + if eye == 2: + dw = self.DWIDTH + else: + dw = self.DWIDTH / 2 + + bgl.glTexImage2D(bgl.GL_TEXTURE_2D, + 0, + bgl.GL_RGBA8, + dw, + self.g_height, + 0, + bgl.GL_RGBA, + bgl.GL_UNSIGNED_BYTE, + 0) + + bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0) + + # if eye == 0 or eye == 1: + # # create 2 pixel buffer objects, you need to delete them when program exits. + # # glBufferData() with NULL pointer reserves only memory space. + # bgl.glGenFramebuffers(1, pboIds) + # self.g_bufferIds[eye] = pboIds[0] + + # bgl.glBindFramebuffer(bgl.GL_FRAMEBUFFER, self.g_bufferIds[eye]) + + # # Set "renderedTexture" as our colour attachement #0 + # #glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, g_textureIds[eye], 0) + # bgl.glFramebufferTexture2D(bgl.GL_FRAMEBUFFER, bgl.GL_COLOR_ATTACHMENT0, bgl.GL_TEXTURE_2D, self.g_textureIds[eye], 0) + + # bgl.glBindFramebuffer(bgl.GL_FRAMEBUFFER, 0) + + #endif + + # if (eye == 2) + # create 2 pixel buffer objects, you need to delete them when program exits. + # glBufferData() with NULL pointer reserves only memory space. 
+ bgl.glGenBuffers(1, pboIds) + self.g_bufferIds[eye] = pboIds[0] + + bgl.glBindBuffer(bgl.GL_PIXEL_UNPACK_BUFFER, self.g_bufferIds[eye]) + + bgl.glBufferData(bgl.GL_PIXEL_UNPACK_BUFFER, + dw * self.g_height * 4, + 0, + bgl.GL_DYNAMIC_COPY) + + #cuda_assert(cudaGLRegisterBufferObject(g_bufferIds[eye])) + #cuda_assert(cudaGLMapBufferObject((void **)&g_pixels_buf_d, g_bufferIds[eye])) + _cyclesphi_dll.cuda_gl_map_buffer(self.g_bufferIds[eye]) + + bgl.glBindBuffer(bgl.GL_PIXEL_UNPACK_BUFFER, 0) + + @staticmethod + def _draw_texture(texture_id, x, y, width, height): + # INITIALIZATION + + # Getting shader program + shader_program = bgl.Buffer(bgl.GL_INT, 1) + bgl.glGetIntegerv(bgl.GL_CURRENT_PROGRAM, shader_program) + + # Generate vertex array + vertex_array = bgl.Buffer(bgl.GL_INT, 1) + bgl.glGenVertexArrays(1, vertex_array) + + texturecoord_location = bgl.glGetAttribLocation(shader_program[0], "texCoord") + position_location = bgl.glGetAttribLocation(shader_program[0], "pos") + + # Generate geometry buffers for drawing textured quad + position = [x, y, x + width, y, x + width, y + height, x, y + height] + position = bgl.Buffer(bgl.GL_FLOAT, len(position), position) + texcoord = [0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0] + texcoord = bgl.Buffer(bgl.GL_FLOAT, len(texcoord), texcoord) + + vertex_buffer = bgl.Buffer(bgl.GL_INT, 2) + bgl.glGenBuffers(2, vertex_buffer) + bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, vertex_buffer[0]) + bgl.glBufferData(bgl.GL_ARRAY_BUFFER, 32, position, bgl.GL_STATIC_DRAW) + bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, vertex_buffer[1]) + bgl.glBufferData(bgl.GL_ARRAY_BUFFER, 32, texcoord, bgl.GL_STATIC_DRAW) + bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, 0) + + # DRAWING + bgl.glActiveTexture(bgl.GL_TEXTURE0) + bgl.glBindTexture(bgl.GL_TEXTURE_2D, texture_id) + + bgl.glBindVertexArray(vertex_array[0]) + bgl.glEnableVertexAttribArray(texturecoord_location) + bgl.glEnableVertexAttribArray(position_location) + + bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, vertex_buffer[0]) + bgl.glVertexAttribPointer(position_location, 2, bgl.GL_FLOAT, bgl.GL_FALSE, 0, None) + bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, vertex_buffer[1]) + bgl.glVertexAttribPointer(texturecoord_location, 2, bgl.GL_FLOAT, bgl.GL_FALSE, 0, None) + bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, 0) + + bgl.glDrawArrays(bgl.GL_TRIANGLE_FAN, 0, 4) + + bgl.glBindVertexArray(0) + bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0) + + # DELETING + bgl.glDeleteBuffers(2, vertex_buffer) + bgl.glDeleteVertexArrays(1, vertex_array) + + def gl_render(self, eye): + #if 1 + # render to texture + if eye == 0 or eye == 1: + # Left Eye + bgl.glBindFramebuffer(bgl.GL_FRAMEBUFFER, self.g_bufferIds[eye]) + # glViewport(0, 0, g_width / 2, g_height) + # vr_RenderScene(vr::Eye_Left) + self.toOrtho(eye, self.DWIDTH / 2, self.g_height) + else: + + bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.g_textureIds[2]) + bgl.glBindBuffer(bgl.GL_PIXEL_UNPACK_BUFFER, self.g_bufferIds[2]) + + # copy pixels from PBO to texture object + # Use offset instead of ponter. 
+ + # ifdef WITH_CLIENT_YUV + # glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, DWIDTH, g_height, GL_LUMINANCE, GL_UNSIGNED_BYTE, 0); + # else + bgl.glTexSubImage2D(bgl.GL_TEXTURE_2D, + 0, + 0, + 0, + self.DWIDTH, + self.g_height, + bgl.GL_RGBA, + bgl.GL_UNSIGNED_BYTE, + 0) + + bgl.glBindBuffer(bgl.GL_PIXEL_UNPACK_BUFFER, 0) + bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0) + + border = (0, 0), (self.DWIDTH, self.height) + #draw_texture_2d(self.g_textureIds[2], border[0], *border[1]) + self._draw_texture(self.g_textureIds[2], 0, 0, self.DWIDTH, self.height) + return + + self.toOrtho(eye, self.g_width, self.g_height) + + check_gl_error() + + bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.g_textureIds[2]) + + check_gl_error() + + gl.glBegin(7) #bgl.GL_QUADS) + + check_gl_error() + + gl.glTexCoord2d(0, 0) + gl.glVertex2d(0, 0) + gl.glTexCoord2d(1, 0) + gl.glVertex2d(1, 0) + gl.glTexCoord2d(1, 1) + gl.glVertex2d(1, 1) + gl.glTexCoord2d(0, 1) + gl.glVertex2d(0, 1) + + check_gl_error() + + gl.glEnd() + + check_gl_error() + + bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0) + + check_gl_error() + + return + + + # glfwMakeContextCurrent(g_windows[eye]); + + # bind the texture and PBO + bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.g_textureIds[2]) + bgl.glBindBuffer(bgl.GL_PIXEL_UNPACK_BUFFER, self.g_bufferIds[2]) + + # copy pixels from PBO to texture object + # Use offset instead of ponter. + + # ifdef WITH_CLIENT_YUV + # glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, DWIDTH, g_height, GL_LUMINANCE, GL_UNSIGNED_BYTE, 0); + # else + bgl.glTexSubImage2D(bgl.GL_TEXTURE_2D, + 0, + 0, + 0, + self.DWIDTH, + self.g_height, + bgl.GL_RGBA, + bgl.GL_UNSIGNED_BYTE, + 0) + # endif + + + + # draw a point with texture + bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.g_textureIds[2]) + + gl.glBegin(0x0007) #bgl.GL_QUADS) + + gl.glTexCoord2d(0, 0) + gl.glVertex2d(0, 0) + gl.glTexCoord2d(1, 0) + gl.glVertex2d(1, 0) + gl.glTexCoord2d(1, 1) + gl.glVertex2d(1, 1) + gl.glTexCoord2d(0, 1) + gl.glVertex2d(0, 1) + + + gl.glEnd() + + # unbind texture + bgl.glBindBuffer(bgl.GL_PIXEL_UNPACK_BUFFER, 0) + bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0) + + # render to texture + if eye == 0 or eye == 1: + bgl.glBindFramebuffer(bgl.GL_FRAMEBUFFER, 0) + -# Core has issues with drawing faces in orthographic camera view with big -# ortho depth (far_clip_plane - near_clip_plane). 
-# Experimentally found quite suited value = 200 MAX_ORTHO_DEPTH = 200.0 @dataclass(init=False, eq=True) class CameraData: """ Comparable dataclass which holds all camera settings """ - # #mode: int = None - # clip_plane: (float, float) = None - # focal_length: float = None - # sensor_size: (float, float) = None - # transform: tuple = None - # lens_shift: (float, float) = None - # ortho_size: (float, float) = None - # dof_data: (float, float, int) = None # tuple which holds data in following order: focus_distance, f_stop, blades - transform: tuple = None #void *camera_object, focal_length: float = None #float lens, clip_plane: (float, float) = None #float nearclip, float farclip, @@ -500,6 +665,8 @@ class CameraData: use_view_camera: int = None # int use_view_camera shift_x: float = None shift_y: float = None + quat: tuple = None + pos: tuple = None @staticmethod def init_from_camera(camera: bpy.types.Camera, transform, ratio, border=((0, 0), (1, 1))): @@ -513,7 +680,18 @@ class CameraData: data.use_view_camera = 1 data.shift_x = camera.shift_x - data.shift_y = camera.shift_y + data.shift_y = camera.shift_y + + data.quat = tuple(transform.to_quaternion()) + data.pos = tuple(transform.to_translation()) + + # projection_matrix = camera.calc_matrix_camera( + # bpy.context.view_layer.depsgraph, + # x = bpy.context.scene.render.resolution_x, + # y = bpy.context.scene.render.resolution_y, + # scale_x = bpy.context.scene.render.pixel_aspect_x, + # scale_y = bpy.context.scene.render.pixel_aspect_y, + # ) # if camera.dof.use_dof: # # calculating focus_distance @@ -595,73 +773,6 @@ class CameraData: return data - @staticmethod - def init_from_context_openvr(context: bpy.types.Context): - if context.scene.openvr_user_prop.vr_system is None: - context.scene.openvr_user_prop.vr_system = openvr.init(openvr.VRApplication_Scene) - context.scene.openvr_user_prop.compositor = openvr.VRCompositor() - - context.scene.openvr_user_prop.openVrGlRenderer = OpenVrGlRenderer() - context.scene.openvr_user_prop.openVrGlRenderer.init_gl(context) - - poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount - context.scene.openvr_user_prop.poses = poses_t() - - context.scene.openvr_user_prop.compositor.waitGetPoses(context.scene.openvr_user_prop.poses, None) - - pose = context.scene.openvr_user_prop.poses[0] - if not pose.bPoseIsValid: - return None, None - - dataL = CameraData() - dataR = CameraData() - - head_matrix = addPositionAndScale(context, hmdMatrix34_t2Bmatrix(pose.mDeviceToAbsoluteTracking)) - - viewL = addScale(context, hmdMatrix342Bmatrix(context.scene.openvr_user_prop.vr_system.getEyeToHeadTransform(openvr.Eye_Left))) - viewR = addScale(context, hmdMatrix342Bmatrix(context.scene.openvr_user_prop.vr_system.getEyeToHeadTransform(openvr.Eye_Right))) - - dataL.transform = tuple(head_matrix @ viewL) - dataR.transform = tuple(head_matrix @ viewR) - #bpy.data.objects[openvr_hmd_setting.camR].matrix_world = head_matrix * viewR - - l_left, l_right, l_top, l_bottom = context.scene.openvr_user_prop.vr_system.getProjectionRaw(openvr.Eye_Left) - r_left, r_right, r_top, r_bottom = context.scene.openvr_user_prop.vr_system.getProjectionRaw(openvr.Eye_Right) - - angle = math.atan(max(max(-l_top, l_bottom), max(-r_top, r_bottom))) - - #w, h = context.scene.openvr_user_prop.vr_system.getRecommendedRenderTargetSize() - - sensor_width = 36 - - #focal_length = 0.5 * sensor_width * 1.0 / math.tan(angle / 2) / (w / h) - dataL.focal_length = sensor_width / (math.fabs(l_left) + math.fabs(l_right)) - 
dataR.focal_length = sensor_width / (math.fabs(r_left) + math.fabs(r_right)) - - #dataL.shift_x = (l_bottom + l_top) * (w / h) #/ (l_right - l_left) - dataL.shift_x = 0.5 * (l_left + l_right) / (math.fabs(l_left) + math.fabs(l_right)) - dataL.shift_y = 0 - - #dataR.shift_x = - (r_bottom + r_top) * (w / h) #/ (r_right - r_left) - dataR.shift_x = 0.5 * (r_left + r_right) / (math.fabs(r_left) + math.fabs(r_right)) - dataR.shift_y = 0 - - dataL.clip_plane = (0.1, 1000) - dataL.use_view_camera = 1 - dataL.sensor_fit = 0 - dataL.sensor_size = (sensor_width, sensor_width) - dataL.view_camera_offset = (0,0) - dataL.view_camera_zoom = 1.0 - - dataR.clip_plane = (0.1, 1000) - dataR.use_view_camera = 1 - dataR.sensor_fit = 0 - dataR.sensor_size = (sensor_width, sensor_width) - dataR.view_camera_offset = (0,0) - dataR.view_camera_zoom = 1.0 - - return dataL, dataR - @staticmethod def init_from_context(context: bpy.types.Context): """ Returns CameraData from bpy.types.Context """ @@ -678,7 +789,8 @@ class CameraData: # data.sensor_size = (VIEWPORT_SENSOR_SIZE, VIEWPORT_SENSOR_SIZE / ratio) if ratio > 1.0 else \ # (VIEWPORT_SENSOR_SIZE * ratio, VIEWPORT_SENSOR_SIZE) #data.fov = 2.0 * math.atan(0.5 * VIEWPORT_SENSOR_SIZE / context.space_data.lens / ratio ) - data.transform = tuple(context.region_data.view_matrix.inverted()) + vmat = context.region_data.view_matrix.inverted() + data.transform = tuple(vmat) data.focal_length = context.space_data.lens data.use_view_camera = 0 data.sensor_size = (VIEWPORT_SENSOR_SIZE, VIEWPORT_SENSOR_SIZE) @@ -688,6 +800,10 @@ class CameraData: data.shift_x = 0 data.shift_y = 0 + data.quat = tuple(vmat.to_quaternion()) + data.pos = tuple(vmat.to_translation()) + + # elif context.region_data.view_perspective == 'ORTHO': # data = CameraData() # #data.mode = pyrpr.CAMERA_MODE_ORTHOGRAPHIC @@ -749,13 +865,13 @@ class ViewportSettings: def __init__(self, context: bpy.types.Context): """Initializes settings from Blender's context""" - if _cyclesphi_dll.is_vr() == True: - self.camera_dataL,self.camera_dataR = CameraData.init_from_context_openvr(context) - self.screen_height = context.scene.openvr_user_prop.openVrGlRenderer.getHeight() - self.screen_width = context.scene.openvr_user_prop.openVrGlRenderer.getWidth() - else: - self.camera_dataL,self.camera_dataR = CameraData.init_from_context(context) - self.screen_width, self.screen_height = context.region.width, context.region.height + # if _cyclesphi_dll.get_renderengine_type() == 1: + # self.camera_dataL,self.camera_dataR = CameraData.init_from_context_openvr(context) + # self.screen_height = context.scene.openvr_user_prop.openVrGlRenderer.getHeight() + # self.screen_width = context.scene.openvr_user_prop.openVrGlRenderer.getWidth() + # else: + self.camera_dataL,self.camera_dataR = CameraData.init_from_context(context) + self.screen_width, self.screen_height = context.region.width, context.region.height scene = context.scene @@ -774,16 +890,15 @@ class Engine: self.cyclesphi_engine = weakref.proxy(cyclesphi_engine) self.cyclesphi_context = CyclesPhiContext() +#g_viewport_engine = None + class ViewportEngine(Engine): """ Viewport render engine """ def __init__(self, cyclesphi_engine): super().__init__(cyclesphi_engine) - self.gl_texture: GLTexture = None + #self.gl_texture: GLTexture = None self.viewport_settings: ViewportSettings = None - #self.world_settings: world.WorldData = None - #self.shading_data: ShadingData = None - #self.view_layer_data: ViewLayerSettings = None self.sync_render_thread: threading.Thread = None 
self.restart_render_event = threading.Event() @@ -799,13 +914,18 @@ class ViewportEngine(Engine): #self.render_iterations = 0 #self.render_time = 0 + #g_viewport_engine = self + self.render_callback = render_callback_type(self.render_callback) + def stop_render(self): print("stop_render") - self.is_finished = True - self.restart_render_event.set() - self.sync_render_thread.join() + self.is_finished = True + + if _cyclesphi_dll.get_renderengine_type() != 2: + self.restart_render_event.set() + self.sync_render_thread.join() - self.cyclesphi_context.close() + #self.cyclesphi_context.client_close() self.cyclesphi_context = None self.image_filter = None pass @@ -828,8 +948,8 @@ class ViewportEngine(Engine): class FinishRender(Exception): pass - print('Start _do_sync_render') - self.cyclesphi_context.client_init() + # print('Start _do_sync_render') + # self.cyclesphi_context.client_init() try: # SYNCING OBJECTS AND INSTANCES @@ -877,83 +997,26 @@ class ViewportEngine(Engine): # self.viewport_settings.height) self.is_resized = False - # self.cyclesphi_context.sync_auto_adapt_subdivision() - # self.cyclesphi_context.sync_portal_lights() time_begin = time.perf_counter() - #print(f"Restart render [{self.cyclesphi_context.width}, {self.cyclesphi_context.height}]") # rendering with self.render_lock: if self.restart_render_event.is_set(): break - render_result = self.cyclesphi_context.render(restart=(iteration == 0)) - #if render_result == 0: - #self.is_finished = True - # self.restart_render_event.set() - # break - - # resolving - # with self.resolve_lock: - # self._resolve() + self.cyclesphi_context.render(restart=(iteration == 0)) - self.is_rendered = True - # self.is_denoised = False - #iteration += 1 + self.is_rendered = True current_samples = self.cyclesphi_context.get_current_samples() - # checking for last iteration - # preparing information to show in viewport time_render = time.perf_counter() - time_begin - # if self.render_iterations > 0: - # info_str = f"Time: {time_render:.1f} sec"\ - # f" | Iteration: {iteration}/{self.render_iterations}" - # else: fps = current_samples / time_render info_str = f"Time: {time_render:.1f} sec"\ f" | Samples: {current_samples}" \ f" | FPS: {fps:.1f}" - # if is_adaptive_active: - # active_pixels = self.cyclesphi_context.get_info(pyrpr.CONTEXT_ACTIVE_PIXEL_COUNT, int) - # adaptive_progress = max((all_pixels - active_pixels) / all_pixels, 0.0) - # info_str += f" | Adaptive Sampling: {math.floor(adaptive_progress * 100)}%" - - # if self.render_iterations > 0: - # if iteration >= self.render_iterations: - # is_last_iteration = True - # else: - # if time_render >= self.render_time: - # is_last_iteration = True - # if is_adaptive and active_pixels == 0: - # is_last_iteration = True - - # if is_last_iteration: - # break - notify_status(info_str, "Render") - - # notifying viewport that rendering is finished - # if is_last_iteration: - # time_render = time.perf_counter() - time_begin - - # if self.image_filter: - # notify_status(f"Time: {time_render:.1f} sec | Iteration: {iteration}" - # f" | Denoising...", "Render") - - # # applying denoising - # with self.resolve_lock: - # if self.image_filter: - # self.update_image_filter_inputs() - # self.image_filter.run() - # self.is_denoised = True - - # time_render = time.perf_counter() - time_begin - # notify_status(f"Time: {time_render:.1f} sec | Iteration: {iteration}" - # f" | Denoised", "Rendering Done") - # else: - # notify_status(f"Time: {time_render:.1f} sec | Iteration: {iteration}", - # "Rendering Done") + 
notify_status(info_str, "Render") except FinishRender: print("Finish by user") @@ -967,26 +1030,30 @@ class ViewportEngine(Engine): notify_status(f"{e}.\nPlease see logs for more details.", "ERROR") - self.cyclesphi_context.client_close() + #self.cyclesphi_context.client_close() print('Finish _do_sync_render') + #@ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int) + def render_callback(self, sample): + # requesting blender to call draw() + # if g_viewport_engine != None: + self.cyclesphi_engine.tag_redraw() + #print("render_callback") + + return 0 + + # #@ctypes.CFUNCTYPE(None) + # def render_callback(self, sample): + # # requesting blender to call draw() + # self.cyclesphi_engine.tag_redraw() + def sync(self, context, depsgraph): print('Start sync') - # if not self.is_finished: - # return - scene = depsgraph.scene - #viewport_limits = scene.rpr.viewport_limits view_layer = depsgraph.view_layer - # scene.rpr.init_rpr_context(self.cyclesphi_context, is_final_engine=False, - # use_gl_interop=config.use_gl_interop) - - # self.shading_data = ShadingData(context) - # self.view_layer_data = ViewLayerSettings(view_layer) - # getting initial render resolution viewport_settings = ViewportSettings(context) width, height = viewport_settings.width, viewport_settings.height @@ -1002,51 +1069,23 @@ class ViewportEngine(Engine): height, scene.cyclesphi.sampling_settings.step_samples) # if not self.cyclesphi_context.gl_interop: - self.gl_texture = GLTexture(width, height) - - # self.cyclesphi_context.enable_aov(pyrpr.AOV_COLOR) - - # if viewport_limits.noise_threshold > 0.0: - # # if adaptive is enable turn on aov and settings - # self.cyclesphi_context.enable_aov(pyrpr.AOV_VARIANCE) - # viewport_limits.set_adaptive_params(self.cyclesphi_context) - - # self.cyclesphi_context.scene.set_name(scene.name) - - # self.world_settings = self._get_world_settings(depsgraph) - # self.world_settings.export(self.cyclesphi_context) - - # rpr_camera = self.cyclesphi_context.create_camera() - # rpr_camera.set_name("Camera") - # self.cyclesphi_context.scene.set_camera(rpr_camera) - - # # image filter - # image_filter_settings = view_layer.rpr.denoiser.get_settings(scene, False) - # image_filter_settings['resolution'] = (self.cyclesphi_context.width, self.cyclesphi_context.height) - # self.setup_image_filter(image_filter_settings) - - # # other context settings - # self.cyclesphi_context.set_parameter(pyrpr.CONTEXT_PREVIEW, True) - # self.cyclesphi_context.set_parameter(pyrpr.CONTEXT_ITERATIONS, 1) - # scene.rpr.export_render_mode(self.cyclesphi_context) - # scene.rpr.export_ray_depth(self.cyclesphi_context) - # scene.rpr.export_pixel_filter(self.cyclesphi_context) - - #self.render_iterations = 1000 - #self.render_time = 0 #(viewport_limits.max_samples, 0) - + #self.gl_texture = GLTexture(width, height) + self.cyclesphi_context.client_init() + self.is_finished = False - self.restart_render_event.clear() - - self.sync_render_thread = threading.Thread(target=self._do_sync_render, args=(depsgraph,)) - self.sync_render_thread.start() + + if _cyclesphi_dll.get_renderengine_type() == 2: + self.is_synced = True + self.is_rendered = True + self.cyclesphi_context.register_render_callback(self.render_callback) + else: + print('Start _do_sync_render') + self.restart_render_event.clear() + self.sync_render_thread = threading.Thread(target=self._do_sync_render, args=(depsgraph,)) + self.sync_render_thread.start() print('Finish sync') - # def sync_update(self, context, depsgraph): - # """ sync just the updated things """ - # pass - 
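A note on the callback wiring used in __init__ and sync() above: the bound method is wrapped once with render_callback_type, handed to register_render_callback, and the wrapper is kept on self so it is not garbage collected while the native library may still invoke it. A hedged sketch of that pattern, assuming render_callback_type is ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int) as the commented-out decorator suggests, and that the shared library has been loaded with ctypes.CDLL elsewhere in the add-on (the class name here is illustrative only):

    import ctypes

    render_callback_type = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int)

    class CallbackOwner:
        def __init__(self, dll):
            # Wrap the bound method once and keep the wrapper alive on self;
            # if the CFUNCTYPE object were collected, the native side would
            # call into freed memory.
            self._cb = render_callback_type(self._on_frame)
            dll.register_render_callback(self._cb)

        def _on_frame(self, sample):
            # Invoked from the library's thread when a frame is ready; do
            # minimal work here (the add-on only requests a redraw) and
            # return an int to satisfy the declared signature.
            return 0

This is also why the engine assigns self.render_callback = render_callback_type(self.render_callback) in __init__ rather than wrapping the method at call time.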
@staticmethod def _draw_texture(texture_id, x, y, width, height): # INITIALIZATION @@ -1142,32 +1181,34 @@ class ViewportEngine(Engine): # self.is_resized = True - self.restart_render_event.set() + if _cyclesphi_dll.get_renderengine_type() != 2: + self.restart_render_event.set() if self.is_resized or not self.is_rendered: return - def draw_(texture_id): - #if False: - # if scene.rpr.render_mode in ('WIREFRAME', 'MATERIAL_INDEX', - # 'POSITION', 'NORMAL', 'TEXCOORD'): - # Draw without color management - # draw_texture_2d(texture_id, self.viewport_settings.border[0], *self.viewport_settings.border[1]) + # def draw_(texture_id): + # #if False: + # # if scene.rpr.render_mode in ('WIREFRAME', 'MATERIAL_INDEX', + # # 'POSITION', 'NORMAL', 'TEXCOORD'): + # # Draw without color management + # # draw_texture_2d(texture_id, self.viewport_settings.border[0], *self.viewport_settings.border[1]) - # else: - # Bind shader that converts from scene linear to display space, - bgl.glEnable(bgl.GL_BLEND) - bgl.glBlendFunc(bgl.GL_ONE, bgl.GL_ONE_MINUS_SRC_ALPHA) - self.cyclesphi_engine.bind_display_space_shader(scene) + # # else: + # # Bind shader that converts from scene linear to display space, + # # bgl.glEnable(bgl.GL_BLEND) + # # bgl.glBlendFunc(bgl.GL_ONE, bgl.GL_ONE_MINUS_SRC_ALPHA) + # # self.cyclesphi_engine.bind_display_space_shader(scene) + + # # note this has to draw to region size, not scaled down size + # # self._draw_texture(texture_id, *self.viewport_settings.border[0], *self.viewport_settings.border[1]) - # note this has to draw to region size, not scaled down size - self._draw_texture(texture_id, *self.viewport_settings.border[0], - *self.viewport_settings.border[1]) + # _cyclesphi_dll.draw_texture() - self.cyclesphi_engine.unbind_display_space_shader() - bgl.glDisable(bgl.GL_BLEND) + # # self.cyclesphi_engine.unbind_display_space_shader() + # # bgl.glDisable(bgl.GL_BLEND) - #pass + # #pass # if self.is_denoised: # im = None @@ -1185,15 +1226,43 @@ class ViewportEngine(Engine): # draw_(self.cyclesphi_context.get_frame_buffer().texture_id) # return - with self.resolve_lock: - imL, imR = self._get_render_image() + # with self.resolve_lock: + # imL, imR = self._get_render_image() + + # self.gl_texture.set_image(imL) + # if _cyclesphi_dll.get_renderengine_type() == 1: + # context.scene.openvr_user_prop.openVrGlRenderer.post_render_scene_left(imL) + # context.scene.openvr_user_prop.openVrGlRenderer.post_render_scene_right(imR) + + #draw_(self.gl_texture.texture_id) + #_cyclesphi_dll.draw_texture() + #self.cyclesphi_context.gl_render(0) + #self.cyclesphi_context.gl_render(1) + #self.cyclesphi_context.gl_render(2) - self.gl_texture.set_image(imL) - if _cyclesphi_dll.is_vr() == True: - context.scene.openvr_user_prop.openVrGlRenderer.post_render_scene_left(imL) - context.scene.openvr_user_prop.openVrGlRenderer.post_render_scene_right(imR) + # Bind shader that converts from scene linear to display space, + bgl.glEnable(bgl.GL_BLEND) + bgl.glBlendFunc(bgl.GL_ONE, bgl.GL_ONE_MINUS_SRC_ALPHA) + self.cyclesphi_engine.bind_display_space_shader(scene) + + # note this has to draw to region size, not scaled down size + # self._draw_texture(texture_id, *self.viewport_settings.border[0], *self.viewport_settings.border[1]) + + #_cyclesphi_dll.draw_texture() + if _cyclesphi_dll.get_renderengine_type == 0: + self.cyclesphi_context.gl_render(2) + else: + #self.cyclesphi_context.gl_render(0) + #self.cyclesphi_context.gl_render(1) + self.cyclesphi_context.gl_render(2) + + # border = (0, 0), 
(self.cyclesphi_context.width, self.cyclesphi_context.height) + # self._draw_texture(self.cyclesphi_context.g_textureIds[2], *border[0], *border[1]) - draw_(self.gl_texture.texture_id) + self.cyclesphi_engine.unbind_display_space_shader() + bgl.glDisable(bgl.GL_BLEND) + + check_gl_error() class CyclesPhiRenderEngine(bpy.types.RenderEngine): # These three members are used by blender to set up the @@ -1376,109 +1445,109 @@ class CyclesPhiRenderEngine(bpy.types.RenderEngine): pass -class CyclesPhiDrawData: - def __init__(self, dimensions): - # Generate dummy float image buffer - self.dimensions = dimensions - width, height = dimensions - - #self.pixels = numpy.char.array([0, 0, 0, 0] * width * height) - #self.pixels = bgl.Buffer(bgl.GL_BYTE, width * height * 4, pixels) - - # Generate texture - self.texture = bgl.Buffer(bgl.GL_INT, 1) - bgl.glGenTextures(1, self.texture) - bgl.glActiveTexture(bgl.GL_TEXTURE0) - bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.texture[0]) - #bgl.glTexImage2D(bgl.GL_TEXTURE_2D, 0, bgl.GL_RGBA16F, width, height, 0, bgl.GL_RGBA, bgl.GL_FLOAT, self.pixels) - bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MIN_FILTER, bgl.GL_LINEAR) - bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MAG_FILTER, bgl.GL_LINEAR) - #bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MIN_FILTER, bgl.GL_NEAREST) - #bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MAG_FILTER, bgl.GL_NEAREST) - # bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_WRAP_S, bgl.GL_CLAMP) - # bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_WRAP_T, bgl.GL_CLAMP) - - bgl.glTexImage2D(bgl.GL_TEXTURE_2D, 0, bgl.GL_RGBA, width, height, 0, bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, bgl.Buffer(bgl.GL_BYTE, width * height * 4)); - - bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0) - - # Bind shader that converts from scene linear to display space, - # use the scene's color management settings. 
- shader_program = bgl.Buffer(bgl.GL_INT, 1) - bgl.glGetIntegerv(bgl.GL_CURRENT_PROGRAM, shader_program) - - # Generate vertex array - self.vertex_array = bgl.Buffer(bgl.GL_INT, 1) - bgl.glGenVertexArrays(1, self.vertex_array) - bgl.glBindVertexArray(self.vertex_array[0]) - - texturecoord_location = bgl.glGetAttribLocation(shader_program[0], "texCoord") - position_location = bgl.glGetAttribLocation(shader_program[0], "pos") - - bgl.glEnableVertexAttribArray(texturecoord_location) - bgl.glEnableVertexAttribArray(position_location) - - # Generate geometry buffers for drawing textured quad - position = [0.0, 0.0, width, 0.0, width, height, 0.0, height] - position = bgl.Buffer(bgl.GL_FLOAT, len(position), position) - texcoord = [0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0] - texcoord = bgl.Buffer(bgl.GL_FLOAT, len(texcoord), texcoord) - - self.vertex_buffer = bgl.Buffer(bgl.GL_INT, 2) - - bgl.glGenBuffers(2, self.vertex_buffer) - bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vertex_buffer[0]) - bgl.glBufferData(bgl.GL_ARRAY_BUFFER, 32, position, bgl.GL_STATIC_DRAW) - bgl.glVertexAttribPointer(position_location, 2, bgl.GL_FLOAT, bgl.GL_FALSE, 0, None) - - bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vertex_buffer[1]) - bgl.glBufferData(bgl.GL_ARRAY_BUFFER, 32, texcoord, bgl.GL_STATIC_DRAW) - bgl.glVertexAttribPointer(texturecoord_location, 2, bgl.GL_FLOAT, bgl.GL_FALSE, 0, None) - - bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, 0) - bgl.glBindVertexArray(0) - - ######################################################## - # self.pixel_buffer = bgl.Buffer(bgl.GL_INT, 1) - - # bgl.glGenBuffers(1, self.pixel_buffer) - # bgl.glBindBuffer(bgl.GL_PIXEL_UNPACK_BUFFER, self.pixel_buffer[0]) - # bgl.glBufferData(bgl.GL_PIXEL_UNPACK_BUFFER, width * height * 4, 0, bgl.GL_STREAM_DRAW) - # bgl.glBindBuffer(bgl.GL_PIXEL_UNPACK_BUFFER, 0) - - def __del__(self): - #bgl.glDeleteBuffers(1, self.pixel_buffer) - bgl.glDeleteBuffers(2, self.vertex_buffer) - bgl.glDeleteVertexArrays(1, self.vertex_array) - bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0) - bgl.glDeleteTextures(1, self.texture) - - def draw(self): - bgl.glActiveTexture(bgl.GL_TEXTURE0) - bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.texture[0]) - - ######################################################################## - width, height = self.dimensions - - #bgl.glBindBuffer(bgl.GL_PIXEL_UNPACK_BUFFER, self.pixel_buffer[0]) - #gl.glTexSubImage2D(bgl.GL_TEXTURE_2D, 0, 0, 0, width, height, bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, ctypes.c_void_p(self.pixels.ctypes.data)) - #bgl.glBufferData(bgl.GL_PIXEL_UNPACK_BUFFER, width * height * 4, 0, bgl.GL_STREAM_DRAW) - #ptr_pixels = None #gl.glMapBuffer( bgl.GL_PIXEL_UNPACK_BUFFER, bgl.GL_WRITE_ONLY) +# class CyclesPhiDrawData: +# def __init__(self, dimensions): +# # Generate dummy float image buffer +# self.dimensions = dimensions +# width, height = dimensions + +# #self.pixels = numpy.char.array([0, 0, 0, 0] * width * height) +# #self.pixels = bgl.Buffer(bgl.GL_BYTE, width * height * 4, pixels) + +# # Generate texture +# # self.texture = bgl.Buffer(bgl.GL_INT, 1) +# # bgl.glGenTextures(1, self.texture) +# # bgl.glActiveTexture(bgl.GL_TEXTURE0) +# # bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.texture[0]) +# # #bgl.glTexImage2D(bgl.GL_TEXTURE_2D, 0, bgl.GL_RGBA16F, width, height, 0, bgl.GL_RGBA, bgl.GL_FLOAT, self.pixels) +# # bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MIN_FILTER, bgl.GL_LINEAR) +# # bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MAG_FILTER, bgl.GL_LINEAR) +# # #bgl.glTexParameteri(bgl.GL_TEXTURE_2D, 
bgl.GL_TEXTURE_MIN_FILTER, bgl.GL_NEAREST) +# # #bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MAG_FILTER, bgl.GL_NEAREST) +# # # bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_WRAP_S, bgl.GL_CLAMP) +# # # bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_WRAP_T, bgl.GL_CLAMP) + +# # bgl.glTexImage2D(bgl.GL_TEXTURE_2D, 0, bgl.GL_RGBA, width, height, 0, bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, bgl.Buffer(bgl.GL_BYTE, width * height * 4)); + +# # bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0) + +# # # Bind shader that converts from scene linear to display space, +# # # use the scene's color management settings. +# # shader_program = bgl.Buffer(bgl.GL_INT, 1) +# # bgl.glGetIntegerv(bgl.GL_CURRENT_PROGRAM, shader_program) + +# # # Generate vertex array +# # self.vertex_array = bgl.Buffer(bgl.GL_INT, 1) +# # bgl.glGenVertexArrays(1, self.vertex_array) +# # bgl.glBindVertexArray(self.vertex_array[0]) + +# # texturecoord_location = bgl.glGetAttribLocation(shader_program[0], "texCoord") +# # position_location = bgl.glGetAttribLocation(shader_program[0], "pos") + +# # bgl.glEnableVertexAttribArray(texturecoord_location) +# # bgl.glEnableVertexAttribArray(position_location) + +# # # Generate geometry buffers for drawing textured quad +# # position = [0.0, 0.0, width, 0.0, width, height, 0.0, height] +# # position = bgl.Buffer(bgl.GL_FLOAT, len(position), position) +# # texcoord = [0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0] +# # texcoord = bgl.Buffer(bgl.GL_FLOAT, len(texcoord), texcoord) + +# # self.vertex_buffer = bgl.Buffer(bgl.GL_INT, 2) + +# # bgl.glGenBuffers(2, self.vertex_buffer) +# # bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vertex_buffer[0]) +# # bgl.glBufferData(bgl.GL_ARRAY_BUFFER, 32, position, bgl.GL_STATIC_DRAW) +# # bgl.glVertexAttribPointer(position_location, 2, bgl.GL_FLOAT, bgl.GL_FALSE, 0, None) + +# # bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vertex_buffer[1]) +# # bgl.glBufferData(bgl.GL_ARRAY_BUFFER, 32, texcoord, bgl.GL_STATIC_DRAW) +# # bgl.glVertexAttribPointer(texturecoord_location, 2, bgl.GL_FLOAT, bgl.GL_FALSE, 0, None) + +# # bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, 0) +# # bgl.glBindVertexArray(0) + +# ######################################################## +# # self.pixel_buffer = bgl.Buffer(bgl.GL_INT, 1) + +# # bgl.glGenBuffers(1, self.pixel_buffer) +# # bgl.glBindBuffer(bgl.GL_PIXEL_UNPACK_BUFFER, self.pixel_buffer[0]) +# # bgl.glBufferData(bgl.GL_PIXEL_UNPACK_BUFFER, width * height * 4, 0, bgl.GL_STREAM_DRAW) +# # bgl.glBindBuffer(bgl.GL_PIXEL_UNPACK_BUFFER, 0) + +# # def __del__(self): +# # #bgl.glDeleteBuffers(1, self.pixel_buffer) +# # # bgl.glDeleteBuffers(2, self.vertex_buffer) +# # # bgl.glDeleteVertexArrays(1, self.vertex_array) +# # # bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0) +# # # bgl.glDeleteTextures(1, self.texture) + +# def draw(self): +# # bgl.glActiveTexture(bgl.GL_TEXTURE0) +# # bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.texture[0]) + +# ######################################################################## +# width, height = self.dimensions + +# # #bgl.glBindBuffer(bgl.GL_PIXEL_UNPACK_BUFFER, self.pixel_buffer[0]) +# # #gl.glTexSubImage2D(bgl.GL_TEXTURE_2D, 0, 0, 0, width, height, bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, ctypes.c_void_p(self.pixels.ctypes.data)) +# # #bgl.glBufferData(bgl.GL_PIXEL_UNPACK_BUFFER, width * height * 4, 0, bgl.GL_STREAM_DRAW) +# # #ptr_pixels = None #gl.glMapBuffer( bgl.GL_PIXEL_UNPACK_BUFFER, bgl.GL_WRITE_ONLY) - # #path_tracer(ptr) - # if ptr_pixels: - # c_ptr_pixels = ctypes.cast(ptr_pixels, POINTER(c_char)) - # # 
_cyclesphi_dll.get_pixels(c_ptr_pixels) - # bgl.glUnmapBuffer(bgl.GL_PIXEL_UNPACK_BUFFER) - #_cyclesphi_dll.get_pixels(ctypes.c_void_p(self.pixels.ctypes.data)) +# # # #path_tracer(ptr) +# # # if ptr_pixels: +# # # c_ptr_pixels = ctypes.cast(ptr_pixels, POINTER(c_char)) +# # # # _cyclesphi_dll.get_pixels(c_ptr_pixels) +# # # bgl.glUnmapBuffer(bgl.GL_PIXEL_UNPACK_BUFFER) +# # #_cyclesphi_dll.get_pixels(ctypes.c_void_p(self.pixels.ctypes.data)) - # bgl.glBindBuffer(bgl.GL_PIXEL_UNPACK_BUFFER, 0) - ######################################################################## +# # # bgl.glBindBuffer(bgl.GL_PIXEL_UNPACK_BUFFER, 0) +# # ######################################################################## - bgl.glBindVertexArray(self.vertex_array[0]) - bgl.glDrawArrays(bgl.GL_TRIANGLE_FAN, 0, 4) - bgl.glBindVertexArray(0) - bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0) +# # # bgl.glBindVertexArray(self.vertex_array[0]) +# # # bgl.glDrawArrays(bgl.GL_TRIANGLE_FAN, 0, 4) +# # # bgl.glBindVertexArray(0) +# # # bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0) ############################# class RenderButtonsPanel: @@ -1621,7 +1690,7 @@ def register(): bpy.utils.register_class(RENDER_PT_cyclesphi_sampling) bpy.utils.register_class(RENDER_PT_cyclesphi_rendering) - bpy.types.Scene.openvr_user_prop = OpenVrUserProp() + #bpy.types.Scene.openvr_user_prop = OpenVrUserProp() bpy.utils.register_class(RENDER_OT_cyclesphi_connect) bpy.utils.register_class(RENDER_OT_cyclesphi_disconnect) @@ -1650,7 +1719,7 @@ def unregister(): bpy.utils.unregister_class(RENDER_OT_cyclesphi_disconnect) delattr(bpy.types.Scene, 'cyclesphi') - delattr(bpy.types.Scene, 'openvr_user_prop') + #delattr(bpy.types.Scene, 'openvr_user_prop') for panel in get_panels(): if 'CYCLESPHI' in panel.COMPAT_ENGINES: diff --git a/client/renderengine/src/CMakeLists.txt b/client/renderengine/src/CMakeLists.txt index ff8fc909a95149fe283c7abc0ddcd9a1c623a180..caf2a184e98e4308f93e0098b8067ccd2cd91314 100644 --- a/client/renderengine/src/CMakeLists.txt +++ b/client/renderengine/src/CMakeLists.txt @@ -3,19 +3,35 @@ set(INC ../../api ../../../intern/cycles/kernel/kernels/client ../../ultragrid + #../../../extern/glew/include + ../../../intern/glew-mx ${NVPIPE_INCLUDE_DIR} ${CUDA_TOOLKIT_INCLUDE} ${CUDA_INCLUDE_DIRS} + ${ULTRAGRID_INCLUDE_DIR} + + ${GLEW_INCLUDE_DIR} ) set(SRC cyclesphi.cpp + cyclesphi.h +) + +set(SRC_VRG + cyclesphi_vrg.cpp + cyclesphi.h + ../../ultragrid/vrgstream.h ) set(SRC_HEADERS cyclesphi_api.h cyclesphi_data.h + + ../../cycles/kernel_util.cpp + ../../cycles/kernel_util.h + ../../api/client_api.h ) if(WITH_OPENMP) @@ -26,6 +42,20 @@ if(WITH_SOCKET_UDP) add_definitions(-DWITH_SOCKET_UDP) endif() +if(WITH_SOCKET_ONLY_DATA) + add_definitions(-DWITH_SOCKET_ONLY_DATA) +endif() + +if(WITH_CUDA_GL_INTEROP) + add_definitions(-DWITH_CUDA_GL_INTEROP) + add_definitions(-DWITH_OPENGL) + + add_definitions(-DWITH_LEGACY_OPENGL) + add_definitions(-DGLEW_STATIC) + add_definitions(-DGLEW_NO_ES) + add_definitions(-DGLEW_NO_GLU) +endif() + if(WITH_OPTIX_DENOISER) add_definitions(-DWITH_OPTIX_DENOISER) endif() @@ -34,7 +64,7 @@ if(WITH_NVPIPE) add_definitions(-DWITH_NVPIPE) endif() -if(WITH_CLIENT_ULTRAGRID) +if(WITH_CLIENT_ULTRAGRID AND NOT WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB) add_definitions(-DWITH_CLIENT_ULTRAGRID) list(APPEND SRC @@ -46,7 +76,11 @@ if(WITH_CLIENT_ULTRAGRID) ../../ultragrid/platform_ipc.h ../../../intern/cycles/kernel/kernels/client/kernel_socket.h ) - +elseif(WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB) + 
add_definitions(-DWITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB) + list(APPEND SRC + ../../ultragrid/vrgstream.h + ) else() list(APPEND SRC ../../../intern/cycles/kernel/kernels/client/kernel_socket.cpp @@ -75,9 +109,33 @@ add_definitions(-DWITH_CLIENT_RENDERENGINE) include_directories(${INC}) add_library(cyclesphi_renderengine SHARED ${SRC} ${SRC_HEADERS}) + if(WITH_CUDA_GL_INTEROP) + #target_link_libraries(cyclesphi_renderengine extern_glew) + target_link_libraries(cyclesphi_renderengine glew_mx) + target_link_libraries(cyclesphi_renderengine ${GLEW_LIBRARIES}) + target_link_libraries(cyclesphi_renderengine ${CUDA_CUSTOM_LIBRARIES}) + endif() + if(WITH_NVPIPE) #${CUDA_LIBRARIES} ${CUDA_LIB} target_link_libraries(cyclesphi_renderengine ${NVPIPE_LIBRARIES} ${CUDA_CUSTOM_LIBRARIES}) endif() -install (TARGETS cyclesphi_renderengine DESTINATION lib) +if(WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB) + target_link_libraries(cyclesphi_renderengine ${ULTRAGRID_LIBRARIES}) + + add_library(vrgstream SHARED ${SRC_VRG}) + set_target_properties(vrgstream PROPERTIES PUBLIC_HEADER "../../ultragrid/vrgstream.h") + target_link_libraries(vrgstream cyclesphi_renderengine) + + install (TARGETS vrgstream + LIBRARY DESTINATION lib + PUBLIC_HEADER DESTINATION include + ) +endif() + +#install (TARGETS cyclesphi_renderengine DESTINATION lib) + +install (TARGETS cyclesphi_renderengine + LIBRARY DESTINATION lib + ) diff --git a/client/renderengine/src/cyclesphi.cpp b/client/renderengine/src/cyclesphi.cpp index a0b46deca031142241d7c9571326dc580d0364c6..1997d5d74f74a72b5169abfa3f648521a1a3e3fe 100644 --- a/client/renderengine/src/cyclesphi.cpp +++ b/client/renderengine/src/cyclesphi.cpp @@ -2,11 +2,20 @@ #include "cyclesphi_api.h" #include "cyclesphi_data.h" -//#ifdef WITH_CLIENT_ULTRAGRID -//#include "platform_ipc.h" -//#else -#include "kernel_socket.h" -//#endif +#ifdef WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB +# include "libug.h" + +#ifdef _WIN32 +# define RE_API __declspec(dllexport) +#else +# define RE_API +#endif +# include "cyclesphi.h" + +# include "vrgstream.h" +#else +# include "kernel_socket.h" +#endif #include <iostream> #include <omp.h> @@ -14,33 +23,73 @@ #include <string> #include <stdlib.h> -// int setenv(const char *envname, const char *envval, int overwrite); -#ifdef _WIN32 -int setenv(const char *name, const char *value, int overwrite) -{ - int errcode = 0; - if (!overwrite) { - size_t envsize = 0; - errcode = getenv_s(&envsize, NULL, 0, name); - if (errcode || envsize) - return errcode; - } - return _putenv_s(name, value); -} +//#pragma comment(lib, "opengl32") +//#pragma comment(lib, "glu32") +//#include <gl/gl.h> +//#include <gl/glu.h> + +//#include <GL/glew.h> +//#include <GL/wglew.h> + +#if defined(WITH_OPENGL) +//# pragma comment(lib, "opengl32") +//# include <GL/glew.h> +//# include <GL/GL.h> +# include "glew-mx.h" +//# include <GL/glut.h> #endif ////////////////////////// +//#undef WITH_CUDA_GL_INTEROP +#ifdef WITH_CUDA_GL_INTEROP +# include <cuda_gl_interop.h> +# include <cuda_runtime.h> +#endif +//#endif +//#include "GL/wglew.h" + + +////////////////////////// + +#if defined(WITH_CLIENT_RENDERENGINE_VR) || defined(WITH_VRCLIENT_OPENVR) +# define DWIDTH ((unsigned long long int)g_width * 2L) +#else +# define DWIDTH (g_width) +#endif -unsigned int g_width = 1; +////////^^^^ + +unsigned int g_width = 2; unsigned int g_height = 1; -char *g_pixels = NULL; + +unsigned char *g_pixels_buf = NULL; //[3] = {NULL, NULL, NULL}; + +#ifdef WITH_CUDA_GL_INTEROP +void *g_pixels_buf_d = NULL; //{NULL, NULL, NULL}; +void 
*g_pixels_buf_gl_d = NULL; //{NULL, NULL, NULL}; +cudaGraphicsResource_t g_cuda_resource = 0; +#endif + +#if 0 +GLuint g_bufferIds[3]; // IDs of PBO +GLuint g_textureIds[3]; // ID of texture +#endif + +typedef int render_callback(int); +render_callback *g_render_callback = NULL; + #if defined(WITH_NVPIPE) char *g_pixels_compressed = NULL; #endif +#ifdef WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB +RenderPacket g_packet; +ug_receiver *g_ug = NULL; +#else cyclesphi::cyclesphi_data g_cyclesphi_data; +#endif double g_previousTime[3] = {0, 0, 0}; int g_frameCount[3] = {0, 0, 0}; @@ -52,7 +101,73 @@ size_t polys_size = 0; int current_samples = 0; -////////////////////////// +void setupTexture(int eye); +void freeTexture(int eye); + +///////////////////////// +#ifdef WITH_CUDA_GL_INTEROP +void check_exit() +{ +//#ifndef _WIN32 + exit(-1); +//#endif +} + +#define cu_assert(stmt) \ + { \ + CUresult result = stmt; \ + if (result != CUDA_SUCCESS) { \ + char err[1024]; \ + sprintf(err, "CUDA error: %s in %s, line %d", cuewErrorString(result), #stmt, __LINE__); \ + std::string message(err); \ + fprintf(stderr, "%s\n", message.c_str()); \ + check_exit(); \ + } \ + } \ + (void)0 + +#define cuda_assert(stmt) \ + { \ + if (stmt != cudaSuccess) { \ + char err[1024]; \ + sprintf(err, \ + "CUDA error: %s: %s in %s, line %d", \ + cudaGetErrorName(stmt), \ + cudaGetErrorString(stmt), \ + #stmt, \ + __LINE__); \ + std::string message(err); \ + fprintf(stderr, "%s\n", message.c_str()); \ + check_exit(); \ + } \ + } \ + (void)0 + +bool cuda_error_(cudaError_t result, const std::string &stmt) +{ + if (result == cudaSuccess) + return false; + + char err[1024]; + sprintf(err, + "CUDA error at %s: %s: %s", + stmt.c_str(), + cudaGetErrorName(result), + cudaGetErrorString(result)); + std::string message(err); + // string message = string_printf("CUDA error at %s: %s", stmt.c_str(), cuewErrorString(result)); + fprintf(stderr, "%s\n", message.c_str()); + return true; +} + +#define cuda_error(stmt) cuda_error_(stmt, #stmt) + +void cuda_error_message(const std::string &message) +{ + fprintf(stderr, "%s\n", message.c_str()); +} +#endif +///////////////////////// void displayFPS(int type, int tot_samples = 0) { @@ -63,7 +178,11 @@ void displayFPS(int type, int tot_samples = 0) double fps = (double)g_frameCount[type] / (currentTime - g_previousTime[type]); if (fps > 0.01) { char sTemp[1024]; +#ifdef WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB + unsigned int *samples = &g_packet.frame; +#else int *samples = (int *)&g_cyclesphi_data.step_samples; +#endif sprintf(sTemp, "FPS: %.2f, Total Samples: %d, Samples: : %d, Res: %d x %d", fps, @@ -79,7 +198,7 @@ void displayFPS(int type, int tot_samples = 0) } ////////////////////////// - +#if 0 void resize(int width, int height) { if (width == g_width && height == g_height && g_pixels) @@ -112,59 +231,234 @@ void resize(int width, int height) memset(g_pixels_compressed, 0, width * height * pix_type_size); #endif +#ifdef WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB + g_packet.pix_width_eye = width; + g_packet.pix_height_eye = height; +#else + int *size = (int *)&g_cyclesphi_data.width; + size[0] = width; + size[1] = height; +#endif +} +#endif + +void resize(int width, int height) +{ + if (width == g_width && height == g_height && g_pixels_buf) + return; + + if (g_pixels_buf) { + //freeTexture(2); + +#ifdef WITH_CUDA_GL_INTEROP + cuda_assert(cudaFreeHost(g_pixels_buf)); +#else + delete[] g_pixels_buf; +#endif + } + + g_width = width; + g_height = height; + +#ifdef WITH_CUDA_GL_INTEROP + 
cuda_assert(cudaHostAlloc(&g_pixels_buf, DWIDTH * g_height * SIZE_UCHAR4, cudaHostAllocMapped)); + cuda_assert(cudaHostGetDevicePointer(&g_pixels_buf_d, g_pixels_buf, 0)); +#else + g_pixels_buf = new unsigned char[(size_t)width * height * SIZE_UCHAR4]; + memset(g_pixels_buf, 0, (size_t)width * height * SIZE_UCHAR4); +#endif + + //setupTexture(2); + +#ifdef WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB + g_packet.pix_width_eye = width; + g_packet.pix_height_eye = height; +#else int *size = (int *)&g_cyclesphi_data.width; size[0] = width; size[1] = height; +#endif } +#if 0 int recv_pixels_data() { +#ifndef WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB size_t pix_type_size = SIZE_UCHAR4; -#ifdef WITH_CLIENT_RENDERENGINE_VR +# ifdef WITH_CLIENT_RENDERENGINE_VR pix_type_size *= 2; -#endif +# endif +# if defined(WITH_NVPIPE) + cyclesphi::kernel::socket_recv_nvpipe( + (char *)g_pixels_compressed, (char *)g_pixels, DWIDTH, g_height); +# else + cyclesphi::kernel::socket_recv_data_data((char *)g_pixels, DWIDTH * g_height * pix_type_size); -#if defined(WITH_NVPIPE) - cyclesphi::kernel::socket_recv_nvpipe((char *)g_pixels_compressed, (char *)g_pixels, g_width * 2, g_height); -#else - cyclesphi::kernel::socket_recv_data_data((char *)g_pixels, g_width * g_height * pix_type_size); +# endif + + // cyclesphi::kernel::socket_recv_data_data((char *)¤t_samples, sizeof(int)); + + // char ack = -1; + // cyclesphi::kernel::socket_send_data_cam((char *)&ack, sizeof(char)); + displayFPS(1, current_samples); #endif - //cyclesphi::kernel::socket_recv_data_data((char *)¤t_samples, sizeof(int)); + return 0; +} - //char ack = -1; - //cyclesphi::kernel::socket_send_data_cam((char *)&ack, sizeof(char)); +#endif + +int recv_pixels_data() +{ +#ifndef WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB + +#ifdef WITH_CLIENT_XOR_RLE + + size_t recv_size = 0; + cyclesphi::kernel::socket_recv_data_data((char *)&recv_size, sizeof(recv_size), false); + cyclesphi::kernel::socket_recv_data_data((char *)g_pixels_buf, recv_size - sizeof(recv_size)); + displayFPS(1, current_samples, recv_size); + + char *recv_data = util_xor_rle_to_rgb((char *)g_pixels_buf, g_height, DWIDTH, recv_size); + + memcpy((char *)g_pixels_buf, recv_data, (size_t)g_height * DWIDTH * SIZE_UCHAR4); + +#elif defined(WITH_CLIENT_YUV) + double t2 = omp_get_wtime(); + + cyclesphi::kernel::socket_recv_data_data((char *)g_pixels_buf, + DWIDTH * g_height + DWIDTH * g_height / 2 /*, false*/); + + current_samples = ((int *)g_pixels_buf)[0]; + displayFPS(1, current_samples, DWIDTH * g_height + DWIDTH * g_height / 2); + + double t3 = omp_get_wtime(); +# ifdef WITH_CUDA_GL_INTEROP + + cuda_assert(cudaMemcpy(g_pixels_buf_d, + g_pixels_buf, + DWIDTH * g_height + DWIDTH * g_height / 2, + cudaMemcpyDefault)); // cudaMemcpyDefault cudaMemcpyHostToDevice + +# endif + double t4 = omp_get_wtime(); + + CLIENT_DEBUG_PRINTF3("displayFunc: pix:%f, conv:%f\n", t3 - t2, t4 - t3); +#else + + double t2 = omp_get_wtime(); + +# ifdef WITH_NVPIPE + + cyclesphi::kernel::socket_recv_nvpipe( + (char *)g_pixels_bufs[0], (char *)g_pixels_bufs_d[0], DWIDTH, g_height); + + + displayFPS(1, current_samples, DWIDTH * g_height * pix_type_size); + double t3 = omp_get_wtime(); + +# else + + cyclesphi::kernel::socket_recv_data_data((char *)g_pixels_buf, + DWIDTH * g_height * SIZE_UCHAR4 /*, false*/); + + // cyclesphi::kernel::socket_recv_data_data((char*)g_pixels_bufs[1], + // g_width * g_height * pix_type_size); + + current_samples = ((int *)g_pixels_buf)[0]; displayFPS(1, current_samples); + double t3 = omp_get_wtime(); +# ifdef 
WITH_CUDA_GL_INTEROP + if (g_pixels_buf_d != NULL && g_pixels_buf != NULL) { + cuda_assert(cudaMemcpy(g_pixels_buf_d, + g_pixels_buf, + DWIDTH * g_height * SIZE_UCHAR4, + cudaMemcpyDefault)); // cudaMemcpyDefault cudaMemcpyHostToDevice + + cuda_assert(cudaMemcpy(g_pixels_buf_gl_d, + g_pixels_buf_d, + DWIDTH * g_height * SIZE_UCHAR4, + cudaMemcpyDeviceToDevice)); // cudaMemcpyDefault cudaMemcpyHostToDevice + } +# endif + +# endif + double t4 = omp_get_wtime(); + //CLIENT_DEBUG_PRINTF3("displayFunc: pix:%f, conv:%f\n", t3 - t2, t4 - t3); +#endif + +#endif + return 0; } int send_cam_data() { +#ifndef WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB cyclesphi::kernel::socket_send_data_cam((char *)&g_cyclesphi_data, sizeof(cyclesphi::cyclesphi_data)); - //char ack = 0; - //cyclesphi::kernel::socket_recv_data_data((char *)&ack, sizeof(char)); - + // char ack = 0; + // cyclesphi::kernel::socket_recv_data_data((char *)&ack, sizeof(char)); +#endif return 0; } +int get_DWIDTH() +{ + return DWIDTH; +} + void client_init(const char *server, int port_cam, int port_data, int w, int h, int step_samples) { +#ifdef WITH_OPENGL + //glewInit(); +#endif + +#ifdef WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB + ug_receiver_parameters init_params; + memset(&init_params, 0, sizeof(ug_receiver_parameters)); + init_params.decompress_to = UG_CUDA_RGBA; + //UG_CUDA_RGBA; // : UG_I420; + init_params.display = "vrg"; //"gl";// "vrg"; + init_params.sender = server; // "localhost"; + + //init_params.port = port_data; + init_params.disable_strips = 1; + init_params.port = port_data; + init_params.force_gpu_decoding = true; + + if (g_ug == NULL) + g_ug = ug_receiver_start(&init_params); + +#else +# ifdef WITH_SOCKET_ONLY_DATA + cyclesphi::kernel::init_sockets_data(server, port_data); +# else cyclesphi::kernel::init_sockets_cam(server, port_cam, port_data); +# endif g_cyclesphi_data.step_samples = step_samples; +#endif + resize(w, h); } void client_close() { +#ifdef WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB + if (g_ug != NULL) { + //ug_receiver_done(g_ug); + //g_ug = NULL; + } +#else cyclesphi::kernel::socket_client_close(); cyclesphi::kernel::socket_server_close(); +#endif } void set_camera(void *view_martix, @@ -181,7 +475,7 @@ void set_camera(void *view_martix, float shift_x, float shift_y) { - +#ifndef WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB memcpy( (char *)g_cyclesphi_data.cam.transform_inverse_view_matrix, view_martix, sizeof(float) * 12); @@ -199,6 +493,38 @@ void set_camera(void *view_martix, g_cyclesphi_data.cam.use_view_camera = use_view_camera; g_cyclesphi_data.cam.shift_x = shift_x; g_cyclesphi_data.cam.shift_y = shift_y; +#endif +} + +void set_camera_ug(float left, + float right, + float top, + float bottom, + + float vx, + float vy, + float vz, + + float qx, + float qy, + float qz, + float qw) +{ +#ifdef WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB + g_packet.left_projection_fov.left = left; + g_packet.left_projection_fov.right = right; + g_packet.left_projection_fov.top = top; + g_packet.left_projection_fov.bottom = bottom; + + g_packet.left_view_pose.position.x = vx; + g_packet.left_view_pose.position.y = vy; + g_packet.left_view_pose.position.z = vz; + + g_packet.left_view_pose.orientation.x = qx; + g_packet.left_view_pose.orientation.y = qy; + g_packet.left_view_pose.orientation.z = qz; + g_packet.left_view_pose.orientation.w = qw; +#endif } void set_camera_right(void *view_martix, @@ -236,14 +562,33 @@ void set_camera_right(void *view_martix, g_cyclesphi_data.cam_right.shift_x = shift_x; g_cyclesphi_data.cam_right.shift_y = 
shift_y; - #endif } +void set_camera_right_ug(float left, + float right, + float top, + float bottom, + + float vx, + float vy, + float vz, + + float qx, + float qy, + float qz, + float qw) +{ +} + int get_samples() { +#ifdef WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB + return g_packet.frame; +#else int *samples = (int *)&g_cyclesphi_data.step_samples; return samples[0]; +#endif } int get_current_samples() @@ -251,9 +596,11 @@ int get_current_samples() return current_samples; } -int is_vr() +int get_renderengine_type() { -#ifdef WITH_CLIENT_RENDERENGINE_VR +#ifdef WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB + return 2; +#elif defined(WITH_CLIENT_RENDERENGINE_VR) return 1; #else return 0; @@ -264,10 +611,10 @@ void get_pixels(void *pixels) { #ifdef WITH_CLIENT_RENDERENGINE_VR size_t pix_type_size = SIZE_UCHAR4; - memcpy(pixels, (char *)g_pixels, g_width * g_height * pix_type_size); + memcpy(pixels, (char *)g_pixels_buf, g_width * g_height * pix_type_size); #else size_t pix_type_size = SIZE_UCHAR4; - memcpy(pixels, (char *)g_pixels, g_width * g_height * pix_type_size); + memcpy(pixels, (char *)g_pixels_buf, g_width * g_height * pix_type_size); #endif } @@ -276,7 +623,329 @@ void get_pixels_right(void *pixels) #ifdef WITH_CLIENT_RENDERENGINE_VR size_t pix_type_size = SIZE_UCHAR4; memcpy(pixels, - (char *)g_pixels + (g_width * g_height * pix_type_size), + (char *)g_pixels_buf + (g_width * g_height * pix_type_size), g_width * g_height * pix_type_size); #endif } + +#ifdef WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB +void re_init() +{ +} + +void re_render_frame(char *packet) +{ + memcpy(packet, &g_packet, sizeof(RenderPacket)); + resize(g_packet.pix_width_eye, g_packet.pix_height_eye); +} + +void re_submit_frame(char *packet, char *sbs_image_data) +{ + displayFPS(0, current_samples); + + RenderPacket *rp = (RenderPacket *)packet; + + size_t pix_size = DWIDTH * g_height * SIZE_UCHAR4; + //(DWIDTH * g_height * SIZE_UCHAR4 < + // rp->pix_width_eye * rp->pix_height_eye * 4 * DWIDTH / g_width) ? 
+ // DWIDTH * g_height * SIZE_UCHAR4 : + // rp->pix_width_eye * rp->pix_height_eye * 4 * DWIDTH / g_width; + //resize(2 * rp->pix_width_eye, rp->pix_height_eye); + + // memcpy(g_pixels_buf, sbs_image_data, DWIDTH * g_height * pix_type_size); +# ifdef WITH_CUDA_GL_INTEROP + // glfwMakeContextCurrent(g_windows[0]); + cuda_assert(cudaMemcpy(g_pixels_buf_d, sbs_image_data, pix_size, cudaMemcpyDefault)); +# endif + + if (g_render_callback != NULL) { + g_render_callback(0); + } +} + +#endif + +void cuda_gl_unmap_buffer(unsigned int buffer_id) +{ +#ifdef WITH_CUDA_GL_INTEROP + cuda_assert(cudaGLUnmapBufferObject(buffer_id)); + cuda_assert(cudaGLUnregisterBufferObject(buffer_id)); +#endif +} + +//void toOrtho(int eye, int width, int height) +//{ +//#if 1 +// // set viewport to be the entire window +// glViewport(0, 0, (GLsizei)width, (GLsizei)height); +// +// // set orthographic viewing frustum +// glMatrixMode(GL_PROJECTION); +// glLoadIdentity(); +// +// if (eye == 2) +// glOrtho(0, 1, 0, 1, -1, 1); +// +// if (eye == 0) +// glOrtho(0, 0.5, 0, 1, -1, 1); +// +// if (eye == 1) +// glOrtho(0.5, 1, 0, 1, -1, 1); +// +// // switch to modelview matrix in order to set scene +// glMatrixMode(GL_MODELVIEW); +// glLoadIdentity(); +// #endif +//} +// +//void freeTexture(int eye) +//{ +//#if 1 +// // glfwMakeContextCurrent(g_windows[eye]); +// +// glDeleteTextures(1, &g_textureIds[eye]); +// if (eye == 0 || eye == 1) +// glDeleteBuffers(1, &g_bufferIds[eye]); +// +// if (eye == 2) { +// cuda_assert(cudaGLUnmapBufferObject(g_bufferIds[eye])); +// cuda_assert(cudaGLUnregisterBufferObject(g_bufferIds[eye])); +// +// glDeleteFramebuffers(1, &g_bufferIds[eye]); +// } +//#endif +//} + +void cuda_gl_map_buffer(unsigned int buffer_id) +{ +#ifdef WITH_CUDA_GL_INTEROP + //cuda_assert(cudaGLRegisterBufferObject(buffer_id)); + //cuda_assert(cudaGLMapBufferObject((void **)&g_pixels_buf_d, buffer_id)); + +//g_pixels_buf_d + cuda_assert(cudaGraphicsGLRegisterBuffer(&g_cuda_resource, buffer_id, cudaGraphicsRegisterFlagsNone)); + size_t bytes; + cuda_assert(cudaGraphicsMapResources(1, &g_cuda_resource, 0)); + cuda_assert( + cudaGraphicsResourceGetMappedPointer((void **)&g_pixels_buf_gl_d, &bytes, g_cuda_resource)); + #endif +} + +//// Setup Texture +//void setupTexture(int eye) +//{ +//#if 1 +// // glfwMakeContextCurrent(g_windows[eye]); +// // int w2 = width * 2; +// +// GLuint pboIds[1]; // IDs of PBO +// GLuint textureIds[1]; // ID of texture +// +// // init 2 texture objects +// glGenTextures(1, textureIds); +// g_textureIds[eye] = textureIds[0]; +// +// glBindTexture(GL_TEXTURE_2D, g_textureIds[eye]); +// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); +// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); +// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP); +// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP); +// +// //#if defined(WITH_CUDA_GL_INTEROP) +//#ifdef WITH_CLIENT_YUV +// glTexImage2D( +// GL_TEXTURE_2D, 0, GL_LUMINANCE8, DWIDTH, g_height, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL); +//#else +// glTexImage2D(GL_TEXTURE_2D, +// 0, +// GL_RGBA8, +// (eye == 2) ? 
DWIDTH : DWIDTH / 2, +// g_height, +// 0, +// GL_RGBA, +// GL_UNSIGNED_BYTE, +// NULL); +//#endif +// //#else +// // glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, +// //(GLvoid*)g_pixels); #endif +// +// glBindTexture(GL_TEXTURE_2D, 0); +// +//#if 1 +// if (eye == 0 || eye == 1) +// { +// // create 2 pixel buffer objects, you need to delete them when program exits. +// // glBufferData() with NULL pointer reserves only memory space. +// glGenFramebuffers(1, pboIds); +// g_bufferIds[eye] = pboIds[0]; +// +// glBindFramebuffer(GL_FRAMEBUFFER, g_bufferIds[eye]); +// +// // Set "renderedTexture" as our colour attachement #0 +// //glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, g_textureIds[eye], 0); +// glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, g_textureIds[eye], 0); +// +// // Set the list of draw buffers. +// //GLenum drawBuffers[1] = { GL_COLOR_ATTACHMENT0 }; +// //glDrawBuffers(1, drawBuffers); // "1" is the size of DrawBuffers +// +// glBindFramebuffer(GL_FRAMEBUFFER, 0); +// } +//#endif +// // if (eye == 2) +// { +// +// // create 2 pixel buffer objects, you need to delete them when program exits. +// // glBufferData() with NULL pointer reserves only memory space. +// glGenBuffers(1, pboIds); +// g_bufferIds[eye] = pboIds[0]; +// +// glBindBuffer(GL_PIXEL_UNPACK_BUFFER, g_bufferIds[eye]); +// +//#if defined(WITH_CUDA_GL_INTEROP) +// //#ifdef WITH_CLIENT_YUV +// // glBufferData(GL_PIXEL_UNPACK_BUFFER, (size_t)w2 * height, 0, GL_DYNAMIC_COPY); +// //#else +// glBufferData(GL_PIXEL_UNPACK_BUFFER, +// (size_t)((eye == 2) ? DWIDTH : DWIDTH / 2) * g_height * SIZE_UCHAR4, +// 0, +// GL_DYNAMIC_COPY); +// //#endif +// cuda_assert(cudaGLRegisterBufferObject(g_bufferIds[eye])); +// cuda_assert(cudaGLMapBufferObject((void **)&g_pixels_buf_d, g_bufferIds[eye])); +//#else +// glBufferData(GL_PIXEL_UNPACK_BUFFER, (size_t)w2 * height * SIZE_UCHAR4, 0, GL_STREAM_DRAW); +//#endif +// +// glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); +// } +// +// // Create a texture +// // glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, +// // (GLvoid*)g_pixels); +// +// //// Set up the texture +// // glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); +// // glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); +// // glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP); +// // glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP); +// +// //// Enable textures +// // glEnable(GL_TEXTURE_2D); +//#endif +//} + +#if 0 +void gl_render(int eye) +{ +#if 1 + + + // render to texture + if (eye == 0 || eye == 1) { + // glBindFramebuffer(GL_FRAMEBUFFER, g_bufferIds[eye]); + // toOrtho(g_width/2, g_height); + + // glEnable(GL_MULTISAMPLE); + + // Left Eye + glBindFramebuffer(GL_FRAMEBUFFER, g_bufferIds[eye]); + // glViewport(0, 0, g_width / 2, g_height); + // vr_RenderScene(vr::Eye_Left); + toOrtho(eye, DWIDTH / 2, g_height); + } + else { + + toOrtho(eye, g_width, g_height); + + glBindTexture(GL_TEXTURE_2D, g_textureIds[2]); + + glBegin(GL_QUADS); + + glTexCoord2d(0.0, 0.0); + glVertex2d(0.0, 0.0); + glTexCoord2d(1.0, 0.0); + glVertex2d(1, 0.0); + glTexCoord2d(1.0, 1.0); + glVertex2d(1, 1); + glTexCoord2d(0.0, 1.0); + glVertex2d(0.0, 1); + + glEnd(); + + glBindTexture(GL_TEXTURE_2D, 0); + + return; + } + + // glfwMakeContextCurrent(g_windows[eye]); + + // bind the texture and PBO + glBindTexture(GL_TEXTURE_2D, g_textureIds[2]); + glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 
g_bufferIds[2]); + + // copy pixels from PBO to texture object + // Use offset instead of ponter. + +# ifdef WITH_CLIENT_YUV + glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, DWIDTH, g_height, GL_LUMINANCE, GL_UNSIGNED_BYTE, 0); +# else + glTexSubImage2D(GL_TEXTURE_2D, + 0, + 0, + 0, + DWIDTH, //(eye == 2) ? g_width : g_width / 2, + g_height, + GL_RGBA, + GL_UNSIGNED_BYTE, + 0); +# endif + + + + // draw a point with texture + glBindTexture(GL_TEXTURE_2D, g_textureIds[2]); + + glBegin(GL_QUADS); + + glTexCoord2d(0.0, 0.0); + glVertex2d(0.0, 0.0); + glTexCoord2d(1.0, 0.0); + glVertex2d(1, 0.0); + glTexCoord2d(1.0, 1.0); + glVertex2d(1, 1); + glTexCoord2d(0.0, 1.0); + glVertex2d(0.0, 1); + + + glEnd(); + + // unbind texture + glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); + glBindTexture(GL_TEXTURE_2D, 0); + + // render to texture + if (eye == 0 || eye == 1) { + glBindFramebuffer(GL_FRAMEBUFFER, 0); + } + +#endif +} +#endif + +void draw_texture() +{ + #if 0 + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT); + + //gl_render(0); + //gl_render(1); + gl_render(2); + #endif +} + +void register_render_callback(void *rc) +{ + g_render_callback = (render_callback*)rc; +} diff --git a/client/renderengine/src/cyclesphi.h b/client/renderengine/src/cyclesphi.h new file mode 100644 index 0000000000000000000000000000000000000000..5c70e7ef8079aa6b84ee8ed7ff5fa06e7f2ece1d --- /dev/null +++ b/client/renderengine/src/cyclesphi.h @@ -0,0 +1,20 @@ +#ifndef __CYCLESPHI_H__ +#define __CYCLESPHI_H__ + +#ifndef RE_API +# ifdef __cplusplus +#ifdef _WIN32 +# define RE_API __declspec(dllimport) +#else +# define RE_API +#endif +# else +# define RE_API +# endif +#endif + +RE_API void re_init(); +RE_API void re_render_frame(char *packet); +RE_API void re_submit_frame(char *packet, char *sbs_image_data); + +#endif diff --git a/client/renderengine/src/cyclesphi_api.h b/client/renderengine/src/cyclesphi_api.h index c97e41f1a3566ba3a70ac90dc4d1feccffc2e5a0..706152c0c9a007cd468ae658f3bcf0834a110885 100644 --- a/client/renderengine/src/cyclesphi_api.h +++ b/client/renderengine/src/cyclesphi_api.h @@ -40,6 +40,20 @@ CYCLESPHI_EXPORT_DLL void CYCLESPHI_EXPORT_STD set_camera(void *view_martix, float shift_x, float shift_y); +CYCLESPHI_EXPORT_DLL void CYCLESPHI_EXPORT_STD set_camera_ug(float left, + float right, + float top, + float bottom, + + float vx, + float vy, + float vz, + + float qx, + float qy, + float qz, + float qw); + CYCLESPHI_EXPORT_DLL void CYCLESPHI_EXPORT_STD set_camera_right(void *view_martix, float lens, float nearclip, @@ -54,13 +68,35 @@ CYCLESPHI_EXPORT_DLL void CYCLESPHI_EXPORT_STD set_camera_right(void *view_marti float shift_x, float shift_y); +CYCLESPHI_EXPORT_DLL void CYCLESPHI_EXPORT_STD set_camera_right_ug(float left, + float right, + float top, + float bottom, + + float vx, + float vy, + float vz, + + float qx, + float qy, + float qz, + float qw); + CYCLESPHI_EXPORT_DLL void CYCLESPHI_EXPORT_STD get_pixels(void *pixels); CYCLESPHI_EXPORT_DLL void CYCLESPHI_EXPORT_STD get_pixels_right(void *pixels); CYCLESPHI_EXPORT_DLL int CYCLESPHI_EXPORT_STD get_samples(); CYCLESPHI_EXPORT_DLL int CYCLESPHI_EXPORT_STD get_current_samples(); -CYCLESPHI_EXPORT_DLL int CYCLESPHI_EXPORT_STD is_vr(); +CYCLESPHI_EXPORT_DLL int CYCLESPHI_EXPORT_STD get_renderengine_type(); +CYCLESPHI_EXPORT_DLL void CYCLESPHI_EXPORT_STD draw_texture(); + +CYCLESPHI_EXPORT_DLL void CYCLESPHI_EXPORT_STD cuda_gl_map_buffer(unsigned int buffer_id); +CYCLESPHI_EXPORT_DLL void CYCLESPHI_EXPORT_STD cuda_gl_unmap_buffer(unsigned int 
buffer_id); + +CYCLESPHI_EXPORT_DLL int CYCLESPHI_EXPORT_STD get_DWIDTH(); + +CYCLESPHI_EXPORT_DLL void CYCLESPHI_EXPORT_STD register_render_callback(void *rc); #ifdef __cplusplus } diff --git a/client/renderengine/src/cyclesphi_vrg.cpp b/client/renderengine/src/cyclesphi_vrg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..69f28a15ac07e87cf9b75d4602731f2ad68caff9 --- /dev/null +++ b/client/renderengine/src/cyclesphi_vrg.cpp @@ -0,0 +1,43 @@ +#include "cyclesphi.h" + +#ifdef WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB + +#ifdef _WIN32 +# define VRG_STREAM_API extern "C" __declspec(dllexport) +#else +# define VRG_STREAM_API +#endif + +# include "vrgstream.h" + +#endif + +#include <iostream> +#include <omp.h> +#include <string.h> +#include <string> + +#include <stdlib.h> + +#ifdef WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB +VRG_STREAM_API enum VrgStreamApiError vrgStreamInit(enum VrgInputFormat inputFormat) +{ + re_init(); + return Ok; +} + +VRG_STREAM_API enum VrgStreamApiError vrgStreamRenderFrame(struct RenderPacket *packet) +{ + re_render_frame((char *)packet); + return Ok; +} + +VRG_STREAM_API enum VrgStreamApiError vrgStreamSubmitFrame(struct RenderPacket *packet, + void *sbs_image_data, + enum VrgMemory api) +{ + re_submit_frame((char *)packet, (char *)sbs_image_data); + return Ok; +} + +#endif diff --git a/client/scripts/fiona/run_vrclient_gcc.sh b/client/scripts/fiona/run_vrclient_gcc.sh index b6633dda3571dad74fcffd683584d12de3527909..409ae74d392bb66c0c9a98fb6fc2de111398319e 100644 --- a/client/scripts/fiona/run_vrclient_gcc.sh +++ b/client/scripts/fiona/run_vrclient_gcc.sh @@ -7,19 +7,17 @@ export LD_LIBRARY_PATH=${src}/ultragrid/lib:$LD_LIBRARY_PATH export KERNEL_CUDA_CUBIN=${ROOT_DIR}/build/vrclient_gcc/cycles_cuda/kernel_sm_%cuda_sm%.cubin export LD_LIBRARY_PATH=/usr/local/cuda/lib64:/usr/local/cuda/lib64/stubs:$LD_LIBRARY_PATH -export CLIENT_FILE_KERNEL_GLOBAL=$ROOT_DIR/src/blender-vr-interactive/client/data/mercedes_gpu.kg +export CLIENT_FILE_KERNEL_GLOBAL=$ROOT_DIR/src/blender-vr-interactive/client/data/box_gpu.kg export CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES:-0,1,2,3,4,5,6,7} export DEBUG_RES_W=3840 export DEBUG_RES_H=2160 #rendering of 400 rows -#export CLIENT_TILES="0;100;100;100;200;100;300;100" +export CLIENT_TILES="0;100;100;100;200;100;300;100" -#export LIBUG_COMPRESSION="UG_UNCOMPRESSED" -export LIBUG_COMPRESSION="UG_JPEG" - -export UG_MAX_FPS=90 +export LIBUG_COMPRESSION="UG_UNCOMPRESSED" +#export LIBUG_COMPRESSION="UG_JPEG" #TX #export SOCKET_SERVER_PORT_DATA=5006 diff --git a/client/scripts/vrgin/HmdStreamTester1.7z b/client/scripts/vrgin/HmdStreamTester1.7z new file mode 100644 index 0000000000000000000000000000000000000000..520809cedbe63f3828f724ce7f16b14611b50054 Binary files /dev/null and b/client/scripts/vrgin/HmdStreamTester1.7z differ diff --git a/client/scripts/vrgin/HmdStreamTester2.7z b/client/scripts/vrgin/HmdStreamTester2.7z new file mode 100644 index 0000000000000000000000000000000000000000..358e37f5c588030d5d90e86c2a7ff43877e7950f Binary files /dev/null and b/client/scripts/vrgin/HmdStreamTester2.7z differ diff --git a/client/ultragrid/CMakeLists.txt b/client/ultragrid/CMakeLists.txt index 23e890c9783fbe4259e35c3bf33c177103ad1050..d0723abe357a53967133e572c2d26e1e89560b9d 100644 --- a/client/ultragrid/CMakeLists.txt +++ b/client/ultragrid/CMakeLists.txt @@ -18,6 +18,10 @@ set(SRC platform_ipc.c ) +if(WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB) + add_definitions(-DWITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB) +endif() + if(WITH_RGBA_FORMAT) 
add_definitions(-DWITH_RGBA_FORMAT) endif() diff --git a/client/ultragrid/ultragrid.cpp b/client/ultragrid/ultragrid.cpp index 955cc081ae73fd16687de600d1af9fcd01b419c3..ecca436b0b982867027d31c565b78055b4a6f1e3 100644 --- a/client/ultragrid/ultragrid.cpp +++ b/client/ultragrid/ultragrid.cpp @@ -16,14 +16,14 @@ #include "platform_ipc.h" #ifdef WITH_CLIENT_VRGSTREAM - #include "../vrgstream/vrgstream.h" +# include "../vrgstream/vrgstream.h" #else - #include "vrgstream.h" +# include "vrgstream.h" #endif #include <omp.h> #ifndef _WIN32 -#include <unistd.h> +# include <unistd.h> #endif constexpr const int BUFFERS = 2; @@ -40,8 +40,7 @@ static_assert(UG_CUDA_IPC_HANDLE_SIZE == CUDA_IPC_HANDLE_SIZE, #endif #ifdef WITH_CLIENT_ULTRAGRID_LIB -extern "C" -{ +extern "C" { # include "libug.h" } @@ -52,8 +51,7 @@ ug_sender *g_ug_sender = NULL; using std::atomic_bool; using std::atomic_int; -void convert_render_pkt_to_view_matrix(struct RenderPacket *pkt, - cyclesphi::cyclesphi_data *cdata); +void convert_render_pkt_to_view_matrix(struct RenderPacket *pkt, cyclesphi::cyclesphi_data *cdata); #ifdef RENDER_PACKET_FROM_FILE int g_render_packet_row = 0; @@ -179,11 +177,11 @@ double g_last_time_counter = 0.0; struct shm_frame { atomic_bool buffer_free; int width, height; - int color_space; // 0 - YUV 420, 1 - RGBA - bool stereo; ///< in data there are actually stored sequentially 2 buffers + int color_space; // 0 - YUV 420, 1 - RGBA + bool stereo; ///< in data there are actually stored sequentially 2 buffers char cuda_ipc_mem_handle[UG_CUDA_IPC_HANDLE_SIZE]; char data[MAX_BUF_LEN]; - uint32_t footer; // always 'FOOT' - overflow detection + uint32_t footer; // always 'FOOT' - overflow detection // follow auxilliary data used by UltraGrid for dispose int frame_idx; ///< index inside shm::frames @@ -198,17 +196,17 @@ struct cesnet_shm { struct RenderPacket pkt; ///@todo make this more IPC safe - atomic_int write_head; ///< next position of written frame - atomic_int read_head; ///< next position of frame read + atomic_int write_head; ///< next position of written frame + atomic_int read_head; ///< next position of frame read struct shm_frame frames[BUFFERS]; }; #ifdef WITH_MATH_STUB -#define quat_to_mat4(x, y) -#define invert_m4_m4(x, y) +# define quat_to_mat4(x, y) +# define invert_m4_m4(x, y) #endif -#ifndef WITH_CLIENT_RENDERENGINE +#if !defined(WITH_CLIENT_RENDERENGINE) //|| defined(WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB) void cesnet_set_inverse_matrix(float *transform_inverse_view_matrix, Vector3 p, Quaternion o) { float position[3] = {p.x, p.y, p.z}; @@ -216,7 +214,7 @@ void cesnet_set_inverse_matrix(float *transform_inverse_view_matrix, Vector3 p, float view_mat[4][4], iview_mat[4][4]; quat_to_mat4(view_mat, orientation); copy_v3_v3(view_mat[3], position); - invert_m4_m4(iview_mat, view_mat); + //invert_m4_m4(iview_mat, view_mat); transform_inverse_view_matrix[0] = -view_mat[0][0]; transform_inverse_view_matrix[1] = -view_mat[1][0]; @@ -227,205 +225,258 @@ void cesnet_set_inverse_matrix(float *transform_inverse_view_matrix, Vector3 p, transform_inverse_view_matrix[5] = view_mat[1][2]; transform_inverse_view_matrix[6] = view_mat[2][2]; transform_inverse_view_matrix[7] = view_mat[3][2]; - + transform_inverse_view_matrix[8] = view_mat[0][1]; transform_inverse_view_matrix[9] = view_mat[1][1]; transform_inverse_view_matrix[10] = view_mat[2][1]; transform_inverse_view_matrix[11] = view_mat[3][1]; -// transform_inverse_view_matrix[0] = view_mat[0][0]; -// transform_inverse_view_matrix[1] = view_mat[1][0]; -// 
transform_inverse_view_matrix[2] = view_mat[2][0]; -// transform_inverse_view_matrix[3] = view_mat[3][0]; -// -// transform_inverse_view_matrix[4] = view_mat[0][1]; -// transform_inverse_view_matrix[5] = view_mat[1][1]; -// transform_inverse_view_matrix[6] = view_mat[2][1]; -// transform_inverse_view_matrix[7] = view_mat[3][1]; -// -// transform_inverse_view_matrix[8] = view_mat[0][2]; -// transform_inverse_view_matrix[9] = view_mat[1][2]; -// transform_inverse_view_matrix[10] = view_mat[2][2]; -// transform_inverse_view_matrix[11] = view_mat[3][2]; - - //transform_inverse_view_matrix[0] = iview_mat[0][0]; - //transform_inverse_view_matrix[1] = iview_mat[1][0]; - //transform_inverse_view_matrix[2] = iview_mat[2][0]; - //transform_inverse_view_matrix[3] = iview_mat[3][0]; - - //transform_inverse_view_matrix[4] = iview_mat[0][1]; - //transform_inverse_view_matrix[5] = iview_mat[1][1]; - //transform_inverse_view_matrix[6] = iview_mat[2][1]; - //transform_inverse_view_matrix[7] = iview_mat[3][1]; - - //transform_inverse_view_matrix[8] = iview_mat[0][2]; - //transform_inverse_view_matrix[9] = iview_mat[1][2]; - //transform_inverse_view_matrix[10] = iview_mat[2][2]; - //transform_inverse_view_matrix[11] = iview_mat[3][2]; - - //transform_inverse_view_matrix[0] = -iview_mat[0][0]; - //transform_inverse_view_matrix[1] = -iview_mat[1][0]; - //transform_inverse_view_matrix[2] = -iview_mat[2][0]; - //transform_inverse_view_matrix[3] = -iview_mat[3][0]; - - //transform_inverse_view_matrix[4] = iview_mat[0][2]; - //transform_inverse_view_matrix[5] = iview_mat[1][2]; - //transform_inverse_view_matrix[6] = iview_mat[2][2]; - //transform_inverse_view_matrix[7] = iview_mat[3][2]; - - //transform_inverse_view_matrix[8] = iview_mat[0][1]; - //transform_inverse_view_matrix[9] = iview_mat[1][1]; - //transform_inverse_view_matrix[10] = iview_mat[2][1]; - //transform_inverse_view_matrix[11] = iview_mat[3][1]; + // transform_inverse_view_matrix[0] = view_mat[0][0]; + // transform_inverse_view_matrix[1] = view_mat[1][0]; + // transform_inverse_view_matrix[2] = view_mat[2][0]; + // transform_inverse_view_matrix[3] = view_mat[3][0]; + // + // transform_inverse_view_matrix[4] = view_mat[0][1]; + // transform_inverse_view_matrix[5] = view_mat[1][1]; + // transform_inverse_view_matrix[6] = view_mat[2][1]; + // transform_inverse_view_matrix[7] = view_mat[3][1]; + // + // transform_inverse_view_matrix[8] = view_mat[0][2]; + // transform_inverse_view_matrix[9] = view_mat[1][2]; + // transform_inverse_view_matrix[10] = view_mat[2][2]; + // transform_inverse_view_matrix[11] = view_mat[3][2]; + + // transform_inverse_view_matrix[0] = iview_mat[0][0]; + // transform_inverse_view_matrix[1] = iview_mat[1][0]; + // transform_inverse_view_matrix[2] = iview_mat[2][0]; + // transform_inverse_view_matrix[3] = iview_mat[3][0]; + + // transform_inverse_view_matrix[4] = iview_mat[0][1]; + // transform_inverse_view_matrix[5] = iview_mat[1][1]; + // transform_inverse_view_matrix[6] = iview_mat[2][1]; + // transform_inverse_view_matrix[7] = iview_mat[3][1]; + + // transform_inverse_view_matrix[8] = iview_mat[0][2]; + // transform_inverse_view_matrix[9] = iview_mat[1][2]; + // transform_inverse_view_matrix[10] = iview_mat[2][2]; + // transform_inverse_view_matrix[11] = iview_mat[3][2]; + + // transform_inverse_view_matrix[0] = -iview_mat[0][0]; + // transform_inverse_view_matrix[1] = -iview_mat[1][0]; + // transform_inverse_view_matrix[2] = -iview_mat[2][0]; + // transform_inverse_view_matrix[3] = -iview_mat[3][0]; + + // 
transform_inverse_view_matrix[4] = iview_mat[0][2]; + // transform_inverse_view_matrix[5] = iview_mat[1][2]; + // transform_inverse_view_matrix[6] = iview_mat[2][2]; + // transform_inverse_view_matrix[7] = iview_mat[3][2]; + + // transform_inverse_view_matrix[8] = iview_mat[0][1]; + // transform_inverse_view_matrix[9] = iview_mat[1][1]; + // transform_inverse_view_matrix[10] = iview_mat[2][1]; + // transform_inverse_view_matrix[11] = iview_mat[3][1]; +} +#endif +#if defined(WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB) +void cesnet_set_inverse_matrix(float *transform_inverse_view_matrix, Vector3 p, Quaternion o) +{ + float position[3] = {p.x, p.y, p.z}; + float orientation[4] = {o.w, o.x, o.y, o.z}; + float view_mat[4][4], iview_mat[4][4]; + quat_to_mat4(view_mat, orientation); + copy_v3_v3(view_mat[3], position); + //invert_m4_m4(iview_mat, view_mat); + + //transform_inverse_view_matrix[0] = -view_mat[0][0]; + //transform_inverse_view_matrix[1] = -view_mat[1][0]; + //transform_inverse_view_matrix[2] = -view_mat[2][0]; + //transform_inverse_view_matrix[3] = -view_mat[3][0]; + + //transform_inverse_view_matrix[4] = view_mat[0][2]; + //transform_inverse_view_matrix[5] = view_mat[1][2]; + //transform_inverse_view_matrix[6] = view_mat[2][2]; + //transform_inverse_view_matrix[7] = view_mat[3][2]; + + //transform_inverse_view_matrix[8] = view_mat[0][1]; + //transform_inverse_view_matrix[9] = view_mat[1][1]; + //transform_inverse_view_matrix[10] = view_mat[2][1]; + //transform_inverse_view_matrix[11] = view_mat[3][1]; + + transform_inverse_view_matrix[0] = view_mat[0][0]; + transform_inverse_view_matrix[1] = view_mat[1][0]; + transform_inverse_view_matrix[2] = view_mat[2][0]; + transform_inverse_view_matrix[3] = view_mat[3][0]; + + transform_inverse_view_matrix[4] = view_mat[0][1]; + transform_inverse_view_matrix[5] = view_mat[1][1]; + transform_inverse_view_matrix[6] = view_mat[2][1]; + transform_inverse_view_matrix[7] = view_mat[3][1]; + + transform_inverse_view_matrix[8] = view_mat[0][2]; + transform_inverse_view_matrix[9] = view_mat[1][2]; + transform_inverse_view_matrix[10] = view_mat[2][2]; + transform_inverse_view_matrix[11] = view_mat[3][2]; } #endif -static void render_packet_received_callback(void *udata, struct RenderPacket *pkt) { - (void) udata; - printf("Received RenderPacket: %p\n", pkt); - convert_render_pkt_to_view_matrix(pkt, &g_cdata); +static void render_packet_received_callback(void *udata, struct RenderPacket *pkt) +{ + (void)udata; + //printf("Received RenderPacket: %p\n", pkt); + if (pkt !=NULL && pkt->pix_width_eye != 0 && pkt->pix_height_eye != 0) + convert_render_pkt_to_view_matrix(pkt, &g_cdata); + else + printf("Received RenderPacket is empty: %p\n", pkt); } // camera view matrix // vector is on 3,7,11 position void cesnet_set_camera_data(cyclesphi::cyclesphi_data *cdata) { - if (!g_cdata_init) { - // printf("get_camera_matrices\n"); - g_cdata.step_samples = 1; - g_cdata.width = 7680 / 2 ; - g_cdata.height = 2160 ; - - //g_cdata.width = 1440; - //g_cdata.height = 1600; - const char* env_res_w = getenv("DEBUG_RES_W"); - const char* env_res_h = getenv("DEBUG_RES_H"); - if (env_res_w != NULL && env_res_h != NULL) { - g_cdata.width = atoi(env_res_w); - g_cdata.height = atoi(env_res_h); - } + if (!g_cdata_init) { + // printf("get_camera_matrices\n"); + g_cdata.step_samples = 5; + g_cdata.width = 7680 / 2; + g_cdata.height = 2160; + + // g_cdata.width = 1440; + // g_cdata.height = 1600; + const char *env_res_w = getenv("DEBUG_RES_W"); + const char *env_res_h = 
getenv("DEBUG_RES_H"); + if (env_res_w != NULL && env_res_h != NULL) { + g_cdata.width = atoi(env_res_w); + g_cdata.height = atoi(env_res_h); + } #ifndef RENDER_PACKET_FROM_FILE - g_cdata.cam.transform_inverse_view_matrix[0] = -0.437973917; - g_cdata.cam.transform_inverse_view_matrix[1] = -0.267504632; - g_cdata.cam.transform_inverse_view_matrix[2] = 0.858265936; - g_cdata.cam.transform_inverse_view_matrix[3] = 1.29672277; - g_cdata.cam.transform_inverse_view_matrix[4] = 0.874406159; - g_cdata.cam.transform_inverse_view_matrix[5] = 0.0949685797; - g_cdata.cam.transform_inverse_view_matrix[6] = 0.475810111; - g_cdata.cam.transform_inverse_view_matrix[7] = 1.42842984; - g_cdata.cam.transform_inverse_view_matrix[8] = -0.208789662; - g_cdata.cam.transform_inverse_view_matrix[9] = 0.958865106; - g_cdata.cam.transform_inverse_view_matrix[10] = 0.192313895; - g_cdata.cam.transform_inverse_view_matrix[11] = 1.09617722; - - g_cdata.cam.lens = 14.0577269f; - g_cdata.cam.shift_x = -0.0295778885f; - g_cdata.cam.shift_y = 0.0f; + g_cdata.cam.transform_inverse_view_matrix[0] = -0.437973917; + g_cdata.cam.transform_inverse_view_matrix[1] = -0.267504632; + g_cdata.cam.transform_inverse_view_matrix[2] = 0.858265936; + g_cdata.cam.transform_inverse_view_matrix[3] = 1.29672277; + g_cdata.cam.transform_inverse_view_matrix[4] = 0.874406159; + g_cdata.cam.transform_inverse_view_matrix[5] = 0.0949685797; + g_cdata.cam.transform_inverse_view_matrix[6] = 0.475810111; + g_cdata.cam.transform_inverse_view_matrix[7] = 1.42842984; + g_cdata.cam.transform_inverse_view_matrix[8] = -0.208789662; + g_cdata.cam.transform_inverse_view_matrix[9] = 0.958865106; + g_cdata.cam.transform_inverse_view_matrix[10] = 0.192313895; + g_cdata.cam.transform_inverse_view_matrix[11] = 1.09617722; + + g_cdata.cam.lens = 14.0577269f; + g_cdata.cam.shift_x = -0.0295778885f; + g_cdata.cam.shift_y = 0.0f; #endif - g_cdata.cam.clip_start = 0.1f; - g_cdata.cam.clip_end = 1000.0f; - g_cdata.cam.sensor_width = g_sensor_width; - g_cdata.cam.sensor_fit = 0; + g_cdata.cam.clip_start = 0.1f; + g_cdata.cam.clip_end = 1000.0f; + g_cdata.cam.sensor_width = g_sensor_width; + g_cdata.cam.sensor_fit = 0; - g_cdata.cam.view_camera_zoom = 1.0f; - g_cdata.cam.view_camera_offset[0] = 0; - g_cdata.cam.view_camera_offset[1] = 0; - g_cdata.cam.use_view_camera = 1; + g_cdata.cam.view_camera_zoom = 1.0f; + g_cdata.cam.view_camera_offset[0] = 0; + g_cdata.cam.view_camera_offset[1] = 0; + g_cdata.cam.use_view_camera = 1; #ifndef RENDER_PACKET_FROM_FILE - g_cdata.cam_right.transform_inverse_view_matrix[0] = -0.437973917; - g_cdata.cam_right.transform_inverse_view_matrix[1] = -0.267504632; - g_cdata.cam_right.transform_inverse_view_matrix[2] = 0.858265936; - g_cdata.cam_right.transform_inverse_view_matrix[3] = 1.26781642; - g_cdata.cam_right.transform_inverse_view_matrix[4] = 0.874406159; - g_cdata.cam_right.transform_inverse_view_matrix[5] = 0.0949685797; - g_cdata.cam_right.transform_inverse_view_matrix[6] = 0.475810111; - g_cdata.cam_right.transform_inverse_view_matrix[7] = 1.48614061; - g_cdata.cam_right.transform_inverse_view_matrix[8] = -0.208789662; - g_cdata.cam_right.transform_inverse_view_matrix[9] = 0.958865106; - g_cdata.cam_right.transform_inverse_view_matrix[10] = 0.192313895; - g_cdata.cam_right.transform_inverse_view_matrix[11] = 1.08239710; - - g_cdata.cam_right.lens = 14.0856476; - g_cdata.cam_right.shift_x = 0.0284842327; - g_cdata.cam_right.shift_y = 0.0f; + g_cdata.cam_right.transform_inverse_view_matrix[0] = -0.437973917; + 
g_cdata.cam_right.transform_inverse_view_matrix[1] = -0.267504632; + g_cdata.cam_right.transform_inverse_view_matrix[2] = 0.858265936; + g_cdata.cam_right.transform_inverse_view_matrix[3] = 1.26781642; + g_cdata.cam_right.transform_inverse_view_matrix[4] = 0.874406159; + g_cdata.cam_right.transform_inverse_view_matrix[5] = 0.0949685797; + g_cdata.cam_right.transform_inverse_view_matrix[6] = 0.475810111; + g_cdata.cam_right.transform_inverse_view_matrix[7] = 1.48614061; + g_cdata.cam_right.transform_inverse_view_matrix[8] = -0.208789662; + g_cdata.cam_right.transform_inverse_view_matrix[9] = 0.958865106; + g_cdata.cam_right.transform_inverse_view_matrix[10] = 0.192313895; + g_cdata.cam_right.transform_inverse_view_matrix[11] = 1.08239710; + + g_cdata.cam_right.lens = 14.0856476; + g_cdata.cam_right.shift_x = 0.0284842327; + g_cdata.cam_right.shift_y = 0.0f; #endif - g_cdata.cam_right.clip_start = 0.1f; - g_cdata.cam_right.clip_end = 1000.0f; - g_cdata.cam_right.sensor_width = g_sensor_width; - g_cdata.cam_right.sensor_fit = 0; + g_cdata.cam_right.clip_start = 0.1f; + g_cdata.cam_right.clip_end = 1000.0f; + g_cdata.cam_right.sensor_width = g_sensor_width; + g_cdata.cam_right.sensor_fit = 0; - g_cdata.cam_right.view_camera_zoom = 1.0f; - g_cdata.cam_right.view_camera_offset[0] = 0; - g_cdata.cam_right.view_camera_offset[1] = 0; - g_cdata.cam_right.use_view_camera = 1; + g_cdata.cam_right.view_camera_zoom = 1.0f; + g_cdata.cam_right.view_camera_offset[0] = 0; + g_cdata.cam_right.view_camera_offset[1] = 0; + g_cdata.cam_right.use_view_camera = 1; #ifdef WITH_CLIENT_ULTRAGRID_LIB - memset(&g_init_params, 0, sizeof(ug_sender_parameters)); - - const char *env_p_name_data = std::getenv("SOCKET_SERVER_NAME_DATA"); - if(env_p_name_data) - g_init_params.receiver = env_p_name_data; - else - g_init_params.receiver = "localhost"; - - const char *env_p_port_data = std::getenv("SOCKET_SERVER_PORT_DATA"); - if(env_p_port_data) - g_init_params.tx_port = atoi(env_p_port_data); - - const char *env_p_port_cam = std::getenv("SOCKET_SERVER_PORT_CAM"); - if(env_p_port_cam) - g_init_params.rx_port = atoi(env_p_port_cam); - - g_init_params.compression = UG_JPEG; //UG_JPEG; //UG_UNCOMPRESSED; //UG_JPEG;// - const char* env_libug_comp = getenv("LIBUG_COMPRESSION"); - if (env_libug_comp != NULL) { - if (!strcmp(env_libug_comp, "UG_UNCOMPRESSED")) { - g_init_params.compression = UG_UNCOMPRESSED; - } - if (!strcmp(env_libug_comp, "UG_JPEG")) { - g_init_params.compression = UG_JPEG; - } - } - g_init_params.rprc = render_packet_received_callback; - g_init_params.rprc_udata = NULL; // not used by render_packet_received_callback() - //g_init_params.mtu = 8900; - g_init_params.mtu = 9000; - - g_ug_sender = ug_sender_init(&g_init_params); - printf("ug_sender_init: %s, %lld\n", g_init_params.receiver, g_ug_sender); + memset(&g_init_params, 0, sizeof(ug_sender_parameters)); + + const char *env_p_name_data = std::getenv("SOCKET_SERVER_NAME_DATA"); + if (env_p_name_data) + g_init_params.receiver = env_p_name_data; + else + g_init_params.receiver = "localhost"; + + const char *env_p_port_data = std::getenv("SOCKET_SERVER_PORT_DATA"); + if (env_p_port_data) + g_init_params.port = atoi(env_p_port_data); + + //const char *env_p_port_cam = std::getenv("SOCKET_SERVER_PORT_CAM"); + //if (env_p_port_cam) + // g_init_params.rx_port = atoi(env_p_port_cam); + + g_init_params.compression = UG_JPEG; // UG_JPEG; //UG_UNCOMPRESSED; //UG_JPEG;// + const char *env_libug_comp = getenv("LIBUG_COMPRESSION"); + if (env_libug_comp != NULL) { + if 
(!strcmp(env_libug_comp, "UG_UNCOMPRESSED")) { + g_init_params.compression = UG_UNCOMPRESSED; + } + if (!strcmp(env_libug_comp, "UG_JPEG")) { + g_init_params.compression = UG_JPEG; + } + } + g_init_params.rprc = render_packet_received_callback; + g_init_params.rprc_udata = NULL; // not used by render_packet_received_callback() + + const char *env_libug_mtu = getenv("LIBUG_MTU"); + if (env_libug_mtu != NULL) { + g_init_params.mtu = atoi(env_libug_mtu); + } + + g_init_params.disable_strips = 1; + + g_ug_sender = ug_sender_init(&g_init_params); + printf("ug_sender_init: %s, %lld\n", g_init_params.receiver, g_ug_sender); #endif - const char *env_p_max_fps = std::getenv("UG_MAX_FPS"); - if(env_p_max_fps) - g_max_fps = atof(env_p_max_fps); + const char *env_p_max_fps = std::getenv("UG_MAX_FPS"); + if (env_p_max_fps) + g_max_fps = atof(env_p_max_fps); - g_cdata_init = true; - } + g_cdata_init = true; + } memcpy(cdata, &g_cdata, sizeof(cyclesphi::cyclesphi_data)); - if(omp_get_wtime() - g_last_time < 1.0 / g_max_fps /*&& g_last_time_counter > g_max_fps*/) { + if (omp_get_wtime() - g_last_time < 1.0 / g_max_fps /*&& g_last_time_counter > g_max_fps*/) { #ifndef _WIN32 usleep(1000.0 * 1000.0 * (1.0 / g_max_fps - omp_get_wtime() + g_last_time)); #endif - //g_cdata.step_samples++; - //g_last_time_counter = 0; + // g_cdata.step_samples++; + // g_last_time_counter = 0; } g_last_time = omp_get_wtime(); - //g_last_time_counter = g_last_time_counter + 1.0; + // g_last_time_counter = g_last_time_counter + 1.0; } #define READY_TO_CONSUME_FRAME 0 #define FRAME_READY 1 #define SHOULD_EXIT_LOCK 2 -static bool initialize_shm(platform_ipc_shm_t *shm_id, struct cesnet_shm **cesnet_shm, platform_ipc_sem_t *sem, bool *post_should_exit) +static bool initialize_shm(platform_ipc_shm_t *shm_id, + struct cesnet_shm **cesnet_shm, + platform_ipc_sem_t *sem, + bool *post_should_exit) { *shm_id = PLATFORM_IPC_ERR; *cesnet_shm = (struct cesnet_shm *)PLATFORM_IPC_ERR; @@ -481,7 +532,10 @@ static bool initialize_shm(platform_ipc_shm_t *shm_id, struct cesnet_shm **cesne return true; } -static void done_shm(platform_ipc_shm_t *shm_id, struct cesnet_shm **cesnet_shm, platform_ipc_sem_t *sem, bool *post_should_exit) +static void done_shm(platform_ipc_shm_t *shm_id, + struct cesnet_shm **cesnet_shm, + platform_ipc_sem_t *sem, + bool *post_should_exit) { if (*post_should_exit) { if (!platform_ipc_sem_post(sem[SHOULD_EXIT_LOCK])) { // try to release the "lock" @@ -498,7 +552,7 @@ static void done_shm(platform_ipc_shm_t *shm_id, struct cesnet_shm **cesnet_shm, } if (*cesnet_shm != (void *)PLATFORM_IPC_ERR) { platform_ipc_shm_detach(*cesnet_shm); - *cesnet_shm = (struct cesnet_shm *) PLATFORM_IPC_ERR; + *cesnet_shm = (struct cesnet_shm *)PLATFORM_IPC_ERR; } if (*shm_id != PLATFORM_IPC_ERR) { platform_ipc_shm_done(*shm_id, false); @@ -511,44 +565,46 @@ static bool post_should_exit = false; static platform_ipc_shm_t shm_id = PLATFORM_IPC_ERR; static platform_ipc_sem_t sem[3] = {PLATFORM_IPC_ERR, PLATFORM_IPC_ERR, PLATFORM_IPC_ERR}; -# if defined(WITH_RGBA_FORMAT) || defined(WITH_CLIENT_ULTRAGRID_LIB) -static void shm_cleanup() { -#ifdef DEBUG +#if defined(WITH_RGBA_FORMAT) || defined(WITH_CLIENT_ULTRAGRID_LIB) +static void shm_cleanup() +{ +# ifdef DEBUG fprintf(stderr, "shm_cleanup() called\n"); -#endif +# endif done_shm(&shm_id, &cesnet_shm, sem, &post_should_exit); } static bool shm_cleanup_registered_atexit = false; static char scratch_buffer[MAX_BUF_LEN]; -char* shared_pixels_buffer_rgba8888 = scratch_buffer; +char 
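// --- Illustrative sketch (not part of the patch) ---------------------------
// The initialization above reads its configuration from environment variables
// (SOCKET_SERVER_NAME_DATA, SOCKET_SERVER_PORT_DATA, LIBUG_COMPRESSION,
// LIBUG_MTU, UG_MAX_FPS, DEBUG_RES_W/H). Small helpers like the ones below
// keep that pattern in one place; env_str/env_int/env_double are hypothetical
// names, not part of the code base.
#include <cstdlib>

static const char *env_str(const char *name, const char *fallback)
{
  const char *v = std::getenv(name);
  return v ? v : fallback;
}

static int env_int(const char *name, int fallback)
{
  const char *v = std::getenv(name);
  return v ? std::atoi(v) : fallback;
}

static double env_double(const char *name, double fallback)
{
  const char *v = std::getenv(name);
  return v ? std::atof(v) : fallback;
}

// Example usage mirroring the settings read above:
//   g_init_params.receiver = env_str("SOCKET_SERVER_NAME_DATA", "localhost");
//   g_init_params.port     = env_int("SOCKET_SERVER_PORT_DATA", g_init_params.port);
//   g_max_fps              = env_double("UG_MAX_FPS", g_max_fps);
// ---------------------------------------------------------------------------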
*shared_pixels_buffer_rgba8888 = scratch_buffer; -int cesnet_get_buffers(char** buf1, char** buf2) { - if (cesnet_shm == (struct cesnet_shm *) PLATFORM_IPC_ERR) { - if (!initialize_shm(&shm_id, &cesnet_shm, sem, &post_should_exit)) { - done_shm(&shm_id, &cesnet_shm, sem, &post_should_exit); - return 0; - } - if (!shm_cleanup_registered_atexit) { - atexit(shm_cleanup); - shm_cleanup_registered_atexit = true; - } +int cesnet_get_buffers(char **buf1, char **buf2) +{ + if (cesnet_shm == (struct cesnet_shm *)PLATFORM_IPC_ERR) { + if (!initialize_shm(&shm_id, &cesnet_shm, sem, &post_should_exit)) { + done_shm(&shm_id, &cesnet_shm, sem, &post_should_exit); + return 0; + } + if (!shm_cleanup_registered_atexit) { + atexit(shm_cleanup); + shm_cleanup_registered_atexit = true; + } } - *buf1 = (char*)&cesnet_shm->frames[0].data; - *buf2 = (char*)&cesnet_shm->frames[1].data; + *buf1 = (char *)&cesnet_shm->frames[0].data; + *buf2 = (char *)&cesnet_shm->frames[1].data; return MAX_BUF_LEN; } # if defined(WITH_BMP_OUTPUT) - int g_required_exit_num = 0; +int g_required_exit_num = 0; # endif // rendering buffer - results in RGBA format -int cesnet_set_render_buffer_rgba(unsigned char* rgba, int width, int height) +int cesnet_set_render_buffer_rgba(unsigned char *rgba, int width, int height) { - //cesnet_shm->write_head = (rgba == (char*)&cesnet_shm->frames[0].data) ? 0 : 1; + // cesnet_shm->write_head = (rgba == (char*)&cesnet_shm->frames[0].data) ? 0 : 1; // printf("cesnet_set_render_buffer_rgba: %d %d %d %d\n", rgba[0], rgba[1], rgba[2], rgba[4]); # if defined(WITH_BMP_OUTPUT) @@ -563,26 +619,25 @@ int cesnet_set_render_buffer_rgba(unsigned char* rgba, int width, int height) return -1; } -#ifdef WITH_CLIENT_ULTRAGRID_LIB -#ifdef WITH_RGBA_FORMAT - ug_send_frame(g_ug_sender, (char*)rgba, UG_RGBA, width, height); -#else - //printf("ug_send_frame: %d x %d\n", width, height); - ug_send_frame(g_ug_sender, (char*)rgba, UG_I420, width, height); -#endif +# ifdef WITH_CLIENT_ULTRAGRID_LIB +# ifdef WITH_RGBA_FORMAT + ug_send_frame(g_ug_sender, (char *)rgba, UG_RGBA, width, height, 0); +# else + // printf("ug_send_frame: %d x %d\n", width, height); + ug_send_frame(g_ug_sender, (char *)rgba, UG_I420, width, height, 0); +# endif return 0; -#endif - +# endif - if (cesnet_shm == (struct cesnet_shm *) PLATFORM_IPC_ERR) { - if (!initialize_shm(&shm_id, &cesnet_shm, sem, &post_should_exit)) { - done_shm(&shm_id, &cesnet_shm, sem, &post_should_exit); - return -1; - } - if (!shm_cleanup_registered_atexit) { - atexit(shm_cleanup); - shm_cleanup_registered_atexit = true; - } + if (cesnet_shm == (struct cesnet_shm *)PLATFORM_IPC_ERR) { + if (!initialize_shm(&shm_id, &cesnet_shm, sem, &post_should_exit)) { + done_shm(&shm_id, &cesnet_shm, sem, &post_should_exit); + return -1; + } + if (!shm_cleanup_registered_atexit) { + atexit(shm_cleanup); + shm_cleanup_registered_atexit = true; + } } if (cesnet_shm->ug_exited) { @@ -590,7 +645,8 @@ int cesnet_set_render_buffer_rgba(unsigned char* rgba, int width, int height) done_shm(&shm_id, &cesnet_shm, sem, &post_should_exit); } - if (!platform_ipc_sem_wait(sem[READY_TO_CONSUME_FRAME])) { // wait until UltraGrid is able to take a frame + if (!platform_ipc_sem_wait( + sem[READY_TO_CONSUME_FRAME])) { // wait until UltraGrid is able to take a frame fprintf(stderr, "Waiting for UltraGrid ready failed!\n"); return -1; } @@ -608,7 +664,7 @@ int cesnet_set_render_buffer_rgba(unsigned char* rgba, int width, int height) cesnet_shm->frames[cesnet_shm->write_head].width = width; 
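// --- Illustrative sketch (not part of the patch) ---------------------------
// cesnet_set_render_buffer_rgba() above hands frames to UltraGrid through a
// shared-memory ring guarded by the READY_TO_CONSUME_FRAME / FRAME_READY
// semaphores (platform_ipc_* wraps the OS-specific primitives). The producer
// side of that handshake looks roughly like this; plain POSIX semaphores are
// used purely for illustration, and frame_t/shm_ring_t/submit_frame are
// made-up names.
#include <semaphore.h>
#include <cstddef>
#include <cstring>

struct frame_t {
  int width, height;
  bool buffer_free;
  unsigned char data[1 << 20];  // placeholder size
};

struct shm_ring_t {
  int write_head;
  frame_t frames[2];
};

static bool submit_frame(shm_ring_t *ring, sem_t *ready, sem_t *frame_ready,
                         const unsigned char *rgba, int w, int h, size_t bytes)
{
  if (sem_wait(ready) != 0)            // wait until the consumer can take a frame
    return false;
  frame_t &f = ring->frames[ring->write_head];
  std::memcpy(f.data, rgba, bytes);    // or render directly into f.data
  f.width = w;
  f.height = h;
  f.buffer_free = false;
  ring->write_head = (ring->write_head + 1) % 2;
  return sem_post(frame_ready) == 0;   // tell the consumer a frame is available
}
// ---------------------------------------------------------------------------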
cesnet_shm->frames[cesnet_shm->write_head].height = height; - cesnet_shm->frames[cesnet_shm->write_head].color_space = 1; // RGBA + cesnet_shm->frames[cesnet_shm->write_head].color_space = 1; // RGBA cesnet_shm->frames[cesnet_shm->write_head].buffer_free = false; cesnet_shm->write_head = (cesnet_shm->write_head + 1) % BUFFERS; @@ -629,7 +685,11 @@ int cesnet_set_render_buffer_rgba(unsigned char* rgba, int width, int height) } while (0) bool copy_data(struct cesnet_shm *cesnet_shm, - unsigned char *y, unsigned char *u, unsigned char *v, int width, int height) + unsigned char *y, + unsigned char *u, + unsigned char *v, + int width, + int height) { void *(*copy_fn)(void *dest, const void *src, size_t n) = memcpy; @@ -642,16 +702,16 @@ bool copy_data(struct cesnet_shm *cesnet_shm, fprintf(stderr, "CUDA support not compiled in! Use 'uv -t shm' to use standard SHM.\n"); return false; } -# endif // !defined WITH_CLIENT_CUDA +# endif // !defined WITH_CLIENT_CUDA - char *out = [&](){ + char *out = [&]() { if (cesnet_shm->use_gpu) { char *d_data = nullptr; # ifdef WITH_CLIENT_CUDA CUDA_CHECK(cudaIpcOpenMemHandle( - (void **)&d_data, - (cudaIpcMemHandle_t &)cesnet_shm->frames[cesnet_shm->write_head].cuda_ipc_mem_handle, - cudaIpcMemLazyEnablePeerAccess)); + (void **)&d_data, + (cudaIpcMemHandle_t &)cesnet_shm->frames[cesnet_shm->write_head].cuda_ipc_mem_handle, + cudaIpcMemLazyEnablePeerAccess)); # endif return d_data; } @@ -668,42 +728,49 @@ bool copy_data(struct cesnet_shm *cesnet_shm, if (cesnet_shm->use_gpu) { # ifdef WITH_CLIENT_CUDA CUDA_CHECK(cudaMemcpy2D(out1_y, width, y, 2 * width, width, height, cudaMemcpyHostToDevice)); - CUDA_CHECK(cudaMemcpy2D(out2_y, width, y + width, 2 * width, width, height, cudaMemcpyHostToDevice)); - CUDA_CHECK(cudaMemcpy2D(out1_u, width / 2, u, width, width / 2, height / 2, cudaMemcpyHostToDevice)); - CUDA_CHECK(cudaMemcpy2D(out2_u, width / 2, u + width / 2, width, width / 2, height / 2, cudaMemcpyHostToDevice)); - CUDA_CHECK(cudaMemcpy2D(out1_v, width / 2, v, width, width / 2, height / 2, cudaMemcpyHostToDevice)); - CUDA_CHECK(cudaMemcpy2D(out2_v, width / 2, v + width / 2, width, width / 2, height / 2, cudaMemcpyHostToDevice)); + CUDA_CHECK(cudaMemcpy2D( + out2_y, width, y + width, 2 * width, width, height, cudaMemcpyHostToDevice)); + CUDA_CHECK(cudaMemcpy2D( + out1_u, width / 2, u, width, width / 2, height / 2, cudaMemcpyHostToDevice)); + CUDA_CHECK(cudaMemcpy2D( + out2_u, width / 2, u + width / 2, width, width / 2, height / 2, cudaMemcpyHostToDevice)); + CUDA_CHECK(cudaMemcpy2D( + out1_v, width / 2, v, width, width / 2, height / 2, cudaMemcpyHostToDevice)); + CUDA_CHECK(cudaMemcpy2D( + out2_v, width / 2, v + width / 2, width, width / 2, height / 2, cudaMemcpyHostToDevice)); CUDA_CHECK(cudaIpcCloseMemHandle(out)); -# endif - } else { +# endif + } + else { // Y for (int i = 0; i < height; ++i) { - copy_fn(out1_y, y, width); - out1_y += width; - y += width; - copy_fn(out2_y, y, width); - out2_y += width; - y += width; + copy_fn(out1_y, y, width); + out1_y += width; + y += width; + copy_fn(out2_y, y, width); + out2_y += width; + y += width; } // U V for (int i = 0; i < height / 2; ++i) { - copy_fn(out1_u, u, width / 2); - out1_u += width / 2; - u += width / 2; - copy_fn(out2_u, u, width / 2); - out2_u += width / 2; - u += width / 2; - - copy_fn(out1_v, v, width / 2); - out1_v += width / 2; - v += width / 2; - copy_fn(out2_v, v, width / 2); - out2_v += width / 2; - v += width / 2; + copy_fn(out1_u, u, width / 2); + out1_u += width / 2; + u += width / 2; + 
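// --- Illustrative sketch (not part of the patch) ---------------------------
// The cudaMemcpy2D() calls above split a side-by-side stereo I420 frame into
// two per-eye planes: the source pitch is the full (2 * width) row, the copy
// width is one eye, and the right eye simply starts `width` bytes into each
// row. A minimal standalone version of that idea (hypothetical helper name):
#include <cuda_runtime.h>

static cudaError_t split_sbs_plane(unsigned char *d_left, unsigned char *d_right,
                                   const unsigned char *h_sbs,  // side-by-side plane
                                   size_t eye_width, size_t height)
{
  const size_t src_pitch = 2 * eye_width;  // one full side-by-side row
  cudaError_t err = cudaMemcpy2D(d_left, eye_width, h_sbs, src_pitch,
                                 eye_width, height, cudaMemcpyHostToDevice);
  if (err != cudaSuccess)
    return err;
  return cudaMemcpy2D(d_right, eye_width, h_sbs + eye_width, src_pitch,
                      eye_width, height, cudaMemcpyHostToDevice);
}
// ---------------------------------------------------------------------------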
copy_fn(out2_u, u, width / 2); + out2_u += width / 2; + u += width / 2; + + copy_fn(out1_v, v, width / 2); + out1_v += width / 2; + v += width / 2; + copy_fn(out2_v, v, width / 2); + out2_v += width / 2; + v += width / 2; } } - } else { + } + else { char *tmp = out; if (cesnet_shm->use_gpu) { # ifdef WITH_CLIENT_CUDA @@ -717,7 +784,8 @@ bool copy_data(struct cesnet_shm *cesnet_shm, CUDA_CHECK(cudaIpcCloseMemHandle(out)); # endif - } else { + } + else { int len = width * height; copy_fn(tmp, y, len); tmp += len; @@ -751,7 +819,7 @@ void cesnet_set_render_buffer_yuv_i420( return; } # ifdef WITH_CLIENT_CUDA - //cudaSetDevice(0); + // cudaSetDevice(0); # endif if (!initialize_shm(&shm_id, &cesnet_shm, sem, &post_should_exit)) { @@ -759,7 +827,8 @@ void cesnet_set_render_buffer_yuv_i420( return; } - if (!platform_ipc_sem_wait(sem[READY_TO_CONSUME_FRAME])) { // wait until UltraGrid is able to take a frame + if (!platform_ipc_sem_wait( + sem[READY_TO_CONSUME_FRAME])) { // wait until UltraGrid is able to take a frame fprintf(stderr, "Waiting for UltraGrid ready failed!\n"); done_shm(&shm_id, &cesnet_shm, sem, &post_should_exit); return; @@ -777,11 +846,11 @@ void cesnet_set_render_buffer_yuv_i420( if (getenv("STEREO")) { cesnet_shm->frames[cesnet_shm->write_head].stereo = true; width /= 2; - } else { + } + else { cesnet_shm->frames[cesnet_shm->write_head].stereo = false; } - cesnet_shm->frames[cesnet_shm->write_head].width = width; cesnet_shm->frames[cesnet_shm->write_head].height = height; @@ -790,15 +859,15 @@ void cesnet_set_render_buffer_yuv_i420( return; } -#ifdef WITH_CLIENT_RENDERENGINE +# ifdef WITH_CLIENT_RENDERENGINE if (cesnet_shm->pkt.data.width != (unsigned)-1 && cesnet_shm->pkt.data.width != 0) { - memcpy(&g_cdata, &cesnet_shm->pkt, sizeof(cyclesphi::cyclesphi_data)); + memcpy(&g_cdata, &cesnet_shm->pkt, sizeof(cyclesphi::cyclesphi_data)); } -#else +# else if (cesnet_shm->pkt.frame != (unsigned)-1) { convert_render_pkt_to_view_matrix(&cesnet_shm->pkt, &g_cdata); } -#endif +# endif # ifdef RENDER_PACKET_FROM_FILE { @@ -882,7 +951,7 @@ bool cesnet_is_required_exit() //{ //} -#ifndef WITH_CLIENT_RENDERENGINE +#if !defined(WITH_CLIENT_RENDERENGINE) || defined(WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB) static void dump_render_pkt(struct RenderPacket *pkt) { printf("["); @@ -897,14 +966,48 @@ static void dump_render_pkt(struct RenderPacket *pkt) pkt->frame); } -void convert_render_pkt_to_view_matrix(struct RenderPacket *pkt, - cyclesphi::cyclesphi_data *cdata) +void convert_render_pkt_to_view_matrix(struct RenderPacket *pkt, cyclesphi::cyclesphi_data *cdata) { -#if 0 + //return; +# if 0 dump_render_pkt(pkt); -#endif +# endif + +#ifdef WITH_CLIENT_RENDERENGINE_ULTRAGRID_LIB + g_cdata.width = pkt->pix_width_eye; + g_cdata.height = pkt->pix_height_eye; + + cdata->cam.lens = pkt->left_projection_fov.left; + + cdata->cam_right.lens = pkt->right_projection_fov.left; + + cdata->cam.shift_x = pkt->left_projection_fov.bottom; + + cdata->cam.shift_y = 0.0f; + + cdata->cam_right.shift_x = pkt->right_projection_fov.bottom; + + cdata->cam_right.shift_y = 0.0f; + + cdata->cam.view_camera_zoom = pkt->left_projection_fov.top; + + if (pkt->left_projection_fov.right > 0) { + cdata->cam.sensor_width = pkt->left_projection_fov.right; + cdata->cam.use_view_camera = 1; + } + else { + cdata->cam.sensor_width = -pkt->left_projection_fov.right; + cdata->cam.use_view_camera = 0; + } + + cesnet_set_inverse_matrix(cdata->cam.transform_inverse_view_matrix, + pkt->left_view_pose.position, + 
pkt->left_view_pose.orientation); + cesnet_set_inverse_matrix(cdata->cam_right.transform_inverse_view_matrix, + pkt->right_view_pose.position, + pkt->right_view_pose.orientation); -#if 1 +#else g_cdata.width = pkt->pix_width_eye; g_cdata.height = pkt->pix_height_eye; @@ -933,11 +1036,9 @@ void convert_render_pkt_to_view_matrix(struct RenderPacket *pkt, cesnet_set_inverse_matrix(cdata->cam_right.transform_inverse_view_matrix, pkt->right_view_pose.position, pkt->right_view_pose.orientation); -#endif +# endif } #endif - - /* vim: set noexpandtab sw=2: */ diff --git a/client/ultragrid/vrgstream.h b/client/ultragrid/vrgstream.h index dd21b50026e03300419ec8068d95644c1b230afd..955144b8b1e7f09e7e1c5dcf45ee37c8cf9f70f3 100644 --- a/client/ultragrid/vrgstream.h +++ b/client/ultragrid/vrgstream.h @@ -1,76 +1,78 @@ #pragma once #if defined _MSC_VER || defined __MINGW32__ -#define DLLIMPORT __declspec(dllimport) +# define DLLIMPORT __declspec(dllimport) #else -#define DLLIMPORT +# define DLLIMPORT #endif #ifndef VRG_STREAM_API -#ifdef __cplusplus -#define VRG_STREAM_API extern "C" DLLIMPORT -#else -#define VRG_STREAM_API -#endif +# ifdef __cplusplus +# define VRG_STREAM_API extern "C" DLLIMPORT +# else +# define VRG_STREAM_API +# endif #endif struct ProjectionFovTan { - float left; - float right; - float top; - float bottom; + float left; + float right; + float top; + float bottom; }; struct Vector3 { - float x; - float y; - float z; + float x; + float y; + float z; }; struct Quaternion { - float x; - float y; - float z; - float w; + float x; + float y; + float z; + float w; }; struct Pose { - struct Vector3 position; - struct Quaternion orientation; + struct Vector3 position; + struct Quaternion orientation; }; struct RenderPacket { - struct ProjectionFovTan left_projection_fov; - struct Pose left_view_pose; - struct ProjectionFovTan right_projection_fov; - struct Pose right_view_pose; - int pix_width_eye; - int pix_height_eye; - unsigned long long timepoint; - unsigned int frame; + struct ProjectionFovTan left_projection_fov; + struct Pose left_view_pose; + struct ProjectionFovTan right_projection_fov; + struct Pose right_view_pose; + int pix_width_eye; + int pix_height_eye; + unsigned long long timepoint; + unsigned int frame; }; enum VrgStreamApiError { - HmdTestAppNotResponds = -8, - NoSupportedGPUFound = -7, - GPUFnNotSupported = -6, - FrameNotRequested = -5, - HmdTesterNotInited = -4, - GPUError = -3, - NotInited = -2, - InitFail = -1, - Ok = 0 + HmdTestAppNotResponds = -8, + NoSupportedGPUFound = -7, + GPUFnNotSupported = -6, + FrameNotRequested = -5, + HmdTesterNotInited = -4, + GPUError = -3, + NotInited = -2, + InitFail = -1, + Ok = 0 }; -enum VrgInputFormat { - RGBA = 0, - YUV420 = 1, - NV12 = 2 -}; +enum VrgInputFormat { RGBA = 0, YUV420 = 1, NV12 = 2 }; -VRG_STREAM_API enum VrgStreamApiError vrgStreamInit(enum VrgInputFormat inputFormat); +enum VrgMemory { CPU = 0, DX11 = 1, CUDA = 2, GL = 3 }; -VRG_STREAM_API enum VrgStreamApiError vrgStreamRenderFrame(int frame_no, struct RenderPacket *packet); +#ifndef RE_API -VRG_STREAM_API enum VrgStreamApiError vrgStreamSubmitFrame(int frame_no, void* sbs_image_data); +VRG_STREAM_API enum VrgStreamApiError vrgStreamInit(enum VrgInputFormat inputFormat); + +VRG_STREAM_API enum VrgStreamApiError vrgStreamRenderFrame(struct RenderPacket *packet); +VRG_STREAM_API enum VrgStreamApiError vrgStreamSubmitFrame(struct RenderPacket *packet, + void *sbs_image_data, + enum VrgMemory api); +#endif diff --git a/client/vrclient/CMakeLists.txt 
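// --- Illustrative sketch (not part of the patch) ---------------------------
// The reworked vrgstream.h above drops the frame_no parameter: the caller now
// receives a filled-in RenderPacket from vrgStreamRenderFrame() and hands the
// same packet back together with the side-by-side image and a VrgMemory tag.
// A typical caller loop under the new API might look like this; stream_loop
// and the frame-buffer handling are placeholders, not part of the patch.
#include "vrgstream.h"

static void stream_loop(void *sbs_image_cuda_ptr)
{
  if (vrgStreamInit(YUV420) != Ok)
    return;

  struct RenderPacket packet;
  for (;;) {
    if (vrgStreamRenderFrame(&packet) != Ok)  // fills pose and per-eye resolution
      break;
    // ... render into sbs_image_cuda_ptr using packet.left/right_view_pose ...
    if (vrgStreamSubmitFrame(&packet, sbs_image_cuda_ptr, CUDA) != Ok)
      break;
  }
}
// ---------------------------------------------------------------------------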
b/client/vrclient/CMakeLists.txt index f14f20ebf8b5be702502ed63383e5d8a21a345fe..cc66f02728d89c334eb1738bbb1c18a9522b015b 100644 --- a/client/vrclient/CMakeLists.txt +++ b/client/vrclient/CMakeLists.txt @@ -51,6 +51,14 @@ if(WITH_SOCKET_UDP) add_definitions(-DWITH_SOCKET_UDP) endif() +if(WITH_VRCLIENT_GLSTEREO) + add_definitions(-DWITH_VRCLIENT_GLSTEREO) +endif() + +if(WITH_SOCKET_ONLY_DATA) + add_definitions(-DWITH_SOCKET_ONLY_DATA) +endif() + if(WITH_CLIENT_XOR_RLE) add_definitions(-DWITH_CLIENT_XOR_RLE) endif() @@ -109,6 +117,12 @@ if(WITH_VRCLIENT_VRGSTREAM) add_executable(vrclient ${SRC_LIBUG}) target_link_libraries(vrclient vrgstream ${ULTRAGRID_LIBRARIES}) + if(WITH_VRCLIENT_OPENVR) + message("${OPENVR_LIBRARIES}") + target_link_libraries(vrgstream ${OPENVR_LIBRARIES}) + target_link_libraries(vrclient ${OPENVR_LIBRARIES}) + endif() + install (TARGETS vrclient DESTINATION bin) else() add_executable(vrclient ${SRC}) diff --git a/client/vrclient/vrclient.cpp b/client/vrclient/vrclient.cpp index 0c3561bf7ee9d5f688f71074fbd81ea8b2f56bc2..b3edf38fc2f313002bb3c3fc786c141bc5ce939d 100644 --- a/client/vrclient/vrclient.cpp +++ b/client/vrclient/vrclient.cpp @@ -29,16 +29,25 @@ # include <cuda_runtime.h> #endif +#if defined(WITH_CLIENT_RENDERENGINE_VR) || defined(WITH_VRCLIENT_OPENVR) +# define DWIDTH ((unsigned long long int)g_width * 2L) +#else +# define DWIDTH (g_width) +#endif + ////////////////////////// unsigned int g_width = 2; unsigned int g_height = 1; +int w_width = 0; +int w_height = 0; + // unsigned char* g_pixels = NULL; -unsigned char *g_pixels_bufs[3] = {NULL}; +unsigned char *g_pixels_buf = NULL;//[3] = {NULL, NULL, NULL}; #ifdef WITH_CUDA_GL_INTEROP -void *g_pixels_bufs_d[3] = {NULL}; +void *g_pixels_buf_d = NULL;//{NULL, NULL, NULL}; #endif //#ifdef WITH_CLIENT_YUV @@ -53,6 +62,9 @@ char fname[1024]; int current_samples = 1; +double mouse_pos[2] = {0, 0}; +float landscape_pos[3] = {0, 5, 1}; + #ifdef WITH_VRCLIENT_VRGSTREAM RenderPacket g_packet; #else @@ -70,10 +82,13 @@ cyclesphi::cyclesphi_data g_cdata; GLFWwindow *g_windows[1] = {NULL}; // GLFWwindow *g_window = NULL; GLuint g_bufferIds[3]; // IDs of PBO +GLuint g_textureIds[3]; // ID of texture + +#if 0 GLuint g_resbufferIds[3]; // IDs of PBO GLuint g_depthbufferIds[3]; // IDs of PBO -GLuint g_textureIds[3]; // ID of texture GLuint g_restextureIds[3]; // ID of texture +#endif ////GLubyte* imageData = 0; // pointer to texture buffer // int pboMode = 1; // @@ -86,9 +101,21 @@ GLuint g_restextureIds[3]; // ID of texture // stl_tri *polys = NULL; // size_t polys_size = 0; +#define EYE_OFFSET 0.0358333f // default viewpoint separation +#define EYE_ADJUST -0.0123611f // default horizontal image shift adjustment + /////////////OPENVR////////////// #ifdef WITH_VRCLIENT_OPENVR -# include <openvr/openvr.h> +# ifdef __cplusplus +extern "C" { +# endif + +# include "openvr/openvr.h" + +# ifdef __cplusplus +} +# endif + ////--------------------------------------------------------------------------------------------------------------------- //// Purpose: Returns true if the action is active and had a rising edge @@ -280,17 +307,27 @@ float m_rmat4DevicePose[vr::k_unMaxTrackedDeviceCount * 12]; // return true; //} +# if 1 bool vr_CreateFrameBuffer(int eye, int nWidth, int nHeight) { glGenFramebuffers(1, &g_bufferIds[eye]); glBindFramebuffer(GL_FRAMEBUFFER, g_bufferIds[eye]); +#if 0 glGenRenderbuffers(1, &g_depthbufferIds[eye]); glBindRenderbuffer(GL_RENDERBUFFER, g_depthbufferIds[eye]); +# if 0 
glRenderbufferStorageMultisample(GL_RENDERBUFFER, 4, GL_DEPTH_COMPONENT, nWidth, nHeight); +#else + glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, nWidth, nHeight); +#endif + glFramebufferRenderbuffer( GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, g_depthbufferIds[eye]); +#endif + +#if 0 glGenTextures(1, &g_textureIds[eye]); glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, g_textureIds[eye]); glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE, 4, GL_RGBA8, nWidth, nHeight, true); @@ -299,7 +336,17 @@ float m_rmat4DevicePose[vr::k_unMaxTrackedDeviceCount * 12]; GL_TEXTURE_2D_MULTISAMPLE, g_textureIds[eye], 0); +#else + glGenTextures(1, &g_textureIds[eye]); + glBindTexture(GL_TEXTURE_2D, g_textureIds[eye]); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0); + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, nWidth, nHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, nullptr); + glFramebufferTexture2D( + GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, g_textureIds[eye], 0); +#endif +#if 0 glGenFramebuffers(1, &g_resbufferIds[eye]); glBindFramebuffer(GL_FRAMEBUFFER, g_resbufferIds[eye]); @@ -307,14 +354,16 @@ float m_rmat4DevicePose[vr::k_unMaxTrackedDeviceCount * 12]; glBindTexture(GL_TEXTURE_2D, g_restextureIds[eye]); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0); - glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, nWidth, nHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, - nullptr); glFramebufferTexture2D( + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, nWidth, nHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE,nullptr); + glFramebufferTexture2D( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, g_restextureIds[eye], 0); +#endif // check FBO status GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER); if (status != GL_FRAMEBUFFER_COMPLETE) { + printf("status != GL_FRAMEBUFFER_COMPLETE\n"); return false; } @@ -322,6 +371,7 @@ float m_rmat4DevicePose[vr::k_unMaxTrackedDeviceCount * 12]; return true; } + #endif ////----------------------------------------------------------------------------- //// Purpose: @@ -599,7 +649,19 @@ void vr_render_frame() // RenderControllerAxes(); // vr_RenderStereoTargets(); // RenderCompanionWindow(); +#if 1 + vr::Texture_t leftEyeTexture = { + (void *)(uintptr_t)g_textureIds[0], // leftEyeDesc.m_nResolveTextureId, + vr::TextureType_OpenGL, + vr::ColorSpace_Gamma}; + vr::VRCompositor()->Submit(vr::Eye_Left, &leftEyeTexture); + vr::Texture_t rightEyeTexture = { + (void *)(uintptr_t)g_textureIds[1], // rightEyeDesc.m_nResolveTextureId, + vr::TextureType_OpenGL, + vr::ColorSpace_Gamma}; + vr::VRCompositor()->Submit(vr::Eye_Right, &rightEyeTexture); +#else vr::Texture_t leftEyeTexture = { (void *)(uintptr_t)g_restextureIds[0], // leftEyeDesc.m_nResolveTextureId, vr::TextureType_OpenGL, @@ -611,6 +673,7 @@ void vr_render_frame() vr::TextureType_OpenGL, vr::ColorSpace_Gamma}; vr::VRCompositor()->Submit(vr::Eye_Right, &rightEyeTexture); +#endif } // vr_update_HMDMatrixPose(); @@ -697,9 +760,9 @@ void vr_render_frame() ///////////////////////// void check_exit() { -#ifndef _WIN32 +//#ifndef _WIN32 exit(-1); -#endif +//#endif } #define cu_assert(stmt) \ @@ -758,12 +821,12 @@ void cuda_error_message(const std::string &message) ///////////////////////// void freeTexture(int eye); -void setupTexture(int eye, int width, int height); +void setupTexture(int eye); void resize(unsigned width, unsigned height); void gl_free(); void gl_init(); void displayFPS(int type, 
int tot_samples, size_t recv_size); -void toOrtho(int width, int height); +void toOrtho(int eye, int width, int height); ///////////////////////// #ifdef WITH_VRCLIENT_OPENVR @@ -799,6 +862,10 @@ vr::HmdVector3_t GetPosition(vr::HmdMatrix34_t matrix) { return vector; } +#endif + +#if defined(WITH_VRCLIENT_VRGSTREAM) && defined(WITH_VRCLIENT_OPENVR) + void convert_openvr_to_vrgstream(RenderPacket* packet) { float left_Left, left_Right, left_Top, left_Bottom; @@ -857,9 +924,9 @@ void convert_openvr_to_vrgstream(RenderPacket* packet) // struct Vector3 position; vr::HmdVector3_t pos = GetPosition(m_rTrackedDevicePose[vr::k_unTrackedDeviceIndex_Hmd].mDeviceToAbsoluteTracking); - packet->left_view_pose.position.x = pos.v[0]; - packet->left_view_pose.position.y = pos.v[1]; - packet->left_view_pose.position.z = pos.v[2]; + packet->left_view_pose.position.x = pos.v[0] + landscape_pos[0]; + packet->left_view_pose.position.y = pos.v[1] + landscape_pos[1]; + packet->left_view_pose.position.z = pos.v[2] + landscape_pos[2]; // struct Quaternion orientation; vr::HmdQuaternion_t rot = GetRotation(m_rTrackedDevicePose[vr::k_unTrackedDeviceIndex_Hmd].mDeviceToAbsoluteTracking); @@ -876,9 +943,9 @@ void convert_openvr_to_vrgstream(RenderPacket* packet) // struct Pose right_view_pose; // struct Vector3 position; - packet->right_view_pose.position.x = pos.v[0]; - packet->right_view_pose.position.y = pos.v[1]; - packet->right_view_pose.position.z = pos.v[2]; + packet->right_view_pose.position.x = pos.v[0] + landscape_pos[0]; + packet->right_view_pose.position.y = pos.v[1] + landscape_pos[1]; + packet->right_view_pose.position.z = pos.v[2] + landscape_pos[2]; // struct Quaternion orientation; packet->right_view_pose.orientation.x = rot.x; @@ -898,11 +965,13 @@ void convert_openvr_to_vrgstream(RenderPacket* packet) packet->timepoint = omp_get_wtime(); } -#elif !defined(WITH_VRCLIENT_VRGSTREAM) +#endif + +#if !defined(WITH_VRCLIENT_VRGSTREAM) size_t g_recv_pixels_data2 = 0; int recv_pixels_data() { - size_t pix_type_size = SIZE_UCHAR4; + //size_t pix_type_size = SIZE_UCHAR4; # ifdef WITH_CLIENT_XOR_RLE @@ -918,36 +987,22 @@ int recv_pixels_data() # elif defined(WITH_CLIENT_YUV) double t2 = omp_get_wtime(); - cyclesphi::kernel::socket_recv_data_data( - (char*)g_pixels_bufs[0], g_width * g_height, false); + cyclesphi::kernel::socket_recv_data_data((char *)g_pixels_buf, + DWIDTH * g_height + DWIDTH * g_height / 2 /*, false*/); - cyclesphi::kernel::socket_recv_data_data((char*)g_pixels_bufs[1], - g_width * g_height); - displayFPS(1, current_samples, 2 * g_height * g_width); + current_samples = ((int *)g_pixels_buf)[0]; + displayFPS(1, current_samples, DWIDTH * g_height + DWIDTH * g_height / 2); double t3 = omp_get_wtime(); -//# if defined(WITH_CLIENT_RENDERENGINE_VR) || !defined(WITH_CLIENT_RENDERENGINE) -// util_yuv_i420_to_rgba_stereo((char *)g_pixels_buf, (char *)g_pixels_buf_yuv, g_height, g_width); -//# else -// util_yuv_i420_to_rgba((char *)g_pixels_buf, (char *)g_pixels_buf_yuv, g_height, g_width); -//# endif - # ifdef WITH_CUDA_GL_INTEROP - // glfwMakeContextCurrent(g_windows[0]); - - cuda_assert(cudaMemcpy(g_pixels_bufs_d[0], - g_pixels_bufs[0], - g_width * 2 * g_height, - cudaMemcpyDefault)); - // glfwMakeContextCurrent(g_windows[1]); + cuda_assert(cudaMemcpy(g_pixels_buf_d, + g_pixels_buf, + DWIDTH * g_height + DWIDTH * g_height / 2, + cudaMemcpyDefault)); // cudaMemcpyDefault cudaMemcpyHostToDevice - //cuda_assert(cudaMemcpy(g_pixels_bufs_d[1], - // g_pixels_bufs[1], - // g_width * g_height, - // 
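// --- Illustrative sketch (not part of the patch) ---------------------------
// The receive path above sizes its buffers as DWIDTH * h + DWIDTH * h / 2: an
// 8-bit I420 frame is one full-resolution Y plane plus quarter-resolution U
// and V planes, i.e. 1.5 bytes per pixel. Spelled out (hypothetical helper):
#include <cstddef>

static size_t i420_frame_bytes(size_t width, size_t height)
{
  const size_t y_plane = width * height;              // 1 byte per pixel
  const size_t chroma = (width / 2) * (height / 2);   // U and V are subsampled 2x2
  return y_plane + 2 * chroma;                        // == width * height * 3 / 2
}
// ---------------------------------------------------------------------------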
cudaMemcpyDefault)); -#endif +# endif double t4 = omp_get_wtime(); @@ -960,8 +1015,7 @@ int recv_pixels_data() cyclesphi::kernel::socket_recv_nvpipe( (char*)g_pixels_bufs[0], - (char*)g_pixels_bufs_d[0], - g_width * 2, + (char*)g_pixels_bufs_d[0], DWIDTH, g_height); //cyclesphi::kernel::socket_recv_nvpipe( @@ -970,28 +1024,30 @@ int recv_pixels_data() // g_width, // g_height); - displayFPS(1, current_samples, g_width * 2 * g_height * pix_type_size); + displayFPS(1, current_samples, DWIDTH * g_height * pix_type_size); double t3 = omp_get_wtime(); #else - cyclesphi::kernel::socket_recv_data_data( - (char*)g_pixels_bufs[0], g_width * 2* g_height * pix_type_size, false); + cyclesphi::kernel::socket_recv_data_data((char *)g_pixels_buf, + DWIDTH * g_height * SIZE_UCHAR4 /*, false*/); //cyclesphi::kernel::socket_recv_data_data((char*)g_pixels_bufs[1], // g_width * g_height * pix_type_size); - - displayFPS(1, current_samples, g_width * 2 * g_height * pix_type_size); + current_samples = ((int *)g_pixels_buf)[0]; + displayFPS(1, current_samples, DWIDTH * g_height * SIZE_UCHAR4); double t3 = omp_get_wtime(); # ifdef WITH_CUDA_GL_INTEROP // glfwMakeContextCurrent(g_windows[0]); - cuda_assert(cudaMemcpy(g_pixels_bufs_d[0], - g_pixels_bufs[0], - g_width * 2 * g_height * pix_type_size, - cudaMemcpyDefault)); + //cuda_assert(cudaSetDevice(0)); + //cuda_assert(cudaDeviceSynchronize()); + cuda_assert(cudaMemcpy(g_pixels_buf_d, + g_pixels_buf, + DWIDTH * g_height * SIZE_UCHAR4, + cudaMemcpyDefault)); // cudaMemcpyDefault cudaMemcpyHostToDevice // glfwMakeContextCurrent(g_windows[1]); @@ -1017,7 +1073,11 @@ int send_cam_data() void client_init(const char *server, int port_cam, int port_data, int w, int h) { +# ifdef WITH_SOCKET_ONLY_DATA + cyclesphi::kernel::init_sockets_data(server, port_data); +# else cyclesphi::kernel::init_sockets_cam(server, port_cam, port_data); +# endif //#ifdef WITH_CLIENT_RENDERENGINE_VR // resize(w * 2, h); //#else @@ -1030,6 +1090,7 @@ void client_close() cyclesphi::kernel::socket_client_close(); cyclesphi::kernel::socket_server_close(); } +#endif //<<<<<<< HEAD //======= @@ -1202,15 +1263,22 @@ void init_camera_data(cyclesphi::cyclesphi_data *cdata) // if (!g_cdata_init) { // printf("get_camera_matrices\n"); - g_cdata.step_samples = 1; - // g_cdata.width = 7680;// / 2; - // g_cdata.height = 2160; + cdata->step_samples = 1; + cdata->width = 7680 / 2; + cdata->height = 2160; + + //cdata->width = 3840 * 2; + //cdata->height = 2400; + + cdata->width = 1920 * 2; + cdata->height = 1200; - // g_cdata.width = 8 * 1024; // / 2; - // g_cdata.height = 2 * 1024; // 2116x1203 - g_cdata.width = 1024; - g_cdata.height = 1024; + // cdata->width = 8 * 1024; // / 2; + // cdata->height = 2 * 1024; // 2116x1203 + + //cdata->width = 1024; + //cdata->height = 1024; // Matrix(((-1.0, -1.424297928675957e-14, -8.742277657347586e-08, 0.31489095091819763), // (-8.742277657347586e-08, 1.6292068494294654e-07, 1.0, 0.0), @@ -1221,98 +1289,98 @@ void init_camera_data(cyclesphi::cyclesphi_data *cdata) // Vector((1.4901162970204496e-08, 0.8953957557678223, 0.44527143239974976, 4.958309650421143)) // Vector((-0.0, 0.0, 0.0, 1.0)) - // g_cdata.cam.transform_inverse_view_matrix[0] = 0.6859206557273865; - // g_cdata.cam.transform_inverse_view_matrix[1] = -0.32401350140571594; - // g_cdata.cam.transform_inverse_view_matrix[2] = 0.6515582799911499; - // g_cdata.cam.transform_inverse_view_matrix[3] = 7.358892917633057; - // g_cdata.cam.transform_inverse_view_matrix[4] = 0.7276763916015625; - // 
g_cdata.cam.transform_inverse_view_matrix[5] = 0.305420845746994; - // g_cdata.cam.transform_inverse_view_matrix[6] = -0.6141703724861145; - // g_cdata.cam.transform_inverse_view_matrix[7] = -6.925791263580322; - // g_cdata.cam.transform_inverse_view_matrix[8] = 0; - // g_cdata.cam.transform_inverse_view_matrix[9] = 0.8953957557678223; - // g_cdata.cam.transform_inverse_view_matrix[10] = 0.44527143239974976; - // g_cdata.cam.transform_inverse_view_matrix[11] = 4.958309650421143; - - g_cdata.cam.transform_inverse_view_matrix[0] = -1.0; - g_cdata.cam.transform_inverse_view_matrix[1] = 0; - g_cdata.cam.transform_inverse_view_matrix[2] = 0; - g_cdata.cam.transform_inverse_view_matrix[3] = 2; - g_cdata.cam.transform_inverse_view_matrix[4] = 0; - g_cdata.cam.transform_inverse_view_matrix[5] = 0; - g_cdata.cam.transform_inverse_view_matrix[6] = 1; - g_cdata.cam.transform_inverse_view_matrix[7] = 2; - g_cdata.cam.transform_inverse_view_matrix[8] = 0; - g_cdata.cam.transform_inverse_view_matrix[9] = 1; - g_cdata.cam.transform_inverse_view_matrix[10] = 0; - g_cdata.cam.transform_inverse_view_matrix[11] = 2; - - g_cdata.cam.lens = 50; - g_cdata.cam.shift_x = 0.0f; - g_cdata.cam.shift_y = 0.0f; - - g_cdata.cam.clip_start = 0.1f; - g_cdata.cam.clip_end = 100.0f; - g_cdata.cam.sensor_width = 36; - g_cdata.cam.sensor_fit = 0; - - g_cdata.cam.view_camera_zoom = 0.0f; - g_cdata.cam.view_camera_offset[0] = 0; - g_cdata.cam.view_camera_offset[1] = 0; - g_cdata.cam.use_view_camera = 1; - - // g_cdata.cam_right.transform_inverse_view_matrix[0] = -0.437973917; - // g_cdata.cam_right.transform_inverse_view_matrix[1] = -0.267504632; - // g_cdata.cam_right.transform_inverse_view_matrix[2] = 0.858265936; - // g_cdata.cam_right.transform_inverse_view_matrix[3] = 1.26781642; - // g_cdata.cam_right.transform_inverse_view_matrix[4] = 0.874406159; - // g_cdata.cam_right.transform_inverse_view_matrix[5] = 0.0949685797; - // g_cdata.cam_right.transform_inverse_view_matrix[6] = 0.475810111; - // g_cdata.cam_right.transform_inverse_view_matrix[7] = 1.48614061; - // g_cdata.cam_right.transform_inverse_view_matrix[8] = -0.208789662; - // g_cdata.cam_right.transform_inverse_view_matrix[9] = 0.958865106; - // g_cdata.cam_right.transform_inverse_view_matrix[10] = 0.192313895; - // g_cdata.cam_right.transform_inverse_view_matrix[11] = 1.08239710; - - // g_cdata.cam_right.transform_inverse_view_matrix[0] = 0.6859206557273865; - // g_cdata.cam_right.transform_inverse_view_matrix[1] = -0.32401350140571594; - // g_cdata.cam_right.transform_inverse_view_matrix[2] = 0.6515582799911499; - // g_cdata.cam_right.transform_inverse_view_matrix[3] = 7.358892917633057; - // g_cdata.cam_right.transform_inverse_view_matrix[4] = 0.7276763916015625; - // g_cdata.cam_right.transform_inverse_view_matrix[5] = 0.305420845746994; - // g_cdata.cam_right.transform_inverse_view_matrix[6] = -0.6141703724861145; - // g_cdata.cam_right.transform_inverse_view_matrix[7] = -6.925791263580322; - // g_cdata.cam_right.transform_inverse_view_matrix[8] = 0; - // g_cdata.cam_right.transform_inverse_view_matrix[9] = 0.8953957557678223; - // g_cdata.cam_right.transform_inverse_view_matrix[10] = 0.44527143239974976; - // g_cdata.cam_right.transform_inverse_view_matrix[11] = 4.958309650421143; - - g_cdata.cam_right.transform_inverse_view_matrix[0] = -1.0; - g_cdata.cam_right.transform_inverse_view_matrix[1] = 0; - g_cdata.cam_right.transform_inverse_view_matrix[2] = 0; - g_cdata.cam_right.transform_inverse_view_matrix[3] = 2; - 
g_cdata.cam_right.transform_inverse_view_matrix[4] = 0; - g_cdata.cam_right.transform_inverse_view_matrix[5] = 0; - g_cdata.cam_right.transform_inverse_view_matrix[6] = 1; - g_cdata.cam_right.transform_inverse_view_matrix[7] = 2; - g_cdata.cam_right.transform_inverse_view_matrix[8] = 0; - g_cdata.cam_right.transform_inverse_view_matrix[9] = 1; - g_cdata.cam_right.transform_inverse_view_matrix[10] = 0; - g_cdata.cam_right.transform_inverse_view_matrix[11] = 2; - - g_cdata.cam_right.lens = 50; - g_cdata.cam_right.shift_x = 0.0f; - g_cdata.cam_right.shift_y = 0.0f; - - g_cdata.cam_right.clip_start = 0.1f; - g_cdata.cam_right.clip_end = 100.0f; - g_cdata.cam_right.sensor_width = 36; - g_cdata.cam_right.sensor_fit = 0; - - g_cdata.cam_right.view_camera_zoom = 0.0f; - g_cdata.cam_right.view_camera_offset[0] = 0; - g_cdata.cam_right.view_camera_offset[1] = 0; - g_cdata.cam_right.use_view_camera = 1; + // cdata->cam.transform_inverse_view_matrix[0] = 0.6859206557273865; + // cdata->cam.transform_inverse_view_matrix[1] = -0.32401350140571594; + // cdata->cam.transform_inverse_view_matrix[2] = 0.6515582799911499; + // cdata->cam.transform_inverse_view_matrix[3] = 7.358892917633057; + // cdata->cam.transform_inverse_view_matrix[4] = 0.7276763916015625; + // cdata->cam.transform_inverse_view_matrix[5] = 0.305420845746994; + // cdata->cam.transform_inverse_view_matrix[6] = -0.6141703724861145; + // cdata->cam.transform_inverse_view_matrix[7] = -6.925791263580322; + // cdata->cam.transform_inverse_view_matrix[8] = 0; + // cdata->cam.transform_inverse_view_matrix[9] = 0.8953957557678223; + // cdata->cam.transform_inverse_view_matrix[10] = 0.44527143239974976; + // cdata->cam.transform_inverse_view_matrix[11] = 4.958309650421143; + + cdata->cam.transform_inverse_view_matrix[0] = -1.0; + cdata->cam.transform_inverse_view_matrix[1] = 0; + cdata->cam.transform_inverse_view_matrix[2] = 0; + cdata->cam.transform_inverse_view_matrix[3] = 2; + cdata->cam.transform_inverse_view_matrix[4] = 0; + cdata->cam.transform_inverse_view_matrix[5] = 0; + cdata->cam.transform_inverse_view_matrix[6] = 1; + cdata->cam.transform_inverse_view_matrix[7] = 2; + cdata->cam.transform_inverse_view_matrix[8] = 0; + cdata->cam.transform_inverse_view_matrix[9] = 1; + cdata->cam.transform_inverse_view_matrix[10] = 0; + cdata->cam.transform_inverse_view_matrix[11] = 2; + + cdata->cam.lens = 50; + cdata->cam.shift_x = -EYE_OFFSET / 2.0; // 0 .0f; + cdata->cam.shift_y = 0.0f; + + cdata->cam.clip_start = 0.1f; + cdata->cam.clip_end = 100.0f; + cdata->cam.sensor_width = 36; + cdata->cam.sensor_fit = 0; + + cdata->cam.view_camera_zoom = 0.0f; + cdata->cam.view_camera_offset[0] = 0; + cdata->cam.view_camera_offset[1] = 0; + cdata->cam.use_view_camera = 1; + + // cdata->cam_right.transform_inverse_view_matrix[0] = -0.437973917; + // cdata->cam_right.transform_inverse_view_matrix[1] = -0.267504632; + // cdata->cam_right.transform_inverse_view_matrix[2] = 0.858265936; + // cdata->cam_right.transform_inverse_view_matrix[3] = 1.26781642; + // cdata->cam_right.transform_inverse_view_matrix[4] = 0.874406159; + // cdata->cam_right.transform_inverse_view_matrix[5] = 0.0949685797; + // cdata->cam_right.transform_inverse_view_matrix[6] = 0.475810111; + // cdata->cam_right.transform_inverse_view_matrix[7] = 1.48614061; + // cdata->cam_right.transform_inverse_view_matrix[8] = -0.208789662; + // cdata->cam_right.transform_inverse_view_matrix[9] = 0.958865106; + // cdata->cam_right.transform_inverse_view_matrix[10] = 0.192313895; + // 
cdata->cam_right.transform_inverse_view_matrix[11] = 1.08239710; + + // cdata->cam_right.transform_inverse_view_matrix[0] = 0.6859206557273865; + // cdata->cam_right.transform_inverse_view_matrix[1] = -0.32401350140571594; + // cdata->cam_right.transform_inverse_view_matrix[2] = 0.6515582799911499; + // cdata->cam_right.transform_inverse_view_matrix[3] = 7.358892917633057; + // cdata->cam_right.transform_inverse_view_matrix[4] = 0.7276763916015625; + // cdata->cam_right.transform_inverse_view_matrix[5] = 0.305420845746994; + // cdata->cam_right.transform_inverse_view_matrix[6] = -0.6141703724861145; + // cdata->cam_right.transform_inverse_view_matrix[7] = -6.925791263580322; + // cdata->cam_right.transform_inverse_view_matrix[8] = 0; + // cdata->cam_right.transform_inverse_view_matrix[9] = 0.8953957557678223; + // cdata->cam_right.transform_inverse_view_matrix[10] = 0.44527143239974976; + // cdata->cam_right.transform_inverse_view_matrix[11] = 4.958309650421143; + + cdata->cam_right.transform_inverse_view_matrix[0] = -1.0; + cdata->cam_right.transform_inverse_view_matrix[1] = 0; + cdata->cam_right.transform_inverse_view_matrix[2] = 0; + cdata->cam_right.transform_inverse_view_matrix[3] = 2; + cdata->cam_right.transform_inverse_view_matrix[4] = 0; + cdata->cam_right.transform_inverse_view_matrix[5] = 0; + cdata->cam_right.transform_inverse_view_matrix[6] = 1; + cdata->cam_right.transform_inverse_view_matrix[7] = 2; + cdata->cam_right.transform_inverse_view_matrix[8] = 0; + cdata->cam_right.transform_inverse_view_matrix[9] = 1; + cdata->cam_right.transform_inverse_view_matrix[10] = 0; + cdata->cam_right.transform_inverse_view_matrix[11] = 2; + + cdata->cam_right.lens = 50; + cdata->cam_right.shift_x = EYE_OFFSET / 2.0; // 0.0f; + cdata->cam_right.shift_y = 0.0f; + + cdata->cam_right.clip_start = 0.1f; + cdata->cam_right.clip_end = 100.0f; + cdata->cam_right.sensor_width = 36; + cdata->cam_right.sensor_fit = 0; + + cdata->cam_right.view_camera_zoom = 0.0f; + cdata->cam_right.view_camera_offset[0] = 0; + cdata->cam_right.view_camera_offset[1] = 0; + cdata->cam_right.use_view_camera = 1; // g_cdata_init = true; } @@ -1321,7 +1389,7 @@ void init_camera_data(cyclesphi::cyclesphi_data *cdata) convert_openvr_to_view_matrix(cdata); # endif } -#endif + //////////////////////////////////////////////// //<<<<<<< HEAD @@ -1336,43 +1404,45 @@ void path_tracer(void *pixels) void gl_render(int eye) { +#if 1 //render to texture if (eye == 0 || eye == 1) { //glBindFramebuffer(GL_FRAMEBUFFER, g_bufferIds[eye]); //toOrtho(g_width/2, g_height); - glEnable(GL_MULTISAMPLE); + //glEnable(GL_MULTISAMPLE); // Left Eye glBindFramebuffer(GL_FRAMEBUFFER, g_bufferIds[eye]); - //glViewport(0, 0, g_width, g_height); + //glViewport(0, 0, g_width / 2, g_height); // vr_RenderScene(vr::Eye_Left); - //toOrtho(g_width / 2, g_height); + toOrtho(eye, DWIDTH / 2, g_height); } else { - //toOrtho(g_width, g_height); - - //glBindTexture(GL_TEXTURE_2D, g_restextureIds[0]); +#if 1 + toOrtho(eye, w_width, w_height); - //glBegin(GL_QUADS); + glBindTexture(GL_TEXTURE_2D, g_textureIds[2]); - //glTexCoord2d(0.0, 0.0); - //glVertex2d(0.0, 0.0); - //glTexCoord2d(1.0, 0.0); - //glVertex2d(1, 0.0); - //glTexCoord2d(1.0, 1.0); - //glVertex2d(1, 1); - //glTexCoord2d(0.0, 1.0); - //glVertex2d(0.0, 1); + glBegin(GL_QUADS); - //glEnd(); + glTexCoord2d(0.0, 0.0); + glVertex2d(0.0, 0.0); + glTexCoord2d(1.0, 0.0); + glVertex2d(1, 0.0); + glTexCoord2d(1.0, 1.0); + glVertex2d(1, 1); + glTexCoord2d(0.0, 1.0); + glVertex2d(0.0, 1); - 
//glBindTexture(GL_TEXTURE_2D, 0); + glEnd(); - //return; + glBindTexture(GL_TEXTURE_2D, 0); +#endif + return; } - +#endif // glfwMakeContextCurrent(g_windows[eye]); #ifdef WITH_CUDA_GL_INTEROP @@ -1384,9 +1454,17 @@ void gl_render(int eye) // Use offset instead of ponter. #ifdef WITH_CLIENT_YUV - // glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, g_width, g_height, GL_LUMINANCE, GL_UNSIGNED_BYTE, 0); + glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, DWIDTH, g_height, GL_LUMINANCE, GL_UNSIGNED_BYTE, 0); #else - glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, g_width, g_height, GL_RGBA, GL_UNSIGNED_BYTE, 0); + glTexSubImage2D(GL_TEXTURE_2D, + 0, + 0, + 0, + DWIDTH, //(eye == 2) ? g_width : g_width / 2, + g_height, + GL_RGBA, + GL_UNSIGNED_BYTE, + 0); #endif #else @@ -1521,10 +1599,12 @@ void gl_render(int eye) glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); glBindTexture(GL_TEXTURE_2D, 0); +#if 1 //render to texture if (eye == 0 || eye == 1) { glBindFramebuffer(GL_FRAMEBUFFER, 0); +#if 0 glDisable(GL_MULTISAMPLE); glBindFramebuffer(GL_READ_FRAMEBUFFER, g_bufferIds[eye]); @@ -1543,8 +1623,9 @@ void gl_render(int eye) glBindFramebuffer(GL_READ_FRAMEBUFFER, 0); glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0); +#endif } - +#endif // glPopMatrix(); // if(eye == 1) @@ -1560,16 +1641,48 @@ void keyboardFunc(GLFWwindow *window, int key, int scancode, int action, int mod { if (glfwGetKey(window, GLFW_KEY_ESCAPE) == GLFW_PRESS) glfwSetWindowShouldClose(window, true); + + + float landscape_step = 0.1f; + + if (glfwGetKey(window, GLFW_KEY_A) == GLFW_PRESS) + landscape_pos[0] -= landscape_step; + + if (glfwGetKey(window, GLFW_KEY_D) == GLFW_PRESS) + landscape_pos[0] += landscape_step; + + if (glfwGetKey(window, GLFW_KEY_W) == GLFW_PRESS) + landscape_pos[1] += landscape_step; + + if (glfwGetKey(window, GLFW_KEY_S) == GLFW_PRESS) + landscape_pos[1] -= landscape_step; + + if (glfwGetKey(window, GLFW_KEY_E) == GLFW_PRESS) + landscape_pos[2] += landscape_step; + + if (glfwGetKey(window, GLFW_KEY_R) == GLFW_PRESS) + landscape_pos[2] -= landscape_step; + + if (glfwGetKey(window, GLFW_KEY_U) == GLFW_PRESS) + if (g_cdata.step_samples > 2) + g_cdata.step_samples--; + + if (glfwGetKey(window, GLFW_KEY_I) == GLFW_PRESS) + g_cdata.step_samples++; } void clickFunc(GLFWwindow *window, int button, int action, int mods) { double x, y; glfwGetCursorPos(window, &x, &y); + //mouse_pos[0] = 2.0 * (x - g_width / 2.0) / (g_width / 2.0); + //mouse_pos[1] = 2.0 * (y - g_height / 2.0) / (g_height / 2.0); } void motionFunc(GLFWwindow *window, double x, double y) { + mouse_pos[0] = 2.0 * (((int)x) - w_width / 2.0) / (w_width / 2.0); + mouse_pos[1] = 2.0 * (((int)y) - w_height / 2.0) / (w_height / 2.0); } void displayFPS(int type, int tot_samples, size_t recv_size) @@ -1587,7 +1700,7 @@ void displayFPS(int type, int tot_samples, size_t recv_size) "FPS: %.2f, Samples: %d, Res: %d x %d, BW: %.3f [Gb/s]", fps, samples[0], - g_width, + DWIDTH, g_height, ((double)g_frameSize[type] * 8.0 / 1024.0 / 1024.0 / 1024.0 / (currentTime - g_previousTime[type]))); @@ -1623,9 +1736,6 @@ void displayFunc() gl_render(1); gl_render(2); - // - //gl_render(1); - glfwSwapBuffers(g_windows[0]); double t4 = omp_get_wtime(); @@ -1675,7 +1785,7 @@ void render_packet_init(RenderPacket *packet) packet->right_view_pose.orientation.w = 0.256363; // int pix_width_eye; - packet->pix_width_eye = g_width/2; + packet->pix_width_eye = DWIDTH / 2; // int pix_height_eye; packet->pix_height_eye = g_height; // unsigned long long timepoint; @@ -1696,25 +1806,50 @@ void displayFunc() double t2 = 
omp_get_wtime(); CLIENT_DEBUG_PRINTF2("recv_pixels_data: pix:%f\n", t2 - t1); +# ifdef WITH_VRCLIENT_OPENVR convert_openvr_to_view_matrix(&g_cdata); +#else + g_cdata.cam.transform_inverse_view_matrix[3] = landscape_pos[0] + mouse_pos[0]; + g_cdata.cam.transform_inverse_view_matrix[7] = landscape_pos[1]; + g_cdata.cam.transform_inverse_view_matrix[11] = landscape_pos[2] + mouse_pos[1]; + + g_cdata.cam_right.transform_inverse_view_matrix[3] = landscape_pos[0] + mouse_pos[0]; + g_cdata.cam_right.transform_inverse_view_matrix[7] = landscape_pos[1]; + g_cdata.cam_right.transform_inverse_view_matrix[11] = landscape_pos[2] + mouse_pos[1]; +#endif + send_cam_data(); double t3 = omp_get_wtime(); CLIENT_DEBUG_PRINTF2("send_cam_data: cam:%f\n", t3 - t2); +# ifdef WITH_VRCLIENT_GLSTEREO + + glDrawBuffer(GL_BACK_LEFT); + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT /*| GL_STENCIL_BUFFER_BIT*/); + gl_render(0); + + glDrawBuffer(GL_BACK_RIGHT); + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT /*| GL_STENCIL_BUFFER_BIT*/); + gl_render(1); + + glfwSwapBuffers(g_windows[0]); +#else // clear buffer glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT); gl_render(0); gl_render(1); + gl_render(2); glfwSwapBuffers(g_windows[0]); +#endif double t4 = omp_get_wtime(); CLIENT_DEBUG_PRINTF4("displayFunc: pix:%f, cam:%f, ren:%f\n", t2 - t1, t3 - t2, t4 - t3); - current_samples++; + //current_samples++; } //<<<<<<< HEAD #endif @@ -1723,7 +1858,7 @@ void displayFunc() void resize(unsigned width, unsigned height) { - if (width == g_width && height == g_height && g_pixels_bufs[0]) + if (width == g_width && height == g_height && g_pixels_buf) return; // if (g_pixels) { @@ -1732,15 +1867,16 @@ void resize(unsigned width, unsigned height) // //setupTexture(width, height); // } - if (g_pixels_bufs[0]) { + if (g_pixels_buf) { freeTexture(0); freeTexture(1); freeTexture(2); #ifdef WITH_CUDA_GL_INTEROP - cuda_assert(cudaFreeHost(g_pixels_bufs[0])); - cuda_assert(cudaFreeHost(g_pixels_bufs[1])); - cuda_assert(cudaFreeHost(g_pixels_bufs[2])); + //cuda_assert(cudaFreeHost(g_pixels_bufs[0])); + cuda_assert(cudaFreeHost(g_pixels_buf)); + //cuda_assert(cudaFree(g_pixels_buf)); + //cuda_assert(cudaFree(g_pixels_buf_d)); #else delete[] g_pixels_buf; #endif @@ -1757,18 +1893,18 @@ void resize(unsigned width, unsigned height) // memset(g_pixels, 0, (size_t)width * height * SIZE_UCHAR4); #ifdef WITH_CUDA_GL_INTEROP - // cuda_assert(cudaMallocManaged(&g_pixels_buf, g_width * g_height * SIZE_UCHAR4)); - cuda_assert( - cudaHostAlloc(&g_pixels_bufs[0], g_width / 2 * g_height * SIZE_UCHAR4, cudaHostAllocMapped)); - cuda_assert(cudaHostGetDevicePointer(&g_pixels_bufs_d[0], g_pixels_bufs[0], 0)); + //cuda_assert(cudaMallocManaged(&g_pixels_buf, DWIDTH * g_height * SIZE_UCHAR4)); + //cuda_assert(cudaHostAlloc(&g_pixels_bufs[0], DWIDTH / 2 * g_height * SIZE_UCHAR4, cudaHostAllocMapped)); + //cuda_assert(cudaHostGetDevicePointer(&g_pixels_bufs_d[0], g_pixels_bufs[0], 0)); + + //cuda_assert(cudaHostAlloc(&g_pixels_bufs[1], DWIDTH / 2 * g_height * SIZE_UCHAR4, cudaHostAllocMapped)); + //cuda_assert(cudaHostGetDevicePointer(&g_pixels_bufs_d[1], g_pixels_bufs[1], 0)); - cuda_assert( - cudaHostAlloc(&g_pixels_bufs[1], g_width / 2 * g_height * SIZE_UCHAR4, cudaHostAllocMapped)); - cuda_assert(cudaHostGetDevicePointer(&g_pixels_bufs_d[1], g_pixels_bufs[1], 0)); + cuda_assert(cudaHostAlloc( + &g_pixels_buf, DWIDTH * g_height * SIZE_UCHAR4, cudaHostAllocMapped)); + cuda_assert(cudaHostGetDevicePointer(&g_pixels_buf_d, 
g_pixels_buf, 0)); - cuda_assert( - cudaHostAlloc(&g_pixels_bufs[2], g_width * g_height * SIZE_UCHAR4, cudaHostAllocMapped)); - cuda_assert(cudaHostGetDevicePointer(&g_pixels_bufs_d[2], g_pixels_bufs[2], 0)); + //cuda_assert(cudaMalloc(&g_pixels_buf_d, DWIDTH * g_height * SIZE_UCHAR4)); //cuda_assert( // cudaHostAlloc(&g_pixels_bufs[1], g_width * g_height * SIZE_UCHAR4, cudaHostAllocMapped)); @@ -1786,15 +1922,22 @@ void resize(unsigned width, unsigned height) //#endif // if (pboMode != -1) - //setupTexture(0, width, height); - //setupTexture(1, width, height); - setupTexture(2, width, height); + setupTexture(2); +#if 0 + setupTexture(0, width, height); + setupTexture(1, width, height); +#else + +# ifdef WITH_VRCLIENT_OPENVR vr_CreateFrameBuffer(0, width, height); vr_CreateFrameBuffer(1, width, height); +#endif + +#endif } -void toOrtho(int width, int height) +void toOrtho(int eye, int width, int height) { // set viewport to be the entire window glViewport(0, 0, (GLsizei)width, (GLsizei)height); @@ -1802,7 +1945,15 @@ void toOrtho(int width, int height) // set orthographic viewing frustum glMatrixMode(GL_PROJECTION); glLoadIdentity(); - glOrtho(0, 1, 0, 1, -1, 1); + + if (eye == 2) + glOrtho(0, 1, 0, 1, -1, 1); + + if (eye == 0) + glOrtho(0, 0.5, 0, 1, -1, 1); + + if (eye == 1) + glOrtho(0.5, 1, 0, 1, -1, 1); // switch to modelview matrix in order to set scene glMatrixMode(GL_MODELVIEW); @@ -1811,8 +1962,7 @@ void toOrtho(int width, int height) void reshapeFunc(GLFWwindow *window, int, int) { - int width, height; - glfwGetFramebufferSize(window, &width, &height); + glfwGetFramebufferSize(window, &w_width, &w_height); // resize(width, height); // glViewport(0, 0, width, height); // g_width = width; g_height = height; @@ -1821,7 +1971,7 @@ void reshapeFunc(GLFWwindow *window, int, int) // glOrtho(0, width, 0, height, -1, 1); // glMatrixMode(GL_MODELVIEW); - toOrtho(width, height); + //toOrtho(2, w_width, w_height); } void freeTexture(int eye) @@ -1829,12 +1979,19 @@ void freeTexture(int eye) // glfwMakeContextCurrent(g_windows[eye]); glDeleteTextures(1, &g_textureIds[eye]); - //if(eye == 0) - glDeleteBuffers(1, &g_bufferIds[eye]); + if(eye == 0 || eye == 1) + glDeleteBuffers(1, &g_bufferIds[eye]); + + if (eye == 2) { + cuda_assert(cudaGLUnmapBufferObject(g_bufferIds[eye])); + cuda_assert(cudaGLUnregisterBufferObject(g_bufferIds[eye])); + + glDeleteFramebuffers(1, &g_bufferIds[eye]); + } } // Setup Texture -void setupTexture(int eye, int width, int height) +void setupTexture(int eye) { // glfwMakeContextCurrent(g_windows[eye]); //int w2 = width * 2; @@ -1854,9 +2011,18 @@ void setupTexture(int eye, int width, int height) //#if defined(WITH_CUDA_GL_INTEROP) #ifdef WITH_CLIENT_YUV - glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE8, width, height, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL); + glTexImage2D( + GL_TEXTURE_2D, 0, GL_LUMINANCE8, DWIDTH, g_height, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL); #else - glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, (eye==2) ? width : width / 2, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL); + glTexImage2D(GL_TEXTURE_2D, + 0, + GL_RGBA8, + (eye == 2) ? DWIDTH : DWIDTH / 2, + g_height, + 0, + GL_RGBA, + GL_UNSIGNED_BYTE, + NULL); #endif //#else // glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, @@ -1864,6 +2030,7 @@ void setupTexture(int eye, int width, int height) glBindTexture(GL_TEXTURE_2D, 0); +#if 0 if (eye == 0 || eye == 1) { // create 2 pixel buffer objects, you need to delete them when program exits. 
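// --- Illustrative sketch (not part of the patch) ---------------------------
// setupTexture() and gl_render() stream the received frame through a pixel
// buffer object: the PBO is registered with CUDA once, CUDA writes into the
// mapped device pointer, and glTexSubImage2D() with a PBO bound reads from a
// byte offset instead of host memory. Condensed version of that flow, using
// the legacy cudaGL* interop API the patch uses; error handling is omitted
// and the function names are hypothetical. Strictly, the buffer should be
// unmapped with cudaGLUnmapBufferObject() before GL consumes it, as
// freeTexture() does on teardown.
#include <GL/glew.h>
#include <cuda_gl_interop.h>

static GLuint create_interop_pbo(size_t bytes, void **cuda_ptr)
{
  GLuint pbo = 0;
  glGenBuffers(1, &pbo);
  glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
  glBufferData(GL_PIXEL_UNPACK_BUFFER, bytes, nullptr, GL_DYNAMIC_COPY);
  glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);

  cudaGLRegisterBufferObject(pbo);       // legacy CUDA/GL interop registration
  cudaGLMapBufferObject(cuda_ptr, pbo);  // device pointer CUDA can write into
  return pbo;
}

static void upload_pbo_to_texture(GLuint pbo, GLuint tex, int w, int h)
{
  glBindTexture(GL_TEXTURE_2D, tex);
  glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
  // With a PBO bound, the last argument is a byte offset into the buffer.
  glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, 0);
  glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
  glBindTexture(GL_TEXTURE_2D, 0);
}
// ---------------------------------------------------------------------------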
@@ -1883,8 +2050,8 @@ void setupTexture(int eye, int width, int height)
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
  }
-
-  if (eye == 2)
+#endif
+  //if (eye == 2)
  {
    // create 2 pixel buffer objects, you need to delete them when program exits.
@@ -1895,13 +2062,16 @@ void setupTexture(int eye, int width, int height)
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, g_bufferIds[eye]);
 #if defined(WITH_CUDA_GL_INTEROP)
-#ifdef WITH_CLIENT_YUV
-    glBufferData(GL_PIXEL_UNPACK_BUFFER, (size_t)w2 * height, 0, GL_DYNAMIC_COPY);
-#else
-    glBufferData(GL_PIXEL_UNPACK_BUFFER, (size_t)((eye == 2) ? width : width / 2) * height * SIZE_UCHAR4, 0, GL_DYNAMIC_COPY);
-#endif
+//#ifdef WITH_CLIENT_YUV
+//    glBufferData(GL_PIXEL_UNPACK_BUFFER, (size_t)w2 * height, 0, GL_DYNAMIC_COPY);
+//#else
+    glBufferData(GL_PIXEL_UNPACK_BUFFER,
+                 (size_t)((eye == 2) ? DWIDTH : DWIDTH / 2) * g_height * SIZE_UCHAR4,
+                 0,
+                 GL_DYNAMIC_COPY);
+//#endif
    cuda_assert(cudaGLRegisterBufferObject(g_bufferIds[eye]));
-    cuda_assert(cudaGLMapBufferObject((void**)&g_pixels_bufs_d[eye], g_bufferIds[eye]));
+    cuda_assert(cudaGLMapBufferObject((void**)&g_pixels_buf_d, g_bufferIds[eye]));
 #else
    glBufferData(GL_PIXEL_UNPACK_BUFFER, (size_t)w2 * height * SIZE_UCHAR4, 0, GL_STREAM_DRAW);
 #endif
@@ -1927,15 +2097,15 @@
 void gl_init_win(int eye)
 {
  glfwMakeContextCurrent(g_windows[eye]);
-  // glfwSetKeyCallback(g_windows[eye], keyboardFunc);
-  // glfwSetCursorPosCallback(g_windows[eye], motionFunc);
-  // glfwSetMouseButtonCallback(g_windows[eye], clickFunc);
+  glfwSetKeyCallback(g_windows[eye], keyboardFunc);
+  glfwSetCursorPosCallback(g_windows[eye], motionFunc);
+  glfwSetMouseButtonCallback(g_windows[eye], clickFunc);
  //// glfwSetCharCallback(window, ImGui_ImplGlfw_CharCallback);
  //// glfwSetScrollCallback(window, ImGui_ImplGlfw_ScrollCallback);
  glfwSetWindowSizeCallback(g_windows[eye], reshapeFunc);
  //// resize(g_width, g_height);
  //// setupTexture(g_width, g_height);
-  //// glfwSwapInterval(1);
+  glfwSwapInterval(1);
  reshapeFunc(g_windows[eye], 0, 0);
@@ -1958,10 +2128,24 @@
 void gl_init()
 {
  glfwSetErrorCallback(errorFunc);
  glfwInit();
-  glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 2);
+  glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
  glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 0);
-  g_windows[0] = glfwCreateWindow(512, 256, "PathTracer0", NULL, NULL);
+  GLFWmonitor *monitor = NULL;
+
+#ifdef WITH_VRCLIENT_GLSTEREO
+  glfwWindowHint(GLFW_STEREO, GL_TRUE);
+
+  //int count;
+  //GLFWmonitor **monitors = glfwGetMonitors(&count);
+  //for (int i = 0; i < count; i++) {
+  //  const char *name = glfwGetMonitorName(monitors[i]);
+  //  printf("monitor: %d, %s\n", i, name);
+  //}
+  //monitor = monitors[count - 1];
+#endif
+
+  g_windows[0] = glfwCreateWindow(512, 256, "PathTracer0", monitor, NULL);
  // g_windows[1] = glfwCreateWindow(512, 512, "PathTracer1", NULL, NULL);
  glfwMakeContextCurrent(g_windows[0]);
@@ -1995,8 +2179,7 @@ void gl_init()
 #ifdef WITH_VRCLIENT_VRGSTREAM
 VRG_STREAM_API enum VrgStreamApiError vrgStreamInit(enum VrgInputFormat inputFormat)
 {
-  printf("vrgStreamInit: %d\n", inputFormat);
-
+  //printf("vrgStreamInit: %d\n", inputFormat);
  // cyclesphi::kernel::init_sockets_cam();
  // cyclesphi::kernel::init_sockets_data();
@@ -2006,6 +2189,7 @@ VRG_STREAM_API enum VrgStreamApiError vrgStreamInit(enum VrgInputFor
  vr_init();
 # endif
+  resize(7680, 2160);
  //g_cdata.width = 7680 / 2;
  //g_cdata.height = 2160;
@@ -2014,22 +2198,25 @@
  return Ok;
 }
-VRG_STREAM_API enum VrgStreamApiError vrgStreamRenderFrame(int frame_no,
-                                                           struct RenderPacket *packet)
+//VRG_STREAM_API enum VrgStreamApiError vrgStreamRenderFrame(int frame_no,
+//                                                           struct RenderPacket *packet)
+VRG_STREAM_API enum VrgStreamApiError vrgStreamRenderFrame(struct RenderPacket *packet)
 {
-  printf("vrgStreamRenderFrame: %d\n", frame_no);
+  //printf("vrgStreamRenderFrame: %d\n", frame_no);
 #ifdef WITH_VRCLIENT_OPENVR
  //vr_render_frame();
  vr_UpdateHMDMatrixPose();
-  convert_openvr_to_vrgstream(packet);
+  convert_openvr_to_vrgstream(&g_packet);
 # endif
-  memcpy(packet, &g_packet, sizeof(RenderPacket));
  // unsigned int frame;
-  packet->frame = frame_no;
+  //g_packet.frame = frame_no + 1;
+
+  memcpy(packet, &g_packet, sizeof(RenderPacket));
+
-  resize(2*g_packet.pix_width_eye, g_packet.pix_height_eye);
+  resize(DWIDTH * g_packet.pix_width_eye, g_packet.pix_height_eye);
  // recv_cam_data(&packet->data);
  //
  // if (g_pixels_size != packet->data.width * packet->data.height * sizeof(unsigned char) * 4) {
@@ -2043,9 +2230,13 @@ VRG_STREAM_API enum VrgStreamApiError vrgStreamRenderFrame(int frame_no,
  return Ok;
 }
-VRG_STREAM_API enum VrgStreamApiError vrgStreamSubmitFrame(int frame_no, void *sbs_image_data)
+//VRG_STREAM_API enum VrgStreamApiError vrgStreamSubmitFrame(int frame_no, void *sbs_image_data)
+VRG_STREAM_API enum VrgStreamApiError vrgStreamSubmitFrame(struct RenderPacket *packet,
+                                                           void *sbs_image_data,
+                                                           enum VrgMemory api)
 {
-  printf("vrgStreamSubmitFrame: %d, %d x %d\n", frame_no, g_width, g_height);
+
+  //printf("vrgStreamSubmitFrame: %d, %d x %d\n", frame_no, g_width, g_height);
  glfwPollEvents();
@@ -2053,20 +2244,32 @@ VRG_STREAM_API enum VrgStreamApiError vrgStreamSubmitFrame(int frame_no, void *s
  double t2 = omp_get_wtime();
  // cyclesphi::kernel::socket_recv_data_data((char*)g_pixels_buf, g_width * g_height *
  // SIZE_UCHAR4);
-  displayFPS(1, current_samples, g_width * g_height * SIZE_UCHAR4);
+  displayFPS(1, current_samples, DWIDTH * g_height * SIZE_UCHAR4);
  double t3 = omp_get_wtime();
-  size_t pix_type_size = SIZE_UCHAR4;
+  //size_t pix_type_size = SIZE_UCHAR4;
  //int w2 = g_width * 2;
-  memcpy(g_pixels_bufs[2], sbs_image_data, g_width * g_height * pix_type_size);
+  //memcpy(g_pixels_bufs[2], sbs_image_data, g_width * g_height * pix_type_size);
 # ifdef WITH_CUDA_GL_INTEROP
  // glfwMakeContextCurrent(g_windows[0]);
-  cuda_assert(cudaMemcpy(g_pixels_bufs_d[2],
-                         g_pixels_bufs[2],
-                         g_width * g_height * pix_type_size,
-                         cudaMemcpyDefault));
+  cuda_assert(cudaMemcpy(g_pixels_buf_d, sbs_image_data, DWIDTH * g_height * SIZE_UCHAR4, cudaMemcpyDefault));
+
+  //cuda_assert(cudaMemcpy2D(g_pixels_bufs_d[0],
+  //                         g_width / 2 * pix_type_size, // g_width / 2 * pix_type_size, // dpitch
+  //                         //0,
+  //                         g_pixels_bufs_d[2],
+  //                         //0,
+  //                         g_width * pix_type_size,
+  //                         g_width * pix_type_size,
+  //                         g_height,
+  //                         cudaMemcpyDefault));
+
+  //cuda_assert(cudaMemcpy2D(g_pixels_bufs_d[1],
+  //                         g_pixels_bufs_d[2],
+  //                         g_width * g_height * pix_type_size,
+  //                         cudaMemcpyDefault));
  // glfwMakeContextCurrent(g_windows[1]);
@@ -2085,7 +2288,7 @@ VRG_STREAM_API enum VrgStreamApiError vrgStreamSubmitFrame(int frame_no, void *s
 #ifdef WITH_VRCLIENT_OPENVR
  vr_render_frame();
-  vr_UpdateHMDMatrixPose();
+  //vr_UpdateHMDMatrixPose();
 #endif
  if (glfwWindowShouldClose(g_windows[0]) /* || glfwWindowShouldClose(g_windows[1])*/)
@@ -2103,7 +2306,6 @@ void gl_loop()
  while (true) {
    glfwPollEvents();
    displayFunc();
-    //>>>>>>> a14bae17e5afdf12d27e4203d6fb157a0f641f6f
 #ifdef WITH_VRCLIENT_OPENVR
    vr_render_frame();
diff --git a/client/vrclient/vrclient_libug.cpp b/client/vrclient/vrclient_libug.cpp
index 7461b69412f8c5668a98087656206c622b37de7c..2c1d4a2381aeceaaa2aee64bc909ea92764a8668 100644
--- a/client/vrclient/vrclient_libug.cpp
+++ b/client/vrclient/vrclient_libug.cpp
@@ -42,9 +42,12 @@ int main(int argc, char *argv[])
 {
  ug_receiver_parameters init_params;
  memset(&init_params, 0, sizeof(ug_receiver_parameters));
-  init_params.decompress_to = UG_RGBA;// : UG_I420;
+  init_params.decompress_to = UG_RGBA; // UG_CUDA_RGBA; // UG_RGBA; // UG_I420;
  init_params.display = "vrg";//"gl";// "vrg";
-  init_params.sender = "195.113.250.203";// "localhost";
+  init_params.sender = "195.113.175.2";// "195.113.250.203"; // "localhost";
+  init_params.disable_strips = 0;
+  init_params.force_gpu_decoding = false;
+
  //init_params.rx_port = 5004;
  //init_params.tx_port = 5006;
diff --git a/intern/cycles/kernel/kernels/client/kernel_socket.cpp b/intern/cycles/kernel/kernels/client/kernel_socket.cpp
index b4812e64c3eb3c8b7d7909357b8b7ce275a8e047..25dd7c88e2eda75653511b6c1e1cd0d09f70724c 100644
--- a/intern/cycles/kernel/kernels/client/kernel_socket.cpp
+++ b/intern/cycles/kernel/kernels/client/kernel_socket.cpp
@@ -46,8 +46,8 @@
 #include <omp.h>
 // RGB
-#define TCP_WIN_SIZE_SEND (64L * 1024L * 1024L)
-#define TCP_WIN_SIZE_RECV (1024L * 1024L)
+#define TCP_WIN_SIZE_SEND (32L * 1024L * 1024L)
+#define TCP_WIN_SIZE_RECV (64L * 1024L * 1024L)
 #ifdef WITH_SOCKET_UDP
 # define TCP_BLK_SIZE (32000L)
@@ -71,7 +71,7 @@
 # define KERNEL_SOCKET_RECV(s, buf, len) read(s, buf, len)
 #endif
-#define PRINT_DEBUG
+//#define PRINT_DEBUG
 //#ifndef WITH_CLIENT_VRGSTREAM
 namespace cyclesphi {
@@ -348,65 +348,78 @@ bool socket_server_create(int port,
                          int &server_socket_id,
                          int &client_socket_id,
                          sockaddr_in &server_sock,
-                          sockaddr_in &client_sock)
+                          sockaddr_in &client_sock,
+                          bool only_accept = false);
+
+bool socket_server_create(int port,
+                          int &server_socket_id,
+                          int &client_socket_id,
+                          sockaddr_in &server_sock,
+                          sockaddr_in &client_sock,
+                          bool only_accept)
 {
-  if (!init_wsa()) {
-    return false;
-  }
+  if (!only_accept) {
+    if (!init_wsa()) {
+      return false;
+    }
-  int socket_type = SOCK_STREAM;
-  int socket_protocol = IPPROTO_TCP;
+    int socket_type = SOCK_STREAM;
+    int socket_protocol = IPPROTO_TCP;
 # ifdef WITH_SOCKET_UDP
-  socket_type = SOCK_DGRAM;
-  socket_protocol = IPPROTO_UDP;
+    socket_type = SOCK_DGRAM;
+    socket_protocol = IPPROTO_UDP;
 # endif
-  server_socket_id = socket(AF_INET, socket_type, socket_protocol);
+    server_socket_id = socket(AF_INET, socket_type, socket_protocol);
-  if (server_socket_id == -1) {
-    printf("server_socket_id == -1\n");
-    fflush(0);
-    return false;
-  }
+    if (server_socket_id == -1) {
+      printf("server_socket_id == -1\n");
+      fflush(0);
+      return false;
+    }
 # if !defined(__MIC__) && !defined(WIN32)
-  int enable = 1;
-  setsockopt(server_socket_id, SOL_SOCKET, SO_REUSEPORT, &enable, sizeof(int));
+    int enable = 1;
+    setsockopt(server_socket_id, SOL_SOCKET, SO_REUSEPORT, &enable, sizeof(int));
 # endif
-  // timeval tv;
-  // tv.tv_sec = g_timeval_sec;
-  // tv.tv_usec = 0;
-  // if (setsockopt(server_socket_id, SOL_SOCKET, SO_RCVTIMEO, (char *)&tv, sizeof(tv)) < 0) {
-  //  printf("setsockopt == -1\n");
-  //  fflush(0);
-  //  return false;
-  //}
-
-  // sockaddr_in sock_name;
-  memset(&server_sock, 0, sizeof(server_sock));
-  memset(&client_sock, 0, sizeof(client_sock));
-  server_sock.sin_family = AF_INET;
-  server_sock.sin_port = htons(port);
-  server_sock.sin_addr.s_addr = INADDR_ANY;
-
-  int err_bind = bind(server_socket_id, (sockaddr *)&server_sock, sizeof(server_sock));
-  if (err_bind == -1) {
-    printf("err_bind == -1\n");
-    fflush(0);
-    return false;
-  }
+    // timeval tv;
+    // tv.tv_sec = g_timeval_sec;
+    // tv.tv_usec = 0;
+    // if (setsockopt(server_socket_id, SOL_SOCKET, SO_RCVTIMEO, (char *)&tv, sizeof(tv)) < 0) {
+    //  printf("setsockopt == -1\n");
+    //  fflush(0);
+    //  return false;
+    //}
+
+    // sockaddr_in sock_name;
+    memset(&server_sock, 0, sizeof(server_sock));
+    memset(&client_sock, 0, sizeof(client_sock));
+    server_sock.sin_family = AF_INET;
+    server_sock.sin_port = htons(port);
+    server_sock.sin_addr.s_addr = INADDR_ANY;
+
+    int err_bind = bind(server_socket_id, (sockaddr *)&server_sock, sizeof(server_sock));
+    if (err_bind == -1) {
+      printf("err_bind == -1\n");
+      fflush(0);
+      return false;
+    }
 # ifdef WITH_SOCKET_UDP
-  client_socket_id = server_socket_id;
+    client_socket_id = server_socket_id;
 # else
-  int err_listen = listen(server_socket_id, 1);
-  if (err_listen == -1) {
-    printf("err_listen == -1\n");
-    fflush(0);
-    return false;
+    int err_listen = listen(server_socket_id, 1);
+    if (err_listen == -1) {
+      printf("err_listen == -1\n");
+      fflush(0);
+      return false;
+    }
+# if defined(WITH_SOCKET_ONLY_DATA)
+    return true;
+  #endif
  }
  sockaddr_in client_info;
@@ -423,7 +436,7 @@ bool socket_server_create(int port,
 # endif
  // printf("accept\n");
-  printf("accept on %d\n", port);
+  printf("accept on %d <-> %d\n", port, client_info.sin_port);
  fflush(0);
@@ -471,7 +484,7 @@ bool socket_client_create(const char *server_name,
  // fflush(0);
  // return false;
  //}
-# ifndef WITH_SOCKET_UDP
+# if !defined(WITH_SOCKET_UDP) //&& !defined(_WIN32)
  setsock_tcp_windowsize(client_socket_id, TCP_WIN_SIZE_SEND, 1);
  setsock_tcp_windowsize(client_socket_id, TCP_WIN_SIZE_RECV, 0);
 # endif
@@ -757,8 +770,10 @@ void init_sockets_data(const char *server, int port)
 {
  if (g_client_socket_id_data[0] == -1) {
    init_wsa();
-# if (!defined(WITH_SOCKET_UDP) && !defined(BLENDER_CLIENT)) || \
-     (defined(WITH_SOCKET_UDP) && defined(BLENDER_CLIENT))
+
+# if (!defined(WITH_SOCKET_ONLY_DATA) && !defined(BLENDER_CLIENT)) \
+      || \
+      (defined(WITH_SOCKET_ONLY_DATA) && defined(BLENDER_CLIENT))
    const char *env_p_port_data = std::getenv("SOCKET_SERVER_PORT_DATA");
    if (port == 0) {
@@ -777,13 +792,22 @@ void init_sockets_data(const char *server, int port)
      strcpy(server_temp, server);
    }
+  # ifdef WITH_SOCKET_ONLY_DATA
+//# pragma omp parallel for num_threads(SOCKET_CONNECTIONS)
+    for (int i = 0; i < SOCKET_CONNECTIONS; i++) {
+      int tid = i;//omp_get_thread_num();
+//# pragma omp critical
+      socket_client_create(
+          server_temp, port, g_client_socket_id_data[tid], g_client_sockaddr_data[tid]);
+    }
+  #else
 # pragma omp parallel for num_threads(SOCKET_CONNECTIONS)
    for (int i = 0; i < SOCKET_CONNECTIONS; i++) {
      int tid = omp_get_thread_num();
      socket_client_create(
          server_temp, port + tid, g_client_socket_id_data[tid], g_client_sockaddr_data[tid]);
    }
-
+  #endif
    // char ack = -1;
    // socket_send_data_data(&ack, sizeof(ack));
@@ -794,6 +818,33 @@ void init_sockets_data(const char *server, int port)
      port = atoi(env_p_port_data);
    }
+#if defined(WITH_SOCKET_ONLY_DATA)
+    socket_server_create(port,
+                         g_server_socket_id_data[0],
+                         g_client_socket_id_data[0],
+                         g_server_sockaddr_data[0],
+                         g_client_sockaddr_data[0],
+                         false);
+
+    for (int tid = 1; tid < SOCKET_CONNECTIONS; tid++) {
+      g_server_socket_id_data[tid] = g_server_socket_id_data[0];
+      g_server_sockaddr_data[tid] = g_server_sockaddr_data[0];
+    }
+
+//# pragma omp parallel for num_threads(SOCKET_CONNECTIONS)
+    for (int i = 0; i < SOCKET_CONNECTIONS; i++) {
+      int tid = i;//omp_get_thread_num();
+//#pragma omp critical
+      socket_server_create(port,
+                           g_server_socket_id_data[tid],
+                           g_client_socket_id_data[tid],
+                           g_server_sockaddr_data[tid],
+                           g_client_sockaddr_data[tid],
+                           true);
+    }
+
+
+  #else
 # pragma omp parallel for num_threads(SOCKET_CONNECTIONS)
    for (int i = 0; i < SOCKET_CONNECTIONS; i++) {
      int tid = omp_get_thread_num();
@@ -805,7 +856,7 @@ void init_sockets_data(const char *server, int port)
    }
    // char ack = -1;
    // socket_recv_data_data(&ack, sizeof(ack));
-
+# endif
 # endif
  }
 }
@@ -813,6 +864,11 @@ void init_sockets_data(const char *server, int port)
 # if 1
 void socket_send_data_cam(char *data, CLIENT_SIZE_T size, bool ack_enabled)
 {
+# ifdef WITH_SOCKET_ONLY_DATA
+  socket_send_data_data(data, size, false);
+  return;
+# endif
+
 #ifdef PRINT_DEBUG
  printf("socket_send_data_cam: %d\n", size);
  fflush(0);
@@ -942,6 +998,10 @@ void socket_send_data_cam(char *data, CLIENT_SIZE_T size, bool ack_enabled)
 void socket_send_data_data(char *data, CLIENT_SIZE_T size, bool ack_enabled)
 {
+# ifdef WITH_SOCKET_ONLY_DATA
+  ack_enabled = false;
+# endif
+
 #ifdef PRINT_DEBUG
  printf("socket_send_data_data: %d\n", size);
  fflush(0);
@@ -1072,6 +1132,11 @@ void socket_send_data_data(char *data, CLIENT_SIZE_T size, bool ack_enabled)
 void socket_recv_data_cam(char *data, CLIENT_SIZE_T size, bool ack_enabled)
 {
+#ifdef WITH_SOCKET_ONLY_DATA
+  socket_recv_data_data(data, size, false);
+  return;
+#endif
+
 #ifdef PRINT_DEBUG
  printf("socket_recv_data_cam: %d\n", size);
  fflush(0);
@@ -1203,6 +1268,10 @@ void socket_recv_data_cam(char *data, CLIENT_SIZE_T size, bool ack_enabled)
 void socket_recv_data_data(char *data, CLIENT_SIZE_T size, bool ack_enabled)
 {
+# ifdef WITH_SOCKET_ONLY_DATA
+  ack_enabled = false;
+# endif
+
 #ifdef PRINT_DEBUG
  printf("socket_recv_data_data: %d\n", size);
  fflush(0);
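
Editor's note (illustration, not part of the patch): the WITH_SOCKET_ONLY_DATA path in kernel_socket.cpp above reuses a single listening socket for all SOCKET_CONNECTIONS data channels. socket_server_create() is called once with only_accept = false to socket/bind/listen, and then once per channel with only_accept = true, so each additional connection is simply another accept() on the same port. The following is a minimal, self-contained POSIX sketch of that pattern; the port number and connection count are made-up stand-ins and are not taken from the code.

// sketch_single_port_accept.cpp -- accept several clients on one bound/listening port
#include <arpa/inet.h>
#include <cstdio>
#include <cstring>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

int main()
{
  const int port = 7000;      // assumed port, analogous to SOCKET_SERVER_PORT_DATA
  const int connections = 4;  // stands in for SOCKET_CONNECTIONS

  int server_fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);

  sockaddr_in addr;
  memset(&addr, 0, sizeof(addr));
  addr.sin_family = AF_INET;
  addr.sin_port = htons(port);
  addr.sin_addr.s_addr = INADDR_ANY;

  // Bind and listen exactly once (the only_accept == false call in the patch) ...
  if (bind(server_fd, (sockaddr *)&addr, sizeof(addr)) == -1 ||
      listen(server_fd, connections) == -1) {
    perror("bind/listen");
    return 1;
  }

  // ... then accept every data connection from the same listening socket
  // (the per-connection only_accept == true calls).
  int client_fd[connections];
  for (int i = 0; i < connections; i++) {
    sockaddr_in client;
    socklen_t len = sizeof(client);
    client_fd[i] = accept(server_fd, (sockaddr *)&client, &len);
    printf("accept on %d, connection %d\n", port, i);
  }

  for (int i = 0; i < connections; i++)
    close(client_fd[i]);
  close(server_fd);
  return 0;
}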