diff --git a/spim_registration/timelapse/SingleChannel.yaml b/spim_registration/timelapse/SingleChannel.yaml
deleted file mode 100755
index 60b1688d0ca8bac5d1029846f28e22e181895169..0000000000000000000000000000000000000000
--- a/spim_registration/timelapse/SingleChannel.yaml
+++ /dev/null
@@ -1,189 +0,0 @@
-common: {
-  # directory that contains the bean shell scripts
-  bsh_directory: "/projects/pilot_spim/Christopher/snakemake-workflows/spim_registration/timelapse/",
-  directory_cuda: "/sw/users/schmied/packages/2015-06-08_Fiji.app.cuda/lib/",
-  fiji-app: "/sw/users/schmied/packages/2015-06-08_Fiji.app.cuda/ImageJ-linux64",
-  fiji-prefix: "/sw/bin/xvfb-run -a",
-  first_xml_filename: "test_unicore",
-  hdf5_xml_filename: '"hdf5_test_unicore"',
-  merged_xml: "hdf5_test_unicore_merge",
-  ntimepoints: 5,
-  angles: "0,72,144,216,288",
-  channels: "green",
-  illumination: "0",
-  pixel_distance_x: '0.28590106964',
-  pixel_distance_y: '0.28590106964',
-  pixel_distance_z: '1.50000',
-  pixel_unit: "um",
-  # transformation_switch: "timelapse_dublicate",
-  transformation_switch: "timelapse",
-  # fusion_switch: "deconvolution"
-  fusion_switch: "fusion"
-  }
-              
-define_xml_czi: {
-  first_czi: "2015-04-11_LZ2_Stock68_3.czi",
-  rotation_around: "X-Axis",
-  bsh_file: "define_czi.bsh"
-  }
-          
-define_xml_tif: {
-  image_file_pattern: 'img_TL{{t}}_Angle{{a}}.tif',
-  imglib_container: '"ArrayImg (faster)"',
-  multiple_angles: '"YES (one file per angle)"',
-  multiple_channels: '"NO (one channel)"',
-  multiple_illumination_directions: '"NO (one illumination direction)"',
-  multiple_timepoints: '"YES (one file per time-point)"',
-  type_of_dataset: '"Image Stacks (ImageJ Opener)"',
-  bsh_file: "define_tif_zip.bsh"
-  }
-  
-resave_hdf5: {
-  hdf5_chunk_sizes: '"{{ {{32,32,4}}, {{32,32,4}}, {{16,16,16}}, {{16,16,16}} }}"',
-  resave_angle: '"All angles"',
-  resave_channel: '"All channels"',
-  resave_illumination: '"All illuminations"',
-  resave_timepoint: '"All Timepoints"',
-  setups_per_partition: '0',
-  subsampling_factors: '"{{ {{1,1,1}}, {{2,2,1}}, {{4,4,1}}, {{8,8,1}} }}"',
-  timepoints_per_partition: '1',
-  bsh_file: "export.bsh"
-  }
-
-registration: {
-  # "Single Channel" is not a valid choice for "Process_channel"
-  reg_process_channel: '"All channels"', 
-  reg_processing_channel: '"green"',
-  reg_interest_points_channel: '"beads"',
-  reg_radius_1: '2',
-  reg_radius_2: '3',
-  reg_threshold: '0.005',
-  initial_sigma: '1.8',
-  threshold_gaussian: '0.0080',
-  type_of_detection: '"Difference-of-Mean (Integral image based)"',
-  label_interest_points: '"beads"',
-  reg_process_timepoint: '"Single Timepoint (Select from List)"',
-  reg_process_angle: '"All angles"',
-  reg_process_illumination: '"All illuminations"',
-  subpixel_localization: '"3-dimensional quadratic fit"',
-  detection_min_max: "find_maxima",
-  type_of_registration: '"Register timepoints individually"',
-  algorithm: '"Fast 3d geometric hashing (rotation invariant)"',
-  transformation_model: "Affine",
-  allowed_error_for_ransac: '5',
-  significance: '10',
-  fix_tiles: '"Fix first tile"',
-  map_back_tiles: '"Map back to first tile using rigid model"',
-  model_to_regularize_with: "Rigid",
-  lambda: '0.10',
-  imglib_container: '"ArrayImg (faster)"',
-  bsh_file: "registration.bsh"
-  }
-
-xml_merge: {
-  bsh_file: "xml_merge.bsh"
-  }
-  
-timelapse: {
-  reference_timepoint: '0',
-  type_of_registration_timelapse: '"Match against one reference timepoint (no global optimization)"',
-  timelapse_process_timepoints: '"All Timepoints"',
-  bsh_file: "timelapse_registration.bsh"
-  }
-  
-Dublicate_transformations: {
-  source_dublication: "red",
-  target_dublication: "green",
-  duplicate_which_transformations: '"Replace all transformations"',
-  bsh_file: "Dublicate_transformations.bsh"
-  }
-  
-fusion: {
-  bsh_file: "fusion.bsh",
-  downsample: '4',
-  fused_image: '"Append to current XML Project"',
-  imglib2_container_fusion: '"ArrayImg"',
-  imglib2_data_container: '"ArrayImg (faster)"',
-  interpolation: '"Linear Interpolation"',
-  minimal_x: '190',
-  minimal_y: '-16',
-  minimal_z: '-348',
-  maximal_x: '1019',
-  maximal_y: '1941',
-  maximal_z: '486',
-  pixel_type: '"16-bit unsigned integer"',
-  process_angle: '"All angles"',
-  process_channel: '"All channels"',
-  process_illumination: '"All illuminations"',
-  process_timepoint: '"Single Timepoint (Select from List)"',
-  process_views_in_paralell: '"All"',
-  xml_output: '"Save every XML with user-provided unique id"'
-  }
-
-external_transform: {
-  # channel setting: '"all_channels"'
-  channel_setting: '"green"',
-  transform_timepoint: '"All Timepoints"',
-  transform_angle: '"All angles"',
-  transform_channel: '"All channels"',
-  # illumination setting only one illumination side
-  transform_illumination: '"All illuminations"',
-  apply_transformation: '"Current view transformations (appends to current transforms)"',
-  define_mode_transform: '"Matrix"',
-  matrix_transform: '"0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0"',
-  transformation: '"Rigid"',
-  bsh_file: "transform.bsh"
-  }
-
-deconvolution: {
-  iterations: '1',
-  minimal_x_deco: '190',
-  minimal_y_deco: '-16',
-  minimal_z_deco: '-348',
-  maximal_x_deco: '1019',
-  maximal_y_deco: '1941',
-  maximal_z_deco: '486',
-  detections_to_extract_psf_for_channel: '"beads"',
-  process_timepoint: '"Single Timepoint (Select from List)"',
-  process_angle: '"All angles"',
-  process_channel: '"All channels"',
-  process_illumination: '"All illuminations"',
-  Tikhonov_parameter: '0.0006',
-  compute: '"in 512x512x512 blocks"',
-  compute_on: '"GPU (Nvidia CUDA via JNA)"',
-  imglib2_container: '"ArrayImg"',
-  osem_acceleration: '"1 (balanced)"',
-  psf_estimation: '"Extract from beads"',
-  psf_size_x: '19',
-  psf_size_y: '19',
-  psf_size_z: '25',
-  type_of_iteration: '"Efficient Bayesian - Optimization I (fast, precise)"',
-  bsh_file: "deconvolution_GPU.bsh"
-  }
-  
-hdf5_output: {
-  output_image_file_pattern: 'TP{{t}}_Chgreen_Ill0_Ang0,72,144,216,288.tif',
-  output_xml: '"fused_Single_Channel"',
-  output_hdf5_xml: '"hdf5_fused_Single_Channel"',
-  output_multiple_channels: '"NO (one channel)"',
-  output_timepoints: '0-4',
-  output_pixel_distance_x: 0.5718,
-  output_pixel_distance_y: 0.5718,
-  output_pixel_distance_z: 0.5718,
-  output_pixel_unit: 'um',
-  output_channels: "green",
-  output_data_type: "32Bit",
-  convert_32bit: '"[Use min/max of first image (might saturate intenities over time)]"',
-  output_type_of_dataset: '"Image Stacks (ImageJ Opener)"',
-  output_multiple_timepoints: '"YES (one file per time-point)"',
-  output_multiple_angles: '"NO (one angle)"',
-  output_illumination_directions: '"NO (one illumination direction)"',
-  output_imglib_container: '"ArrayImg (faster)"',
-  subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}}, {{4,4,4}}, {{8,8,8}} }}"',
-  chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}}, {{16,16,16}}, {{16,16,16}} }}"',
-  # subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}} }}"',
-  # chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}} }}"',
-  bsh_file_define: "define_output.bsh",
-  bsh_file_hdf5: "export_output.bsh"
-  }
-
diff --git a/spim_registration/timelapse/Snakefile b/spim_registration/timelapse/Snakefile
index 19798e53ed8258cb30911bdb4d00a557d41e85d2..f7211ce48505bc7396ac0bb0229d30df15118706 100755
--- a/spim_registration/timelapse/Snakefile
+++ b/spim_registration/timelapse/Snakefile
@@ -6,9 +6,13 @@ JOBDIR=os.path.abspath(os.path.curdir)
 if JOBDIR[-1] != "/": # this checks if jobdir ends with slash if not it adds a slash
    JOBDIR+="/"
 
-#data specific config file, expected to be inside JOBDIR
-# configfile: "tomancak_test_cluster.json"
-configfile: "tomancak_test_cluster.yaml"
+# Test config file, single channel:
+# configfile: "single_test.yaml"
+# Test config file, dual channel where one channel contains beads:
+configfile: "dual_OneChannel.yaml"
+
+# data specific config file, expected to be inside JOBDIR
+# configfile: "tomancak_test_cluster.yaml"
 
 
 padding_format = "{0:0"+str(padding_of_file_id(int(config["common"]["ntimepoints"])))+"d}"
@@ -179,9 +183,15 @@ rule registration:
 	-Dreg_radius_1={reg_radius_1} \
 	-Dreg_radius_2={reg_radius_2} \
         -Dreg_threshold={reg_threshold} \
-        -Dinitial_sigma={initial_sigma} \
+        -Dsigma={sigma} \
         -Dthreshold_gaussian={threshold_gaussian} \
-        -Dregistration_algorithm={algorithm} \
+        -Dcompute_on={compute_on} \
+	-Ddirectory_cuda={directory_cuda} \
+	-Dseparableconvolution={separableconvolution} \
+	-Ddownsample_detection={downsample_detection} \
+	-Ddownsample_xy={downsample_xy} \
+	-Ddownsample_z={downsample_z} \
+	-Dregistration_algorithm={algorithm} \
         -Dreg_interest_points_channel={reg_interest_points_channel} \
         -Dfix_tiles={fix_tiles} \
         -Dmap_back_tiles={map_back_tiles} \
@@ -254,10 +264,10 @@ rule timelapse:
         cmd_string += " > {log} 2>&1 && touch {output}"
         shell(cmd_string)
 
-rule dublicate_transformations:
+rule duplicate_transformations:
     input: rules.timelapse.output, merged_xml="{xml_base}_merge.xml"
-    output: rules.timelapse.output[0] + "_dublicate"
-    log: "{xml_base}_dublicate_transformations.log"
+    output: rules.timelapse.output[0] + "_duplicate"
+    log: "{xml_base}_duplicate_transformations.log"
     run:
         cmd_string = produce_string(
         	"""{fiji-prefix} {fiji-app} \
@@ -296,7 +306,6 @@ rule fusion:
     	-Dprocess_illumination={process_illumination} \
     	-Dprocess_angle={process_angle} \
     	-Dxml_output={xml_output} \
-    	-Dfused_image={fused_image} \
     	-Dminimal_x={minimal_x} \
     	-Dminimal_y={minimal_y} \
     	-Dminimal_z={minimal_z} \
@@ -380,7 +389,9 @@ rule deconvolution:
         -Dosem_acceleration={osem_acceleration} \
         -DTikhonov_parameter={Tikhonov_parameter} \
         -Dcompute={compute} \
-        -Dpsf_estimation={psf_estimation} \
+        -Dcompute_on={compute_on} \
+        -Dcudafourierconvolution={cudafourierconvolution} \
+	-Dpsf_estimation={psf_estimation} \
         -Ddirectory_cuda={directory_cuda} \
         -Ddetections_to_extract_psf_for_channel={detections_to_extract_psf_for_channel} \
         -Dpsf_size_x={psf_size_x} \
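
The registration and deconvolution rules pass each config entry to the BeanShell scripts as a Java system property (-Dkey=value), which the scripts read back with System.getProperty (see the .bsh hunks below). A minimal Python sketch of how such flags can be assembled from the YAML; produce_flags is an illustrative stand-in, not the Snakefile's actual produce_string implementation:

    # Sketch only: turn one config section into the -Dkey=value flags shown above.
    config = {
        "common": {"fiji-prefix": "/sw/bin/xvfb-run -a", "fiji-app": "ImageJ-linux64"},
        "registration": {"compute_on": '"GPU accurate (Nvidia CUDA via JNA)"',
                         "downsample_z": "1x"},
    }

    def produce_flags(section):
        # one -Dkey=value pair per entry of the chosen config section
        return " ".join("-D{}={}".format(k, v) for k, v in config[section].items())

    cmd = "{} {} {}".format(config["common"]["fiji-prefix"],
                            config["common"]["fiji-app"],
                            produce_flags("registration"))
    print(cmd)
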
diff --git a/spim_registration/timelapse/cluster.json b/spim_registration/timelapse/cluster.json
index 4b2be746447e0fbd6c2b2892b30e26c895b4b0dd..7f13c62a6e51384d597d272137c18db3a2f97eb6 100755
--- a/spim_registration/timelapse/cluster.json
+++ b/spim_registration/timelapse/cluster.json
@@ -4,7 +4,12 @@
 	"lsf_extra" : "-R \"span[hosts=1]\"",
 	"lsf_q" : "short"	
     },
- 
+
+    "resave_hdf5" :	
+    {
+ 	"lsf_extra" : "-n 7 -R \"span[hosts=1] rusage[mem=50000]\""
+    },
+
     "registration" :
     {
         "lsf_extra" : "-R \"span[hosts=1] rusage[mem=100000]\""
@@ -20,6 +25,11 @@
         "lsf_extra" : "-R \"span[hosts=1] rusage[mem=10000]\""
     },
 
+    "fusion" : 
+    {
+        "lsf_extra" : "-n 12 -R \"span[hosts=1] rusage[mem=100000]\""
+    },
+
     "deconvolution" :
     {
         "lsf_extra" : "-n 7 -R \"span[hosts=1] rusage[mem=50000]\"",
diff --git a/spim_registration/timelapse/deconvolution_GPU.bsh b/spim_registration/timelapse/deconvolution.bsh
similarity index 82%
rename from spim_registration/timelapse/deconvolution_GPU.bsh
rename to spim_registration/timelapse/deconvolution.bsh
index 32154c2c9986c8156073d169f1fa53071506cc35..1ecd9b960e59567c901217e65a0984533574d8b3 100755
--- a/spim_registration/timelapse/deconvolution_GPU.bsh
+++ b/spim_registration/timelapse/deconvolution.bsh
@@ -4,11 +4,11 @@ import ij.Prefs; 	// calls imagej settings
 import java.lang.Runtime;
 import java.io.File;
 import java.io.FilenameFilter;
- 
+
 runtime = Runtime.getRuntime();
 System.out.println(runtime.availableProcessors() + " cores available for multi-threading");
 
-Prefs.setThreads(7); 	// defines the number of threads allowed 
+Prefs.setThreads(7); 	// defines the number of threads allowed
 print("Threads: "+Prefs.getThreads()); // prints thread setting in output
 
 System.out.println( "Start to load Parameters:" );
@@ -27,13 +27,12 @@ int parallel_timepoints = Integer.parseInt(System.getProperty( "parallel_timepoi
 process_timepoint = System.getProperty( "process_timepoint" );
 process_channel = System.getProperty( "process_channel" );
 process_illumination = System.getProperty( "process_illumination" );
-process_angle = System.getProperty( "process_angle" ); 
-
+process_angle = System.getProperty( "process_angle" );
 
 System.out.println( "-------------------------------------------------------" );
 System.out.println( "General parameters: " );
 System.out.println( "timepoint_processed = " + parallel_timepoints );
-System.out.println( "process_timepoints = " + process_timepoint ); 
+System.out.println( "process_timepoints = " + process_timepoint );
 System.out.println( "process_channel = " + process_channel );
 System.out.println( "process_illumination = " + process_illumination );
 System.out.println( "process_angle = " + process_angle );
@@ -66,7 +65,6 @@ psf_estimation = System.getProperty( "psf_estimation" );
 iterations = System.getProperty( "iterations" );
 deco_output_file_directory = System.getProperty( "deco_output_file_directory" );
 
-
 System.out.println( "-------------------------------------------------------" );
 System.out.println( "Deconvolution settings: " );
 System.out.println( "imglib2_container = " + imglib2_container_deco );
@@ -78,12 +76,6 @@ System.out.println( "psf_estimation = " + psf_estimation );
 System.out.println( "number_of_iterations = " + iterations );
 System.out.println( "deco_output_file_directory = " + deco_output_file_directory );
 
-// Search for CUDA
-System.out.println( "-------------------------------------------------------" );
-System.out.println( "Loading CUDA directory: " );
-directory_cuda = System.getProperty( "directory_cuda" );
-System.out.println( "directory_cuda = " + directory_cuda );
-
 // PSF Parameters
 psf_size_x = System.getProperty( "psf_size_x" );
 psf_size_y = System.getProperty( "psf_size_y" );
@@ -101,24 +93,24 @@ System.out.println( "-------------------------------------------------------" );
 System.out.println( "Channel Settings: " );
 // Channel setting for Deconvolution
 // parses channels and takes from there the number of channels
-// parses detections_to_extract_psf_for_channel 
+// parses detections_to_extract_psf_for_channel
 channels = System.getProperty( "channels" );
-System.out.println( "Channels = " + channels ); 
+System.out.println( "Channels = " + channels );
 
 detections_to_extract_psf_for_channel = System.getProperty( "detections_to_extract_psf_for_channel" );
 System.out.println( "PSF: " + detections_to_extract_psf_for_channel );
 
-// Splits channels and detections_to_extract_psf_for_channel 
+// Splits channels and detections_to_extract_psf_for_channel
 String delims = "[,]";
 String[] channel_token = channels.split(delims);
 String[] psf_token = detections_to_extract_psf_for_channel.split(delims);
-		
-// Assembles channel_string 
+
+// Assembles channel_string
 StringBuilder channel_string = new StringBuilder();
 
 for (int channel=0; channel < channel_token.length; channel++ )
 
-{	
+{
 	String channel_part = "detections_to_extract_psf_for_channel_" + channel_token[channel] + "=" + psf_token[channel] + " ";
 	channel_string.append( channel_part );
 	channel_string.append(" ");
@@ -126,6 +118,45 @@ for (int channel=0; channel < channel_token.length; channel++ )
 
 System.out.println( channel_string );
 
+// GPU/CPU setting
+System.out.println( "-------------------------------------------------------" );
+System.out.println( "GPU/CPU setting: " );
+
+compute_on = System.getProperty( "compute_on" );
+directory_cuda = System.getProperty( "directory_cuda" );
+cudafourierconvolution = System.getProperty( "cudafourierconvolution" );
+
+System.out.println( "compute_on = " + compute_on );
+System.out.println( "directory_cuda = " + directory_cuda  );
+System.out.println( "cudafourierconvolution = " + cudafourierconvolution );
+
+String compute_string = "";
+String cuda_settings = "";
+
+if (compute_on.equalsIgnoreCase( "GPU (Nvidia CUDA via JNA)" ) )
+{
+	System.out.println( "GPU deconvolution selected" );
+	compute_string = "compute_on=[GPU (Nvidia CUDA via JNA)] ";
+	cuda_settings = "cuda_directory=" + directory_cuda + " " +
+			"select_native_library_for_cudafourierconvolution=" + cudafourierconvolution + " " +
+			"gpu_1 ";
+}
+
+else if (compute_on.equalsIgnoreCase( "CPU (Java)" ) )
+{
+	System.out.println( "CPU deconvolution selected" );
+	compute_string = "compute_on=[" + compute_on + "] ";
+	cuda_settings  = "";
+}
+			
+else 
+{
+	System.out.println( "Error: compute_on setting for deconvolution not recognized" );
+}
+					
+System.out.println( "compute_string=" + compute_string );
+System.out.println( "cuda_settings=" + cuda_settings );
+
 // Execute Fiji Plugin
 System.out.println( "=======================================================" );
 System.out.println( "Starting Deconvolution" );
@@ -154,24 +185,21 @@ IJ.run("Fuse/Deconvolve Dataset",
 	"use_tikhonov_regularization " +
 	"tikhonov_parameter=" + Tikhonov_parameter + " " +
 	"compute=[" + compute + "] " +
-	"compute_on=[GPU (Nvidia CUDA via JNA)] " +
+	compute_string +
 	"psf_estimation=[" + psf_estimation + "] " +
 	"psf_display=[Do not show PSFs] " +
 	"output_file_directory=" + deco_output_file_directory + " " +
-	"cuda_directory=[" + directory_cuda + "] " +
-	"select_native_library_for_cudafourierconvolution=libFourierConvolutionCUDALib.so " + 
-	"gpu_1 " +
+	cuda_settings +
 	channel_string +
 	"psf_size_x=" + psf_size_x + " " +
 	"psf_size_y=" + psf_size_y + " " +
 	"psf_size_z=" + psf_size_z + "");
 }
-catch ( e ) { 
+catch ( e ) {
 
     print( "[deconvolution-GPU] caught exception: "+e );
     //important to fail the process if exception occurs
     runtime.exit(1);
-    
 }
 
 /* shutdown */
diff --git a/spim_registration/timelapse/define_czi.bsh b/spim_registration/timelapse/define_czi.bsh
index e3e1d30e788e27aa6888ce5a4a7e352162aae4d5..aa6b0526148588817ababb68666eacb6e7529e3e 100755
--- a/spim_registration/timelapse/define_czi.bsh
+++ b/spim_registration/timelapse/define_czi.bsh
@@ -48,7 +48,6 @@ String angle_part;
 		angle_part = "angle_" + num_angles + "=" + angle_token[angle];
 		angle_string.append(angle_part);
 		angle_string.append(" ");
-			
 		}
 
 System.out.println( angle_string );
@@ -76,9 +75,7 @@ String channel_part;
 		channel_part = "channel_" + num_channel + "=" + channel_token[channel];
 		channel_string.append(channel_part);
 		channel_string.append(" ");
-			
 		}
-			
 System.out.println( "Channel String = " + channel_string );
 
 System.out.println("---------------------------------------------------------");
@@ -104,7 +101,6 @@ String illum_part;
 		illum_part = "_______illumination_" + num_illum + "=" + illum_token[illum];
 		illum_string.append(illum_part);
 		illum_string.append(" ");
-			
 		}
 
 System.out.println( illum_string );
diff --git a/spim_registration/timelapse/dual_OneChannel.yaml b/spim_registration/timelapse/dual_OneChannel.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..239f9dfd3b117367df8297cf56f13414c695f349
--- /dev/null
+++ b/spim_registration/timelapse/dual_OneChannel.yaml
@@ -0,0 +1,260 @@
+common: {
+  # directory that contains the bean shell scripts and Snakefile
+  bsh_directory: "/projects/pilot_spim/Christopher/snakemake-workflows/spim_registration/timelapse/", 
+  # Directory that contains the cuda libraries
+  directory_cuda: "/sw/users/schmied/cuda/",                                                          
+  # Directory that contains the current working Fiji
+  #fiji-app: "/sw/users/schmied/packages/2015-06-08_Fiji.app.cuda/ImageJ-linux64",
+  fiji-app: "/sw/users/schmied/packages/2015-05-29_Fiji_2.3.9_SNAP.app.cuda/ImageJ-linux64",          
+  fiji-prefix: "/sw/bin/xvfb-run -a",       # calls xvfb for Fiji headless mode
+  # xml file names without .xml suffix
+  first_xml_filename: "Dual_Channel",       # Name of the xml file for the .czi or .tif files
+  hdf5_xml_filename: '"hdf5_Dual_Channel"', # Name of .xml file for the hdf5 data after resave_hdf5
+  merged_xml: "hdf5_Dual_Channel_merge",    # Name of .xml file after merge
+  # Describe the dataset
+  ntimepoints: 2,               # number of timepoints of dataset
+  angles: "0,72,144,216,288",   # angles          
+  channels: "green,red",        # channels
+  illumination: "0",            # illuminations
+  pixel_distance_x: '0.28590',  # Manual calibration x
+  pixel_distance_y: '0.28590',  # Manual calibration y
+  pixel_distance_z: '1.50000',  # Manual calibration z
+  pixel_unit: "um",             # unit of manual calibration
+  # Use switches to decide which processing steps you need:
+  # transformation_switch: "timelapse" is the standard processing path;
+  # after timelapse registration it goes directly into fusion.
+  # "timelapse_duplicate" is for dual-channel processing where only one channel
+  # contains beads: it duplicates the transformations to the channel without beads.
+  transformation_switch: "timelapse_duplicate", 
+  # Switches between content-based fusion and deconvolution
+  # "deconvolution" > for deconvolution
+  # "fusion" > for content based fusion
+  fusion_switch: "fusion"
+  }
+
+define_xml_czi: {
+  first_czi: "2015-02-20_LZ2_Stock48_Stock58.czi", # master .czi file
+  rotation_around: "X-Axis",                       # axis of acquisition
+  bsh_file: "define_czi.bsh"                       # .bsh script for defining .czi file
+  }
+          
+define_xml_tif: {
+  # file pattern of .tif files
+  # for multi channel give spim_TL{tt}_Angle{a}_Channel{c}.tif
+  # SPIM file pattern: for padded zeros use tt
+  image_file_pattern: 'img_TL{{t}}_Angle{{a}}.tif', 
+  # Settings for ImageJ Opener
+  type_of_dataset: '"Image Stacks (ImageJ Opener)"',
+  multiple_timepoints: '"YES (one file per time-point)"', # or NO (one time-point)
+  multiple_angles: '"YES (one file per angle)"',          # or NO (one angle)
+  multiple_channels: '"NO (one channel)"',                # or '"YES (one file per channel)"'
+  multiple_illumination_directions: '"NO (one illumination direction)"', # or YES (one file per illumination direction)
+  imglib_container: '"ArrayImg (faster)"',        # '"ArrayImg (faster)"'
+  bsh_file: "define_tif_zip.bsh"  
+  }
+  
+resave_hdf5: {
+  # Resaves .tif or .czi data into hdf5
+  # Subsampling and resolution settings for hdf5: data dependent
+  hdf5_chunk_sizes: '"{{ {{32,32,4}}, {{32,32,4}}, {{16,16,16}}, {{16,16,16}} }}"', 
+  subsampling_factors: '"{{ {{1,1,1}}, {{2,2,1}}, {{4,4,1}}, {{8,8,1}} }}"',  
+  # Standard settings for cluster processing
+  setups_per_partition: '0',
+  timepoints_per_partition: '1',
+  resave_timepoint: '"All Timepoints"',  
+  resave_angle: '"All angles"',
+  resave_channel: '"All channels"',
+  resave_illumination: '"All illuminations"',
+  bsh_file: "export.bsh"
+  }
+
+registration: {
+  # reg_process_channel:
+  # Single Channel: '"All channels"'
+  # Dual Channel: '"All channels"'
+  # Dual Channel one Channel contains beads: '"Single channel (Select from List)"'
+  reg_process_channel: '"Single channel (Select from List)"',
+  # reg_processing_channel:
+  # Dual Channel setting for 1 Channel contains the beads
+  reg_processing_channel: '"red"',      
+  # reg_interest_points_channel:
+  # Single Channel: '"beads"'
+  # Dual Channel: '"beads,beads"'
+  # Dual Channel: Channel does not contain the beads '"[DO NOT register this channel],beads"'
+  reg_interest_points_channel: '"beads"',
+  # type of detection: '"Difference-of-Mean (Integral image based)"' or '"Difference-of-Gaussian"'
+  type_of_detection: '"Difference-of-Mean (Integral image based)"',  
+  # Settings for Difference-of-Mean
+  # For multiple channels 'value1,value2' delimiter is ,
+  reg_radius_1: '2',          
+  reg_radius_2: '3',            
+  reg_threshold: '0.005',
+  # Settings for Difference-of-Gaussian
+  # For multiple channels 'value1,value2' delimiter is ,
+  sigma: '1.8',         
+  threshold_gaussian: '0.0080',   
+  # Processing setting for Difference-of-Gaussian detection
+  # compute_on: '"CPU (Java)"' or '"GPU accurate (Nvidia CUDA via JNA)"'
+  compute_on: '"GPU accurate (Nvidia CUDA via JNA)"',
+  separableconvolution: '"libSeparableConvolutionCUDALib.so"',
+  # Downsampling settings
+  downsample_detection: "No", # "No" or "Yes"
+  downsample_xy: '"Match Z Resolution (less downsampling)"',
+  downsample_z: "1x",
+  # Standard Settings for bead based registration
+  label_interest_points: '"beads"',              
+  reg_process_timepoint: '"Single Timepoint (Select from List)"',
+  reg_process_angle: '"All angles"',
+  reg_process_illumination: '"All illuminations"',
+  subpixel_localization: '"3-dimensional quadratic fit"',
+  detection_min_max: "find_maxima",
+  type_of_registration: '"Register timepoints individually"',
+  algorithm: '"Fast 3d geometric hashing (rotation invariant)"',
+  transformation_model: "Affine",
+  allowed_error_for_ransac: '5',
+  significance: '10',
+  fix_tiles: '"Fix first tile"',
+  map_back_tiles: '"Map back to first tile using rigid model"',
+  model_to_regularize_with: "Rigid",
+  lambda: '0.10',
+  imglib_container: '"ArrayImg (faster)"',
+  bsh_file: "registration.bsh"  # .bsh script for registration
+  }
+
+xml_merge: {
+  bsh_file: "xml_merge.bsh"
+  }
+  
+timelapse: {
+  reference_timepoint: '0',   # Reference timepoint
+  # Standard settings for timelapse registration
+  type_of_registration_timelapse: '"Match against one reference timepoint (no global optimization)"',
+  timelapse_process_timepoints: '"All Timepoints"',
+  bsh_file: "timelapse_registration.bsh"
+  }
+  
+dublicate_transformations: {
+  # If dual channel processing and only one channel contains beads
+  # this allows you to duplicate the transformations for the
+  # channel that does not contain beads
+  source_dublication: "red",  # source channel
+  target_dublication: "green", # target channel
+  duplicate_which_transformations: '"Replace all transformations"', # mode of duplication
+  bsh_file: "dublicate_transformations.bsh" # .bsh script for duplication
+  }
+  
+fusion: {
+  # content based multiview fusion
+  # supports multi channel without new settings
+  downsample: '1',  # set downsampling
+  # Cropping parameters of full resolution
+  minimal_x: '220', 
+  minimal_y: '40',  
+  minimal_z: '-290',  
+  maximal_x: '976',   
+  maximal_y: '1892',    
+  maximal_z: '472',   
+  # fused_image: '"Append to current XML Project"', does not work yet
+  process_timepoint: '"Single Timepoint (Select from List)"',
+  process_angle: '"All angles"',
+  process_channel: '"All channels"',
+  process_illumination: '"All illuminations"',
+  imglib2_container_fusion: '"ArrayImg"',
+  interpolation: '"Linear Interpolation"',
+  pixel_type: '"16-bit unsigned integer"',
+  imglib2_data_container: '"ArrayImg (faster)"',
+  process_views_in_paralell: '"All"',
+  xml_output: '"Save every XML with user-provided unique id"',
+  bsh_file: "fusion.bsh"
+  }
+
+external_transform: {
+  # Downsamples for deconvolution
+  # BUG: external transformation breaks .xml file
+  # channel setting: '"all_channels"'
+  channel_setting: '"green,red"',
+  transform_timepoint: '"All Timepoints"',
+  transform_angle: '"All angles"',
+  transform_channel: '"All channels"',
+  transform_illumination: '"All illuminations"',
+  apply_transformation: '"Current view transformations (appends to current transforms)"',
+  define_mode_transform: '"Matrix"',
+  # Matrix for downsampling
+  matrix_transform: '"0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0"', 
+  transformation: '"Rigid"',
+  bsh_file: "transform.bsh"
+  }
+
+deconvolution: {
+  iterations: '1', # number of iterations
+  # Cropping parameters: take downsampling into account
+  minimal_x_deco: '190', 
+  minimal_y_deco: '-16', 
+  minimal_z_deco: '-348', 
+  maximal_x_deco: '1019', 
+  maximal_y_deco: '1941', 
+  maximal_z_deco: '486', 
+  # Channel settings for deconvolution
+  # Single Channel: '"beads"'
+  # Dual Channel: '"beads,beads"'
+  # Dual Channel one channel contains beads: '"[Same PSF as channel red],beads"'
+  detections_to_extract_psf_for_channel: '"[Same PSF as channel red],beads"',
+  # Settings for GPU or CPU processing 
+  # '"CPU (Java)"' or '"GPU (Nvidia CUDA via JNA)"'
+  compute_on: '"GPU (Nvidia CUDA via JNA)"',
+  cudafourierconvolution: "libFourierConvolutionCUDALib.so", # GPU processing name of cuda library
+  # Standard settings for deconvolution
+  process_timepoint: '"Single Timepoint (Select from List)"',
+  process_angle: '"All angles"',
+  process_channel: '"All channels"',
+  process_illumination: '"All illuminations"',
+  type_of_iteration: '"Efficient Bayesian - Optimization I (fast, precise)"',
+  Tikhonov_parameter: '0.0006',
+  compute: '"in 512x512x512 blocks"',
+  osem_acceleration: '"1 (balanced)"',
+  psf_estimation: '"Extract from beads"',
+  psf_size_x: '19',
+  psf_size_y: '19',
+  psf_size_z: '25',
+  imglib2_container: '"ArrayImg"',
+  bsh_file: "deconvolution.bsh"
+  }
+  
+hdf5_output: {
+  # writes new hdf5 dataset for fusion output: will be obsolete
+  # Naming pattern of output
+  # Single Channel: TP{t}_Chgreen_Ill0_Ang0,72,144,216,288.tif > Ch{name} is added here
+  # Dual Channel: TP{t}_Ch{0}_Ill0_Ang0,72,144,216,288.tif > Ch{name} is added here
+  output_image_file_pattern: 'TP{{t}}_Ch{{c}}_Ill0_Ang0,72,144,216,288.tif',
+  # channel setting
+  output_multiple_channels: '"YES (one file per channel)"', # '"YES (one file per channel)"' or  '"NO (one channel)"'
+  output_channels: "green,red",
+  # .xml file names
+  output_xml: '"fused_Dual_Channel"',
+  output_hdf5_xml: '"hdf5_fused_Dual_Channel"',
+  output_timepoints: '0-1', # Timepoints format: '1-2'
+  # pixel size of output: take downsampling into account!
+  output_pixel_distance_x: 0.28590, 
+  output_pixel_distance_y: 0.28590,
+  output_pixel_distance_z: 0.28590,
+  output_pixel_unit: 'um',
+  # give if 16Bit data or 32Bit data 
+  # output of fusion is 16Bit, of deconvolution it is 32Bit
+  output_data_type: "16Bit", # "32Bit" or "16Bit"
+  # if data is 32Bit then the data is converted into 16Bit data
+  convert_32bit: '"[Use min/max of first image (might saturate intenities over time)]"',
+  # subsampling and chunk size settings: dataset dependent
+  subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}}, {{4,4,4}}, {{8,8,8}} }}"', # data dependent
+  chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}}, {{16,16,16}}, {{16,16,16}} }}"', # data dependent
+  # subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}} }}"',
+  # chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}} }}"',
+  # Standard settings for hdf5_output
+  output_type_of_dataset: '"Image Stacks (ImageJ Opener)"',
+  output_multiple_timepoints: '"YES (one file per time-point)"',
+  output_multiple_angles: '"NO (one angle)"',
+  output_illumination_directions: '"NO (one illumination direction)"',
+  output_imglib_container: '"ArrayImg (faster)"',
+  bsh_file_define: "define_output.bsh", # .bsh script for defining the dataset
+  bsh_file_hdf5: "export_output.bsh"    # .bsh script for resaving into hdf5
+  }
+
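
The transformation_switch and fusion_switch values above decide which upstream outputs the later rules consume (plain timelapse registration vs. the duplicated transformations, content-based fusion vs. deconvolution). A hypothetical Python sketch of how such a switch can be resolved; the file names are illustrative only, not the repository's actual Snakefile wiring:

    # Hypothetical sketch: pick the upstream target based on the switch value.
    def timelapse_target(config, xml_base):
        if config["common"]["transformation_switch"] == "timelapse_duplicate":
            return xml_base + "_timelapse.xml_duplicate"   # output of duplicate_transformations
        return xml_base + "_timelapse.xml"                 # plain timelapse registration

    print(timelapse_target({"common": {"transformation_switch": "timelapse_duplicate"}},
                           "hdf5_Dual_Channel_merge"))
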
diff --git a/spim_registration/timelapse/registration.bsh b/spim_registration/timelapse/registration.bsh
old mode 100755
new mode 100644
index d6872232b191687a082c3e8f591979528318672e..31140723327eb85b805aa9219871608efce85aa8
--- a/spim_registration/timelapse/registration.bsh
+++ b/spim_registration/timelapse/registration.bsh
@@ -10,7 +10,7 @@ import java.lang.Object;
 runtime = Runtime.getRuntime();
 System.out.println(runtime.availableProcessors() + " cores available for multi-threading");
 
-//Prefs.setThreads(1); 	// defines the number of threads allowed 
+//Prefs.setThreads(1); 	// defines the number of threads allowed
 //print("Threads: "+Prefs.getThreads()); // prints thread setting in output
 
 System.out.println("Start loading parameters");
@@ -32,10 +32,10 @@ int parallel_timepoints = Integer.parseInt(System.getProperty( "parallel_timepoi
 unique_id = System.getProperty( "parallel_timepoints" );
 reg_process_timepoint = System.getProperty( "reg_process_timepoint" );
 reg_process_illumination = System.getProperty( "reg_process_illumination" );
-reg_process_angle = System.getProperty( "reg_process_angle" ); 
+reg_process_angle = System.getProperty( "reg_process_angle" );
 
 System.out.println( "parallel_timepoints = " + parallel_timepoints );
-System.out.println( "process_timepoints = " + reg_process_timepoint ); 
+System.out.println( "process_timepoints = " + reg_process_timepoint );
 System.out.println( "process_illumination = " + reg_process_illumination );
 System.out.println( "process_angle = " + reg_process_angle );
 
@@ -81,6 +81,37 @@ System.out.println( "significance = " + significance );
 
 // detection_min_max = System.getProperty( "detection_min_max" );
 // System.out.println( "detection_min_max = " + detection_min_max );
+
+System.out.println( "-------------------------------------------------------" );
+System.out.println( "Downsampling: " );
+
+downsample_detection = System.getProperty( "downsample_detection" );
+downsample_xy = System.getProperty( "downsample_xy" );
+downsample_z = System.getProperty( "downsample_z" );
+
+System.out.println( "downsample_detection = " + downsample_detection );
+System.out.println( "downsample_xy = " + downsample_xy );
+System.out.println( "downsample_z = " + downsample_z );
+
+// downsample switch
+if (downsample_detection.equalsIgnoreCase("Yes") )
+{
+	System.out.println( "Downsampling is turned on" );
+	downsample_string = "downsample_xy=[" + downsample_xy + "] " +
+				"downsample_z=" + downsample_z + " ";
+}
+
+else if (downsample_detection.equalsIgnoreCase("No") )
+{
+	System.out.println( "Downsampling is turned off" );
+	downsample_string = "";
+}
+
+else
+{
+	System.out.println( "Warning: downsample_detection setting not recognized, downsampling disabled" ); downsample_string = "";
+}
+
 System.out.println( "-------------------------------------------------------" );
 System.out.println( "Channel setting for Detection: " );
 
@@ -91,108 +122,221 @@ System.out.println( "Channel setting for Detection: " );
 // from this it decides which string to assemble
 reg_process_channel = System.getProperty( "reg_process_channel" );
 System.out.println( "process_channel = " + reg_process_channel );
-
-reg_processing_channel = System.getProperty( "reg_processing_channel" ); 
-System.out.println( "processing_channel = channel " + reg_processing_channel ); 
-
+reg_processing_channel = System.getProperty( "reg_processing_channel" );
+System.out.println( "processing_channel = channel " + reg_processing_channel );
 channels = System.getProperty( "channels" );
 System.out.println( "Channels = " + channels );
 
+// Difference of Mean Settings
 reg_radius_1 = System.getProperty( "reg_radius_1" );
 System.out.println( "reg_radius_1 = " + reg_radius_1 );
-	
 reg_radius_2 = System.getProperty( "reg_radius_2" );
 System.out.println( "reg_radius_2 = " + reg_radius_2 );
-
 reg_threshold = System.getProperty( "reg_threshold" );
-System.out.println( "reg_threshold = " + reg_threshold);	
-	
-// Splits up channels, reg_radius_1, reg_radius_2 and reg_threshold
+System.out.println( "reg_threshold = " + reg_threshold);
+
+// Difference of Gaussian Settings
+sigma = System.getProperty( "sigma" );
+threshold_gaussian = System.getProperty( "threshold_gaussian" );
+compute_on = System.getProperty( "compute_on");
+directory_cuda = System.getProperty( "directory_cuda" );
+separableconvolution = System.getProperty( "separableconvolution" );
+
+System.out.println( "sigma = " + sigma );
+System.out.println( "threshold = " + threshold_gaussian );
+System.out.println( "compute_on = " + compute_on );
+System.out.println( "directory_cuda = " + directory_cuda );
+System.out.println( "separableconvolution = " + separableconvolution );
+
+// Sets delimiter
 String delims = "[,]";
+
+// parses channels
 String[] channel_token = channels.split(delims);
+
+// parses settings for Difference of Mean
 String[] radius_1_token = reg_radius_1.split(delims);
 String[] radius_2_token = reg_radius_2.split(delims);
 String[] threshold_token = reg_threshold.split(delims);
 
+// parses settings for Difference of Gaussian
+String[] sigma_token = sigma.split(delims);
+String[] dog_threshold_token = threshold_gaussian.split(delims);
+
+// Defines variables for compute_string and sets it to an empty string
+String compute_string = "";
+
+// Defines variables for channel_string and sets it to an empty string
 String channel_string = "";
+
+// Defines variables for processing_channel_string and sets it to an empty string
 String processing_channel_string = "";
+
+// Defines variables for channel_string_multi and activates StringBuilder
 StringBuilder channel_string_multi = new StringBuilder();
-			
+
 // If there are no channels set returns an error
 if (channels.equalsIgnoreCase(""))
 {
 	System.out.println("Warning: There are no channels set");
 }
-		
-// Returns an Error if there is one channel in channels but multiple settings in radius and or threshold	
+
+// Returns an Error if there is one channel in channels but multiple settings in radius and or threshold
 else if ( channel_token.length == 1 && (radius_1_token.length > 1 || radius_2_token.length > 1 || threshold_token.length > 1 ) )
 {
 	System.out.println( "Error: Only one channel detected but multiple channel settings for radius or threshold" );
 }
-				
-// Assembles String for Singel Channel 
-else if (reg_process_channel.equalsIgnoreCase( "All channels" ) && channel_token.length == 1 ) 
+
+// Assembles String for Single Channel
+else if (reg_process_channel.equalsIgnoreCase( "All channels" ) && channel_token.length == 1 )
 {
+	System.out.println( "Channel Setting: Single channel" );
 	processing_channel_string = "";
-	channel_string_multi.append("");	
-	channel_string = "interest_point_specification=[Advanced ...] " +
-	"radius_1=" + reg_radius_1 + " " +
-	"radius_2=" + reg_radius_2 + " " +
-	"threshold=" + reg_threshold + " " +
-	"find_maxima";
-}	
-		
+	channel_string_multi.append("");
+// Switches between Difference-of-Mean and Difference-of-Gaussian Settings
+	if (type_of_detection.equalsIgnoreCase( "Difference-of-Mean (Integral image based)" ) )
+	{
+		channel_string = "interest_point_specification=[Advanced ...] " +
+				"radius_1=" + reg_radius_1 + " " +
+				"radius_2=" + reg_radius_2 + " " +
+				"threshold=" + reg_threshold + " " +
+				"find_maxima";
+	}
+
+	else if (type_of_detection.equalsIgnoreCase( "Difference-of-Gaussian" ) )
+	{
+		channel_string = "interest_point_specification=[Advanced ...] " +
+		"sigma=" + sigma + " " +
+		"threshold=" + threshold_gaussian + " " +
+		"find_maxima ";
+	}
+
+	else
+	{
+		System.out.println( "Error: type_of_detection selection is bad" );
+	}
+}
+
 // Dual Channels both Channels contain beads
 else if (reg_process_channel.equalsIgnoreCase( "All channels" ) && channel_token.length > 1 )
 {
-	// Assembles String using StringBuilder for 2 Channels		
-	for (int channel=0; channel < channel_token.length; channel++ )
+	System.out.println( "Channel Setting: Multiple channels" );
+
+	// Assembles String using StringBuilder for 2 Channels
+	// Switches between Difference-of-Mean and Difference-of-Gaussian Settings
+	if (type_of_detection.equalsIgnoreCase( "Difference-of-Mean (Integral image based)" ) )
+	{
+		for (int channel=0; channel < channel_token.length; channel++ )
+		{
+			String channel_part = "interest_point_specification_(channel_" + channel_token[channel] + ")=[Advanced ...] " +
+						"radius_1_" + channel_token[channel] +  "=" + radius_1_token[channel] + " " +
+						"radius_2_" + channel_token[channel] +  "=" + radius_2_token[channel] + " " +
+						"threshold_" + channel_token[channel] +  "=" + threshold_token[channel] + " " +
+						"find_maxima_" + channel_token[channel];
+						channel_string_multi.append(channel_part);
+						channel_string_multi.append(" ");
+		}
+	}
+
+	else if (type_of_detection.equalsIgnoreCase( "Difference-of-Gaussian" ) )
+	{
+		for (int channel=0; channel < channel_token.length; channel++ )
+		{
+
+			String channel_part = "interest_point_specification_(channel_" + channel_token[channel] + ")=[Advanced ...] " +
+						"sigma_" + channel_token[channel] +  "=" + sigma_token[channel] + " " +
+						"threshold_" + channel_token[channel] +  "=" + dog_threshold_token[channel] + " " +
+						"find_maxima_" + channel_token[channel] + " ";
+						channel_string_multi.append(channel_part);
+						channel_string_multi.append(" ");
+		}
+	}
+
+	else
 	{
-		String channel_part = "interest_point_specification_(channel_" + channel_token[channel] + ")=[Advanced ...] " +
-		"radius_1_" + channel_token[channel] +  "=" + radius_1_token[channel] + " " +
-		"radius_2_" + channel_token[channel] +  "=" + radius_2_token[channel] + " " +
-		"threshold_" + channel_token[channel] +  "=" + threshold_token[channel] + " " +
-		"find_maxima_" + channel_token[channel];
-		channel_string_multi.append(channel_part);
-		channel_string_multi.append(" ");	
+		System.out.println( "Error: type_of_detection selection is bad" );
 	}
 
 }
-		
+
 // Returns an Error if set to Dual Channel one Channel contains beads but there is only one channel in channels	
 else if ( reg_process_channel.equalsIgnoreCase( "Single channel (Select from List)" ) && channel_token.length == 1 )
 {
 	System.out.println( "Error: reg_process_channel or channels set incorrectly" );
 }
-		
+
 // Returns and Error if set to Dual Channel processing one channel contains the beads but there are multiple settings in radius and or threshold
 else if ( reg_process_channel.equalsIgnoreCase( "Single channel (Select from List)" ) && (radius_1_token.length > 1 || radius_2_token.length > 1 || threshold_token.length > 1 ) )
 {
-	System.out.println( "Error: You said you wanted to process only one channel but there are multiple settings for radius or threshold" );
-}	
-		
+	System.out.println( "Error: Set to process only one channel but multiple settings for radius or threshold" );
+}
+
 // Assembles String for Dual Channel but only one Channel contains beads
-else if (reg_process_channel.equalsIgnoreCase( "Single channel (Select from List)" ) && channel_token.length > 1)			
-{	
-	channel_string_multi.append("");
-	processing_channel_string = "processing_channel=[channel " +  reg_processing_channel + "] ";
-	channel_string = "interest_point_specification=[Advanced ...] " +
-	"radius_1=" + reg_radius_1 + " " +
-	"radius_2=" + reg_radius_2 + " " +
-	"threshold=" + reg_threshold + " " +
-	"find_maxima";
+else if (reg_process_channel.equalsIgnoreCase( "Single channel (Select from List)" ) && channel_token.length > 1)
+{
+
+	System.out.println( "Channel Setting: Multiple channels, one channel contains beads" );
+	// Switches between Difference-of-Mean and Difference-of-Gaussian Settings
+	if (type_of_detection.equalsIgnoreCase( "Difference-of-Mean (Integral image based)" ) )
+	{
+		channel_string_multi.append("");
+		processing_channel_string = "processing_channel=[channel " +  reg_processing_channel + "] ";
+		channel_string = "interest_point_specification=[Advanced ...] " +
+				"radius_1=" + reg_radius_1 + " " +
+				"radius_2=" + reg_radius_2 + " " +
+				"threshold=" + reg_threshold + " " +
+				"find_maxima";
+	}
+
+	else if (type_of_detection.equalsIgnoreCase( "Difference-of-Gaussian" ) )
+	{
+		channel_string_multi.append("");
+		processing_channel_string = "processing_channel=[channel " +  reg_processing_channel + "] ";
+		channel_string = "interest_point_specification=[Advanced ...] " +
+				"sigma=" + sigma + " " +
+				"threshold=" + threshold_gaussian + " " +
+				"find_maxima ";
+	}
 }
 
 // Returns and Error if no conditions above are met
 else
 {
-	System.out.println( "Error: Incorrect settings" );
+	System.out.println( "Error: Incorrect Detection of Interest points settings" );
 }
-				
+
+// Compute on settings for Difference-of-Gaussian
+if (type_of_detection.equalsIgnoreCase( "Difference-of-Mean (Integral image based)" )  )
+{
+	System.out.println( "Bead detection set to Difference-of-Mean (Integral image based)" );
+	compute_string = "";
+}
+
+else if (type_of_detection.equalsIgnoreCase( "Difference-of-Gaussian" ) && compute_on.equalsIgnoreCase( "CPU (Java)" )  )
+{
+	System.out.println( "Bead detection set to Difference-of-Gaussian processing on CPU" );
+	compute_string = "compute_on=[" + compute_on + "]";
+}
+
+else if (type_of_detection.equalsIgnoreCase( "Difference-of-Gaussian" ) && ( compute_on.equalsIgnoreCase( "GPU accurate (Nvidia CUDA via JNA)" ) )  ) // need second setting 
+{
+	System.out.println( "Bead detection set to Difference-of-Gaussian processing on GPU" );
+	compute_string = "compute_on=[" + compute_on + "] " +
+			"directory_cuda=" + directory_cuda + " " +
+			"select_native_library_for_cudaseparableconvolution=" +separableconvolution;
+}
+
+else
+{
+	System.out.println( "Error: compute_on setting for Difference-of-Gaussian not recognized" );
+}
+
+System.out.println( "downsample = " + downsample_string );
 System.out.println( "Processing Channel = " + processing_channel_string );
 System.out.println( "Channel String: " + channel_string );
 System.out.println( reg_process_channel );
 System.out.println( "Multi channel String: " + channel_string_multi );
+System.out.println( "Compute on=" + compute_string );
 
 // Channel Setting Registration
 System.out.println( "-------------------------------------------------------" );
@@ -212,31 +356,31 @@ System.out.println( "reg_interest_points_channel = " + reg_interest_points_chann
 String[] interest_token = reg_interest_points_channel.split(delims);
 String reg_single_channel_string ="";
 StringBuilder reg_multi_channel_string = new StringBuilder();
-				
+
 // If there are no channels set returns an error
 if (channels.equalsIgnoreCase(""))
 {
 	System.out.println("Warning: There are no channels set");
 }
-				
+
 // Assembles string for Single Channel registration
-else if (reg_process_channel.equalsIgnoreCase( "All channels" ) && channel_token.length == 1 && interest_token.length == 1 )	
-{	
+else if (reg_process_channel.equalsIgnoreCase( "All channels" ) && channel_token.length == 1 && interest_token.length == 1 )
+{
 	reg_multi_channel_string.append( "" );
 	reg_single_channel_string = "interest_points_channel_" + channels + "=" + reg_interest_points_channel + " ";
 }
-			
+
 // Returns error if Multi Channels are selected but not enough settings in channels or 	reg_interest_points_channel
-else if (reg_process_channel.equalsIgnoreCase( "All channels" ) &&  ( channel_token.length < 1 || interest_token.length < 1) ) 
+else if (reg_process_channel.equalsIgnoreCase( "All channels" ) &&  ( channel_token.length < 1 || interest_token.length < 1) )
 {
 	System.out.println( "Error: Multi Channel selected but only one setting in channels or reg_interest_points_channel" );
 }
-			
+
 // Assembles string for Multi Channel registration
-else if (reg_process_channel.equalsIgnoreCase( "All channels" ) && channel_token.length > 1 ) 
+else if (reg_process_channel.equalsIgnoreCase( "All channels" ) && channel_token.length > 1 )
 {
 	for (int channel=0; channel < channel_token.length; channel++ )
-	{	
+	{
 		String channel_part = "interest_points_channel_" + channel_token[channel] + "=" + interest_token[channel] + " ";
 		reg_multi_channel_string.append( channel_part );
 		reg_multi_channel_string.append(" ");
@@ -248,15 +392,15 @@ else if (reg_process_channel.equalsIgnoreCase( "Single channel (Select from List
 {
 	System.out.println( "Error: Multi Channel one Channel contains beads selected but only one setting in channls or reg_interest_points_channel" );
 }
-				
+
 // Assembles string for Multi Channel Processing one Channel Contains the beads
-else if (reg_process_channel.equalsIgnoreCase( "Single channel (Select from List)" ) && channel_token.length > 1 && interest_token.length > 1)	
+else if (reg_process_channel.equalsIgnoreCase( "Single channel (Select from List)" ) && channel_token.length > 1 && interest_token.length > 1)
 {
 	for (int channel=0; channel < channel_token.length; channel++ )
-	{	
+	{
 		String channel_part = "interest_points_channel_" + channel_token[channel] + "=" + interest_token[channel] + " ";
 		reg_multi_channel_string.append( channel_part );
-		reg_multi_channel_string.append(" ");		
+		reg_multi_channel_string.append(" ");
 	}
 }
 
@@ -268,12 +412,12 @@ System.out.println( "=======================================================" );
 try {
 IJ.run("Toggle Cluster Processing", "display_cluster");
 }
-catch ( e ) { 
+catch ( e ) {
 
     print( "[registration::activate_cluster_processing] caught exception: "+e );
     //important to fail the process if exception occurs
     runtime.exit(1);
-    
+
 }
 
 System.out.println( "Activated Cluster Processing" );
@@ -282,38 +426,42 @@ System.out.println( "Activated Cluster Processing" );
 System.out.println( "=======================================================" );
 System.out.println( "Starting Detection of Interest Points" );
 
-System.out.println("Detect Interest Points for Registration , select_xml=" + xml_path + xml_filename + ".xml " +  
+System.out.println("(Detect Interest Points for Registration , select_xml=" + xml_path + xml_filename + ".xml " +
 	"unique_id=" + unique_id + " " +
-	"process_angle=[" + reg_process_angle + "] " + 
+	"process_angle=[" + reg_process_angle + "] " +
 	"process_channel=[" + reg_process_channel + "] " +
-	"process_illumination=[" + reg_process_illumination + "] " + 
+	"process_illumination=[" + reg_process_illumination + "] " +
 	"process_timepoint=[" + reg_process_timepoint + "] " +
 	processing_channel_string +
-	"xml_output=[Save every XML with user-provided unique id]" + " " + 
-	"processing_timepoint=[Timepoint " + parallel_timepoints + "] " + 
-	"type_of_interest_point_detection=[" + type_of_detection + "] " + 
-	"label_interest_points=" + label_interest_points + " " + 
-	"subpixel_localization=[" + subpixel_localization + "] " + 
+	"xml_output=[Save every XML with user-provided unique id]" + " " +
+	"processing_timepoint=[Timepoint " + parallel_timepoints + "] " +
+	"type_of_interest_point_detection=[" + type_of_detection + "] " +
+	"label_interest_points=" + label_interest_points + " " +
+	"subpixel_localization=[" + subpixel_localization + "] " +
+	downsample_string +
 	channel_string + "" +
 	channel_string_multi + "" +
-	"");
+	compute_string + "" +
+	");");
 
 try {
-IJ.run("Detect Interest Points for Registration", 
-	"select_xml=" + xml_path + xml_filename + ".xml " +  
+IJ.run("Detect Interest Points for Registration",
+	"select_xml=" + xml_path + xml_filename + ".xml " +
 	"unique_id=" + unique_id + " " +
-	"process_angle=[" + reg_process_angle + "] " + 
+	"process_angle=[" + reg_process_angle + "] " +
 	"process_channel=[" + reg_process_channel + "] " +
-	"process_illumination=[" + reg_process_illumination + "] " + 
+	"process_illumination=[" + reg_process_illumination + "] " +
 	"process_timepoint=[" + reg_process_timepoint + "] " +
 	processing_channel_string +
-	"xml_output=[Save every XML with user-provided unique id]" + " " + 
-	"processing_timepoint=[Timepoint " + parallel_timepoints + "] " + 
-	"type_of_interest_point_detection=[" + type_of_detection + "] " + 
-	"label_interest_points=" + label_interest_points + " " + 
-	"subpixel_localization=[" + subpixel_localization + "] " + 
+	"xml_output=[Save every XML with user-provided unique id]" + " " +
+	"processing_timepoint=[Timepoint " + parallel_timepoints + "] " +
+	"type_of_interest_point_detection=[" + type_of_detection + "] " +
+	"label_interest_points=" + label_interest_points + " " +
+	"subpixel_localization=[" + subpixel_localization + "] " +
+	downsample_string +
 	channel_string + "" +
 	channel_string_multi + "" +
+	compute_string + "" +
 	"");
 }
 catch ( e ) { 
@@ -370,12 +518,10 @@ IJ.run("Register Dataset based on Interest Points",
         "allowed_error_for_ransac=" + allowed_error_for_ransac + " " +
         "significance=" + significance + "");
 }
-catch ( e ) { 
-
+catch ( e ) {
     print( "[registration::registration of interest points] caught exception: "+e );
     //important to fail the process if exception occurs
     runtime.exit(1);
-    
 }
 /* shutdown */
 runtime.exit(0);
diff --git a/spim_registration/timelapse/single_test.yaml b/spim_registration/timelapse/single_test.yaml
new file mode 100755
index 0000000000000000000000000000000000000000..e89c1f77f7960d3f25caad97663c72ae7d67bf2f
--- /dev/null
+++ b/spim_registration/timelapse/single_test.yaml
@@ -0,0 +1,259 @@
+common: {
+  # directory that contains the bean shell scripts and Snakefile
+  bsh_directory: "/projects/pilot_spim/Christopher/snakemake-workflows/spim_registration/timelapse/", 
+  # Directory that contains the cuda libraries
+  directory_cuda: "/sw/users/schmied/cuda/",                                                          
+  # Directory that contains the current working Fiji
+  fiji-app: "/sw/users/schmied/packages/2015-06-30_Fiji.app.cuda/ImageJ-linux64",        
+  fiji-prefix: "/sw/users/schmied/packages/xvfb-run -a",       # calls xvfb for Fiji headless mode
+  # xml file names without .xml suffix
+  first_xml_filename: 'single',       # Name of the xml file for the .czi or .tif files
+  hdf5_xml_filename: '"hdf5_single"', # Name of .xml file for the hdf5 data after resave_hdf5
+  merged_xml: 'hdf5_single_merge',    # Name of .xml file after merge
+  # Describe the dataset
+  ntimepoints: 2,               # number of timepoints of dataset
+  angles: "0,72,144,216,288",   # angles          
+  channels: "green",        # channels
+  illumination: "0",            # illuminations
+  pixel_distance_x: '0.28590106964',  # Manual calibration x
+  pixel_distance_y: '0.28590106964',  # Manual calibration y
+  pixel_distance_z: '1.50000',  # Manual calibration z
+  pixel_unit: "um",             # unit of manual calibration
+  # Use switches to decide which processing steps you need:
+  # transformation_switch: "timelapse" is the standard processing path;
+  # after timelapse registration it goes directly into fusion.
+  # "timelapse_duplicate" is for dual-channel processing where only one channel
+  # contains beads: it duplicates the transformations to the channel without beads.
+  transformation_switch: "timelapse", 
+  # Switches between content-based fusion and deconvolution
+  # "deconvolution" > for deconvolution
+  # "fusion" > for content based fusion
+  fusion_switch: "fusion"
+  }
+
+define_xml_czi: {
+  first_czi: "2015-02-21_LZ1_Stock68_3.czi", # master .czi file
+  rotation_around: "X-Axis",                       # axis of acquisition
+  bsh_file: "define_czi.bsh"                       # .bsh script for defining .czi file
+  }
+          
+define_xml_tif: {
+  # file pattern of .tif files
+  # for multi channel give spim_TL{tt}_Angle{a}_Channel{c}.tif
+  # SPIM file pattern: for padded zeros use tt
+  image_file_pattern: 'img_TL{{t}}_Angle{{a}}.tif', 
+  # Settings for ImageJ Opener
+  type_of_dataset: '"Image Stacks (ImageJ Opener)"',
+  multiple_timepoints: '"YES (one file per time-point)"', # or NO (one time-point)
+  multiple_angles: '"YES (one file per angle)"',          # or NO (one angle)
+  multiple_channels: '"NO (one channel)"',                # or '"YES (one file per channel)"'
+  multiple_illumination_directions: '"NO (one illumination direction)"', # or YES (one file per illumination direction)
+  imglib_container: '"ArrayImg (faster)"',        # '"ArrayImg (faster)"'
+  bsh_file: "define_tif_zip.bsh"  
+  }
+  
+resave_hdf5: {
+  # Resaves .tif or .czi data into hdf5
+  # Subsampling and resolution settings for hdf5: data dependent
+  hdf5_chunk_sizes: '"{{ {{32,32,4}}, {{32,32,4}}, {{16,16,16}}, {{16,16,16}} }}"', 
+  subsampling_factors: '"{{ {{1,1,1}}, {{2,2,1}}, {{4,4,1}}, {{8,8,1}} }}"',  
+  # Standard settings for cluster processing
+  setups_per_partition: '0',
+  timepoints_per_partition: '1',
+  resave_timepoint: '"All Timepoints"',  
+  resave_angle: '"All angles"',
+  resave_channel: '"All channels"',
+  resave_illumination: '"All illuminations"',
+  bsh_file: "export.bsh"
+  }
+
+registration: {
+  # reg_process_channel:
+  # Single Channel: '"All channels"'
+  # Dual Channel: '"All channels"'
+  # Dual Channel one Channel contains beads: '"Single channel (Select from List)"'
+  reg_process_channel: '"All channels"',
+  # reg_processing_channel:
+  # Dual Channel setting for 1 Channel contains the beads
+  reg_processing_channel: '"green"',      
+  # reg_interest_points_channel:
+  # Single Channel: '"beads"'
+  # Dual Channel: '"beads,beads"'
+  # Dual Channel: Channel does not contain the beads '"[DO NOT register this channel],beads"'
+  reg_interest_points_channel: '"beads"',
+  # type of detection: '"Difference-of-Mean (Integral image based)"' or '"Difference-of-Gaussian"'
+  type_of_detection: '"Difference-of-Mean (Integral image based)"',  
+  # Settings for Difference-of-Mean
+  # For multiple channels give 'value1,value2' (comma-delimited); see the example after this block
+  reg_radius_1: '2',          
+  reg_radius_2: '3',            
+  reg_threshold: '0.005',
+  # Settings for Difference-of-Gaussian
+  # For multiple channels give 'value1,value2' (comma-delimited)
+  sigma: '1.8',         
+  threshold_gaussian: '0.0080',   
+  # Processing settings for Difference-of-Gaussian detection
+  compute_on: '"GPU accurate (Nvidia CUDA via JNA)"',
+  separableconvolution: '"libSeparableConvolutionCUDALib.so"',
+  # Downsampling settings
+  downsample_detection: "No", # "No" or "Yes"
+  downsample_xy: '"Match Z Resolution (less downsampling)"',
+  downsample_z: "1x",
+  # Standard Settings for bead based registration
+  label_interest_points: '"beads"',              
+  reg_process_timepoint: '"Single Timepoint (Select from List)"',
+  reg_process_angle: '"All angles"',
+  reg_process_illumination: '"All illuminations"',
+  subpixel_localization: '"3-dimensional quadratic fit"',
+  detection_min_max: "find_maxima",
+  type_of_registration: '"Register timepoints individually"',
+  algorithm: '"Fast 3d geometric hashing (rotation invariant)"',
+  transformation_model: "Affine",
+  allowed_error_for_ransac: '5',
+  significance: '10',
+  fix_tiles: '"Fix first tile"',
+  map_back_tiles: '"Map back to first tile using rigid model"',
+  model_to_regularize_with: "Rigid",
+  lambda: '0.10',
+  imglib_container: '"ArrayImg (faster)"',
+  bsh_file: "registration.bsh"  # .bsh script for registration
+  }
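+# Example referenced above (hypothetical dual-channel values): detection settings are
+# given per channel, comma-delimited, so a dual-channel Difference-of-Mean setup might use
+#   reg_radius_1: '2,2',
+#   reg_radius_2: '3,3',
+#   reg_threshold: '0.005,0.005',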
+
+xml_merge: {
+  bsh_file: "xml_merge.bsh"
+  }
+  
+timelapse: {
+  reference_timepoint: '0',   # Reference timepoint
+  # Standard settings for timelapse registration
+  type_of_registration_timelapse: '"Match against one reference timepoint (no global optimization)"',
+  timelapse_process_timepoints: '"All Timepoints"',
+  bsh_file: "timelapse_registration.bsh"
+  }
+  
+dublicate_transformations: {
+  # For dual-channel processing where only one channel contains beads:
+  # duplicates the transformations onto the
+  # channel that does not contain beads
+  source_dublication: "red",  # source channel
+  target_dublication: "green", # target channel
+  duplicate_which_transformations: '"Replace all transformations"', # mode of duplication
+  bsh_file: "dublicate_transformations.bsh" # .bsh script for the duplication
+  }
+  
+fusion: {
+  # Content-based multiview fusion
+  # supports multi-channel data without additional settings
+  downsample: '2',  # set downsampling
+  # Cropping parameters of full resolution
+  minimal_x: '190', 
+  minimal_y: '-16',  
+  minimal_z: '-348',  
+  maximal_x: '1019',   
+  maximal_y: '1941',    
+  maximal_z: '486',   
+  # fused_image: '"Append to current XML Project"', does not work yet
+  process_timepoint: '"Single Timepoint (Select from List)"',
+  process_angle: '"All angles"',
+  process_channel: '"All channels"',
+  process_illumination: '"All illuminations"',
+  imglib2_container_fusion: '"ArrayImg"',
+  interpolation: '"Linear Interpolation"',
+  pixel_type: '"16-bit unsigned integer"',
+  imglib2_data_container: '"ArrayImg (faster)"',
+  process_views_in_paralell: '"All"',
+  xml_output: '"Save every XML with user-provided unique id"',
+  bsh_file: "fusion.bsh"
+  }
+
+external_transform: {
+  # Downsamples the data for deconvolution
+  # BUG: external transformation breaks .xml file
+  # channel setting: '"all_channels"'
+  channel_setting: '"green"',
+  transform_timepoint: '"All Timepoints"',
+  transform_angle: '"All angles"',
+  transform_channel: '"All channels"',
+  transform_illumination: '"All illuminations"',
+  apply_transformation: '"Current view transformations (appends to current transforms)"',
+  define_mode_transform: '"Matrix"',
+  # Matrix for downsampling (see the note after this block)
+  matrix_transform: '"0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0"', 
+  transformation: '"Rigid"',
+  bsh_file: "transform.bsh"
+  }
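+# Note on matrix_transform above (a sketch of the assumed row-major 3x4 affine layout):
+#   0.5  0.0  0.0  0.0
+#   0.0  0.5  0.0  0.0
+#   0.0  0.0  0.5  0.0
+# i.e. isotropic 0.5x scaling with no translation, halving the data in x, y and z
+# before deconvolution.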
+
+deconvolution: {
+  iterations: '5', # number of iterations
+  # Cropping parameters: take downsampling into account
+  minimal_x_deco: '190', 
+  minimal_y_deco: '-16', 
+  minimal_z_deco: '-348', 
+  maximal_x_deco: '1019', 
+  maximal_y_deco: '1941', 
+  maximal_z_deco: '486', 
+  # Channel settings for deconvolution
+  # Single Channel: '"beads"'
+  # Dual Channel: '"beads,beads"'
+  # Dual Channel one channel contains beads: '"[Same PSF as channel red],beads"'
+  detections_to_extract_psf_for_channel: '"[Same PSF as channel red],beads"',
+  # Settings for GPU or CPU processing 
+  # '"CPU (Java)"' or '"GPU (Nvidia CUDA via JNA)"'
+  compute_on: '"GPU (Nvidia CUDA via JNA)"',
+  cudafourierconvolution: "libFourierConvolutionCUDALib.so", # name of the cuda library for GPU processing
+  # Standard settings for deconvolution
+  process_timepoint: '"Single Timepoint (Select from List)"',
+  process_angle: '"All angles"',
+  process_channel: '"All channels"',
+  process_illumination: '"All illuminations"',
+  type_of_iteration: '"Efficient Bayesian - Optimization I (fast, precise)"',
+  Tikhonov_parameter: '0.0006',
+  compute: '"in 512x512x512 blocks"',
+  osem_acceleration: '"1 (balanced)"',
+  psf_estimation: '"Extract from beads"',
+  psf_size_x: '19',
+  psf_size_y: '19',
+  psf_size_z: '25',
+  imglib2_container: '"ArrayImg"',
+  bsh_file: "deconvolution.bsh"
+  }
+  
+hdf5_output: {
+  # Writes a new hdf5 dataset for the fusion output (will become obsolete)
+  # Naming pattern of the output
+  # Single Channel: TP{{t}}_Chgreen_Ill0_Ang0,72,144,216,288.tif (the channel name is inserted after Ch)
+  # Dual Channel: TP{{t}}_Ch{{0}}_Ill0_Ang0,72,144,216,288.tif (the channel name is inserted after Ch)
+  output_image_file_pattern: '"TP{{t}}_Chgreen_Ill0_Ang0,72,144,216,288.tif"',
+  # channel setting
+  output_multiple_channels: '"NO (one channel)"', # '"YES (one file per channel)"' or  '"NO (one channel)"'
+  output_channels: "green",
+  # .xml file names
+  output_xml: '"fused_Single"',
+  output_hdf5_xml: '"hdf5_fused_Single"',
+  output_timepoints: '0-1', # Timepoints format: '1-2'
+  # pixel size of the output: take downsampling into account! (see the worked example after this block)
+  output_pixel_distance_x: 0.28590106964, 
+  output_pixel_distance_y: 0.28590106964,
+  output_pixel_distance_z: 0.28590106964,
+  output_pixel_unit: 'um',
+  # State whether the data is 16Bit or 32Bit:
+  # the output of fusion is 16Bit, the output of deconvolution is 32Bit
+  output_data_type: "16Bit", # "32Bit" or "16Bit"
+  # if the data is 32Bit it is converted to 16Bit
+  convert_32bit: '"[Use min/max of first image (might saturate intenities over time)]"',
+  # subsampling and chunk size settings: dataset dependent
+  subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}}, {{4,4,4}}, {{8,8,8}} }}"', # data dependent
+  chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}}, {{16,16,16}}, {{16,16,16}} }}"', # data dependent
+  # subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}} }}"',
+  # chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}} }}"',
+  # Standard settings for hdf5_output
+  output_type_of_dataset: '"Image Stacks (ImageJ Opener)"',
+  output_multiple_timepoints: '"YES (one file per time-point)"',
+  output_multiple_angles: '"NO (one angle)"',
+  output_illumination_directions: '"NO (one illumination direction)"',
+  output_imglib_container: '"ArrayImg (faster)"',
+  bsh_file_define: "define_output.bsh", # .bsh script for defining the dataset
+  bsh_file_hdf5: "export_output.bsh"    # .bsh script for resaving into hdf5
+  }
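+# Worked example for the output pixel size referenced above (values as used in the
+# deconvolution configs of this repo): data acquired at ~0.2859 um/px in x/y that is
+# downsampled 2x (e.g. via the 0.5 external-transform matrix) is written out at
+# 0.2859 x 2 = 0.5718 um/px, isotropic in x, y and z.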
+
diff --git a/spim_registration/timelapse/tomancak.json b/spim_registration/timelapse/tomancak.json
deleted file mode 100644
index 021578435e6439631f55fdcb0780d5de97888993..0000000000000000000000000000000000000000
--- a/spim_registration/timelapse/tomancak.json
+++ /dev/null
@@ -1,88 +0,0 @@
-{
-    "common" :
-    {
-	"fiji-app" : "/projects/hpcsupport/steinbac/unicore/christopher/unicore_jobs/Fiji.app.cuda_new/ImageJ-linux64",
-	"fiji-prefix" : "/sw/bin/xvfb-run -a",
-	"directory_cuda" : "/lustre/projects/hpcsupport/steinbac/unicore/christopher/unicore_jobs/Fiji.app.cuda_new/lib/",
-	"merged_xml" : "hdf5_test_unicore_merge"
-    },
-    
-    "registration" :
-    {
-	"timepoint" : "\"Single Timepoint (Select from List)\"",
-	"illuminations" : "\"All illuminations\"",
-	"angle" : "\"All angles\"",
-	"channel" : "\"All channels\"",
-	"proc-ch" : "\"channel 1\"",
-	"algorithm" : "\"Fast 3d geometric hashing (rotation invariant)\"",
-	"label_interest_points" : "\"beads\"",
-	"type_of_registration" : "\"Register timepoints individually\"",
-	"type_of_registration_timelapse" : "\"Match against one reference timepoint (no global optimization)\"",
-	"type_of_detection" : "\"Difference-of-Mean (Integral image based)\"" ,
-	"subpixel_localization" : "\"3-dimensional quadratic fit\"",
-	"imglib_container" : "\"ArrayImg (faster)\"",
-	"radius_1" : "2",
-	"radius_2" : "3",
-	"threshold" : "0.005",
-	"interest_points_channel_0" : "\"[DO NOT register this channel]\"" ,
-	"interest_points_channel_1" : "\"beads\"",
-	"fix_tiles" : "\"Fix first tile\""	,
-	"map_back_tiles" : "\"Map back to first tile using rigid model\"",
-	"transformation_model" : "Affine",
-	"model_to_regularize_with" : "Rigid",
-	"lambda" : "0.10" ,
-	"allowed_error_for_ransac" : "5",
-	"detection_min_max" : "find_maxima",
-	"initial_sigma" : "1.8",
-	"threshold_gaussian" : "0.0080",
-	"bsh_file" : "registration.bsh"
-    },
- 
-    "xml_merge" :
-    {
-	"bsh_file" : "xml_merge.bsh"
-    },
-
-    "external_transform" :
-    {
-	"bsh_file" : "transform.bsh",
-	"angle" : "\"All angles\"",
-	"channel" : "\"All channels\"",
-	"illumination" : "\"All illuminations\"",
-	"timepoint" : "\"All Timepoints\"",
-	"transformation" : "\"Rigid\"",
-	"apply_transformation" : "\"Current view transformations (appends to current transforms)\"",
-	"define_mode_transform" : "\"Matrix\"",
-	"matrix_transform" : "\"0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0\""
-    },
-
-    "deconvolution" :
-    {
-
-	"bsh_file" : "deconvolution_GPU.bsh",
-	"process_timepoint" : "\"Single Timepoint (Select from List)\"",
-	"process_channel" : "\"All channels\"",
-	"process_illumination" : "\"All illuminations\"",
-	"process_angle" : "\"All angles\"",
-	"minimal_x" : "76",
-	"minimal_y" : "4",
-	"minimal_z" : "-192",
-	"maximal_x" : "488",
-	"maximal_y" : "956",
-	"maximal_z" : "214",
-	"imglib2_container" : "\"ArrayImg \"",
-	"type_of_iteration" : "\"Efficient Bayesian - Optimization I (fast, precise)\"",
-	"osem_acceleration" : "\"1 (balanced)\"",
-	"Tikhonov_parameter" : "0.0006",
-	"compute" : "\"in 512x512x512 blocks\"",
-	"compute_on" : "\"GPU (Nvidia CUDA via JNA)\"",
-	"psf_estimation" : "\"Extract from beads\"",
-	"iterations" : "5",
-	"detections_to_extract_psf_for_channel_0" : "\"beads\"",
-	"detections_to_extract_psf_for_channel_1" : "\"beads\"",
-	"psf_size_x" : "19",
-	"psf_size_y" : "19",
-	"psf_size_z" : "25"
-    }
-    
-}
diff --git a/spim_registration/timelapse/tomancak_czi.json b/spim_registration/timelapse/tomancak_czi.json
deleted file mode 100644
index 9a2445f03860cec92dec550035d55edcc38ada2e..0000000000000000000000000000000000000000
--- a/spim_registration/timelapse/tomancak_czi.json
+++ /dev/null
@@ -1,204 +0,0 @@
-{
-    "common" :
-    {
-	"fiji-app" : "/projects/hpcsupport/steinbac/unicore/christopher/unicore_jobs/Fiji.app.cuda_new/ImageJ-linux64",
-	"fiji-prefix" : "/sw/bin/xvfb-run -a",
-	"directory_cuda" : "/lustre/projects/hpcsupport/steinbac/unicore/christopher/unicore_jobs/Fiji.app.cuda_new/lib/",
-	"merged_xml" : "hdf5_test_unicore_merge",
-	"bsh_directory" : "/home/steinbac/development/cschmied-snakemake-workflows/spim_registration/timelapse/",
-	"first_xml_filename" : "test_unicore",
-	"hdf5_xml_filename" : "\"hdf5_test_unicore\"",
-	"fusion_switch" : "deconvolution",
-	"ntimepoints" : 3
-    },
-    
-    "define_xml_czi" :
-    {
-    	    "pixel_distance_x" : "0.2875535786151886",
-    	    "pixel_distance_y" : "0.2875535786151886",
-    	    "pixel_distance_z" : "1.50000", 
-    	    "pixel_unit" : "um",				    
-    	    "first_czi" : "2015-02-21_LZ1_Stock68_3.czi",
-    	    "channel_1" : "green",		
-    	    "channel_2" : "red",
-    	    "angle_1" : "0",
-    	    "angle_2" : "72",
-    	    "angle_3" :"144",
-    	    "angle_4" :"216",
-    	    "angle_5" : "288",
-    	    "illumination_1" : "0",
-    	    "rotation_around" : "X-Axis",
-    	    "bsh_file" : "define_czi.bsh" 
-    },
-    
-    "define_xml_tif" :
-    {
-	"timepoints" : "0-1",
-	"acquisition_angles" : "0,72,144,216,288",
-	"channels" : "0",
-	"image_file_pattern" : "img_TL{{t}}_Angle{{a}}.tif",
-	"pixel_distance_x"  : "0.2875535786151886",
-	"pixel_distance_y" : "0.2875535786151886",
-	"pixel_distance_z" : "1.50000",
-	"pixel_unit" : "um",
-	"multiple_timepoints" : "\"YES (one file per time-point)\"",
-	"multiple_channels" : "\"NO (one channel)\"",
-	"multiple_illumination_directions" : "\"NO (one illumination direction)\"",
-	"multiple_angles" : "\"YES (one file per angle)\"",
-	"type_of_dataset" : "\"Image Stacks (ImageJ Opener)\"",
-	"imglib_container" : "\"ArrayImg (faster)\"",
-	"bsh_file" : "define_tif_zip.bsh" 
-    },
-    
-    "resave_hdf5" :
-    {
-    	    "parallel_timepoints" : "3",
-    	    "resave_angle" : "\"All angles\"",
-    	    "resave_channel" : "\"All channels\"",
-    	    "resave_illumination" : "\"All illuminations\"",
-    	    "resave_timepoint" : "\"All Timepoints\"",
-    	    "subsampling_factors" : "\"{{ {{1,1,1}}, {{2,2,1}}, {{4,4,1}}, {{8,8,1}} }}\"",
-    	    "hdf5_chunk_sizes" : "\"{{ {{32,32,4}}, {{32,32,4}}, {{16,16,16}}, {{16,16,16}} }}\"",
-    	    "timepoints_per_partition" : "1",
-    	    "setups_per_partition" : "0",
-    	    "bsh_file" : "export.bsh"
-    },
-    
-    "registration" :
-    {
-	"timepoint" : "\"Single Timepoint (Select from List)\"",
-	"illuminations" : "\"All illuminations\"",
-	"angle" : "\"All angles\"",
-	"channel" : "\"All channels\"",
-	"proc-ch" : "\"channel 0\"",
-	"algorithm" : "\"Fast 3d geometric hashing (rotation invariant)\"",
-	"label_interest_points" : "\"beads\"",
-	"type_of_registration" : "\"Register timepoints individually\"",
-	"type_of_detection" : "\"Difference-of-Mean (Integral image based)\"" ,
-	"subpixel_localization" : "\"3-dimensional quadratic fit\"",
-	"imglib_container" : "\"ArrayImg (faster)\"",
-	"radius_1" : "2",
-	"radius_2" : "3",
-	"threshold" : "0.005",
-	"interest_points_channel_0" : "\"beads\"",
-	"interest_points_channel_1" : "\"beads\"",
-	"fix_tiles" : "\"Fix first tile\"",
-	"map_back_tiles" : "\"Map back to first tile using rigid model\"",
-	"transformation_model" : "Affine",
-	"model_to_regularize_with" : "Rigid",
-	"lambda" : "0.10" ,
-	"allowed_error_for_ransac" : "5",
-	"significance" : "10",
-	"detection_min_max" : "find_maxima",
-	"initial_sigma" : "1.8",
-	"threshold_gaussian" : "0.0080",
-	"bsh_file" : "registration.bsh"
-    },
- 
-    "xml_merge" :
-    {
-	"bsh_file" : "xml_merge.bsh"
-    },
-    
-    "timelapse" : 
-    {
-    	  "reference_timepoint" : "0",
-    	  "timelapse_process_timepoints" : "\"All Timepoints\"",
-    	  "type_of_registration_timelapse" : "\"Match against one reference timepoint (no global optimization)\"",
-    	  "bsh_file" : "timelapse_registration.bsh"
-    },
-
-    "external_transform" :
-    {
-	"bsh_file" : "transform.bsh",
-	"angle" : "\"All angles\"",
-	"channel" : "\"All channels\"",
-	"illumination" : "\"All illuminations\"",
-	"timepoint" : "\"All Timepoints\"",
-	"transformation" : "\"Rigid\"",
-	"apply_transformation" : "\"Current view transformations (appends to current transforms)\"",
-	"define_mode_transform" : "\"Matrix\"",
-	"matrix_transform" : "\"0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0\""
-    },
-    
-    "fusion" :
-    {
-    	    "minimal_x" : "128",	
-    	    "minimal_y" : "-13",
-    	    "minimal_z" : "-407",
-    	    "maximal_x" : "986",
-    	    "maximal_y" : "1927",
-    	    "maximal_z" : "498",
-    	    "downsample" : "1",
-    	    "process_timepoint" : "\"Single Timepoint (Select from List)\"",
-    	    "process_channel" : "\"All channels\"",
-    	    "process_illumination" : "\"All illuminations\"",
-    	    "process_angle" : "\"All angles\"",
-    	    "xml_output" : "\"Save every XML with user-provided unique id\"", 
-    	    "fused_image" : "\"Append to current XML Project\"",
-    	    "pixel_type" : "\"16-bit unsigned integer\"",
-    	    "imglib2_container_fusion" : "\"ArrayImg\"",
-    	    "process_views_in_paralell" : "\"All\"",
-    	    "interpolation" : "\"Linear Interpolation\"",
-    	    "imglib2_data_container" : "\"ArrayImg (faster)\"",
-    	    "bsh_file" : "fusion.bsh"
-    },
-
-    "deconvolution" :
-    {
-
-	"bsh_file" : "deconvolution_GPU.bsh",
-	"process_timepoint" : "\"Single Timepoint (Select from List)\"",
-	"process_channel" : "\"All channels\"",
-	"process_illumination" : "\"All illuminations\"",
-	"process_angle" : "\"All angles\"",
-	"minimal_x" : "76",
-	"minimal_y" : "4",
-	"minimal_z" : "-192",
-	"maximal_x" : "488",
-	"maximal_y" : "956",
-	"maximal_z" : "214",
-	"imglib2_container" : "\"ArrayImg \"",
-	"type_of_iteration" : "\"Efficient Bayesian - Optimization I (fast, precise)\"",
-	"osem_acceleration" : "\"1 (balanced)\"",
-	"Tikhonov_parameter" : "0.0006",
-	"compute" : "\"in 512x512x512 blocks\"",
-	"compute_on" : "\"GPU (Nvidia CUDA via JNA)\"",
-	"psf_estimation" : "\"Extract from beads\"",
-	"iterations" : "5",
-	"detections_to_extract_psf_for_channel_0" : "\"beads\"",
-	"detections_to_extract_psf_for_channel_1" : "\"beads\"",
-	"psf_size_x" : "19",
-	"psf_size_y" : "19",
-	"psf_size_z" : "25"
-    },
-    
-    "hdf5_output" :
-    {
-    	    "output_image_file_pattern" : "TP{{t}}_Ch{{c}}_Ill0_Ang0,72,144,216,288.tif",
-    	    "output_data_type" : "32Bit",
-    	    "output_xml" : "\"fused_Dual_Channel\"",
-    	    "output_hdf5_xml" : "\"hdf5_fused_Stock68\"",
-    	    "output_multiple_channels" : "\"NO (one channel)\"",	
-    	    "output_timepoints" : "0-1",
-    	    "output_channels" : "green",
-    	    "output_pixel_distance_x" : "0.5718", 	
-    	    "output_pixel_distance_y" : "0.5718", 	
-    	    "output_pixel_distance_z" : "0.5718", 	
-    	    "output_pixel_unit" : "um",			
-    	    "output_multiple_timepoints" : "\"YES (one file per time-point)\"",   	
-    	    "output_illumination_directions" : "\"NO (one illumination direction)\"",	
-    	    "output_multiple_angles" : "\"NO (one angle)\"",					
-    	    "output_type_of_dataset" : "\"Image Stacks (ImageJ Opener)\"", 		
-    	    "output_imglib_container" : "\"ArrayImg (faster)\"",
-    	    "bsh_file_define" : "/define_output.bsh",
-    	    "bsh_file_hdf5" : "/export_output.bsh", 	
-    	    "convert_32bit" : "\"[Use min/max of first image (might saturate intenities over time)]\""
-    	    
-    }
-
-    
-    
-    
-    
-}
diff --git a/spim_registration/timelapse/tomancak_test_cluster.json b/spim_registration/timelapse/tomancak_test_cluster.json
deleted file mode 100644
index 44899c53b1b4dca63946a6b2a9d852faa25c9909..0000000000000000000000000000000000000000
--- a/spim_registration/timelapse/tomancak_test_cluster.json
+++ /dev/null
@@ -1,202 +0,0 @@
-{
-    "common" :
-    {
-	"fiji-app" : "/projects/hpcsupport/steinbac/unicore/christopher/unicore_jobs/Fiji.app.cuda_new/ImageJ-linux64",
-	"fiji-prefix" : "/sw/bin/xvfb-run -a",
-	"directory_cuda" : "/lustre/projects/hpcsupport/steinbac/unicore/christopher/unicore_jobs/Fiji.app.cuda_new/lib/",
-	"merged_xml" : "hdf5_test_unicore_merge",
-	"bsh_directory" : "/home/steinbac/development/schmied-snakemake-workflows/spim_registration/timelapse/",
-	"first_xml_filename" : "test_unicore",
-	"hdf5_xml_filename" : "\"hdf5_test_unicore\"",
-	"fusion_switch" : "deconvolution",
-	"ntimepoints" : 5
-    },
-    
-    "define_xml_czi" :
-    {
-    	    "pixel_distance_x" : "0.28590106964",
-    	    "pixel_distance_y" : "0.28590106964",
-    	    "pixel_distance_z" : "1.50000", 
-    	    "pixel_unit" : "um",				    
-    	    "first_czi" : "2015-04-11_LZ2_Stock68_3.czi",
-    	    "channel_1" : "green",		
-    	    "channel_2" : "red",
-    	    "angle_1" : "0",
-    	    "angle_2" : "72",
-    	    "angle_3" : "144",
-    	    "angle_4" : "216",
-    	    "angle_5" : "288",
-    	    "illumination_1" : "0",
-    	    "rotation_around" : "X-Axis",
-    	    "bsh_file" : "define_czi.bsh" 
-    },
-    
-    "define_xml_tif" :
-    {
-	"acquisition_angles" : "0,72,144,216,288",
-	"channels" : "0",
-	"image_file_pattern" : "img_TL{{t}}_Angle{{a}}.tif",
-	"pixel_distance_x" : "0.28590106964",
-	"pixel_distance_y" : "0.28590106964",
-	"pixel_distance_z" : "1.50000",
-	"pixel_unit" : "um",
-	"multiple_timepoints" : "\"YES (one file per time-point)\"",
-	"multiple_channels" : "\"NO (one channel)\"",
-	"multiple_illumination_directions" : "\"NO (one illumination direction)\"",
-	"multiple_angles" : "\"YES (one file per angle)\"",
-	"type_of_dataset" : "\"Image Stacks (ImageJ Opener)\"",
-	"imglib_container" : "\"ArrayImg (faster)\"",
-	"bsh_file" : "define_tif_zip.bsh" 
-    },
-    
-    "resave_hdf5" :
-    {
-    	    "resave_angle" : "\"All angles\"",
-    	    "resave_channel" : "\"All channels\"",
-    	    "resave_illumination" : "\"All illuminations\"",
-    	    "resave_timepoint" : "\"All Timepoints\"",
-    	    "subsampling_factors" : "\"{{ {{1,1,1}}, {{2,2,1}}, {{4,4,1}}, {{8,8,1}} }}\"",
-    	    "hdf5_chunk_sizes" : "\"{{ {{32,32,4}}, {{32,32,4}}, {{16,16,16}}, {{16,16,16}} }}\"",
-    	    "timepoints_per_partition" : "1",
-    	    "setups_per_partition" : "0",
-    	    "bsh_file" : "export.bsh"
-    },
-    
-    "registration" :
-    {
-	"timepoint" : "\"Single Timepoint (Select from List)\"",
-	"illuminations" : "\"All illuminations\"",
-	"angle" : "\"All angles\"",
-	"channel" : "\"All channels\"",
-	"proc-ch" : "\"channel 0\"",
-	"algorithm" : "\"Fast 3d geometric hashing (rotation invariant)\"",
-	"label_interest_points" : "\"beads\"",
-	"type_of_registration" : "\"Register timepoints individually\"",
-	"type_of_detection" : "\"Difference-of-Mean (Integral image based)\"" ,
-	"subpixel_localization" : "\"3-dimensional quadratic fit\"",
-	"imglib_container" : "\"ArrayImg (faster)\"",
-	"radius_1" : "2",
-	"radius_2" : "3",
-	"threshold" : "0.005",
-	"interest_points_channel_0" : "\"beads\"",
-	"interest_points_channel_1" : "\"beads\"",
-	"fix_tiles" : "\"Fix first tile\"",
-	"map_back_tiles" : "\"Map back to first tile using rigid model\"",
-	"transformation_model" : "Affine",
-	"model_to_regularize_with" : "Rigid",
-	"lambda" : "0.10" ,
-	"allowed_error_for_ransac" : "5",
-	"significance" : "10",
-	"detection_min_max" : "find_maxima",
-	"initial_sigma" : "1.8",
-	"threshold_gaussian" : "0.0080",
-	"bsh_file" : "registration.bsh"
-    },
- 
-    "xml_merge" :
-    {
-	"bsh_file" : "xml_merge.bsh"
-    },
-    
-    "timelapse" : 
-    {
-    	  "reference_timepoint" : "0",
-    	  "timelapse_process_timepoints" : "\"All Timepoints\"",
-    	  "type_of_registration_timelapse" : "\"Match against one reference timepoint (no global optimization)\"",
-    	  "bsh_file" : "timelapse_registration.bsh"
-    },
-
-    "external_transform" :
-    {
-	"bsh_file" : "transform.bsh",
-	"angle" : "\"All angles\"",
-	"channel" : "\"All channels\"",
-	"illumination" : "\"All illuminations\"",
-	"timepoint" : "\"All Timepoints\"",
-	"transformation" : "\"Rigid\"",
-	"apply_transformation" : "\"Current view transformations (appends to current transforms)\"",
-	"define_mode_transform" : "\"Matrix\"",
-	"matrix_transform" : "\"0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0\""
-    },
-    
-    "fusion" :
-    {
-    	    "minimal_x" : "190",	
-    	    "minimal_y" : "-16",
-    	    "minimal_z" : "-348",
-    	    "maximal_x" : "1019",
-    	    "maximal_y" : "1941",
-    	    "maximal_z" : "486",
-    	    "downsample" : "1",
-    	    "process_timepoint" : "\"Single Timepoint (Select from List)\"",
-    	    "process_channel" : "\"All channels\"",
-    	    "process_illumination" : "\"All illuminations\"",
-    	    "process_angle" : "\"All angles\"",
-    	    "xml_output" : "\"Save every XML with user-provided unique id\"", 
-    	    "fused_image" : "\"Append to current XML Project\"",
-    	    "pixel_type" : "\"16-bit unsigned integer\"",
-    	    "imglib2_container_fusion" : "\"ArrayImg\"",
-    	    "process_views_in_paralell" : "\"All\"",
-    	    "interpolation" : "\"Linear Interpolation\"",
-    	    "imglib2_data_container" : "\"ArrayImg (faster)\"",
-    	    "bsh_file" : "fusion.bsh"
-    },
-
-    "deconvolution" :
-    {
-
-	"bsh_file" : "deconvolution_GPU.bsh",
-	"process_timepoint" : "\"Single Timepoint (Select from List)\"",
-	"process_channel" : "\"All channels\"",
-	"process_illumination" : "\"All illuminations\"",
-	"process_angle" : "\"All angles\"",
-	"minimal_x" : "95",
-	"minimal_y" : "-8",
-	"minimal_z" : "-174",
-	"maximal_x" : "509",
-	"maximal_y" : "970",
-	"maximal_z" : "243",
-	"imglib2_container" : "\"ArrayImg \"",
-	"type_of_iteration" : "\"Efficient Bayesian - Optimization I (fast, precise)\"",
-	"osem_acceleration" : "\"1 (balanced)\"",
-	"Tikhonov_parameter" : "0.0006",
-	"compute" : "\"in 512x512x512 blocks\"",
-	"compute_on" : "\"GPU (Nvidia CUDA via JNA)\"",
-	"psf_estimation" : "\"Extract from beads\"",
-	"iterations" : "5",
-	"detections_to_extract_psf_for_channel_0" : "\"beads\"",
-	"detections_to_extract_psf_for_channel_1" : "\"beads\"",
-	"psf_size_x" : "19",
-	"psf_size_y" : "19",
-	"psf_size_z" : "25"
-    },
-    
-    "hdf5_output" :
-    {
-    	    "output_image_file_pattern" : "TP{{t}}_Ch{{c}}_Ill0_Ang0,72,144,216,288.tif",
-    	    "output_data_type" : "32Bit",
-    	    "output_xml" : "\"fused_Dual_Channel\"",
-    	    "output_hdf5_xml" : "\"hdf5_fused_Stock68\"",
-    	    "output_multiple_channels" : "\"NO (one channel)\"",	
-    	    "output_timepoints" : "0-4",
-    	    "output_channels" : "green",
-    	    "output_pixel_distance_x" : "0.5718", 	
-    	    "output_pixel_distance_y" : "0.5718", 	
-    	    "output_pixel_distance_z" : "0.5718", 	
-    	    "output_pixel_unit" : "um",			
-    	    "output_multiple_timepoints" : "\"YES (one file per time-point)\"",   	
-    	    "output_illumination_directions" : "\"NO (one illumination direction)\"",	
-    	    "output_multiple_angles" : "\"NO (one angle)\"",					
-    	    "output_type_of_dataset" : "\"Image Stacks (ImageJ Opener)\"", 		
-    	    "output_imglib_container" : "\"ArrayImg (faster)\"",
-    	    "bsh_file_define" : "/define_output.bsh",
-    	    "bsh_file_hdf5" : "/export_output.bsh", 	
-    	    "convert_32bit" : "\"[Use min/max of first image (might saturate intenities over time)]\""
-    	    
-    }
-
-    
-    
-    
-    
-}
diff --git a/spim_registration/timelapse/tomancak_test_cluster.yaml b/spim_registration/timelapse/tomancak_test_cluster.yaml
index 84c42f55c8998a77e4113a35bc62b4f52be471e7..b0095960b0242d6a250f8854b9e2866500793375 100755
--- a/spim_registration/timelapse/tomancak_test_cluster.yaml
+++ b/spim_registration/timelapse/tomancak_test_cluster.yaml
@@ -1,66 +1,108 @@
 common: {
-  # directory that contains the bean shell scripts
-  bsh_directory: "/projects/pilot_spim/Christopher/snakemake-workflows/spim_registration/timelapse/",
-  directory_cuda: "/sw/users/schmied/packages/cuda/lib/",
-  fiji-app: "/sw/users/schmied/packages/2015-06-08_Fiji.app.cuda/ImageJ-linux64",
-  fiji-prefix: "/sw/bin/xvfb-run -a",
-  first_xml_filename: "Dual_Channel",
-  hdf5_xml_filename: '"hdf5_Dual_Channel"',
-  merged_xml: "hdf5_Dual_Channel_merge",
-  ntimepoints: 2,
-  angles: "0,72,144,216,288",
-  channels: "green,red",
-  illumination: "0",
-  pixel_distance_x: '0.28590',
-  pixel_distance_y: '0.28590',
-  pixel_distance_z: '1.50000',
-  pixel_unit: "um",
-  transformation_switch: "timelapse_dublicate",
-  # transformation_switch: "timelapse",
-  # fusion_switch: "deconvolution"
-  fusion_switch: "fusion"
+  # directory that contains the bean shell scripts and Snakefile
+  bsh_directory: "/projects/pilot_spim/Christopher/snakemake-workflows/spim_registration/timelapse/", 
+  # Directory that contains the cuda libraries
+  directory_cuda: "/sw/users/schmied/cuda/",                                                          
+  # Directory that contains the current working Fiji
+  #fiji-app: "/sw/users/schmied/packages/2015-06-08_Fiji.app.cuda/ImageJ-linux64",
+  fiji-app: "/sw/users/schmied/packages/2015-05-29_Fiji_2.3.9_SNAP.app.cuda/ImageJ-linux64",          
+  fiji-prefix: "/sw/bin/xvfb-run -a",       # calls xvfb for Fiji headless mode
+  # xml file names without .xml suffix
+  first_xml_filename: "Dual_Channel",       # Name of the xml file for the .czi or .tif files
+  hdf5_xml_filename: '"hdf5_Dual_Channel"', # Name of .xml file for the hdf5 data after resave_hdf5
+  merged_xml: "hdf5_Dual_Channel_merge",    # Name of .xml file after merge
+  # Describe the dataset
+  ntimepoints: 2,               # number of timepoints of dataset
+  angles: "0,72,144,216,288",   # angles          
+  channels: "green,red",        # channels
+  illumination: "0",            # illuminations
+  pixel_distance_x: '0.28590',  # Manual calibration x
+  pixel_distance_y: '0.28590',  # Manual calibration y
+  pixel_distance_z: '1.50000',  # Manual calibration z
+  pixel_unit: "um",             # unit of manual calibration
+  # Use switches to decide which processing steps you need:
+  # transformation_switch: "timelapse" standard processing
+  # after timelapse registration directly goes into fusion, timelapse_duplicate
+  # "timelapse_duplicate" for dual channel processing one channel contains the beads
+  # duplicates transformations
+  transformation_switch: "timelapse_duplicate", 
+  # Switches between content based fusion and deconvoltion
+  # "deconvolution" > for deconvolution
+  # "fusion" > for content based fusion
+  fusion_switch: "deconvolution"
   }
-              
+
 define_xml_czi: {
-  first_czi: "2015-02-20_LZ2_Stock48_Stock58.czi", # essential
-  rotation_around: "X-Axis",
-  bsh_file: "define_czi.bsh"
+  first_czi: "2015-02-20_LZ2_Stock48_Stock58.czi", # master .czi file
+  rotation_around: "X-Axis",                       # axis of acquistion
+  bsh_file: "define_czi.bsh"                       # .bsh script for defining .czi file
   }
           
 define_xml_tif: {
-  image_file_pattern: 'img_TL{{t}}_Angle{{a}}.tif', # essential
-  imglib_container: '"ArrayImg (faster)"',
-  multiple_angles: '"YES (one file per angle)"',
-  multiple_channels: '"NO (one channel)"',
-  multiple_illumination_directions: '"NO (one illumination direction)"',
-  multiple_timepoints: '"YES (one file per time-point)"',
+  # File pattern of the .tif files
+  # For multi-channel data use e.g. spim_TL{tt}_Angle{a}_Channel{c}.tif
+  # (for zero-padded timepoints use tt instead of t)
+  image_file_pattern: 'img_TL{{t}}_Angle{{a}}.tif', 
+  # Settings for ImageJ Opener
   type_of_dataset: '"Image Stacks (ImageJ Opener)"',
-  bsh_file: "define_tif_zip.bsh"
+  multiple_timepoints: '"YES (one file per time-point)"', # or NO (one time-point)
+  multiple_angles: '"YES (one file per angle)"',          # or NO (one angle)
+  multiple_channels: '"NO (one channel)"',                # or "\"NO (one channel)\""
+  multiple_illumination_directions: '"NO (one illumination direction)"', # or YES (one file per illumination direction)
+  imglib_container: '"ArrayImg (faster)"',        # '"ArrayImg (faster)"'
+  bsh_file: "define_tif_zip.bsh"  
   }
   
 resave_hdf5: {
-  hdf5_chunk_sizes: '"{{ {{32,32,4}}, {{32,32,4}}, {{16,16,16}}, {{16,16,16}} }}"', # data dependent
-  subsampling_factors: '"{{ {{1,1,1}}, {{2,2,1}}, {{4,4,1}}, {{8,8,1}} }}"',  # data dependent
+  # Resaves .tif or .czi data into hdf5
+  # Subsampling and resolution settings for hdf5: data dependent
+  hdf5_chunk_sizes: '"{{ {{32,32,4}}, {{32,32,4}}, {{16,16,16}}, {{16,16,16}} }}"', 
+  subsampling_factors: '"{{ {{1,1,1}}, {{2,2,1}}, {{4,4,1}}, {{8,8,1}} }}"',  
+  # Standard settings for cluster processing
+  setups_per_partition: '0',
+  timepoints_per_partition: '1',
+  resave_timepoint: '"All Timepoints"',  
   resave_angle: '"All angles"',
   resave_channel: '"All channels"',
   resave_illumination: '"All illuminations"',
-  resave_timepoint: '"All Timepoints"',
-  setups_per_partition: '0',
-  timepoints_per_partition: '1',
   bsh_file: "export.bsh"
   }
 
 registration: {
-  reg_process_channel: '"Single channel (Select from List)"', # essential '"All channels"'; '"Single channel (Select from List)"'
-  reg_processing_channel: '"red"',      # essential
-  reg_interest_points_channel: '"beads"', # essential
-  reg_radius_1: '2',           # essential
-  reg_radius_2: '3',            # essential
-  reg_threshold: '0.005',       # essential
-  initial_sigma: '1.8',         # essetial
-  threshold_gaussian: '0.0080',   # essential
-  type_of_detection: '"Difference-of-Mean (Integral image based)"', # switch
-  label_interest_points: '"beads"',               
+  # reg_process_channel:
+  # Single Channel: '"All channels"'
+  # Dual Channel: '"All channels"'
+  # Dual Channel, only one channel contains beads: '"Single channel (Select from List)"'
+  reg_process_channel: '"Single channel (Select from List)"',
+  # reg_processing_channel:
+  # Dual Channel: the channel that contains the beads
+  reg_processing_channel: '"red"',      
+  # reg_interest_points_channel:
+  # Single Channel: '"beads"'
+  # Dual Channel: '"beads,beads"'
+  # Dual Channel, one channel does not contain beads: '"[DO NOT register this channel],beads"'
+  reg_interest_points_channel: '"beads"',
+  # type of detection: '"Difference-of-Mean (Integral image based)"' or '"Difference-of-Gaussian"'
+  type_of_detection: '"Difference-of-Mean (Integral image based)"',  
+  # Settings for Difference-of-Mean
+  # For multiple channels give 'value1,value2' (comma-delimited)
+  reg_radius_1: '2',          
+  reg_radius_2: '3',            
+  reg_threshold: '0.005',
+  # Settings for Difference-of-Gaussian
+  # For multiple channels give 'value1,value2' (comma-delimited)
+  sigma: '1.8',         
+  threshold_gaussian: '0.0080',   
+  # Processing settings for Difference-of-Gaussian detection
+  compute_on: '"GPU accurate (Nvidia CUDA via JNA)"',
+  separableconvolution: '"libSeparableConvolutionCUDALib.so"',
+  # Downsampling settings
+  downsample_detection: "No", # "No" or "Yes"
+  downsample_xy: '"Match Z Resolution (less downsampling)"',
+  downsample_z: "1x",
+  # Standard Settings for bead based registration
+  label_interest_points: '"beads"',              
   reg_process_timepoint: '"Single Timepoint (Select from List)"',
   reg_process_angle: '"All angles"',
   reg_process_illumination: '"All illuminations"',
@@ -76,7 +118,7 @@ registration: {
   model_to_regularize_with: "Rigid",
   lambda: '0.10',
   imglib_container: '"ArrayImg (faster)"',
-  bsh_file: "registration.bsh"
+  bsh_file: "registration.bsh"  # .bsh script for registration
   }
 
 xml_merge: {
@@ -84,107 +126,135 @@ xml_merge: {
   }
   
 timelapse: {
-  reference_timepoint: '0',   # essential
+  reference_timepoint: '0',   # Reference timepoint
+  # Standard settings for timelapse registration
   type_of_registration_timelapse: '"Match against one reference timepoint (no global optimization)"',
   timelapse_process_timepoints: '"All Timepoints"',
   bsh_file: "timelapse_registration.bsh"
   }
   
 dublicate_transformations: {
-  source_dublication: "red",  # essential for 1 usecase
-  target_dublication: "green", # essential for 1 usecase
-  duplicate_which_transformations: '"Replace all transformations"',
-  bsh_file: "dublicate_transformations.bsh"
+  # For dual-channel processing where only one channel contains beads:
+  # duplicates the transformations onto the
+  # channel that does not contain beads
+  source_dublication: "red",  # source channel
+  target_dublication: "green", # target channel
+  duplicate_which_transformations: '"Replace all transformations"', # mode of duplication
+  bsh_file: "dublicate_transformations.bsh" # .bsh script for the duplication
   }
   
 fusion: {
-  downsample: '1',  # essential
-  minimal_x: '220', # essential maybe automate
-  minimal_y: '40',  # essential maybe automate
-  minimal_z: '-290',  # essential maybe automate
-  maximal_x: '976',   # essential maybe automate
-  maximal_y: '1892',  # essential maybe automate  
-  maximal_z: '472',   # essential maybe automate
-  fused_image: '"Append to current XML Project"',
-  imglib2_container_fusion: '"ArrayImg"',
-  imglib2_data_container: '"ArrayImg (faster)"',
-  interpolation: '"Linear Interpolation"',
-  pixel_type: '"16-bit unsigned integer"',
+  # Content-based multiview fusion
+  # supports multi-channel data without additional settings
+  downsample: '1',  # set downsampling
+  # Cropping parameters of full resolution
+  minimal_x: '220', 
+  minimal_y: '40',  
+  minimal_z: '-290',  
+  maximal_x: '976',   
+  maximal_y: '1892',    
+  maximal_z: '472',   
+  # fused_image: '"Append to current XML Project"', does not work yet
+  process_timepoint: '"Single Timepoint (Select from List)"',
   process_angle: '"All angles"',
   process_channel: '"All channels"',
   process_illumination: '"All illuminations"',
-  process_timepoint: '"Single Timepoint (Select from List)"',
+  imglib2_container_fusion: '"ArrayImg"',
+  interpolation: '"Linear Interpolation"',
+  pixel_type: '"16-bit unsigned integer"',
+  imglib2_data_container: '"ArrayImg (faster)"',
   process_views_in_paralell: '"All"',
   xml_output: '"Save every XML with user-provided unique id"',
   bsh_file: "fusion.bsh"
   }
 
 external_transform: {
-  # BUG
+  # Downsamples the data for deconvolution
+  # BUG: external transformation breaks .xml file
   # channel setting: '"all_channels"'
   channel_setting: '"green,red"',
   transform_timepoint: '"All Timepoints"',
   transform_angle: '"All angles"',
   transform_channel: '"All channels"',
-  # illumination setting only one illumination side
   transform_illumination: '"All illuminations"',
   apply_transformation: '"Current view transformations (appends to current transforms)"',
   define_mode_transform: '"Matrix"',
-  matrix_transform: '"0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0"',  # essential
+  # Matrix for downsampling
+  matrix_transform: '"0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0"', 
   transformation: '"Rigid"',
   bsh_file: "transform.bsh"
   }
 
 deconvolution: {
-  iterations: '1', # essential
-  minimal_x_deco: '190', # essential maybe automate
-  minimal_y_deco: '-16', # essential maybe automate
-  minimal_z_deco: '-348', # essential maybe automate
-  maximal_x_deco: '1019', # essential maybe automate
-  maximal_y_deco: '1941', # essential maybe automate
-  maximal_z_deco: '486', # essential maybe automate
-  detections_to_extract_psf_for_channel: '"beads"',
+  iterations: '1', # number of iterations
+  # Cropping parameters: take downsampling into account
+  minimal_x_deco: '190', 
+  minimal_y_deco: '-16', 
+  minimal_z_deco: '-348', 
+  maximal_x_deco: '1019', 
+  maximal_y_deco: '1941', 
+  maximal_z_deco: '486', 
+  # Channel settings for deconvolution
+  # Single Channel: '"beads"'
+  # Dual Channel: '"beads,beads"'
+  # Dual Channel one channel contains beads: '"[Same PSF as channel red],beads"'
+  detections_to_extract_psf_for_channel: '"[Same PSF as channel red],beads"',
+  # Settings for GPU or CPU processing 
+  # '"CPU (Java)"' or '"GPU (Nvidia CUDA via JNA)"'
+  compute_on: '"GPU (Nvidia CUDA via JNA)"',
+  cudafourierconvolution: "libFourierConvolutionCUDALib.so", # name of the cuda library for GPU processing
+  # Standard settings for deconvolution
   process_timepoint: '"Single Timepoint (Select from List)"',
   process_angle: '"All angles"',
   process_channel: '"All channels"',
   process_illumination: '"All illuminations"',
+  type_of_iteration: '"Efficient Bayesian - Optimization I (fast, precise)"',
   Tikhonov_parameter: '0.0006',
   compute: '"in 512x512x512 blocks"',
-  compute_on: '"GPU (Nvidia CUDA via JNA)"',
-  imglib2_container: '"ArrayImg"',
   osem_acceleration: '"1 (balanced)"',
   psf_estimation: '"Extract from beads"',
   psf_size_x: '19',
   psf_size_y: '19',
   psf_size_z: '25',
-  type_of_iteration: '"Efficient Bayesian - Optimization I (fast, precise)"',
-  bsh_file: "deconvolution_GPU.bsh"
+  imglib2_container: '"ArrayImg"',
+  bsh_file: "deconvolution.bsh"
   }
   
 hdf5_output: {
-  # Will be obsolete
+  # Writes a new hdf5 dataset for the fusion output (will become obsolete)
+  # Naming pattern of the output
+  # Single Channel: TP{t}_Chgreen_Ill0_Ang0,72,144,216,288.tif (the channel name is inserted after Ch)
+  # Dual Channel: TP{t}_Ch{0}_Ill0_Ang0,72,144,216,288.tif (the channel name is inserted after Ch)
   output_image_file_pattern: 'TP{{t}}_Ch{{c}}_Ill0_Ang0,72,144,216,288.tif',
+  # channel setting
+  output_multiple_channels: '"YES (one file per channel)"', # '"YES (one file per channel)"' or  '"NO (one channel)"'
+  output_channels: "green,red",
+  # .xml file names
   output_xml: '"fused_Dual_Channel"',
   output_hdf5_xml: '"hdf5_fused_Dual_Channel"',
-  output_multiple_channels: '"YES (one file per channel)"', # "\"YES (one file per channel)\"" or  "\"NO (one channel)\""
-  output_timepoints: '0-1',
-  output_pixel_distance_x: 0.28590,
+  output_timepoints: '0-1', # Timepoints format: '1-2'
+  # pixel size of output: take downsampling into account!
+  output_pixel_distance_x: 0.28590, 
   output_pixel_distance_y: 0.28590,
   output_pixel_distance_z: 0.28590,
   output_pixel_unit: 'um',
-  output_channels: "green,red",
-  output_data_type: "16Bit",
+  # State whether the data is 16Bit or 32Bit:
+  # the output of fusion is 16Bit, the output of deconvolution is 32Bit
+  output_data_type: "16Bit", # "32Bit" or "16Bit"
+  # if the data is 32Bit it is converted to 16Bit
   convert_32bit: '"[Use min/max of first image (might saturate intenities over time)]"',
+  # subsampling and chunk size settings: dataset dependent
+  subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}}, {{4,4,4}}, {{8,8,8}} }}"', # data dependent
+  chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}}, {{16,16,16}}, {{16,16,16}} }}"', # data dependent
+  # subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}} }}"',
+  # chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}} }}"',
+  # Standard settings for hdf5_output
   output_type_of_dataset: '"Image Stacks (ImageJ Opener)"',
   output_multiple_timepoints: '"YES (one file per time-point)"',
   output_multiple_angles: '"NO (one angle)"',
   output_illumination_directions: '"NO (one illumination direction)"',
   output_imglib_container: '"ArrayImg (faster)"',
-  subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}}, {{4,4,4}}, {{8,8,8}} }}"', # data dependent
-  chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}}, {{16,16,16}}, {{16,16,16}} }}"', # data dependent
-  # subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}} }}"',
-  # chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}} }}"',
-  bsh_file_define: "define_output.bsh",
-  bsh_file_hdf5: "export_output.bsh"
+  bsh_file_define: "define_output.bsh", # .bsh script for defining the dataset
+  bsh_file_hdf5: "export_output.bsh"    # .bsh script for resaving into hdf5
   }