Commit af79164f authored by Peter Steinbach

Merge pull request #3 from schmiedc/newswitches

Newswitches
parents 5eb9ca5b b8265033
Showing with 998 additions and 712 deletions
......@@ -6,9 +6,13 @@ JOBDIR=os.path.abspath(os.path.curdir)
if JOBDIR[-1] != "/": # append a trailing slash if JOBDIR does not already end with one
JOBDIR+="/"
#data specific config file, expected to be inside JOBDIR
# configfile: "tomancak_test_cluster.json"
configfile: "tomancak_test_cluster.yaml"
# Test config file single Channel:
# configfile: "single_test.yaml"
# Test config file dual channel one channel contains beads:
configfile: "dual_OneChannel.yaml"
# data specific config file, expected to be inside JOBDIR
# configfile: "tomancak_test_cluster.yaml"
padding_format = "{0:0"+str(padding_of_file_id(int(config["common"]["ntimepoints"])))+"d}"
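A minimal sketch of what the padding helper is assumed to do (padding_of_file_id is the workflow's own function, so this reimplementation is only illustrative): it returns the number of digits needed to zero-pad all timepoint ids, and padding_format then renders ids like "07".

def padding_of_file_id(ntimepoints):
    # assumption: digits needed for the largest zero-based timepoint index
    return max(1, len(str(int(ntimepoints) - 1)))

padding_format = "{0:0" + str(padding_of_file_id(50)) + "d}"
print(padding_format.format(7))   # prints "07"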
......@@ -179,9 +183,15 @@ rule registration:
-Dreg_radius_1={reg_radius_1} \
-Dreg_radius_2={reg_radius_2} \
-Dreg_threshold={reg_threshold} \
-Dinitial_sigma={initial_sigma} \
-Dsigma={sigma} \
-Dthreshold_gaussian={threshold_gaussian} \
-Dregistration_algorithm={algorithm} \
-Dcompute_on={compute_on} \
-Ddirectory_cuda={directory_cuda} \
-Dseparableconvolution={separableconvolution} \
-Ddownsample_detection={downsample_detection} \
-Ddownsample_xy={downsample_xy} \
-Ddownsample_z={downsample_z} \
-Dregistration_algorithm={algorithm} \
-Dreg_interest_points_channel={reg_interest_points_channel} \
-Dfix_tiles={fix_tiles} \
-Dmap_back_tiles={map_back_tiles} \
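The -D switches added above are plain Java system properties handed to the Fiji/BeanShell scripts, which read them back with System.getProperty. A minimal sketch (not the workflow's produce_string helper; the dictionary below just copies values from the registration block of the YAML) of how the placeholders are filled in before shell() runs the command:

registration = {
    "reg_radius_1": "2", "reg_radius_2": "3", "reg_threshold": "0.005",
    "sigma": "1.8", "threshold_gaussian": "0.0080",
    "downsample_detection": "No", "downsample_z": "1x",
}
cmd = (
    "-Dreg_radius_1={reg_radius_1} "
    "-Dreg_radius_2={reg_radius_2} "
    "-Dreg_threshold={reg_threshold} "
    "-Dsigma={sigma} "
    "-Dthreshold_gaussian={threshold_gaussian} "
    "-Ddownsample_detection={downsample_detection} "
    "-Ddownsample_z={downsample_z}"
).format(**registration)
print(cmd)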
......@@ -254,10 +264,10 @@ rule timelapse:
cmd_string += " > {log} 2>&1 && touch {output}"
shell(cmd_string)
rule dublicate_transformations:
rule duplicate_transformations:
input: rules.timelapse.output, merged_xml="{xml_base}_merge.xml"
output: rules.timelapse.output[0] + "_dublicate"
log: "{xml_base}_dublicate_transformations.log"
output: rules.timelapse.output[0] + "_duplicate"
log: "{xml_base}_duplicate_transformations.log"
run:
cmd_string = produce_string(
"""{fiji-prefix} {fiji-app} \
......@@ -296,7 +306,6 @@ rule fusion:
-Dprocess_illumination={process_illumination} \
-Dprocess_angle={process_angle} \
-Dxml_output={xml_output} \
-Dfused_image={fused_image} \
-Dminimal_x={minimal_x} \
-Dminimal_y={minimal_y} \
-Dminimal_z={minimal_z} \
......@@ -380,7 +389,9 @@ rule deconvolution:
-Dosem_acceleration={osem_acceleration} \
-DTikhonov_parameter={Tikhonov_parameter} \
-Dcompute={compute} \
-Dpsf_estimation={psf_estimation} \
-Dcompute_on={compute_on} \
-Dcudafourierconvolution={cudafourierconvolution} \
-Dpsf_estimation={psf_estimation} \
-Ddirectory_cuda={directory_cuda} \
-Ddetections_to_extract_psf_for_channel={detections_to_extract_psf_for_channel} \
-Dpsf_size_x={psf_size_x} \
......
......@@ -4,7 +4,12 @@
"lsf_extra" : "-R \"span[hosts=1]\"",
"lsf_q" : "short"
},
"resave_hdf5" :
{
"lsf_extra" : "-n 7 -R \"span[hosts=1] rusage[mem=50000]\""
},
"registration" :
{
"lsf_extra" : "-R \"span[hosts=1] rusage[mem=100000]\""
......@@ -20,6 +25,11 @@
"lsf_extra" : "-R \"span[hosts=1] rusage[mem=10000]\""
},
"fusion" :
{
"lsf_extra" : "-n 12 -R \"span[hosts=1] rusage[mem=100000]\""
},
"deconvolution" :
{
"lsf_extra" : "-n 7 -R \"span[hosts=1] rusage[mem=50000]\"",
......
......@@ -4,11 +4,11 @@ import ij.Prefs; // calls imagej settings
import java.lang.Runtime;
import java.io.File;
import java.io.FilenameFilter;
runtime = Runtime.getRuntime();
System.out.println(runtime.availableProcessors() + " cores available for multi-threading");
Prefs.setThreads(7); // defines the number of threads allowed
Prefs.setThreads(7); // defines the number of threads allowed
print("Threads: "+Prefs.getThreads()); // prints thread setting in output
System.out.println( "Start to load Parameters:" );
......@@ -27,13 +27,12 @@ int parallel_timepoints = Integer.parseInt(System.getProperty( "parallel_timepoi
process_timepoint = System.getProperty( "process_timepoint" );
process_channel = System.getProperty( "process_channel" );
process_illumination = System.getProperty( "process_illumination" );
process_angle = System.getProperty( "process_angle" );
process_angle = System.getProperty( "process_angle" );
System.out.println( "-------------------------------------------------------" );
System.out.println( "General parameters: " );
System.out.println( "timepoint_processed = " + parallel_timepoints );
System.out.println( "process_timepoints = " + process_timepoint );
System.out.println( "process_timepoints = " + process_timepoint );
System.out.println( "process_channel = " + process_channel );
System.out.println( "process_illumination = " + process_illumination );
System.out.println( "process_angle = " + process_angle );
......@@ -66,7 +65,6 @@ psf_estimation = System.getProperty( "psf_estimation" );
iterations = System.getProperty( "iterations" );
deco_output_file_directory = System.getProperty( "deco_output_file_directory" );
System.out.println( "-------------------------------------------------------" );
System.out.println( "Deconvolution settings: " );
System.out.println( "imglib2_container = " + imglib2_container_deco );
......@@ -78,12 +76,6 @@ System.out.println( "psf_estimation = " + psf_estimation );
System.out.println( "number_of_iterations = " + iterations );
System.out.println( "deco_output_file_directory = " + deco_output_file_directory );
// Search for CUDA
System.out.println( "-------------------------------------------------------" );
System.out.println( "Loading CUDA directory: " );
directory_cuda = System.getProperty( "directory_cuda" );
System.out.println( "directory_cuda = " + directory_cuda );
// PSF Parameters
psf_size_x = System.getProperty( "psf_size_x" );
psf_size_y = System.getProperty( "psf_size_y" );
......@@ -101,24 +93,24 @@ System.out.println( "-------------------------------------------------------" );
System.out.println( "Channel Settings: " );
// Channel setting for Deconvolution
// parses channels and takes from there the number of channels
// parses detections_to_extract_psf_for_channel
// parses detections_to_extract_psf_for_channel
channels = System.getProperty( "channels" );
System.out.println( "Channels = " + channels );
System.out.println( "Channels = " + channels );
detections_to_extract_psf_for_channel = System.getProperty( "detections_to_extract_psf_for_channel" );
System.out.println( "PSF: " + detections_to_extract_psf_for_channel );
// Splits channels and detections_to_extract_psf_for_channel
// Splits channels and detections_to_extract_psf_for_channel
String delims = "[,]";
String[] channel_token = channels.split(delims);
String[] psf_token = detections_to_extract_psf_for_channel.split(delims);
// Assembles channel_string
// Assembles channel_string
StringBuilder channel_string = new StringBuilder();
for (int channel=0; channel < channel_token.length; channel++ )
{
{
String channel_part = "detections_to_extract_psf_for_channel_" + channel_token[channel] + "=" + psf_token[channel] + " ";
channel_string.append( channel_part );
channel_string.append(" ");
......@@ -126,6 +118,45 @@ for (int channel=0; channel < channel_token.length; channel++ )
System.out.println( channel_string );
// GPU/CPU setting
System.out.println( "-------------------------------------------------------" );
System.out.println( "GPU/CPU setting: " );
compute_on = System.getProperty( "compute_on" );
directory_cuda = System.getProperty( "directory_cuda" );
cudafourierconvolution = System.getProperty( "cudafourierconvolution" );
System.out.println( "compute_on = " + compute_on );
System.out.println( "directory_cuda = " + directory_cuda );
System.out.println( "cudafourierconvolution = " + cudafourierconvolution );
String compute_string = "";
String cuda_settings = "";
if (compute_on.equalsIgnoreCase( "GPU (Nvidia CUDA via JNA)" ) )
{
System.out.println( "GPU deconvoultion selected" );
compute_string = "compute_on=[GPU (Nvidia CUDA via JNA)] ";
cuda_settings = "cuda_directory=" + directory_cuda + " " +
"select_native_library_for_cudafourierconvolution=" + cudafourierconvolution + " " +
"gpu_1 ";
}
else if (compute_on.equalsIgnoreCase( "CPU (Java)" ) )
{
System.out.println( "CPU deconvoultion selected" );
compute_string = "compute_on=[" + compute_on + "] ";
cuda_settings = "";
}
else
{
System.out.println( "Deconvolution GPU/CPU selection bad" );
}
System.out.println( "compute_string=" + compute_string );
System.out.println( "cuda_settings=" + cuda_settings );
// Execute Fiji Plugin
System.out.println( "=======================================================" );
System.out.println( "Starting Deconvolution" );
......@@ -154,24 +185,21 @@ IJ.run("Fuse/Deconvolve Dataset",
"use_tikhonov_regularization " +
"tikhonov_parameter=" + Tikhonov_parameter + " " +
"compute=[" + compute + "] " +
"compute_on=[GPU (Nvidia CUDA via JNA)] " +
compute_string +
"psf_estimation=[" + psf_estimation + "] " +
"psf_display=[Do not show PSFs] " +
"output_file_directory=" + deco_output_file_directory + " " +
"cuda_directory=[" + directory_cuda + "] " +
"select_native_library_for_cudafourierconvolution=libFourierConvolutionCUDALib.so " +
"gpu_1 " +
cuda_settings +
channel_string +
"psf_size_x=" + psf_size_x + " " +
"psf_size_y=" + psf_size_y + " " +
"psf_size_z=" + psf_size_z + "");
}
catch ( e ) {
catch ( e ) {
print( "[deconvolution-GPU] caught exception: "+e );
//important to fail the process if exception occurs
runtime.exit(1);
}
/* shutdown */
......
......@@ -48,7 +48,6 @@ String angle_part;
angle_part = "angle_" + num_angles + "=" + angle_token[angle];
angle_string.append(angle_part);
angle_string.append(" ");
}
System.out.println( angle_string );
......@@ -76,9 +75,7 @@ String channel_part;
channel_part = "channel_" + num_channel + "=" + channel_token[channel];
channel_string.append(channel_part);
channel_string.append(" ");
}
System.out.println( "Channel String = " + channel_string );
System.out.println("---------------------------------------------------------");
......@@ -104,7 +101,6 @@ String illum_part;
illum_part = "_______illumination_" + num_illum + "=" + illum_token[illum];
illum_string.append(illum_part);
illum_string.append(" ");
}
System.out.println( illum_string );
......
common: {
# directory that contains the bean shell scripts and Snakefile
bsh_directory: "/projects/pilot_spim/Christopher/snakemake-workflows/spim_registration/timelapse/",
# Directory that contains the cuda libraries
directory_cuda: "/sw/users/schmied/cuda/",
# Directory that contains the current working Fiji
#fiji-app: "/sw/users/schmied/packages/2015-06-08_Fiji.app.cuda/ImageJ-linux64",
fiji-app: "/sw/users/schmied/packages/2015-05-29_Fiji_2.3.9_SNAP.app.cuda/ImageJ-linux64",
fiji-prefix: "/sw/bin/xvfb-run -a", # calls xvfb for Fiji headless mode
# xml file names without .xml suffix
first_xml_filename: "Dual_Channel", # Name of the xml file for the .czi or .tif files
hdf5_xml_filename: '"hdf5_Dual_Channel"', # Name of .xml file for the hdf5 data after resave_hdf5
merged_xml: "hdf5_Dual_Channel_merge", # Name of .xml file after merge
# Describe the dataset
ntimepoints: 2, # number of timepoints of dataset
angles: "0,72,144,216,288", # angles
channels: "green,red", # channels
illumination: "0", # illuminations
pixel_distance_x: '0.28590', # Manual calibration x
pixel_distance_y: '0.28590', # Manual calibration y
pixel_distance_z: '1.50000', # Manual calibration z
pixel_unit: "um", # unit of manual calibration
# Use switches to decide which processing steps you need:
# transformation_switch: "timelapse" for standard processing;
# goes directly into fusion after the timelapse registration
# "timelapse_duplicate" for dual channel processing where only one channel
# contains the beads; duplicates the transformations to the other channel
# (a sketch of how these switches route the workflow follows this block)
transformation_switch: "timelapse_duplicate",
# Switches between content based fusion and deconvolution
# "deconvolution" > for deconvolution
# "fusion" > for content based fusion
fusion_switch: "fusion"
}
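A minimal sketch of how the two switches could route the workflow; the file names are hypothetical placeholders and only the "_duplicate" suffix is taken from the Snakefile hunk above, so this is not the Snakefile's actual rule wiring:

config = {"common": {"transformation_switch": "timelapse_duplicate",
                     "fusion_switch": "fusion"}}

def registered_input(timelapse_output="hdf5_Dual_Channel_merge_timelapse.xml"):
    # hypothetical name; with "timelapse_duplicate" the downstream step waits for
    # the duplicated transformations instead of the plain timelapse output
    if config["common"]["transformation_switch"] == "timelapse_duplicate":
        return timelapse_output + "_duplicate"
    return timelapse_output

final_rule = ("deconvolution"
              if config["common"]["fusion_switch"] == "deconvolution"
              else "fusion")
print(registered_input(), "->", final_rule)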
define_xml_czi: {
first_czi: "2015-02-20_LZ2_Stock48_Stock58.czi", # master .czi file
rotation_around: "X-Axis", # axis of acquisition
bsh_file: "define_czi.bsh" # .bsh script for defining .czi file
}
define_xml_tif: {
# file pattern of the .tif files
# for multi channel use spim_TL{tt}_Angle{a}_Channel{c}.tif
# SPIM file pattern: use tt for zero-padded timepoint numbers
image_file_pattern: 'img_TL{{t}}_Angle{{a}}.tif',
# Settings for ImageJ Opener
type_of_dataset: '"Image Stacks (ImageJ Opener)"',
multiple_timepoints: '"YES (one file per time-point)"', # or NO (one time-point)
multiple_angles: '"YES (one file per angle)"', # or NO (one angle)
multiple_channels: '"NO (one channel)"', # or '"YES (one file per channel)"'
multiple_illumination_directions: '"NO (one illumination direction)"', # or YES (one file per illumination direction)
imglib_container: '"ArrayImg (faster)"', # '"ArrayImg (faster)"'
bsh_file: "define_tif_zip.bsh"
}
resave_hdf5: {
# Resaves .tif or .czi data into hdf5
# Subsampling and resolution settings for hdf5: data dependent
hdf5_chunk_sizes: '"{{ {{32,32,4}}, {{32,32,4}}, {{16,16,16}}, {{16,16,16}} }}"',
subsampling_factors: '"{{ {{1,1,1}}, {{2,2,1}}, {{4,4,1}}, {{8,8,1}} }}"',
# Standard settings for cluster processing
setups_per_partition: '0',
timepoints_per_partition: '1',
resave_timepoint: '"All Timepoints"',
resave_angle: '"All angles"',
resave_channel: '"All channels"',
resave_illumination: '"All illuminations"',
bsh_file: "export.bsh"
}
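The doubled braces in hdf5_chunk_sizes, subsampling_factors and image_file_pattern are most likely there because the values pass through Python's str.format() twice on their way into the Fiji call (once when the command template is filled, once inside Snakemake's shell()); every "{{" collapses to a literal "{". A small sketch of that effect, using the value above:

template = "-Dsubsampling_factors={subsampling_factors}"
value = '"{{ {{1,1,1}}, {{2,2,1}}, {{4,4,1}}, {{8,8,1}} }}"'
once = template.format(subsampling_factors=value)   # braces in the value are still doubled
twice = once.format()                                # second pass collapses {{ }} to { }
print(twice)
# -Dsubsampling_factors="{ {1,1,1}, {2,2,1}, {4,4,1}, {8,8,1} }"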
registration: {
# reg_process_channel:
# Single Channel: '"All channels"'
# Dual Channel: '"All channels"'
# Dual Channel, one channel contains beads: '"Single channel (Select from List)"'
reg_process_channel: '"Single channel (Select from List)"',
# reg_processing_channel:
# Dual channel setting when only one channel contains the beads
reg_processing_channel: '"red"',
# reg_interest_points_channel:
# Single Channel: '"beads"'
# Dual Channel: '"beads,beads"'
# Dual Channel, one channel does not contain beads: '"[DO NOT register this channel],beads"'
reg_interest_points_channel: '"beads"',
# type of detection: '"Difference-of-Mean (Integral image based)"' or '"Difference-of-Gaussian"'
type_of_detection: '"Difference-of-Mean (Integral image based)"',
# Settings for Difference-of-Mean
# For multiple channels 'value1,value2' delimiter is ,
reg_radius_1: '2',
reg_radius_2: '3',
reg_threshold: '0.005',
# Settings for Difference-of-Gaussian
# For multiple channels 'value1,value2' delimiter is ,
sigma: '1.8',
threshold_gaussian: '0.0080',
# Processing setting for Difference-of-Gaussian detection
# compute_on:
compute_on: '"GPU accurate (Nvidia CUDA via JNA)"',
separableconvolution: '"libSeparableConvolutionCUDALib.so"',
# Downsampling settings
downsample_detection: "No", # "No" or "Yes"
downsample_xy: '"Match Z Resolution (less downsampling)"',
downsample_z: "1x",
# Standard Settings for bead based registration
label_interest_points: '"beads"',
reg_process_timepoint: '"Single Timepoint (Select from List)"',
reg_process_angle: '"All angles"',
reg_process_illumination: '"All illuminations"',
subpixel_localization: '"3-dimensional quadratic fit"',
detection_min_max: "find_maxima",
type_of_registration: '"Register timepoints individually"',
algorithm: '"Fast 3d geometric hashing (rotation invariant)"',
transformation_model: "Affine",
allowed_error_for_ransac: '5',
significance: '10',
fix_tiles: '"Fix first tile"',
map_back_tiles: '"Map back to first tile using rigid model"',
model_to_regularize_with: "Rigid",
lambda: '0.10',
imglib_container: '"ArrayImg (faster)"',
bsh_file: "registration.bsh" # .bsh script for registration
}
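Which of the detection settings above are consumed depends on type_of_detection; a small sketch of the split as described by the comments (values copied from this block):

type_of_detection = "Difference-of-Mean (Integral image based)"
difference_of_mean = {"reg_radius_1": "2", "reg_radius_2": "3",
                      "reg_threshold": "0.005"}
difference_of_gaussian = {"sigma": "1.8", "threshold_gaussian": "0.0080",
                          "compute_on": "GPU accurate (Nvidia CUDA via JNA)",
                          "separableconvolution": "libSeparableConvolutionCUDALib.so"}
active = (difference_of_mean
          if type_of_detection.startswith("Difference-of-Mean")
          else difference_of_gaussian)
print(sorted(active))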
xml_merge: {
bsh_file: "xml_merge.bsh"
}
timelapse: {
reference_timepoint: '0', # Reference timepoint
# Standard settings for timelapse registration
type_of_registration_timelapse: '"Match against one reference timepoint (no global optimization)"',
timelapse_process_timepoints: '"All Timepoints"',
bsh_file: "timelapse_registration.bsh"
}
dublicate_transformations: {
# For dual channel processing where only one channel contains beads
# this duplicates the transformations to the
# channel that does not contain beads
source_dublication: "red", # source channel
target_dublication: "green", # target channel
duplicate_which_transformations: '"Replace all transformations"', # mode of duplication
bsh_file: "dublicate_transformations.bsh" # .bsh script for the duplication
}
fusion: {
# content based multiview fusion
# supports multi channel without new settings
downsample: '1', # set downsampling
# Cropping parameters of full resolution
minimal_x: '220',
minimal_y: '40',
minimal_z: '-290',
maximal_x: '976',
maximal_y: '1892',
maximal_z: '472',
# fused_image: '"Append to current XML Project"', does not work yet
process_timepoint: '"Single Timepoint (Select from List)"',
process_angle: '"All angles"',
process_channel: '"All channels"',
process_illumination: '"All illuminations"',
imglib2_container_fusion: '"ArrayImg"',
interpolation: '"Linear Interpolation"',
pixel_type: '"16-bit unsigned integer"',
imglib2_data_container: '"ArrayImg (faster)"',
process_views_in_paralell: '"All"',
xml_output: '"Save every XML with user-provided unique id"',
bsh_file: "fusion.bsh"
}
external_transform: {
# Downsamples the data for deconvolution
# BUG: external transformation breaks .xml file
# channel setting: '"all_channels"'
channel_setting: '"green,red"',
transform_timepoint: '"All Timepoints"',
transform_angle: '"All angles"',
transform_channel: '"All channels"',
transform_illumination: '"All illuminations"',
apply_transformation: '"Current view transformations (appends to current transforms)"',
define_mode_transform: '"Matrix"',
# Matrix for downsampling
matrix_transform: '"0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0"',
transformation: '"Rigid"',
bsh_file: "transform.bsh"
}
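The twelve numbers in matrix_transform are read here as a 3x4 affine matrix in row-major order (an assumption about the Fiji "Matrix" mode, not stated in this file); with 0.5 on the diagonal every coordinate is halved, i.e. a 2x downsampling:

m = [0.5, 0.0, 0.0, 0.0,
     0.0, 0.5, 0.0, 0.0,
     0.0, 0.0, 0.5, 0.0]

def apply_affine(m, point):
    x, y, z = point
    return (m[0]*x + m[1]*y + m[2]*z  + m[3],
            m[4]*x + m[5]*y + m[6]*z  + m[7],
            m[8]*x + m[9]*y + m[10]*z + m[11])

print(apply_affine(m, (976.0, 1892.0, 472.0)))   # -> (488.0, 946.0, 236.0)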
deconvolution: {
iterations: '1', # number of iterations
# Cropping parameters: take downsampling into account
minimal_x_deco: '190',
minimal_y_deco: '-16',
minimal_z_deco: '-348',
maximal_x_deco: '1019',
maximal_y_deco: '1941',
maximal_z_deco: '486',
# Channel settings for deconvolution
# Single Channel: '"beads"'
# Dual Channel: '"beads,beads"'
# Dual Channel one channel contains beads: '"[Same PSF as channel red],beads"'
detections_to_extract_psf_for_channel: '"[Same PSF as channel red],beads"',
# Settings for GPU or CPU processing
# '"CPU (Java)"' or '"GPU (Nvidia CUDA via JNA)"'
compute_on: '"GPU (Nvidia CUDA via JNA)"',
cudafourierconvolution: "libFourierConvolutionCUDALib.so", # GPU processing name of cuda library
# Standard settings for deconvolution
process_timepoint: '"Single Timepoint (Select from List)"',
process_angle: '"All angles"',
process_channel: '"All channels"',
process_illumination: '"All illuminations"',
type_of_iteration: '"Efficient Bayesian - Optimization I (fast, precise)"',
Tikhonov_parameter: '0.0006',
compute: '"in 512x512x512 blocks"',
osem_acceleration: '"1 (balanced)"',
psf_estimation: '"Extract from beads"',
psf_size_x: '19',
psf_size_y: '19',
psf_size_z: '25',
imglib2_container: '"ArrayImg"',
bsh_file: "deconvolution.bsh"
}
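A Python rendering of the per-channel expansion that deconvolution.bsh performs (see the channel_string loop in the diff above): channels and detections_to_extract_psf_for_channel are split on "," and paired position by position.

channels = "green,red"
detections_to_extract_psf_for_channel = "[Same PSF as channel red],beads"

pairs = zip(channels.split(","),
            detections_to_extract_psf_for_channel.split(","))
channel_string = " ".join("detections_to_extract_psf_for_channel_%s=%s" % (ch, psf)
                          for ch, psf in pairs)
print(channel_string)
# detections_to_extract_psf_for_channel_green=[Same PSF as channel red] detections_to_extract_psf_for_channel_red=beads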
hdf5_output: {
# writes new hdf5 dataset for fusion output: will be obsolete
# Naming pattern of output
# Single Channel: TP{t}_Chgreen_Ill0_Ang0,72,144,216,288.tif > Ch{name} is added here
# Dual Channel: TP{t}_Ch{0}_Ill0_Ang0,72,144,216,288.tif > Ch{name} is added here
output_image_file_pattern: 'TP{{t}}_Ch{{c}}_Ill0_Ang0,72,144,216,288.tif',
# channel setting
output_multiple_channels: '"YES (one file per channel)"', # '"YES (one file per channel)"' or '"NO (one channel)"'
output_channels: "green,red",
# .xml file names
output_xml: '"fused_Dual_Channel"',
output_hdf5_xml: '"hdf5_fused_Dual_Channel"',
output_timepoints: '0-1', # Timepoints format: '1-2'
# pixel size of output: take downsampling into account!
output_pixel_distance_x: 0.28590,
output_pixel_distance_y: 0.28590,
output_pixel_distance_z: 0.28590,
output_pixel_unit: 'um',
# Specify whether the data is 16Bit or 32Bit
# the output of fusion is 16Bit, the output of deconvolution is 32Bit
output_data_type: "16Bit", # "32Bit" or "16Bit"
# if the data is 32Bit it is converted into 16Bit data
convert_32bit: '"[Use min/max of first image (might saturate intenities over time)]"',
# subsampling and chunk size settings: dataset dependent
subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}}, {{4,4,4}}, {{8,8,8}} }}"', # data dependent
chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}}, {{16,16,16}}, {{16,16,16}} }}"', # data dependent
# subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}} }}"',
# chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}} }}"',
# Standard settings for hdf5_output
output_type_of_dataset: '"Image Stacks (ImageJ Opener)"',
output_multiple_timepoints: '"YES (one file per time-point)"',
output_multiple_angles: '"NO (one angle)"',
output_illumination_directions: '"NO (one illumination direction)"',
output_imglib_container: '"ArrayImg (faster)"',
bsh_file_define: "define_output.bsh", # .bsh script for defining the dataset
bsh_file_hdf5: "export_output.bsh" # .bsh script for resaving into hdf5
}
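A small sketch of the file names the dual channel output_image_file_pattern describes once the doubled braces have collapsed; the actual substitution of {t} and {c} is done by the Fiji opener, so this is only an illustration:

pattern = "TP{t}_Ch{c}_Ill0_Ang0,72,144,216,288.tif"
print(pattern.format(t=0, c="green"))   # TP0_Chgreen_Ill0_Ang0,72,144,216,288.tif
print(pattern.format(t=1, c="red"))     # TP1_Chred_Ill0_Ang0,72,144,216,288.tif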
common: {
# directory that contains the bean shell scripts
bsh_directory: "/projects/pilot_spim/Christopher/snakemake-workflows/spim_registration/timelapse/",
directory_cuda: "/sw/users/schmied/packages/2015-06-08_Fiji.app.cuda/lib/",
fiji-app: "/sw/users/schmied/packages/2015-06-08_Fiji.app.cuda/ImageJ-linux64",
fiji-prefix: "/sw/bin/xvfb-run -a",
first_xml_filename: "test_unicore",
hdf5_xml_filename: '"hdf5_test_unicore"',
merged_xml: "hdf5_test_unicore_merge",
ntimepoints: 5,
angles: "0,72,144,216,288",
channels: "green",
illumination: "0",
pixel_distance_x: '0.28590106964',
pixel_distance_y: '0.28590106964',
pixel_distance_z: '1.50000',
pixel_unit: "um",
# transformation_switch: "timelapse_dublicate",
transformation_switch: "timelapse",
# fusion_switch: "deconvolution"
# directory that contains the bean shell scripts and Snakefile
bsh_directory: "/projects/pilot_spim/Christopher/snakemake-workflows/spim_registration/timelapse/",
# Directory that contains the cuda libraries
directory_cuda: "/sw/users/schmied/cuda/",
# Directory that contains the current working Fiji
fiji-app: "/sw/users/schmied/packages/2015-06-30_Fiji.app.cuda/ImageJ-linux64",
fiji-prefix: "/sw/users/schmied/packages/xvfb-run -a", # calls xvfb for Fiji headless mode
# xml file names without .xml suffix
first_xml_filename: 'single', # Name of the xml file for the .czi or .tif files
hdf5_xml_filename: '"hdf5_single"', # Name of .xml file for the hdf5 data after resave_hdf5
merged_xml: 'hdf5_single_merge', # Name of .xml file after merge
# Describe the dataset
ntimepoints: 2, # number of timepoints of dataset
angles: "0,72,144,216,288", # angles
channels: "green", # channels
illumination: "0", # illuminations
pixel_distance_x: '0.28590106964', # Manual calibration x
pixel_distance_y: '0.28590106964', # Manual calibration y
pixel_distance_z: '1.50000', # Manual calibration z
pixel_unit: "um", # unit of manual calibration
# Use switches to decide which processing steps you need:
# transformation_switch: "timelapse" for standard processing;
# goes directly into fusion after the timelapse registration
# "timelapse_duplicate" for dual channel processing where only one channel
# contains the beads; duplicates the transformations to the other channel
transformation_switch: "timelapse",
# Switches between content based fusion and deconvolution
# "deconvolution" > for deconvolution
# "fusion" > for content based fusion
fusion_switch: "fusion"
}
define_xml_czi: {
first_czi: "2015-04-11_LZ2_Stock68_3.czi",
rotation_around: "X-Axis",
bsh_file: "define_czi.bsh"
first_czi: "2015-02-21_LZ1_Stock68_3.czi", # master .czi file
rotation_around: "X-Axis", # axis of acquisition
bsh_file: "define_czi.bsh" # .bsh script for defining .czi file
}
define_xml_tif: {
image_file_pattern: 'img_TL{{t}}_Angle{{a}}.tif',
imglib_container: '"ArrayImg (faster)"',
multiple_angles: '"YES (one file per angle)"',
multiple_channels: '"NO (one channel)"',
multiple_illumination_directions: '"NO (one illumination direction)"',
multiple_timepoints: '"YES (one file per time-point)"',
# file pattern of the .tif files
# for multi channel use spim_TL{tt}_Angle{a}_Channel{c}.tif
# SPIM file pattern: use tt for zero-padded timepoint numbers
image_file_pattern: 'img_TL{{t}}_Angle{{a}}.tif',
# Settings for ImageJ Opener
type_of_dataset: '"Image Stacks (ImageJ Opener)"',
bsh_file: "define_tif_zip.bsh"
multiple_timepoints: '"YES (one file per time-point)"', # or NO (one time-point)
multiple_angles: '"YES (one file per angle)"', # or NO (one angle)
multiple_channels: '"NO (one channel)"', # or '"YES (one file per channel)"'
multiple_illumination_directions: '"NO (one illumination direction)"', # or YES (one file per illumination direction)
imglib_container: '"ArrayImg (faster)"', # '"ArrayImg (faster)"'
bsh_file: "define_tif_zip.bsh"
}
resave_hdf5: {
hdf5_chunk_sizes: '"{{ {{32,32,4}}, {{32,32,4}}, {{16,16,16}}, {{16,16,16}} }}"',
# Resaves .tif or .czi data into hdf5
# Subsampling and resolution settings for hdf5: data dependent
hdf5_chunk_sizes: '"{{ {{32,32,4}}, {{32,32,4}}, {{16,16,16}}, {{16,16,16}} }}"',
subsampling_factors: '"{{ {{1,1,1}}, {{2,2,1}}, {{4,4,1}}, {{8,8,1}} }}"',
# Standard settings for cluster processing
setups_per_partition: '0',
timepoints_per_partition: '1',
resave_timepoint: '"All Timepoints"',
resave_angle: '"All angles"',
resave_channel: '"All channels"',
resave_illumination: '"All illuminations"',
resave_timepoint: '"All Timepoints"',
setups_per_partition: '0',
subsampling_factors: '"{{ {{1,1,1}}, {{2,2,1}}, {{4,4,1}}, {{8,8,1}} }}"',
timepoints_per_partition: '1',
bsh_file: "export.bsh"
}
registration: {
# "Single Channel" is not a valid choice for "Process_channel"
reg_process_channel: '"All channels"',
reg_processing_channel: '"green"',
# reg_process_channel:
# Single Channel: '"All channels"'
# Dual Channel: '"All channels"'
# Dual Channel, one channel contains beads: '"Single channel (Select from List)"'
reg_process_channel: '"All channels"',
# reg_processing_channel:
# Dual channel setting when only one channel contains the beads
reg_processing_channel: '"green"',
# reg_interest_points_channel:
# Single Channel: '"beads"'
# Dual Channel: '"beads,beads"'
# Dual Channel, one channel does not contain beads: '"[DO NOT register this channel],beads"'
reg_interest_points_channel: '"beads"',
reg_radius_1: '2',
reg_radius_2: '3',
# type of detection: '"Difference-of-Mean (Integral image based)"' or '"Difference-of-Gaussian"'
type_of_detection: '"Difference-of-Mean (Integral image based)"',
# Settings for Difference-of-Mean
# For multiple channels 'value1,value2' delimiter is ,
reg_radius_1: '2',
reg_radius_2: '3',
reg_threshold: '0.005',
initial_sigma: '1.8',
threshold_gaussian: '0.0080',
type_of_detection: '"Difference-of-Mean (Integral image based)"',
label_interest_points: '"beads"',
# Settings for Difference-of-Gaussian
# For multiple channels 'value1,value2' delimiter is ,
sigma: '1.8',
threshold_gaussian: '0.0080',
# Processing setting for Difference-of-Gaussian detection
# compute_on:
compute_on: '"GPU accurate (Nvidia CUDA via JNA)"',
separableconvolution: '"libSeparableConvolutionCUDALib.so"',
# Downsampling settings
downsample_detection: "No", # "No" or "Yes"
downsample_xy: '"Match Z Resolution (less downsampling)"',
downsample_z: "1x",
# Standard Settings for bead based registration
label_interest_points: '"beads"',
reg_process_timepoint: '"Single Timepoint (Select from List)"',
reg_process_angle: '"All angles"',
reg_process_illumination: '"All illuminations"',
......@@ -77,7 +117,7 @@ registration: {
model_to_regularize_with: "Rigid",
lambda: '0.10',
imglib_container: '"ArrayImg (faster)"',
bsh_file: "registration.bsh"
bsh_file: "registration.bsh" # .bsh script for registration
}
xml_merge: {
......@@ -85,105 +125,135 @@ xml_merge: {
}
timelapse: {
reference_timepoint: '0',
reference_timepoint: '0', # Reference timepoint
# Standard settings for timelapse registration
type_of_registration_timelapse: '"Match against one reference timepoint (no global optimization)"',
timelapse_process_timepoints: '"All Timepoints"',
bsh_file: "timelapse_registration.bsh"
}
Dublicate_transformations: {
source_dublication: "red",
target_dublication: "green",
duplicate_which_transformations: '"Replace all transformations"',
bsh_file: "Dublicate_transformations.bsh"
dublicate_transformations: {
# For dual channel processing where only one channel contains beads
# this duplicates the transformations to the
# channel that does not contain beads
source_dublication: "red", # source channel
target_dublication: "green", # target channel
duplicate_which_transformations: '"Replace all transformations"', # mode of duplication
bsh_file: "dublicate_transformations.bsh" # .bsh script for the duplication
}
fusion: {
bsh_file: "fusion.bsh",
downsample: '4',
fused_image: '"Append to current XML Project"',
imglib2_container_fusion: '"ArrayImg"',
imglib2_data_container: '"ArrayImg (faster)"',
interpolation: '"Linear Interpolation"',
minimal_x: '190',
minimal_y: '-16',
minimal_z: '-348',
maximal_x: '1019',
maximal_y: '1941',
maximal_z: '486',
pixel_type: '"16-bit unsigned integer"',
# content based multiview fusion
# supports multi channel without new settings
downsample: '2', # set downsampling
# Cropping parameters of full resolution
minimal_x: '190',
minimal_y: '-16',
minimal_z: '-348',
maximal_x: '1019',
maximal_y: '1941',
maximal_z: '486',
# fused_image: '"Append to current XML Project"', does not work yet
process_timepoint: '"Single Timepoint (Select from List)"',
process_angle: '"All angles"',
process_channel: '"All channels"',
process_illumination: '"All illuminations"',
process_timepoint: '"Single Timepoint (Select from List)"',
imglib2_container_fusion: '"ArrayImg"',
interpolation: '"Linear Interpolation"',
pixel_type: '"16-bit unsigned integer"',
imglib2_data_container: '"ArrayImg (faster)"',
process_views_in_paralell: '"All"',
xml_output: '"Save every XML with user-provided unique id"'
xml_output: '"Save every XML with user-provided unique id"',
bsh_file: "fusion.bsh"
}
external_transform: {
# Downsamples the data for deconvolution
# BUG: external transformation breaks .xml file
# channel setting: '"all_channels"'
channel_setting: '"green"',
transform_timepoint: '"All Timepoints"',
transform_angle: '"All angles"',
transform_channel: '"All channels"',
# illumination setting only one illumination side
transform_illumination: '"All illuminations"',
apply_transformation: '"Current view transformations (appends to current transforms)"',
define_mode_transform: '"Matrix"',
matrix_transform: '"0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0"',
# Matrix for downsampling
matrix_transform: '"0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0"',
transformation: '"Rigid"',
bsh_file: "transform.bsh"
}
deconvolution: {
iterations: '1',
minimal_x_deco: '190',
minimal_y_deco: '-16',
minimal_z_deco: '-348',
maximal_x_deco: '1019',
maximal_y_deco: '1941',
maximal_z_deco: '486',
detections_to_extract_psf_for_channel: '"beads"',
iterations: '5', # number of iterations
# Cropping parameters: take downsampling into account
minimal_x_deco: '190',
minimal_y_deco: '-16',
minimal_z_deco: '-348',
maximal_x_deco: '1019',
maximal_y_deco: '1941',
maximal_z_deco: '486',
# Channel settings for deconvolution
# Single Channel: '"beads"'
# Dual Channel: '"beads,beads"'
# Dual Channel one channel contains beads: '"[Same PSF as channel red],beads"'
detections_to_extract_psf_for_channel: '"[Same PSF as channel red],beads"',
# Settings for GPU or CPU processing
# '"CPU (Java)"' or '"GPU (Nvidia CUDA via JNA)"'
compute_on: '"GPU (Nvidia CUDA via JNA)"',
cudafourierconvolution: "libFourierConvolutionCUDALib.so", # GPU processing name of cuda library
# Standard settings for deconvolution
process_timepoint: '"Single Timepoint (Select from List)"',
process_angle: '"All angles"',
process_channel: '"All channels"',
process_illumination: '"All illuminations"',
type_of_iteration: '"Efficient Bayesian - Optimization I (fast, precise)"',
Tikhonov_parameter: '0.0006',
compute: '"in 512x512x512 blocks"',
compute_on: '"GPU (Nvidia CUDA via JNA)"',
imglib2_container: '"ArrayImg"',
osem_acceleration: '"1 (balanced)"',
psf_estimation: '"Extract from beads"',
psf_size_x: '19',
psf_size_y: '19',
psf_size_z: '25',
type_of_iteration: '"Efficient Bayesian - Optimization I (fast, precise)"',
bsh_file: "deconvolution_GPU.bsh"
imglib2_container: '"ArrayImg"',
bsh_file: "deconvolution.bsh"
}
hdf5_output: {
output_image_file_pattern: 'TP{{t}}_Chgreen_Ill0_Ang0,72,144,216,288.tif',
output_xml: '"fused_Single_Channel"',
output_hdf5_xml: '"hdf5_fused_Single_Channel"',
output_multiple_channels: '"NO (one channel)"',
output_timepoints: '0-4',
output_pixel_distance_x: 0.5718,
output_pixel_distance_y: 0.5718,
output_pixel_distance_z: 0.5718,
output_pixel_unit: 'um',
# writes new hdf5 dataset for fusion output: will be obsolete
# Naming pattern of output
# Single Channel: TP{{t}}_Chgreen_Ill0_Ang0,72,144,216,288.tif > Ch{name} is added here
# Dual Channel: TP{{t}}_Ch{{0}}_Ill0_Ang0,72,144,216,288.tif > Ch{name} is added here
output_image_file_pattern: '"TP{{t}}_Chgreen_Ill0_Ang0,72,144,216,288.tif"',
# channel setting
output_multiple_channels: '"NO (one channel)"', # '"YES (one file per channel)"' or '"NO (one channel)"'
output_channels: "green",
output_data_type: "32Bit",
# .xml file names
output_xml: '"fused_Single"',
output_hdf5_xml: '"hdf5_fused_Single"',
output_timepoints: '0-1', # Timepoints format: '1-2'
# pixel size of output: take downsampling into account!
output_pixel_distance_x: 0.28590106964,
output_pixel_distance_y: 0.28590106964,
output_pixel_distance_z: 0.28590106964,
output_pixel_unit: 'um',
# Specify whether the data is 16Bit or 32Bit
# the output of fusion is 16Bit, the output of deconvolution is 32Bit
output_data_type: "16Bit", # "32Bit" or "16Bit"
# if the data is 32Bit it is converted into 16Bit data
convert_32bit: '"[Use min/max of first image (might saturate intenities over time)]"',
# subsampling and chunk size settings: dataset dependent
subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}}, {{4,4,4}}, {{8,8,8}} }}"', # data dependent
chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}}, {{16,16,16}}, {{16,16,16}} }}"', # data dependent
# subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}} }}"',
# chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}} }}"',
# Standard settings for hdf5_output
output_type_of_dataset: '"Image Stacks (ImageJ Opener)"',
output_multiple_timepoints: '"YES (one file per time-point)"',
output_multiple_angles: '"NO (one angle)"',
output_illumination_directions: '"NO (one illumination direction)"',
output_imglib_container: '"ArrayImg (faster)"',
subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}}, {{4,4,4}}, {{8,8,8}} }}"',
chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}}, {{16,16,16}}, {{16,16,16}} }}"',
# subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}} }}"',
# chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}} }}"',
bsh_file_define: "define_output.bsh",
bsh_file_hdf5: "export_output.bsh"
bsh_file_define: "define_output.bsh", # .bsh script for defining the dataset
bsh_file_hdf5: "export_output.bsh" # .bsh script for resaving into hdf5
}
{
"common" :
{
"fiji-app" : "/projects/hpcsupport/steinbac/unicore/christopher/unicore_jobs/Fiji.app.cuda_new/ImageJ-linux64",
"fiji-prefix" : "/sw/bin/xvfb-run -a",
"directory_cuda" : "/lustre/projects/hpcsupport/steinbac/unicore/christopher/unicore_jobs/Fiji.app.cuda_new/lib/",
"merged_xml" : "hdf5_test_unicore_merge"
},
"registration" :
{
"timepoint" : "\"Single Timepoint (Select from List)\"",
"illuminations" : "\"All illuminations\"",
"angle" : "\"All angles\"",
"channel" : "\"All channels\"",
"proc-ch" : "\"channel 1\"",
"algorithm" : "\"Fast 3d geometric hashing (rotation invariant)\"",
"label_interest_points" : "\"beads\"",
"type_of_registration" : "\"Register timepoints individually\"",
"type_of_registration_timelapse" : "\"Match against one reference timepoint (no global optimization)\"",
"type_of_detection" : "\"Difference-of-Mean (Integral image based)\"" ,
"subpixel_localization" : "\"3-dimensional quadratic fit\"",
"imglib_container" : "\"ArrayImg (faster)\"",
"radius_1" : "2",
"radius_2" : "3",
"threshold" : "0.005",
"interest_points_channel_0" : "\"[DO NOT register this channel]\"" ,
"interest_points_channel_1" : "\"beads\"",
"fix_tiles" : "\"Fix first tile\"" ,
"map_back_tiles" : "\"Map back to first tile using rigid model\"",
"transformation_model" : "Affine",
"model_to_regularize_with" : "Rigid",
"lambda" : "0.10" ,
"allowed_error_for_ransac" : "5",
"detection_min_max" : "find_maxima",
"initial_sigma" : "1.8",
"threshold_gaussian" : "0.0080",
"bsh_file" : "registration.bsh"
},
"xml_merge" :
{
"bsh_file" : "xml_merge.bsh"
},
"external_transform" :
{
"bsh_file" : "transform.bsh",
"angle" : "\"All angles\"",
"channel" : "\"All channels\"",
"illumination" : "\"All illuminations\"",
"timepoint" : "\"All Timepoints\"",
"transformation" : "\"Rigid\"",
"apply_transformation" : "\"Current view transformations (appends to current transforms)\"",
"define_mode_transform" : "\"Matrix\"",
"matrix_transform" : "\"0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0\""
},
"deconvolution" :
{
"bsh_file" : "deconvolution_GPU.bsh",
"process_timepoint" : "\"Single Timepoint (Select from List)\"",
"process_channel" : "\"All channels\"",
"process_illumination" : "\"All illuminations\"",
"process_angle" : "\"All angles\"",
"minimal_x" : "76",
"minimal_y" : "4",
"minimal_z" : "-192",
"maximal_x" : "488",
"maximal_y" : "956",
"maximal_z" : "214",
"imglib2_container" : "\"ArrayImg \"",
"type_of_iteration" : "\"Efficient Bayesian - Optimization I (fast, precise)\"",
"osem_acceleration" : "\"1 (balanced)\"",
"Tikhonov_parameter" : "0.0006",
"compute" : "\"in 512x512x512 blocks\"",
"compute_on" : "\"GPU (Nvidia CUDA via JNA)\"",
"psf_estimation" : "\"Extract from beads\"",
"iterations" : "5",
"detections_to_extract_psf_for_channel_0" : "\"beads\"",
"detections_to_extract_psf_for_channel_1" : "\"beads\"",
"psf_size_x" : "19",
"psf_size_y" : "19",
"psf_size_z" : "25"
}
}
{
"common" :
{
"fiji-app" : "/projects/hpcsupport/steinbac/unicore/christopher/unicore_jobs/Fiji.app.cuda_new/ImageJ-linux64",
"fiji-prefix" : "/sw/bin/xvfb-run -a",
"directory_cuda" : "/lustre/projects/hpcsupport/steinbac/unicore/christopher/unicore_jobs/Fiji.app.cuda_new/lib/",
"merged_xml" : "hdf5_test_unicore_merge",
"bsh_directory" : "/home/steinbac/development/cschmied-snakemake-workflows/spim_registration/timelapse/",
"first_xml_filename" : "test_unicore",
"hdf5_xml_filename" : "\"hdf5_test_unicore\"",
"fusion_switch" : "deconvolution",
"ntimepoints" : 3
},
"define_xml_czi" :
{
"pixel_distance_x" : "0.2875535786151886",
"pixel_distance_y" : "0.2875535786151886",
"pixel_distance_z" : "1.50000",
"pixel_unit" : "um",
"first_czi" : "2015-02-21_LZ1_Stock68_3.czi",
"channel_1" : "green",
"channel_2" : "red",
"angle_1" : "0",
"angle_2" : "72",
"angle_3" :"144",
"angle_4" :"216",
"angle_5" : "288",
"illumination_1" : "0",
"rotation_around" : "X-Axis",
"bsh_file" : "define_czi.bsh"
},
"define_xml_tif" :
{
"timepoints" : "0-1",
"acquisition_angles" : "0,72,144,216,288",
"channels" : "0",
"image_file_pattern" : "img_TL{{t}}_Angle{{a}}.tif",
"pixel_distance_x" : "0.2875535786151886",
"pixel_distance_y" : "0.2875535786151886",
"pixel_distance_z" : "1.50000",
"pixel_unit" : "um",
"multiple_timepoints" : "\"YES (one file per time-point)\"",
"multiple_channels" : "\"NO (one channel)\"",
"multiple_illumination_directions" : "\"NO (one illumination direction)\"",
"multiple_angles" : "\"YES (one file per angle)\"",
"type_of_dataset" : "\"Image Stacks (ImageJ Opener)\"",
"imglib_container" : "\"ArrayImg (faster)\"",
"bsh_file" : "define_tif_zip.bsh"
},
"resave_hdf5" :
{
"parallel_timepoints" : "3",
"resave_angle" : "\"All angles\"",
"resave_channel" : "\"All channels\"",
"resave_illumination" : "\"All illuminations\"",
"resave_timepoint" : "\"All Timepoints\"",
"subsampling_factors" : "\"{{ {{1,1,1}}, {{2,2,1}}, {{4,4,1}}, {{8,8,1}} }}\"",
"hdf5_chunk_sizes" : "\"{{ {{32,32,4}}, {{32,32,4}}, {{16,16,16}}, {{16,16,16}} }}\"",
"timepoints_per_partition" : "1",
"setups_per_partition" : "0",
"bsh_file" : "export.bsh"
},
"registration" :
{
"timepoint" : "\"Single Timepoint (Select from List)\"",
"illuminations" : "\"All illuminations\"",
"angle" : "\"All angles\"",
"channel" : "\"All channels\"",
"proc-ch" : "\"channel 0\"",
"algorithm" : "\"Fast 3d geometric hashing (rotation invariant)\"",
"label_interest_points" : "\"beads\"",
"type_of_registration" : "\"Register timepoints individually\"",
"type_of_detection" : "\"Difference-of-Mean (Integral image based)\"" ,
"subpixel_localization" : "\"3-dimensional quadratic fit\"",
"imglib_container" : "\"ArrayImg (faster)\"",
"radius_1" : "2",
"radius_2" : "3",
"threshold" : "0.005",
"interest_points_channel_0" : "\"beads\"",
"interest_points_channel_1" : "\"beads\"",
"fix_tiles" : "\"Fix first tile\"",
"map_back_tiles" : "\"Map back to first tile using rigid model\"",
"transformation_model" : "Affine",
"model_to_regularize_with" : "Rigid",
"lambda" : "0.10" ,
"allowed_error_for_ransac" : "5",
"significance" : "10",
"detection_min_max" : "find_maxima",
"initial_sigma" : "1.8",
"threshold_gaussian" : "0.0080",
"bsh_file" : "registration.bsh"
},
"xml_merge" :
{
"bsh_file" : "xml_merge.bsh"
},
"timelapse" :
{
"reference_timepoint" : "0",
"timelapse_process_timepoints" : "\"All Timepoints\"",
"type_of_registration_timelapse" : "\"Match against one reference timepoint (no global optimization)\"",
"bsh_file" : "timelapse_registration.bsh"
},
"external_transform" :
{
"bsh_file" : "transform.bsh",
"angle" : "\"All angles\"",
"channel" : "\"All channels\"",
"illumination" : "\"All illuminations\"",
"timepoint" : "\"All Timepoints\"",
"transformation" : "\"Rigid\"",
"apply_transformation" : "\"Current view transformations (appends to current transforms)\"",
"define_mode_transform" : "\"Matrix\"",
"matrix_transform" : "\"0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0\""
},
"fusion" :
{
"minimal_x" : "128",
"minimal_y" : "-13",
"minimal_z" : "-407",
"maximal_x" : "986",
"maximal_y" : "1927",
"maximal_z" : "498",
"downsample" : "1",
"process_timepoint" : "\"Single Timepoint (Select from List)\"",
"process_channel" : "\"All channels\"",
"process_illumination" : "\"All illuminations\"",
"process_angle" : "\"All angles\"",
"xml_output" : "\"Save every XML with user-provided unique id\"",
"fused_image" : "\"Append to current XML Project\"",
"pixel_type" : "\"16-bit unsigned integer\"",
"imglib2_container_fusion" : "\"ArrayImg\"",
"process_views_in_paralell" : "\"All\"",
"interpolation" : "\"Linear Interpolation\"",
"imglib2_data_container" : "\"ArrayImg (faster)\"",
"bsh_file" : "fusion.bsh"
},
"deconvolution" :
{
"bsh_file" : "deconvolution_GPU.bsh",
"process_timepoint" : "\"Single Timepoint (Select from List)\"",
"process_channel" : "\"All channels\"",
"process_illumination" : "\"All illuminations\"",
"process_angle" : "\"All angles\"",
"minimal_x" : "76",
"minimal_y" : "4",
"minimal_z" : "-192",
"maximal_x" : "488",
"maximal_y" : "956",
"maximal_z" : "214",
"imglib2_container" : "\"ArrayImg \"",
"type_of_iteration" : "\"Efficient Bayesian - Optimization I (fast, precise)\"",
"osem_acceleration" : "\"1 (balanced)\"",
"Tikhonov_parameter" : "0.0006",
"compute" : "\"in 512x512x512 blocks\"",
"compute_on" : "\"GPU (Nvidia CUDA via JNA)\"",
"psf_estimation" : "\"Extract from beads\"",
"iterations" : "5",
"detections_to_extract_psf_for_channel_0" : "\"beads\"",
"detections_to_extract_psf_for_channel_1" : "\"beads\"",
"psf_size_x" : "19",
"psf_size_y" : "19",
"psf_size_z" : "25"
},
"hdf5_output" :
{
"output_image_file_pattern" : "TP{{t}}_Ch{{c}}_Ill0_Ang0,72,144,216,288.tif",
"output_data_type" : "32Bit",
"output_xml" : "\"fused_Dual_Channel\"",
"output_hdf5_xml" : "\"hdf5_fused_Stock68\"",
"output_multiple_channels" : "\"NO (one channel)\"",
"output_timepoints" : "0-1",
"output_channels" : "green",
"output_pixel_distance_x" : "0.5718",
"output_pixel_distance_y" : "0.5718",
"output_pixel_distance_z" : "0.5718",
"output_pixel_unit" : "um",
"output_multiple_timepoints" : "\"YES (one file per time-point)\"",
"output_illumination_directions" : "\"NO (one illumination direction)\"",
"output_multiple_angles" : "\"NO (one angle)\"",
"output_type_of_dataset" : "\"Image Stacks (ImageJ Opener)\"",
"output_imglib_container" : "\"ArrayImg (faster)\"",
"bsh_file_define" : "/define_output.bsh",
"bsh_file_hdf5" : "/export_output.bsh",
"convert_32bit" : "\"[Use min/max of first image (might saturate intenities over time)]\""
}
}
{
"common" :
{
"fiji-app" : "/projects/hpcsupport/steinbac/unicore/christopher/unicore_jobs/Fiji.app.cuda_new/ImageJ-linux64",
"fiji-prefix" : "/sw/bin/xvfb-run -a",
"directory_cuda" : "/lustre/projects/hpcsupport/steinbac/unicore/christopher/unicore_jobs/Fiji.app.cuda_new/lib/",
"merged_xml" : "hdf5_test_unicore_merge",
"bsh_directory" : "/home/steinbac/development/schmied-snakemake-workflows/spim_registration/timelapse/",
"first_xml_filename" : "test_unicore",
"hdf5_xml_filename" : "\"hdf5_test_unicore\"",
"fusion_switch" : "deconvolution",
"ntimepoints" : 5
},
"define_xml_czi" :
{
"pixel_distance_x" : "0.28590106964",
"pixel_distance_y" : "0.28590106964",
"pixel_distance_z" : "1.50000",
"pixel_unit" : "um",
"first_czi" : "2015-04-11_LZ2_Stock68_3.czi",
"channel_1" : "green",
"channel_2" : "red",
"angle_1" : "0",
"angle_2" : "72",
"angle_3" : "144",
"angle_4" : "216",
"angle_5" : "288",
"illumination_1" : "0",
"rotation_around" : "X-Axis",
"bsh_file" : "define_czi.bsh"
},
"define_xml_tif" :
{
"acquisition_angles" : "0,72,144,216,288",
"channels" : "0",
"image_file_pattern" : "img_TL{{t}}_Angle{{a}}.tif",
"pixel_distance_x" : "0.28590106964",
"pixel_distance_y" : "0.28590106964",
"pixel_distance_z" : "1.50000",
"pixel_unit" : "um",
"multiple_timepoints" : "\"YES (one file per time-point)\"",
"multiple_channels" : "\"NO (one channel)\"",
"multiple_illumination_directions" : "\"NO (one illumination direction)\"",
"multiple_angles" : "\"YES (one file per angle)\"",
"type_of_dataset" : "\"Image Stacks (ImageJ Opener)\"",
"imglib_container" : "\"ArrayImg (faster)\"",
"bsh_file" : "define_tif_zip.bsh"
},
"resave_hdf5" :
{
"resave_angle" : "\"All angles\"",
"resave_channel" : "\"All channels\"",
"resave_illumination" : "\"All illuminations\"",
"resave_timepoint" : "\"All Timepoints\"",
"subsampling_factors" : "\"{{ {{1,1,1}}, {{2,2,1}}, {{4,4,1}}, {{8,8,1}} }}\"",
"hdf5_chunk_sizes" : "\"{{ {{32,32,4}}, {{32,32,4}}, {{16,16,16}}, {{16,16,16}} }}\"",
"timepoints_per_partition" : "1",
"setups_per_partition" : "0",
"bsh_file" : "export.bsh"
},
"registration" :
{
"timepoint" : "\"Single Timepoint (Select from List)\"",
"illuminations" : "\"All illuminations\"",
"angle" : "\"All angles\"",
"channel" : "\"All channels\"",
"proc-ch" : "\"channel 0\"",
"algorithm" : "\"Fast 3d geometric hashing (rotation invariant)\"",
"label_interest_points" : "\"beads\"",
"type_of_registration" : "\"Register timepoints individually\"",
"type_of_detection" : "\"Difference-of-Mean (Integral image based)\"" ,
"subpixel_localization" : "\"3-dimensional quadratic fit\"",
"imglib_container" : "\"ArrayImg (faster)\"",
"radius_1" : "2",
"radius_2" : "3",
"threshold" : "0.005",
"interest_points_channel_0" : "\"beads\"",
"interest_points_channel_1" : "\"beads\"",
"fix_tiles" : "\"Fix first tile\"",
"map_back_tiles" : "\"Map back to first tile using rigid model\"",
"transformation_model" : "Affine",
"model_to_regularize_with" : "Rigid",
"lambda" : "0.10" ,
"allowed_error_for_ransac" : "5",
"significance" : "10",
"detection_min_max" : "find_maxima",
"initial_sigma" : "1.8",
"threshold_gaussian" : "0.0080",
"bsh_file" : "registration.bsh"
},
"xml_merge" :
{
"bsh_file" : "xml_merge.bsh"
},
"timelapse" :
{
"reference_timepoint" : "0",
"timelapse_process_timepoints" : "\"All Timepoints\"",
"type_of_registration_timelapse" : "\"Match against one reference timepoint (no global optimization)\"",
"bsh_file" : "timelapse_registration.bsh"
},
"external_transform" :
{
"bsh_file" : "transform.bsh",
"angle" : "\"All angles\"",
"channel" : "\"All channels\"",
"illumination" : "\"All illuminations\"",
"timepoint" : "\"All Timepoints\"",
"transformation" : "\"Rigid\"",
"apply_transformation" : "\"Current view transformations (appends to current transforms)\"",
"define_mode_transform" : "\"Matrix\"",
"matrix_transform" : "\"0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0\""
},
"fusion" :
{
"minimal_x" : "190",
"minimal_y" : "-16",
"minimal_z" : "-348",
"maximal_x" : "1019",
"maximal_y" : "1941",
"maximal_z" : "486",
"downsample" : "1",
"process_timepoint" : "\"Single Timepoint (Select from List)\"",
"process_channel" : "\"All channels\"",
"process_illumination" : "\"All illuminations\"",
"process_angle" : "\"All angles\"",
"xml_output" : "\"Save every XML with user-provided unique id\"",
"fused_image" : "\"Append to current XML Project\"",
"pixel_type" : "\"16-bit unsigned integer\"",
"imglib2_container_fusion" : "\"ArrayImg\"",
"process_views_in_paralell" : "\"All\"",
"interpolation" : "\"Linear Interpolation\"",
"imglib2_data_container" : "\"ArrayImg (faster)\"",
"bsh_file" : "fusion.bsh"
},
"deconvolution" :
{
"bsh_file" : "deconvolution_GPU.bsh",
"process_timepoint" : "\"Single Timepoint (Select from List)\"",
"process_channel" : "\"All channels\"",
"process_illumination" : "\"All illuminations\"",
"process_angle" : "\"All angles\"",
"minimal_x" : "95",
"minimal_y" : "-8",
"minimal_z" : "-174",
"maximal_x" : "509",
"maximal_y" : "970",
"maximal_z" : "243",
"imglib2_container" : "\"ArrayImg \"",
"type_of_iteration" : "\"Efficient Bayesian - Optimization I (fast, precise)\"",
"osem_acceleration" : "\"1 (balanced)\"",
"Tikhonov_parameter" : "0.0006",
"compute" : "\"in 512x512x512 blocks\"",
"compute_on" : "\"GPU (Nvidia CUDA via JNA)\"",
"psf_estimation" : "\"Extract from beads\"",
"iterations" : "5",
"detections_to_extract_psf_for_channel_0" : "\"beads\"",
"detections_to_extract_psf_for_channel_1" : "\"beads\"",
"psf_size_x" : "19",
"psf_size_y" : "19",
"psf_size_z" : "25"
},
"hdf5_output" :
{
"output_image_file_pattern" : "TP{{t}}_Ch{{c}}_Ill0_Ang0,72,144,216,288.tif",
"output_data_type" : "32Bit",
"output_xml" : "\"fused_Dual_Channel\"",
"output_hdf5_xml" : "\"hdf5_fused_Stock68\"",
"output_multiple_channels" : "\"NO (one channel)\"",
"output_timepoints" : "0-4",
"output_channels" : "green",
"output_pixel_distance_x" : "0.5718",
"output_pixel_distance_y" : "0.5718",
"output_pixel_distance_z" : "0.5718",
"output_pixel_unit" : "um",
"output_multiple_timepoints" : "\"YES (one file per time-point)\"",
"output_illumination_directions" : "\"NO (one illumination direction)\"",
"output_multiple_angles" : "\"NO (one angle)\"",
"output_type_of_dataset" : "\"Image Stacks (ImageJ Opener)\"",
"output_imglib_container" : "\"ArrayImg (faster)\"",
"bsh_file_define" : "/define_output.bsh",
"bsh_file_hdf5" : "/export_output.bsh",
"convert_32bit" : "\"[Use min/max of first image (might saturate intenities over time)]\""
}
}
common: {
# directory that contains the bean shell scripts
bsh_directory: "/projects/pilot_spim/Christopher/snakemake-workflows/spim_registration/timelapse/",
directory_cuda: "/sw/users/schmied/packages/cuda/lib/",
fiji-app: "/sw/users/schmied/packages/2015-06-08_Fiji.app.cuda/ImageJ-linux64",
fiji-prefix: "/sw/bin/xvfb-run -a",
first_xml_filename: "Dual_Channel",
hdf5_xml_filename: '"hdf5_Dual_Channel"',
merged_xml: "hdf5_Dual_Channel_merge",
ntimepoints: 2,
angles: "0,72,144,216,288",
channels: "green,red",
illumination: "0",
pixel_distance_x: '0.28590',
pixel_distance_y: '0.28590',
pixel_distance_z: '1.50000',
pixel_unit: "um",
transformation_switch: "timelapse_dublicate",
# transformation_switch: "timelapse",
# fusion_switch: "deconvolution"
fusion_switch: "fusion"
# directory that contains the bean shell scripts and Snakefile
bsh_directory: "/projects/pilot_spim/Christopher/snakemake-workflows/spim_registration/timelapse/",
# Directory that contains the cuda libraries
directory_cuda: "/sw/users/schmied/cuda/",
# Directory that contains the current working Fiji
#fiji-app: "/sw/users/schmied/packages/2015-06-08_Fiji.app.cuda/ImageJ-linux64",
fiji-app: "/sw/users/schmied/packages/2015-05-29_Fiji_2.3.9_SNAP.app.cuda/ImageJ-linux64",
fiji-prefix: "/sw/bin/xvfb-run -a", # calls xvfb for Fiji headless mode
# xml file names without .xml suffix
first_xml_filename: "Dual_Channel", # Name of the xml file for the .czi or .tif files
hdf5_xml_filename: '"hdf5_Dual_Channel"', # Name of .xml file for the hdf5 data after resave_hdf5
merged_xml: "hdf5_Dual_Channel_merge", # Name of .xml file after merge
# Describe the dataset
ntimepoints: 2, # number of timepoints of dataset
angles: "0,72,144,216,288", # angles
channels: "green,red", # channels
illumination: "0", # illuminations
pixel_distance_x: '0.28590', # Manual calibration x
pixel_distance_y: '0.28590', # Manual calibration y
pixel_distance_z: '1.50000', # Manual calibration z
pixel_unit: "um", # unit of manual calibration
# Use switches to decide which processing steps you need:
# transformation_switch: "timelapse" for standard processing;
# goes directly into fusion after the timelapse registration
# "timelapse_duplicate" for dual channel processing where only one channel
# contains the beads; duplicates the transformations to the other channel
transformation_switch: "timelapse_duplicate",
# Switches between content based fusion and deconvolution
# "deconvolution" > for deconvolution
# "fusion" > for content based fusion
fusion_switch: "deconvolution"
}
define_xml_czi: {
first_czi: "2015-02-20_LZ2_Stock48_Stock58.czi", # essential
rotation_around: "X-Axis",
bsh_file: "define_czi.bsh"
first_czi: "2015-02-20_LZ2_Stock48_Stock58.czi", # master .czi file
rotation_around: "X-Axis", # axis of acquisition
bsh_file: "define_czi.bsh" # .bsh script for defining .czi file
}
define_xml_tif: {
image_file_pattern: 'img_TL{{t}}_Angle{{a}}.tif', # essential
imglib_container: '"ArrayImg (faster)"',
multiple_angles: '"YES (one file per angle)"',
multiple_channels: '"NO (one channel)"',
multiple_illumination_directions: '"NO (one illumination direction)"',
multiple_timepoints: '"YES (one file per time-point)"',
# file pattern of the .tif files
# for multi channel use spim_TL{tt}_Angle{a}_Channel{c}.tif
# SPIM file pattern: use tt for zero-padded timepoint numbers
image_file_pattern: 'img_TL{{t}}_Angle{{a}}.tif',
# Settings for ImageJ Opener
type_of_dataset: '"Image Stacks (ImageJ Opener)"',
bsh_file: "define_tif_zip.bsh"
multiple_timepoints: '"YES (one file per time-point)"', # or NO (one time-point)
multiple_angles: '"YES (one file per angle)"', # or NO (one angle)
multiple_channels: '"NO (one channel)"', # or '"YES (one file per channel)"'
multiple_illumination_directions: '"NO (one illumination direction)"', # or YES (one file per illumination direction)
imglib_container: '"ArrayImg (faster)"', # '"ArrayImg (faster)"'
bsh_file: "define_tif_zip.bsh"
}
resave_hdf5: {
hdf5_chunk_sizes: '"{{ {{32,32,4}}, {{32,32,4}}, {{16,16,16}}, {{16,16,16}} }}"', # data dependent
subsampling_factors: '"{{ {{1,1,1}}, {{2,2,1}}, {{4,4,1}}, {{8,8,1}} }}"', # data dependent
# Resaves .tif or .czi data into hdf5
# Subsampling and resolution settings for hdf5: data dependent
hdf5_chunk_sizes: '"{{ {{32,32,4}}, {{32,32,4}}, {{16,16,16}}, {{16,16,16}} }}"',
subsampling_factors: '"{{ {{1,1,1}}, {{2,2,1}}, {{4,4,1}}, {{8,8,1}} }}"',
# Standard settings for cluster processing
setups_per_partition: '0',
timepoints_per_partition: '1',
resave_timepoint: '"All Timepoints"',
resave_angle: '"All angles"',
resave_channel: '"All channels"',
resave_illumination: '"All illuminations"',
resave_timepoint: '"All Timepoints"',
setups_per_partition: '0',
timepoints_per_partition: '1',
bsh_file: "export.bsh"
}
registration: {
reg_process_channel: '"Single channel (Select from List)"', # essential '"All channels"'; '"Single channel (Select from List)"'
reg_processing_channel: '"red"', # essential
reg_interest_points_channel: '"beads"', # essential
reg_radius_1: '2', # essential
reg_radius_2: '3', # essential
reg_threshold: '0.005', # essential
initial_sigma: '1.8', # essential
threshold_gaussian: '0.0080', # essential
type_of_detection: '"Difference-of-Mean (Integral image based)"', # switch
label_interest_points: '"beads"',
# reg_process_channel:
# Single Channel: '"All channels"'
# Dual Channel: '"All channels"'
# Dual Channel, one channel contains beads: '"Single channel (Select from List)"'
reg_process_channel: '"Single channel (Select from List)"',
# reg_processing_channel:
# Dual channel setting when only one channel contains the beads
reg_processing_channel: '"red"',
# reg_interest_points_channel:
# Single Channel: '"beads"'
# Dual Channel: '"beads,beads"'
# Dual Channel, one channel does not contain beads: '"[DO NOT register this channel],beads"'
reg_interest_points_channel: '"beads"',
# type of detection: '"Difference-of-Mean (Integral image based)"' or '"Difference-of-Gaussian"'
type_of_detection: '"Difference-of-Mean (Integral image based)"',
# Settings for Difference-of-Mean
# For multiple channels 'value1,value2' delimiter is ,
reg_radius_1: '2',
reg_radius_2: '3',
reg_threshold: '0.005',
# Settings for Difference-of-Gaussian
# For multiple channels 'value1,value2' delimiter is ,
sigma: '1.8',
threshold_gaussian: '0.0080',
# Processing settings for Difference-of-Gaussian detection
# compute_on: '"CPU (Java)"' or '"GPU accurate (Nvidia CUDA via JNA)"'
compute_on: '"GPU accurate (Nvidia CUDA via JNA)"',
separableconvolution: '"libSeparableConvolutionCUDALib.so"',
# Downsampling settings
downsample_detection: "No", # "No" or "Yes"
downsample_xy: '"Match Z Resolution (less downsampling)"',
downsample_z: "1x",
# Standard Settings for bead based registration
label_interest_points: '"beads"',
reg_process_timepoint: '"Single Timepoint (Select from List)"',
reg_process_angle: '"All angles"',
reg_process_illumination: '"All illuminations"',
......@@ -76,7 +118,7 @@ registration: {
model_to_regularize_with: "Rigid",
lambda: '0.10',
imglib_container: '"ArrayImg (faster)"',
bsh_file: "registration.bsh"
bsh_file: "registration.bsh" # .bsh script for registration
}
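# Illustrative example (placeholder values, not recommendations): for a dual
# channel dataset with beads in both channels the per-channel detection values
# are comma separated, e.g.
# reg_process_channel: '"All channels"',
# reg_interest_points_channel: '"beads,beads"',
# reg_radius_1: '2,2',
# reg_radius_2: '3,3',
# reg_threshold: '0.005,0.005',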
xml_merge: {
......@@ -84,107 +126,135 @@ xml_merge: {
}
timelapse: {
reference_timepoint: '0', # essential
reference_timepoint: '0', # Reference timepoint
# Standard settings for timelapse registration
type_of_registration_timelapse: '"Match against one reference timepoint (no global optimization)"',
timelapse_process_timepoints: '"All Timepoints"',
bsh_file: "timelapse_registration.bsh"
}
dublicate_transformations: {
source_dublication: "red", # essential for 1 usecase
target_dublication: "green", # essential for 1 usecase
duplicate_which_transformations: '"Replace all transformations"',
bsh_file: "dublicate_transformations.bsh"
# If dual-channel processing is used and only one channel contains beads,
# this duplicates the transformations to the channel that does not contain
# beads (see the illustrative example after this block)
source_dublication: "red", # source channel
target_dublication: "green", # target channel
duplicate_which_transformations: '"Replace all transformations"', # mode of duplication
bsh_file: "dublicate_transformations.bsh" # .bsh script for duplication
}
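# Illustrative example: this section presumably only takes effect when
# transformation_switch is set to "timelapse_duplicate"; with the channel
# roles reversed the entries would simply be swapped:
# source_dublication: "green",
# target_dublication: "red",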
fusion: {
downsample: '1', # essential
minimal_x: '220', # essential maybe automate
minimal_y: '40', # essential maybe automate
minimal_z: '-290', # essential maybe automate
maximal_x: '976', # essential maybe automate
maximal_y: '1892', # essential maybe automate
maximal_z: '472', # essential maybe automate
fused_image: '"Append to current XML Project"',
imglib2_container_fusion: '"ArrayImg"',
imglib2_data_container: '"ArrayImg (faster)"',
interpolation: '"Linear Interpolation"',
pixel_type: '"16-bit unsigned integer"',
# content-based multiview fusion
# supports multi-channel data without additional settings
downsample: '1', # set downsampling
# Cropping parameters of full resolution
minimal_x: '220',
minimal_y: '40',
minimal_z: '-290',
maximal_x: '976',
maximal_y: '1892',
maximal_z: '472',
# fused_image: '"Append to current XML Project"', does not work yet
process_timepoint: '"Single Timepoint (Select from List)"',
process_angle: '"All angles"',
process_channel: '"All channels"',
process_illumination: '"All illuminations"',
process_timepoint: '"Single Timepoint (Select from List)"',
imglib2_container_fusion: '"ArrayImg"',
interpolation: '"Linear Interpolation"',
pixel_type: '"16-bit unsigned integer"',
imglib2_data_container: '"ArrayImg (faster)"',
process_views_in_paralell: '"All"',
xml_output: '"Save every XML with user-provided unique id"',
bsh_file: "fusion.bsh"
}
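# Worked example (illustrative): with downsample: '2' the bounding box above is
# presumably still given in full-resolution coordinates, so the fused output
# would span roughly (976 - 220) / 2 = 378 pixels in x instead of 756.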
external_transform: {
# BUG
# Downsamples for deconvolution
# BUG: external transformation breaks .xml file
# channel setting: '"all_channels"'
channel_setting: '"green,red"',
transform_timepoint: '"All Timepoints"',
transform_angle: '"All angles"',
transform_channel: '"All channels"',
# illumination setting only one illumination side
transform_illumination: '"All illuminations"',
apply_transformation: '"Current view transformations (appends to current transforms)"',
define_mode_transform: '"Matrix"',
matrix_transform: '"0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0"', # essential
# Matrix for downsampling
matrix_transform: '"0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0"',
transformation: '"Rigid"',
bsh_file: "transform.bsh"
}
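# Interpretation (illustrative): the 12 values of matrix_transform read row by
# row as a 3x4 affine matrix
# | 0.5 0.0 0.0 0.0 |
# | 0.0 0.5 0.0 0.0 |
# | 0.0 0.0 0.5 0.0 |
# i.e. every coordinate is scaled by 0.5 with no translation, which corresponds
# to an isotropic 2x downsampling of the views before deconvolution.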
deconvolution: {
iterations: '1', # essential
minimal_x_deco: '190', # essential maybe automate
minimal_y_deco: '-16', # essential maybe automate
minimal_z_deco: '-348', # essential maybe automate
maximal_x_deco: '1019', # essential maybe automate
maximal_y_deco: '1941', # essential maybe automate
maximal_z_deco: '486', # essential maybe automate
detections_to_extract_psf_for_channel: '"beads"',
iterations: '1', # number of iterations
# Cropping parameters: take downsampling into account
minimal_x_deco: '190',
minimal_y_deco: '-16',
minimal_z_deco: '-348',
maximal_x_deco: '1019',
maximal_y_deco: '1941',
maximal_z_deco: '486',
# Channel settings for deconvolution
# Single Channel: '"beads"'
# Dual Channel: '"beads,beads"'
# Dual Channel one channel contains beads: '"[Same PSF as channel red],beads"'
detections_to_extract_psf_for_channel: '"[Same PSF as channel red],beads"',
# Settings for GPU or CPU processing
# '"CPU (Java)"' or '"GPU (Nvidia CUDA via JNA)"'
compute_on: '"GPU (Nvidia CUDA via JNA)"',
cudafourierconvolution: "libFourierConvolutionCUDALib.so", # name of the CUDA library used for GPU processing
# Standard settings for deconvolution
process_timepoint: '"Single Timepoint (Select from List)"',
process_angle: '"All angles"',
process_channel: '"All channels"',
process_illumination: '"All illuminations"',
type_of_iteration: '"Efficient Bayesian - Optimization I (fast, precise)"',
Tikhonov_parameter: '0.0006',
compute: '"in 512x512x512 blocks"',
compute_on: '"GPU (Nvidia CUDA via JNA)"',
imglib2_container: '"ArrayImg"',
osem_acceleration: '"1 (balanced)"',
psf_estimation: '"Extract from beads"',
psf_size_x: '19',
psf_size_y: '19',
psf_size_z: '25',
type_of_iteration: '"Efficient Bayesian - Optimization I (fast, precise)"',
bsh_file: "deconvolution_GPU.bsh"
imglib2_container: '"ArrayImg"',
bsh_file: "deconvolution.bsh"
}
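# Illustrative example (assumed values): for CPU-only deconvolution without
# CUDA the relevant entry would look roughly like
# compute_on: '"CPU (Java)"',
# in that case cudafourierconvolution and the CUDA library path are presumably
# ignored; whether bsh_file needs to point to a different script depends on the
# provided .bsh files.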
hdf5_output: {
# Will be obsolete
# writes new hdf5 dataset for fusion output: will be obsolete
# Naming pattern of output
# Single Channel: TP{t}_Chgreen_Ill0_Ang0,72,144,216,288.tif > Ch{name} is added here
# Dual Channel: TP{t}_Ch{0}_Ill0_Ang0,72,144,216,288.tif > Ch{name} is added here
output_image_file_pattern: 'TP{{t}}_Ch{{c}}_Ill0_Ang0,72,144,216,288.tif',
# channel setting
output_multiple_channels: '"YES (one file per channel)"', # '"YES (one file per channel)"' or '"NO (one channel)"'
output_channels: "green,red",
# .xml file names
output_xml: '"fused_Dual_Channel"',
output_hdf5_xml: '"hdf5_fused_Dual_Channel"',
output_multiple_channels: '"YES (one file per channel)"', # "\"YES (one file per channel)\"" or "\"NO (one channel)\""
output_timepoints: '0-1',
output_pixel_distance_x: 0.28590,
output_timepoints: '0-1', # Timepoints format: '1-2'
# pixel size of output: take downsampling into account!
output_pixel_distance_x: 0.28590,
output_pixel_distance_y: 0.28590,
output_pixel_distance_z: 0.28590,
output_pixel_unit: 'um',
output_channels: "green,red",
output_data_type: "16Bit",
# specify whether the data is 16Bit or 32Bit
# the output of fusion is 16Bit, the output of deconvolution is 32Bit
output_data_type: "16Bit", # "32Bit" or "16Bit"
# if the data is 32Bit it is converted into 16Bit
convert_32bit: '"[Use min/max of first image (might saturate intenities over time)]"',
# subsampling and chunk size settings: dataset dependent
subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}}, {{4,4,4}}, {{8,8,8}} }}"', # data dependent
chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}}, {{16,16,16}}, {{16,16,16}} }}"', # data dependent
# subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}} }}"',
# chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}} }}"',
# Standard settings for hdf5_output
output_type_of_dataset: '"Image Stacks (ImageJ Opener)"',
output_multiple_timepoints: '"YES (one file per time-point)"',
output_multiple_angles: '"NO (one angle)"',
output_illumination_directions: '"NO (one illumination direction)"',
output_imglib_container: '"ArrayImg (faster)"',
subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}}, {{4,4,4}}, {{8,8,8}} }}"', # data dependent
chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}}, {{16,16,16}}, {{16,16,16}} }}"', # data dependent
# subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}} }}"',
# chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}} }}"',
bsh_file_define: "define_output.bsh",
bsh_file_hdf5: "export_output.bsh"
bsh_file_define: "define_output.bsh", # .bsh script for defining the dataset
bsh_file_hdf5: "export_output.bsh" # .bsh script for resaving into hdf5
}
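# Illustrative example (assumed values): for single-channel 32Bit deconvolution
# output the entries above would change to roughly
# output_image_file_pattern: 'TP{{t}}_Chgreen_Ill0_Ang0,72,144,216,288.tif',
# output_multiple_channels: '"NO (one channel)"',
# output_channels: "green",
# output_data_type: "32Bit",
# the angle list in the pattern is dataset specific and only copied from the
# dual-channel example above.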