common: {
# ============================================================================
#
# YAML example file for single-channel processing
#
# General settings for processing
# 2015-04-01_LZ2_Stock46
# Dpse_Dip3_VK33
#
# ============================================================================
# Directory that contains the BeanShell scripts and the Snakefile
bsh_directory: "/projects/pilot_spim/Christopher/snakemake-workflows/spim_registration/timelapse/",
# Directory that contains the cuda libraries
directory_cuda: "/sw/users/schmied/cuda/",
# Directory that contains the current working Fiji
fiji-app: "/sw/users/schmied/packages/2015-06-30_Fiji.app.cuda/ImageJ-linux64",
fiji-prefix: "/sw/users/schmied/packages/xvfb-run -a", # calls xvfb for Fiji headless mode
# ============================================================================
# Processing switches
# Description: use these switches to select the processing steps you need
# (see the commented dual-channel example below the switches).
#
# Options:
# transformation_switch:
# "timelapse"            standard processing; after timelapse registration the workflow goes directly into fusion
# "timelapse_duplicate"  for dual-channel processing where only one channel contains the beads
#
# fusion_switch: switches between content-based fusion and deconvolution
# "deconvolution" > for multiview deconvolution
# "fusion"        > for content-based fusion
# ============================================================================
#
# Transformation switch:
transformation_switch: "timelapse",
# Fusion switch:
fusion_switch: "fusion",
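#
# Example (commented out, illustrative): for a dual-channel dataset where only
# one channel contains beads and the result should be deconvolved, the switches
# would be set to:
# transformation_switch: "timelapse_duplicate",
# fusion_switch: "deconvolution",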
# ============================================================================
# xml file names
#
# xml file names without .xml suffix
# ============================================================================
first_xml_filename: 'Dpse_Dip3_VK33', # Name of the xml file for the .czi or .tif files
hdf5_xml_filename: '"hdf5_Dpse_Dip3_VK33"', # Name of .xml file for the hdf5 data after resave_hdf5
merged_xml: 'hdf5_Dpse_Dip3_VK33_merge', # Name of .xml file after merge
# ============================================================================
# Describe the dataset
#
# Options: number of timepoints
# angles
# channels
# illuminations
# pixel size
# ============================================================================
ntimepoints: 72, # number of timepoints of dataset
angles: "0,72,144,216,288", # angles
channels: "green", # channel names; for .tif datasets use numeric values!
illumination: "0", # illuminations
# ----------------------------------------------------------------------------
# For .czi datasets
# master .czi file
first_czi: "2015-04-01_LZ2_Stock46.czi",
# ----------------------------------------------------------------------------
# For .tif datasets
# file pattern of .tif files:
# for multi-channel data with one file per channel use e.g. spim_TL{tt}_Angle{a}_Channel{c}.tif
# use {tt} for zero-padded timepoint numbers (see the commented example below)
image_file_pattern: 'img_TL{{t}}_Angle{{a}}.tif',
multiple_channels: '"NO (one channel)"', # '"YES (all channels in one file)"' or '"YES (one file per channel)"' or '"NO (one channel)"'
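#
# Example (commented out, illustrative file names): a dual-channel .tif dataset
# with one file per channel and zero-padded timepoint numbers could be described by:
# image_file_pattern: 'spim_TL{{tt}}_Angle{{a}}_Channel{{c}}.tif',
# multiple_channels: '"YES (one file per channel)"',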
# ============================================================================
# Detection and registration
#
# Description: settings for interest point detection and registration
# Options: Single channel and dual channel processing
# Difference-of-mean or difference-of-gaussian detection
# ============================================================================
# reg_process_channel:
# Single Channel: '"All channels"'
# Dual Channel: '"All channels"'
# Dual Channel one Channel contains beads: '"Single channel (Select from List)"'
reg_process_channel: '"All channels"',
#
# Dual-channel processing where only one channel contains the beads: which channel contains them?
reg_processing_channel: '"green"',
#
# reg_interest_points_channel:
# Single Channel: '"beads"'
# Dual Channel: '"beads,beads"'
# Dual Channel, one channel without beads: '"[DO NOT register this channel],beads"'
reg_interest_points_channel: '"beads"',
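#
# Example (commented out, illustrative): dual-channel dataset with channels
# "red,green" where only the green channel contains beads:
# reg_process_channel: '"Single channel (Select from List)"',
# reg_processing_channel: '"green"',
# reg_interest_points_channel: '"[DO NOT register this channel],beads"',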
#
# type of detection: '"Difference-of-Mean (Integral image based)"' or '"Difference-of-Gaussian"'
type_of_detection: '"Difference-of-Gaussian"',
# Settings for Difference-of-Mean
# For multiple channels use comma-delimited values: 'value1,value2'
reg_radius_1: '2',
reg_radius_2: '3',
reg_threshold: '0.005',
# Settings for Difference-of-Gaussian
# For multiple channels use comma-delimited values: 'value1,value2'
sigma: '1.3',
threshold_gaussian: '0.025',
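#
# Example (commented out): for dual-channel processing each detection setting
# takes one comma-delimited value per channel (values here are illustrative):
# sigma: '1.3,1.8',
# threshold_gaussian: '0.025,0.025',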
# ============================================================================
# Timelapse registration
#
# Description: settings for timelapse registration
# Options: reference timepoint
# ============================================================================
reference_timepoint: '40', # Reference timepoint
# ============================================================================
# Content-based multiview fusion
#
# Description: settings for content-based multiview fusion
# Options: downsampling
# Cropping parameters based on full resolution
# ============================================================================
downsample: '1', # set downsampling
minimal_x: '237', # Cropping parameters of full resolution
minimal_y: '-16',
minimal_z: '-292',
maximal_x: '1041',
maximal_y: '1853',
maximal_z: '509',
# ============================================================================
# Multiview deconvolution
#
# Description: settings for multiview deconvolution
# Options: number of iterations
# Cropping parameters taking downsampling into account
# Channel settings for deconvolution
# ============================================================================
iterations: '1', # number of iterations
minimal_x_deco: '237', # Cropping parameters: take downsampling into account
minimal_y_deco: '-16',
minimal_z_deco: '-292',
maximal_x_deco: '1041',
maximal_y_deco: '1853',
maximal_z_deco: '509',
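#
# Illustration: "take downsampling into account" means scaling the full-resolution
# crop by the downsampling factor, e.g. with 2x downsampling (a 0.5 scaling)
# minimal_x_deco 237 would become roughly 118 and maximal_x_deco 1041 roughly 520.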
#
# Channel settings for deconvolution
# Single Channel: '"beads"'
# Dual Channel: '"beads,beads"'
# Dual Channel one channel contains beads: '"[Same PSF as channel red],beads"'
detections_to_extract_psf_for_channel: '"beads"',
#
# ============================================================================
# Resave output
#
# Description: writes new hdf5 dataset for fusion output
# Options: Naming pattern of output based on channel number
# Channel settings
# File name for resaving output into hdf5
# Pixel size > isotropic resolution
# Image type (16Bit from content-based fusion, 32Bit from deconvolution)
# ============================================================================
# Number of timepoints
output_timepoints: '0-71', # timepoint range of the output, format: 'first-last'
#
# Naming pattern:
# Single Channel: TP{{t}}_Chgreen_Ill0_Ang0,72,144,216,288.tif > Ch{name} is added here
# Dual Channel: TP{{t}}_Ch{{0}}_Ill0_Ang0,72,144,216,288.tif > Ch{name} is added here
output_image_file_pattern: '"TP{{t}}_Chgreen_Ill0_Ang0,72,144,216,288.tif"',
#
# channel setting:
# Single channel: '"NO (one channel)"'
# Dual channel: '"YES (one file per channel)"'
output_multiple_channels: '"NO (one channel)"',
output_channels: "green",
#
# .xml file names
output_xml: '"fused_Dpse_Dip3_VK33"',
output_hdf5_xml: '"hdf5_fused_Dpse_Dip3_VK33"',
#
# pixel size of output: take downsampling into account!
output_pixel_distance_x: 0.285901069641113,
output_pixel_distance_y: 0.285901069641113,
output_pixel_distance_z: 0.285901069641113,
output_pixel_unit: 'um',
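#
# Illustration: with downsample: '1' the fused output keeps the original x/y pixel
# size of 0.285901069641113 um (isotropic); with downsample: '2' these distances
# would double to 0.571802139282226.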
#
# File type
output_data_type: "16Bit" # "32Bit" or "16Bit"
}
# ============================================================================
# Advanced settings
# ============================================================================
define_xml_czi: {
manual_calibration_czi: "No", # calibration override: No or Yes
czi_pixel_distance_x: '0.285901069641113', # Manual calibration x
czi_pixel_distance_y: '0.285901069641113', # Manual calibration y
czi_pixel_distance_z: '1.500000000000000', # Manual calibration z
czi_pixel_unit: "um", # unit of manual calibration
rotation_around: "X-Axis", # axis of rotation during acquisition
bsh_file: "define_czi.bsh" # .bsh script for defining .czi file
}
define_xml_tif: {
# Settings for ImageJ Opener
manual_calibration_tif: "No", # calibration override: No or Yes
pixel_distance_x: '0.285901069641113', # Manual calibration x
pixel_distance_y: '0.285901069641113', # Manual calibration y
pixel_distance_z: '1.500000000000000', # Manual calibration z
pixel_unit: "um", # unit of manual calibration
type_of_dataset: '"Image Stacks (LOCI Bioformats)"', # '"Image Stacks (ImageJ Opener)"' or '"Image Stacks (LOCI Bioformats)"'
multiple_timepoints: '"YES (one file per time-point)"', # or NO (one time-point)
multiple_angles: '"YES (one file per angle)"', # or NO (one angle)
multiple_illumination_directions: '"NO (one illumination direction)"', # or YES (one file per illumination direction)
imglib_container: '"ArrayImg (faster)"', # '"ArrayImg (faster)"'
bsh_file: "define_tif_zip.bsh"
}
resave_hdf5: {
# Resaves .tif or .czi data into hdf5
# Subsampling and resolution settings for hdf5: data dependent
hdf5_chunk_sizes: '"{{ {{32,32,4}}, {{32,32,4}}, {{16,16,16}}, {{16,16,16}} }}"',
subsampling_factors: '"{{ {{1,1,1}}, {{2,2,1}}, {{4,4,1}}, {{8,8,1}} }}"',
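# Each inner {x,y,z} triple is one resolution level; chunk sizes and
# subsampling factors must define the same number of levels.
# Example (commented out, illustrative) for a dataset that only needs two levels:
# hdf5_chunk_sizes: '"{{ {{32,32,4}}, {{16,16,16}} }}"',
# subsampling_factors: '"{{ {{1,1,1}}, {{2,2,1}} }}"',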
# Standard settings for cluster processing
setups_per_partition: '0',
timepoints_per_partition: '1',
resave_timepoint: '"All Timepoints"',
resave_angle: '"All angles"',
resave_channel: '"All channels"',
resave_illumination: '"All illuminations"',
bsh_file: "export.bsh"
}
registration: {
# Processing setting for Difference-of-Gaussian detection
# alternative: compute_on: '"GPU accurate (Nvidia CUDA via JNA)"'
compute_on: '"CPU (Java)"',
separableconvolution: '"libSeparableConvolutionCUDALib.so"',
# Downsampling settings
downsample_detection: "Yes", # "No" or "Yes"
downsample_xy: '"Match Z Resolution (less downsampling)"',
downsample_z: "1x",
# Standard Settings for bead based registration
label_interest_points: '"beads"',
reg_process_timepoint: '"Single Timepoint (Select from List)"',
reg_process_angle: '"All angles"',
reg_process_illumination: '"All illuminations"',
subpixel_localization: '"3-dimensional quadratic fit"',
detection_min_max: "find_maxima",
type_of_registration: '"Register timepoints individually"',
algorithm: '"Fast 3d geometric hashing (rotation invariant)"',
transformation_model: "Affine",
allowed_error_for_ransac: '5',
significance: '10',
fix_tiles: '"Fix first tile"',
map_back_tiles: '"Map back to first tile using rigid model"',
model_to_regularize_with: "Rigid",
lambda: '0.10',
imglib_container: '"ArrayImg (faster)"',
bsh_file: "registration.bsh" # .bsh script for registration
}
xml_merge: {
bsh_file: "xml_merge.bsh"
}
timelapse: {
# Standard settings for timelapse registration
type_of_registration_timelapse: '"Match against one reference timepoint (no global optimization)"',
timelapse_process_timepoints: '"All Timepoints"',
bsh_file: "timelapse_registration.bsh"
}
dublicate_transformations: {
# For dual-channel processing where only one channel contains beads,
# this duplicates the timelapse transformations from the bead channel
# to the channel that does not contain beads
source_dublication: "red", # source channel
target_dublication: "green", # target channel
duplicate_which_transformations: '"Replace all transformations"', # mode of duplication
bsh_file: "dublicate_transformations.bsh" # .bsh script for the duplication
}
fusion: {
# fused_image: '"Append to current XML Project"', does not work yet
process_timepoint: '"Single Timepoint (Select from List)"',
process_angle: '"All angles"',
process_channel: '"All channels"',
process_illumination: '"All illuminations"',
imglib2_container_fusion: '"ArrayImg"',
interpolation: '"Linear Interpolation"',
pixel_type: '"16-bit unsigned integer"',
imglib2_data_container: '"ArrayImg (faster)"',
process_views_in_paralell: '"All"',
xml_output: '"Save every XML with user-provided unique id"',
bsh_file: "fusion.bsh"
}
external_transform: {
# Applies a downsampling transformation before deconvolution
# BUG: the external transformation currently breaks the .xml file
# alternative channel setting: '"all_channels"'
channel_setting: '"green"',
transform_timepoint: '"All Timepoints"',
transform_angle: '"All angles"',
transform_channel: '"All channels"',
transform_illumination: '"All illuminations"',
apply_transformation: '"Current view transformations (appends to current transforms)"',
define_mode_transform: '"Matrix"',
# Matrix for downsampling
matrix_transform: '"0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0"',
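# The twelve values are presumably read as a 3x4 row-major affine matrix;
# 0.5 on the diagonal halves x, y and z (2x downsampling) with zero translation.
# Example (commented out, illustrative) for 4x downsampling:
# matrix_transform: '"0.25, 0.0, 0.0, 0.0, 0.0, 0.25, 0.0, 0.0, 0.0, 0.0, 0.25, 0.0"',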
transformation: '"Rigid"',
bsh_file: "transform.bsh"
}
deconvolution: {
# Settings for GPU or CPU processing
# '"CPU (Java)"' or '"GPU (Nvidia CUDA via JNA)"'
compute_on: '"GPU (Nvidia CUDA via JNA)"',
cudafourierconvolution: "libFourierConvolutionCUDALib.so", # GPU processing name of cuda library
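# Example (commented out): to run the deconvolution on the CPU instead, set
# compute_on: '"CPU (Java)"',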
# Standard settings for deconvolution
process_timepoint: '"Single Timepoint (Select from List)"',
process_angle: '"All angles"',
process_channel: '"All channels"',
process_illumination: '"All illuminations"',
type_of_iteration: '"Efficient Bayesian - Optimization I (fast, precise)"',
Tikhonov_parameter: '0.0006',
compute: '"in 512x512x512 blocks"',
osem_acceleration: '"1 (balanced)"',
psf_estimation: '"Extract from beads"',
psf_size_x: '19',
psf_size_y: '19',
psf_size_z: '25',
imglib2_container: '"ArrayImg"',
bsh_file: "deconvolution.bsh"
}
hdf5_output: {
# if the data are 32Bit they are converted to 16Bit
convert_32bit: '"[Use min/max of first image (might saturate intenities over time)]"',
# subsampling and chunk size settings: dataset dependent
subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}}, {{4,4,4}}, {{8,8,8}} }}"', # data dependent
chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}}, {{16,16,16}}, {{16,16,16}} }}"', # data dependent
# subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}} }}"',
# chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}} }}"',
# Standard settings for hdf5_output
output_type_of_dataset: '"Image Stacks (ImageJ Opener)"',
output_multiple_timepoints: '"YES (one file per time-point)"',
output_multiple_angles: '"NO (one angle)"',
output_illumination_directions: '"NO (one illumination direction)"',
output_imglib_container: '"ArrayImg (faster)"',
bsh_file_define: "define_output.bsh", # .bsh script for defining the dataset
bsh_file_hdf5: "export_output.bsh" # .bsh script for resaving into hdf5
}