Commit dac37009 authored by Peter Steinbach

Merge pull request #9 from schmiedc/tempfiles

Tempfiles and .czi calibration, thanks!
parents 7bc00304 77d123aa
Showing changes with 159 additions and 747 deletions
......@@ -106,6 +106,21 @@ If not:
```bash
snakemake -j2 -d /path/to/data/ --cluster-config ./cluster.json --cluster "bsub -q {cluster.lsf_q} {cluster.lsf_extra}"
```
To capture the error and output of the cluster jobs, add -o test.out -e test.err, e.g.:
DRMAA
```bash
snakemake -j2 -d /path/to/data/ --cluster-config ./cluster.json --drmaa " -q {cluster.lsf_q} {cluster.lsf_extra} -o test.out -e test.err"
```
LSF
```bash
snakemake -j2 -d /path/to/data/ --cluster-config ./cluster.json --cluster "bsub -q {cluster.lsf_q} {cluster.lsf_extra} -o test.out -e test.err"
```
Note: with this setting, the error and output of all jobs are written into these two shared files.
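If you prefer one pair of log files per job instead, LSF expands %J to the job ID (the repository's submit scripts use the same placeholder), so a per-job variant could look like:
```bash
snakemake -j2 -d /path/to/data/ --cluster-config ./cluster.json --cluster "bsub -q {cluster.lsf_q} {cluster.lsf_extra} -o out.%J -e err.%J"
```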
Log files and supervision of the pipeline
---------------
......@@ -115,3 +130,5 @@ The log files are ordered according to their position in the workflow. Multiple
force certain rules:
use the -R flag to rerun a particular rule and everything downstream of it:
-R <name of rule>
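For example, to force the registration rule and everything downstream of it (a sketch based on the invocation shown above; adjust the data path to your setup):
```bash
snakemake -j2 -d /path/to/data/ -R registration
```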
......@@ -7,13 +7,13 @@ if JOBDIR[-1] != "/": # this checks if jobdir ends with a slash; if not, it adds a slash
JOBDIR+="/"
# Test config file single Channel:
configfile: "single_test.yaml"
# configfile: "single_test.yaml"
# Test config file dual channel one channel contains beads:
# configfile: "dual_OneChannel.yaml"
# data specific config file, expected to be inside JOBDIR
# configfile: "tomancak_test_cluster.yaml"
configfile: "single.yaml"
padding_format = "{0:0"+str(padding_of_file_id(int(config["common"]["ntimepoints"])))+"d}"
ds_format = "-"+padding_format+"-00.h5"
......@@ -26,19 +26,18 @@ xml_merge_in = produce_xml_merge_job_files(datasets)
rule done:
input: [ ds + "_output_hdf5" for ds in datasets ]
localrules: define_xml_czi, define_xml_tif, xml_merge, timelapse,
duplicate_transformations, external_transform, define_output,
hdf5_xml_output
localrules: define_xml_tif, xml_merge, timelapse,
duplicate_transformations, external_transform, define_output
rule resave_prepared:
input: expand("{dataset}.{suffix}",dataset=[ config["common"]["hdf5_xml_filename"] ], suffix=["xml","h5"])
# defining xml for czi dataset
rule define_xml_czi:
input: config["common"]["first_czi"]
output: config["common"]["first_xml_filename"] + ".xml"
input: config["common"]["first_czi"]
output: temp(config["common"]["first_xml_filename"] + ".xml")
log: "logs/a1_define_xml_czi.log"
run:
cmd_string = produce_string("""{fiji-prefix} {fiji-app} \
......@@ -47,10 +46,11 @@ rule define_xml_czi:
-Dangles={angles} \
-Dchannels={channels} \
-Dillumination={illumination} \
-Dpixel_distance_x={pixel_distance_x} \
-Dpixel_distance_y={pixel_distance_y} \
-Dpixel_distance_z={pixel_distance_z} \
-Dpixel_unit={pixel_unit} \
-Dmanual_calibration_czi={manual_calibration_czi} \
-Dczi_pixel_distance_x={czi_pixel_distance_x} \
-Dczi_pixel_distance_y={czi_pixel_distance_y} \
-Dczi_pixel_distance_z={czi_pixel_distance_z} \
-Dczi_pixel_unit={czi_pixel_unit} \
-Dfirst_xml_filename={first_xml_filename} \
-Drotation_around={rotation_around} \
-- --no-splash {path_bsh}""",
......@@ -66,7 +66,7 @@ rule define_xml_czi:
# defining xml for tif dataset
rule define_xml_tif:
input: glob.glob(re.sub("{{.}}","*",config["common"]['image_file_pattern'])) #replaces all occurrences of {{a}} (a can be any character) by * to use the string for globbing
output: config["common"]["first_xml_filename"] + ".xml"
output: temp(config["common"]["first_xml_filename"] + ".xml")
log: "logs/a2_define_xml_tif.log"
run:
cmd_string = produce_string(
......@@ -105,7 +105,7 @@ ruleorder: define_xml_czi > define_xml_tif
rule hdf5_xml:
input: config["common"]["first_xml_filename"] + ".xml"
output: expand("{dataset}.{suffix}",dataset=[ config["common"]["hdf5_xml_filename"].strip('\"')], suffix=["xml","h5"]),
[ item+"_xml" for item in datasets ]
temp([ item+"_xml" for item in datasets ])
log: "logs/b1_hdf5_xml.log"
run:
part_string = produce_string(
......@@ -134,8 +134,8 @@ rule hdf5_xml:
# resave .czi/.tif dataset as hdf5
rule resave_hdf5:
input: rules.hdf5_xml.output # "{xml_base}-{file_id,\d+}-00.h5_xml"
output: "{xml_base}-{file_id,\d+}-00.h5", "{xml_base}-{file_id,\d+}-00.h5_hdf5"
input: rules.hdf5_xml.output, config["common"]["first_xml_filename"] + ".xml"
output: "{xml_base}-{file_id,\d+}-00.h5", temp("{xml_base}-{file_id,\d+}-00.h5_hdf5")
log: "logs/b2_resave_hdf5-{file_id}.log"
run:
part_string = produce_string(
......@@ -163,8 +163,8 @@ rule resave_hdf5:
shell(part_string)
rule registration:
input: "{xml_base}-{file_id}-00.h5"
output: "{xml_base}.job_{file_id,\d+}.xml"#, "{xml_base}-{file_id,\d+}-00.h5_registered",
input: "{xml_base}-{file_id}-00.h5", expand("{dataset}.{suffix}",dataset=[ config["common"]["hdf5_xml_filename"].strip('\"')], suffix=["xml","h5"])
output: temp("{xml_base}.job_{file_id,\d+}.xml") #, "{xml_base}-{file_id,\d+}-00.h5_registered",
log: "logs/c_{xml_base}-{file_id}-registration.log"
run:
cmd_string = produce_string(
......@@ -236,7 +236,7 @@ rule xml_merge:
rule timelapse:
input: rules.xml_merge.output
output: rules.xml_merge.output[0] + "_timelapse"
output: temp(rules.xml_merge.output[0] + "_timelapse")
log: "logs/d2_{xml_base}_timelapse.log"
run:
cmd_string = produce_string(
......@@ -269,7 +269,7 @@ rule timelapse:
rule duplicate_transformations:
input: rules.timelapse.output, merged_xml="{xml_base}_merge.xml"
output: rules.timelapse.output[0] + "_duplicate"
output: temp(rules.timelapse.output[0] + "_duplicate")
log: "logs/d3_{xml_base}_duplicate_transformations.log"
run:
cmd_string = produce_string(
......@@ -296,7 +296,7 @@ rule duplicate_transformations:
rule fusion:
input: [ str("{xml_base}_merge.xml_" + config["common"]["transformation_switch"] ) ], "{xml_base}-{file_id,\d+}-00.h5", merged_xml="{xml_base}_merge.xml" # rules.timelapse.output, "{xml_base}-{file_id,\d+}-00.h5", merged_xml="{xml_base}_merge.xml"
output: "{xml_base}-{file_id,\d+}-00.h5_fusion"
output: temp("{xml_base}-{file_id,\d+}-00.h5_fusion")
log: "logs/e1_{xml_base}-{file_id,\d+}-00-fusion.log"
run:
cmd_string = produce_string(
......@@ -339,7 +339,7 @@ rule fusion:
rule external_transform:
input: rules.timelapse.output, merged_xml="{xml_base}_merge.xml"
output: rules.timelapse.output[0] + "_external_trafo"
output: temp(rules.timelapse.output[0] + "_external_trafo")
log: "logs/e2_external_transform.log"
run:
cmd_string = produce_string(
......@@ -367,7 +367,7 @@ rule external_transform:
rule deconvolution:
input: [ str("{xml_base}_merge.xml_" + config["common"]["transformation_switch"] ) ], "{xml_base}-{file_id,\d+}-00.h5", merged_xml="{xml_base}_merge.xml" # rules.timelapse.output, "{xml_base}-{file_id,\d+}-00.h5", merged_xml="{xml_base}_merge.xml" # rules.external_transform.output, "{xml_base}-{file_id,\d+}-00.h5", merged_xml="{xml_base}_merge.xml"
output: "{xml_base}-{file_id,\d+}-00.h5_deconvolution"
output: temp("{xml_base}-{file_id,\d+}-00.h5_deconvolution")
log: "logs/e2_{xml_base}-{file_id,\d+}-00-deconvolution.log"
run:
cmd_string = produce_string(
......@@ -414,7 +414,7 @@ rule deconvolution:
rule define_output:
input: [ item + "_" + config["common"]["fusion_switch"] for item in datasets ], glob.glob('TP*')
output: config["common"]["output_xml"].strip('\"') + ".xml"
output: temp(config["common"]["output_xml"].strip('\"') + ".xml")
log: "logs/f1_define_output.log"
run:
cmd_string = produce_string(
......@@ -447,7 +447,7 @@ rule define_output:
rule hdf5_xml_output:
input: config["common"]["output_xml"].strip('\"') + ".xml"
output: expand("{dataset}.{suffix}",dataset=[ config["common"]["output_hdf5_xml"].strip('\"')], suffix=["xml","h5"]),
[ item+"_output" for item in datasets ]
temp([ item+"_output" for item in datasets ])
log: "logs/f2_output_hdf5_xml.log"
run:
part_string = produce_string(
......@@ -477,8 +477,8 @@ rule hdf5_xml_output:
shell(part_string)
rule resave_hdf5_output:
input: rules.hdf5_xml_output.output
output: "{xml_base}-{file_id,\d+}-00.h5_output_hdf5"
input: rules.hdf5_xml_output.output, config["common"]["output_xml"].strip('\"') + ".xml"
output: temp("{xml_base}-{file_id,\d+}-00.h5_output_hdf5")
log: "logs/f3_resave_output-{file_id}.log"
run:
part_string = produce_string(
......@@ -518,7 +518,7 @@ rule distclean:
# NOTE! The following enables mailing, which will send out a mail once an entire workflow is done (the below does not include anything in the message body, redirect from /dev/null)
# onsuccess:
# shell("mail -s \"[SUCCESS] our_cluster:{jdir} finished \" xxx@mpi-cbg.de < /dev/null".format(jdir=JOBDIR))
# shell("mail -s \"[SUCCESS] our_cluster:{jdir} finished \" schmied@mpi-cbg.de < /dev/null".format(jdir=JOBDIR))
# onerror:
# shell("mail -s \"[ERROR] out_cluster:{jdir}\" xxx@mpi-cbg.de < /dev/null".format(jdir=JOBDIR))
#onerror:
# shell("mail -s \"[ERROR] out_cluster:{jdir}\" schmied@mpi-cbg.de < /dev/null".format(jdir=JOBDIR))
......@@ -32,7 +32,7 @@
"deconvolution" :
{
"lsf_extra" : "-n 2 -R \"span[hosts=1] rusage[mem=50000]\"",
"lsf_extra" : "-n 7 -R \"span[hosts=1] rusage[mem=50000]\"",
"lsf_q" : "gpu"
},
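For reference, combined with the --cluster template from the README, this entry expands to roughly the following submission call per deconvolution job (illustrative; Snakemake appends the actual job script):
```bash
bsub -q gpu -n 7 -R "span[hosts=1] rusage[mem=50000]" <jobscript>
```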
......
......@@ -140,6 +140,7 @@ if (compute_on.equalsIgnoreCase( "GPU (Nvidia CUDA via JNA)" ) )
cuda_settings = "cuda_directory=" + directory_cuda + " " +
"select_native_library_for_cudafourierconvolution=" + cudafourierconvolution + " " +
"gpu_1 ";
System.out.println( "cuda_settings=" + cuda_settings );
}
else if (compute_on.equalsIgnoreCase( "CPU (Java)" ) )
......@@ -155,11 +156,43 @@ else
}
System.out.println( "compute_string=" + compute_string );
System.out.println( "cuda_settings=" + cuda_settings );
// Execute Fiji Plugin
System.out.println( "=======================================================" );
System.out.println( "Starting Deconvolution" );
System.out.println("Fuse/Deconvolve Dataset select_xml=" + image_file_directory + merged_xml + " " +
"process_angle=[All angles] " +
"process_channel=[All channels] " +
"process_illumination=[All illuminations] " +
"process_timepoint=[" + process_timepoint + "] " +
"processing_timepoint=[Timepoint " + parallel_timepoints + "] " +
"type_of_image_fusion=[Multi-view deconvolution] " +
"bounding_box=[Define manually] " +
"fused_image=[Save as TIFF stack] " +
"minimal_x=" + minimal_x_deco + " " +
"minimal_y=" + minimal_y_deco + " " +
"minimal_z=" + minimal_z_deco + " " +
"maximal_x=" + maximal_x_deco + " " +
"maximal_y=" + maximal_y_deco + " " +
"maximal_z=" + maximal_z_deco + " " +
"imglib2_container=" + imglib2_container_deco + " " +
"type_of_iteration=[" + type_of_iteration + "] " +
"osem_acceleration=[" + osem_acceleration + "] " +
"number_of_iterations=" + iterations + " " +
// "adjust_blending_parameters " +
"use_tikhonov_regularization " +
"tikhonov_parameter=" + Tikhonov_parameter + " " +
"compute=[" + compute + "] " +
compute_string +
"psf_estimation=[" + psf_estimation + "] " +
"psf_display=[Do not show PSFs] " +
"output_file_directory=" + deco_output_file_directory + " " +
cuda_settings +
channel_string +
"psf_size_x=" + psf_size_x + " " +
"psf_size_y=" + psf_size_y + " " +
"psf_size_z=" + psf_size_z + "");
try {
IJ.run("Fuse/Deconvolve Dataset",
"select_xml=" + image_file_directory + merged_xml + " " +
......
......@@ -9,8 +9,8 @@ import java.io.FilenameFilter;
runtime = Runtime.getRuntime();
System.out.println(runtime.availableProcessors() + " cores available for multi-threading");
//Prefs.setThreads(2); // defines the number of threads allowed
//print("Threads: "+Prefs.getThreads()); // prints thread setting in output
Prefs.setThreads(1); // defines the number of threads allowed
print("Threads: "+Prefs.getThreads()); // prints thread setting in output
System.out.println("Start loading parameters");
......@@ -114,15 +114,37 @@ System.out.println( "rotation_around = " + rotation_around );
// Calibration
System.out.println("=========================================================");
System.out.println("Calibration:");
float pixel_distance_x = Float.parseFloat( System.getProperty( "pixel_distance_x" ) );
float pixel_distance_y = Float.parseFloat( System.getProperty( "pixel_distance_y" ) );
float pixel_distance_z = Float.parseFloat( System.getProperty( "pixel_distance_z" ) );
pixel_unit = System.getProperty( "pixel_unit" );
manual_calibration_czi = System.getProperty( "manual_calibration_czi" );
float czi_pixel_distance_x = Float.parseFloat( System.getProperty( "czi_pixel_distance_x" ) );
float czi_pixel_distance_y = Float.parseFloat( System.getProperty( "czi_pixel_distance_y" ) );
float czi_pixel_distance_z = Float.parseFloat( System.getProperty( "czi_pixel_distance_z" ) );
czi_pixel_unit = System.getProperty( "czi_pixel_unit" );
// builds string for calibration override
if (manual_calibration_czi.equalsIgnoreCase( "No" ) )
{
modify_calibration = "";
manual_calibration_string = "";
System.out.println( "Calibration set to automatic" );
}
System.out.println( "pixel_distance_x = " + pixel_distance_x );
System.out.println( "pixel_distance_y = " + pixel_distance_y );
System.out.println( "pixel_distance_z = " + pixel_distance_z );
System.out.println( "pixel_unit = " + pixel_unit );
else if (manual_calibration_czi.equalsIgnoreCase( "Yes" ) )
{
modify_calibration = "modify_calibration ";
manual_calibration_string = "pixel_distance_x=" + czi_pixel_distance_x + " " +
"pixel_distance_y=" + czi_pixel_distance_y + " " +
"pixel_distance_z=" + czi_pixel_distance_z + " " +
"pixel_unit=" + czi_pixel_unit + " ";
System.out.println( "Calibration set to manual" );
System.out.println( "modify_calibration:" + modify_calibration );
System.out.println( "manual_calibration_string:" + manual_calibration_string );
}
else
{
System.out.println( "Manual calibration setting bad" );
}
// Executes Fiji plugin
System.out.println("=========================================================");
......@@ -134,13 +156,11 @@ System.out.println("Define Multi-View Dataset type_of_dataset=[Zeiss Lightsheet
angle_string +
channel_string +
illum_string +
"modify_calibration " +
modify_calibration +
"modify_rotation_axis " +
"pixel_distance_x=" + pixel_distance_x + " " +
"pixel_distance_y=" + pixel_distance_y + " " +
"pixel_distance_z=" + pixel_distance_z + " " +
"pixel_unit=" + pixel_unit + " " +
"rotation_around=" + rotation_around + "");
manual_calibration_string +
"rotation_around=" + rotation_around +
"");
try {
IJ.run("Define Multi-View Dataset",
......@@ -151,12 +171,13 @@ IJ.run("Define Multi-View Dataset",
angle_string +
channel_string +
illum_string +
"modify_calibration " +
modify_calibration +
"modify_rotation_axis " +
"pixel_distance_x=" + pixel_distance_x + " " +
"pixel_distance_y=" + pixel_distance_y + " " +
"pixel_distance_z=" + pixel_distance_z + " " +
"pixel_unit=" + pixel_unit + " " +
manual_calibration_string +
//"pixel_distance_x=" + pixel_distance_x + " " +
//"pixel_distance_y=" + pixel_distance_y + " " +
//"pixel_distance_z=" + pixel_distance_z + " " +
//"pixel_unit=" + pixel_unit + " " +
"rotation_around=" + rotation_around + "");
}
catch ( e ) {
......
common: {
# directory that contains the bean shell scripts and Snakefile
bsh_directory: "/projects/pilot_spim/Christopher/snakemake-workflows/spim_registration/timelapse/",
# Directory that contains the cuda libraries
directory_cuda: "/sw/users/schmied/cuda/",
# Directory that contains the current working Fiji
#fiji-app: "/sw/users/schmied/packages/2015-06-08_Fiji.app.cuda/ImageJ-linux64",
fiji-app: "/sw/users/schmied/packages/2015-05-29_Fiji_2.3.9_SNAP.app.cuda/ImageJ-linux64",
fiji-prefix: "/sw/bin/xvfb-run -a", # calls xvfb for Fiji headless mode
# xml file names without .xml suffix
first_xml_filename: "Dual_Channel", # Name of the xml file for the .czi or .tif files
hdf5_xml_filename: '"hdf5_Dual_Channel"', # Name of .xml file for the hdf5 data after resave_hdf5
merged_xml: "hdf5_Dual_Channel_merge", # Name of .xml file after merge
# Describe the dataset
ntimepoints: 2, # number of timepoints of dataset
angles: "0,72,144,216,288", # angles
channels: "green,red", # channels
illumination: "0", # illuminations
pixel_distance_x: '0.28590', # Manual calibration x
pixel_distance_y: '0.28590', # Manual calibration y
pixel_distance_z: '1.50000', # Manual calibration z
pixel_unit: "um", # unit of manual calibration
# Use switches to decide which processing steps you need:
# transformation_switch: "timelapse" > standard processing;
#   after timelapse registration the pipeline goes directly into fusion
# "timelapse_duplicate" > for dual channel processing where one channel contains the beads;
#   duplicates the transformations onto the other channel
transformation_switch: "timelapse_duplicate",
# Switches between content-based fusion and deconvolution
# "deconvolution" > for deconvolution
# "fusion" > for content based fusion
fusion_switch: "fusion"
}
define_xml_czi: {
first_czi: "2015-02-20_LZ2_Stock48_Stock58.czi", # master .czi file
rotation_around: "X-Axis", # axis of acquisition
bsh_file: "define_czi.bsh" # .bsh script for defining .czi file
}
define_xml_tif: {
# file pattern of .tif files
# for multi channel give spim_TL{tt}_Angle{a}_Channel{c}.tif
# SPIM file pattern: for padded zeros use tt
image_file_pattern: 'img_TL{{t}}_Angle{{a}}.tif',
# Settings for ImageJ Opener
type_of_dataset: '"Image Stacks (ImageJ Opener)"',
multiple_timepoints: '"YES (one file per time-point)"', # or NO (one time-point)
multiple_angles: '"YES (one file per angle)"', # or NO (one angle)
multiple_channels: '"NO (one channel)"', # or "\"NO (one channel)\""
multiple_illumination_directions: '"NO (one illumination direction)"', # or YES (one file per illumination direction)
imglib_container: '"ArrayImg (faster)"', # '"ArrayImg (faster)"'
bsh_file: "define_tif_zip.bsh"
}
resave_hdf5: {
# Resaves .tif or .czi data into hdf5
# Subsampling and resolution settings for hdf5: data dependent
hdf5_chunk_sizes: '"{{ {{32,32,4}}, {{32,32,4}}, {{16,16,16}}, {{16,16,16}} }}"',
subsampling_factors: '"{{ {{1,1,1}}, {{2,2,1}}, {{4,4,1}}, {{8,8,1}} }}"',
# Standard settings for cluster processing
setups_per_partition: '0',
timepoints_per_partition: '1',
resave_timepoint: '"All Timepoints"',
resave_angle: '"All angles"',
resave_channel: '"All channels"',
resave_illumination: '"All illuminations"',
bsh_file: "export.bsh"
}
registration: {
# reg_process_channel:
# Single Channel: '"All channels"'
# Dual Channel: '"All channels"'
# Dual Channel one Channel contains beads: '"Single channel (Select from List)"'
reg_process_channel: '"Single channel (Select from List)"',
# reg_processing_channel:
# Dual Channel setting for 1 Channel contains the beads
reg_processing_channel: '"red"',
# reg_interest_points_channel:
# Single Channel: '"beads"'
# Dual Channel: '"beads,beads"'
# Dual Channel: Channel does not contain the beads '"[DO NOT register this channel],beads"'
reg_interest_points_channel: '"beads"',
# type of detection: '"Difference-of-Mean (Integral image based)"' or '"Difference-of-Gaussian"'
type_of_detection: '"Difference-of-Mean (Integral image based)"',
# Settings for Difference-of-Mean
# For multiple channels 'value1,value2' delimiter is ,
reg_radius_1: '2',
reg_radius_2: '3',
reg_threshold: '0.005',
# Settings for Difference-of-Gaussian
# For multiple channels 'value1,value2' delimiter is ,
sigma: '1.8',
threshold_gaussian: '0.0080',
# Processing setting for Difference-of-Gaussian detection
# compute_on:
compute_on: '"GPU accurate (Nvidia CUDA via JNA)"',
separableconvolution: '"libSeparableConvolutionCUDALib.so"',
# Downsampling settings
downsample_detection: "No", # "No" or "Yes"
downsample_xy: '"Match Z Resolution (less downsampling)"',
downsample_z: "1x",
# Standard Settings for bead based registration
label_interest_points: '"beads"',
reg_process_timepoint: '"Single Timepoint (Select from List)"',
reg_process_angle: '"All angles"',
reg_process_illumination: '"All illuminations"',
subpixel_localization: '"3-dimensional quadratic fit"',
detection_min_max: "find_maxima",
type_of_registration: '"Register timepoints individually"',
algorithm: '"Fast 3d geometric hashing (rotation invariant)"',
transformation_model: "Affine",
allowed_error_for_ransac: '5',
significance: '10',
fix_tiles: '"Fix first tile"',
map_back_tiles: '"Map back to first tile using rigid model"',
model_to_regularize_with: "Rigid",
lambda: '0.10',
imglib_container: '"ArrayImg (faster)"',
bsh_file: "registration.bsh" # .bsh script for registration
}
xml_merge: {
bsh_file: "xml_merge.bsh"
}
timelapse: {
reference_timepoint: '0', # Reference timepoint
# Standard settings for timelapse registration
type_of_registration_timelapse: '"Match against one reference timepoint (no global optimization)"',
timelapse_process_timepoints: '"All Timepoints"',
bsh_file: "timelapse_registration.bsh"
}
dublicate_transformations: {
# For dual channel processing where only one channel contains beads,
# this allows you to duplicate the transformation for the
# channel that does not contain beads
source_dublication: "red", # source channel
target_dublication: "green", # target channel
duplicate_which_transformations: '"Replace all transformations"', # mode of duplication
bsh_file: "dublicate_transformations.bsh" # .bsh script for duplication
}
fusion: {
# content based multiview fusion
# supports multi channel without new settings
downsample: '1', # set downsampling
# Cropping parameters of full resolution
minimal_x: '220',
minimal_y: '40',
minimal_z: '-290',
maximal_x: '976',
maximal_y: '1892',
maximal_z: '472',
# fused_image: '"Append to current XML Project"', does not work yet
process_timepoint: '"Single Timepoint (Select from List)"',
process_angle: '"All angles"',
process_channel: '"All channels"',
process_illumination: '"All illuminations"',
imglib2_container_fusion: '"ArrayImg"',
interpolation: '"Linear Interpolation"',
pixel_type: '"16-bit unsigned integer"',
imglib2_data_container: '"ArrayImg (faster)"',
process_views_in_paralell: '"All"',
xml_output: '"Save every XML with user-provided unique id"',
bsh_file: "fusion.bsh"
}
external_transform: {
# Downsamples for deconvolution
# BUG: external transformation breaks .xml file
# channel setting: '"all_channels"'
channel_setting: '"green,red"',
transform_timepoint: '"All Timepoints"',
transform_angle: '"All angles"',
transform_channel: '"All channels"',
transform_illumination: '"All illuminations"',
apply_transformation: '"Current view transformations (appends to current transforms)"',
define_mode_transform: '"Matrix"',
# Matrix for downsampling
matrix_transform: '"0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0"',
transformation: '"Rigid"',
bsh_file: "transform.bsh"
}
deconvolution: {
iterations: '1', # number of iterations
# Cropping parameters: take downsampling into account
minimal_x_deco: '190',
minimal_y_deco: '-16',
minimal_z_deco: '-348',
maximal_x_deco: '1019',
maximal_y_deco: '1941',
maximal_z_deco: '486',
# Channel settings for deconvolution
# Single Channel: '"beads"'
# Dual Channel: '"beads,beads"'
# Dual Channel one channel contains beads: '"[Same PSF as channel red],beads"'
detections_to_extract_psf_for_channel: '"[Same PSF as channel red],beads"',
# Settings for GPU or CPU processing
# '"CPU (Java)"' or '"GPU (Nvidia CUDA via JNA)"'
compute_on: '"GPU (Nvidia CUDA via JNA)"',
cudafourierconvolution: "libFourierConvolutionCUDALib.so", # GPU processing name of cuda library
# Standard settings for deconvolution
process_timepoint: '"Single Timepoint (Select from List)"',
process_angle: '"All angles"',
process_channel: '"All channels"',
process_illumination: '"All illuminations"',
type_of_iteration: '"Efficient Bayesian - Optimization I (fast, precise)"',
Tikhonov_parameter: '0.0006',
compute: '"in 512x512x512 blocks"',
osem_acceleration: '"1 (balanced)"',
psf_estimation: '"Extract from beads"',
psf_size_x: '19',
psf_size_y: '19',
psf_size_z: '25',
imglib2_container: '"ArrayImg"',
bsh_file: "deconvolution.bsh"
}
hdf5_output: {
# writes new hdf5 dataset for fusion output: will be obsolete
# Naming pattern of output
# Single Channel: TP{t}_Chgreen_Ill0_Ang0,72,144,216,288.tif > Ch{name} is added here
# Dual Channel: TP{t}_Ch{0}_Ill0_Ang0,72,144,216,288.tif > Ch{name} is added here
output_image_file_pattern: 'TP{{t}}_Ch{{c}}_Ill0_Ang0,72,144,216,288.tif',
# channel setting
output_multiple_channels: '"YES (one file per channel)"', # '"YES (one file per channel)"' or '"NO (one channel)"'
output_channels: "green,red",
# .xml file names
output_xml: '"fused_Dual_Channel"',
output_hdf5_xml: '"hdf5_fused_Dual_Channel"',
output_timepoints: '0-1', # Timepoints format: '1-2'
# pixel size of output: take downsampling into account!
output_pixel_distance_x: 0.28590,
output_pixel_distance_y: 0.28590,
output_pixel_distance_z: 0.28590,
output_pixel_unit: 'um',
# specify whether the data is 16Bit or 32Bit
# the output of fusion is 16Bit, the output of deconvolution is 32Bit
output_data_type: "16Bit", # "32Bit" or "16Bit"
# if data is 32Bit then the data is converted into 16Bit data
convert_32bit: '"[Use min/max of first image (might saturate intenities over time)]"',
# subsampling and chunk size settings: dataset dependent
subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}}, {{4,4,4}}, {{8,8,8}} }}"', # data dependent
chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}}, {{16,16,16}}, {{16,16,16}} }}"', # data dependent
# subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}} }}"',
# chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}} }}"',
# Standard settings for hdf5_output
output_type_of_dataset: '"Image Stacks (ImageJ Opener)"',
output_multiple_timepoints: '"YES (one file per time-point)"',
output_multiple_angles: '"NO (one angle)"',
output_illumination_directions: '"NO (one illumination direction)"',
output_imglib_container: '"ArrayImg (faster)"',
bsh_file_define: "define_output.bsh", # .bsh script for defining the dataset
bsh_file_hdf5: "export_output.bsh" # .bsh script for resaving into hdf5
}
......@@ -9,8 +9,8 @@ import java.io.FilenameFilter;
runtime = Runtime.getRuntime();
System.out.println(runtime.availableProcessors() + " cores available for multi-threading");
Prefs.setThreads(1); // defines the number of threads allowed
print("Threads: "+Prefs.getThreads()); // prints thread setting in output
//Prefs.setThreads(1); // defines the number of threads allowed
//print("Threads: "+Prefs.getThreads()); // prints thread setting in output
System.out.println("Start loading parameters");
......
......@@ -9,8 +9,8 @@ import java.io.FilenameFilter;
runtime = Runtime.getRuntime();
System.out.println(runtime.availableProcessors() + " cores available for multi-threading");
Prefs.setThreads(1); // defines the number of threads allowed
print("Threads: "+Prefs.getThreads()); // prints thread setting in output
//Prefs.setThreads(1); // defines the number of threads allowed
//print("Threads: "+Prefs.getThreads()); // prints thread setting in output
System.out.println("Start loading parameters");
......
common: {
# ============================================================================
#
# yaml example file for single channel processing
#
# General settings for processing
#
# ============================================================================
# directory that contains the bean shell scripts and Snakefile
bsh_directory: "/projects/pilot_spim/Christopher/snakemake-workflows/spim_registration/timelapse/",
# Directory that contains the cuda libraries
directory_cuda: "/sw/users/schmied/cuda/",
# Directory that contains the current working Fiji
fiji-app: "/sw/users/schmied/packages/2015-06-30_Fiji.app.cuda/ImageJ-linux64",
fiji-prefix: "/sw/users/schmied/packages/xvfb-run -a", # calls xvfb for Fiji headless mode
sysconfcpus: "sysconfcpus -n",
# ============================================================================
# Processing switches
# Description: Use switches to decide which processing steps you need:
#
# Options:
# transformation_switch: "timelapse" > standard processing;
#   after timelapse registration the pipeline goes directly into fusion
# "timelapse_duplicate" > for dual channel processing where one channel contains the beads;
#   duplicates the transformations onto the other channel
#
# fusion_switch: switches between content-based fusion and deconvolution
# "deconvolution" > for deconvolution
# "fusion" > for content based fusion
# ============================================================================
#
# Transformation switch:
transformation_switch: "timelapse",
# Fusion switch:
fusion_switch: "fusion",
# ============================================================================
# xml file names
#
# xml file names without .xml suffix
# ============================================================================
first_xml_filename: 'single', # Name of the xml file for the .czi or .tif files
hdf5_xml_filename: '"hdf5_single"', # Name of .xml file for the hdf5 data after resave_hdf5
merged_xml: 'hdf5_single_merge', # Name of .xml file after merge
# ============================================================================
# Describe the dataset
#
# Options: number of timepoints
# angles
# channels
# illuminations
# pixel size
# ============================================================================
ntimepoints: 2, # number of timepoints of dataset
angles: "0,72,144,216,288", # angles
channels: "green", # channels
illumination: "0", # illuminations
pixel_distance_x: '0.28590106964', # Manual calibration x
pixel_distance_y: '0.28590106964', # Manual calibration y
pixel_distance_z: '1.50000', # Manual calibration z
pixel_unit: "um", # unit of manual calibration
# ----------------------------------------------------------------------------
# For .czi datasets
# master .czi file
first_czi: "2015-02-21_LZ1_Stock68_3.czi",
# ----------------------------------------------------------------------------
# For .tif datasets
# file pattern of .tif files:
# for multi channel give spim_TL{tt}_Angle{a}_Channel{c}.tif
# for padded zeros use tt
image_file_pattern: 'img_TL{{t}}_Angle{{a}}.tif',
# ============================================================================
# Detection and registration
#
# Description: settings for interest point detection and registration
# Options: Single channel and dual channel processing
# Difference-of-mean or difference-of-gaussian detection
# ============================================================================
# reg_process_channel:
# Single Channel: '"All channels"'
# Dual Channel: '"All channels"'
# Dual Channel one Channel contains beads: '"Single channel (Select from List)"'
reg_process_channel: '"All channels"',
#
# Dual channel where only one channel contains the beads: which channel contains the beads?
reg_processing_channel: '"green"',
#
# reg_interest_points_channel:
# Single Channel: '"beads"'
# Dual Channel: '"beads,beads"'
# Dual Channel: Channel does not contain the beads '"[DO NOT register this channel],beads"'
reg_interest_points_channel: '"beads"',
#
# type of detection: '"Difference-of-Mean (Integral image based)"' or '"Difference-of-Gaussian"'
type_of_detection: '"Difference-of-Mean (Integral image based)"',
# Settings for Difference-of-Mean
# For multiple channels 'value1,value2' delimiter is ,
reg_radius_1: '2',
reg_radius_2: '3',
reg_threshold: '0.005',
# Settings for Difference-of-Gaussian
# For multiple channels 'value1,value2' delimiter is ,
sigma: '1.8',
threshold_gaussian: '0.0080',
# ============================================================================
# Timelapse registration
#
# Description: settings for timelapse registration
# Options: reference timepoint
# ============================================================================
reference_timepoint: '0', # Reference timepoint
# ============================================================================
# Content-based multiview fusion
#
# Description: settings for content-based multiview fusion
# Options: downsampling
# Cropping parameters based on full resolution
# ============================================================================
downsample: '2', # set downsampling
minimal_x: '190', # Cropping parameters of full resolution
minimal_y: '-16',
minimal_z: '-348',
maximal_x: '1019',
maximal_y: '1941',
maximal_z: '486',
# ============================================================================
# Multiview deconvolution
#
# Description: settings for multiview deconvolution
# Options: number of iterations
# Cropping parameters taking downsampling into account
# Channel settings for deconvolution
# ============================================================================
iterations: '5', # number of iterations
minimal_x_deco: '190', # Cropping parameters: take downsampling into account
minimal_y_deco: '-16',
minimal_z_deco: '-348',
maximal_x_deco: '1019',
maximal_y_deco: '1941',
maximal_z_deco: '486',
#
# Channel settings for deconvolution
# Single Channel: '"beads"'
# Dual Channel: '"beads,beads"'
# Dual Channel one channel contains beads: '"[Same PSF as channel red],beads"'
detections_to_extract_psf_for_channel: '"[Same PSF as channel red],beads"',
#
# ============================================================================
# Resave output
#
# Description: writes new hdf5 dataset for fusion output
# Options: Naming pattern of output based on channel number
# Channel settings
# File name for resaving output into hdf5
# Pixel size > isotropic resolution
# Image type (16Bit from content-based fusion, 32Bit from deconvolution)
# ============================================================================
# Number of timepoints
output_timepoints: '0-1', # Timepoints format: '1-2'
#
# Naming pattern:
# Single Channel: TP{{t}}_Chgreen_Ill0_Ang0,72,144,216,288.tif > Ch{name} is added here
# Dual Channel: TP{{t}}_Ch{{0}}_Ill0_Ang0,72,144,216,288.tif > Ch{name} is added here
output_image_file_pattern: '"TP{{t}}_Chgreen_Ill0_Ang0,72,144,216,288.tif"',
#
# channel setting:
# Single channel: '"NO (one channel)"'
# Dual channel: '"YES (one file per channel)"'
output_multiple_channels: '"NO (one channel)"',
output_channels: "green",
#
# .xml file names
output_xml: '"fused_Single"',
output_hdf5_xml: '"hdf5_fused_Single"',
#
# pixel size of output: take downsampling into account!
output_pixel_distance_x: 0.28590106964,
output_pixel_distance_y: 0.28590106964,
output_pixel_distance_z: 0.28590106964,
output_pixel_unit: 'um',
#
# File type
output_data_type: "16Bit" # "32Bit" or "16Bit"
}
define_xml_czi: {
rotation_around: "X-Axis", # axis of acquisition
bsh_file: "define_czi.bsh" # .bsh script for defining .czi file
}
define_xml_tif: {
# Settings for ImageJ Opener
type_of_dataset: '"Image Stacks (ImageJ Opener)"',
multiple_timepoints: '"YES (one file per time-point)"', # or NO (one time-point)
multiple_angles: '"YES (one file per angle)"', # or NO (one angle)
multiple_channels: '"NO (one channel)"', # or "\"NO (one channel)\""
multiple_illumination_directions: '"NO (one illumination direction)"', # or YES (one file per illumination direction)
imglib_container: '"ArrayImg (faster)"', # '"ArrayImg (faster)"'
bsh_file: "define_tif_zip.bsh"
}
resave_hdf5: {
# Resaves .tif or .czi data into hdf5
# Subsampling and resolution settings for hdf5: data dependent
hdf5_chunk_sizes: '"{{ {{32,32,4}}, {{32,32,4}}, {{16,16,16}}, {{16,16,16}} }}"',
subsampling_factors: '"{{ {{1,1,1}}, {{2,2,1}}, {{4,4,1}}, {{8,8,1}} }}"',
# Standard settings for cluster processing
setups_per_partition: '0',
timepoints_per_partition: '1',
resave_timepoint: '"All Timepoints"',
resave_angle: '"All angles"',
resave_channel: '"All channels"',
resave_illumination: '"All illuminations"',
num_cores_hdf5_xml: "2",
num_cores_resave_hdf5: "2",
bsh_file: "export.bsh"
}
registration: {
# Processing setting for Difference-of-Gaussian detection
# compute_on:
compute_on: '"GPU accurate (Nvidia CUDA via JNA)"',
separableconvolution: '"libSeparableConvolutionCUDALib.so"',
# Downsampling settings
downsample_detection: "No", # "No" or "Yes"
downsample_xy: '"Match Z Resolution (less downsampling)"',
downsample_z: "1x",
# Standard Settings for bead based registration
label_interest_points: '"beads"',
reg_process_timepoint: '"Single Timepoint (Select from List)"',
reg_process_angle: '"All angles"',
reg_process_illumination: '"All illuminations"',
subpixel_localization: '"3-dimensional quadratic fit"',
detection_min_max: "find_maxima",
type_of_registration: '"Register timepoints individually"',
algorithm: '"Fast 3d geometric hashing (rotation invariant)"',
transformation_model: "Affine",
allowed_error_for_ransac: '5',
significance: '10',
fix_tiles: '"Fix first tile"',
map_back_tiles: '"Map back to first tile using rigid model"',
model_to_regularize_with: "Rigid",
lambda: '0.10',
imglib_container: '"ArrayImg (faster)"',
num_cores_reg: "2",
bsh_file: "registration.bsh" # .bsh script for registration
}
xml_merge: {
bsh_file: "xml_merge.bsh"
}
timelapse: {
# Standard settings for timelapse registration
type_of_registration_timelapse: '"Match against one reference timepoint (no global optimization)"',
timelapse_process_timepoints: '"All Timepoints"',
bsh_file: "timelapse_registration.bsh"
}
dublicate_transformations: {
# For dual channel processing where only one channel contains beads,
# this allows you to duplicate the transformation for the
# channel that does not contain beads
source_dublication: "red", # source channel
target_dublication: "green", # target channel
duplicate_which_transformations: '"Replace all transformations"', # mode of duplication
bsh_file: "dublicate_transformations.bsh" # .bsh script for duplication
}
fusion: {
# fused_image: '"Append to current XML Project"', does not work yet
process_timepoint: '"Single Timepoint (Select from List)"',
process_angle: '"All angles"',
process_channel: '"All channels"',
process_illumination: '"All illuminations"',
imglib2_container_fusion: '"ArrayImg"',
interpolation: '"Linear Interpolation"',
pixel_type: '"16-bit unsigned integer"',
imglib2_data_container: '"ArrayImg (faster)"',
process_views_in_paralell: '"All"',
xml_output: '"Save every XML with user-provided unique id"',
num_cores_fusion: "6",
bsh_file: "fusion.bsh"
}
external_transform: {
# Downsamples for deconvolution
# BUG: external transformation breaks .xml file
# channel setting: '"all_channels"'
channel_setting: '"green"',
transform_timepoint: '"All Timepoints"',
transform_angle: '"All angles"',
transform_channel: '"All channels"',
transform_illumination: '"All illuminations"',
apply_transformation: '"Current view transformations (appends to current transforms)"',
define_mode_transform: '"Matrix"',
# Matrix for downsampling
matrix_transform: '"0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0"',
transformation: '"Rigid"',
bsh_file: "transform.bsh"
}
deconvolution: {
# Settings for GPU or CPU processing
# '"CPU (Java)"' or '"GPU (Nvidia CUDA via JNA)"'
compute_on: '"GPU (Nvidia CUDA via JNA)"',
cudafourierconvolution: "libFourierConvolutionCUDALib.so", # GPU processing name of cuda library
# Standard settings for deconvolution
process_timepoint: '"Single Timepoint (Select from List)"',
process_angle: '"All angles"',
process_channel: '"All channels"',
process_illumination: '"All illuminations"',
type_of_iteration: '"Efficient Bayesian - Optimization I (fast, precise)"',
Tikhonov_parameter: '0.0006',
compute: '"in 512x512x512 blocks"',
osem_acceleration: '"1 (balanced)"',
psf_estimation: '"Extract from beads"',
psf_size_x: '19',
psf_size_y: '19',
psf_size_z: '25',
imglib2_container: '"ArrayImg"',
num_cores_deco: "7",
bsh_file: "deconvolution.bsh"
}
hdf5_output: {
# if data is 32Bit then the data is converted into 16Bit data
convert_32bit: '"[Use min/max of first image (might saturate intenities over time)]"',
# subsampling and chunk size settings: dataset dependent
subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}}, {{4,4,4}}, {{8,8,8}} }}"', # data dependent
chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}}, {{16,16,16}}, {{16,16,16}} }}"', # data dependent
# subsampling_output: '"{{ {{1,1,1}}, {{2,2,2}} }}"',
# chunk_sizes_output: '"{{ {{16,16,16}}, {{16,16,16}} }}"',
# Standard settings for hdf5_output
output_type_of_dataset: '"Image Stacks (ImageJ Opener)"',
output_multiple_timepoints: '"YES (one file per time-point)"',
output_multiple_angles: '"NO (one angle)"',
output_illumination_directions: '"NO (one illumination direction)"',
output_imglib_container: '"ArrayImg (faster)"',
num_cores_hdf5_xml_output: "2",
num_cores_resave_hdf5_output: "2",
bsh_file_define: "define_output.bsh", # .bsh script for defining the dataset
bsh_file_hdf5: "export_output.bsh" # .bsh script for resaving into hdf5
}
#!/bin/bash
# path of master file
source ../../master_preprocessing.sh
source ../master_preprocessing.sh
# creates directory for job files if not present
mkdir -p $jobs_compress
......@@ -11,14 +11,14 @@ echo $czi_compress
# splits up resaving into 1 job per .czi file and writes the given parameters
# into the job file
for i in $parallel_timepoints
for i in $timepoints
do
for a in $angle_prep
do
job="$jobs_compress/compress-$i-$a.job"
echo $job
echo "$XVFB_RUN -a $Fiji_resave \
echo "$XVFB_RUN $sysconfcpus $Fiji_resave \
-Ddir=$image_file_directory \
-Dtimepoint=$i \
-Dangle=$a \
......
......@@ -2,5 +2,5 @@
for file in `ls ${1} | grep ".job$"`
do
bsub -q short -n 4 -R span[hosts=1] -o "out.%J" -e "err.%J" ${1}/$file
bsub -q short -n 2 -R span[hosts=1] -o "out.%J" -e "err.%J" ${1}/$file
done
#!/bin/bash
source ../../master_preprocessing.sh
timepoint=`seq 0 391`
dir=/projects/pilot_spim/Christopher/2014-10-23_H2A_gsb_G3/
num_angles=1
pad=3
job_dir=/projects/pilot_spim/Christopher/pipeline_3.0/jobs_alpha_3.1/czi_resave/
for i in $timepoint
do
i=`printf "%0${pad}d" "$i"`
num=$(ls $dir/spim_TL"$i"_Angle*.tif |wc -l)
if [ $num -ne $num_angles ]
then
echo "TL"$i": TP or angles missing"
#bsub -q short -n 4 -R span[hosts=1] -o "out.%J" -e "err.%J" ${1}/*${i}*
else
echo "TL"$i": Correct"
fi
done
#!/bin/bash
source ../../master_preprocessing.sh
timepoint=`seq 0 391`
dir=/projects/pilot_spim/Christopher/2014-10-23_H2A_gsb_G3/
num_angles=1
pad=3
job_dir=/projects/pilot_spim/Christopher/pipeline_3.0/jobs_alpha_3.1/czi_resave/
for i in $timepoint
do
i=`printf "%0${pad}d" "$i"`
num=$(ls $dir/spim_TL"$i"_Angle*.tif |wc -l)
if [ $num -ne $num_angles ]
then
echo "TL"$i": TP or angles missing"
#bsub -q short -n 4 -R span[hosts=1] -o "out.%J" -e "err.%J" ${1}/*${i}*
else
echo "TL"$i": Correct"
fi
done
#!/bin/bash
# path of master file
source ../../master_preprocessing.sh
source ../master_preprocessing.sh
# creates directory for job files if not present
mkdir -p $jobs_resaving
# splits up resaving into 1 job per .czi file and writes the given parameters
# into the job file
for i in $parallel_timepoints
for i in $timepoints
do
for a in $angle_prep
do
job="$jobs_resaving/resave-$i-$a.job"
echo $job
echo "$XVFB_RUN -a $Fiji_resave \
echo "$XVFB_RUN $sysconfcpus $Fiji_resave \
-Ddir=$image_file_directory \
-Dtimepoint=$i \
-Dangle=$a \
......
......@@ -2,5 +2,5 @@
for file in `ls ${1} | grep ".job$"`
do
bsub -q short -n 4 -R span[hosts=1] -o "out.%J" -e "err.%J" ${1}/$file
bsub -q short -n 2 -R span[hosts=1] -o "out.%J" -e "err.%J" ${1}/$file
done
......@@ -21,7 +21,7 @@
# c=0,1 etc
# spim_TL{tt}_Angle{a}_Channel{c}.tif
#===============================================================================
image_file_directory="/projects/pilot_spim/Christopher/Test_pipeline_3.0/czi/"
image_file_directory="/projects/pilot_spim/Christopher/test_pipeline/single_channel/resave_test/"
# --- jobs directory -----------------------------------------------------------
job_directory="/projects/pilot_spim/Christopher/snakemake-workflows/spim_registration/tools/"
......@@ -31,26 +31,27 @@ job_directory="/projects/pilot_spim/Christopher/snakemake-workflows/spim_registr
# Important: For renaming and resaving .czi files the first .czi file has to
# carry the index (0)
#-------------------------------------------------------------------------------
pad="3" # for padded zeros
angle_prep="1" # angles format: "1 2 3"
timepoints="`seq 0 1`" # number of time points format: "`seq 0 1`"
angle_prep="1 2 3 4 5" # angles format: "1 2 3"
pad="2" # for padded zeros
num_angles="5"
#--- Renaming ------------------------------------------------------------------
first_index="0" # First index of czi files
last_index="391" # Last index of czi files
last_index="9" # Last index of czi files
first_timepoint="0" # Starts with 0
angles_renaming=(1 2 3 4 5) # Angles format: (1 2 3)
source_pattern=2014-10-23_H2A_gsb_G3\(\{index\}\).czi # Name of .czi files
source_pattern=2015-02-21_LZ1_Stock68_3\(\{index\}\).czi # Name of .czi files
target_pattern=spim_TL\{timepoint\}_Angle\{angle\}.czi # The output pattern of renaming
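# Worked example (illustrative, following the renaming loop in the renaming script
# later in this commit): with pad="2" and the patterns above, indices 0-4 become
# timepoint 00, angles 1-5, and indices 5-9 become timepoint 01:
#   2015-02-21_LZ1_Stock68_3(0).czi -> spim_TL00_Angle1.czi
#   2015-02-21_LZ1_Stock68_3(4).czi -> spim_TL00_Angle5.czi
#   2015-02-21_LZ1_Stock68_3(5).czi -> spim_TL01_Angle1.czi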
#-------------------------------------------------------------------------------
# Fiji settings
#-------------------------------------------------------------------------------
XVFB_RUN="/sw/bin/xvfb-run" # virtual frame buffer
Fiji_resave="/sw/users/schmied/lifeline/Fiji.app.lifeline2/ImageJ-linux64" # Fiji that works for resaving
XVFB_RUN="/sw/bin/xvfb-run -a" # virtual frame buffer
sysconfcpus="sysconfcpus -n 2"
Fiji: "/sw/users/schmied/packages/2015-06-30_Fiji.app.cuda/ImageJ-linux64"
Fiji_resave="/sw/users/schmied/packages/2014-06-02_Fiji.app_lifeline/ImageJ-linux64" # Fiji that works for resaving
#-------------------------------------------------------------------------------
# Pre-processing
#-------------------------------------------------------------------------------
......
#!/bin/bash
# path of master file
source ../master_preprocessing.sh
source master_preprocessing.sh
# path of source and target files
source_pattern=${image_file_directory}${source_pattern}
......@@ -15,19 +15,18 @@ t=`printf "%0${pad}d" "${t}"`
while [ $i -le $last_index ]; do
for a in "${angles_renaming[@]}"; do
source=${source_pattern/\{index\}/${i}}
tmp=${target_pattern/\{timepoint\}/${t}}
target=${tmp/\{angle\}/${a}
target=${tmp/\{angle\}/${a}}
echo ${source} ${target} # displays source file and target file with path
mv ${source} ${target} # renames source file into target pattern
#cp ${source} ${target} # alternatively copy source file and resave into target pattern
#mv ${source} ${target} # renames source file into target pattern
cp ${source} ${target} # alternatively copy source file and resave into target pattern
let i=i+1
done
t=$(( 10#${t} ))
let t=t+1
t=`printf "%0${pad}d" "${t}"`
......
#!/bin/bash
source ../../master_preprocessing.sh
source ../master_preprocessing.sh
mkdir -p ${jobs_split}
......@@ -13,7 +13,7 @@ for i in $parallel_timepoints
job="$jobs_split/split-$i-$a.job"
echo $job
echo "#!/bin/bash" > "$job"
echo "$XVFB_RUN -a $Fiji \
echo "$XVFB_RUN $sysconfcpus $Fiji \
-Dimage_file_directory=$image_file_directory \
-Dparallel_timepoints=$i \
-Dangle_prep=$a \
......