diff --git a/spim_registration/timelapse/Snakefile b/spim_registration/timelapse/Snakefile
index 5433300b952cc10ad819f39359c9eac443f73f70..d3abc3f4f98797299eb895f5f8ea7770211c8eac 100755
--- a/spim_registration/timelapse/Snakefile
+++ b/spim_registration/timelapse/Snakefile
@@ -1,5 +1,5 @@
 import os, glob, sys, re
-from timelaps_utils import produce_xml_merge_job_files, produce_string
+from timelaps_utils import produce_xml_merge_job_files, produce_string, padding_of_file_id
 
 #where are we (can be configured through -d/--directory flag)
 JOBDIR=os.path.abspath(os.path.curdir)
@@ -9,8 +9,9 @@ if JOBDIR[-1] != "/": # this checks if jobdir ends with slash if not it adds a s
 #data specific config file, expected to be inside JOBDIR
 configfile: "tomancak_czi.json"
 
+padding_format = "-{0:0"+str(padding_of_file_id(int(config["common"]["ntimepoints"])))+"d}-00.h5"
 # problematic needs padding of file_id
-datasets = [ config["common"]["hdf5_xml_filename"].strip('\"')+"-{0:02d}-00.h5".format(item) for item in range(int(config["common"]["ntimepoints"])) ]
+datasets = [ config["common"]["hdf5_xml_filename"].strip('\"')+padding_format.format(item) for item in range(int(config["common"]["ntimepoints"])) ]
 
 xml_merge_in = produce_xml_merge_job_files(datasets)
 
@@ -119,11 +120,7 @@ rule hdf5_xml:
 
         part_string += " > {log} 2>&1 && touch {output}"
         shell(part_string)
-
-        #create dummy files according to the number of timepoints found
-        # for index in range(int(config["common"]["ntimepoints"])):
-        #    shell("touch {basename}-0{file_id}-00.h5_empty".format(basename=config["common"]["hdf5_xml_filename"], file_id=index)) # problematic needs padding of file_id
-        
+       
 
 # resave .czi dataset as hdf5	
 rule resave_hdf5:
@@ -157,8 +154,7 @@ rule resave_hdf5:
         shell(part_string) 
        	       	
 rule registration:
-    input:  "{xml_base}-{file_id}-00.h5" # rules.resave_hdf5.output 
-    #input: rules.resave_hdf5.output, "{xml_base}-{file_id}-00.h5"
+    input:  "{xml_base}-{file_id}-00.h5" 
     output: "{xml_base}-{file_id,\d+}-00.h5_registered", #"{xml_base}.job_{file_id,\d+}.xml"
     log: "{xml_base}-{file_id}-registration.log"
     run:
@@ -195,10 +191,10 @@ rule registration:
         -- --no-splash {path_bsh}""",
            config["common"], 
            config["registration"],
-           file_id_w="{wildcards.file_id}",
+           file_id_w=wildcards.file_id,
            path_bsh=config["common"]["bsh_directory"] + config["registration"]["bsh_file"],
            jdir=JOBDIR,
-           input_xml="{wildcards.xml_base}")
+           input_xml=wildcards.xml_base)
         cmd_string += " > {log} 2>&1 && touch {output}"
        
         shell(cmd_string)
diff --git a/spim_registration/timelapse/timelaps_utils.py b/spim_registration/timelapse/timelaps_utils.py
index 9ce1c5fa12637ff16ab839185d94946be2b05579..353bd094593d54558975149ef5867aaf56053a77 100644
--- a/spim_registration/timelapse/timelaps_utils.py
+++ b/spim_registration/timelapse/timelaps_utils.py
@@ -1,5 +1,6 @@
 import re
 import os
+import math
 
 def produce_xml_merge_job_files(_datasets):
    fre = re.compile(r'(?P<xml_base>\w+)-(?P<file_id>\d+)-00.h5')
@@ -22,3 +23,6 @@ def produce_string(_fstring, *args, **kwargs):
          
    contents.update(kwargs)
    return _fstring.format(**contents)
+
+def padding_of_file_id(_n_timepoints):
+   return len(str(max(_n_timepoints - 1, 1)))  # digit width of the largest id (n-1); ceil(log10(n)) wrongly gave 0 for n == 1 and raised for n == 0