diff --git a/LICENSE.txt b/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/LICENSE.txt
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/environment.yaml b/environment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..909d33470d3670fd7a1248564daf4f5b71375204
Binary files /dev/null and b/environment.yaml differ
diff --git a/environment_GPU.yaml b/environment_GPU.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6e4271801aa89cbb97031dd4daea2fbebbcb8cb1
Binary files /dev/null and b/environment_GPU.yaml differ
diff --git a/eval_images.py b/eval_images.py
new file mode 100644
index 0000000000000000000000000000000000000000..2568b6cdde99be98edbe38de87ec350dc8753f98
--- /dev/null
+++ b/eval_images.py
@@ -0,0 +1,352 @@
+import argparse, sys, re, json
+import matplotlib.pyplot as plt
+import numpy as np
+from segment_anything import SamPredictor, SamAutomaticMaskGenerator, sam_model_registry
+import cv2, os
+from math import atan2, cos, sin, pi
+from pathlib import Path
+from PIL import Image
+from PIL.ExifTags import TAGS
+from datetime import datetime
+
+
+
+# helper functions
def show_mask(mask, ax, random_color=False, color=np.array([30/255, 144/255, 255/255, 0.3])):
    """Overlay a binary segmentation mask on a matplotlib axis as a translucent RGBA image."""
    if random_color:
        # Random RGB with a fixed 0.3 alpha channel.
        color = np.concatenate([np.random.random(3), np.array([0.3])], axis=0)
    height, width = mask.shape[-2:]
    overlay = mask.reshape(height, width, 1) * color.reshape(1, 1, -1)
    ax.imshow(overlay)
+    
def show_points(coords, labels, ax, marker_size=100):
    """Scatter prompt points: positives (label==1) in green, then negatives (label==0) in red."""
    for wanted_label, point_color in ((1, 'green'), (0, 'red')):
        pts = coords[labels == wanted_label]
        ax.scatter(pts[:, 0], pts[:, 1], color=point_color, marker='o', s=marker_size,
                   edgecolor='white', linewidth=0.75)
+    
def show_box(box, ax, angle=0, rotation_point='xy'):
    """Draw an (optionally rotated) green rectangle outline for box = (x, y, w, h)."""
    x0, y0, w, h = box[0], box[1], box[2], box[3]
    rect = plt.Rectangle((x0, y0), w, h, angle=angle, rotation_point=rotation_point,
                         edgecolor='green', facecolor=(0, 0, 0, 0), lw=2)
    ax.add_patch(rect)
+
def show_principal_axes(mean, angle, x_length, y_length, eigenvectors, ax, color=np.array([1, 1, 1, 1]), scale=1.0, linewidth=1.5):
    """Plot the two principal axes of a shape centred on `mean`.

    The major axis is drawn at `angle` (radians) with length `x_length`,
    the minor axis perpendicular to it with length `y_length`.
    `eigenvectors` is accepted for interface compatibility but not used here.
    """
    cx, cy = mean[0, 0], mean[0, 1]
    # Major axis first, then the perpendicular minor axis.
    for axis_angle, axis_length in ((angle, x_length), (angle - pi / 2, y_length)):
        dx = 0.5 * axis_length * scale * cos(axis_angle)
        dy = 0.5 * axis_length * scale * sin(axis_angle)
        ax.plot([cx - dx, cx + dx], [cy - dy, cy + dy], color=color, linewidth=linewidth)
+
def find_main_axes(segmentation_mask):
    """Run PCA on the largest contour of a binary mask.

    Returns the cv2.PCACompute2 triple (mean, eigenvectors, eigenvalues), or
    None when the mask contains no contours.  NOTE(review): callers that unpack
    three values will raise on the None case — confirm masks are non-empty.
    """
    contours, _ = cv2.findContours(segmentation_mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    if not contours:
        return None
    # Keep only the contour with the largest area.
    largest = max(contours, key=cv2.contourArea)

    # Principal components (main axes) of the contour points via PCA.
    points = np.array(largest[:, 0, :], dtype=np.float32)
    return cv2.PCACompute2(points, mean=np.empty((0)))
+
def find_bounding_rect(segmentation_mask):
    """Axis-aligned bounding rect (x, y, w, h) of the mask's largest contour.

    Returns None when the mask contains no contours.
    """
    contours, _ = cv2.findContours(segmentation_mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    if not contours:
        return None
    # Largest contour by area is assumed to be the object of interest.
    largest = max(contours, key=cv2.contourArea)
    x, y, w, h = cv2.boundingRect(largest)
    return (x, y, w, h)
+
def find_mask_margins(segmentation_mask):
    """Extent of the non-zero region of a mask along each image axis.

    Returns (x_length, y_length), where each length is the index difference
    between the last and first occupied column/row (NOT +1, i.e. one less than
    the pixel count — kept for backward compatibility).  Returns (0, 0) for an
    all-background mask instead of raising IndexError.
    """
    # Binarize: values > 128 become 255, everything else 0.  Exactly equivalent
    # to cv2.threshold(mask, 128, 255, cv2.THRESH_BINARY) but numpy-only.
    binary = np.where(np.asarray(segmentation_mask) > 128, 255, 0)

    def _span(axis):
        # Collapse the mask along `axis`; non-zero entries of the projection
        # mark occupied columns (axis=0) or rows (axis=1).
        hits = np.nonzero(binary.sum(axis=axis))[0]
        return int(hits[-1] - hits[0]) if hits.size else 0

    # horizontal extent, vertical extent
    return _span(0), _span(1)
+
def scan_folder_for_images(measurement, folder_path, image_extensions=('jpg', 'jpeg', 'png', 'gif')):
    """Build one measurement record per image file in `folder_path`.

    Each record is a copy of the `measurement` template with 'path' filled in;
    the returned list is sorted by path.
    """
    records = []
    for entry in os.listdir(folder_path):
        # Skip anything that is not a recognised image file.
        if not entry.lower().endswith(image_extensions):
            continue
        record = measurement.copy()
        record['path'] = os.path.join(folder_path, entry)
        records.append(record)
    records.sort(key=lambda rec: rec['path'])
    return records
+
def resolve_timestamp(output):
    """Read EXIF capture times for every record in `output`, in place.

    Adds 'time-string' and 'time-POSIX' (epoch seconds) to each record, sorts
    `output` in place by capture time, and stores 'time-measurement' as seconds
    elapsed since the earliest capture.  Exits the program when image metadata
    cannot be read or lacks a DateTimeOriginal tag.

    BUG FIX: the original rebound a *local* name with `output = sorted(...)`,
    so the caller's list was never actually sorted; we now sort in place.
    """
    for record in output:
        try:
            image = Image.open(record['path'])
            exif_data = image._getexif()
            if exif_data is not None:
                # Look for the capture-time tag (DateTimeOriginal).
                for tag_id, value in exif_data.items():
                    if TAGS.get(tag_id, tag_id) == 'DateTimeOriginal':
                        record['time-string'] = value
                        # Absolute capture time as seconds since the epoch.
                        record['time-POSIX'] = datetime.strptime(value, "%Y:%m:%d %H:%M:%S").timestamp()
        except Exception as e:
            print(f"Error reading image metadata: {e}")
            sys.exit(1)  # terminate program

        if record.get('time-POSIX') is None:
            # EXIF missing or without DateTimeOriginal: sorting on None below
            # would raise TypeError, so fail with a clear message instead.
            print("Error reading image metadata: no DateTimeOriginal tag in '{}'".format(record['path']))
            sys.exit(1)

    if not output:
        return

    # Sort the caller's list in place, then express times relative to the start.
    output.sort(key=lambda rec: rec['time-POSIX'])
    start_time = output[0]['time-POSIX']
    for record in output:
        record['time-measurement'] = record['time-POSIX'] - start_time
+
+
+
def _point_grid(width, height, left, right, bottom, top, pixel_step=100):
    """Normalized SAM prompt-point grid covering the central crop region."""
    xs = list(range(left, right + 1, pixel_step))
    ys = list(range(bottom, top + 1, pixel_step))
    grid = np.zeros((len(xs) * len(ys), 2))
    for j, y in enumerate(ys):
        for i, x in enumerate(xs):
            grid[i + j * len(xs), 0] = x / width
            grid[i + j * len(xs), 1] = y / height
    return [grid]  # SamAutomaticMaskGenerator expects a list of arrays


def eval(model_path, image_path, samp_time, samp_size):
    """Detect the heated pellet on every image in `image_path` with SAM and track its size.

    Parameters
    ----------
    model_path : str
        Path to the SAM "vit_l" checkpoint file.
    image_path : str
        Folder containing the measurement images.
    samp_time : str
        Substring of the EXIF capture time string identifying the reference image.
    samp_size : str
        Physical pellet size on the reference image, e.g. "5.2 mm" (comma or
        dot decimal separator).

    Writes annotated images, a size-vs-time plot and `output.txt` (JSON) into
    an `output/` subfolder next to the images.  Returns None.
    """
    model_path = Path(model_path)
    image_path = Path(image_path)

    # Parse "<number> <unit>"; accept ',' as decimal separator.
    try:
        size, units = re.match(r"([\d.,]+)\s*([a-zA-Z]+)", samp_size).groups()
        size = float(size.replace(',', '.'))
    except (AttributeError, ValueError):
        print('Wrong input for sample size. Exiting...')
        return  # BUG FIX: original fell through with `size`/`units` undefined

    for pth in (model_path, image_path):
        if not pth.exists():
            print('Provided path "{}" does not exists! Exiting...'.format(pth))
            return

    # Template record carried through the pipeline for every image.
    keys = ['path', 'time-string', 'time-POSIX', 'time-measurement', 'pixels', 'size', 'units']
    measurement = dict.fromkeys(keys)

    output = scan_folder_for_images(measurement, image_path)
    resolve_timestamp(output)

    # Locate the reference image whose capture time contains `samp_time`
    # and attach the known physical size to it.
    index_stamp = None
    for i, item in enumerate(output):
        if item['time-string'] and samp_time in item['time-string']:
            item['size'] = size
            item['units'] = units
            index_stamp = i
            break
    if index_stamp is None:
        print('Set time stamp not found in data. Exiting...')
        return

    # setup output folder
    output_path = Path(output[0]['path']).parent / 'output'
    output_path.mkdir(parents=True, exist_ok=True)

    # Build the (expensive) SAM model ONCE, not once per image as before.
    sam = sam_model_registry["vit_l"](checkpoint=model_path)

    # main processing loop over all images
    print('***START***')
    for item in output:
        img_path = item['path']
        image_bgr = cv2.imread(img_path)
        image = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
        height, width, _ = image.shape

        # Central crop region (10% of each dimension) used to seed prompt points.
        cropping_ratio = 0.1
        crop_width, crop_height = int(cropping_ratio * width), int(cropping_ratio * height)
        left = (width - crop_width) // 2
        bottom = (height - crop_height) // 2
        right = (width + crop_width) // 2
        top = (height + crop_height) // 2

        point_grid = _point_grid(width, height, left, right, bottom, top)

        # Generate candidate masks for the whole image with SamAutomaticMaskGenerator.
        mask_generator = SamAutomaticMaskGenerator(sam, points_per_side=None, point_grids=point_grid, min_mask_region_area=500)
        masks = mask_generator.generate(image)
        print('Number of detected masks is: {}'.format(len(masks)))

        if not masks:
            print('No mask found in the image. Continuing to other image...')
            continue  # skip to another image

        selected_angle = None
        selected_bbox = None
        selected_mean = None
        for mask in masks:
            segmentation_mask = cv2.Mat(255 * mask['segmentation'].astype(np.uint8))

            # Orientation of the mask from PCA of its largest contour.
            mean, eigenvectors, eigenvalues = find_main_axes(segmentation_mask)
            angle_rad = atan2(eigenvectors[0, 1], eigenvectors[0, 0])  # orientation in radians
            angle_deg = 180 / pi * angle_rad

            # Rotate the mask to axis-align it before measuring.
            M = cv2.getRotationMatrix2D(np.squeeze(mean), angle_deg, 1.0)
            rotated_mask = cv2.warpAffine(segmentation_mask, M, (segmentation_mask.shape[1], segmentation_mask.shape[0]))

            # Bounding rectangle of the axis-aligned mask.
            bbox = find_bounding_rect(rotated_mask)

            # Simple validity check: a pellet's width/height ratio lies in (3, 6).
            low, high = 3, 6
            ratio = bbox[2] / bbox[3]
            if low < ratio < high:
                selected_angle = angle_deg
                selected_bbox = bbox
                selected_mean = mean
                break

        if selected_bbox:
            # Width of the bbox around the found pellet, in pixels.
            item['pixels'] = selected_bbox[2]

            # Save the image with the detected bbox drawn on top.
            plt.figure(figsize=(18, 10))
            plt.imshow(image)
            show_box(selected_bbox, plt.gca(), angle=selected_angle, rotation_point=(selected_mean[0][0], selected_mean[0][1]))
            plt.axis('off')
            f_name = (output_path / Path(img_path).name).as_posix()
            plt.savefig(f_name)
            plt.close()
        else:
            print('No pellet found in the image. Continuing to other image...')

    # Convert pixel widths to physical size using the reference image.
    ref = output[index_stamp]
    if not ref['pixels']:
        # Without a pixel measurement on the reference image the scale is unknown.
        print('No pellet detected on the reference image; cannot calibrate size. Exiting...')
        return
    pix_size = ref['size'] / ref['pixels']
    for item in output:
        if item['pixels'] is not None:
            item['size'] = pix_size * item['pixels']
        item['units'] = units

    # Final figure: size evolution over time (only images where a pellet was found).
    measured = [x for x in output if x['pixels'] is not None]
    time = np.array([x['time-measurement'] for x in measured])
    dim = np.array([x['size'] for x in measured])
    plt.figure(figsize=(10, 10))
    plt.plot(time, dim)
    plt.xlabel('Time [s]')
    plt.ylabel('Size [{}]'.format(output[0]['units']))
    plt.title('Pellet size evolution in time')
    plt.savefig((output_path / 'size_evolution.png').as_posix())

    # Save all records as pretty-printed JSON.
    with open((output_path / 'output.txt').as_posix(), 'w') as file:
        json.dump(output, file, indent=2)

    print('***FINISHED***')
+
+
if __name__ == "__main__":
    # Command-line interface; all four arguments are positional, so renaming
    # the dests (arg1..arg4 -> meaningful names) changes only the --help text,
    # not how the program is invoked.
    parser = argparse.ArgumentParser(description="Program for analysing images, finding heated pellet and outputing its size")

    parser.add_argument("model_path", type=str, help="Path to model")
    parser.add_argument("image_path", type=str, help="Path to images")
    parser.add_argument("samp_time", type=str, help="Time of sample where pellet has been measured")
    parser.add_argument("samp_size", type=str, help="Pellet size")

    # Parse the command-line arguments.
    args = parser.parse_args()

    # Call the main function with the provided arguments.
    eval(args.model_path, args.image_path, args.samp_time, args.samp_size)
+
+
+
diff --git a/eval_images_GPU.py b/eval_images_GPU.py
new file mode 100644
index 0000000000000000000000000000000000000000..425bb36ea00ddfd7c3f066c6e375467edd7c65d5
--- /dev/null
+++ b/eval_images_GPU.py
@@ -0,0 +1,354 @@
+import argparse, sys, re, json
+import matplotlib.pyplot as plt
+import numpy as np
+from segment_anything import SamPredictor, SamAutomaticMaskGenerator, sam_model_registry
+import cv2, os
+from math import atan2, cos, sin, pi
+from pathlib import Path
+from PIL import Image
+from PIL.ExifTags import TAGS
+from datetime import datetime
+
+
+
+# helper functions
def show_mask(mask, ax, random_color=False, color=None):
    """Overlay a binary segmentation mask on a matplotlib axis as a translucent RGBA image.

    Parameters
    ----------
    mask : np.ndarray
        Binary mask; the last two dimensions are taken as (H, W).
    ax : matplotlib axis
        Axis to draw on via ``imshow``.
    random_color : bool
        When True, use a random RGB colour with a fixed 0.3 alpha.
    color : np.ndarray or None
        RGBA colour; defaults to a translucent dodger blue.
    """
    # BUG FIX: the default used to be a mutable ndarray default argument,
    # shared across calls; build the default per call instead.
    if color is None:
        color = np.array([30/255, 144/255, 255/255, 0.3])
    if random_color:
        color = np.concatenate([np.random.random(3), np.array([0.3])], axis=0)

    h, w = mask.shape[-2:]
    # Broadcast the (H, W, 1) mask against the (1, 1, 4) RGBA colour.
    mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
    ax.imshow(mask_image)
+    
def show_points(coords, labels, ax, marker_size=100):
    """Scatter SAM prompt points: green circles for positive (label 1), red for negative (label 0)."""
    shared = dict(marker='o', s=marker_size, edgecolor='white', linewidth=0.75)
    positive = coords[labels == 1]
    negative = coords[labels == 0]
    ax.scatter(positive[:, 0], positive[:, 1], color='green', **shared)
    ax.scatter(negative[:, 0], negative[:, 1], color='red', **shared)
+    
def show_box(box, ax, angle=0, rotation_point='xy'):
    """Draw a green, unfilled (x, y, w, h) rectangle on the axis, optionally rotated."""
    x0, y0 = box[0], box[1]
    width, height = box[2], box[3]
    patch = plt.Rectangle((x0, y0), width, height, angle=angle,
                          rotation_point=rotation_point,
                          edgecolor='green', facecolor=(0,0,0,0), lw=2)
    ax.add_patch(patch)
+
def show_principal_axes(mean, angle, x_length, y_length, eigenvectors, ax, color=np.array([1, 1, 1, 1]), scale=1.0, linewidth=1.5):
    """Plot the two principal axes of an object, centred on `mean`.

    The first axis has length ``x_length`` at ``angle`` (radians); the second
    is perpendicular with length ``y_length``. ``eigenvectors`` is unused and
    kept only for call-site compatibility.
    """
    cx, cy = mean[0, 0], mean[0, 1]
    # Each axis is a segment extending 0.5 * length * scale either side of the centre.
    for axis_angle, length in ((angle, x_length), (angle - pi/2, y_length)):
        dx = 0.5 * length * scale * cos(axis_angle)
        dy = 0.5 * length * scale * sin(axis_angle)
        ax.plot([cx - dx, cx + dx], [cy - dy, cy + dy], color=color, linewidth=linewidth)
+
def find_main_axes(segmentation_mask):
    """PCA of the largest contour in a binary mask.

    Returns the (mean, eigenvectors, eigenvalues) triple from
    ``cv2.PCACompute2``, or None when the mask has no contours
    (callers must guard against the None case before unpacking).
    """
    contours, _ = cv2.findContours(segmentation_mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    if not contours:
        return None

    # Keep only the contour enclosing the largest area (first one wins on ties).
    largest = max(contours, key=cv2.contourArea)

    # Principal components (main axes) of the contour points via PCA.
    points = np.array(largest[:, 0, :], dtype=np.float32)
    mean, eigenvectors, eigenvalues = cv2.PCACompute2(points, mean=np.empty((0)))
    return mean, eigenvectors, eigenvalues
+
def find_bounding_rect(segmentation_mask):
    """Axis-aligned bounding rectangle (x, y, w, h) of the largest contour in a binary mask.

    Returns None when the mask contains no contours.
    """
    contours, _ = cv2.findContours(segmentation_mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    if not contours:
        return None

    # Largest contour by enclosed area (first one wins on ties).
    largest = max(contours, key=cv2.contourArea)
    x, y, w, h = cv2.boundingRect(largest)
    return (x, y, w, h)
+
def find_mask_margins(segmentation_mask):
    """Horizontal and vertical pixel extent of the foreground region of a mask.

    Parameters
    ----------
    segmentation_mask : np.ndarray
        2-D grayscale mask; values > 128 are treated as foreground.

    Returns
    -------
    tuple of int
        (x_length, y_length): distance between the first and last foreground
        column and row. Returns (0, 0) for an empty mask — the original
        version raised IndexError on that input.
    """
    # Binarize with plain numpy, matching cv2.threshold(mask, 128, 255, THRESH_BINARY):
    # values > 128 become 255, everything else 0.
    binary = np.where(segmentation_mask > 128, 255, 0)

    def _span(axis):
        # Project onto one axis and measure the span of non-zero bins.
        nonzero = np.nonzero(binary.sum(axis=axis))[0]
        if nonzero.size == 0:
            # Robustness fix: empty mask previously crashed with IndexError.
            return 0
        return int(nonzero[-1] - nonzero[0])

    # axis=0 collapses rows -> horizontal span; axis=1 collapses columns -> vertical span.
    return _span(0), _span(1)
+
def scan_folder_for_images(measurement, folder_path, image_extensions=('jpg', 'jpeg', 'png', 'gif')):
    """Build one record per image file in `folder_path`, sorted by path.

    Each record is a shallow copy of the `measurement` template dict with its
    'path' key set to the full image path. Files whose (lower-cased) name does
    not end in one of `image_extensions` are skipped.
    """
    records = []
    # Sorting the directory entries up front yields the same order as sorting
    # the joined paths afterwards, since the folder prefix is constant.
    for entry in sorted(os.listdir(folder_path)):
        if not entry.lower().endswith(image_extensions):
            continue
        record = measurement.copy()
        record['path'] = os.path.join(folder_path, entry)
        records.append(record)
    return records
+
def resolve_timestamp(output):
    """Fill EXIF capture times into each record and sort `output` chronologically in place.

    For every record, reads the 'DateTimeOriginal' EXIF tag of the image at
    record['path'] into 'time-string' and 'time-POSIX' (epoch seconds), then
    sorts the list by 'time-POSIX' and writes 'time-measurement' as seconds
    elapsed since the earliest image. Exits the program when image metadata
    cannot be read or a capture time is missing.
    """
    for record in output:
        try:
            # Open the image and pull its EXIF dictionary (keyed by numeric tag id).
            image = Image.open(record['path'])
            exif_data = image._getexif()

            if exif_data is not None:
                # Look for the capture time tag (DateTimeOriginal).
                for tag_id, value in exif_data.items():
                    if TAGS.get(tag_id, tag_id) == 'DateTimeOriginal':
                        record['time-string'] = value
                        # Absolute capture time in seconds since the epoch.
                        record['time-POSIX'] = datetime.strptime(value, "%Y:%m:%d %H:%M:%S").timestamp()
                        break

        except Exception as e:
            print(f"Error reading image metadata: {e}")
            sys.exit(1) # terminate program

    if not output:
        return

    # Fail loudly when an image carries no capture time: the original left
    # 'time-POSIX' as None here, which made the sort below raise TypeError.
    missing = [r['path'] for r in output if r['time-POSIX'] is None]
    if missing:
        print('No EXIF capture time (DateTimeOriginal) found for: {}'.format(missing))
        sys.exit(1)

    # BUG FIX: the original sorted a rebound *local* list, so the caller's
    # list was never reordered; sort in place so callers see chronological order.
    output.sort(key=lambda r: r['time-POSIX'])
    start_time = output[0]['time-POSIX']
    for record in output:
        record['time-measurement'] = record['time-POSIX'] - start_time
+
+
+
def eval(model_path, image_path, samp_time, samp_size):
    """Segment the heated pellet in every image of a folder and track its size over time.

    Parameters
    ----------
    model_path : str
        Path to the SAM checkpoint (vit_l).
    image_path : str
        Folder containing the measurement images.
    samp_time : str
        Substring of the EXIF capture time of the reference image whose pellet
        size is known (e.g. "2023:05:12 10:30:00").
    samp_size : str
        Known pellet size at `samp_time` as number plus unit, e.g. "12.5 mm"
        (',' accepted as decimal separator).

    Side effects: writes annotated images, a size-vs-time plot and an
    'output.txt' JSON dump into an 'output' sub-folder next to the images.
    """
    model_path = Path(model_path)
    image_path = Path(image_path)

    # Parse "<number><unit>" from the size argument.
    try:
        size, units = re.match(r"([\d.,]+)\s*([a-zA-Z]+)", samp_size).groups()
        size = float(size.replace(',', '.'))
    except (AttributeError, ValueError):
        # BUG FIX: the original printed and fell through with `size`/`units`
        # undefined, causing a NameError further down.
        print('Wrong input for sample size. Exiting...')
        return

    for pth in (model_path, image_path):
        if not pth.exists():
            print('Provided path "{}" does not exists! Exiting...'.format(pth))
            return

    # Record template: one dict per image, filled in as processing progresses.
    keys = ['path', 'time-string', 'time-POSIX', 'time-measurement', 'pixels', 'size', 'units']
    measurement = dict.fromkeys(keys)

    output = scan_folder_for_images(measurement, image_path)
    if not output:
        print('No images found in "{}". Exiting...'.format(image_path))
        return
    resolve_timestamp(output)

    # Locate the reference image whose capture time contains samp_time and
    # seed it with the known physical size.
    index_stamp = None
    for i, item in enumerate(output):
        # 'time-string' may be None when no EXIF time was found for an image.
        if item['time-string'] and samp_time in item['time-string']:
            item['size'] = size
            item['units'] = units
            index_stamp = i
            break
    if index_stamp is None:
        print('Set time stamp not found in data. Exiting...')
        return

    # setup output folder
    output_path = Path(output[0]['path']).parent / 'output'
    output_path.mkdir(parents=True, exist_ok=True)

    # Load the SAM model once: it is loop-invariant (the original reloaded
    # the checkpoint for every image, which dominated the runtime).
    device = "cuda"
    sam = sam_model_registry["vit_l"](checkpoint=model_path)
    sam.to(device=device)

    # main processing loop over all images
    print('***START***')
    for item in output:
        img_path = item['path']
        image_bgr = cv2.imread(img_path)
        image = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)

        height, width, _ = image.shape

        # Prompt points are placed inside the central 10% crop of the frame.
        cropping_ratio = 0.1
        crop_width, crop_height = int(cropping_ratio * width), int(cropping_ratio * height)
        left = (width - crop_width) // 2
        bottom = (height - crop_height) // 2
        right = (width + crop_width) // 2
        top = (height + crop_height) // 2

        # Regular grid of prompt points in normalised [0, 1] coordinates,
        # as expected by SamAutomaticMaskGenerator's point_grids argument.
        pixel_step = 100
        x_list = list(range(left, right + 1, pixel_step))
        y_list = list(range(bottom, top + 1, pixel_step))
        point_grid = np.zeros((len(x_list) * len(y_list), 2))
        for j, y in enumerate(y_list):
            for i, x in enumerate(x_list):
                idx = i + j * len(x_list)
                point_grid[idx, 0] = x / width
                point_grid[idx, 1] = y / height
        point_grid = [point_grid]  # make it a list of arrays

        # Generate masks for the whole image using SamAutomaticMaskGenerator.
        mask_generator = SamAutomaticMaskGenerator(sam, points_per_side=None, point_grids=point_grid, min_mask_region_area=500)
        masks = mask_generator.generate(image)
        print('Number of detected masks is: {}'.format(len(masks)))

        if not masks:
            print('No mask found in the image. Continuing to other image...')
            continue # skip to another image

        selected_angle = None
        selected_bbox = None
        selected_mean = None
        for mask in masks:
            # Binary mask image (0/255) for contour analysis.
            segmentation_mask = cv2.Mat(255 * mask['segmentation'].astype(np.uint8))

            # Principal axes of the mask give the object orientation.
            mean, eigenvectors, eigenvalues = find_main_axes(segmentation_mask)
            angle_rad = atan2(eigenvectors[0, 1], eigenvectors[0, 0])  # orientation in radians
            angle_deg = 180 / pi * angle_rad

            # Rotate the mask so the main axis becomes horizontal, then box it.
            M = cv2.getRotationMatrix2D(np.squeeze(mean), angle_deg, 1.0)
            rotated_mask = cv2.warpAffine(segmentation_mask, M, (segmentation_mask.shape[1], segmentation_mask.shape[0]))
            bbox = find_bounding_rect(rotated_mask)

            # Simple validity check: pellet-like masks have an aspect ratio
            # (width/height after rotation) roughly between 4 and 5.
            low = 3
            high = 6
            ratio = bbox[2] / bbox[3]
            if low < ratio < high:
                selected_angle = angle_deg
                selected_bbox = bbox
                selected_mean = mean
                break

        if selected_bbox:
            # Record the bbox width = pellet length in pixels.
            item['pixels'] = selected_bbox[2]

            # Save the image with the detected bounding box drawn on it.
            plt.figure(figsize=(18, 10))
            plt.imshow(image)
            show_box(selected_bbox, plt.gca(), angle=selected_angle, rotation_point=(selected_mean[0][0], selected_mean[0][1]))
            plt.axis('off')
            f_name = (output_path / Path(img_path).name).as_posix()
            plt.savefig(f_name)
            plt.close()
        else:
            print('No pellet found in the image. Continuing to other image...')

    # Convert pixel widths to physical size using the reference image.
    if output[index_stamp]['pixels'] is None:
        # BUG FIX: the original divided size by None and crashed here when the
        # reference image yielded no accepted mask.
        print('No pellet detected in the reference image; cannot calibrate size. Exiting...')
        return
    pix_size = output[index_stamp]['size'] / output[index_stamp]['pixels']
    for item in output:
        # Images where no pellet was found keep size/units as None.
        if item['pixels'] is not None:
            item['size'] = pix_size * item['pixels']
            item['units'] = units

    # Final figure: pellet size as a function of measurement time
    # (only over images where a pellet was actually measured).
    measured = [x for x in output if x['size'] is not None]
    time = np.array([x['time-measurement'] for x in measured])
    dim = np.array([x['size'] for x in measured])
    plt.figure(figsize=(10, 10))
    plt.plot(time, dim)
    plt.xlabel('Time [s]')
    plt.ylabel('Size [{}]'.format(measured[0]['units']))
    plt.title('Pellet size evolution in time')
    f_path = (output_path / 'size_evolution.png').as_posix()
    plt.savefig(f_path)

    # Dump all records (including unmeasured ones) as indented JSON.
    f_path = (output_path / 'output.txt').as_posix()
    with open(f_path, 'w') as file:
        json.dump(output, file, indent=2)

    print('***FINISHED***')
+
+
+
if __name__ == "__main__":
    # Command-line interface: model checkpoint, image folder, reference time
    # stamp and the known pellet size at that time stamp.
    parser = argparse.ArgumentParser(description="Program for analysing images, finding heated pellet and outputting its size")

    # Meaningful dest names instead of arg1..arg4; CLI stays positional.
    parser.add_argument("model_path", type=str, help="Path to model")
    parser.add_argument("image_path", type=str, help="Path to images")
    parser.add_argument("samp_time", type=str, help="Time of sample where pellet has been measured")
    parser.add_argument("samp_size", type=str, help="Pellet size")

    args = parser.parse_args()

    # `eval` is this module's function (it shadows the builtin), not builtins.eval.
    eval(args.model_path, args.image_path, args.samp_time, args.samp_size)
+
+
+
diff --git a/model/model.pth b/model/model.pth
new file mode 100644
index 0000000000000000000000000000000000000000..9abad3d79620a8be44427c2a07139c906d1d9716
Binary files /dev/null and b/model/model.pth differ