diff --git a/io_scene_gltf2/__init__.py b/io_scene_gltf2/__init__.py
index 542c21e9d065df9825cc2d81189dd3e2076c7409..c8067771ba3fa2ed726a8e6516c446178c68f0c6 100755
--- a/io_scene_gltf2/__init__.py
+++ b/io_scene_gltf2/__init__.py
@@ -15,7 +15,7 @@
 bl_info = {
     'name': 'glTF 2.0 format',
     'author': 'Julien Duroure, Norbert Nopper, Urs Hanselmann, Moritz Becher, Benjamin Schmithüsen, Jim Eckerlein, and many external contributors',
-    "version": (1, 1, 30),
+    "version": (1, 1, 31),
     'blender': (2, 81, 6),
     'location': 'File > Import-Export',
     'description': 'Import-Export as glTF 2.0',
diff --git a/io_scene_gltf2/blender/exp/gltf2_blender_gather_image.py b/io_scene_gltf2/blender/exp/gltf2_blender_gather_image.py
index 183da33e2ce3e27b11d117f20e0df304e5150bfc..81da3780c6437d0557457c613417cee78491195d 100755
--- a/io_scene_gltf2/blender/exp/gltf2_blender_gather_image.py
+++ b/io_scene_gltf2/blender/exp/gltf2_blender_gather_image.py
@@ -24,7 +24,7 @@ from io_scene_gltf2.blender.exp import gltf2_blender_search_node_tree
 from io_scene_gltf2.io.exp import gltf2_io_binary_data
 from io_scene_gltf2.io.exp import gltf2_io_image_data
 from io_scene_gltf2.io.com import gltf2_io_debug
-from io_scene_gltf2.blender.exp import gltf2_blender_image
+from io_scene_gltf2.blender.exp.gltf2_blender_image import Channel, ExportImage
 from io_scene_gltf2.blender.exp.gltf2_blender_gather_cache import cached
 from io_scene_gltf2.io.exp.gltf2_io_user_extensions import export_user_extensions
 
@@ -38,8 +38,8 @@ def gather_image(
         return None
 
     image_data = __get_image_data(blender_shader_sockets_or_texture_slots, export_settings)
-    if image_data is None:
-        # The blender image has no data
+    if image_data.empty():
+        # The export image has no data
         return None
 
     mime_type = __gather_mime_type(blender_shader_sockets_or_texture_slots, export_settings)
@@ -144,13 +144,13 @@ def __is_slot(sockets_or_slots):
     return isinstance(sockets_or_slots[0], bpy.types.MaterialTextureSlot)
 
 
-def __get_image_data(sockets_or_slots, export_settings) -> gltf2_blender_image.ExportImage:
+def __get_image_data(sockets_or_slots, export_settings) -> ExportImage:
     # For shared resources, such as images, we just store the portion of data that is needed in the glTF property
     # in a helper class. During generation of the glTF in the exporter these will then be combined to actual binary
     # resources.
     if __is_socket(sockets_or_slots):
         results = [__get_tex_from_socket(socket, export_settings) for socket in sockets_or_slots]
-        composed_image = None
+        composed_image = ExportImage()
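+        # Start from an empty fill description; each socket below either adds
+        # channel fills to it or replaces it with a full copy of its image.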
         for result, socket in zip(results, sockets_or_slots):
             if result.shader_node.image.channels == 0:
                 gltf2_io_debug.print_console("WARNING",
@@ -159,44 +159,45 @@ def __get_image_data(sockets_or_slots, export_settings) -> gltf2_blender_image.E
                 continue
 
             # rudimentarily try follow the node tree to find the correct image data.
-            source_channel = 0
+            src_chan = Channel.R
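+            # R is the default source channel; a Separate RGB node in the
+            # path below overrides it with the channel actually connected.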
             for elem in result.path:
                 if isinstance(elem.from_node, bpy.types.ShaderNodeSeparateRGB):
-                    source_channel = {
-                        'R': 0,
-                        'G': 1,
-                        'B': 2
+                    src_chan = {
+                        'R': Channel.R,
+                        'G': Channel.G,
+                        'B': Channel.B,
                     }[elem.from_socket.name]
 
-            image = gltf2_blender_image.ExportImage.from_blender_image(result.shader_node.image)
-
-            target_channel = None
+            dst_chan = None
 
             # some sockets need channel rewriting (gltf pbr defines fixed channels for some attributes)
             if socket.name == 'Metallic':
-                target_channel = 2
+                dst_chan = Channel.B
             elif socket.name == 'Roughness':
-                target_channel = 1
+                dst_chan = Channel.G
             elif socket.name == 'Occlusion' and len(sockets_or_slots) > 1 and sockets_or_slots[1] is not None:
-                target_channel = 0
+                dst_chan = Channel.R
             elif socket.name == 'Alpha' and len(sockets_or_slots) > 1 and sockets_or_slots[1] is not None:
-                composed_image.set_alpha(True)
-                target_channel = 3
+                dst_chan = Channel.A
 
-            if target_channel is not None:
-                if composed_image is None:
-                    composed_image = gltf2_blender_image.ExportImage.white_image(image.width, image.height)
+            if dst_chan is not None:
+                composed_image.fill_image(result.shader_node.image, dst_chan, src_chan)
 
-                composed_image[target_channel] = image[source_channel]
+                # Metallic and roughness always share one glTF texture
+                # (B and G channels), so make sure the other channel is filled.
+                if socket.name == 'Metallic' and not composed_image.is_filled(Channel.G):
+                    composed_image.fill_white(Channel.G)
+                elif socket.name == 'Roughness' and not composed_image.is_filled(Channel.B):
+                    composed_image.fill_white(Channel.B)
             else:
                 # copy full image...eventually following sockets might overwrite things
-                composed_image = image
+                composed_image = ExportImage.from_blender_image(result.shader_node.image)
 
         return composed_image
 
     elif __is_slot(sockets_or_slots):
         texture = __get_tex_from_slot(sockets_or_slots[0])
-        image = gltf2_blender_image.ExportImage.from_blender_image(texture.image)
+        image = ExportImage.from_blender_image(texture.image)
         return image
     else:
         raise NotImplementedError()
diff --git a/io_scene_gltf2/blender/exp/gltf2_blender_image.py b/io_scene_gltf2/blender/exp/gltf2_blender_image.py
index 828b07febab5aae91c8999ca01981099591f5c12..e0eecd3c802e2bf92817cc3bce9c3dd64c2b0a2a 100644
--- a/io_scene_gltf2/blender/exp/gltf2_blender_image.py
+++ b/io_scene_gltf2/blender/exp/gltf2_blender_image.py
@@ -14,122 +14,206 @@
 
 import bpy
 import os
-import typing
+from typing import Optional
 import numpy as np
 import tempfile
+import enum
 
 
-class ExportImage:
-    """Custom image class that allows manipulation and encoding of images"""
-    # FUTURE_WORK: as a method to allow the node graph to be better supported, we could model some of
-    # the node graph elements with numpy functions
-
-    def __init__(self, img: typing.Union[np.ndarray, typing.List[np.ndarray]], max_channels: int = 4,\
-            blender_image: bpy.types.Image = None, has_alpha: bool = False):
-        if isinstance(img, list):
-            np.stack(img, axis=2)
-
-        if len(img.shape) == 2:
-            # images must always have a channels dimension
-            img = np.expand_dims(img, axis=2)
-
-        if not len(img.shape) == 3 or img.shape[2] > 4:
-            raise RuntimeError("Cannot construct an export image from an array of shape {}".format(img.shape))
-
-        self._img = img
-        self._max_channels = max_channels
-        self._blender_image = blender_image
-        self._has_alpha = has_alpha
-
-    def set_alpha(self, alpha: bool):
-        self._has_alpha = alpha
-
-    @classmethod
-    def from_blender_image(cls, blender_image: bpy.types.Image):
-        img = np.array(blender_image.pixels[:])
-        img = img.reshape((blender_image.size[0], blender_image.size[1], blender_image.channels))
-        has_alpha = blender_image.depth == 32
-        return ExportImage(img=img, blender_image=blender_image, has_alpha=has_alpha)
-
-    @classmethod
-    def white_image(cls, width, height, num_channels: int = 4):
-        img = np.ones((width, height, num_channels))
-        return ExportImage(img=img)
-
-    def split_channels(self):
-        """return a list of numpy arrays where each list element corresponds to one image channel (r,g?,b?,a?)"""
-        return np.split(self._img, self._img.shape[2], axis=2)
-
-    @property
-    def img(self) -> np.ndarray:
-        return self._img
-
-    @property
-    def shape(self):
-        return self._img.shape
-
-    @property
-    def width(self):
-        return self.shape[0]
-
-    @property
-    def height(self):
-        return self.shape[1]
-
-    @property
-    def channels(self):
-        return self.shape[2]
-
-    def __getitem__(self, key):
-        """returns a new ExportImage with only the selected channels"""
-        return ExportImage(self._img[:, :, key])
-
-    def __setitem__(self, key, value):
-        """set the selected channels to a new value"""
-        if isinstance(key, slice):
-            self._img[:, :, key] = value.img
-        else:
-            self._img[:, :, key] = value.img[:, :, 0]
-
-    def append(self, other):
-        if self.channels + other.channels > self._max_channels:
-            raise RuntimeError("Cannot append image data to this image "
-                               "because the maximum number of channels is exceeded.")
-
-        self._img = np.concatenate([self.img, other.img], axis=2)
-
-    def __add__(self, other):
-        self.append(other)
-
-    def encode(self, mime_type: typing.Optional[str]) -> bytes:
-        file_format = {
-            "image/jpeg": "JPEG",
-            "image/png": "PNG"
-        }.get(mime_type, "PNG")
+class Channel(enum.IntEnum):
+    R = 0
+    G = 1
+    B = 2
+    A = 3
 
-        if self._blender_image is not None and file_format == self._blender_image.file_format:
-            src_path = bpy.path.abspath(self._blender_image.filepath_raw)
-            if os.path.isfile(src_path):
-                with open(src_path, "rb") as f:
-                    encoded_image = f.read()
-                return encoded_image
+# These describe how an ExportImage's channels should be filled.
 
-        image = bpy.data.images.new("TmpImage", width=self.width, height=self.height, alpha=self._has_alpha)
-        pixels = self._img.flatten().tolist()
-        image.pixels = pixels
+class FillImage:
+    """Fills a channel with the channel src_chan from a Blender image."""
+    def __init__(self, image: bpy.types.Image, src_chan: Channel):
+        self.image = image
+        self.src_chan = src_chan
 
-        # we just use blenders built in save mechanism, this can be considered slightly dodgy but currently is the only
-        # way to support
-        with tempfile.TemporaryDirectory() as tmpdirname:
-            tmpfilename = tmpdirname + "/img"
-            image.filepath_raw = tmpfilename
-            image.file_format = file_format
-            image.save()
+class FillWhite:
+    """Fills a channel with all ones (1.0)."""
+    pass
 
-            with open(tmpfilename, "rb") as f:
-                encoded_image = f.read()
 
-        bpy.data.images.remove(image, do_unlink=True)
+class ExportImage:
+    """Custom image class.
+
+    An image is represented by giving a description of how to fill its red,
+    green, blue, and alpha channels. For example:
+
+        self.fills = {
+            Channel.R: FillImage(image=bpy.data.images['Im1'], src_chan=Channel.B),
+            Channel.G: FillWhite(),
+        }
+
+    This says that the ExportImage's R channel should be filled with the B
+    channel of the Blender image 'Im1', and the ExportImage's G channel
+    should be filled with all 1.0s. Undefined channels mean we don't care
+    what values those channels have.
+
+    This is flexible enough to handle the case where, e.g., the user used the R
+    channel of one image as the metallic value and the G channel of another
+    image as the roughness, and we need to synthesize an ExportImage that
+    packs those into the B and G channels for glTF.
+
+    Storing this description (instead of raw pixels) lets us make more
+    intelligent decisions about how to encode the image.
+    """
+
+    def __init__(self):
+        self.fills = {}
+
+    @staticmethod
+    def from_blender_image(image: bpy.types.Image):
+        export_image = ExportImage()
+        for chan in range(image.channels):
+            export_image.fill_image(image, dst_chan=chan, src_chan=chan)
+        return export_image
+
+    def fill_image(self, image: bpy.types.Image, dst_chan: Channel, src_chan: Channel):
+        self.fills[dst_chan] = FillImage(image, src_chan)
+
+    def fill_white(self, dst_chan: Channel):
+        self.fills[dst_chan] = FillWhite()
+
+    def is_filled(self, chan: Channel) -> bool:
+        return chan in self.fills
+
+    def empty(self) -> bool:
+        return not self.fills
+
+    def __on_happy_path(self) -> bool:
+        # Whether there is an existing Blender image we can use for this
+        # ExportImage because all the channels come from the matching
+        # channel of that image, e.g.
+        #
+        #     self.fills = {
+        #         Channel.R: FillImage(image=im, src_chan=Channel.R),
+        #         Channel.G: FillImage(image=im, src_chan=Channel.G),
+        #     }
+        return (
+            all(isinstance(fill, FillImage) for fill in self.fills.values()) and
+            all(dst_chan == fill.src_chan for dst_chan, fill in self.fills.items()) and
+            len(set(fill.image.name for fill in self.fills.values())) == 1
+        )
+
+    def encode(self, mime_type: Optional[str]) -> bytes:
+        self.file_format = {
+            "image/jpeg": "JPEG",
+            "image/png": "PNG"
+        }.get(mime_type, "PNG")
 
-        return encoded_image
+        # Happy path = we can just use an existing Blender image
+        if self.__on_happy_path():
+            return self.__encode_happy()
+
+        # Unhappy path = we need to create the image self.fills describes.
+        return self.__encode_unhappy()
+
+    def __encode_happy(self) -> bytes:
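+        # On the happy path every fill references the same Blender image with
+        # matching src/dst channels, so encoding any one of them is enough.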
+        for fill in self.fills.values():
+            return self.__encode_from_image(fill.image)
+
+    def __encode_unhappy(self) -> bytes:
+        # This will be a numpy array we fill in with pixel data.
+        result = None
+
+        img_fills = {
+            chan: fill
+            for chan, fill in self.fills.items()
+            if isinstance(fill, FillImage)
+        }
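+        # FillWhite channels need no work here: result is initialized to all ones.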
+        # Loop over images rather than dst_chans so each image is decoded
+        # only once, even if it fills multiple channels.
+        image_names = list(set(fill.image.name for fill in img_fills.values()))
+        for image_name in image_names:
+            image = bpy.data.images[image_name]
+
+            if result is None:
+                result = np.ones((image.size[0], image.size[1], 4), np.float32)
+            # Images should all be the same size (should be guaranteed by
+            # gather_texture_info).
+            assert (image.size[0], image.size[1]) == result.shape[:2]
+
+            # Slow and eats all your memory.
+            pixels = np.array(image.pixels[:])
+
+            pixels = pixels.reshape((image.size[0], image.size[1], image.channels))
+
+            for dst_chan, img_fill in img_fills.items():
+                if img_fill.image == image:
+                    result[:, :, dst_chan] = pixels[:, :, img_fill.src_chan]
+
+            pixels = None  # GC this please
+
+        if result is None:
+            # No FillImages; use a 1x1 white pixel
+            result = np.array([1.0, 1.0, 1.0, 1.0])
+            result = result.reshape((1, 1, 4))
+
+        return self.__encode_from_numpy_array(result)
+
+    def __encode_from_numpy_array(self, array: np.ndarray) -> bytes:
+        tmp_image = None
+        try:
+            tmp_image = bpy.data.images.new(
+                "##gltf-export:tmp-image##",
+                width=array.shape[0],
+                height=array.shape[1],
+                alpha=Channel.A in self.fills,
+            )
+            assert tmp_image.channels == 4  # 4 regardless of the alpha argument above.
+
+            # Also slow and eats all your memory.
+            tmp_image.pixels = array.flatten().tolist()
+
+            return _encode_temp_image(tmp_image, self.file_format)
+
+        finally:
+            if tmp_image is not None:
+                bpy.data.images.remove(tmp_image, do_unlink=True)
+
+    def __encode_from_image(self, image: bpy.types.Image) -> bytes:
+        # See if there is an existing file we can use.
+        if image.source == 'FILE' and image.file_format == self.file_format and \
+                not image.is_dirty:
+            if image.packed_file is not None:
+                return image.packed_file.data
+            else:
+                src_path = bpy.path.abspath(image.filepath_raw)
+                if os.path.isfile(src_path):
+                    with open(src_path, 'rb') as f:
+                        return f.read()
+
+        # Copy to a temp image and save.
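+        # Copying avoids touching the original datablock's filepath_raw and
+        # file_format, which _encode_temp_image overwrites before saving.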
+        tmp_image = None
+        try:
+            tmp_image = image.copy()
+            if image.is_dirty:
+                tmp_image.pixels = image.pixels[:]
+
+            return _encode_temp_image(tmp_image, self.file_format)
+        finally:
+            if tmp_image is not None:
+                bpy.data.images.remove(tmp_image, do_unlink=True)
+
+
+def _encode_temp_image(tmp_image: bpy.types.Image, file_format: str) -> bytes:
+    with tempfile.TemporaryDirectory() as tmpdirname:
+        tmpfilename = tmpdirname + '/img'
+        tmp_image.filepath_raw = tmpfilename
+
+        # NOT A TYPO!!! The bare attribute access below is required; if it
+        # is deleted, the assignment on the line after it will not work.
+        tmp_image.file_format
+        tmp_image.file_format = file_format
+
+        tmp_image.save()
+
+        with open(tmpfilename, "rb") as f:
+            return f.read()