#!/usr/bin/env python3

# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
#
# ***** END GPL LICENSE BLOCK *****
#
# (c) 2015, Blender Foundation - Bastien Montagne

# <pep8 compliant>


"""
This is a tool for generating a JSON version of a .blend file (either only its structure, or with all its data included).

It can also run some simple validity checks over a .blend file.

WARNING! This is still a WIP tool!

Example usage:

   ./blend2json.py -i foo.blend

To output complete DNA struct info:

   ./blend2json.py --full-dna -i foo.blend

To avoid outputting all 'uid' old addresses (these change very often even when the data itself does not,
making diffs pretty noisy):

   ./blend2json.py --no-old-addresses -i foo.blend

To check a .blend file instead of outputting its JSON version (use the explicit -o option to do both at the same time):

   ./blend2json.py -c -i foo.blend

"""

FILTER_DOC = """
Each generic filter is made of three arguments: the include/exclude toggle ('+'/'-'), a regex matched against the name
of the field to check (either one of the 'meta-data' keys generated by the JSON exporter, or an actual data field from
the DNA structs), and a regex matched against the data of that field (the JSON-ified representation of the data, hence
always a string).

Filters are evaluated in the order they are given; if a block does not pass the first filter,
it is immediately rejected and no further checks are done on it.

You can make a filter recursive (that is, if an 'include' filter matches a 'pointer' property, the pointed
data is automatically included as well, up to a given level of recursion) by appending either
'*' (for infinite recursion) or a number (the maximum level of recursion) to the include/exclude toggle.
Note that recursion only makes sense for 'include' filters, and is ignored for 'exclude' ones.

Examples:

To include only MESH blocks:

   ./blend2json.py --filter-block "+" "code" "ME" foo.blend

To include only MESH or CURVE blocks and all data used by them:

   ./blend2json.py --filter-block "+" "code" "(ME)|(CU)" --filter-block "+*" ".*" ".*" foo.blend
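
To include only MESH or CURVE blocks and the data they use, following pointers at most two levels
deep (a sketch using the '+N' depth syntax described above, instead of '+*' infinite recursion):

   ./blend2json.py --filter-block "+" "code" "(ME)|(CU)" --filter-block "+2" ".*" ".*" foo.blend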

"""

import os
import json
import re

# Avoid maintaining multiple blendfile modules
import sys
sys.path.append(os.path.join(
    os.path.dirname(__file__),
    "..", "..", "..",
    "release", "scripts", "addons", "io_blend_utils", "blend",
))
del sys

import blendfile


##### Utils (own JSON formatting) #####


def json_default(o):
    if isinstance(o, bytes):
        return repr(o)[2:-1]
    elif o is ...:
        return "<...>"
    return o


def json_dumps(i):
    return json.dumps(i, default=json_default)
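
# Illustrative behavior of the two helpers above:
#
#   json_dumps(b"ME")  -> '"ME"'     (bytes are rendered through repr(), minus the b'' wrapper)
#   json_dumps(...)    -> '"<...>"'  (Ellipsis is mapped to a "<...>" placeholder)
#   json_dumps(42)     -> '42'       (anything else goes straight through json.dumps)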


def keyval_to_json(kvs, indent, indent_step, compact_output=False):
    if compact_output:
        return ('{' + ', '.join('"%s": %s' % (k, v) for k, v in kvs) + '}')
    else:
        return ('{%s' % indent_step[:-1] +
                (',\n%s%s' % (indent, indent_step)).join(
                    ('"%s":\n%s%s%s' % (k, indent, indent_step, v) if (v[0] in {'[', '{'}) else
                     '"%s": %s' % (k, v)) for k, v in kvs) +
                '\n%s}' % indent)


def list_to_json(lst, indent, indent_step, compact_output=False):
    if compact_output:
        return ('[' + ', '.join(lst) + ']')
    else:
        return ('[%s' % indent_step[:-1] +
                ((',\n%s%s' % (indent, indent_step)).join(
                    ('\n%s%s%s' % (indent, indent_step, l) if (i == 0 and l[0] in {'[', '{'}) else l)
                    for i, l in enumerate(lst))
                 ) +
                '\n%s]' % indent)
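
# For instance, in compact mode (keys and values are expected to be pre-serialized JSON strings,
# see json_dumps above):
#
#   keyval_to_json([("a", "1"), ("b", "2")], "", "", compact_output=True)  -> '{"a": 1, "b": 2}'
#   list_to_json(["1", "2"], "", "", compact_output=True)                  -> '[1, 2]'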


##### Main 'struct' writers #####

def gen_fake_addresses(args, blend):
    if args.use_fake_address:
        hashes = set()
        ret = {}
        for block in blend.blocks:
            if not block.addr_old:
                continue
            hsh = block.get_data_hash()
            # Resolve hash collisions by linear probing, so each block ends up
            # with a unique, content-derived fake address.
            while hsh in hashes:
                hsh += 1
            hashes.add(hsh)
            ret[block.addr_old] = hsh
        return ret

    return {}


def bheader_to_json(args, fw, blend, indent, indent_step):
    fw('%s"%s": [\n' % (indent, "HEADER"))
    indent = indent + indent_step

    keyval = (
        ("magic", json_dumps(blend.header.magic)),
        ("pointer_size", json_dumps(blend.header.pointer_size)),
        ("is_little_endian", json_dumps(blend.header.is_little_endian)),
        ("version", json_dumps(blend.header.version)),
    )
    keyval = keyval_to_json(keyval, indent, indent_step)
    fw('%s%s' % (indent, keyval))

    indent = indent[:-len(indent_step)]
    fw('\n%s]' % indent)
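
# An illustrative fragment of the resulting output (actual values depend on the input file,
# formatting simplified here):
#
#   "HEADER": [
#     {"magic": "BLENDER", "pointer_size": 8, "is_little_endian": true, "version": 279}
#   ]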


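# Filtering convention used below: 'block.user_data' stores the per-block filtering state.
# None means 'not processed by any filter', a positive value means 'included' (set to the
# recursion iteration that included the block), and a negative value means 'excluded'.
# bblocks_to_json() then only outputs blocks whose user_data is None or strictly positive.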
def do_bblock_filter(filters, blend, block, meta_keyval, data_keyval):
    def do_bblock_filter_data_recursive(blend, block, rec_lvl, rec_iter, key=None):
        fields = (blend.structs[block.sdna_index].fields if key is None else
                  [blend.structs[block.sdna_index].field_from_name.get(key[1:-1].encode())])
        for fld in fields:
            if fld is None:
                continue
            if fld.dna_name.is_pointer:
                paths = ([(fld.dna_name.name_only, i) for i in range(fld.dna_name.array_size)]
                         if fld.dna_name.array_size > 1 else [fld.dna_name.name_only])
                for p in paths:
                    child_block = block.get_pointer(p)
                    if child_block is not None:
                        child_block.user_data = max(block.user_data, rec_iter)
                        if rec_lvl != 0:
                            do_bblock_filter_data_recursive(blend, child_block, rec_lvl - 1, rec_iter + 1)

    has_include = False
    do_break = False
    rec_iter = 1
    if block.user_data is None:
        block.user_data = 0
    for include, rec_lvl, key, val in filters:
        if rec_lvl < 0:
            rec_lvl = 100
        has_include = has_include or include
        # Skip exclude filters if block was already processed some way.
        if not include and block.user_data is not None:
            continue
        has_match = False
        for k, v in meta_keyval:
            if key.search(k) and val.search(v):
                has_match = True
                if include:
                    block.user_data = max(block.user_data, rec_iter)
                    # Note that in include cases, we have to keep checking filters, since some 'include recursive'
                    # ones may still have to be processed...
                else:
                    block.user_data = min(block.user_data, -rec_iter)
                    do_break = True  # No need to check more filters in exclude case...
                    break
        for k, v in data_keyval:
            if key.search(k) and val.search(v):
                has_match = True
                if include:
                    block.user_data = max(block.user_data, rec_iter)
                    if rec_lvl != 0:
                        do_bblock_filter_data_recursive(blend, block, rec_lvl - 1, rec_iter + 1, k)
                    # Note that in include cases, we have to keep checking filters, since some 'include recursive'
                    # ones may still have to be processed...
                else:
                    block.user_data = min(block.user_data, -rec_iter)
                    do_break = True  # No need to check more filters in exclude case...
                    break
        if include and not has_match:  # Include check failed, implies exclusion.
            block.user_data = min(block.user_data, -rec_iter)
            do_break = True  # No need to check more filters in exclude case...
        if do_break:
            break
    # Implicit 'include all' in case no include filter is specified...
    if block.user_data == 0 and not has_include:
        block.user_data = max(block.user_data, rec_iter)


def bblocks_to_json(args, fw, blend, address_map, indent, indent_step):
    no_address = args.no_address
    full_data = args.full_data

    def gen_meta_keyval(blend, block):
        keyval = [
            ("code", json_dumps(block.code)),
            ("size", json_dumps(block.size)),
        ]
        if not no_address:
            keyval += [("addr_old", json_dumps(address_map.get(block.addr_old, block.addr_old)))]
        keyval += [
            ("dna_type_id", json_dumps(blend.structs[block.sdna_index].dna_type_id)),
            ("count", json_dumps(block.count)),
        ]
        return keyval

    def gen_data_keyval(blend, block):
        def _is_pointer(k):
            return blend.structs[block.sdna_index].field_from_path(blend.header, blend.handle, k).dna_name.is_pointer
        return [(json_dumps(k)[1:-1], json_dumps(address_map.get(v, v) if _is_pointer(k) else v))
                for k, v in block.items_recursive_iter()]

    if args.block_filters:
        for block in blend.blocks:
            meta_keyval = gen_meta_keyval(blend, block)
            data_keyval = gen_data_keyval(blend, block)
            do_bblock_filter(args.block_filters, blend, block, meta_keyval, data_keyval)

    fw('%s"%s": [\n' % (indent, "DATA"))
    indent = indent + indent_step

    is_first = True
    for block in blend.blocks:
        if block.user_data is None or block.user_data > 0:
            meta_keyval = gen_meta_keyval(blend, block)
            if full_data:
                meta_keyval.append(("data", keyval_to_json(gen_data_keyval(blend, block),
                                                           indent + indent_step, indent_step, args.compact_output)))
            keyval = keyval_to_json(meta_keyval, indent, indent_step, args.compact_output)
            fw('%s%s%s' % ('' if is_first else ',\n', indent, keyval))
            is_first = False

    indent = indent[:-len(indent_step)]
    fw('\n%s]' % indent)
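
# An illustrative fragment of the "DATA" section (one entry per block, meta-data only unless
# --full-data is given; values are made up here):
#
#   "DATA": [
#     {"code": "ME", "size": 1560, "addr_old": 1122334455, "dna_type_id": "Mesh", "count": 1},
#     ...
#   ]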


def bdna_to_json(args, fw, blend, indent, indent_step):
    full_dna = args.full_dna and not args.compact_output

    def bdna_fields_to_json(blend, dna, indent, indent_step):
        lst = []
        for i, field in enumerate(dna.fields):
            keyval = (
                ("dna_name", json_dumps(field.dna_name.name_only)),
                ("dna_type_id", json_dumps(field.dna_type.dna_type_id)),
                ("is_pointer", json_dumps(field.dna_name.is_pointer)),
                ("is_method_pointer", json_dumps(field.dna_name.is_method_pointer)),
                ("array_size", json_dumps(field.dna_name.array_size)),
            )
            lst.append(keyval_to_json(keyval, indent + indent_step, indent_step))
        return list_to_json(lst, indent, indent_step)

    fw('%s"%s": [\n' % (indent, "DNA_STRUCT"))
    indent = indent + indent_step

    is_first = True
    for dna in blend.structs:
        keyval = [
            ("dna_type_id", json_dumps(dna.dna_type_id)),
            ("size", json_dumps(dna.size)),
        ]
        if full_dna:
            keyval += [("fields", bdna_fields_to_json(blend, dna, indent + indent_step, indent_step))]
        else:
            keyval += [("nbr_fields", json_dumps(len(dna.fields)))]
        keyval = keyval_to_json(keyval, indent, indent_step, args.compact_output)
        fw('%s%s%s' % ('' if is_first else ',\n', indent, keyval))
        is_first = False

    indent = indent[:-len(indent_step)]
    fw('\n%s]' % indent)


def blend_to_json(args, f, blend, address_map):
    fw = f.write
    fw('{\n')
    indent = indent_step = "  "
    bheader_to_json(args, fw, blend, indent, indent_step)
    fw(',\n')
    bblocks_to_json(args, fw, blend, address_map, indent, indent_step)
    fw(',\n')
    bdna_to_json(args, fw, blend, indent, indent_step)
    fw('\n}\n')


##### Checks #####

def check_file(args, blend):
    addr_old = set()
    for block in blend.blocks:
        if block.addr_old in addr_old:
            print("ERROR! Several data blocks share same 'addr_old' uuid %d, "
                  "this should never happen!" % block.addr_old)
            continue
        addr_old.add(block.addr_old)


##### Main #####

def argparse_create():
    import argparse
    global __doc__

    # When --help or no args are given, print this help
    usage_text = __doc__

    epilog = "This script is typically used to check differences between .blend files, or to check their validity."

    parser = argparse.ArgumentParser(description=usage_text, epilog=epilog,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument(
        dest="input", nargs="+", metavar='PATH',
        help="Input .blend file(s)")
    parser.add_argument(
        "-o", "--output", dest="output", action="append", metavar='PATH', required=False,
        help="Output .json file(s) (same path/name as input file(s) if not specified)")
    parser.add_argument(
        "-c", "--check-file", dest="check_file", default=False, action='store_true', required=False,
        help=("Perform some basic validation checks over the .blend file"))
    parser.add_argument(
        "--compact-output", dest="compact_output", default=False, action='store_true', required=False,
        help=("Output a very compact representation of blendfile (one line per block/DNAStruct)"))
    parser.add_argument(
        "--no-old-addresses", dest="no_address", default=False, action='store_true', required=False,
        help=("Do not output old memory address of each block of data "
              "(used as 'uuid' in .blend files, but change pretty noisily)"))
    parser.add_argument(
        "--no-fake-old-addresses", dest="use_fake_address", default=True, action='store_false',
        required=False,
        help=("Do not 'rewrite' old memory address of each block of data "
              "(they are rewritten by default to some hash of their content, "
              "to try to avoid too much diff noise between different but similar files)"))
    parser.add_argument(
        "--full-data", dest="full_data",
        default=False, action='store_true', required=False,
        help=("Also put in JSon file data itself "
                              "(WARNING! will generate *huge* verbose files - and is far from complete yet)"))
    parser.add_argument(
        "--full-dna", dest="full_dna", default=False, action='store_true', required=False,
        help=("Also put in JSon file dna properties description (ignored when --compact-output is used)"))

    group = parser.add_argument_group("Filters", FILTER_DOC)
    group.add_argument(
        "--filter-block", dest="block_filters", nargs=3, action='append',
        help=("Filter to apply to BLOCKS (a.k.a. data itself)"))

    return parser


def main():
    # ----------
    # Parse Args

    args = argparse_create().parse_args()

    if not args.output:
        if args.check_file:
            args.output = [None] * len(args.input)
        else:
            args.output = [os.path.splitext(infile)[0] + ".json" for infile in args.input]

    if args.block_filters:
        args.block_filters = [(True if m[0] == "+" else False,
                               0 if len(m) == 1 else (-1 if m[1] == "*" else int(m[1:])),
                               re.compile(f), re.compile(d))
                              for m, f, d in args.block_filters]
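        # For instance, the CLI triple ("+*", "code", "ME") becomes
        # (True, -1, re.compile("code"), re.compile("ME")):
        # include toggle, recursion level (-1 meaning unbounded, see do_bblock_filter),
        # key regex and value regex.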

    for infile, outfile in zip(args.input, args.output):
        with blendfile.open_blend(infile) as blend:
            address_map = gen_fake_addresses(args, blend)

            if args.check_file:
                check_file(args, blend)

            if outfile:
                with open(outfile, 'w', encoding="ascii", errors='xmlcharrefreplace') as f:
                    blend_to_json(args, f, blend, address_map)


if __name__ == "__main__":
    main()