| # Copyright 2013 Google, Inc. All Rights Reserved. |
| # |
| # Google Author(s): Behdad Esfahbod |
| |
| from fontTools.misc.py23 import * |
| from fontTools.misc.fixedTools import otRound |
| from fontTools import ttLib |
| from fontTools.ttLib.tables import otTables |
| from fontTools.otlLib.maxContextCalc import maxCtxFont |
| from fontTools.pens.basePen import NullPen |
| from fontTools.misc.loggingTools import Timer |
| from fontTools.subset.cff import * |
| from fontTools.varLib import varStore |
| import sys |
| import struct |
| import array |
| import logging |
| from collections import Counter |
| from types import MethodType |
| |
| __usage__ = "pyftsubset font-file [glyph...] [--option=value]..." |
| |
| __doc__="""\ |
| pyftsubset -- OpenType font subsetter and optimizer |
| |
| pyftsubset is an OpenType font subsetter and optimizer, based on fontTools. |
| It accepts any TT- or CFF-flavored OpenType (.otf or .ttf) or WOFF (.woff) |
| font file. The subsetted glyph set is based on the specified glyphs |
| or characters, and specified OpenType layout features. |
| |
| The tool also performs some size-reducing optimizations, aimed for using |
| subset fonts as webfonts. Individual optimizations can be enabled or |
| disabled, and are enabled by default when they are safe. |
| |
| Usage: |
| """+__usage__+""" |
| |
| At least one glyph or one of --gids, --gids-file, --glyphs, --glyphs-file, |
| --text, --text-file, --unicodes, or --unicodes-file, must be specified. |
| |
| Arguments: |
| font-file |
| The input font file. |
| glyph |
| Specify one or more glyph identifiers to include in the subset. Must be |
| PS glyph names, or the special string '*' to keep the entire glyph set. |
| |
| Initial glyph set specification: |
| These options populate the initial glyph set. Same option can appear |
multiple times, and the results are accumulated.
| --gids=<NNN>[,<NNN>...] |
| Specify comma/whitespace-separated list of glyph IDs or ranges as |
| decimal numbers. For example, --gids=10-12,14 adds glyphs with |
| numbers 10, 11, 12, and 14. |
| --gids-file=<path> |
| Like --gids but reads from a file. Anything after a '#' on any line |
| is ignored as comments. |
| --glyphs=<glyphname>[,<glyphname>...] |
| Specify comma/whitespace-separated PS glyph names to add to the subset. |
| Note that only PS glyph names are accepted, not gidNNN, U+XXXX, etc |
| that are accepted on the command line. The special string '*' will keep |
| the entire glyph set. |
| --glyphs-file=<path> |
| Like --glyphs but reads from a file. Anything after a '#' on any line |
| is ignored as comments. |
| --text=<text> |
| Specify characters to include in the subset, as UTF-8 string. |
| --text-file=<path> |
Like --text but reads from a file. Newline characters are not added to
| the subset. |
| --unicodes=<XXXX>[,<XXXX>...] |
| Specify comma/whitespace-separated list of Unicode codepoints or |
| ranges as hex numbers, optionally prefixed with 'U+', 'u', etc. |
| For example, --unicodes=41-5a,61-7a adds ASCII letters, so does |
| the more verbose --unicodes=U+0041-005A,U+0061-007A. |
The special string '*' will choose all Unicode characters mapped
| by the font. |
| --unicodes-file=<path> |
| Like --unicodes, but reads from a file. Anything after a '#' on any |
| line in the file is ignored as comments. |
| --ignore-missing-glyphs |
| Do not fail if some requested glyphs or gids are not available in |
| the font. |
| --no-ignore-missing-glyphs |
| Stop and fail if some requested glyphs or gids are not available |
| in the font. [default] |
| --ignore-missing-unicodes [default] |
| Do not fail if some requested Unicode characters (including those |
| indirectly specified using --text or --text-file) are not available |
| in the font. |
| --no-ignore-missing-unicodes |
| Stop and fail if some requested Unicode characters are not available |
| in the font. |
| Note the default discrepancy between ignoring missing glyphs versus |
| unicodes. This is for historical reasons and in the future |
| --no-ignore-missing-unicodes might become default. |
| |
| Other options: |
| For the other options listed below, to see the current value of the option, |
| pass a value of '?' to it, with or without a '='. |
| Examples: |
| $ pyftsubset --glyph-names? |
| Current setting for 'glyph-names' is: False |
| $ ./pyftsubset --name-IDs=? |
| Current setting for 'name-IDs' is: [0, 1, 2, 3, 4, 5, 6] |
| $ ./pyftsubset --hinting? --no-hinting --hinting? |
| Current setting for 'hinting' is: True |
| Current setting for 'hinting' is: False |
| |
| Output options: |
| --output-file=<path> |
| The output font file. If not specified, the subsetted font |
| will be saved in as font-file.subset. |
| --flavor=<type> |
| Specify flavor of output font file. May be 'woff' or 'woff2'. |
| Note that WOFF2 requires the Brotli Python extension, available |
| at https://github.com/google/brotli |
| --with-zopfli |
| Use the Google Zopfli algorithm to compress WOFF. The output is 3-8 % |
| smaller than pure zlib, but the compression speed is much slower. |
| The Zopfli Python bindings are available at: |
| https://pypi.python.org/pypi/zopfli |
| |
| Glyph set expansion: |
| These options control how additional glyphs are added to the subset. |
| --retain-gids |
Retain glyph indices; glyphs not needed are just emptied in place.
| --notdef-glyph |
| Add the '.notdef' glyph to the subset (ie, keep it). [default] |
| --no-notdef-glyph |
| Drop the '.notdef' glyph unless specified in the glyph set. This |
| saves a few bytes, but is not possible for Postscript-flavored |
| fonts, as those require '.notdef'. For TrueType-flavored fonts, |
| this works fine as long as no unsupported glyphs are requested |
| from the font. |
| --notdef-outline |
| Keep the outline of '.notdef' glyph. The '.notdef' glyph outline is |
| used when glyphs not supported by the font are to be shown. It is not |
| needed otherwise. |
| --no-notdef-outline |
| When including a '.notdef' glyph, remove its outline. This saves |
| a few bytes. [default] |
| --recommended-glyphs |
| Add glyphs 0, 1, 2, and 3 to the subset, as recommended for |
| TrueType-flavored fonts: '.notdef', 'NULL' or '.null', 'CR', 'space'. |
| Some legacy software might require this, but no modern system does. |
| --no-recommended-glyphs |
| Do not add glyphs 0, 1, 2, and 3 to the subset, unless specified in |
| glyph set. [default] |
| --no-layout-closure |
| Do not expand glyph set to add glyphs produced by OpenType layout |
| features. Instead, OpenType layout features will be subset to only |
| rules that are relevant to the otherwise-specified glyph set. |
| --layout-features[+|-]=<feature>[,<feature>...] |
| Specify (=), add to (+=) or exclude from (-=) the comma-separated |
| set of OpenType layout feature tags that will be preserved. |
| Glyph variants used by the preserved features are added to the |
| specified subset glyph set. By default, 'calt', 'ccmp', 'clig', 'curs', |
| 'dnom', 'frac', 'kern', 'liga', 'locl', 'mark', 'mkmk', 'numr', 'rclt', |
| 'rlig', 'rvrn', and all features required for script shaping are |
| preserved. To see the full list, try '--layout-features=?'. |
| Use '*' to keep all features. |
| Multiple --layout-features options can be provided if necessary. |
| Examples: |
| --layout-features+=onum,pnum,ss01 |
| * Keep the default set of features and 'onum', 'pnum', 'ss01'. |
| --layout-features-='mark','mkmk' |
| * Keep the default set of features but drop 'mark' and 'mkmk'. |
| --layout-features='kern' |
| * Only keep the 'kern' feature, drop all others. |
| --layout-features='' |
| * Drop all features. |
| --layout-features='*' |
| * Keep all features. |
| --layout-features+=aalt --layout-features-=vrt2 |
| * Keep default set of features plus 'aalt', but drop 'vrt2'. |
| --layout-scripts[+|-]=<script>[,<script>...] |
| Specify (=), add to (+=) or exclude from (-=) the comma-separated |
| set of OpenType layout script tags that will be preserved. By |
| default all scripts are retained ('*'). |
| |
| Hinting options: |
| --hinting |
| Keep hinting [default] |
| --no-hinting |
| Drop glyph-specific hinting and font-wide hinting tables, as well |
| as remove hinting-related bits and pieces from other tables (eg. GPOS). |
| See --hinting-tables for list of tables that are dropped by default. |
| Instructions and hints are stripped from 'glyf' and 'CFF ' tables |
| respectively. This produces (sometimes up to 30%) smaller fonts that |
| are suitable for extremely high-resolution systems, like high-end |
| mobile devices and retina displays. |
| |
| Optimization options: |
| --desubroutinize |
Remove CFF use of subroutines. Subroutinization is a way to make CFF
| fonts smaller. For small subsets however, desubroutinizing might make |
| the font smaller. It has even been reported that desubroutinized CFF |
| fonts compress better (produce smaller output) WOFF and WOFF2 fonts. |
| Also see note under --no-hinting. |
| --no-desubroutinize [default] |
Leave CFF subroutines as is, only throw away unused subroutines.
| |
| Font table options: |
| --drop-tables[+|-]=<table>[,<table>...] |
| Specify (=), add to (+=) or exclude from (-=) the comma-separated |
set of tables that will be dropped.
| By default, the following tables are dropped: |
| 'BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC', 'EBSC', 'SVG ', 'PCLT', 'LTSH' |
| and Graphite tables: 'Feat', 'Glat', 'Gloc', 'Silf', 'Sill'. |
| The tool will attempt to subset the remaining tables. |
| Examples: |
| --drop-tables-='SVG ' |
| * Drop the default set of tables but keep 'SVG '. |
| --drop-tables+=GSUB |
| * Drop the default set of tables and 'GSUB'. |
| --drop-tables=DSIG |
| * Only drop the 'DSIG' table, keep all others. |
| --drop-tables= |
| * Keep all tables. |
| --no-subset-tables+=<table>[,<table>...] |
| Add to the set of tables that will not be subsetted. |
| By default, the following tables are included in this list, as |
| they do not need subsetting (ignore the fact that 'loca' is listed |
| here): 'gasp', 'head', 'hhea', 'maxp', 'vhea', 'OS/2', 'loca', 'name', |
| 'cvt ', 'fpgm', 'prep', 'VMDX', 'DSIG', 'CPAL', 'MVAR', 'cvar', 'STAT'. |
| By default, tables that the tool does not know how to subset and are not |
| specified here will be dropped from the font, unless --passthrough-tables |
| option is passed. |
| Example: |
| --no-subset-tables+=FFTM |
| * Keep 'FFTM' table in the font by preventing subsetting. |
| --passthrough-tables |
| Do not drop tables that the tool does not know how to subset. |
| --no-passthrough-tables |
| Tables that the tool does not know how to subset and are not specified |
| in --no-subset-tables will be dropped from the font. [default] |
| --hinting-tables[-]=<table>[,<table>...] |
| Specify (=), add to (+=) or exclude from (-=) the list of font-wide |
hinting tables that will be dropped if --no-hinting is specified.
| Examples: |
| --hinting-tables-='VDMX' |
| * Drop font-wide hinting tables except 'VDMX'. |
| --hinting-tables='' |
| * Keep all font-wide hinting tables (but strip hints from glyphs). |
| --legacy-kern |
| Keep TrueType 'kern' table even when OpenType 'GPOS' is available. |
| --no-legacy-kern |
| Drop TrueType 'kern' table if OpenType 'GPOS' is available. [default] |
| |
| Font naming options: |
| These options control what is retained in the 'name' table. For numerical |
| codes, see: http://www.microsoft.com/typography/otspec/name.htm |
| --name-IDs[+|-]=<nameID>[,<nameID>...] |
| Specify (=), add to (+=) or exclude from (-=) the set of 'name' table |
| entry nameIDs that will be preserved. By default, only nameIDs between 0 |
| and 6 are preserved, the rest are dropped. Use '*' to keep all entries. |
| Examples: |
| --name-IDs+=7,8,9 |
| * Also keep Trademark, Manufacturer and Designer name entries. |
| --name-IDs='' |
| * Drop all 'name' table entries. |
| --name-IDs='*' |
| * keep all 'name' table entries |
| --name-legacy |
| Keep legacy (non-Unicode) 'name' table entries (0.x, 1.x etc.). |
| XXX Note: This might be needed for some fonts that have no Unicode name |
entries for English. See: https://github.com/fonttools/fonttools/issues/146
| --no-name-legacy |
| Drop legacy (non-Unicode) 'name' table entries [default] |
| --name-languages[+|-]=<langID>[,<langID>] |
| Specify (=), add to (+=) or exclude from (-=) the set of 'name' table |
| langIDs that will be preserved. By default only records with langID |
| 0x0409 (English) are preserved. Use '*' to keep all langIDs. |
| --obfuscate-names |
| Make the font unusable as a system font by replacing name IDs 1, 2, 3, 4, |
| and 6 with dummy strings (it is still fully functional as webfont). |
| |
| Glyph naming and encoding options: |
| --glyph-names |
| Keep PS glyph names in TT-flavored fonts. In general glyph names are |
| not needed for correct use of the font. However, some PDF generators |
| and PDF viewers might rely on glyph names to extract Unicode text |
| from PDF documents. |
| --no-glyph-names |
| Drop PS glyph names in TT-flavored fonts, by using 'post' table |
| version 3.0. [default] |
| --legacy-cmap |
| Keep the legacy 'cmap' subtables (0.x, 1.x, 4.x etc.). |
| --no-legacy-cmap |
| Drop the legacy 'cmap' subtables. [default] |
| --symbol-cmap |
| Keep the 3.0 symbol 'cmap'. |
| --no-symbol-cmap |
| Drop the 3.0 symbol 'cmap'. [default] |
| |
| Other font-specific options: |
| --recalc-bounds |
| Recalculate font bounding boxes. |
| --no-recalc-bounds |
| Keep original font bounding boxes. This is faster and still safe |
| for all practical purposes. [default] |
| --recalc-timestamp |
| Set font 'modified' timestamp to current time. |
| --no-recalc-timestamp |
| Do not modify font 'modified' timestamp. [default] |
| --canonical-order |
| Order tables as recommended in the OpenType standard. This is not |
| required by the standard, nor by any known implementation. |
| --no-canonical-order |
| Keep original order of font tables. This is faster. [default] |
| --prune-unicode-ranges |
| Update the 'OS/2 ulUnicodeRange*' bits after subsetting. The Unicode |
| ranges defined in the OpenType specification v1.7 are intersected with |
| the Unicode codepoints specified in the font's Unicode 'cmap' subtables: |
| when no overlap is found, the bit will be switched off. However, it will |
| *not* be switched on if an intersection is found. [default] |
| --no-prune-unicode-ranges |
| Don't change the 'OS/2 ulUnicodeRange*' bits. |
| --recalc-average-width |
| Update the 'OS/2 xAvgCharWidth' field after subsetting. |
| --no-recalc-average-width |
| Don't change the 'OS/2 xAvgCharWidth' field. [default] |
| --recalc-max-context |
| Update the 'OS/2 usMaxContext' field after subsetting. |
| --no-recalc-max-context |
| Don't change the 'OS/2 usMaxContext' field. [default] |
| --font-number=<number> |
| Select font number for TrueType Collection (.ttc/.otc), starting from 0. |
| |
| Application options: |
| --verbose |
| Display verbose information of the subsetting process. |
| --timing |
| Display detailed timing information of the subsetting process. |
| --xml |
| Display the TTX XML representation of subsetted font. |
| |
| Example: |
| Produce a subset containing the characters ' !"#$%' without performing |
| size-reducing optimizations: |
| |
| $ pyftsubset font.ttf --unicodes="U+0020-0025" \\ |
| --layout-features='*' --glyph-names --symbol-cmap --legacy-cmap \\ |
| --notdef-glyph --notdef-outline --recommended-glyphs \\ |
| --name-IDs='*' --name-legacy --name-languages='*' |
| """ |
| |
| |
| log = logging.getLogger("fontTools.subset") |
| |
| def _log_glyphs(self, glyphs, font=None): |
| self.info("Glyph names: %s", sorted(glyphs)) |
| if font: |
| reverseGlyphMap = font.getReverseGlyphMap() |
| self.info("Glyph IDs: %s", sorted(reverseGlyphMap[g] for g in glyphs)) |
| |
# Bind the helper as a "glyphs" method on the module logger, so callers can
# write log.glyphs(...) just like log.info(...).
log.glyphs = MethodType(_log_glyphs, log)

# I use a different timing channel so I can configure it separately from the
# main module's logger
timer = Timer(logger=logging.getLogger("fontTools.subset.timer"))
| |
| |
| def _add_method(*clazzes): |
| """Returns a decorator function that adds a new method to one or |
| more classes.""" |
| def wrapper(method): |
| done = [] |
| for clazz in clazzes: |
| if clazz in done: continue # Support multiple names of a clazz |
| done.append(clazz) |
| assert clazz.__name__ != 'DefaultTable', \ |
| 'Oops, table class not found.' |
| assert not hasattr(clazz, method.__name__), \ |
| "Oops, class '%s' has method '%s'." % (clazz.__name__, |
| method.__name__) |
| setattr(clazz, method.__name__, method) |
| return None |
| return wrapper |
| |
| def _uniq_sort(l): |
| return sorted(set(l)) |
| |
| def _dict_subset(d, glyphs): |
| return {g:d[g] for g in glyphs} |
| |
| def _list_subset(l, indices): |
| count = len(l) |
| return [l[i] for i in indices if i < count] |
| |
@_add_method(otTables.Coverage)
def intersect(self, glyphs):
    """Return the ascending list of coverage indices whose glyph is in *glyphs*."""
    return [index for index, glyph in enumerate(self.glyphs)
            if glyph in glyphs]
| |
@_add_method(otTables.Coverage)
def intersect_glyphs(self, glyphs):
    """Return the set of covered glyphs that also appear in *glyphs*."""
    return {g for g in self.glyphs if g in glyphs}
| |
@_add_method(otTables.Coverage)
def subset(self, glyphs):
    """Prune the coverage to *glyphs* in place; return the ascending list
    of the old coverage indices that survive."""
    kept_indices = []
    kept_glyphs = []
    for index, glyph in enumerate(self.glyphs):
        if glyph in glyphs:
            kept_indices.append(index)
            kept_glyphs.append(glyph)
    self.glyphs = kept_glyphs
    return kept_indices
| |
@_add_method(otTables.Coverage)
def remap(self, coverage_map):
    """Reorder/prune the coverage to the old indices listed in *coverage_map*."""
    old_glyphs = self.glyphs
    self.glyphs = [old_glyphs[i] for i in coverage_map]
| |
@_add_method(otTables.ClassDef)
def intersect(self, glyphs):
    """Return the ascending list of class values used by *glyphs*."""
    # Class 0 is implicit: any glyph without an explicit entry belongs to it.
    classes = [v for g, v in self.classDefs.items() if g in glyphs]
    if any(g not in self.classDefs for g in glyphs):
        classes.append(0)
    return _uniq_sort(classes)
| |
@_add_method(otTables.ClassDef)
def intersect_class(self, glyphs, klass):
    """Return the subset of *glyphs* whose class value is *klass*."""
    if klass == 0:
        # Class 0 matches every glyph with no explicit class assignment.
        return {g for g in glyphs if g not in self.classDefs}
    return {g for g, v in self.classDefs.items()
            if v == klass and g in glyphs}
| |
@_add_method(otTables.ClassDef)
def subset(self, glyphs, remap=False):
    """Prune the class definitions to *glyphs* in place; return the
    ascending list of class values that remain.  With remap=True, also
    renumber the surviving classes densely."""
    self.classDefs = {g: v for g, v in self.classDefs.items() if g in glyphs}
    # Note: while class 0 has the special meaning of "not matched",
    # if no glyph will ever /not match/, we can optimize class 0 out too.
    surviving = list(self.classDefs.values())
    if any(g not in self.classDefs for g in glyphs):
        surviving.append(0)
    indices = _uniq_sort(surviving)
    if remap:
        self.remap(indices)
    return indices
| |
@_add_method(otTables.ClassDef)
def remap(self, class_map):
    """Remaps classes.

    Rewrites each class value to its position within *class_map*.  A
    lookup table is built once up front, making the remap O(n) instead
    of the quadratic repeated list.index() scans of the naive version.
    """
    # setdefault keeps the FIRST occurrence, matching list.index() semantics
    # for the (normally impossible) case of duplicate entries in class_map.
    new_index = {}
    for index, value in enumerate(class_map):
        new_index.setdefault(value, index)
    # Every value in classDefs is expected to appear in class_map (the
    # callers pass the output of subset()); a missing one raises KeyError.
    self.classDefs = {g: new_index[v] for g, v in self.classDefs.items()}
| |
@_add_method(otTables.SingleSubst)
def closure_glyphs(self, s, cur_glyphs):
    # Every glyph reachable from cur_glyphs pulls in its substitute.
    for glyph, substitute in self.mapping.items():
        if glyph in cur_glyphs:
            s.glyphs.add(substitute)
| |
@_add_method(otTables.SingleSubst)
def subset_glyphs(self, s):
    # Keep only substitutions whose input and output glyphs both survive.
    kept = {}
    for glyph, substitute in self.mapping.items():
        if glyph in s.glyphs and substitute in s.glyphs:
            kept[glyph] = substitute
    self.mapping = kept
    return bool(kept)
| |
@_add_method(otTables.MultipleSubst)
def closure_glyphs(self, s, cur_glyphs):
    # Pull in the full output sequence of every reachable input glyph.
    s.glyphs.update(*(sequence for glyph, sequence in self.mapping.items()
                      if glyph in cur_glyphs))
| |
@_add_method(otTables.MultipleSubst)
def subset_glyphs(self, s):
    # A rule survives only if its input glyph and every output glyph survive.
    kept = {}
    for glyph, sequence in self.mapping.items():
        if glyph in s.glyphs and all(out in s.glyphs for out in sequence):
            kept[glyph] = sequence
    self.mapping = kept
    return bool(kept)
| |
@_add_method(otTables.AlternateSubst)
def closure_glyphs(self, s, cur_glyphs):
    # Pull in all alternates of every reachable input glyph.
    for glyph, alternates in self.alternates.items():
        if glyph in cur_glyphs:
            s.glyphs.update(alternates)
| |
@_add_method(otTables.AlternateSubst)
def subset_glyphs(self, s):
    # Keep an entry only when the input glyph survives and at least one
    # of its alternates does; drop the non-surviving alternates.
    kept = {}
    for glyph, alternates in self.alternates.items():
        if glyph not in s.glyphs:
            continue
        surviving = [alt for alt in alternates if alt in s.glyphs]
        if surviving:
            kept[glyph] = surviving
    self.alternates = kept
    return bool(kept)
| |
@_add_method(otTables.LigatureSubst)
def closure_glyphs(self, s, cur_glyphs):
    # A ligature glyph is reachable when its first component is reachable
    # and every remaining component is in the kept set.
    for first, seqs in self.ligatures.items():
        if first in cur_glyphs:
            s.glyphs.update(seq.LigGlyph for seq in seqs
                            if all(c in s.glyphs for c in seq.Component))
| |
@_add_method(otTables.LigatureSubst)
def subset_glyphs(self, s):
    # A ligature rule survives only if its first glyph, its resulting
    # ligature glyph, and all remaining components survive.
    kept = {}
    for first, seqs in self.ligatures.items():
        if first not in s.glyphs:
            continue
        surviving = [seq for seq in seqs
                     if seq.LigGlyph in s.glyphs and
                        all(c in s.glyphs for c in seq.Component)]
        if surviving:
            kept[first] = surviving
    self.ligatures = kept
    return bool(kept)
| |
@_add_method(otTables.ReverseChainSingleSubst)
def closure_glyphs(self, s, cur_glyphs):
    if self.Format == 1:
        indices = self.Coverage.intersect(cur_glyphs)
        if not indices:
            return
        # The rule can only fire if every backtrack and lookahead
        # coverage still intersects the kept glyph set.
        contexts = self.LookAheadCoverage + self.BacktrackCoverage
        if not all(c.intersect(s.glyphs) for c in contexts):
            return
        s.glyphs.update(self.Substitute[i] for i in indices)
    else:
        assert 0, "unknown format: %s" % self.Format
| |
@_add_method(otTables.ReverseChainSingleSubst)
def subset_glyphs(self, s):
    if self.Format == 1:
        # First drop coverage entries (and their parallel substitutes)
        # for input glyphs that are not retained.
        indices = self.Coverage.subset(s.glyphs)
        self.Substitute = _list_subset(self.Substitute, indices)
        # Now drop rules generating glyphs we don't want
        indices = [i for i,sub in enumerate(self.Substitute)
                   if sub in s.glyphs]
        self.Substitute = _list_subset(self.Substitute, indices)
        self.Coverage.remap(indices)
        self.GlyphCount = len(self.Substitute)
        # NOTE(review): c.subset() mutates the context coverages as a side
        # effect inside the all(); when GlyphCount is 0 the 'and'
        # short-circuits and leaves them un-subsetted, but the whole
        # subtable is then dropped by the caller anyway.
        return bool(self.GlyphCount and
                    all(c.subset(s.glyphs)
                        for c in self.LookAheadCoverage+self.BacktrackCoverage))
    else:
        assert 0, "unknown format: %s" % self.Format
| |
@_add_method(otTables.SinglePos)
def subset_glyphs(self, s):
    if self.Format == 1:
        # One shared ValueRecord; only the coverage needs pruning.
        # (Returns the surviving count, truthy iff anything remains.)
        return len(self.Coverage.subset(s.glyphs))
    elif self.Format == 2:
        # Per-glyph ValueRecords run parallel to the coverage.
        surviving = self.Coverage.subset(s.glyphs)
        old_values = self.Value
        limit = len(old_values)
        self.Value = [old_values[i] for i in surviving if i < limit]
        self.ValueCount = len(self.Value)
        return bool(self.ValueCount)
    else:
        assert 0, "unknown format: %s" % self.Format
| |
@_add_method(otTables.SinglePos)
def prune_post_subset(self, font, options):
    if not options.hinting:
        # Drop device tables
        self.ValueFormat &= ~0x00F0
    # Downgrade to Format 1 if all ValueRecords are the same
    if self.Format == 2 and all(v == self.Value[0] for v in self.Value):
        self.Format = 1
        # When ValueFormat is 0 there is nothing left to store at all.
        self.Value = self.Value[0] if self.ValueFormat != 0 else None
        del self.ValueCount
    # This subtable is always kept.
    return True
| |
@_add_method(otTables.PairPos)
def subset_glyphs(self, s):
    if self.Format == 1:
        # Glyph-pair format: PairSet entries run parallel to the Coverage.
        indices = self.Coverage.subset(s.glyphs)
        pairs = self.PairSet
        count = len(pairs)
        self.PairSet = [pairs[i] for i in indices if i < count]
        # Within each pair set, keep only records whose second glyph survives.
        for p in self.PairSet:
            p.PairValueRecord = [r for r in p.PairValueRecord if r.SecondGlyph in s.glyphs]
            p.PairValueCount = len(p.PairValueRecord)
        # Remove empty pairsets
        indices = [i for i,p in enumerate(self.PairSet) if p.PairValueCount]
        self.Coverage.remap(indices)
        self.PairSet = _list_subset(self.PairSet, indices)
        self.PairSetCount = len(self.PairSet)
        return bool(self.PairSetCount)
    elif self.Format == 2:
        # Class-pair format: prune both class definitions, then drop the
        # rows/columns of the record matrix for classes that disappeared.
        class1_map = [c for c in self.ClassDef1.subset(s.glyphs, remap=True) if c < self.Class1Count]
        class2_map = [c for c in self.ClassDef2.subset(s.glyphs, remap=True) if c < self.Class2Count]
        self.Class1Record = [self.Class1Record[i] for i in class1_map]
        for c in self.Class1Record:
            c.Class2Record = [c.Class2Record[i] for i in class2_map]
        self.Class1Count = len(class1_map)
        self.Class2Count = len(class2_map)
        # NOTE(review): Coverage.subset() mutates the coverage inside the
        # short-circuiting 'and'; it is skipped only when the subtable is
        # empty and about to be dropped anyway.
        return bool(self.Class1Count and
                    self.Class2Count and
                    self.Coverage.subset(s.glyphs))
    else:
        assert 0, "unknown format: %s" % self.Format
| |
@_add_method(otTables.PairPos)
def prune_post_subset(self, font, options):
    if options.hinting:
        return True
    # Clear the device-table bits from both halves of each pair value.
    self.ValueFormat1 &= ~0x00F0
    self.ValueFormat2 &= ~0x00F0
    return True
| |
@_add_method(otTables.CursivePos)
def subset_glyphs(self, s):
    if self.Format == 1:
        # Entry/exit records run parallel to the coverage.
        surviving = self.Coverage.subset(s.glyphs)
        old_records = self.EntryExitRecord
        limit = len(old_records)
        self.EntryExitRecord = [old_records[i] for i in surviving if i < limit]
        self.EntryExitCount = len(self.EntryExitRecord)
        return bool(self.EntryExitCount)
    else:
        assert 0, "unknown format: %s" % self.Format
| |
@_add_method(otTables.Anchor)
def prune_hints(self):
    # Drop device tables / contour anchor point
    # Decompile first so the format change is not lost if the table was
    # still holding cached binary data.
    self.ensureDecompiled()
    # Format 1 carries only the X/Y coordinates; the extra hint-related
    # fields of the other formats are simply no longer written out.
    self.Format = 1
| |
@_add_method(otTables.CursivePos)
def prune_post_subset(self, font, options):
    if options.hinting:
        return True
    # Strip device tables / contour points from every anchor.
    for record in self.EntryExitRecord:
        for anchor in (record.EntryAnchor, record.ExitAnchor):
            if anchor:
                anchor.prune_hints()
    return True
| |
@_add_method(otTables.MarkBasePos)
def subset_glyphs(self, s):
    if self.Format == 1:
        # Mark and base arrays run parallel to their coverages; prune
        # both down to the retained glyph set.
        mark_indices = self.MarkCoverage.subset(s.glyphs)
        self.MarkArray.MarkRecord = _list_subset(self.MarkArray.MarkRecord, mark_indices)
        self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord)
        base_indices = self.BaseCoverage.subset(s.glyphs)
        self.BaseArray.BaseRecord = _list_subset(self.BaseArray.BaseRecord, base_indices)
        self.BaseArray.BaseCount = len(self.BaseArray.BaseRecord)
        # Prune empty classes
        class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord)
        self.ClassCount = len(class_indices)
        # Renumber the surviving mark classes densely, and drop the
        # corresponding anchor columns from every base record.
        for m in self.MarkArray.MarkRecord:
            m.Class = class_indices.index(m.Class)
        for b in self.BaseArray.BaseRecord:
            b.BaseAnchor = _list_subset(b.BaseAnchor, class_indices)
        return bool(self.ClassCount and
                    self.MarkArray.MarkCount and
                    self.BaseArray.BaseCount)
    else:
        assert 0, "unknown format: %s" % self.Format
| |
@_add_method(otTables.MarkBasePos)
def prune_post_subset(self, font, options):
    if options.hinting:
        return True
    # Strip device tables / contour points from all mark and base anchors.
    for mark in self.MarkArray.MarkRecord:
        if mark.MarkAnchor:
            mark.MarkAnchor.prune_hints()
    for base in self.BaseArray.BaseRecord:
        for anchor in base.BaseAnchor:
            if anchor:
                anchor.prune_hints()
    return True
| |
@_add_method(otTables.MarkLigPos)
def subset_glyphs(self, s):
    if self.Format == 1:
        # Mark and ligature arrays run parallel to their coverages;
        # prune both down to the retained glyph set.
        mark_indices = self.MarkCoverage.subset(s.glyphs)
        self.MarkArray.MarkRecord = _list_subset(self.MarkArray.MarkRecord, mark_indices)
        self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord)
        ligature_indices = self.LigatureCoverage.subset(s.glyphs)
        self.LigatureArray.LigatureAttach = _list_subset(self.LigatureArray.LigatureAttach, ligature_indices)
        self.LigatureArray.LigatureCount = len(self.LigatureArray.LigatureAttach)
        # Prune empty classes
        class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord)
        self.ClassCount = len(class_indices)
        # Renumber the surviving mark classes densely, and drop the
        # matching anchor columns from every ligature component.
        for m in self.MarkArray.MarkRecord:
            m.Class = class_indices.index(m.Class)
        for l in self.LigatureArray.LigatureAttach:
            for c in l.ComponentRecord:
                c.LigatureAnchor = _list_subset(c.LigatureAnchor, class_indices)
        return bool(self.ClassCount and
                    self.MarkArray.MarkCount and
                    self.LigatureArray.LigatureCount)
    else:
        assert 0, "unknown format: %s" % self.Format
| |
@_add_method(otTables.MarkLigPos)
def prune_post_subset(self, font, options):
    if options.hinting:
        return True
    # Strip device tables / contour points from mark and ligature anchors.
    for mark in self.MarkArray.MarkRecord:
        if mark.MarkAnchor:
            mark.MarkAnchor.prune_hints()
    for attach in self.LigatureArray.LigatureAttach:
        for component in attach.ComponentRecord:
            for anchor in component.LigatureAnchor:
                if anchor:
                    anchor.prune_hints()
    return True
| |
@_add_method(otTables.MarkMarkPos)
def subset_glyphs(self, s):
    if self.Format == 1:
        # Both mark arrays run parallel to their coverages; prune each
        # down to the retained glyph set.
        mark1_indices = self.Mark1Coverage.subset(s.glyphs)
        self.Mark1Array.MarkRecord = _list_subset(self.Mark1Array.MarkRecord, mark1_indices)
        self.Mark1Array.MarkCount = len(self.Mark1Array.MarkRecord)
        mark2_indices = self.Mark2Coverage.subset(s.glyphs)
        self.Mark2Array.Mark2Record = _list_subset(self.Mark2Array.Mark2Record, mark2_indices)
        self.Mark2Array.MarkCount = len(self.Mark2Array.Mark2Record)
        # Prune empty classes
        class_indices = _uniq_sort(v.Class for v in self.Mark1Array.MarkRecord)
        self.ClassCount = len(class_indices)
        # Renumber the surviving mark classes densely, and drop the
        # matching anchor columns from every mark2 record.
        for m in self.Mark1Array.MarkRecord:
            m.Class = class_indices.index(m.Class)
        for b in self.Mark2Array.Mark2Record:
            b.Mark2Anchor = _list_subset(b.Mark2Anchor, class_indices)
        return bool(self.ClassCount and
                    self.Mark1Array.MarkCount and
                    self.Mark2Array.MarkCount)
    else:
        assert 0, "unknown format: %s" % self.Format
| |
@_add_method(otTables.MarkMarkPos)
def prune_post_subset(self, font, options):
    if options.hinting:
        return True
    # Drop device tables or contour anchor point
    for record in self.Mark1Array.MarkRecord:
        if record.MarkAnchor:
            record.MarkAnchor.prune_hints()
    for mark2 in self.Mark2Array.Mark2Record:
        for anchor in mark2.Mark2Anchor:
            if anchor:
                anchor.prune_hints()
    return True
| |
@_add_method(otTables.SingleSubst,
             otTables.MultipleSubst,
             otTables.AlternateSubst,
             otTables.LigatureSubst,
             otTables.ReverseChainSingleSubst,
             otTables.SinglePos,
             otTables.PairPos,
             otTables.CursivePos,
             otTables.MarkBasePos,
             otTables.MarkLigPos,
             otTables.MarkMarkPos)
def subset_lookups(self, lookup_indices):
    # Simple (non-contextual) subtables hold no references to other
    # lookups, so a lookup renumbering never affects them.
    pass
| |
@_add_method(otTables.SingleSubst,
             otTables.MultipleSubst,
             otTables.AlternateSubst,
             otTables.LigatureSubst,
             otTables.ReverseChainSingleSubst,
             otTables.SinglePos,
             otTables.PairPos,
             otTables.CursivePos,
             otTables.MarkBasePos,
             otTables.MarkLigPos,
             otTables.MarkMarkPos)
def collect_lookups(self):
    # Simple (non-contextual) subtables reference no nested lookups.
    return []
| |
@_add_method(otTables.SingleSubst,
             otTables.MultipleSubst,
             otTables.AlternateSubst,
             otTables.LigatureSubst,
             otTables.ReverseChainSingleSubst,
             otTables.ContextSubst,
             otTables.ChainContextSubst,
             otTables.ContextPos,
             otTables.ChainContextPos)
def prune_post_subset(self, font, options):
    # Nothing further to prune after subsetting for these subtable types;
    # always report the subtable as kept.
    return True
| |
@_add_method(otTables.SingleSubst,
             otTables.AlternateSubst,
             otTables.ReverseChainSingleSubst)
def may_have_non_1to1(self):
    # These lookup types substitute exactly one glyph for one glyph, so
    # they can never change the length of a glyph sequence.
    return False
| |
@_add_method(otTables.MultipleSubst,
             otTables.LigatureSubst,
             otTables.ContextSubst,
             otTables.ChainContextSubst)
def may_have_non_1to1(self):
    # These lookup types may map one glyph to several, several to one, or
    # (for the contextual ones) dispatch to nested lookups that do.
    return True
| |
@_add_method(otTables.ContextSubst,
             otTables.ChainContextSubst,
             otTables.ContextPos,
             otTables.ChainContextPos)
def __subset_classify_context(self):
    """Return a (per-class, per-format cached) ContextHelper for this subtable.

    Contextual lookups come in many flavors ({Chain,}Context{Subst,Pos} x
    formats 1/2/3) that differ only in attribute names and rule layout.
    The helper exposes a uniform view (Coverage, RuleData, SetRuleData,
    LookupRecord, Intersect, ...) so the subsetting methods below can share
    one implementation.  Returns None for unknown formats.
    """

    class ContextHelper(object):
        def __init__(self, klass, Format):
            # Derive name fragments from the subtable class name, e.g.
            # ChainContextSubst -> Typ='Sub', Type='Subst', Chain='Chain'.
            if klass.__name__.endswith('Subst'):
                Typ = 'Sub'
                Type = 'Subst'
            else:
                Typ = 'Pos'
                Type = 'Pos'
            if klass.__name__.startswith('Chain'):
                Chain = 'Chain'
                InputIdx = 1  # position of Input within (Backtrack, Input, LookAhead)
                DataLen = 3   # chained rules carry three sequences
            else:
                Chain = ''
                InputIdx = 0
                DataLen = 1   # plain rules carry only (Input,)
            ChainTyp = Chain+Typ

            self.Typ = Typ
            self.Type = Type
            self.Chain = Chain
            self.ChainTyp = ChainTyp
            self.InputIdx = InputIdx
            self.DataLen = DataLen

            # e.g. 'SubstLookupRecord' or 'PosLookupRecord'
            self.LookupRecord = Type+'LookupRecord'

            if Format == 1:
                # Format 1: glyph-sequence rules; no class/coverage context data.
                Coverage = lambda r: r.Coverage
                ChainCoverage = lambda r: r.Coverage
                ContextData = lambda r:(None,)
                ChainContextData = lambda r:(None, None, None)
                SetContextData = None
                SetChainContextData = None
                RuleData = lambda r:(r.Input,)
                ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead)
                def SetRuleData(r, d):
                    (r.Input,) = d
                    # Input omits the first (coverage) glyph, hence the +1.
                    (r.GlyphCount,) = (len(x)+1 for x in d)
                def ChainSetRuleData(r, d):
                    (r.Backtrack, r.Input, r.LookAhead) = d
                    (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(d[0]),len(d[1])+1,len(d[2]))
            elif Format == 2:
                # Format 2: class-based rules driven by ClassDef tables.
                Coverage = lambda r: r.Coverage
                ChainCoverage = lambda r: r.Coverage
                ContextData = lambda r:(r.ClassDef,)
                ChainContextData = lambda r:(r.BacktrackClassDef,
                                             r.InputClassDef,
                                             r.LookAheadClassDef)
                def SetContextData(r, d):
                    (r.ClassDef,) = d
                def SetChainContextData(r, d):
                    (r.BacktrackClassDef,
                     r.InputClassDef,
                     r.LookAheadClassDef) = d
                RuleData = lambda r:(r.Class,)
                ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead)
                def SetRuleData(r, d):
                    (r.Class,) = d
                    # Class list omits the first position, hence the +1.
                    (r.GlyphCount,) = (len(x)+1 for x in d)
                def ChainSetRuleData(r, d):
                    (r.Backtrack, r.Input, r.LookAhead) = d
                    (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(d[0]),len(d[1])+1,len(d[2]))
            elif Format == 3:
                # Format 3: one Coverage table per sequence position; no rulesets.
                Coverage = lambda r: r.Coverage[0]
                ChainCoverage = lambda r: r.InputCoverage[0]
                ContextData = None
                ChainContextData = None
                SetContextData = None
                SetChainContextData = None
                RuleData = lambda r: r.Coverage
                ChainRuleData = lambda r:(r.BacktrackCoverage +
                                          r.InputCoverage +
                                          r.LookAheadCoverage)
                def SetRuleData(r, d):
                    (r.Coverage,) = d
                    (r.GlyphCount,) = (len(x) for x in d)
                def ChainSetRuleData(r, d):
                    (r.BacktrackCoverage, r.InputCoverage, r.LookAheadCoverage) = d
                    (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(x) for x in d)
            else:
                assert 0, "unknown format: %s" % Format

            # Select the chain or non-chain accessors for this class.
            if Chain:
                self.Coverage = ChainCoverage
                self.ContextData = ChainContextData
                self.SetContextData = SetChainContextData
                self.RuleData = ChainRuleData
                self.SetRuleData = ChainSetRuleData
            else:
                self.Coverage = Coverage
                self.ContextData = ContextData
                self.SetContextData = SetContextData
                self.RuleData = RuleData
                self.SetRuleData = SetRuleData

            # Attribute names for rules/rulesets differ between formats 1 and 2.
            if Format == 1:
                self.Rule = ChainTyp+'Rule'
                self.RuleCount = ChainTyp+'RuleCount'
                self.RuleSet = ChainTyp+'RuleSet'
                self.RuleSetCount = ChainTyp+'RuleSetCount'
                self.Intersect = lambda glyphs, c, r: [r] if r in glyphs else []
            elif Format == 2:
                self.Rule = ChainTyp+'ClassRule'
                self.RuleCount = ChainTyp+'ClassRuleCount'
                self.RuleSet = ChainTyp+'ClassSet'
                self.RuleSetCount = ChainTyp+'ClassSetCount'
                # Class 0 matches any glyph not assigned a class.
                self.Intersect = lambda glyphs, c, r: (c.intersect_class(glyphs, r) if c
                                                       else (set(glyphs) if r == 0 else set()))

                self.ClassDef = 'InputClassDef' if Chain else 'ClassDef'
                self.ClassDefIndex = 1 if Chain else 0
                self.Input = 'Input' if Chain else 'Class'

    if self.Format not in [1, 2, 3]:
        return None  # Don't shoot the messenger; let it go
    # Cache one helper per (subtable class, format) on the class itself.
    if not hasattr(self.__class__, "__ContextHelpers"):
        self.__class__.__ContextHelpers = {}
    if self.Format not in self.__class__.__ContextHelpers:
        helper = ContextHelper(self.__class__, self.Format)
        self.__class__.__ContextHelpers[self.Format] = helper
    return self.__class__.__ContextHelpers[self.Format]
| |
@_add_method(otTables.ContextSubst,
             otTables.ChainContextSubst)
def closure_glyphs(self, s, cur_glyphs):
    """Extend s.glyphs with glyphs reachable through this contextual lookup.

    Only follows rules whose full context can be matched from s.glyphs;
    cur_glyphs is the set of glyphs that can reach this subtable's input
    position.
    """
    c = self.__subset_classify_context()

    indices = c.Coverage(self).intersect(cur_glyphs)
    if not indices:
        return []
    cur_glyphs = c.Coverage(self).intersect_glyphs(cur_glyphs)

    if self.Format == 1:
        # Glyph-sequence rules: one ruleset per covered glyph.
        ContextData = c.ContextData(self)
        rss = getattr(self, c.RuleSet)
        rssCount = getattr(self, c.RuleSetCount)
        for i in indices:
            if i >= rssCount or not rss[i]: continue
            for r in getattr(rss[i], c.Rule):
                if not r: continue
                # Skip rules whose context cannot be matched by retained glyphs.
                if not all(all(c.Intersect(s.glyphs, cd, k) for k in klist)
                       for cd,klist in zip(ContextData, c.RuleData(r))):
                    continue
                # 'chaos' tracks sequence positions whose glyph content is no
                # longer predictable because an earlier lookup record may have
                # rewritten them; for those, close over all glyphs (None).
                chaos = set()
                for ll in getattr(r, c.LookupRecord):
                    if not ll: continue
                    seqi = ll.SequenceIndex
                    if seqi in chaos:
                        # TODO Can we improve this?
                        pos_glyphs = None
                    else:
                        if seqi == 0:
                            # Position 0 is the coverage glyph itself.
                            pos_glyphs = frozenset([c.Coverage(self).glyphs[i]])
                        else:
                            pos_glyphs = frozenset([r.Input[seqi - 1]])
                    lookup = s.table.LookupList.Lookup[ll.LookupListIndex]
                    chaos.add(seqi)
                    if lookup.may_have_non_1to1():
                        # A non-1:1 lookup may shift everything after seqi.
                        chaos.update(range(seqi, len(r.Input)+2))
                    lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
    elif self.Format == 2:
        # Class-based rules: one ruleset per input class.
        ClassDef = getattr(self, c.ClassDef)
        indices = ClassDef.intersect(cur_glyphs)
        ContextData = c.ContextData(self)
        rss = getattr(self, c.RuleSet)
        rssCount = getattr(self, c.RuleSetCount)
        for i in indices:
            if i >= rssCount or not rss[i]: continue
            for r in getattr(rss[i], c.Rule):
                if not r: continue
                if not all(all(c.Intersect(s.glyphs, cd, k) for k in klist)
                           for cd,klist in zip(ContextData, c.RuleData(r))):
                    continue
                chaos = set()
                for ll in getattr(r, c.LookupRecord):
                    if not ll: continue
                    seqi = ll.SequenceIndex
                    if seqi in chaos:
                        # TODO Can we improve this?
                        pos_glyphs = None
                    else:
                        if seqi == 0:
                            pos_glyphs = frozenset(ClassDef.intersect_class(cur_glyphs, i))
                        else:
                            pos_glyphs = frozenset(ClassDef.intersect_class(s.glyphs, getattr(r, c.Input)[seqi - 1]))
                    lookup = s.table.LookupList.Lookup[ll.LookupListIndex]
                    chaos.add(seqi)
                    if lookup.may_have_non_1to1():
                        chaos.update(range(seqi, len(getattr(r, c.Input))+2))
                    lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
    elif self.Format == 3:
        # Coverage-based: the subtable itself is the single rule.
        if not all(x.intersect(s.glyphs) for x in c.RuleData(self)):
            return []
        r = self
        chaos = set()
        for ll in getattr(r, c.LookupRecord):
            if not ll: continue
            seqi = ll.SequenceIndex
            if seqi in chaos:
                # TODO Can we improve this?
                pos_glyphs = None
            else:
                if seqi == 0:
                    pos_glyphs = frozenset(cur_glyphs)
                else:
                    pos_glyphs = frozenset(r.InputCoverage[seqi].intersect_glyphs(s.glyphs))
            lookup = s.table.LookupList.Lookup[ll.LookupListIndex]
            chaos.add(seqi)
            if lookup.may_have_non_1to1():
                chaos.update(range(seqi, len(r.InputCoverage)+1))
            lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
    else:
        assert 0, "unknown format: %s" % self.Format
| |
@_add_method(otTables.ContextSubst,
             otTables.ContextPos,
             otTables.ChainContextSubst,
             otTables.ChainContextPos)
def subset_glyphs(self, s):
    """Prune rules and rulesets down to s.glyphs.

    Returns True if the subtable still matches anything afterwards.
    """
    c = self.__subset_classify_context()

    if self.Format == 1:
        # Keep only rulesets reachable through the subsetted coverage.
        indices = self.Coverage.subset(s.glyphs)
        rss = getattr(self, c.RuleSet)
        rssCount = getattr(self, c.RuleSetCount)
        rss = [rss[i] for i in indices if i < rssCount]
        for rs in rss:
            if not rs: continue
            ss = getattr(rs, c.Rule)
            # Keep rules whose every context glyph survived the subset.
            ss = [r for r in ss
                  if r and all(all(g in s.glyphs for g in glist)
                               for glist in c.RuleData(r))]
            setattr(rs, c.Rule, ss)
            setattr(rs, c.RuleCount, len(ss))
        # Prune empty rulesets
        indices = [i for i,rs in enumerate(rss) if rs and getattr(rs, c.Rule)]
        self.Coverage.remap(indices)
        rss = _list_subset(rss, indices)
        setattr(self, c.RuleSet, rss)
        setattr(self, c.RuleSetCount, len(rss))
        return bool(rss)
    elif self.Format == 2:
        if not self.Coverage.subset(s.glyphs):
            return False
        ContextData = c.ContextData(self)
        # Subset each ClassDef; klass_maps[i] lists surviving class values.
        klass_maps = [x.subset(s.glyphs, remap=True) if x else None for x in ContextData]

        # Keep rulesets for class numbers that survived.
        indices = klass_maps[c.ClassDefIndex]
        rss = getattr(self, c.RuleSet)
        rssCount = getattr(self, c.RuleSetCount)
        rss = [rss[i] for i in indices if i < rssCount]
        del rssCount
        # Delete, but not renumber, unreachable rulesets.
        indices = getattr(self, c.ClassDef).intersect(self.Coverage.glyphs)
        rss = [rss if i in indices else None for i,rss in enumerate(rss)]

        for rs in rss:
            if not rs: continue
            ss = getattr(rs, c.Rule)
            # Keep rules whose every context class survived.
            ss = [r for r in ss
                  if r and all(all(k in klass_map for k in klist)
                               for klass_map,klist in zip(klass_maps, c.RuleData(r)))]
            setattr(rs, c.Rule, ss)
            setattr(rs, c.RuleCount, len(ss))

            # Remap rule classes
            for r in ss:
                c.SetRuleData(r, [[klass_map.index(k) for k in klist]
                                  for klass_map,klist in zip(klass_maps, c.RuleData(r))])

        # Prune empty rulesets
        rss = [rs if rs and getattr(rs, c.Rule) else None for rs in rss]
        while rss and rss[-1] is None:
            del rss[-1]
        setattr(self, c.RuleSet, rss)
        setattr(self, c.RuleSetCount, len(rss))

        # TODO: We can do a second round of remapping class values based
        # on classes that are actually used in at least one rule. Right
        # now we subset classes to c.glyphs only. Or better, rewrite
        # the above to do that.

        return bool(rss)
    elif self.Format == 3:
        # Coverage-based: subset every coverage; empty coverage kills the rule.
        return all(x.subset(s.glyphs) for x in c.RuleData(self))
    else:
        assert 0, "unknown format: %s" % self.Format
| |
@_add_method(otTables.ContextSubst,
             otTables.ChainContextSubst,
             otTables.ContextPos,
             otTables.ChainContextPos)
def subset_lookups(self, lookup_indices):
    """Drop lookup records pointing at removed lookups; remap the rest."""
    c = self.__subset_classify_context()

    def _filter_and_remap(holder):
        # Keep only records whose target lookup survived, then renumber.
        setattr(holder, c.LookupRecord,
                [ll for ll in getattr(holder, c.LookupRecord)
                 if ll and ll.LookupListIndex in lookup_indices])
        for ll in getattr(holder, c.LookupRecord):
            if not ll: continue
            ll.LookupListIndex = lookup_indices.index(ll.LookupListIndex)

    if self.Format in [1, 2]:
        # Records live on each rule inside each ruleset.
        for ruleset in getattr(self, c.RuleSet):
            if not ruleset: continue
            for rule in getattr(ruleset, c.Rule):
                if not rule: continue
                _filter_and_remap(rule)
    elif self.Format == 3:
        # Records live directly on the subtable.
        _filter_and_remap(self)
    else:
        assert 0, "unknown format: %s" % self.Format
| |
@_add_method(otTables.ContextSubst,
             otTables.ChainContextSubst,
             otTables.ContextPos,
             otTables.ChainContextPos)
def collect_lookups(self):
    """Return the lookup indices referenced by this contextual subtable."""
    c = self.__subset_classify_context()

    if self.Format == 3:
        # Records hang directly off the subtable.
        record_lists = [getattr(self, c.LookupRecord)]
    elif self.Format in [1, 2]:
        # Records hang off each rule of each ruleset.
        record_lists = [getattr(r, c.LookupRecord)
                        for rs in getattr(self, c.RuleSet) if rs
                        for r in getattr(rs, c.Rule) if r]
    else:
        assert 0, "unknown format: %s" % self.Format
    return [ll.LookupListIndex
            for records in record_lists
            for ll in records if ll]
| |
@_add_method(otTables.ExtensionSubst)
def closure_glyphs(self, s, cur_glyphs):
    """Delegate glyph closure to the wrapped subtable."""
    if self.Format != 1:
        assert 0, "unknown format: %s" % self.Format
        return
    self.ExtSubTable.closure_glyphs(s, cur_glyphs)
| |
@_add_method(otTables.ExtensionSubst)
def may_have_non_1to1(self):
    """Delegate the 1:1 check to the wrapped subtable."""
    if self.Format != 1:
        assert 0, "unknown format: %s" % self.Format
        return None
    return self.ExtSubTable.may_have_non_1to1()
| |
@_add_method(otTables.ExtensionSubst,
             otTables.ExtensionPos)
def subset_glyphs(self, s):
    """Delegate glyph subsetting to the wrapped subtable."""
    if self.Format != 1:
        assert 0, "unknown format: %s" % self.Format
        return None
    return self.ExtSubTable.subset_glyphs(s)
| |
@_add_method(otTables.ExtensionSubst,
             otTables.ExtensionPos)
def prune_post_subset(self, font, options):
    """Delegate post-subset pruning to the wrapped subtable."""
    if self.Format != 1:
        assert 0, "unknown format: %s" % self.Format
        return None
    return self.ExtSubTable.prune_post_subset(font, options)
| |
@_add_method(otTables.ExtensionSubst,
             otTables.ExtensionPos)
def subset_lookups(self, lookup_indices):
    """Delegate lookup subsetting to the wrapped subtable."""
    if self.Format != 1:
        assert 0, "unknown format: %s" % self.Format
        return None
    return self.ExtSubTable.subset_lookups(lookup_indices)
| |
@_add_method(otTables.ExtensionSubst,
             otTables.ExtensionPos)
def collect_lookups(self):
    """Delegate lookup collection to the wrapped subtable."""
    if self.Format != 1:
        assert 0, "unknown format: %s" % self.Format
        return None
    return self.ExtSubTable.collect_lookups()
| |
@_add_method(otTables.Lookup)
def closure_glyphs(self, s, cur_glyphs=None):
    """Run glyph closure of all subtables against cur_glyphs.

    cur_glyphs=None means "all glyphs currently in the subset".
    """
    if cur_glyphs is None:
        cur_glyphs = frozenset(s.glyphs)

    # Memoize
    # Per-lookup record of which glyphs we've already closed over, valid
    # only as long as s.glyphs hasn't grown since it was recorded.
    key = id(self)
    doneLookups = s._doneLookups
    count,covered = doneLookups.get(key, (0, None))
    if count != len(s.glyphs):
        # The subset grew; previous coverage is stale, start fresh.
        count,covered = doneLookups[key] = (len(s.glyphs), set())
    if cur_glyphs.issubset(covered):
        # Nothing new to process for this lookup.
        return
    covered.update(cur_glyphs)

    for st in self.SubTable:
        if not st: continue
        st.closure_glyphs(s, cur_glyphs)
| |
@_add_method(otTables.Lookup)
def subset_glyphs(self, s):
    """Subset all subtables, dropping the ones that become empty."""
    surviving = []
    for st in self.SubTable:
        if st and st.subset_glyphs(s):
            surviving.append(st)
    self.SubTable = surviving
    self.SubTableCount = len(surviving)
    return self.SubTableCount > 0
| |
@_add_method(otTables.Lookup)
def prune_post_subset(self, font, options):
    """Prune every subtable; report whether any of them changed."""
    # Materialize the list first: every subtable must be pruned, so we
    # must not short-circuit on the first True.
    results = [st.prune_post_subset(font, options)
               for st in self.SubTable if st]
    return any(results)
| |
@_add_method(otTables.Lookup)
def subset_lookups(self, lookup_indices):
    """Prune/remap lookup references in all subtables.

    Skips None subtables, for consistency with the other otTables.Lookup
    methods (closure_glyphs, prune_post_subset, collect_lookups, ...),
    which all guard against empty entries; the old code would raise
    AttributeError on a None subtable.
    """
    for st in self.SubTable:
        if not st: continue
        st.subset_lookups(lookup_indices)
| |
@_add_method(otTables.Lookup)
def collect_lookups(self):
    """Return the concatenated lookup indices referenced by all subtables."""
    collected = []
    for st in self.SubTable:
        if st:
            collected.extend(st.collect_lookups())
    return collected
| |
@_add_method(otTables.Lookup)
def may_have_non_1to1(self):
    """True if any subtable may map glyphs other than one-to-one."""
    for st in self.SubTable:
        if st and st.may_have_non_1to1():
            return True
    return False
| |
@_add_method(otTables.LookupList)
def subset_glyphs(self, s):
    """Returns the indices of nonempty lookups."""
    nonempty = []
    for index, lookup in enumerate(self.Lookup):
        if lookup and lookup.subset_glyphs(s):
            nonempty.append(index)
    return nonempty
| |
@_add_method(otTables.LookupList)
def prune_post_subset(self, font, options):
    """Prune every lookup; report whether any of them changed."""
    # All lookups must be visited, so collect results before testing.
    results = [lookup.prune_post_subset(font, options)
               for lookup in self.Lookup if lookup]
    return any(results)
| |
@_add_method(otTables.LookupList)
def subset_lookups(self, lookup_indices):
    """Keep only the given lookups (in that order), remapping references."""
    self.ensureDecompiled()
    count = self.LookupCount
    self.Lookup = [self.Lookup[i] for i in lookup_indices if i < count]
    self.LookupCount = len(self.Lookup)
    for lookup in self.Lookup:
        lookup.subset_lookups(lookup_indices)
| |
@_add_method(otTables.LookupList)
def neuter_lookups(self, lookup_indices):
    """Sets lookups not in lookup_indices to None."""
    self.ensureDecompiled()
    neutered = []
    for index, lookup in enumerate(self.Lookup):
        neutered.append(lookup if index in lookup_indices else None)
    self.Lookup = neutered
| |
@_add_method(otTables.LookupList)
def closure_lookups(self, lookup_indices):
    """Returns sorted index of all lookups reachable from lookup_indices."""
    reachable = _uniq_sort(lookup_indices)
    frontier = reachable
    while True:
        # Collect everything referenced by the current frontier.
        discovered = []
        for i in frontier:
            if i < self.LookupCount:
                discovered.extend(self.Lookup[i].collect_lookups())
        # Keep only valid, not-yet-seen indices.
        discovered = [i for i in discovered
                      if i not in reachable and i < self.LookupCount]
        if not discovered:
            return _uniq_sort(reachable)
        frontier = _uniq_sort(discovered)
        reachable.extend(frontier)
| |
@_add_method(otTables.Feature)
def subset_lookups(self, lookup_indices):
    """Returns True if feature is non-empty afterwards."""
    # (Fixed malformed '""""' docstring opener.)
    # Filter out references to dropped lookups and remap the survivors to
    # their new positions in a single pass (was two separate passes).
    self.LookupListIndex = [lookup_indices.index(l)
                            for l in self.LookupListIndex
                            if l in lookup_indices]
    self.LookupCount = len(self.LookupListIndex)
    # A feature with no lookups is still kept if it carries FeatureParams.
    return self.LookupCount or self.FeatureParams
| |
@_add_method(otTables.FeatureList)
def subset_lookups(self, lookup_indices):
    """Returns the indices of nonempty features."""
    # Note: Never ever drop feature 'pref', even if it's empty.
    # HarfBuzz chooses shaper for Khmer based on presence of this
    # feature. See thread at:
    # http://lists.freedesktop.org/archives/harfbuzz/2012-November/002660.html
    kept = []
    for index, record in enumerate(self.FeatureRecord):
        # subset_lookups must run on every feature for its side effects.
        non_empty = record.Feature.subset_lookups(lookup_indices)
        if non_empty or record.FeatureTag == 'pref':
            kept.append(index)
    return kept
| |
@_add_method(otTables.FeatureList)
def collect_lookups(self, feature_indices):
    """Return lookup indices referenced by the given (valid) features."""
    collected = []
    count = self.FeatureCount
    for i in feature_indices:
        if i < count:
            collected.extend(self.FeatureRecord[i].Feature.LookupListIndex)
    return collected
| |
@_add_method(otTables.FeatureList)
def subset_features(self, feature_indices):
    """Keep only the given features; return True if any remain."""
    self.ensureDecompiled()
    records = _list_subset(self.FeatureRecord, feature_indices)
    self.FeatureRecord = records
    self.FeatureCount = len(records)
    return len(records) > 0
| |
@_add_method(otTables.FeatureTableSubstitution)
def subset_lookups(self, lookup_indices):
    """Returns the indices of nonempty features."""
    kept = []
    for record in self.SubstitutionRecord:
        if record.Feature.subset_lookups(lookup_indices):
            kept.append(record.FeatureIndex)
    return kept
| |
@_add_method(otTables.FeatureVariations)
def subset_lookups(self, lookup_indices):
    """Returns the indices of nonempty features."""
    indices = []
    for record in self.FeatureVariationRecord:
        indices.extend(
            record.FeatureTableSubstitution.subset_lookups(lookup_indices))
    return indices
| |
@_add_method(otTables.FeatureVariations)
def collect_lookups(self, feature_indices):
    """Return lookup indices referenced by substitutions of kept features."""
    collected = []
    for variation in self.FeatureVariationRecord:
        for record in variation.FeatureTableSubstitution.SubstitutionRecord:
            if record.FeatureIndex in feature_indices:
                collected.extend(record.Feature.LookupListIndex)
    return collected
| |
@_add_method(otTables.FeatureTableSubstitution)
def subset_features(self, feature_indices):
    """Keep substitutions for surviving features and remap their indices."""
    self.ensureDecompiled()
    kept = [record for record in self.SubstitutionRecord
            if record.FeatureIndex in feature_indices]
    # remap feature indices
    for record in kept:
        record.FeatureIndex = feature_indices.index(record.FeatureIndex)
    self.SubstitutionRecord = kept
    self.SubstitutionCount = len(kept)
    return bool(kept)
| |
@_add_method(otTables.FeatureVariations)
def subset_features(self, feature_indices):
    """Keep records whose substitution still has features; True if any remain.

    Bugfix: the filtered list used to be assigned to the misspelled
    attribute 'FeaturVariationRecord', so the real FeatureVariationRecord
    list (and the count derived from it) was never actually pruned.
    """
    self.ensureDecompiled()
    self.FeatureVariationRecord = [r for r in self.FeatureVariationRecord
                                   if r.FeatureTableSubstitution.subset_features(feature_indices)]
    self.FeatureVariationCount = len(self.FeatureVariationRecord)
    return bool(self.FeatureVariationCount)
| |
@_add_method(otTables.DefaultLangSys,
             otTables.LangSys)
def subset_features(self, feature_indices):
    """Prune/remap feature references; return True if langsys stays useful.

    ReqFeatureIndex 65535 is the sentinel for "no required feature".
    """
    if self.ReqFeatureIndex in feature_indices:
        self.ReqFeatureIndex = feature_indices.index(self.ReqFeatureIndex)
    else:
        self.ReqFeatureIndex = 65535
    # Filter and remap in a single pass; the old code filtered the list
    # and then re-tested the same membership condition while mapping.
    self.FeatureIndex = [feature_indices.index(f) for f in self.FeatureIndex
                         if f in feature_indices]
    self.FeatureCount = len(self.FeatureIndex)
    return bool(self.FeatureCount or self.ReqFeatureIndex != 65535)
| |
@_add_method(otTables.DefaultLangSys,
             otTables.LangSys)
def collect_features(self):
    """Return sorted unique feature indices used by this langsys."""
    collected = list(self.FeatureIndex)
    if self.ReqFeatureIndex != 65535:  # 65535 == no required feature
        collected.append(self.ReqFeatureIndex)
    return _uniq_sort(collected)
| |
@_add_method(otTables.Script)
def subset_features(self, feature_indices, keepEmptyDefaultLangSys=False):
    """Prune langsys records to the kept features; True if script stays."""
    if self.DefaultLangSys:
        default_nonempty = self.DefaultLangSys.subset_features(feature_indices)
        if not default_nonempty and not keepEmptyDefaultLangSys:
            self.DefaultLangSys = None
    # subset_features must run on every langsys for its side effects.
    kept = [record for record in self.LangSysRecord
            if record.LangSys.subset_features(feature_indices)]
    self.LangSysRecord = kept
    self.LangSysCount = len(kept)
    return bool(self.LangSysCount or self.DefaultLangSys)
| |
@_add_method(otTables.Script)
def collect_features(self):
    """Return sorted unique feature indices used by any langsys of this script."""
    collected = []
    for record in self.LangSysRecord:
        collected.extend(record.LangSys.collect_features())
    if self.DefaultLangSys:
        collected.extend(self.DefaultLangSys.collect_features())
    return _uniq_sort(collected)
| |
@_add_method(otTables.ScriptList)
def subset_features(self, feature_indices, retain_empty):
    # https://bugzilla.mozilla.org/show_bug.cgi?id=1331737#c32
    kept = []
    for record in self.ScriptRecord:
        # Always run subset_features for its side effects, even when
        # retain_empty would keep the record regardless.
        non_empty = record.Script.subset_features(
            feature_indices, record.ScriptTag == 'DFLT')
        if non_empty or retain_empty:
            kept.append(record)
    self.ScriptRecord = kept
    self.ScriptCount = len(kept)
    return bool(self.ScriptCount)
| |
@_add_method(otTables.ScriptList)
def collect_features(self):
    """Return sorted unique feature indices used by any script."""
    collected = []
    for record in self.ScriptRecord:
        collected.extend(record.Script.collect_features())
    return _uniq_sort(collected)
| |
| # CBLC will inherit it |
@_add_method(ttLib.getTableClass('EBLC'))
def subset_glyphs(self, s):
    """Subset bitmap location data, dropping emptied subtables and strikes."""
    keep = s.glyphs
    for strike in self.strikes:
        for ist in strike.indexSubTables:
            ist.names = [name for name in ist.names if name in keep]
        strike.indexSubTables = [ist for ist in strike.indexSubTables
                                 if ist.names]
    self.strikes = [strike for strike in self.strikes
                    if strike.indexSubTables]

    return True
| |
| # CBDT will inherit it |
@_add_method(ttLib.getTableClass('EBDT'))
def subset_glyphs(self, s):
    """Subset bitmap glyph data per strike, dropping strikes that empty out."""
    pruned = []
    for strike in self.strikeData:
        subsetted = {g: strike[g] for g in s.glyphs if g in strike}
        # Prune empty strikes
        # https://github.com/fonttools/fonttools/issues/1633
        if subsetted:
            pruned.append(subsetted)
    self.strikeData = pruned
    return True
| |
@_add_method(ttLib.getTableClass('sbix'))
def subset_glyphs(self, s):
    """Keep only subsetted glyphs in every sbix strike."""
    for strike in self.strikes.values():
        existing = strike.glyphs
        strike.glyphs = {g: existing[g] for g in s.glyphs if g in existing}

    return True
| |
@_add_method(ttLib.getTableClass('GSUB'))
def closure_glyphs(self, s):
    """Grow s.glyphs to its closure under all reachable GSUB substitutions."""
    s.table = self.table
    if self.table.ScriptList:
        feature_indices = self.table.ScriptList.collect_features()
    else:
        feature_indices = []
    if self.table.FeatureList:
        lookup_indices = self.table.FeatureList.collect_lookups(feature_indices)
    else:
        lookup_indices = []
    # FeatureVariations is only present on newer table versions.
    if getattr(self.table, 'FeatureVariations', None):
        lookup_indices += self.table.FeatureVariations.collect_lookups(feature_indices)
    lookup_indices = _uniq_sort(lookup_indices)
    if self.table.LookupList:
        # Per-lookup memo consumed by otTables.Lookup.closure_glyphs.
        s._doneLookups = {}
        while True:
            # Fixed-point iteration: each pass may add glyphs that enable
            # further (e.g. contextual) substitutions.
            orig_glyphs = frozenset(s.glyphs)
            for i in lookup_indices:
                if i >= self.table.LookupList.LookupCount: continue
                if not self.table.LookupList.Lookup[i]: continue
                self.table.LookupList.Lookup[i].closure_glyphs(s)
            if orig_glyphs == s.glyphs:
                break
        del s._doneLookups
    del s.table
| |
@_add_method(ttLib.getTableClass('GSUB'),
             ttLib.getTableClass('GPOS'))
def subset_glyphs(self, s):
    """Subset the lookup list to the GSUB-closed glyph set."""
    s.glyphs = s.glyphs_gsubed
    if self.table.LookupList:
        lookup_indices = self.table.LookupList.subset_glyphs(s)
    else:
        lookup_indices = []
    # Drop features/scripts that reference only removed lookups.
    self.subset_lookups(lookup_indices)
    return True
| |
@_add_method(ttLib.getTableClass('GSUB'),
             ttLib.getTableClass('GPOS'))
def retain_empty_scripts(self):
    """Whether empty script records should be kept (GSUB only)."""
    # https://github.com/fonttools/fonttools/issues/518
    # https://bugzilla.mozilla.org/show_bug.cgi?id=1080739#c15
    return self.__class__ is ttLib.getTableClass('GSUB')
| |
@_add_method(ttLib.getTableClass('GSUB'),
             ttLib.getTableClass('GPOS'))
def subset_lookups(self, lookup_indices):
    """Retains specified lookups, then removes empty features, language
    systems, and scripts."""
    table = self.table
    if table.LookupList:
        table.LookupList.subset_lookups(lookup_indices)
    if table.FeatureList:
        feature_indices = table.FeatureList.subset_lookups(lookup_indices)
    else:
        feature_indices = []
    variations = getattr(table, 'FeatureVariations', None)
    if variations:
        feature_indices += variations.subset_lookups(lookup_indices)
    feature_indices = _uniq_sort(feature_indices)
    if table.FeatureList:
        table.FeatureList.subset_features(feature_indices)
    if variations:
        variations.subset_features(feature_indices)
    if table.ScriptList:
        table.ScriptList.subset_features(
            feature_indices, self.retain_empty_scripts())
| |
@_add_method(ttLib.getTableClass('GSUB'),
             ttLib.getTableClass('GPOS'))
def neuter_lookups(self, lookup_indices):
    """Sets lookups not in lookup_indices to None."""
    lookup_list = self.table.LookupList
    if lookup_list:
        lookup_list.neuter_lookups(lookup_indices)
| |
@_add_method(ttLib.getTableClass('GSUB'),
             ttLib.getTableClass('GPOS'))
def prune_lookups(self, remap=True):
    """Remove (default) or neuter unreferenced lookups"""
    table = self.table
    # Features referenced by any script...
    if table.ScriptList:
        feature_indices = table.ScriptList.collect_features()
    else:
        feature_indices = []
    # ...then lookups referenced by those features...
    if table.FeatureList:
        lookup_indices = table.FeatureList.collect_lookups(feature_indices)
    else:
        lookup_indices = []
    variations = getattr(table, 'FeatureVariations', None)
    if variations:
        lookup_indices += variations.collect_lookups(feature_indices)
    lookup_indices = _uniq_sort(lookup_indices)
    # ...closed over lookups that reference other lookups.
    if table.LookupList:
        lookup_indices = table.LookupList.closure_lookups(lookup_indices)
    else:
        lookup_indices = []
    if remap:
        self.subset_lookups(lookup_indices)
    else:
        self.neuter_lookups(lookup_indices)
| |
@_add_method(ttLib.getTableClass('GSUB'),
             ttLib.getTableClass('GPOS'))
def subset_feature_tags(self, feature_tags):
    """Keep only features whose tag is in feature_tags."""
    table = self.table
    if table.FeatureList:
        feature_indices = [i for i, record
                           in enumerate(table.FeatureList.FeatureRecord)
                           if record.FeatureTag in feature_tags]
        table.FeatureList.subset_features(feature_indices)
        variations = getattr(table, 'FeatureVariations', None)
        if variations:
            variations.subset_features(feature_indices)
    else:
        feature_indices = []
    if table.ScriptList:
        table.ScriptList.subset_features(
            feature_indices, self.retain_empty_scripts())
| |
@_add_method(ttLib.getTableClass('GSUB'),
             ttLib.getTableClass('GPOS'))
def subset_script_tags(self, script_tags):
    """Keep only script records whose tag is in script_tags."""
    script_list = self.table.ScriptList
    if script_list:
        script_list.ScriptRecord = [record for record in script_list.ScriptRecord
                                    if record.ScriptTag in script_tags]
        script_list.ScriptCount = len(script_list.ScriptRecord)
| |
@_add_method(ttLib.getTableClass('GSUB'),
             ttLib.getTableClass('GPOS'))
def prune_features(self):
    """Remove unreferenced features"""
    table = self.table
    if table.ScriptList:
        feature_indices = table.ScriptList.collect_features()
    else:
        feature_indices = []
    if table.FeatureList:
        table.FeatureList.subset_features(feature_indices)
    variations = getattr(table, 'FeatureVariations', None)
    if variations:
        variations.subset_features(feature_indices)
    if table.ScriptList:
        table.ScriptList.subset_features(
            feature_indices, self.retain_empty_scripts())
| |
@_add_method(ttLib.getTableClass('GSUB'),
             ttLib.getTableClass('GPOS'))
def prune_pre_subset(self, font, options):
    """Drop undesired scripts/features, then neuter unreferenced lookups."""
    if '*' not in options.layout_scripts:
        self.subset_script_tags(options.layout_scripts)
    if '*' not in options.layout_features:
        self.subset_feature_tags(options.layout_features)
    # Neuter (rather than remove) so lookup indices stay stable for now.
    self.prune_lookups(remap=False)
    return True
| |
@_add_method(ttLib.getTableClass('GSUB'),
             ttLib.getTableClass('GPOS'))
def remove_redundant_langsys(self):
    """Drop LangSys records identical to their script's DefaultLangSys."""
    table = self.table
    if not table.ScriptList or not table.FeatureList:
        return

    features = table.FeatureList.FeatureRecord

    for s in table.ScriptList.ScriptRecord:
        d = s.Script.DefaultLangSys
        if not d:
            continue
        # Iterate over a copy; we remove from the live list below.
        for lr in s.Script.LangSysRecord[:]:
            l = lr.LangSys
            # Compare d and l
            if len(d.FeatureIndex) != len(l.FeatureIndex):
                continue
            # 65535 == no required feature; both must agree on having one.
            if (d.ReqFeatureIndex == 65535) != (l.ReqFeatureIndex == 65535):
                continue

            if d.ReqFeatureIndex != 65535:
                if features[d.ReqFeatureIndex] != features[l.ReqFeatureIndex]:
                    continue

            # Compare the referenced feature records pairwise; the for/else
            # runs the else only when no mismatch broke the loop.
            for i in range(len(d.FeatureIndex)):
                if features[d.FeatureIndex[i]] != features[l.FeatureIndex[i]]:
                    break
            else:
                # LangSys and default are equal; delete LangSys
                s.Script.LangSysRecord.remove(lr)
| |
@_add_method(ttLib.getTableClass('GSUB'),
             ttLib.getTableClass('GPOS'))
def prune_post_subset(self, font, options):
    """Final layout-table cleanup after glyph subsetting.

    Prunes unreferenced lookups/features, drops redundant langsys records,
    and downgrades the table version when FeatureVariations becomes empty.
    """
    table = self.table

    self.prune_lookups() # XXX Is this actually needed?!

    if table.LookupList:
        table.LookupList.prune_post_subset(font, options)
        # XXX Next two lines disabled because OTS is stupid and
        # doesn't like NULL offsets here.
        #if not table.LookupList.Lookup:
        #    table.LookupList = None

    if not table.LookupList:
        table.FeatureList = None


    if table.FeatureList:
        self.remove_redundant_langsys()
        # Remove unreferenced features
        self.prune_features()

    # XXX Next two lines disabled because OTS is stupid and
    # doesn't like NULL offsets here.
    #if table.FeatureList and not table.FeatureList.FeatureRecord:
    #    table.FeatureList = None

    # Never drop scripts themselves as them just being available
    # holds semantic significance.
    # XXX Next two lines disabled because OTS is stupid and
    # doesn't like NULL offsets here.
    #if table.ScriptList and not table.ScriptList.ScriptRecord:
    #    table.ScriptList = None

    if not table.FeatureList and hasattr(table, 'FeatureVariations'):
        table.FeatureVariations = None

    # Version 0x00010001 is the one that carries FeatureVariations;
    # downgrade when it ends up empty.
    if hasattr(table, 'FeatureVariations') and not table.FeatureVariations:
        if table.Version == 0x00010001:
            table.Version = 0x00010000

    return True
| |
@_add_method(ttLib.getTableClass('GDEF'))
def subset_glyphs(self, s):
    """Subset all GDEF sub-structures to the GSUB-closed glyph set."""
    glyphs = s.glyphs_gsubed
    table = self.table
    if table.LigCaretList:
        # Keep caret records only for retained ligature glyphs.
        indices = table.LigCaretList.Coverage.subset(glyphs)
        table.LigCaretList.LigGlyph = _list_subset(table.LigCaretList.LigGlyph, indices)
        table.LigCaretList.LigGlyphCount = len(table.LigCaretList.LigGlyph)
    if table.MarkAttachClassDef:
        table.MarkAttachClassDef.classDefs = \
            {g:v for g,v in table.MarkAttachClassDef.classDefs.items()
             if g in glyphs}
    if table.GlyphClassDef:
        table.GlyphClassDef.classDefs = \
            {g:v for g,v in table.GlyphClassDef.classDefs.items()
             if g in glyphs}
    if table.AttachList:
        # Keep attach-point lists only for retained glyphs.
        indices = table.AttachList.Coverage.subset(glyphs)
        GlyphCount = table.AttachList.GlyphCount
        table.AttachList.AttachPoint = [table.AttachList.AttachPoint[i]
                                        for i in indices if i < GlyphCount]
        table.AttachList.GlyphCount = len(table.AttachList.AttachPoint)
    if hasattr(table, "MarkGlyphSetsDef") and table.MarkGlyphSetsDef:
        # Subset each mark glyph set's coverage in place; sets are not
        # removed or renumbered (see TODOs below).
        for coverage in table.MarkGlyphSetsDef.Coverage:
            if coverage:
                coverage.subset(glyphs)

        # TODO: The following is disabled. If enabling, we need to go fixup all
        # lookups that use MarkFilteringSet and map their set.
        # indices = table.MarkGlyphSetsDef.Coverage = \
        #   [c for c in table.MarkGlyphSetsDef.Coverage if c.glyphs]
        # TODO: The following is disabled, as ots doesn't like it. Phew...
        # https://github.com/khaledhosny/ots/issues/172
        # table.MarkGlyphSetsDef.Coverage = [c if c.glyphs else None for c in table.MarkGlyphSetsDef.Coverage]
    return True
| |
| |
def _pruneGDEF(font):
    """Subset GDEF's VarStore to the variation indices still referenced
    by GDEF and GPOS device tables, and remap those references."""
    if 'GDEF' not in font:
        return
    table = font['GDEF'].table
    if not hasattr(table, 'VarStore'):
        return

    # Collect every variation index referenced by device tables.
    used = set()
    table.collect_device_varidxes(used)
    has_gpos = 'GPOS' in font
    if has_gpos:
        font['GPOS'].table.collect_device_varidxes(used)

    # Subset the store, then rewrite all references through the index map.
    varidx_map = table.VarStore.subset_varidxes(used)
    table.remap_device_varidxes(varidx_map)
    if has_gpos:
        font['GPOS'].table.remap_device_varidxes(varidx_map)
| |
@_add_method(ttLib.getTableClass('GDEF'))
def prune_post_subset(self, font, options):
    """Drop emptied GDEF sub-structures, downgrading the table version
    when version-specific fields become empty.  Returns True if the
    table still carries any data."""
    table = self.table
    # XXX check these against OTS
    if table.LigCaretList and not table.LigCaretList.LigGlyphCount:
        table.LigCaretList = None
    if table.MarkAttachClassDef and not table.MarkAttachClassDef.classDefs:
        table.MarkAttachClassDef = None
    if table.GlyphClassDef and not table.GlyphClassDef.classDefs:
        table.GlyphClassDef = None
    if table.AttachList and not table.AttachList.GlyphCount:
        table.AttachList = None
    if hasattr(table, "VarStore"):
        # Subset/remap the variation store to used indices.
        _pruneGDEF(font)
        # Version 0x00010003 is the one that carries VarStore; downgrade
        # when the store holds no data.
        if table.VarStore.VarDataCount == 0:
            if table.Version == 0x00010003:
                table.Version = 0x00010002
    # Version 0x00010002 is the one that carries MarkGlyphSetsDef.
    if (not hasattr(table, "MarkGlyphSetsDef") or
        not table.MarkGlyphSetsDef or
        not table.MarkGlyphSetsDef.Coverage):
        table.MarkGlyphSetsDef = None
        if table.Version == 0x00010002:
            table.Version = 0x00010000
    return bool(table.LigCaretList or
                table.MarkAttachClassDef or
                table.GlyphClassDef or
                table.AttachList or
                (table.Version >= 0x00010002 and table.MarkGlyphSetsDef) or
                (table.Version >= 0x00010003 and table.VarStore))
| |
@_add_method(ttLib.getTableClass('kern'))
def prune_pre_subset(self, font, options):
	"""Drop kern subtables of unrecognized format; keep table if any remain."""
	known = [subtable for subtable in self.kernTables
		 if hasattr(subtable, 'kernTable')]
	self.kernTables = known
	return len(known) > 0
| |
@_add_method(ttLib.getTableClass('kern'))
def subset_glyphs(self, s):
	"""Keep only kern pairs where both glyphs survive GSUB closure."""
	keep = s.glyphs_gsubed
	for subtable in self.kernTables:
		pruned = {}
		for (left, right), value in subtable.kernTable.items():
			if left in keep and right in keep:
				pruned[(left, right)] = value
		subtable.kernTable = pruned
	# Drop subtables with no pairs left; drop the table if none remain.
	self.kernTables = [t for t in self.kernTables if t.kernTable]
	return bool(self.kernTables)
| |
@_add_method(ttLib.getTableClass('vmtx'))
def subset_glyphs(self, s):
	"""Keep metrics of retained glyphs; emptied glyphs get zero metrics."""
	metrics = _dict_subset(self.metrics, s.glyphs)
	for glyph in s.glyphs_emptied:
		metrics[glyph] = (0, 0)
	self.metrics = metrics
	return bool(metrics)
| |
@_add_method(ttLib.getTableClass('hmtx'))
def subset_glyphs(self, s):
	"""Keep metrics of retained glyphs; emptied glyphs get zero metrics."""
	metrics = _dict_subset(self.metrics, s.glyphs)
	for glyph in s.glyphs_emptied:
		metrics[glyph] = (0, 0)
	self.metrics = metrics
	return True	# Required table
| |
@_add_method(ttLib.getTableClass('hdmx'))
def subset_glyphs(self, s):
	"""Subset each per-ppem width map; emptied glyphs get width 0."""
	pruned = {}
	for ppem, widths in self.hdmx.items():
		widths = _dict_subset(widths, s.glyphs)
		for glyph in s.glyphs_emptied:
			widths[glyph] = 0
		pruned[ppem] = widths
	self.hdmx = pruned
	return len(pruned) > 0
| |
@_add_method(ttLib.getTableClass('ankr'))
def subset_glyphs(self, s):
	"""Keep anchor points only for retained glyphs (format 0 only)."""
	anchors = self.table.AnchorPoints
	assert anchors.Format == 0, "unknown 'ankr' format %s" % anchors.Format
	anchors.Anchors = {glyph: points for glyph, points in anchors.Anchors.items()
			   if glyph in s.glyphs}
	return bool(anchors.Anchors)
| |
@_add_method(ttLib.getTableClass('bsln'))
def closure_glyphs(self, s):
	"""Formats 2 and 3 reference a standard glyph; keep it in the subset."""
	bsln = self.table.Baseline
	if bsln.Format == 2 or bsln.Format == 3:
		s.glyphs.add(bsln.StandardGlyph)
| |
@_add_method(ttLib.getTableClass('bsln'))
def subset_glyphs(self, s):
	"""Re-encode per-glyph baselines as deltas from the most common value.

	Formats 1 and 3 store a default baseline plus per-glyph exceptions:
	pick the most frequent baseline among retained glyphs as the new
	default and keep only glyphs that differ.  When no exceptions
	remain, downgrade to the exception-less format (1 -> 0, 3 -> 2).
	"""
	bsln = self.table.Baseline
	if bsln.Format in (1, 3):
		values = {glyph: bsln.BaselineValues.get(glyph, bsln.DefaultBaseline)
			  for glyph in s.glyphs}
		if values:
			(winner, _), = Counter(values.values()).most_common(1)
			bsln.DefaultBaseline = winner
			exceptions = {glyph: b for glyph, b in values.items()
				      if b != winner}
			if exceptions:
				bsln.BaselineValues = exceptions
			else:
				bsln.Format = {1: 0, 3: 2}[bsln.Format]
				del bsln.BaselineValues
	return True
| |
@_add_method(ttLib.getTableClass('lcar'))
def subset_glyphs(self, s):
	"""Keep ligature-caret entries only for retained glyphs."""
	carets = self.table.LigatureCarets
	if carets.Format not in (0, 1):
		assert False, "unknown 'lcar' format %s" % carets.Format
	carets.Carets = {glyph: carets.Carets[glyph] for glyph in s.glyphs
			 if glyph in carets.Carets}
	return bool(carets.Carets)
| |
@_add_method(ttLib.getTableClass('gvar'))
def prune_pre_subset(self, font, options):
	"""Strip variation deltas from glyph 0 when its outline is dropped."""
	if options.notdef_glyph and not options.notdef_outline:
		notdef = font.glyphOrder[0]
		self.variations[notdef] = []
	return True
| |
@_add_method(ttLib.getTableClass('gvar'))
def subset_glyphs(self, s):
	"""Keep variation data for retained glyphs; refresh the glyph count."""
	variations = _dict_subset(self.variations, s.glyphs)
	self.variations = variations
	self.glyphCount = len(variations)
	return len(variations) > 0
| |
| def _remap_index_map(s, varidx_map, table_map): |
| map_ = {k:varidx_map[v] for k,v in table_map.mapping.items()} |
| # Emptied glyphs are remapped to: |
| # if GID <= last retained GID, 0/0: delta set for 0/0 is expected to exist & zeros compress well |
| # if GID > last retained GID, major/minor of the last retained glyph: will be optimized out by table compiler |
| last_idx = varidx_map[table_map.mapping[s.last_retained_glyph]] |
| for g,i in s.reverseEmptiedGlyphMap.items(): |
| map_[g] = last_idx if i > s.last_retained_order else 0 |
| return map_ |
| |
@_add_method(ttLib.getTableClass('HVAR'))
def subset_glyphs(self, s):
	"""Subset HVAR delta-set index maps and its item variation store.

	Each present index map (advance width, lsb, rsb) is cut down to the
	retained glyphs; the union of variation indices still referenced is
	used to subset the VarStore, and the maps are rewritten through the
	resulting index remapping.  When there is no AdvWidthMap the store is
	indexed directly by glyph id, so every retained gid counts as used
	and (with --retain-gids) the implicit first mapping must be kept.
	"""
	table = self.table

	used = set()		# VarIdx values still referenced
	advIdxes_ = set()	# indices used via the implicit advance mapping
	retainAdvMap = False

	if table.AdvWidthMap:
		table.AdvWidthMap.mapping = _dict_subset(table.AdvWidthMap.mapping, s.glyphs)
		used.update(table.AdvWidthMap.mapping.values())
	else:
		# No map: glyph id indexes the store directly.
		used.update(s.reverseOrigGlyphMap.values())
		advIdxes_ = used.copy()
		retainAdvMap = s.options.retain_gids

	if table.LsbMap:
		table.LsbMap.mapping = _dict_subset(table.LsbMap.mapping, s.glyphs)
		used.update(table.LsbMap.mapping.values())
	if table.RsbMap:
		table.RsbMap.mapping = _dict_subset(table.RsbMap.mapping, s.glyphs)
		used.update(table.RsbMap.mapping.values())

	varidx_map = varStore.VarStore_subset_varidxes(table.VarStore, used, retainFirstMap=retainAdvMap, advIdxes=advIdxes_)

	if table.AdvWidthMap:
		table.AdvWidthMap.mapping = _remap_index_map(s, varidx_map, table.AdvWidthMap)
	if table.LsbMap:
		table.LsbMap.mapping = _remap_index_map(s, varidx_map, table.LsbMap)
	if table.RsbMap:
		table.RsbMap.mapping = _remap_index_map(s, varidx_map, table.RsbMap)

	# TODO Return emptiness...
	return True
| |
@_add_method(ttLib.getTableClass('VVAR'))
def subset_glyphs(self, s):
	"""Subset VVAR delta-set index maps and its item variation store.

	Mirrors HVAR subsetting: prune each present index map (advance
	height, tsb, bsb, vertical origin) to the retained glyphs, subset
	the VarStore to the variation indices still referenced, and rewrite
	the maps through the resulting remapping.  Without an AdvHeightMap
	the store is indexed directly by glyph id, so all retained gids are
	considered used and (with --retain-gids) the implicit first mapping
	is preserved.
	"""
	table = self.table

	used = set()		# VarIdx values still referenced
	advIdxes_ = set()	# indices used via the implicit advance mapping
	retainAdvMap = False

	if table.AdvHeightMap:
		table.AdvHeightMap.mapping = _dict_subset(table.AdvHeightMap.mapping, s.glyphs)
		used.update(table.AdvHeightMap.mapping.values())
	else:
		# No map: glyph id indexes the store directly.
		used.update(s.reverseOrigGlyphMap.values())
		advIdxes_ = used.copy()
		retainAdvMap = s.options.retain_gids

	if table.TsbMap:
		table.TsbMap.mapping = _dict_subset(table.TsbMap.mapping, s.glyphs)
		used.update(table.TsbMap.mapping.values())
	if table.BsbMap:
		table.BsbMap.mapping = _dict_subset(table.BsbMap.mapping, s.glyphs)
		used.update(table.BsbMap.mapping.values())
	if table.VOrgMap:
		table.VOrgMap.mapping = _dict_subset(table.VOrgMap.mapping, s.glyphs)
		used.update(table.VOrgMap.mapping.values())

	varidx_map = varStore.VarStore_subset_varidxes(table.VarStore, used, retainFirstMap=retainAdvMap, advIdxes=advIdxes_)

	if table.AdvHeightMap:
		table.AdvHeightMap.mapping = _remap_index_map(s, varidx_map, table.AdvHeightMap)
	if table.TsbMap:
		table.TsbMap.mapping = _remap_index_map(s, varidx_map, table.TsbMap)
	if table.BsbMap:
		table.BsbMap.mapping = _remap_index_map(s, varidx_map, table.BsbMap)
	if table.VOrgMap:
		table.VOrgMap.mapping = _remap_index_map(s, varidx_map, table.VOrgMap)

	# TODO Return emptiness...
	return True
| |
@_add_method(ttLib.getTableClass('VORG'))
def subset_glyphs(self, s):
	"""Prune per-glyph vertical-origin records; defaults cover the rest."""
	kept = s.glyphs
	self.VOriginRecords = {glyph: origin
			       for glyph, origin in self.VOriginRecords.items()
			       if glyph in kept}
	self.numVertOriginYMetrics = len(self.VOriginRecords)
	return True	# Never drop; has default metrics
| |
@_add_method(ttLib.getTableClass('opbd'))
def subset_glyphs(self, s):
	"""Keep optical-bounds entries (deltas or points) for retained glyphs."""
	bounds = self.table.OpticalBounds
	if bounds.Format == 0:
		kept = {glyph: bounds.OpticalBoundsDeltas[glyph]
			for glyph in s.glyphs
			if glyph in bounds.OpticalBoundsDeltas}
		bounds.OpticalBoundsDeltas = kept
	elif bounds.Format == 1:
		kept = {glyph: bounds.OpticalBoundsPoints[glyph]
			for glyph in s.glyphs
			if glyph in bounds.OpticalBoundsPoints}
		bounds.OpticalBoundsPoints = kept
	else:
		assert False, "unknown 'opbd' format %s" % bounds.Format
	return bool(kept)
| |
@_add_method(ttLib.getTableClass('post'))
def prune_pre_subset(self, font, options):
	"""Switch to format 3.0 (no glyph names) unless names were requested."""
	if options.glyph_names:
		return True	# keep names; Required table
	self.formatType = 3.0	# format 3.0 carries no glyph names
	return True	# Required table
| |
@_add_method(ttLib.getTableClass('post'))
def subset_glyphs(self, s):
	"""Clearing the extra-names list is all the subsetting 'post' needs."""
	self.extraNames = list()
	return True	# Required table
| |
@_add_method(ttLib.getTableClass('prop'))
def subset_glyphs(self, s):
	"""Re-encode AAT glyph properties around the most common value.

	Format 0 stores only a default; keep the table iff that default is
	non-zero.  Format 1 adds per-glyph exceptions: the most common
	property among retained glyphs becomes the new default, only glyphs
	that differ are kept, and the table collapses back to format 0 when
	none do.  (Also renames the loop variable that used to shadow the
	sub-table reference.)
	"""
	table = self.table.GlyphProperties
	if table.Format == 0:
		return table.DefaultProperties != 0
	if table.Format != 1:
		assert False, "unknown 'prop' format %s" % table.Format
	properties = {glyph: table.Properties.get(glyph, table.DefaultProperties)
		      for glyph in s.glyphs}
	(winner, _), = Counter(properties.values()).most_common(1)
	table.DefaultProperties = winner
	table.Properties = {glyph: value for glyph, value in properties.items()
			    if value != winner}
	if not table.Properties:
		del table.Properties
		table.Format = 0
		return table.DefaultProperties != 0
	return True
| |
@_add_method(ttLib.getTableClass('COLR'))
def closure_glyphs(self, s):
	"""Transitively add the layer glyphs of every retained color glyph."""
	frontier = s.glyphs
	while frontier:
		discovered = set()
		for glyph in frontier:
			for layer in self.ColorLayers.get(glyph, []):
				discovered.add(layer.name)
		discovered.difference_update(s.glyphs)
		s.glyphs.update(discovered)
		frontier = discovered
| |
@_add_method(ttLib.getTableClass('COLR'))
def subset_glyphs(self, s):
	"""Keep layer lists only for retained glyphs."""
	layers = self.ColorLayers
	self.ColorLayers = {glyph: layers[glyph] for glyph in s.glyphs
			    if glyph in layers}
	return bool(self.ColorLayers)
| |
# TODO: prune unused palettes
@_add_method(ttLib.getTableClass('CPAL'))
def prune_post_subset(self, font, options):
	"""Keep CPAL as-is; palette entries are not pruned (see TODO above)."""
	return True
| |
@_add_method(otTables.MathGlyphConstruction)
def closure_glyphs(self, glyphs):
	"""Return the variant and assembly-part glyphs this construction uses."""
	referenced = {record.VariantGlyph for record in self.MathGlyphVariantRecord}
	if self.GlyphAssembly:
		referenced.update(part.glyph for part in self.GlyphAssembly.PartRecords)
	return referenced
| |
@_add_method(otTables.MathVariants)
def closure_glyphs(self, s):
	"""Add math variants and assembly parts of retained glyphs."""
	retained = frozenset(s.glyphs)
	extra = set()

	for coverage, constructions in (
			(self.VertGlyphCoverage, self.VertGlyphConstruction),
			(self.HorizGlyphCoverage, self.HorizGlyphConstruction)):
		if not coverage:
			continue
		for idx in coverage.intersect(retained):
			extra.update(constructions[idx].closure_glyphs(retained))

	s.glyphs.update(extra)
| |
@_add_method(ttLib.getTableClass('MATH'))
def closure_glyphs(self, s):
	"""Close the glyph set over MATH variants, when present."""
	variants = self.table.MathVariants
	if variants:
		variants.closure_glyphs(s)
| |
@_add_method(otTables.MathItalicsCorrectionInfo)
def subset_glyphs(self, s):
	"""Keep italics-correction records whose glyphs survive."""
	kept = self.Coverage.subset(s.glyphs)
	self.ItalicsCorrection = _list_subset(self.ItalicsCorrection, kept)
	count = len(self.ItalicsCorrection)
	self.ItalicsCorrectionCount = count
	return count > 0
| |
@_add_method(otTables.MathTopAccentAttachment)
def subset_glyphs(self, s):
	"""Keep top-accent attachment records whose glyphs survive."""
	kept = self.TopAccentCoverage.subset(s.glyphs)
	self.TopAccentAttachment = _list_subset(self.TopAccentAttachment, kept)
	count = len(self.TopAccentAttachment)
	self.TopAccentAttachmentCount = count
	return count > 0
| |
@_add_method(otTables.MathKernInfo)
def subset_glyphs(self, s):
	"""Keep math-kern records whose glyphs survive."""
	kept = self.MathKernCoverage.subset(s.glyphs)
	self.MathKernInfoRecords = _list_subset(self.MathKernInfoRecords, kept)
	count = len(self.MathKernInfoRecords)
	self.MathKernCount = count
	return count > 0
| |
@_add_method(otTables.MathGlyphInfo)
def subset_glyphs(self, s):
	"""Subset each optional per-glyph MATH sub-table that is present."""
	for sub in (self.MathItalicsCorrectionInfo,
		    self.MathTopAccentAttachment,
		    self.MathKernInfo):
		if sub:
			sub.subset_glyphs(s)
	if self.ExtendedShapeCoverage:
		self.ExtendedShapeCoverage.subset(s.glyphs)
	return True
| |
@_add_method(otTables.MathVariants)
def subset_glyphs(self, s):
	"""Keep vertical/horizontal constructions for retained glyphs."""
	if self.VertGlyphCoverage:
		kept = self.VertGlyphCoverage.subset(s.glyphs)
		self.VertGlyphConstruction = _list_subset(
			self.VertGlyphConstruction, kept)
		self.VertGlyphCount = len(self.VertGlyphConstruction)

	if self.HorizGlyphCoverage:
		kept = self.HorizGlyphCoverage.subset(s.glyphs)
		self.HorizGlyphConstruction = _list_subset(
			self.HorizGlyphConstruction, kept)
		self.HorizGlyphCount = len(self.HorizGlyphConstruction)

	return True
| |
@_add_method(ttLib.getTableClass('MATH'))
def subset_glyphs(self, s):
	"""Subset MATH sub-tables against the math-closed glyph set."""
	s.glyphs = s.glyphs_mathed
	for sub in (self.table.MathGlyphInfo, self.table.MathVariants):
		if sub:
			sub.subset_glyphs(s)
	return True
| |
@_add_method(ttLib.getTableModule('glyf').Glyph)
def remapComponentsFast(self, glyphidmap):
	"""Rewrite component glyph ids inside raw composite-glyph data.

	Works directly on the undecompiled binary: a composite glyph has
	numberOfContours < 0 (first int16); component records start at
	offset 10, each a flags word followed by a glyph id, then
	variable-length argument/transform fields selected by the flags.
	"""
	if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0:
		return	# Not composite
	data = array.array("B", self.data)
	i = 10	# skip the 10-byte glyph header
	more = 1
	while more:
		flags =(data[i] << 8) | data[i+1]
		glyphID =(data[i+2] << 8) | data[i+3]
		# Remap
		glyphID = glyphidmap[glyphID]
		data[i+2] = glyphID >> 8
		data[i+3] = glyphID & 0xFF
		i += 4
		flags = int(flags)

		# Skip the variable-length fields for this component.
		if flags & 0x0001: i += 4	# ARG_1_AND_2_ARE_WORDS
		else: i += 2
		if flags & 0x0008: i += 2	# WE_HAVE_A_SCALE
		elif flags & 0x0040: i += 4	# WE_HAVE_AN_X_AND_Y_SCALE
		elif flags & 0x0080: i += 8	# WE_HAVE_A_TWO_BY_TWO
		more = flags & 0x0020	# MORE_COMPONENTS

	self.data = data.tobytes()
| |
@_add_method(ttLib.getTableClass('glyf'))
def closure_glyphs(self, s):
	"""Transitively add glyphs referenced as components of composites."""
	glyphSet = self.glyphs
	frontier = s.glyphs
	while frontier:
		referenced = set()
		for name in frontier:
			glyph = glyphSet.get(name)
			if glyph is None:
				continue
			referenced.update(glyph.getComponentNames(self))
		referenced.difference_update(s.glyphs)
		s.glyphs.update(referenced)
		frontier = referenced
| |
@_add_method(ttLib.getTableClass('glyf'))
def prune_pre_subset(self, font, options):
	"""Empty glyph 0's outline when --no-notdef-outline is in effect.

	Clearing the glyph's parsed attributes and giving it empty raw data
	makes it compile to a zero-length (blank) glyph.
	"""
	if options.notdef_glyph and not options.notdef_outline:
		g = self[self.glyphOrder[0]]
		# Yay, easy!
		g.__dict__.clear()
		# Raw glyf data is binary; use bytes, not str (py2 leftover).
		g.data = b""
	return True
| |
@_add_method(ttLib.getTableClass('glyf'))
def subset_glyphs(self, s):
	"""Subset glyph outlines and remap composite component glyph ids.

	When glyph ids are not retained, component glyph ids embedded in raw
	composite data are rewritten to the new, compacted glyph order.  In
	retain-gids mode, emptied glyphs are replaced by blank glyphs.
	"""
	self.glyphs = _dict_subset(self.glyphs, s.glyphs)
	if not s.options.retain_gids:
		indices = [i for i,g in enumerate(self.glyphOrder) if g in s.glyphs]
		glyphmap = {o:n for n,o in enumerate(indices)}
		for v in self.glyphs.values():
			if hasattr(v, "data"):
				v.remapComponentsFast(glyphmap)
	Glyph = ttLib.getTableModule('glyf').Glyph
	for g in s.glyphs_emptied:
		self.glyphs[g] = Glyph()
		# Raw glyf data is binary; use bytes, not str (py2 leftover).
		self.glyphs[g].data = b''
	self.glyphOrder = [g for g in self.glyphOrder if g in s.glyphs or g in s.glyphs_emptied]
	# Don't drop empty 'glyf' tables, otherwise 'loca' doesn't get subset.
	return True
| |
@_add_method(ttLib.getTableClass('glyf'))
def prune_post_subset(self, font, options):
	"""Trim glyph padding, and instructions too unless hinting is kept."""
	strip_instructions = not options.hinting
	for glyph in self.glyphs.values():
		glyph.trim(remove_hinting=strip_instructions)
	return True
| |
| |
@_add_method(ttLib.getTableClass('cmap'))
def closure_glyphs(self, s):
	"""Map requested unicodes to glyphs via the Unicode cmap subtables.

	Adds to s.glyphs every glyph mapped from a requested code point,
	including Unicode Variation Sequence (format 14) mappings, and
	records code points no subtable covers in s.unicodes_missing.
	"""
	tables = [t for t in self.tables if t.isUnicode()]

	# Close glyphs
	for table in tables:
		if table.format == 14:
			# UVS subtable: (unicode, glyph-or-None) pairs per selector.
			for cmap in table.uvsDict.values():
				glyphs = {g for u,g in cmap if u in s.unicodes_requested}
				if None in glyphs:
					# None means "use the default (non-UVS) mapping".
					glyphs.remove(None)
				s.glyphs.update(glyphs)
		else:
			cmap = table.cmap
			intersection = s.unicodes_requested.intersection(cmap.keys())
			s.glyphs.update(cmap[u] for u in intersection)

	# Calculate unicodes_missing
	s.unicodes_missing = s.unicodes_requested.copy()
	for table in tables:
		s.unicodes_missing.difference_update(table.cmap)
| |
@_add_method(ttLib.getTableClass('cmap'))
def prune_pre_subset(self, font, options):
	"""Drop cmap subtables that are unwanted or cannot be subset."""
	tables = self.tables
	if not options.legacy_cmap:
		# Drop non-Unicode / non-Symbol cmaps
		tables = [t for t in tables if t.isUnicode() or t.isSymbol()]
	if not options.symbol_cmap:
		tables = [t for t in tables if not t.isSymbol()]
	# TODO(behdad) Only keep one subtable?
	# For now, drop format=0 which can't be subset_glyphs easily?
	tables = [t for t in tables if t.format != 0]
	self.tables = tables
	self.numSubTables = len(tables)
	return True	# Required table
| |
@_add_method(ttLib.getTableClass('cmap'))
def subset_glyphs(self, s):
	"""Prune cmap subtables to requested glyphs/unicodes, dropping empties."""
	s.glyphs = None # We use s.glyphs_requested and s.unicodes_requested only
	for t in self.tables:
		if t.format == 14:
			# TODO(behdad) We drop all the default-UVS mappings
			# for glyphs_requested. So it's the caller's responsibility to make
			# sure those are included.
			t.uvsDict = {v:[(u,g) for u,g in l
					if g in s.glyphs_requested or u in s.unicodes_requested]
				     for v,l in t.uvsDict.items()}
			# Drop variation selectors with no surviving sequences.
			t.uvsDict = {v:l for v,l in t.uvsDict.items() if l}
		elif t.isUnicode():
			t.cmap = {u:g for u,g in t.cmap.items()
				  if g in s.glyphs_requested or u in s.unicodes_requested}
		else:
			# Non-Unicode (e.g. Symbol) subtables: keep by glyph only.
			t.cmap = {u:g for u,g in t.cmap.items()
				  if g in s.glyphs_requested}
	self.tables = [t for t in self.tables
		       if (t.cmap if t.format != 14 else t.uvsDict)]
	self.numSubTables = len(self.tables)
	# TODO(behdad) Convert formats when needed.
	# In particular, if we have a format=12 without non-BMP
	# characters, either drop format=12 one or convert it
	# to format=4 if there's not one.
	return True # Required table
| |
@_add_method(ttLib.getTableClass('DSIG'))
def prune_pre_subset(self, font, options):
	"""Drop all signatures: they would be invalid after subsetting."""
	self.signatureRecords = []
	self.usNumSigs = 0
	return True
| |
@_add_method(ttLib.getTableClass('maxp'))
def prune_pre_subset(self, font, options):
	"""Zero out TrueType VM limits when hinting is being stripped."""
	if not options.hinting and self.tableVersion == 0x00010000:
		# Only version 1.0 ('glyf' flavor) carries instruction limits.
		self.maxZones = 1
		self.maxTwilightPoints = 0
		self.maxStorage = 0
		self.maxFunctionDefs = 0
		self.maxInstructionDefs = 0
		self.maxStackElements = 0
		self.maxSizeOfInstructions = 0
	return True
| |
@_add_method(ttLib.getTableClass('name'))
def prune_pre_subset(self, font, options):
	"""Prune name records to the requested IDs, languages, and platforms.

	Name IDs referenced by fvar axes/instances and STAT records are
	always retained on top of --name-IDs.  With --obfuscate-names, the
	family/style/full/PostScript names are replaced by stub strings so
	the subset cannot be installed as a system font.
	"""
	nameIDs = set(options.name_IDs)
	fvar = font.get('fvar')
	if fvar:
		# Keep names referenced by variation axes and named instances.
		nameIDs.update([axis.axisNameID for axis in fvar.axes])
		nameIDs.update([inst.subfamilyNameID for inst in fvar.instances])
		nameIDs.update([inst.postscriptNameID for inst in fvar.instances
				if inst.postscriptNameID != 0xFFFF])
	stat = font.get('STAT')
	if stat:
		if stat.table.AxisValueArray:
			nameIDs.update([val_rec.ValueNameID for val_rec in stat.table.AxisValueArray.AxisValue])
		nameIDs.update([axis_rec.AxisNameID for axis_rec in stat.table.DesignAxisRecord.Axis])
	if '*' not in options.name_IDs:
		self.names = [n for n in self.names if n.nameID in nameIDs]
	if not options.name_legacy:
		# TODO(behdad) Sometimes (eg Apple Color Emoji) there's only a macroman
		# entry for Latin and no Unicode names.
		self.names = [n for n in self.names if n.isUnicode()]
	# TODO(behdad) Option to keep only one platform's
	if '*' not in options.name_languages:
		# TODO(behdad) This is Windows-platform specific!
		self.names = [n for n in self.names
			      if n.langID in options.name_languages]
	if options.obfuscate_names:
		namerecs = []
		for n in self.names:
			if n.nameID in [1, 4]:
				n.string = ".\x7f".encode('utf_16_be') if n.isUnicode() else ".\x7f"
			elif n.nameID in [2, 6]:
				n.string = "\x7f".encode('utf_16_be') if n.isUnicode() else "\x7f"
			elif n.nameID == 3:
				n.string = ""
			elif n.nameID in [16, 17, 18]:
				# Typographic family/style and compatible-full names: drop.
				continue
			namerecs.append(n)
		self.names = namerecs
	return True	# Required table
| |
| |
@_add_method(ttLib.getTableClass('head'))
def prune_post_subset(self, font, options):
	"""Keep 'head' unconditionally."""
	# Force re-compiling head table, to update any recalculated values.
	return True
| |
| |
| # TODO(behdad) OS/2 ulCodePageRange? |
| # TODO(behdad) Drop AAT tables. |
| # TODO(behdad) Drop unneeded GSUB/GPOS Script/LangSys entries. |
| # TODO(behdad) Drop empty GSUB/GPOS, and GDEF if no GSUB/GPOS left |
| # TODO(behdad) Drop GDEF subitems if unused by lookups |
| # TODO(behdad) Avoid recursing too much (in GSUB/GPOS and in CFF) |
| # TODO(behdad) Text direction considerations. |
| # TODO(behdad) Text script / language considerations. |
| # TODO(behdad) Optionally drop 'kern' table if GPOS available |
| # TODO(behdad) Implement --unicode='*' to choose all cmap'ed |
| # TODO(behdad) Drop old-spec Indic scripts |
| |
| |
class Options(object):

	"""Subsetting options, exposed both as a Python API and as CLI flags.

	Every public attribute set in __init__ corresponds to a
	``--option[=value]`` command-line flag (dashes map to underscores);
	see the module docstring for per-option documentation.
	"""

	class OptionError(Exception): pass
	class UnknownOptionError(OptionError): pass

	# spaces in tag names (e.g. "SVG ", "cvt ") are stripped by the argument parser
	_drop_tables_default = ['BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC',
				'EBSC', 'SVG', 'PCLT', 'LTSH']
	_drop_tables_default += ['Feat', 'Glat', 'Gloc', 'Silf', 'Sill']  # Graphite
	_no_subset_tables_default = ['avar', 'fvar',
				     'gasp', 'head', 'hhea', 'maxp',
				     'vhea', 'OS/2', 'loca', 'name', 'cvt',
				     'fpgm', 'prep', 'VDMX', 'DSIG', 'CPAL',
				     'MVAR', 'cvar', 'STAT']
	_hinting_tables_default = ['cvt', 'cvar', 'fpgm', 'prep', 'hdmx', 'VDMX']

	# Based on HarfBuzz shapers
	_layout_features_groups = {
		# Default shaper
		'common': ['rvrn', 'ccmp', 'liga', 'locl', 'mark', 'mkmk', 'rlig'],
		'fractions': ['frac', 'numr', 'dnom'],
		'horizontal': ['calt', 'clig', 'curs', 'kern', 'rclt'],
		'vertical': ['valt', 'vert', 'vkrn', 'vpal', 'vrt2'],
		'ltr': ['ltra', 'ltrm'],
		'rtl': ['rtla', 'rtlm'],
		# Complex shapers
		'arabic': ['init', 'medi', 'fina', 'isol', 'med2', 'fin2', 'fin3',
			   'cswh', 'mset', 'stch'],
		'hangul': ['ljmo', 'vjmo', 'tjmo'],
		'tibetan': ['abvs', 'blws', 'abvm', 'blwm'],
		'indic': ['nukt', 'akhn', 'rphf', 'rkrf', 'pref', 'blwf', 'half',
			  'abvf', 'pstf', 'cfar', 'vatu', 'cjct', 'init', 'pres',
			  'abvs', 'blws', 'psts', 'haln', 'dist', 'abvm', 'blwm'],
	}
	_layout_features_default = _uniq_sort(sum(
		iter(_layout_features_groups.values()), []))

	def __init__(self, **kwargs):
		"""Initialize all options to their defaults, then apply kwargs."""

		self.drop_tables = self._drop_tables_default[:]
		self.no_subset_tables = self._no_subset_tables_default[:]
		self.passthrough_tables = False  # keep/drop tables we can't subset
		self.hinting_tables = self._hinting_tables_default[:]
		self.legacy_kern = False  # drop 'kern' table if GPOS available
		self.layout_closure = True
		self.layout_features = self._layout_features_default[:]
		self.layout_scripts = ['*']
		self.ignore_missing_glyphs = False
		self.ignore_missing_unicodes = True
		self.hinting = True
		self.glyph_names = False
		self.legacy_cmap = False
		self.symbol_cmap = False
		self.name_IDs = [0, 1, 2, 3, 4, 5, 6]  # https://github.com/fonttools/fonttools/issues/1170#issuecomment-364631225
		self.name_legacy = False
		self.name_languages = [0x0409]  # English
		self.obfuscate_names = False  # to make webfont unusable as a system font
		self.retain_gids = False
		self.notdef_glyph = True  # gid0 for TrueType / .notdef for CFF
		self.notdef_outline = False  # No need for notdef to have an outline really
		self.recommended_glyphs = False  # gid1, gid2, gid3 for TrueType
		self.recalc_bounds = False  # Recalculate font bounding boxes
		self.recalc_timestamp = False  # Recalculate font modified timestamp
		self.prune_unicode_ranges = True  # Clear unused 'ulUnicodeRange' bits
		self.recalc_average_width = False  # update 'xAvgCharWidth'
		self.recalc_max_context = False  # update 'usMaxContext'
		self.canonical_order = None  # Order tables as recommended
		self.flavor = None  # May be 'woff' or 'woff2'
		self.with_zopfli = False  # use zopfli instead of zlib for WOFF 1.0
		self.desubroutinize = False  # Desubroutinize CFF CharStrings
		self.verbose = False
		self.timing = False
		self.xml = False
		self.font_number = -1

		self.set(**kwargs)

	def set(self, **kwargs):
		"""Set known options from keyword arguments.

		Raises UnknownOptionError for any name that is not an existing
		option attribute.
		"""
		for k,v in kwargs.items():
			if not hasattr(self, k):
				raise self.UnknownOptionError("Unknown option '%s'" % k)
			setattr(self, k, v)

	def parse_opts(self, argv, ignore_unknown=[]):
		"""Parse ``--option[=value]`` arguments, updating self in place.

		Supports ``--no-xxx`` negation, ``--xxx?`` to print the current
		value, and ``+=`` / ``-=`` to extend or trim list-valued options.
		Values are coerced to the type of the option's current value.
		Returns positional arguments plus any unknown options that were
		passed through via ``ignore_unknown``.
		"""
		posargs = []
		passthru_options = []
		for a in argv:
			orig_a = a
			if not a.startswith('--'):
				posargs.append(a)
				continue
			a = a[2:]
			i = a.find('=')
			op = '='
			if i == -1:
				if a.startswith("no-"):
					k = a[3:]
					if k == "canonical-order":
						# reorderTables=None is faster than False (the latter
						# still reorders to "keep" the original table order)
						v = None
					else:
						v = False
				else:
					k = a
					v = True
				if k.endswith("?"):
					k = k[:-1]
					v = '?'
			else:
				k = a[:i]
				if k[-1] in "-+":
					op = k[-1]+'='  # Op is '-=' or '+=' now.
					k = k[:-1]
				v = a[i+1:]
			ok = k
			k = k.replace('-', '_')
			if not hasattr(self, k):
				if ignore_unknown is True or ok in ignore_unknown:
					passthru_options.append(orig_a)
					continue
				else:
					raise self.UnknownOptionError("Unknown option '%s'" % a)

			ov = getattr(self, k)
			if v == '?':
				print("Current setting for '%s' is: %s" % (ok, ov))
				continue
			if isinstance(ov, bool):
				v = bool(v)
			elif isinstance(ov, int):
				v = int(v)
			elif isinstance(ov, str):
				v = str(v)  # redundant
			elif isinstance(ov, list):
				if isinstance(v, bool):
					raise self.OptionError("Option '%s' requires values to be specified using '='" % a)
				vv = v.replace(',', ' ').split()
				if vv == ['']:
					vv = []
				# Numeric-looking items are parsed as ints (base auto-detected).
				vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv]
				if op == '=':
					v = vv
				elif op == '+=':
					v = ov
					v.extend(vv)
				elif op == '-=':
					v = ov
					for x in vv:
						if x in v:
							v.remove(x)
				else:
					assert False

			setattr(self, k, v)

		return posargs + passthru_options
| |
| |
class Subsetter(object):

	"""Drives subsetting of all tables of a font per a set of Options."""

	class SubsettingError(Exception): pass
	class MissingGlyphsSubsettingError(SubsettingError): pass
	class MissingUnicodesSubsettingError(SubsettingError): pass

	def __init__(self, options=None):
		"""Create a subsetter; a default Options() is used when none given."""

		if not options:
			options = Options()

		self.options = options
		# Accumulated subset requests; filled in by populate().
		self.unicodes_requested = set()
		self.glyph_names_requested = set()
		self.glyph_ids_requested = set()
| |
| def populate(self, glyphs=[], gids=[], unicodes=[], text=""): |
| self.unicodes_requested.update(unicodes) |
| if isinstance(text, bytes): |
| text = text.decode("utf_8") |
| text_utf32 = text.encode("utf-32-be") |
| nchars = len(text_utf32)//4 |
| for u in struct.unpack('>%dL' % nchars, text_utf32): |
| self.unicodes_requested.add(u) |
| self.glyph_names_requested.update(glyphs) |
| self.glyph_ids_requested.update(gids) |
| |
	def _prune_pre_subset(self, font):
		"""First pass: drop unwanted tables and run per-table pre-pruning.

		A table is dropped outright when listed in --drop-tables, when it
		is a hinting table and hinting is being stripped, or for 'kern'
		when GPOS kerning supersedes it.  Surviving tables with a
		prune_pre_subset method may prune themselves to empty, in which
		case they are dropped too.
		"""
		for tag in self._sort_tables(font):
			if (tag.strip() in self.options.drop_tables or
			    (tag.strip() in self.options.hinting_tables and not self.options.hinting) or
			    (tag == 'kern' and (not self.options.legacy_kern and 'GPOS' in font))):
				log.info("%s dropped", tag)
				del font[tag]
				continue

			clazz = ttLib.getTableClass(tag)

			if hasattr(clazz, 'prune_pre_subset'):
				with timer("load '%s'" % tag):
					table = font[tag]
				with timer("prune '%s'" % tag):
					retain = table.prune_pre_subset(font, self.options)
				if not retain:
					log.info("%s pruned to empty; dropped", tag)
					del font[tag]
					continue
				else:
					log.info("%s pruned", tag)
| |
	def _closure_glyphs(self, font):
		"""Compute the final retained glyph set.

		Starting from the requested glyph names/ids and cmap'ed unicodes,
		the set is transitively closed over GSUB substitutions, MATH
		variants, COLR layers, bsln standard glyphs, and glyf/CFF
		components.  Snapshots after each stage (glyphs_cmaped,
		glyphs_gsubed, glyphs_mathed, glyphs_glyfed/glyphs_cffed,
		glyphs_retained) are stored for the per-table subset_glyphs
		methods, together with glyph-order bookkeeping for --retain-gids.
		"""

		realGlyphs = set(font.getGlyphOrder())
		glyph_order = font.getGlyphOrder()

		self.glyphs_requested = set()
		self.glyphs_requested.update(self.glyph_names_requested)
		self.glyphs_requested.update(glyph_order[i]
					     for i in self.glyph_ids_requested
					     if i < len(glyph_order))

		self.glyphs_missing = set()
		self.glyphs_missing.update(self.glyphs_requested.difference(realGlyphs))
		self.glyphs_missing.update(i for i in self.glyph_ids_requested
					   if i >= len(glyph_order))
		if self.glyphs_missing:
			log.info("Missing requested glyphs: %s", self.glyphs_missing)
			if not self.options.ignore_missing_glyphs:
				raise self.MissingGlyphsSubsettingError(self.glyphs_missing)

		self.glyphs = self.glyphs_requested.copy()

		self.unicodes_missing = set()
		if 'cmap' in font:
			with timer("close glyph list over 'cmap'"):
				font['cmap'].closure_glyphs(self)
				self.glyphs.intersection_update(realGlyphs)
		self.glyphs_cmaped = frozenset(self.glyphs)
		if self.unicodes_missing:
			missing = ["U+%04X" % u for u in self.unicodes_missing]
			log.info("Missing glyphs for requested Unicodes: %s", missing)
			if not self.options.ignore_missing_unicodes:
				raise self.MissingUnicodesSubsettingError(missing)
			del missing

		if self.options.notdef_glyph:
			if 'glyf' in font:
				self.glyphs.add(font.getGlyphName(0))
				log.info("Added gid0 to subset")
			else:
				self.glyphs.add('.notdef')
				log.info("Added .notdef to subset")
		if self.options.recommended_glyphs:
			if 'glyf' in font:
				for i in range(min(4, len(font.getGlyphOrder()))):
					self.glyphs.add(font.getGlyphName(i))
				log.info("Added first four glyphs to subset")

		if self.options.layout_closure and 'GSUB' in font:
			with timer("close glyph list over 'GSUB'"):
				log.info("Closing glyph list over 'GSUB': %d glyphs before",
					 len(self.glyphs))
				log.glyphs(self.glyphs, font=font)
				font['GSUB'].closure_glyphs(self)
				self.glyphs.intersection_update(realGlyphs)
				log.info("Closed glyph list over 'GSUB': %d glyphs after",
					 len(self.glyphs))
				log.glyphs(self.glyphs, font=font)
		self.glyphs_gsubed = frozenset(self.glyphs)

		if 'MATH' in font:
			with timer("close glyph list over 'MATH'"):
				log.info("Closing glyph list over 'MATH': %d glyphs before",
					 len(self.glyphs))
				log.glyphs(self.glyphs, font=font)
				font['MATH'].closure_glyphs(self)
				self.glyphs.intersection_update(realGlyphs)
				log.info("Closed glyph list over 'MATH': %d glyphs after",
					 len(self.glyphs))
				log.glyphs(self.glyphs, font=font)
		self.glyphs_mathed = frozenset(self.glyphs)

		for table in ('COLR', 'bsln'):
			if table in font:
				with timer("close glyph list over '%s'" % table):
					log.info("Closing glyph list over '%s': %d glyphs before",
						 table, len(self.glyphs))
					log.glyphs(self.glyphs, font=font)
					font[table].closure_glyphs(self)
					self.glyphs.intersection_update(realGlyphs)
					log.info("Closed glyph list over '%s': %d glyphs after",
						 table, len(self.glyphs))
					log.glyphs(self.glyphs, font=font)

		if 'glyf' in font:
			with timer("close glyph list over 'glyf'"):
				log.info("Closing glyph list over 'glyf': %d glyphs before",
					 len(self.glyphs))
				log.glyphs(self.glyphs, font=font)
				font['glyf'].closure_glyphs(self)
				self.glyphs.intersection_update(realGlyphs)
				log.info("Closed glyph list over 'glyf': %d glyphs after",
					 len(self.glyphs))
				log.glyphs(self.glyphs, font=font)
		self.glyphs_glyfed = frozenset(self.glyphs)

		if 'CFF ' in font:
			with timer("close glyph list over 'CFF '"):
				log.info("Closing glyph list over 'CFF ': %d glyphs before",
					 len(self.glyphs))
				log.glyphs(self.glyphs, font=font)
				font['CFF '].closure_glyphs(self)
				self.glyphs.intersection_update(realGlyphs)
				log.info("Closed glyph list over 'CFF ': %d glyphs after",
					 len(self.glyphs))
				log.glyphs(self.glyphs, font=font)
		self.glyphs_cffed = frozenset(self.glyphs)

		self.glyphs_retained = frozenset(self.glyphs)

		order = font.getReverseGlyphMap()
		self.reverseOrigGlyphMap = {g:order[g] for g in self.glyphs_retained}

		# Bookkeeping for --retain-gids: the highest retained gid, and the
		# glyphs below it that must be kept as empty placeholders.
		self.last_retained_order = max(self.reverseOrigGlyphMap.values())
		self.last_retained_glyph = font.getGlyphOrder()[self.last_retained_order]

		self.glyphs_emptied = frozenset()
		if self.options.retain_gids:
			self.glyphs_emptied = {g for g in realGlyphs - self.glyphs_retained if order[g] <= self.last_retained_order}

		self.reverseEmptiedGlyphMap = {g:order[g] for g in self.glyphs_emptied}


		log.info("Retaining %d glyphs", len(self.glyphs_retained))

		del self.glyphs
| |
| def _subset_glyphs(self, font): |
| for tag in self._sort_tables(font): |
| clazz = ttLib.getTableClass(tag) |
| |
| if tag.strip() in self.options.no_subset_tables: |
| log.info("%s subsetting not needed", tag) |
| elif hasattr(clazz, 'subset_glyphs'): |
| with timer("subset '%s'" % tag): |
| table = font[tag] |
| self.glyphs = self.glyphs_retained |
| retain = table.subset_glyphs(self) |
| del self.glyphs |
| if not retain: |
| log.info("%s subsetted to empty; dropped", tag) |
| del font[tag] |
| else: |
| log.info("%s subsetted", tag) |
| elif self.options.passthrough_tables: |
| log.info("%s NOT subset; don't know how to subset", tag) |
| else: |
| log.warning("%s NOT subset; don't know how to subset; dropped", tag) |
| del font[tag] |
| |
| with timer("subset GlyphOrder"): |
| glyphOrder = font.getGlyphOrder() |
| if not self.options.retain_gids: |
| glyphOrder = [g for g in glyphOrder if g in self.glyphs_retained] |
| else: |
| glyphOrder = [g for g in glyphOrder if font.getGlyphID(g) <= self.last_retained_order] |
| |
| font.setGlyphOrder(glyphOrder) |
| font._buildReverseGlyphOrderDict() |
| |
| |
| def _prune_post_subset(self, font): |
| for tag in font.keys(): |
| if tag == 'GlyphOrder': continue |
| if tag == 'OS/2' and self.options.prune_unicode_ranges: |
| old_uniranges = font[tag].getUnicodeRanges() |
| new_uniranges = font[tag].recalcUnicodeRanges(font, pruneOnly=True) |
| if old_uniranges != new_uniranges: |
| log.info("%s Unicode ranges pruned: %s", tag, sorted(new_uniranges)) |
| if self.options.recalc_average_width: |
| widths = [m[0] for m in font["hmtx"].metrics.values() if m[0] > 0] |
| avg_width = otRound(sum(widths) / len(widths)) |
| if avg_width != font[tag].xAvgCharWidth: |
| font[tag].xAvgCharWidth = avg_width |
| log.info("%s xAvgCharWidth updated: %d", tag, avg_width) |
| if self.options.recalc_max_context: |
| max_context = maxCtxFont(font) |
| if max_context != font[tag].usMaxContext: |
| font[tag].usMaxContext = max_context |
| log.info("%s usMaxContext updated: %d", tag, max_context) |
| clazz = ttLib.getTableClass(tag) |
| if hasattr(clazz, 'prune_post_subset'): |
| with timer("prune '%s'" % tag): |
| table = font[tag] |
| retain = table.prune_post_subset(font, self.options) |
| if not retain: |
| log.info("%s pruned to empty; dropped", tag) |
| del font[tag] |
| else: |
| log.info("%s pruned", tag) |
| |
| def _sort_tables(self, font): |
| tagOrder = ['fvar', 'avar', 'gvar', 'name', 'glyf'] |
| tagOrder = {t: i + 1 for i, t in enumerate(tagOrder)} |
| tags = sorted(font.keys(), key=lambda tag: tagOrder.get(tag, 0)) |
| return [t for t in tags if t != 'GlyphOrder'] |
| |
| def subset(self, font): |
| self._prune_pre_subset(font) |
| self._closure_glyphs(font) |
| self._subset_glyphs(font) |
| self._prune_post_subset(font) |
| |
| |
@timer("load font")
def load_font(fontFile,
              options,
              allowVID=False,
              checkChecksums=False,
              dontLoadGlyphNames=False,
              lazy=True):
    """Load *fontFile* into a TTFont configured from *options*.

    When *dontLoadGlyphNames* is true, format-2.0 'post' tables are decoded
    as format 3.0 so glyph names are never parsed (faster, and tolerant of
    broken name data).
    """
    font = ttLib.TTFont(fontFile,
                        allowVID=allowVID,
                        checkChecksums=checkChecksums,
                        recalcBBoxes=options.recalc_bounds,
                        recalcTimestamp=options.recalc_timestamp,
                        lazy=lazy,
                        fontNumber=options.font_number)

    # Hack:
    #
    # If we don't need glyph names, change 'post' class to not try to
    # load them. It avoid lots of headache with broken fonts as well
    # as loading time.
    #
    # Ideally ttLib should provide a way to ask it to skip loading
    # glyph names. But it currently doesn't provide such a thing.
    #
    if dontLoadGlyphNames:
        post = ttLib.getTableClass('post')
        saved = post.decode_format_2_0
        post.decode_format_2_0 = post.decode_format_3_0
        try:
            # Guard: a font without a 'post' table must not leave the
            # class-level patch applied (it's shared by every TTFont).
            if 'post' in font:
                f = font['post']
                if f.formatType == 2.0:
                    f.formatType = 3.0
        finally:
            # Always restore the original decoder, even if decoding the
            # (possibly corrupt) table raised.
            post.decode_format_2_0 = saved

    return font
| |
@timer("compile and save font")
def save_font(font, outfile, options):
    """Compile *font* and write it to *outfile*.

    Honors the requested flavor ('woff'/'woff2'/None) and table ordering;
    enables zopfli compression for WOFF output when requested.
    """
    if options.flavor == "woff" and options.with_zopfli:
        from fontTools.ttLib import sfnt
        sfnt.USE_ZOPFLI = True
    font.flavor = options.flavor
    font.save(outfile, reorderTables=options.canonical_order)
| |
def parse_unicodes(s):
    """Parse a string of hex Unicode codepoints and codepoint ranges.

    Accepts forms like ``U+0041``, ``0x41``, ``&#x41;``, ``0041-0043``,
    separated by commas/whitespace, and returns a list of ints.
    """
    import re
    # Strip common codepoint prefixes and punctuation down to bare hex.
    cleaned = re.sub (r"0[xX]", " ", s)
    cleaned = re.sub (r"[<+>,;&#\\xXuU\n ]", " ", cleaned)
    codepoints = []
    for token in cleaned.split():
        start, sep, end = token.partition('-')
        if sep:
            codepoints.extend(range(int(start, 16), int(end, 16) + 1))
        else:
            codepoints.append(int(token, 16))
    return codepoints
| |
def parse_gids(s):
    """Parse comma/whitespace-separated decimal glyph IDs and ID ranges
    (e.g. ``"10-12,14"``) into a list of ints."""
    gids = []
    for token in s.replace(',', ' ').split():
        start, sep, end = token.partition('-')
        if sep:
            gids.extend(range(int(start), int(end) + 1))
        else:
            gids.append(int(start))
    return gids
| |
def parse_glyphs(s):
    """Split a comma/whitespace-separated list of glyph names into a list."""
    return " ".join(s.split(',')).split()
| |
def usage():
    """Print brief usage instructions to stderr."""
    message = ("usage: %s\n"
               "Try pyftsubset --help for more information.\n\n" % __usage__)
    sys.stderr.write(message)
| |
@timer("make one with everything (TOTAL TIME)")
def main(args=None):
    """Command-line entry point for pyftsubset.

    Parses options and glyph-set specifications from *args* (defaults to
    sys.argv[1:]), loads the font, subsets it, and saves the result.
    Returns 0 on success/--help, 1 on missing arguments, 2 on bad options.
    """
    from os.path import splitext
    from fontTools import configLogger

    if args is None:
        args = sys.argv[1:]

    if '--help' in args:
        print(__doc__)
        return 0

    options = Options()
    try:
        # Glyph-set options are handled by the loop below, not by Options;
        # tell the parser to pass them through unchanged.
        args = options.parse_opts(args,
            ignore_unknown=['gids', 'gids-file',
                            'glyphs', 'glyphs-file',
                            'text', 'text-file',
                            'unicodes', 'unicodes-file',
                            'output-file'])
    except options.OptionError as e:
        usage()
        print("ERROR:", e, file=sys.stderr)
        return 2

    # Need at least the font file plus one glyph-set argument.
    if len(args) < 2:
        usage()
        return 1

    configLogger(level=logging.INFO if options.verbose else logging.WARNING)
    if options.timing:
        timer.logger.setLevel(logging.DEBUG)
    else:
        timer.logger.disabled = True

    fontfile = args[0]
    args = args[1:]

    subsetter = Subsetter(options=options)
    outfile = None
    glyphs = []
    gids = []
    unicodes = []
    wildcard_glyphs = False
    wildcard_unicodes = False
    text = ""
    # Accumulate the glyph set from the remaining arguments.  NOTE: the
    # prefix checks rely on '--text=' not matching '--text-file=' etc.;
    # the slice offsets (g[14:], g[7:], ...) are the prefix lengths.
    for g in args:
        if g == '*':
            wildcard_glyphs = True
            continue
        if g.startswith('--output-file='):
            outfile = g[14:]
            continue
        if g.startswith('--text='):
            text += g[7:]
            continue
        if g.startswith('--text-file='):
            with open(g[12:], encoding='utf-8') as f:
                text += f.read().replace('\n', '')
            continue
        if g.startswith('--unicodes='):
            if g[11:] == '*':
                wildcard_unicodes = True
            else:
                unicodes.extend(parse_unicodes(g[11:]))
            continue
        if g.startswith('--unicodes-file='):
            with open(g[16:]) as f:
                for line in f.readlines():
                    # '#' starts a comment in list files.
                    unicodes.extend(parse_unicodes(line.split('#')[0]))
            continue
        if g.startswith('--gids='):
            gids.extend(parse_gids(g[7:]))
            continue
        if g.startswith('--gids-file='):
            with open(g[12:]) as f:
                for line in f.readlines():
                    gids.extend(parse_gids(line.split('#')[0]))
            continue
        if g.startswith('--glyphs='):
            if g[9:] == '*':
                wildcard_glyphs = True
            else:
                glyphs.extend(parse_glyphs(g[9:]))
            continue
        if g.startswith('--glyphs-file='):
            with open(g[14:]) as f:
                for line in f.readlines():
                    glyphs.extend(parse_glyphs(line.split('#')[0]))
            continue
        # Anything else is a bare glyph name.
        glyphs.append(g)

    # Glyph names are only needed when explicitly requested or when the
    # subset is specified by name; skipping them speeds up loading.
    dontLoadGlyphNames = not options.glyph_names and not glyphs
    font = load_font(fontfile, options, dontLoadGlyphNames=dontLoadGlyphNames)

    if outfile is None:
        # Derive the output name from the input, keyed to the output flavor
        # (sfntVersion "\0\1\0\0" means TrueType-flavored).
        basename, _ = splitext(fontfile)
        if options.flavor is not None:
            ext = "." + options.flavor.lower()
        else:
            ext = ".ttf" if font.sfntVersion == "\0\1\0\0" else ".otf"
        outfile = basename + ".subset" + ext

    with timer("compile glyph list"):
        if wildcard_glyphs:
            glyphs.extend(font.getGlyphOrder())
        if wildcard_unicodes:
            for t in font['cmap'].tables:
                if t.isUnicode():
                    unicodes.extend(t.cmap.keys())
        assert '' not in glyphs

    log.info("Text: '%s'" % text)
    log.info("Unicodes: %s", unicodes)
    log.info("Glyphs: %s", glyphs)
    log.info("Gids: %s", gids)

    subsetter.populate(glyphs=glyphs, gids=gids, unicodes=unicodes, text=text)
    subsetter.subset(font)

    save_font(font, outfile, options)

    if options.verbose:
        import os
        log.info("Input font:% 7d bytes: %s" % (os.path.getsize(fontfile), fontfile))
        log.info("Subset font:% 7d bytes: %s" % (os.path.getsize(outfile), outfile))

    if options.xml:
        font.saveXML(sys.stdout)

    font.close()
| |
| |
# Public API of this module ('Options' and 'Subsetter' are defined above).
__all__ = [
    'Options',
    'Subsetter',
    'load_font',
    'save_font',
    'parse_gids',
    'parse_glyphs',
    'parse_unicodes',
    'main'
]

# Allow running the module directly as the pyftsubset CLI.
if __name__ == '__main__':
    sys.exit(main())