summaryrefslogtreecommitdiff
path: root/misc
diff options
context:
space:
mode:
Diffstat (limited to 'misc')
-rwxr-xr-xmisc/cleanup-kerning.py353
-rw-r--r--misc/doc/install-mac.txt24
-rw-r--r--misc/doc/install-win.txt19
-rw-r--r--misc/e-alt-straight-close.glif37
-rwxr-xr-xmisc/enrich-glypnames.py650
-rwxr-xr-xmisc/fixup-diacritics.py167
-rwxr-xr-xmisc/fixup-features.py324
-rwxr-xr-xmisc/fixup-kerning.py362
-rwxr-xr-xmisc/fontinfo.py391
-rwxr-xr-xmisc/gen-glyphinfo.py245
-rwxr-xr-xmisc/gen-glyphorder.py65
-rw-r--r--misc/gen-kern.py37
-rw-r--r--misc/gen-num-pairs.js10
-rwxr-xr-xmisc/glyf-props.py63
-rwxr-xr-xmisc/mac-tmp-disk-mount.sh25
-rwxr-xr-xmisc/mac-tmp-disk-unmount.sh5
-rwxr-xr-xmisc/notify41
-rw-r--r--misc/pylib/fontbuild/Build.py300
-rw-r--r--misc/pylib/fontbuild/LICENSE201
-rw-r--r--misc/pylib/fontbuild/ORIGIN.txt1
-rw-r--r--misc/pylib/fontbuild/__init__.py6
-rw-r--r--misc/pylib/fontbuild/alignpoints.py173
-rw-r--r--misc/pylib/fontbuild/anchors.py77
-rw-r--r--misc/pylib/fontbuild/convertCurves.py102
-rw-r--r--misc/pylib/fontbuild/curveFitPen.py422
-rw-r--r--misc/pylib/fontbuild/decomposeGlyph.py23
-rwxr-xr-xmisc/pylib/fontbuild/features.py189
-rw-r--r--misc/pylib/fontbuild/generateGlyph.py97
-rw-r--r--misc/pylib/fontbuild/instanceNames.py232
-rw-r--r--misc/pylib/fontbuild/italics.py308
-rwxr-xr-xmisc/pylib/fontbuild/markFeature.py55
-rw-r--r--misc/pylib/fontbuild/mitreGlyph.py111
-rw-r--r--misc/pylib/fontbuild/mix.py360
-rw-r--r--misc/restore-diacritics-kerning.py431
-rwxr-xr-xmisc/rewrite-glyphorder.py305
-rw-r--r--misc/rf-scripts/AdjustWidth.py53
-rw-r--r--misc/rf-scripts/ChangeUPM.py107
-rw-r--r--misc/rf-scripts/GridAdjust.py83
-rw-r--r--misc/rf-scripts/RemoveLocalGuides.py15
-rw-r--r--misc/rf-scripts/StripGlyphs.py384
-rw-r--r--misc/rf-scripts/ZeroWidth.py26
-rw-r--r--misc/stems.txt25
-rwxr-xr-xmisc/svgsync.py435
-rwxr-xr-xmisc/svgsync2.py626
-rw-r--r--misc/ttf2woff/.gitignore9
-rw-r--r--misc/ttf2woff/Makefile68
-rw-r--r--misc/ttf2woff/comp-zlib.c34
-rw-r--r--misc/ttf2woff/comp-zopfli.c54
-rw-r--r--misc/ttf2woff/compat.c43
-rw-r--r--misc/ttf2woff/genttf.c63
-rw-r--r--misc/ttf2woff/genwoff.c95
-rw-r--r--misc/ttf2woff/optimize.c319
-rw-r--r--misc/ttf2woff/readttc.c29
-rw-r--r--misc/ttf2woff/readttf.c47
-rw-r--r--misc/ttf2woff/readwoff.c88
-rw-r--r--misc/ttf2woff/ttf2woff.c523
-rw-r--r--misc/ttf2woff/ttf2woff.h94
-rw-r--r--misc/ttf2woff/ttf2woff.rc39
-rw-r--r--misc/ttf2woff/zopfli/blocksplitter.c332
-rw-r--r--misc/ttf2woff/zopfli/blocksplitter.h73
-rw-r--r--misc/ttf2woff/zopfli/cache.c125
-rw-r--r--misc/ttf2woff/zopfli/cache.h66
-rw-r--r--misc/ttf2woff/zopfli/deflate.c933
-rw-r--r--misc/ttf2woff/zopfli/deflate.h92
-rw-r--r--misc/ttf2woff/zopfli/hash.c143
-rw-r--r--misc/ttf2woff/zopfli/hash.h73
-rw-r--r--misc/ttf2woff/zopfli/katajainen.c262
-rw-r--r--misc/ttf2woff/zopfli/katajainen.h42
-rw-r--r--misc/ttf2woff/zopfli/lz77.c630
-rw-r--r--misc/ttf2woff/zopfli/lz77.h142
-rw-r--r--misc/ttf2woff/zopfli/squeeze.c560
-rw-r--r--misc/ttf2woff/zopfli/squeeze.h61
-rw-r--r--misc/ttf2woff/zopfli/symbols.h239
-rw-r--r--misc/ttf2woff/zopfli/tree.c101
-rw-r--r--misc/ttf2woff/zopfli/tree.h51
-rw-r--r--misc/ttf2woff/zopfli/util.c35
-rw-r--r--misc/ttf2woff/zopfli/util.h158
-rw-r--r--misc/ttf2woff/zopfli/zlib_container.c79
-rw-r--r--misc/ttf2woff/zopfli/zlib_container.h50
-rw-r--r--misc/ttf2woff/zopfli/zopfli.h94
-rwxr-xr-xmisc/ufo-color-glyphs.py105
-rwxr-xr-xmisc/ufocompile163
-rw-r--r--misc/unicode_util.py104
83 files changed, 14183 insertions, 0 deletions
diff --git a/misc/cleanup-kerning.py b/misc/cleanup-kerning.py
new file mode 100755
index 000000000..03ddffefd
--- /dev/null
+++ b/misc/cleanup-kerning.py
@@ -0,0 +1,353 @@
+#!/usr/bin/env python
+# encoding: utf8
+from __future__ import print_function
+import os, sys, plistlib, re
+from collections import OrderedDict
+from ConfigParser import RawConfigParser
+from argparse import ArgumentParser
+from fontTools import ttLib
+from robofab.objects.objectsRF import OpenFont
+
+
+# Regex matching "default" glyph names, like "uni2043" and "u01C5"
+uniNameRe = re.compile(r'^u(?:ni)([0-9A-F]{4,8})$')
+
+
+def unicodeForDefaultGlyphName(glyphName):
+ m = uniNameRe.match(glyphName)
+ if m is not None:
+ try:
+ return int(m.group(1), 16)
+ except:
+ pass
+ return None
+
+
def canonicalGlyphName(glyphName, uc2names):
  """Map a default-style glyph name to its preferred symbolic name.

  If glyphName encodes a codepoint (e.g. 'uni2126') and uc2names has at
  least one name registered for that codepoint, the first registered name
  is returned; otherwise glyphName is returned unchanged.
  """
  uc = unicodeForDefaultGlyphName(glyphName)
  if uc is None:
    return glyphName
  names = uc2names.get(uc)
  return names[0] if names else glyphName
+
+
+
def parseGlyphComposition(composite):
  """Parse one line of glyph-composition syntax.

  Syntax: "base+accent[:anchor]+...=glyphName[/x,y]"
  Returns (glyphName, baseName, accentNames, offset) where accentNames is
  a list of [accentName] or [accentName, anchorName] lists and offset is
  [x, y] (defaulting to [0, 0]).
  """
  pieces = composite.split("=")
  target = pieces[1].split("/")
  glyphName = target[0]
  if len(target) == 1:
    offset = [0, 0]
  else:
    offset = [int(v) for v in target[1].split(",")]
  recipeParts = pieces[0].split("+")
  baseName = recipeParts[0]
  accentNames = [part.split(":") for part in recipeParts[1:]]
  return (glyphName, baseName, accentNames, offset)
+
+
def loadGlyphCompositions(filename): # { glyphName => (baseName, accentNames, offset) }
  """Read a diacritics composition file into an ordered dict keyed by the
  composed glyph's name. Blank lines and '#' comment lines are skipped."""
  compositions = OrderedDict()
  with open(filename, 'r') as f:
    for rawLine in f:
      line = rawLine.strip()
      if not line or line.startswith('#'):
        continue
      glyphName, baseName, accentNames, offset = parseGlyphComposition(line)
      compositions[glyphName] = (baseName, accentNames, offset)
  return compositions
+
+
def loadAGL(filename): # -> { 2126: 'Omega', ... }
  """Parse an Adobe Glyph List file into {codepoint: glyphName}.

  Data lines look like "Omega;2126"; '#' lines are comments. Entries whose
  value holds several codepoints (higher-level combinations, e.g.
  "dalethatafpatah;05D3 05B2") are skipped.
  """
  mapping = {}
  with open(filename, 'r') as f:
    for rawLine in f:
      line = rawLine.strip()
      if not line or line.startswith('#'):
        continue
      name, uc = (part.strip() for part in line.split(';'))
      if ' ' not in uc:
        # it's a 1:1 mapping
        mapping[int(uc, 16)] = name
  return mapping
+
+
def loadLocalNamesDB(fonts, agl, diacriticComps):
  """Build the glyph-name database for a set of fonts.

  Args:
    fonts: robofab font objects to harvest names from.
    agl: Adobe Glyph List mapping { codepoint => name }, e.g. { 2126: 'Omega' }.
    diacriticComps: { glyphName => (baseName, accentNames, offset) } from
      the diacritics composition file.

  Returns (uc2names, name2ucs, allNames):
    uc2names: { codepoint => [name, ...] } -- first name is preferred
    name2ucs: { name => set(codepoints) } -- inverse of uc2names
    allNames: set of all glyph names seen in fonts or registered via
      diacritic compositions.

  NOTE(review): assumes `fonts` is non-empty; with an empty list uc2names
  stays None and the diacritics loop below would fail -- confirm callers
  always pass at least one font.
  """
  uc2names = None  # { 2126: ['Omega', ...], ...}
  allNames = set() # set('Omega', ...)

  for font in fonts:
    _uc2names = font.getCharacterMapping()  # { 2126: ['Omega', ...], ...}
    if uc2names is None:
      uc2names = _uc2names
    else:
      # merge this font's mapping into the accumulated one, skipping dupes
      for uc, _names in _uc2names.iteritems():
        names = uc2names.setdefault(uc, [])
        for name in _names:
          if name not in names:
            names.append(name)
    for g in font:
      allNames.add(g.name)

  # agl { 2126: 'Omega', ...} -> { 'Omega': [2126, ...], ...}
  aglName2Ucs = {}
  for uc, name in agl.iteritems():
    aglName2Ucs.setdefault(name, []).append(uc)

  for glyphName, comp in diacriticComps.iteritems():
    aglUCs = aglName2Ucs.get(glyphName)
    if aglUCs is None:
      # not an AGL name; try to decode it as a default-style name
      # ('uniXXXX'/'uXXXX') and register it under that codepoint,
      # preferring the AGL name for the codepoint when one exists
      uc = unicodeForDefaultGlyphName(glyphName)
      if uc is not None:
        glyphName2 = agl.get(uc)
        if glyphName2 is not None:
          glyphName = glyphName2
        names = uc2names.setdefault(uc, [])
        if glyphName not in names:
          names.append(glyphName)
        allNames.add(glyphName)
    else:
      # AGL knows this name; register it for each of its codepoints
      allNames.add(glyphName)
      for uc in aglUCs:
        names = uc2names.get(uc, [])
        if glyphName not in names:
          names.append(glyphName)
        uc2names[uc] = names

  # build the inverse mapping
  name2ucs = {}  # { 'Omega': [2126, ...], ...}
  for uc, names in uc2names.iteritems():
    for name in names:
      name2ucs.setdefault(name, set()).add(uc)

  return uc2names, name2ucs, allNames
+
+
+# def getNameToGroupsMap(groups): # => { glyphName => set(groupName) }
+# nameMap = {}
+# for groupName, glyphNames in groups.iteritems():
+# for glyphName in glyphNames:
+# nameMap.setdefault(glyphName, set()).add(groupName)
+# return nameMap
+
+
+# def inspectKerning(kerning):
+# leftIndex = {} # { glyph-name => <ref to plist right-hand side dict> }
+# rightIndex = {} # { glyph-name => [(left-hand-side-name, kernVal), ...] }
+# rightGroupIndex = {} # { group-name => [(left-hand-side-name, kernVal), ...] }
+# for leftName, right in kerning.iteritems():
+# if leftName[0] != '@':
+# leftIndex[leftName] = right
+# for rightName, kernVal in right.iteritems():
+# if rightName[0] != '@':
+# rightIndex.setdefault(rightName, []).append((leftName, kernVal))
+# else:
+# rightGroupIndex.setdefault(rightName, []).append((leftName, kernVal))
+# return leftIndex, rightIndex, rightGroupIndex
+
+
class RefTracker:
  """Counts references per name so unreferenced names can be eliminated."""

  def __init__(self):
    self.refs = {}  # { name => refcount }

  def incr(self, name):
    """Add one reference to name."""
    self.refs[name] = self.refs.get(name, 0) + 1

  def decr(self, name): # => bool hasNoRefs
    """Drop one reference to name; return True when no references remain.

    Raises Exception when name is untracked or already at zero references.
    """
    r = self.refs.get(name)

    if r is None:
      raise Exception('decr untracked ref ' + repr(name))

    if r < 1:
      raise Exception('decr already zero ref ' + repr(name))

    if r == 1:
      del self.refs[name]
      return True

    self.refs[name] = r - 1
    return False  # BUG FIX: was an implicit None; make the documented bool explicit

  def __contains__(self, name):
    return name in self.refs
+
+
def main():
  """Remove dead kerning data from the UFO fonts given on the command line.

  For each UFO:
    1. Prune groups.plist: drop glyphs that don't exist in the font
       (renaming default-style names to canonical ones when possible) and
       delete groups left empty.
    2. Prune kerning.plist: drop pairs that reference dead glyphs or
       eliminated groups, then delete groups no surviving pair references.
    3. Expand all group kerning to concrete glyph pairs as a sanity check
       (raises on unknown names) and record conflicting pairs.
    4. Write groups.plist and kerning.plist back (skipped with -dry).
  """
  argparser = ArgumentParser(description='Remove unused kerning')

  argparser.add_argument(
    '-dry', dest='dryRun', action='store_const', const=True, default=False,
    help='Do not modify anything, but instead just print what would happen.')

  argparser.add_argument(
    'fontPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts to update')

  args = argparser.parse_args()
  dryRun = args.dryRun

  agl = loadAGL('src/glyphlist.txt') # { 2126: 'Omega', ... }
  diacriticComps = loadGlyphCompositions('src/diacritics.txt') # {glyphName => (baseName, a, o)}

  for fontPath in args.fontPaths:
    print(fontPath)

    groupsFilename = os.path.join(fontPath, 'groups.plist')
    kerningFilename = os.path.join(fontPath, 'kerning.plist')

    groups = plistlib.readPlist(groupsFilename)   # { groupName => [glyphName] }
    kerning = plistlib.readPlist(kerningFilename) # { leftName => {rightName => kernVal} }

    font = OpenFont(fontPath)
    uc2names, name2ucs, allNames = loadLocalNamesDB([font], agl, diacriticComps)

    # start with eliminating non-existent glyphs from groups and completely
    # eliminate groups with all-dead glyphs.
    eliminatedGroups = set()
    for groupName, glyphNames in list(groups.items()):
      glyphNames2 = []
      for name in glyphNames:
        if name in allNames:
          glyphNames2.append(name)
        else:
          # dead name -- maybe it's a default-style name for a live glyph
          name2 = canonicalGlyphName(name, uc2names)
          if name2 != name and name2 in allNames:
            print('group: rename glyph', name, '->', name2)
            glyphNames2.append(name2)

      if len(glyphNames2) == 0:
        print('group: eliminate', groupName)
        eliminatedGroups.add(groupName)
        del groups[groupName]
      elif len(glyphNames2) != len(glyphNames):
        print('group: shrink', groupName)
        groups[groupName] = glyphNames2

    # now eliminate kerning
    groupRefs = RefTracker() # tracks group references, so we can eliminate unreachable ones

    for leftName, right in list(kerning.items()):
      leftIsGroup = leftName[0] == '@'

      if leftIsGroup:
        if leftName in eliminatedGroups:
          print('kerning: eliminate LHS', leftName)
          del kerning[leftName]
          continue
        groupRefs.incr(leftName)
      else:
        if leftName not in allNames:
          print('kerning: eliminate LHS', leftName)
          del kerning[leftName]
          continue

      right2 = {}
      for rightName, kernVal in right.iteritems():
        if rightName[0] == '@':
          # BUG FIX: this used to test `rightIsGroup in eliminatedGroups`
          # (a bool is never a member of a set of group names), so
          # eliminated groups were silently kept on the right-hand side
          # and crashed the verification pass below.
          if rightName in eliminatedGroups:
            print('kerning: eliminate RHS group', rightName)
          else:
            groupRefs.incr(rightName)
            right2[rightName] = kernVal
        else:
          if rightName not in allNames:
            # maybe an unnamed glyph?
            rightName2 = canonicalGlyphName(rightName, uc2names)
            if rightName2 != rightName:
              print('kerning: rename & update RHS glyph', rightName, '->', rightName2)
              right2[rightName2] = kernVal
            else:
              print('kerning: eliminate RHS glyph', rightName)
          else:
            right2[rightName] = kernVal

      if len(right2) == 0:
        print('kerning: eliminate LHS', leftName)
        del kerning[leftName]
        if leftIsGroup:
          groupRefs.decr(leftName)
      else:
        kerning[leftName] = right2

    # eliminate any unreferenced groups
    for groupName, glyphNames in list(groups.items()):
      if not groupName in groupRefs:
        print('group: eliminate unreferenced group', groupName)
        del groups[groupName]

    # verify that there are no conflicting kerning pairs, expanding group
    # kerning into concrete glyph pairs; raises on unknown names.
    pairs = {} # { 'left+right' => [(topLeftName, topRightName, kernVal), ...] }
    conflictingPairs = set()

    for leftName, right in kerning.iteritems():
      # expand LHS group -> names
      topLeftName = leftName
      for lhsName in groups[leftName] if leftName[0] == '@' else [leftName]:
        if lhsName not in allNames:
          raise Exception('unknown LHS glyph name ' + repr(lhsName))
        keyPrefix = lhsName + '+'
        for rightName, kernVal in right.iteritems():
          # expand RHS group -> names
          topRightName = rightName
          for rhsName in groups[rightName] if rightName[0] == '@' else [rightName]:
            if rhsName not in allNames:
              raise Exception('unknown RHS glyph name ' + repr(rhsName))
            key = keyPrefix + rhsName
            isConflict = key in pairs
            pairs.setdefault(key, []).append(( topLeftName, topRightName, kernVal ))
            if isConflict:
              conflictingPairs.add(key)

    # (a commented-out, unfinished experiment that resolved conflicting
    # pairs by preferring group kerning used to live here; removed as dead
    # code -- `conflictingPairs` is still collected above for future use.)

    print('Write', groupsFilename)
    if not dryRun:
      plistlib.writePlist(groups, groupsFilename)

    print('Write', kerningFilename)
    if not dryRun:
      plistlib.writePlist(kerning, kerningFilename)

  # [end] for fontPath in args.fontPaths


main()
diff --git a/misc/doc/install-mac.txt b/misc/doc/install-mac.txt
new file mode 100644
index 000000000..b3aae815e
--- /dev/null
+++ b/misc/doc/install-mac.txt
@@ -0,0 +1,24 @@
+
+Installing on macOS:
+
+1. Open the "Interface (OTF)" folder
+2. Select all font files
+3. Right-click (or ctrl-click) the selected files
+ and choose "Open with..." → "Font Book"
+4. Press the "Install" button
+
+If you get any errors, like Font Book saying there are duplicate fonts,
+cancel the installation and instead try the instructions below:
+
+
+Installing on macOS, manually:
+
+1. Copy the "Interface (OTF)" folder
+2. Press cmd-shift-G in Finder
+3. Enter "~/Library/Fonts" into the dialog that shows up and press RETURN.
+4. Paste the "Interface (OTF)" folder.
+
+If you have a previous installation of Interface, you should make sure to
+remove those font files before installing new ones.
+
+See https://github.com/rsms/interface for more information
diff --git a/misc/doc/install-win.txt b/misc/doc/install-win.txt
new file mode 100644
index 000000000..ed9dbf8b7
--- /dev/null
+++ b/misc/doc/install-win.txt
@@ -0,0 +1,19 @@
+
+Installing on Windows 10:
+
+1. Open the "Interface (hinted TTF)" folder
+2. Select all font files
+3. Right-click the selected files and choose "Install"
+
+
+Installing on Windows 10, manually:
+
+1. Double-click the downloaded zip file
+2. Copy the "Interface (hinted TTF)" folder
+3. Press Win-Q on your keyboard, then type "fonts" and hit ENTER
+4. Paste the "Interface (hinted TTF)" folder.
+
+If you have a previous installation of Interface, you should make sure
+to remove those font files before installing new ones.
+
+See https://github.com/rsms/interface for more information
diff --git a/misc/e-alt-straight-close.glif b/misc/e-alt-straight-close.glif
new file mode 100644
index 000000000..28266ecb6
--- /dev/null
+++ b/misc/e-alt-straight-close.glif
@@ -0,0 +1,37 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<glyph name="e.alt1" format="1">
+ <advance width="1600"/>
+ <outline>
+ <contour>
+ <point x="820" y="-32" type="curve"/>
+ <point x="1176" y="-32"/>
+ <point x="1400" y="176"/>
+ <point x="1440" y="448" type="curve"/>
+ <point x="1204" y="448" type="line"/>
+ <point x="1160" y="280"/>
+ <point x="1020" y="184"/>
+ <point x="820" y="184" type="curve" smooth="yes"/>
+ <point x="556" y="184"/>
+ <point x="384" y="424"/>
+ <point x="384" y="768" type="curve"/>
+ <point x="384" y="1104"/>
+ <point x="556" y="1344"/>
+ <point x="824" y="1344" type="curve" smooth="yes"/>
+ <point x="1036" y="1344"/>
+ <point x="1240" y="1176"/>
+ <point x="1240" y="872" type="curve"/>
+ <point x="308" y="872" type="line"/>
+ <point x="308" y="668" type="line"/>
+ <point x="1476" y="668" type="line"/>
+ <point x="1476" y="768" type="line" smooth="yes"/>
+ <point x="1476" y="1352"/>
+ <point x="1180" y="1556"/>
+ <point x="820" y="1556" type="curve" smooth="yes"/>
+ <point x="408" y="1556"/>
+ <point x="140" y="1228"/>
+ <point x="140" y="768" type="curve"/>
+ <point x="140" y="296"/>
+ <point x="408" y="-32"/>
+ </contour>
+ </outline>
+</glyph>
diff --git a/misc/enrich-glypnames.py b/misc/enrich-glypnames.py
new file mode 100755
index 000000000..b4c401217
--- /dev/null
+++ b/misc/enrich-glypnames.py
@@ -0,0 +1,650 @@
+#!/usr/bin/env python
+# encoding: utf8
+from __future__ import print_function
+import os
+import sys
+import argparse
+import json
+import plistlib
+import re
+from collections import OrderedDict
+from textwrap import TextWrapper
+from StringIO import StringIO
+from ConfigParser import RawConfigParser
+from fontTools import ttLib
+from robofab.objects.objectsRF import RFont, OpenFont
+
+# from feaTools import parser as feaParser
+# from feaTools.parser import parseFeatures
+# from feaTools import FDKSyntaxFeatureWriter
+# from fontbuild.features import updateFeature, compileFeatureRE
+
+# Regex matching "default" glyph names, like "uni2043" and "u01C5"
+uniNameRe = re.compile(r'^u(?:ni)[0-9A-F]{4,8}$')
+
+
+def defaultGlyphName(uc):
+ return 'uni%04X' % uc
+
+def defaultGlyphName2(uc):
+ return 'u%04X' % uc
+
+
+def isDefaultGlyphName(name):
+ return True if uniNameRe.match(name) else False
+
+
def isDefaultGlyphNameForUnicode(name, uc):
  """True if name is exactly one of the two default names for codepoint uc."""
  return name in (defaultGlyphName(uc), defaultGlyphName2(uc))
+
+
def getFirstNonDefaultGlyphName(uc, names):
  """Return the first entry of names that is not a default-style name for
  codepoint uc, or None when every entry is a default name."""
  return next(
    (name for name in names if not isDefaultGlyphNameForUnicode(name, uc)),
    None)
+
+
def getTTGlyphList(font): # -> { 'Omega': [2126, ...], ... }
  """Extract {glyphName: [codepoints]} from a compiled font's cmap.

  `font` may be a ttLib.TTFont or a file path (str), which is then opened.
  Among the Unicode (platformID 0) cmap subtables, the one with the highest
  format number is used.

  Returns (gl, font) so a caller that passed a path also receives the
  opened TTFont. Raises Exception when the font has no cmap table.
  """
  if isinstance(font, str):
    font = ttLib.TTFont(font)

  if not 'cmap' in font:
    raise Exception('missing cmap table')

  gl = {}
  bestCodeSubTable = None
  bestCodeSubTableFormat = 0

  for st in font['cmap'].tables:
    if st.platformID == 0: # 0=unicode, 1=mac, 2=(reserved), 3=microsoft
      if st.format > bestCodeSubTableFormat:
        bestCodeSubTable = st
        bestCodeSubTableFormat = st.format

  if bestCodeSubTable is not None:
    # invert codepoint->name into name->[codepoints]
    for cp, glyphname in bestCodeSubTable.cmap.items():
      if glyphname in gl:
        gl[glyphname].append(cp)
      else:
        gl[glyphname] = [cp]

  return gl, font
+
+
def getUFOGlyphList(font): # -> { 'Omega': [2126, ...], ... }
  """Collect {glyphName: [codepoints]} for every encoded glyph of a UFO
  font; glyphs without unicodes are omitted.

  (font.getCharacterMapping() would give the inverse {codepoint: [names]}
  mapping.)
  """
  return {g.name: g.unicodes for g in font if len(g.unicodes) > 0}
+
+
def appendNames(uc2names, extraUc2names, uc, name, isDestination):
  """Record name for codepoint uc.

  Codepoints already present in uc2names (or contributed by a destination
  font) accumulate there; codepoints only seen in source fonts go to
  extraUc2names. A name is never added twice to the same list.
  """
  if uc in uc2names:
    bucket = uc2names[uc]
  elif isDestination:
    uc2names[uc] = [name]
    return
  else:
    bucket = extraUc2names.setdefault(uc, [])
  if name not in bucket:
    bucket.append(name)
+
+
def buildGlyphNames(dstFonts, srcFonts, glyphOrder, fallbackGlyphNames):
  """Gather candidate glyph names per codepoint across all fonts.

  Args:
    dstFonts: UFO fonts (RFont) that will be updated.
    srcFonts: extra fonts (paths or TTFonts) that only contribute names.
    glyphOrder: glyph order list (unused here except by the commented-out
      experiment at the end).
    fallbackGlyphNames: { 2126: 'Omega', ... } extra names for dst fonts.

  Returns (uc2names, extraUc2names, name2ucsv):
    uc2names: { codepoint => [names...] } for codepoints in dstFonts
    extraUc2names: same, for codepoints only found in srcFonts
    name2ucsv: per-font {name: [codepoints]} maps, in dstFonts+srcFonts order
  """
  # fallbackGlyphNames: { 2126: 'Omega', ...}
  uc2names = {} # { 2126: ['Omega', 'Omegagreek', ...], ...}
  extraUc2names = {} # { 2126: ['Omega', 'Omegagreek', ...], ...}
  # -- codepoints in Nth fonts, not found in first font
  name2ucsv = [] # [ { 'Omega': [2126, ...] }, ... ] -- same order as fonts

  fontIndex = 0
  for font in dstFonts + srcFonts:
    gl = None
    if isinstance(font, RFont):
      print('Inspecting', font.info.familyName, font.info.styleName)
      gl = getUFOGlyphList(font)
    else:
      print('Inspecting', font)
      # getTTGlyphList also returns the opened TTFont when given a path
      gl, font = getTTGlyphList(font)

    name2ucsv.append(gl)

    # only fonts listed in dstFonts feed uc2names directly
    isDestination = fontIndex < len(dstFonts)

    for name, unicodes in gl.iteritems():
      # if len(uc2names) > 100: break
      for uc in unicodes:
        appendNames(uc2names, extraUc2names, uc, name, isDestination)
      if isDestination:
        # NOTE(review): `uc` here is whatever the loop above left behind,
        # so only the glyph's LAST codepoint gets a fallback name (and a
        # glyph with no unicodes reuses the previous glyph's uc) -- this
        # looks like it was meant to be inside the loop; confirm intent.
        fallbackName = fallbackGlyphNames.get(uc)
        if fallbackName is not None:
          appendNames(uc2names, extraUc2names, uc, fallbackName, isDestination)

    fontIndex += 1

  # for name in glyphOrder:
  #   if len(name) > 7 and name.startswith('uni') and name.find('.') == -1 and name.find('_') == -1:
  #     try:
  #       print('name: %r, %r' % (name, name[3:]))
  #       uc = int(name[3:], 16)
  #       appendNames(uc2names, extraUc2names, uc, name, isDestination=True)
  #     except:
  #       print()
  #       pass

  return uc2names, extraUc2names, name2ucsv
+
+
def renameStrings(listofstrs, newNames):
  """Return a copy of listofstrs with every entry found in newNames
  replaced by its mapped value; other entries pass through unchanged."""
  return [newNames.get(s, s) for s in listofstrs]
+
+
def renameUFOLib(ufoPath, newNames, dryRun=False, print=print):
  """Apply glyph renames ({old: new}) to lib.plist inside a UFO.

  Updates public.glyphOrder and RoboFont's glyphList sort descriptors.
  Unlike its siblings, this always rewrites the file (unless dryRun),
  even when nothing changed.
  """
  filename = os.path.join(ufoPath, 'lib.plist')
  plist = plistlib.readPlist(filename)

  glyphOrder = plist.get('public.glyphOrder')
  if glyphOrder is not None:
    plist['public.glyphOrder'] = renameStrings(glyphOrder, newNames)

  roboSort = plist.get('com.typemytype.robofont.sort')
  if roboSort is not None:
    for entry in roboSort:
      if isinstance(entry, dict) and entry.get('type') == 'glyphList':
        asc = entry.get('ascending')
        desc = entry.get('descending')
        if asc is not None:
          entry['ascending'] = renameStrings(asc, newNames)
        if desc is not None:
          entry['descending'] = renameStrings(desc, newNames)

  print('Writing', filename)
  if not dryRun:
    plistlib.writePlist(plist, filename)
+
+
def renameUFOGroups(ufoPath, newNames, dryRun=False, print=print):
  """Apply glyph renames ({old: new}) to groups.plist inside a UFO.

  A missing/unreadable groups.plist is silently ignored (not every UFO
  has one). The file is only rewritten when at least one name changed.
  """
  filename = os.path.join(ufoPath, 'groups.plist')

  plist = None
  try:
    plist = plistlib.readPlist(filename)
  except:
    return

  didChange = False

  # rename member glyphs in place, group by group
  for groupName, glyphNames in plist.items():
    for i in range(len(glyphNames)):
      name = glyphNames[i]
      if name in newNames:
        didChange = True
        glyphNames[i] = newNames[name]

  if didChange:
    print('Writing', filename)
    if not dryRun:
      plistlib.writePlist(plist, filename)
+
+
def renameUFOKerning(ufoPath, newNames, dryRun=False, print=print):
  """Apply glyph renames ({old: new}) to kerning.plist inside a UFO.

  Both left-hand and right-hand names of every kerning pair are mapped
  through newNames. A missing/unreadable kerning.plist is silently ignored;
  the file is only rewritten when at least one name changed.
  """
  filename = os.path.join(ufoPath, 'kerning.plist')

  plist = None
  try:
    plist = plistlib.readPlist(filename)
  except:
    return

  didChange = False

  newPlist = {}
  for leftName, right in plist.items():
    if leftName in newNames:
      didChange = True
      leftName = newNames[leftName]
    newRight = {}
    # BUG FIX: the inner loop used to iterate plist.items() (the top-level
    # dict) instead of this pair's right-hand dict, and the rebuilt
    # newRight was then discarded in favor of the untouched `right` --
    # so right-hand names were never actually renamed.
    for rightName, kernValue in right.items():
      if rightName in newNames:
        didChange = True
        rightName = newNames[rightName]
      newRight[rightName] = kernValue
    newPlist[leftName] = newRight

  if didChange:
    print('Writing', filename)
    if not dryRun:
      plistlib.writePlist(newPlist, filename)
+
+
def subFeaName(m, newNames, state):
  """Substitution callback renaming one 'uniXXXX'-style token in feature code.

  m is subscripted m[0]..m[4]: m[0] the full matched text, m[1]/m[4] the
  surrounding context, m[2] the glyph name, m[3] its hex digits (validated
  below). Sets state['didChange'] when a substitution is made.

  NOTE(review): re match objects only support [] subscripting on
  Python 3.6+; on the py2 runtime this file otherwise targets, m would
  have to be a tuple (e.g. from findall) -- no caller is visible here,
  so confirm before relying on this.
  """
  try:
    int(m[3], 16)
  except:
    # not a hex codepoint -> leave the match untouched
    return m[0]

  name = m[2]

  if name in newNames:
    # print('sub %r => %r' % (m[0], m[1] + newNames[name] + m[4]))
    if name == 'uni0402':
      print('sub %r => %r' % (m[0], m[1] + newNames[name] + m[4]))
    state['didChange'] = True
    return m[1] + newNames[name] + m[4]

  return m[0]
+
+
# Chunk kinds yielded by feaTokenizer
FEA_TOK = 'tok'  # a maximal run of non-separator characters
FEA_SEP = 'sep'  # a maximal run of separator characters
FEA_END = 'end'  # the final remainder of the input (possibly empty)

def feaTokenizer(feaText):
  """Split .fea feature-file text into (kind, text) chunks.

  Separators are whitespace and ; , [ ] ' " -- everything else counts as
  token text. Chunks are yielded in input order so a caller can rebuild
  the file while substituting only the token chunks (see
  renameUFOFeatures).
  """
  separators = set('; \t\r\n,[]\'"')
  tokStartIndex = -1   # start of the token run in progress, or -1
  sepStartIndex = -1   # start of the separator run in progress, or -1

  for i in xrange(len(feaText)):  # py2: xrange
    ch = feaText[i]
    if ch in separators:
      if tokStartIndex != -1:
        # a token run just ended; emit it
        yield (FEA_TOK, feaText[tokStartIndex:i])
        tokStartIndex = -1
      if sepStartIndex == -1:
        sepStartIndex = i
    else:
      if sepStartIndex != -1:
        # a separator run just ended; emit it
        yield (FEA_SEP, feaText[sepStartIndex:i])
        sepStartIndex = -1
      if tokStartIndex == -1:
        tokStartIndex = i

  # emit whatever run is still open as the final chunk
  if sepStartIndex != -1 and tokStartIndex != -1:
    yield (FEA_END, feaText[min(sepStartIndex, tokStartIndex):])
  elif sepStartIndex != -1:
    yield (FEA_END, feaText[sepStartIndex:])
  elif tokStartIndex != -1:
    yield (FEA_END, feaText[tokStartIndex:])
  else:
    yield (FEA_END, '')
+
+
def renameUFOFeatures(font, ufoPath, newNames, dryRun=False, print=print):
  """Apply glyph renames ({old: new}) to features.fea inside a UFO.

  Tokenizes the feature text (feaTokenizer) and substitutes any token that
  looks like a default glyph name ('uniXXXX...') and appears in newNames.
  This is a purely textual, optimistic update -- hence the printed
  reminder to verify the result. A missing/unreadable features.fea is
  silently ignored; the file is only rewritten when something changed.
  The `font` parameter is currently unused.
  """
  filename = os.path.join(ufoPath, 'features.fea')

  feaText = ''
  try:
    with open(filename, 'r') as f:
      feaText = f.read()
  except:
    return

  didChange = False
  feaText2 = ''

  for t, v in feaTokenizer(feaText):
    # only tokens shaped like 'uniXXXX...' are substitution candidates
    if t is FEA_TOK and len(v) > 6 and v.startswith('uni'):
      if v in newNames:
        # print('sub', v, newNames[v])
        didChange = True
        v = newNames[v]
    feaText2 += v

  feaText = feaText2

  if didChange:
    print('Writing', filename)
    if not dryRun:
      with open(filename, 'w') as f:
        f.write(feaText)
      print(
        'Important: you need to manually verify that', filename, 'looks okay.',
        'We did an optimistic update which is not perfect.'
      )

  # classes = feaParser.classDefinitionRE.findall(feaText)
  # for precedingMark, className, classContent in classes:
  #   content = feaParser.classContentRE.findall(classContent)
  #   print('class', className, content)

  #   didChange = False
  #   content2 = []
  #   for name in content:
  #     if name in newNames:
  #       didChange = True
  #       content2.append(newNames[name])
  #   if didChange:
  #     print('content2', content2)
  #     feaText = feaParser.classDefinitionRE.sub('', feaText)

  # featureTags = feaParser.feature_findAll_RE.findall(feaText)
  # for precedingMark, featureTag in featureTags:
  #   print('feat', featureTag)
+
+
def renameUFODetails(font, ufoPath, newNames, dryRun=False, print=print):
  """Apply glyph renames to all of a UFO's auxiliary files:
  lib.plist, groups.plist, kerning.plist and features.fea."""
  for renameFn in (renameUFOLib, renameUFOGroups, renameUFOKerning):
    renameFn(ufoPath, newNames, dryRun, print)
  renameUFOFeatures(font, ufoPath, newNames, dryRun, print)
+
+
def readLines(filename):
  """Return the file's content as a list of lines, with leading and
  trailing blank space of the whole file stripped first."""
  with open(filename, 'r') as fp:
    content = fp.read()
  return content.strip().splitlines()
+
+
def readGlyphOrderFile(filename):
  """Parse a glyph-order file: one glyph name per line, '#' lines and
  blank lines are ignored. Returns the names in file order."""
  names = []
  for rawLine in readLines(filename):
    entry = rawLine.lstrip()
    if entry and not entry.startswith('#'):
      names.append(entry)
  return names
+
+
def renameGlyphOrderFile(filename, newNames, dryRun=False, print=print):
  """Apply glyph renames ({old: new}) to a glyph-order file.

  '#' comment lines pass through unrenamed. Note: every line is written
  back lstrip()ed, so leading whitespace is normalized away. The file is
  only rewritten when at least one name changed.
  """
  lines = []
  didRename = False
  for line in readLines(filename):
    line = line.lstrip()
    if len(line) > 0 and line[0] != '#':
      newName = newNames.get(line)
      if newName is not None:
        didRename = True
        line = newName
    lines.append(line)
  if didRename:
    print('Writing', filename)
    if not dryRun:
      with open(filename, 'w') as f:
        f.write('\n'.join(lines))
+
+
def parseGlyphComposition(composite):
  """Parse one line of glyph-composition syntax.

  Syntax: "base+accent[:anchor]+...=glyphName[/x,y]"
  Returns (glyphName, baseName, accentNames, offset) where accentNames is
  a list of [accentName] or [accentName, anchorName] lists and offset is
  [x, y] (defaulting to [0, 0]).
  """
  pieces = composite.split("=")
  target = pieces[1].split("/")
  glyphName = target[0]
  if len(target) == 1:
    offset = [0, 0]
  else:
    offset = [int(v) for v in target[1].split(",")]
  recipeParts = pieces[0].split("+")
  baseName = recipeParts[0]
  accentNames = [part.split(":") for part in recipeParts[1:]]
  return (glyphName, baseName, accentNames, offset)
+
+
def fmtGlyphComposition(glyphName, baseName, accentNames, offset):
  """Serialize a glyph composition back to its one-line text form.

  Inverse of parseGlyphComposition, e.g.
  ('uni03D3', 'uni03D2', [['tonos','top'],['acute','top']], [100, 0])
  -> "uni03D2+tonos:top+acute:top=uni03D3/100,0"
  The "/x,y" suffix is omitted when the offset is zero.
  """
  parts = [baseName]
  for accent in accentNames:
    if len(accent) > 1:
      parts.append(accent[0] + ':' + accent[1])
    else:
      parts.append(accent[0])
  s = '+'.join(parts) + '=' + glyphName
  if offset[0] != 0 or offset[1] != 0:
    s += '/%d,%d' % tuple(offset)
  return s
+
+
def renameDiacriticsFile(filename, newNames, dryRun=False, print=print):
  """Apply glyph renames ({old: new}) to a diacritics composition file.

  Each non-comment line is parsed (parseGlyphComposition), the composed
  glyph, base glyph and accent names are mapped through newNames, and the
  line is re-serialized (fmtGlyphComposition). Comment lines pass through
  untouched. The file is only rewritten when something changed.
  """
  lines = []
  didRename = False
  for line in readLines(filename):
    line = line.strip()
    if len(line) > 0 and line[0] != '#':
      glyphName, baseName, accentNames, offset = parseGlyphComposition(line)

      # rename
      glyphName = newNames.get(glyphName, glyphName)
      baseName = newNames.get(baseName, baseName)
      for accentTuple in accentNames:
        accentTuple[0] = newNames.get(accentTuple[0], accentTuple[0])

      line2 = fmtGlyphComposition(glyphName, baseName, accentNames, offset)

      if line != line2:
        line = line2
        didRename = True
      # print(line, '=>', line2)

    lines.append(line)

  if didRename:
    print('Writing', filename)
    if not dryRun:
      with open(filename, 'w') as f:
        f.write('\n'.join(lines))
+
+
def configFindResFile(config, basedir, name):
  """Resolve a [res] entry from fontbuild.cfg to an existing file path.

  Looks for the configured path relative to basedir first, then relative
  to basedir's parent directory. Returns None when the file exists in
  neither location.
  """
  fn = os.path.join(basedir, config.get("res", name))
  if not os.path.isfile(fn):
    basedir = os.path.dirname(basedir)
    fn = os.path.join(basedir, config.get("res", name))
    if not os.path.isfile(fn):
      fn = None
  return fn
+
+
def renameConfigFile(config, filename, newNames, dryRun=False, print=print):
  """Apply glyph renames ({old: new}) to the [glyphs] section of fontbuild.cfg.

  Every whitespace-separated glyph name in every property is mapped through
  newNames; changed properties are re-wrapped at 80 columns. The file is
  only rewritten (in the project's "key: value" style) when at least one
  name changed.
  """
  wrapper = TextWrapper()
  wrapper.width = 80
  wrapper.break_long_words = False
  wrapper.break_on_hyphens = False

  wrap = lambda names: '\n'.join(wrapper.wrap(' '.join(names)))

  didRename = False
  for propertyName, values in config.items('glyphs'):
    glyphNames = values.split()
    # BUG FIX: this used to (a) set an undefined `sectionChanged` flag --
    # a NameError whenever the first property had no renamed glyph -- and
    # (b) write back the *old* glyphNames without substituting. Build the
    # renamed list and track changes with a properly initialized flag.
    propChanged = False
    renamed = []
    for name in glyphNames:
      if name in newNames:
        propChanged = True
        renamed.append(newNames[name])
      else:
        renamed.append(name)
    if propChanged:
      config.set('glyphs', propertyName, wrap(renamed)+'\n')
      didRename = True

  if didRename:
    # serialize and rewrite "key = value" into the file's "key: value" style
    s = StringIO()
    config.write(s)
    s = s.getvalue()
    s = re.sub(r'\n(\w+)\s+=\s*', '\n\\1: ', s, flags=re.M)
    s = re.sub(r'((?:^|\n)\[[^\]]*\])', '\\1\n', s, flags=re.M)
    s = re.sub(r'\n\t\n', '\n\n', s, flags=re.M)
    s = s.strip() + '\n'
    print('Writing', filename)
    if not dryRun:
      with open(filename, 'w') as f:
        f.write(s)
+
+
def parseAGL(filename): # -> { 2126: 'Omega', ... }
  """Parse Adobe Glyph List data into {codepoint: name}.

  '#' comment lines are skipped, as are entries mapping to several
  codepoints (higher-level combinations like "dalethatafpatah;05D3 05B2").
  """
  mapping = {}
  for rawLine in readLines(filename):
    entry = rawLine.strip()
    if not entry or entry.startswith('#'):
      continue
    name, uc = (part.strip() for part in entry.split(';'))
    if ' ' not in uc:
      # it's a 1:1 mapping
      mapping[int(uc, 16)] = name
  return mapping
+
+
def main():
  """Command-line driver: discover better glyph names across the given
  UFOs (plus optional -src fonts) and apply them everywhere.

  Modes:
    -list-missing: report codepoints present in -src fonts but absent from
      the target UFOs, then exit.
    -list-unnamed: report target glyphs that only carry default-style
      ('uniXXXX'/'uXXXX') names, then exit.
    default: rename default-named glyphs to the first symbolic name found,
      fix up component references, save the UFOs, and rewrite the
      glyphorder/diacritics/fontbuild.cfg resource files.
  All writes are skipped with -dry.
  """
  argparser = argparse.ArgumentParser(description='Enrich UFO glyphnames')

  argparser.add_argument(
    '-dry', dest='dryRun', action='store_const', const=True, default=False,
    help='Do not modify anything, but instead just print what would happen.')

  argparser.add_argument(
    '-list-missing', dest='listMissing', action='store_const', const=True, default=False,
    help='List glyphs with unicodes found in source files but missing in any of the target UFOs.')

  argparser.add_argument(
    '-list-unnamed', dest='listUnnamed', action='store_const', const=True, default=False,
    help="List glyphs with unicodes in target UFOs that don't have symbolic names.")

  argparser.add_argument(
    '-backfill-agl', dest='backfillWithAgl', action='store_const', const=True, default=False,
    help="Use glyphnames from Adobe Glyph List for any glyphs that no names in any of"+
    " the input font files")

  argparser.add_argument(
    '-src', dest='srcFonts', metavar='<fontfile>', type=str, nargs='*',
    help='TrueType, OpenType or UFO fonts to gather glyph info from. '+
    'Names found in earlier-listed fonts are prioritized over later listings.')

  argparser.add_argument(
    'dstFonts', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts to update')

  args = argparser.parse_args()

  # Load UFO fonts
  dstFonts = []
  dstFontPaths = {} # keyed by RFont object
  srcDir = None
  for fn in args.dstFonts:
    fn = fn.rstrip('/')
    font = OpenFont(fn)
    dstFonts.append(font)
    dstFontPaths[font] = fn
    # all UFOs must share one parent dir; that's where resources live
    srcDir2 = os.path.dirname(fn)
    if srcDir is None:
      srcDir = srcDir2
    elif srcDir != srcDir2:
      raise Exception('All <ufofile>s must be rooted in same directory')

  # load fontbuild configuration
  config = RawConfigParser(dict_type=OrderedDict)
  configFilename = os.path.join(srcDir, 'fontbuild.cfg')
  config.read(configFilename)
  glyphOrderFile = configFindResFile(config, srcDir, 'glyphorder')
  diacriticsFile = configFindResFile(config, srcDir, 'diacriticfile')
  glyphOrder = readGlyphOrderFile(glyphOrderFile)

  fallbackGlyphNames = {} # { 2126: 'Omega', ... }
  if args.backfillWithAgl:
    fallbackGlyphNames = parseAGL(configFindResFile(config, srcDir, 'agl_glyphlistfile'))

  # find glyph names
  uc2names, extraUc2names, name2ucsv = buildGlyphNames(
    dstFonts,
    args.srcFonts,
    glyphOrder,
    fallbackGlyphNames
  )
  # Note: name2ucsv has same order as parameters to buildGlyphNames

  if args.listMissing:
    print('# Missing glyphs: (found in -src but not in any <ufofile>)')
    for uc, names in extraUc2names.iteritems():
      print('U+%04X\t%s' % (uc, ', '.join(names)))
    return

  elif args.listUnnamed:
    print('# Unnamed glyphs:')
    unnamed = set()
    for name in glyphOrder:
      if len(name) > 7 and name.startswith('uni'):
        unnamed.add(name)
    # only the destination fonts' maps (first len(dstFonts) entries)
    for gl in name2ucsv[:len(dstFonts)]:
      for name, ucs in gl.iteritems():
        for uc in ucs:
          if isDefaultGlyphNameForUnicode(name, uc):
            unnamed.add(name)
            break
    for name in unnamed:
      print(name)
    return

  printDry = lambda *args: print(*args)
  if args.dryRun:
    printDry = lambda *args: print('[dry-run]', *args)

  newNames = {}
  renameGlyphsQueue = {} # keyed by RFont object

  for font in dstFonts:
    renameGlyphsQueue[font] = {}

  # pick a symbolic replacement for every default-named glyph
  for uc, names in uc2names.iteritems():
    if len(names) < 2:
      continue
    dstGlyphName = names[0]
    if isDefaultGlyphNameForUnicode(dstGlyphName, uc):
      newGlyphName = getFirstNonDefaultGlyphName(uc, names[1:])
      # if newGlyphName is None:
      #   # if we found no symbolic name, check in fallback list
      #   newGlyphName = fallbackGlyphNames.get(uc)
      #   if newGlyphName is not None:
      #     printDry('Using fallback %s' % newGlyphName)
      if newGlyphName is not None:
        printDry('Rename %s -> %s' % (dstGlyphName, newGlyphName))
        for font in dstFonts:
          if dstGlyphName in font:
            renameGlyphsQueue[font][dstGlyphName] = newGlyphName
        newNames[dstGlyphName] = newGlyphName

  if len(newNames) == 0:
    printDry('No changes')
    return

  # rename component instances (before the glyphs themselves are renamed)
  for font in dstFonts:
    componentMap = font.getReverseComponentMapping()
    for currName, newName in renameGlyphsQueue[font].iteritems():
      for depName in componentMap.get(currName, []):
        depG = font[depName]
        for c in depG.components:
          if c.baseGlyph == currName:
            c.baseGlyph = newName
            c.setChanged()

  # rename glyphs
  for font in dstFonts:
    for currName, newName in renameGlyphsQueue[font].iteritems():
      font[currName].name = newName

  # save fonts and update font data
  for font in dstFonts:
    fontPath = dstFontPaths[font]
    printDry('Saving %d glyphs in %s' % (len(newNames), fontPath))
    if not args.dryRun:
      font.save()
    renameUFODetails(font, fontPath, newNames, dryRun=args.dryRun, print=printDry)

  # update resource files
  renameGlyphOrderFile(glyphOrderFile, newNames, dryRun=args.dryRun, print=printDry)
  renameDiacriticsFile(diacriticsFile, newNames, dryRun=args.dryRun, print=printDry)
  renameConfigFile(config, configFilename, newNames, dryRun=args.dryRun, print=printDry)


if __name__ == '__main__':
  main()
diff --git a/misc/fixup-diacritics.py b/misc/fixup-diacritics.py
new file mode 100755
index 000000000..2453e7f3c
--- /dev/null
+++ b/misc/fixup-diacritics.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python
+# encoding: utf8
+from __future__ import print_function
+import os, sys, plistlib, re
+from collections import OrderedDict
+from ConfigParser import RawConfigParser
+from argparse import ArgumentParser
+from robofab.objects.objectsRF import OpenFont
+
+
# Regex matching "default" glyph names, like "uni2043" and "u01C5".
# Bug fix: "ni" is now optional so that both the "uniXXXX" and the bare
# "uXXXX.." naming conventions (per the Adobe Glyph List spec) are matched;
# previously "u01C5" was silently rejected.
uniNameRe = re.compile(r'^u(?:ni)?([0-9A-F]{4,8})$')


def unicodeForDefaultGlyphName(glyphName):
  """Return the codepoint (int) encoded in a default-style glyph name like
  "uni2043" or "u01C5", or None when glyphName is not such a name."""
  m = uniNameRe.match(glyphName)
  if m is not None:
    try:
      return int(m.group(1), 16)
    except ValueError:
      # Can't normally happen (the regex only admits hex digits); kept as a
      # narrow guard instead of the previous bare `except`.
      pass
  return None
+
+
def canonicalGlyphName(glyphName, uc2names):
  """Map a default-style name (e.g. "uni2126") to the preferred name listed
  for its codepoint in uc2names; any other name is returned unchanged."""
  uc = unicodeForDefaultGlyphName(glyphName)
  if uc is None:
    return glyphName
  names = uc2names.get(uc)
  if not names:
    return glyphName
  return names[0]
+
+
+
def parseGlyphComposition(composite):
  """Parse one diacritics-file line, e.g.
  "uni03D2+tonos:top+acute:top=uni03D3/100,0" ->
  ('uni03D3', 'uni03D2', [['tonos','top'],['acute','top']], [100, 0]).
  The "/x,y" offset suffix is optional and defaults to [0, 0]."""
  leftRight = composite.split("=")
  targetParts = leftRight[1].split("/")
  glyphName = targetParts[0]
  if len(targetParts) > 1:
    offset = [int(v) for v in targetParts[1].split(",")]
  else:
    offset = [0, 0]
  recipeParts = leftRight[0].split("+")
  baseName = recipeParts[0]
  accentNames = [part.split(":") for part in recipeParts[1:]]
  return (glyphName, baseName, accentNames, offset)
+
+
def fmtGlyphComposition(glyphName, baseName, accentNames, offset):
  """Inverse of parseGlyphComposition, e.g.
  fmtGlyphComposition('uni03D3', 'uni03D2',
                      [['tonos','top'], ['acute','top']], [100, 0])
  -> "uni03D2+tonos:top+acute:top=uni03D3/100,0"
  The "/x,y" suffix is only emitted for a non-zero offset."""
  parts = [baseName]
  for accent in accentNames:
    # accent is [name] or [name, anchor]; join with ':' when an anchor exists.
    parts.append(':'.join(accent[:2]))
  s = '+'.join(parts) + '=' + glyphName
  if offset[0] != 0 or offset[1] != 0:
    s += '/%d,%d' % (offset[0], offset[1])
  return s
+
+
def loadGlyphCompositions(filename):
  """Read a diacritics composition file.

  Returns an OrderedDict mapping glyphName -> (baseName, accentNames, offset),
  preserving file order. Blank lines and '#' comment lines are skipped.
  """
  compositions = OrderedDict()
  with open(filename, 'r') as f:
    for rawLine in f:
      line = rawLine.strip()
      if not line or line.startswith('#'):
        continue
      glyphName, baseName, accentNames, offset = parseGlyphComposition(line)
      compositions[glyphName] = (baseName, accentNames, offset)
  return compositions
+
+
+def loadAGL(filename): # -> { 2126: 'Omega', ... }
+ m = {}
+ with open(filename, 'r') as f:
+ for line in f:
+ # Omega;2126
+ # dalethatafpatah;05D3 05B2 # higher-level combinations; ignored
+ line = line.strip()
+ if len(line) > 0 and line[0] != '#':
+ name, uc = tuple([c.strip() for c in line.split(';')])
+ if uc.find(' ') == -1:
+ # it's a 1:1 mapping
+ m[int(uc, 16)] = name
+ return m
+
+
+def loadFontGlyphs(font):
+ uc2names = {} # { 2126: ['Omega', ...], ...}
+ name2ucs = {} # { 'Omega': [2126, ...], '.notdef': [], ...}
+ for g in font:
+ name = g.name
+ ucs = g.unicodes
+ name2ucs[name] = ucs
+ for uc in ucs:
+ names = uc2names.setdefault(uc, [])
+ if name not in names:
+ names.append(name)
+ return uc2names, name2ucs
+
+
def main():
  """Entry point: rename composed diacritic glyphs in src/diacritics.txt to
  their AGL names when the current name is a default-style name ("uniXXXX")
  that does not exist in any of the given UFO fonts."""
  argparser = ArgumentParser(description='Fixup diacritic names')

  argparser.add_argument(
    '-dry', dest='dryRun', action='store_const', const=True, default=False,
    help='Do not modify anything, but instead just print what would happen.')

  argparser.add_argument(
    'fontPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts')

  args = argparser.parse_args()
  dryRun = args.dryRun

  # Merge codepoint<->name mappings across all given fonts.
  uc2names = {}
  name2ucs = {}

  for fontPath in args.fontPaths:
    font = OpenFont(fontPath)
    _uc2names, _name2ucs = loadFontGlyphs(font)
    for uc, _names in _uc2names.iteritems():
      names = uc2names.setdefault(uc, [])
      for name in _names:
        if name not in names:
          names.append(name)
    for name, _ucs in _name2ucs.iteritems():
      ucs = name2ucs.setdefault(name, [])
      for uc in _ucs:
        if uc not in ucs:
          ucs.append(uc)

  agl = loadAGL('src/glyphlist.txt') # { 2126: 'Omega', ... }

  diacriticsFilename = 'src/diacritics.txt'
  diacriticComps = loadGlyphCompositions(diacriticsFilename) # {glyphName => (baseName, a, o)}

  # Rename compositions whose glyph name is unknown to the fonts but whose
  # codepoint has a proper AGL name.
  for glyphName, comp in list(diacriticComps.items()):
    if glyphName not in name2ucs:
      uc = unicodeForDefaultGlyphName(glyphName)
      if uc is not None:
        aglName = agl.get(uc)
        if aglName is not None:
          if aglName in diacriticComps:
            raise Exception('composing same glyph with different names:', aglName, glyphName)
          print('rename', glyphName, '->', aglName, '(U+%04X)' % uc)
          # NOTE: delete+reinsert moves the entry to the end of the
          # OrderedDict, so renamed entries change position in the output.
          del diacriticComps[glyphName]
          diacriticComps[aglName] = comp

  # Serialize back to the diacritics file format and write (unless -dry).
  lines = []
  for glyphName, comp in diacriticComps.iteritems():
    lines.append(fmtGlyphComposition(glyphName, *comp))
  # print('\n'.join(lines))
  print('Write', diacriticsFilename)
  if not dryRun:
    with open(diacriticsFilename, 'w') as f:
      for line in lines:
        f.write(line + '\n')
+
+
+
+
+main()
diff --git a/misc/fixup-features.py b/misc/fixup-features.py
new file mode 100755
index 000000000..1c2c0d087
--- /dev/null
+++ b/misc/fixup-features.py
@@ -0,0 +1,324 @@
+#!/usr/bin/env python
+# encoding: utf8
+from __future__ import print_function
+import os, sys, plistlib, re
+from collections import OrderedDict
+from ConfigParser import RawConfigParser
+from argparse import ArgumentParser
+from robofab.objects.objectsRF import OpenFont
+from fontTools.feaLib.parser import Parser as FeaParser
+from fontTools.feaLib.builder import Builder as FeaBuilder
+from fontTools.ttLib import TTFont
+
+
# Regex matching "default" glyph names, like "uni2043" and "u01C5".
# Bug fix: "ni" is now optional so that both the "uniXXXX" and the bare
# "uXXXX.." naming conventions (per the Adobe Glyph List spec) are matched;
# previously "u01C5" was silently rejected.
uniNameRe = re.compile(r'^u(?:ni)?([0-9A-F]{4,8})$')


def unicodeForDefaultGlyphName(glyphName):
  """Return the codepoint (int) encoded in a default-style glyph name like
  "uni2043" or "u01C5", or None when glyphName is not such a name."""
  m = uniNameRe.match(glyphName)
  if m is not None:
    try:
      return int(m.group(1), 16)
    except ValueError:
      # Can't normally happen (the regex only admits hex digits); kept as a
      # narrow guard instead of the previous bare `except`.
      pass
  return None
+
+
def canonicalGlyphName(glyphName, uc2names):
  """Resolve a default-style glyph name to the preferred registered name.

  When glyphName encodes a codepoint (e.g. "uni2126") and uc2names lists
  names for that codepoint, the first listed name wins; otherwise the input
  name is returned untouched.
  """
  uc = unicodeForDefaultGlyphName(glyphName)
  if uc is not None:
    registered = uc2names.get(uc)
    if registered:
      return registered[0]
  return glyphName
+
+
+
def parseGlyphComposition(composite):
  """Parse one diacritics-file line, e.g.
  "uni03D2+tonos:top=uni03D3/100,0" ->
  ('uni03D3', 'uni03D2', [['tonos','top']], [100, 0]).
  The "/x,y" offset suffix is optional and defaults to [0, 0]."""
  leftRight = composite.split("=")
  targetParts = leftRight[1].split("/")
  glyphName = targetParts[0]
  if len(targetParts) > 1:
    offset = [int(v) for v in targetParts[1].split(",")]
  else:
    offset = [0, 0]
  recipeParts = leftRight[0].split("+")
  baseName = recipeParts[0]
  accentNames = [part.split(":") for part in recipeParts[1:]]
  return (glyphName, baseName, accentNames, offset)
+
+
def loadGlyphCompositions(filename): # { glyphName => (baseName, accentNames, offset) }
  """Read a diacritics composition file into an OrderedDict keyed by the
  composed glyph's name, preserving file order. Blank lines and lines
  starting with '#' are skipped."""
  compositions = OrderedDict()
  with open(filename, 'r') as f:
    for line in f:
      line = line.strip()
      if len(line) > 0 and line[0] != '#':
        glyphName, baseName, accentNames, offset = parseGlyphComposition(line)
        compositions[glyphName] = (baseName, accentNames, offset)
  return compositions
+
+
def loadAGL(filename): # -> { 2126: 'Omega', ... }
  """Load an Adobe Glyph List file into { codepoint:int -> glyphName:str }.

  Only 1:1 mappings are kept; entries mapping a name to several codepoints
  are ignored, as are blank lines and '#' comments.
  """
  m = {}
  with open(filename, 'r') as f:
    for line in f:
      # Omega;2126
      # dalethatafpatah;05D3 05B2 # higher-level combinations; ignored
      line = line.strip()
      if len(line) > 0 and line[0] != '#':
        name, uc = tuple([c.strip() for c in line.split(';')])
        if uc.find(' ') == -1:
          # it's a 1:1 mapping
          m[int(uc, 16)] = name
  return m
+
+
def loadLocalNamesDB(fonts, agl, diacriticComps):
  """Build the glyph-name database used to canonicalize feature files.

  fonts:          opened robofab fonts
  agl:            { 2126: 'Omega', ... } (from loadAGL)
  diacriticComps: { glyphName: (baseName, accentNames, offset), ... }

  Returns (uc2names, name2ucs, allNames):
    uc2names: { 2126: ['Omega', ...], ... }
    name2ucs: { 'Omega': set([2126, ...]), ... }
    allNames: set of every known glyph name.

  NOTE(review): uc2names stays None when `fonts` is empty, which would make
  the setdefault calls below fail -- callers always pass at least one font.
  """
  uc2names = None  # { 2126: ['Omega', ...], ...}
  allNames = set() # set('Omega', ...)

  # Merge character mappings of all fonts; the first font seeds the dict.
  for font in fonts:
    _uc2names = font.getCharacterMapping()  # { 2126: ['Omega', ...], ...}
    if uc2names is None:
      uc2names = _uc2names
    else:
      for uc, _names in _uc2names.iteritems():
        names = uc2names.setdefault(uc, [])
        for name in _names:
          if name not in names:
            names.append(name)
    for g in font:
      allNames.add(g.name)

  # agl { 2126: 'Omega', ...} -> { 'Omega': [2126, ...], ...}
  aglName2Ucs = {}
  for uc, name in agl.iteritems():
    aglName2Ucs.setdefault(name, []).append(uc)

  # Register composed diacritic glyphs, resolving default-style names
  # (e.g. "uni03D3") to AGL names where possible.
  for glyphName, comp in diacriticComps.iteritems():
    aglUCs = aglName2Ucs.get(glyphName)
    if aglUCs is None:
      uc = unicodeForDefaultGlyphName(glyphName)
      if uc is not None:
        glyphName2 = agl.get(uc)
        if glyphName2 is not None:
          glyphName = glyphName2
          names = uc2names.setdefault(uc, [])
          if glyphName not in names:
            names.append(glyphName)
        # NOTE(review): a composition whose name is neither in the AGL nor a
        # default-style name is never added to allNames -- confirm intended.
        allNames.add(glyphName)
    else:
      allNames.add(glyphName)
      for uc in aglUCs:
        names = uc2names.get(uc, [])
        if glyphName not in names:
          names.append(glyphName)
        uc2names[uc] = names

  # Invert uc2names into name -> set of codepoints.
  name2ucs = {}  # { 'Omega': [2126, ...], ...}
  for uc, names in uc2names.iteritems():
    for name in names:
      name2ucs.setdefault(name, set()).add(uc)

  return uc2names, name2ucs, allNames
+
+
def main():
  """Entry point: canonicalize glyph names inside the fonts' features.fea.

  Class definitions (@cls = [...]) and `sub ... by ...;` rules are rewritten
  so that every glyph name is the canonical one known to the given UFO
  fonts; unknown class-member glyphs are dropped. The result is written to
  "features.fea2" next to the original (unless -dry).
  """
  argparser = ArgumentParser(description='Fixup features.fea')

  argparser.add_argument(
    '-dry', dest='dryRun', action='store_const', const=True, default=False,
    help='Do not modify anything, but instead just print what would happen.')

  argparser.add_argument(
    'fontPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts to update')

  args = argparser.parse_args()
  dryRun = args.dryRun

  agl = loadAGL('src/glyphlist.txt') # { 2126: 'Omega', ... }
  diacriticComps = loadGlyphCompositions('src/diacritics.txt') # {glyphName => (baseName, a, o)}

  # collect glyph names
  fonts = [OpenFont(fontPath) for fontPath in args.fontPaths]
  uc2names, name2ucs, allNames = loadLocalNamesDB(fonts, agl, diacriticComps)

  # open feature.fea -- use the first font directory that has one.
  featuresFilename = ''
  featuresLines = []
  for fontPath in args.fontPaths:
    try:
      featuresFilename = os.path.join(fontPath, 'features.fea')
      with open(featuresFilename, 'r') as f:
        print('read', featuresFilename)
        featuresLines = f.read().splitlines()
      break
    except:
      pass

  # NOTE(review): in subRe the greedy (.+) will usually consume a trailing
  # apostrophe, so group 2 ('?) tends to be empty -- confirm intended.
  classDefRe = re.compile(r'^@([^\s=]+)\s*=\s*\[([^\]]+)\]\s*;\s*$')
  subRe = re.compile(r'^\s*sub\s+(.+)(\'?)\s+by\s+(.+)\s*;\s*$')
  sub2Re = re.compile(r'^\s*sub\s+([^\[]+)\s+\[\s*([^\]]+)\s*\](\'?)\s+by\s+(.+)\s*;\s*$')
  # sub lmidtilde [uni1ABB uni1ABD uni1ABE]' by uni1ABE.w2;
  # sub lmidtilde uni1ABC' by uni1ABC.w2;
  spacesRe = re.compile(r'[\s\r\n]+')

  classDefs = {}
  featuresLines2 = []

  for line in featuresLines:
    # Case 1: class definition line "@name = [ ... ];"
    clsM = classDefRe.match(line)
    if clsM is not None:
      clsName = clsM.group(1)
      names = spacesRe.split(clsM.group(2).strip())
      if clsName in classDefs:
        raise Exception('duplicate class definition ' + clsName)
      # print('classdef', clsName, ' '.join(names))
      # print('classdef', clsName)
      names2 = []
      for name in names:
        if name == '-':
          # e.g. A - Z
          names2.append(name)
          continue
        if name[0] != '@':
          canonName = canonicalGlyphName(name, uc2names)
          if canonName != name:
            # print('renaming ' + name + ' -> ' + canonName)
            names2.append(canonName)
          elif name not in allNames:
            print('skipping unknown glyph ' + name)
          else:
            names2.append(name)
        else:
          raise Exception('todo: class-ref ' + name + ' in class-def ' + clsName)
      classDefs[clsName] = names2
      line = '@%s = [ %s ];' % (clsName, ' '.join(names2))
      featuresLines2.append(line)
      continue


  # sub2M = sub2Re.match(line)
  # if sub2M is not None:
  #   findNames1 = spacesRe.split(sub2M.group(1))
  #   findNames2 = spacesRe.split(sub2M.group(2))
  #   apos = sub2M.group(3)
  #   rightName = sub2M.group(4)
  #   print('TODO: sub2', findNames1, findNames2, apos, rightName)
  #   featuresLines2.append(line)
  #   continue


    # Case 2: substitution rule, with (sub2Re) or without (subRe) a
    # bracketed context class before the "by" clause.
    sub2M = sub2Re.match(line)
    subM = None
    if sub2M is None:
      subM = subRe.match(line)
    if subM is not None or sub2M is not None:
      findNamesStr = ''
      findNamesHasBrackets = False
      findNames = []

      findNamesBStr = ''
      findNamesBHasBrackets = False
      findNamesB = []

      newNamesStr = ''
      newNamesHasBrackets = False
      newNames = []

      apos0 = ''

      if subM is not None:
        findNamesStr = subM.group(1)
        apos0 = subM.group(2)
        newNamesStr = subM.group(3)
      else: # sub2M
        findNamesStr = sub2M.group(1)
        findNamesBStr = sub2M.group(2)
        apos0 = sub2M.group(3)
        newNamesStr = sub2M.group(4)

      # Strip optional [ ... ] around each name list, remembering whether to
      # re-add the brackets when the line is re-assembled.
      if newNamesStr[0] == '[':
        newNamesHasBrackets = True
        newNamesStr = newNamesStr.strip('[ ]')
      newNames = spacesRe.split(newNamesStr)

      if findNamesStr[0] == '[':
        findNamesHasBrackets = True
        findNamesStr = findNamesStr.strip('[ ]')
      findNames = spacesRe.split(findNamesStr)

      if findNamesBStr != '':
        if findNamesBStr[0] == '[':
          findNamesBHasBrackets = True
          findNamesBStr = findNamesBStr.strip('[ ]')
        findNamesB = spacesRe.split(findNamesBStr)


      # Canonicalize every glyph name in all three lists, preserving any
      # trailing apostrophe (fea "marked" glyph) and @class references.
      names22 = []
      for names in [findNames, findNamesB, newNames]:
        names2 = []
        for name in names:
          if name[0] == '@':
            clsName = name[1:].rstrip("'")
            if clsName not in classDefs:
              raise Exception('sub: missing target class ' + clsName + ' at\n' + line)
            names2.append(name)
          else:
            apos = name[-1] == "'"
            if apos:
              name = name[:-1]
            if name not in allNames:
              canonName = canonicalGlyphName(name, uc2names)
              if canonName != name:
                print('renaming ' + name + ' -> ' + canonName)
                name = canonName
              else:
                raise Exception('TODO: unknown name', name)
            # if we remove names, we also need to remove subs (that become empty), and so on.
            if apos:
              name += "'"
            names2.append(name)
        names22.append(names2)

      findNames2, findNamesB2, newNames2 = names22

      # Re-assemble the rule text from the canonicalized name lists.
      findNamesStr = ' '.join(findNames2)
      if findNamesHasBrackets: findNamesStr = '[' + findNamesStr + ']'

      if findNamesBStr != '':
        findNamesBStr = ' '.join(findNamesB2)
        if findNamesBHasBrackets: findNamesBStr = '[' + findNamesBStr + ']'

      newNamesStr = ' '.join(newNames2)
      if newNamesHasBrackets: newNamesStr = '[' + newNamesStr + ']'

      if subM is not None:
        line = ' sub %s%s by %s;' % (findNamesStr, apos0, newNamesStr)
      else:
        # if subM is None:
        # sub bbar [uni1ABB uni1ABD uni1ABE]' by uni1ABE.w2;
        line = ' sub %s [%s]%s by %s;' % (findNamesStr, findNamesBStr, apos0, newNamesStr)

      featuresLines2.append(line)


  # Write the rewritten feature file next to the original ("features.fea2").
  print('Write', featuresFilename)
  if not dryRun:
    with open(featuresFilename + '2', 'w') as f:
      for line in featuresLines2:
        f.write(line + '\n')

  # FeaParser(featuresFilename + '2', allNames).parse()

  # font = TTFont('build/dist-unhinted/Interface-Regular.otf')
  # FeaBuilder(font, featuresFilename + '2').build()
+
+
+
+
+
+main()
diff --git a/misc/fixup-kerning.py b/misc/fixup-kerning.py
new file mode 100755
index 000000000..fc4ce8071
--- /dev/null
+++ b/misc/fixup-kerning.py
@@ -0,0 +1,362 @@
+#!/usr/bin/env python
+# encoding: utf8
+from __future__ import print_function
+import os, sys, plistlib, json
+from collections import OrderedDict
+from ConfigParser import RawConfigParser
+from argparse import ArgumentParser
+from fontTools import ttLib
+from robofab.objects.objectsRF import OpenFont
+
+
def getTTCharMap(font): # -> { 2126: 'Omegagreek', ...}
  """Return the unicode-to-glyphname map of a compiled font.

  font: a ttLib.TTFont instance, or a filename to open as one.
  Uses the unicode (platformID 0) cmap subtable with the highest format
  number. Raises Exception when the font has no cmap table or when the same
  codepoint maps to two different glyph names.
  """
  if isinstance(font, str):
    font = ttLib.TTFont(font)

  if 'cmap' not in font:
    raise Exception('missing cmap table')

  # Pick the best unicode subtable: highest format wins.
  best = None
  bestFormat = 0
  for subtable in font['cmap'].tables:
    # platformID: 0=unicode, 1=mac, 2=(reserved), 3=microsoft
    if subtable.platformID == 0 and subtable.format > bestFormat:
      best = subtable
      bestFormat = subtable.format

  charMap = {}
  if best is not None:
    for cp, glyphname in best.cmap.items():
      if cp in charMap:
        raise Exception('duplicate unicode-to-glyphname mapping: U+%04X => %r and %r' % (
          cp, glyphname, charMap[cp]))
      charMap[cp] = glyphname

  return charMap
+
+
def revCharMap(ucToNames):
  """Invert a codepoint->name(s) mapping.

  Accepts either {2126: ['Omega', 'Omegagr']} or {2126: 'Omega'} and returns
  {'Omega': 2126, 'Omegagr': 2126}. The value shape (list vs single string)
  is sniffed from an arbitrary entry, assuming all entries are uniform.

  Fix: uses .values()/.items() instead of the Python-2-only
  itervalues()/iteritems(), so the function also works on Python 3.
  """
  m = {}
  if len(ucToNames) == 0:
    return m

  # Probe one value to decide between the list-of-names and single-name shapes.
  probe = next(iter(ucToNames.values()))
  lists = not isinstance(probe, str)

  if lists:
    for uc, names in ucToNames.items():
      for name in names:
        m[name] = uc
  else:
    for uc, name in ucToNames.items():
      m[name] = uc

  return m
+
+
def getGlyphNameDifferenceMap(srcCharMap, dstCharMap, dstRevCharMap):
  """Build { srcName: dstName } for glyphs whose name differs between fonts.

  srcCharMap:    { 2126: 'Omegagreek', ... }    (source font; single name)
  dstCharMap:    { 2126: ['Omega', ...], ... }  (destination font)
  dstRevCharMap: { 'Omega': 2126, ... }

  Only names that differ are included; additionally, a source name already
  present in the destination font is never renamed.

  Fix: uses .items() instead of the Python-2-only iteritems() so the
  function also works on Python 3 (identical behavior on Python 2).
  """
  m = {} # { 'Omegagreek': 'Omega', ... }
  for uc, srcName in srcCharMap.items():
    dstNames = dstCharMap.get(uc)
    if not dstNames:
      continue
    if len(dstNames) != 1:
      print('warning: ignoring multi-glyph map for U+%04X in source font' % uc)
    dstName = dstNames[0]
    if srcName != dstName and srcName not in dstRevCharMap:
      m[srcName] = dstName
  return m
+
+
def fixupGroups(fontPath, dstGlyphNames, srcToDstMap, dryRun, stats):
  """Rewrite groups.plist of the UFO at fontPath.

  Renames glyphs per srcToDstMap, drops glyphs not in dstGlyphNames, and
  drops groups that end up empty; everything removed or renamed is recorded
  in `stats`. Returns (groups2, glyphToGroups): the cleaned
  {groupName: [glyphName, ...]} mapping and, for each kept glyph, the list
  of groups containing it.
  """
  filename = os.path.join(fontPath, 'groups.plist')
  groups = plistlib.readPlist(filename)
  groups2 = {}
  glyphToGroups = {}

  for groupName, glyphNames in groups.items():
    keptNames = []
    for glyphName in glyphNames:
      renamed = srcToDstMap.get(glyphName)
      if renamed is not None:
        stats.renamedGlyphs[glyphName] = renamed
        glyphName = renamed
      if glyphName not in dstGlyphNames:
        # Glyph no longer exists in the destination font.
        stats.removedGlyphs.add(glyphName)
        continue
      keptNames.append(glyphName)
      glyphToGroups[glyphName] = glyphToGroups.get(glyphName, []) + [groupName]
    if keptNames:
      groups2[groupName] = keptNames
    else:
      stats.removedGroups.add(groupName)

  print('Writing', filename)
  if not dryRun:
    plistlib.writePlist(groups2, filename)

  return groups2, glyphToGroups
+
+
def fixupKerning(fontPath, dstGlyphNames, srcToDstMap, groups, glyphToGroups, dryRun, stats):
  """Rewrite kerning.plist of the UFO at fontPath.

  Renames glyphs per srcToDstMap, drops entries referencing dead glyphs or
  dead groups (groups not in `groups`), and removes individual glyph pairs
  already covered by a group pair. Changes are recorded in `stats`; the
  cleaned kerning dict is written back (unless dryRun) and returned.
  """
  filename = os.path.join(fontPath, 'kerning.plist')
  kerning = plistlib.readPlist(filename)
  kerning2 = {}
  groupPairs = {} # { "lglyphname+lglyphname": ("lgroupname"|"", "rgroupname"|"", 123) }
  # pairs = {} # { "name+name" => 123 }

  # Pass 1: rename/drop left and right entries, collecting every glyph-level
  # pair that is expressed through a group into groupPairs.
  for leftName, right in kerning.items():
    leftIsGroup = leftName[0] == '@'
    leftGroupNames = None

    if leftIsGroup:
      # left is a group
      if leftName not in groups:
        # dead group -- skip
        stats.removedGroups.add(leftName)
        continue
      leftGroupNames = groups[leftName]
    else:
      if leftName in srcToDstMap:
        leftName2 = srcToDstMap[leftName]
        stats.renamedGlyphs[leftName] = leftName2
        leftName = leftName2
      if leftName not in dstGlyphNames:
        # dead glyphname -- skip
        stats.removedGlyphs.add(leftName)
        continue

    right2 = {}
    rightGroupNamesAndValues = []
    for rightName, kerningValue in right.iteritems():
      rightIsGroup = rightName[0] == '@'
      if rightIsGroup:
        if leftIsGroup and leftGroupNames is None:
          leftGroupNames = [leftName]
        if rightName in groups:
          right2[rightName] = kerningValue
          rightGroupNamesAndValues.append((groups[rightName], rightName, kerningValue))
        else:
          stats.removedGroups.add(rightName)
      else:
        if rightName in srcToDstMap:
          rightName2 = srcToDstMap[rightName]
          stats.renamedGlyphs[rightName] = rightName2
          rightName = rightName2
        if rightName in dstGlyphNames:
          right2[rightName] = kerningValue
          if leftIsGroup:
            rightGroupNamesAndValues.append(([rightName], '', kerningValue))
        else:
          stats.removedGlyphs.add(rightName)

    if len(right2):
      kerning2[leftName] = right2

      # update groupPairs
      # NOTE(review): rightIsGroup here is the loop variable leaked from the
      # inner loop above (its value for the *last* right entry) -- this looks
      # like it was meant to be leftIsGroup; confirm before relying on
      # lgroupname.
      lgroupname = leftName if rightIsGroup else ''
      if leftIsGroup:
        for lname in leftGroupNames:
          kPrefix = lname + '+'
          for rnames, rgroupname, kernv in rightGroupNamesAndValues:
            for rname in rnames:
              k = kPrefix + rname
              v = (lgroupname, rgroupname, kernv)
              if k in groupPairs:
                raise Exception('duplicate group pair %s: %r and %r' % (k, groupPairs[k], v))
              groupPairs[k] = v

    elif leftIsGroup:
      stats.removedGroups.add(leftName)
    else:
      stats.removedGlyphs.add(leftName)

  # print('groupPairs:', groupPairs)

  # remove individual pairs that are already represented through groups
  kerning = kerning2
  kerning2 = {}
  for leftName, right in kerning.items():
    leftIsGroup = leftName[0] == '@'
    # leftNames = groups[leftName] if leftIsGroup else [leftName]

    if not leftIsGroup:
      right2 = {}
      for rightName, kernVal in right.iteritems():
        rightIsGroup = rightName[0] == '@'
        if not rightIsGroup:
          k = leftName + '+' + rightName
          if k in groupPairs:
            groupPair = groupPairs[k]
            print(('simplify individual pair %r: kern %r (individual) -> %r (group)') % (
              k, kernVal, groupPair[2]))
            stats.simplifiedKerningPairs.add(k)
          else:
            right2[rightName] = kernVal
        else:
          right2[rightName] = kernVal
    else:
      # TODO, probably
      right2 = right

    kerning2[leftName] = right2

  print('Writing', filename)
  if not dryRun:
    plistlib.writePlist(kerning2, filename)

  return kerning2
+
+
+def loadJSONCharMap(filename):
+ m = None
+ if filename == '-':
+ m = json.load(sys.stdin)
+ else:
+ with open(filename, 'r') as f:
+ m = json.load(f)
+ if not isinstance(m, dict):
+ raise Exception('json root is not a dict')
+ if len(m) > 0:
+ for k, v in m.iteritems():
+ if not isinstance(k, int) and not isinstance(k, float):
+ raise Exception('json dict key is not a number')
+ if not isinstance(v, str):
+ raise Exception('json dict value is not a string')
+ break
+ return m
+
+
class Stats:
  """Mutable record of the changes made while fixing up one UFO font."""
  def __init__(self):
    self.removedGroups = set()           # group names dropped entirely
    self.removedGlyphs = set()           # glyph names dropped from groups/kerning
    self.simplifiedKerningPairs = set()  # "left+right" pairs covered by group kerning
    self.renamedGlyphs = {}              # { oldName: newName }
+
+
+def configFindResFile(config, basedir, name):
+ fn = os.path.join(basedir, config.get("res", name))
+ if not os.path.isfile(fn):
+ basedir = os.path.dirname(basedir)
+ fn = os.path.join(basedir, config.get("res", name))
+ if not os.path.isfile(fn):
+ fn = None
+ return fn
+
+
def main():
  """Entry point: rename glyph names in UFO kerning/groups and remove unused
  groups and glyph names, driven by a source character map (-src-json or
  -src-font)."""
  jsonSchemaDescr = '{[unicode:int]: glyphname:string, ...}'

  argparser = ArgumentParser(
    description='Rename glyphnames in UFO kerning and remove unused groups and glyphnames.')

  argparser.add_argument(
    '-dry', dest='dryRun', action='store_const', const=True, default=False,
    help='Do not modify anything, but instead just print what would happen.')

  argparser.add_argument(
    '-no-stats', dest='noStats', action='store_const', const=True, default=False,
    help='Do not print statistics at the end.')

  argparser.add_argument(
    '-save-stats', dest='saveStatsPath', metavar='<file>', type=str,
    help='Write detailed statistics to JSON file.')

  argparser.add_argument(
    '-src-json', dest='srcJSONFile', metavar='<file>', type=str,
    help='JSON file to read glyph names from.'+
      ' Expected schema: ' + jsonSchemaDescr + ' (e.g. {2126: "Omega"})')

  argparser.add_argument(
    '-src-font', dest='srcFontFile', metavar='<file>', type=str,
    help='TrueType or OpenType font to read glyph names from.')

  argparser.add_argument(
    'dstFontsPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts to update')

  args = argparser.parse_args()
  dryRun = args.dryRun

  if args.srcJSONFile and args.srcFontFile:
    argparser.error('Both -src-json and -src-font specified -- please provide only one.')

  # Strip trailing slashes from font paths
  args.dstFontsPaths = [s.rstrip('/ ') for s in args.dstFontsPaths]

  # Load the source char map from whichever source was given.
  srcCharMap = None
  if args.srcJSONFile:
    try:
      srcCharMap = loadJSONCharMap(args.srcJSONFile)
    except Exception as err:
      argparser.error('Invalid JSON: Expected schema %s (%s)' % (jsonSchemaDescr, err))
  elif args.srcFontFile:
    srcCharMap = getTTCharMap(args.srcFontFile.rstrip('/ ')) # -> { 2126: 'Omegagreek', ...}
  else:
    argparser.error('No source provided (-src-* argument missing)')
  if len(srcCharMap) == 0:
    print('Empty character map', file=sys.stderr)
    sys.exit(1)

  # Find project source dir: all UFOs must share the same parent directory.
  srcDir = ''
  for dstFontPath in args.dstFontsPaths:
    s = os.path.dirname(dstFontPath)
    if not srcDir:
      srcDir = s
    elif srcDir != s:
      raise Exception('All <ufofile>s must be rooted in the same directory')

  # Load font project config
  # load fontbuild configuration
  config = RawConfigParser(dict_type=OrderedDict)
  configFilename = os.path.join(srcDir, 'fontbuild.cfg')
  config.read(configFilename)
  diacriticsFile = configFindResFile(config, srcDir, 'diacriticfile')

  for dstFontPath in args.dstFontsPaths:
    dstFont = OpenFont(dstFontPath)
    dstCharMap = dstFont.getCharacterMapping()  # -> { 2126: [ 'Omega', ...], ...}
    dstRevCharMap = revCharMap(dstCharMap)  # { 'Omega': 2126, ...}
    srcToDstMap = getGlyphNameDifferenceMap(srcCharMap, dstCharMap, dstRevCharMap)

    stats = Stats()

    groups, glyphToGroups = fixupGroups(dstFontPath, dstRevCharMap, srcToDstMap, dryRun, stats)
    fixupKerning(dstFontPath, dstRevCharMap, srcToDstMap, groups, glyphToGroups, dryRun, stats)

    # stats
    if args.saveStatsPath or not args.noStats:
      if not args.noStats:
        print('stats for %s:' % dstFontPath)
        print(' Deleted %d groups and %d glyphs.' % (
          len(stats.removedGroups), len(stats.removedGlyphs)))
        print(' Renamed %d glyphs.' % len(stats.renamedGlyphs))
        print(' Simplified %d kerning pairs.' % len(stats.simplifiedKerningPairs))
      if args.saveStatsPath:
        # Bug fix: sets are not JSON-serializable; convert them to sorted
        # lists so the output is valid JSON and deterministic.
        statsObj = {
          'deletedGroups': sorted(stats.removedGroups),
          'deletedGlyphs': sorted(stats.removedGlyphs),
          'simplifiedKerningPairs': sorted(stats.simplifiedKerningPairs),
          'renamedGlyphs': stats.renamedGlyphs,
        }
        f = sys.stdout
        try:
          if args.saveStatsPath != '-':
            f = open(args.saveStatsPath, 'w')
            print('Writing stats to', args.saveStatsPath)
          # Bug fix: write to the selected stream `f` -- previously the JSON
          # always went to stdout even when a stats file had been opened.
          json.dump(statsObj, f, indent=2, separators=(',', ': '))
        finally:
          if f is not sys.stdout:
            f.close()
+
+
# Script entry point.
if __name__ == '__main__':
  main()
diff --git a/misc/fontinfo.py b/misc/fontinfo.py
new file mode 100755
index 000000000..47e2d66b1
--- /dev/null
+++ b/misc/fontinfo.py
@@ -0,0 +1,391 @@
+#!/usr/bin/env python
+# encoding: utf8
+#
+# Generates JSON-encoded information about fonts
+#
+import os
+import sys
+import argparse
+import json
+
+from fontTools import ttLib
+from fontTools.misc import sstruct
+from fontTools.ttLib.tables._h_e_a_d import headFormat
+from fontTools.ttLib.tables._h_h_e_a import hheaFormat
+from fontTools.ttLib.tables._m_a_x_p import maxpFormat_0_5, maxpFormat_1_0_add
+from fontTools.ttLib.tables._p_o_s_t import postFormat
+from fontTools.ttLib.tables.O_S_2f_2 import OS2_format_1, OS2_format_2, OS2_format_5
+# from robofab.world import world, RFont, RGlyph, OpenFont, NewFont
+# from robofab.objects.objectsRF import RFont, RGlyph, OpenFont, NewFont, RContour
+
+_NAME_IDS = {}
+
+
def num(s):
  """Parse a decimal string: float when it contains a '.', else int."""
  return float(s) if '.' in s else int(s)
+
+
+def tableNamesToDict(table, names):
+ t = {}
+ for name in names:
+ if name.find('reserved') == 0:
+ continue
+ t[name] = getattr(table, name)
+ return t
+
+
def sstructTableToDict(table, format):
  """Convert an sstruct-backed font table to a plain dict.

  `format` is an sstruct format string (e.g. headFormat); the field names it
  declares are read off `table`. Reserved fields are dropped by
  tableNamesToDict."""
  _, names, _ = sstruct.getformat(format)
  return tableNamesToDict(table, names)
+
+
OUTPUT_TYPE_COMPLETE = 'complete'
OUTPUT_TYPE_GLYPHLIST = 'glyphlist'


GLYPHS_TYPE_UNKNOWN = '?'
GLYPHS_TYPE_TT = 'tt'
GLYPHS_TYPE_CFF = 'cff'

def getGlyphsType(tt):
  """Classify a font's outline format by which table it carries:
  'CFF ' -> GLYPHS_TYPE_CFF, 'glyf' -> GLYPHS_TYPE_TT, else unknown."""
  if 'CFF ' in tt:
    return GLYPHS_TYPE_CFF
  if 'glyf' in tt:
    return GLYPHS_TYPE_TT
  return GLYPHS_TYPE_UNKNOWN
+
+
class GlyphInfo:
  """Shape and metric info for one glyph, serialized row-wise via the
  structKeys()/structValues() pair."""

  def __init__(self, g, name, unicodes, type, glyphTable):
    self._type = type # GLYPHS_TYPE_*
    self._glyphTable = glyphTable

    self.name = name
    self.width = g.width
    self.lsb = g.lsb
    self.unicodes = unicodes

    # Vertical metrics may be absent from the glyph set; fall back to 0.
    if g.height is not None:
      self.tsb = g.tsb
      self.height = g.height
    else:
      self.tsb = 0
      self.height = 0

    self.numContours = 0
    self.contoursBBox = (0,0,0,0) # xMin, yMin, xMax, yMax
    self.hasHints = False

    if self._type is GLYPHS_TYPE_CFF:
      self._addCFFInfo()
    elif self._type is GLYPHS_TYPE_TT:
      self._addTTInfo()

  def _addTTInfo(self):
    # Pull contour data straight from the 'glyf' table entry for this glyph.
    g = self._glyphTable[self.name]
    self.numContours = g.numberOfContours
    if g.numberOfContours:
      # Bug fix: the second component was previously g.xMin, producing the
      # bogus bbox (xMin, xMin, xMax, yMax) instead of (xMin, yMin, ...).
      self.contoursBBox = (g.xMin, g.yMin, g.xMax, g.yMax)
    self.hasHints = hasattr(g, "program")

  def _addCFFInfo(self):
    # TODO: parse CFF dict tree
    pass

  @classmethod
  def structKeys(cls, type):
    """Ordered field names matching structValues() for the given type."""
    v = [
      'name',
      'unicodes',
      'width',
      'lsb',
      'height',
      'tsb',
      'hasHints',
    ]
    if type is GLYPHS_TYPE_TT:
      v += (
        'numContours',
        'contoursBBox',
      )
    return v

  def structValues(self):
    """Field values in the same order as structKeys()."""
    v = [
      self.name,
      self.unicodes,
      self.width,
      self.lsb,
      self.height,
      self.tsb,
      self.hasHints,
    ]
    if self._type is GLYPHS_TYPE_TT:
      v += (
        self.numContours,
        self.contoursBBox,
      )
    return v
+
+
# exported convenience function
def GenGlyphList(font, withGlyphs=None):
  """Return the glyph list (names + unicode mappings) of `font`, which may
  be a TTFont instance or a file path.

  Note: `withGlyphs` is accepted for interface compatibility but is not
  forwarded, matching the original behavior.
  """
  tt = ttLib.TTFont(font) if isinstance(font, str) else font
  return genGlyphsInfo(tt, OUTPUT_TYPE_GLYPHLIST)
+
+
def genGlyphsInfo(tt, outputType, glyphsType=GLYPHS_TYPE_UNKNOWN, glyphsTable=None, withGlyphs=None):
  """Generate glyph information for a TTFont.

  outputType: OUTPUT_TYPE_GLYPHLIST -> [[name, cp, ...], ...];
              otherwise {'keys': [...], 'values': [...]} of GlyphInfo rows.
  withGlyphs: optional comma-separated glyph-name filter string.
  """
  unicodeMap = {} # { glyphname: [codepoint, ...] }

  glyphnameFilter = None
  if isinstance(withGlyphs, str):
    glyphnameFilter = withGlyphs.split(',')

  if 'cmap' in tt:
    # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6cmap.html
    # Pick the unicode subtable with the highest format number.
    bestCodeSubTable = None
    bestCodeSubTableFormat = 0
    for st in tt['cmap'].tables:
      if st.platformID == 0: # 0=unicode, 1=mac, 2=(reserved), 3=microsoft
        if st.format > bestCodeSubTableFormat:
          bestCodeSubTable = st
          bestCodeSubTableFormat = st.format
    # Bug fix: a cmap with no unicode (platformID 0) subtable previously
    # crashed here with AttributeError on None.
    if bestCodeSubTable is not None:
      for cp, glyphname in bestCodeSubTable.cmap.items():
        if glyphname in unicodeMap:
          unicodeMap[glyphname].append(cp)
        else:
          unicodeMap[glyphname] = [cp]

  glyphValues = []
  glyphset = tt.getGlyphSet(preferCFF=glyphsType is GLYPHS_TYPE_CFF)

  glyphnames = tt.getGlyphOrder() if glyphnameFilter is None else glyphnameFilter

  if outputType is OUTPUT_TYPE_GLYPHLIST:
    # Cheap path: just [name, codepoints...] per glyph.
    glyphValues = []
    for glyphname in glyphnames:
      v = [glyphname]
      if glyphname in unicodeMap:
        v += unicodeMap[glyphname]
      glyphValues.append(v)
    return glyphValues

  for glyphname in glyphnames:
    unicodes = unicodeMap[glyphname] if glyphname in unicodeMap else []
    try:
      g = glyphset[glyphname]
    except KeyError:
      raise Exception('no such glyph "'+glyphname+'"')
    gi = GlyphInfo(g, glyphname, unicodes, glyphsType, glyphsTable)
    glyphValues.append(gi.structValues())

  return {
    'keys': GlyphInfo.structKeys(glyphsType),
    'values': glyphValues,
  }
+
+
def copyDictEntry(srcD, srcName, dstD, dstName):
  """Copy srcD[srcName] into dstD[dstName]; silently a no-op when srcName is
  absent (best-effort copy). The previous bare `except` also swallowed
  unrelated errors; only a missing key is expected here."""
  try:
    dstD[dstName] = srcD[srcName]
  except KeyError:
    pass
+
+
def addCFFFontInfo(tt, info, cffTable):
  """Merge Weight/version from a CFF top dict into info['name'] (created on
  demand), keyed as 'weight' and 'version'."""
  d = cffTable.rawDict

  # Reuse an existing name dict or install a fresh one.
  nameDict = info.setdefault('name', {})

  copyDictEntry(d, 'Weight', nameDict, 'weight')
  copyDictEntry(d, 'version', nameDict, 'version')
+
+
def genFontInfo(fontpath, outputType, withGlyphs=True):
  """Build the info dict for one font file.

  outputType: OUTPUT_TYPE_COMPLETE includes name/head/hhea/post/OS2 tables;
              OUTPUT_TYPE_GLYPHLIST restricts output to the glyph list.
  withGlyphs: True for all glyphs, a comma-separated name string for a
              subset, False/'' to omit glyph info (except in glyphlist mode).
  """
  tt = ttLib.TTFont(fontpath) # lazy=True
  info = {
    'id': fontpath,
  }

  # for tableName in tt.keys():
  #   print 'table', tableName

  # Decode the 'name' table into friendly keys via _NAME_IDS.
  nameDict = {}
  if 'name' in tt:
    nameDict = {}
    for rec in tt['name'].names:
      k = _NAME_IDS[rec.nameID] if rec.nameID in _NAME_IDS else ('#%d' % rec.nameID)
      nameDict[k] = rec.toUnicode()
    if 'fontId' in nameDict:
      info['id'] = nameDict['fontId']

  if 'postscriptName' in nameDict:
    info['name'] = nameDict['postscriptName']
  elif 'familyName' in nameDict:
    info['name'] = nameDict['familyName'].replace(' ', '')
    if 'subfamilyName' in nameDict:
      info['name'] += '-' + nameDict['subfamilyName'].replace(' ', '')

  if outputType is not OUTPUT_TYPE_GLYPHLIST:
    if len(nameDict):
      info['names'] = nameDict

    if 'head' in tt:
      info['head'] = sstructTableToDict(tt['head'], headFormat)

    if 'hhea' in tt:
      info['hhea'] = sstructTableToDict(tt['hhea'], hheaFormat)

    if 'post' in tt:
      info['post'] = sstructTableToDict(tt['post'], postFormat)

    if 'OS/2' in tt:
      t = tt['OS/2']
      if t.version == 1:
        info['os/2'] = sstructTableToDict(t, OS2_format_1)
      elif t.version in (2, 3, 4):
        info['os/2'] = sstructTableToDict(t, OS2_format_2)
      elif t.version == 5:
        info['os/2'] = sstructTableToDict(t, OS2_format_5)
        info['os/2']['usLowerOpticalPointSize'] /= 20
        info['os/2']['usUpperOpticalPointSize'] /= 20
      if 'panose' in info['os/2']:
        del info['os/2']['panose']

  # if 'maxp' in tt:
  #   table = tt['maxp']
  #   _, names, _ = sstruct.getformat(maxpFormat_0_5)
  #   if table.tableVersion != 0x00005000:
  #     _, names_1_0, _ = sstruct.getformat(maxpFormat_1_0_add)
  #     names += names_1_0
  #   info['maxp'] = tableNamesToDict(table, names)

  glyphsType = getGlyphsType(tt)
  glyphsTable = None
  if glyphsType is GLYPHS_TYPE_CFF:
    cff = tt["CFF "].cff
    cffDictIndex = cff.topDictIndex
    if len(cffDictIndex) > 1:
      sys.stderr.write(
        'warning: multi-font CFF table is unsupported. Only reporting first table.\n'
      )
    cffTable = cffDictIndex[0]
    if outputType is not OUTPUT_TYPE_GLYPHLIST:
      addCFFFontInfo(tt, info, cffTable)
  elif glyphsType is GLYPHS_TYPE_TT:
    glyphsTable = tt["glyf"]
  # print 'glyphs type:', glyphsType, 'flavor:', tt.flavor, 'sfntVersion:', tt.sfntVersion

  # Bug fix: `withGlyphs is not ''` compared identity with a string literal
  # (implementation-defined; a SyntaxWarning on modern Python). Equality is
  # what is meant here.
  if (withGlyphs is not False or outputType is OUTPUT_TYPE_GLYPHLIST) and withGlyphs != '':
    info['glyphs'] = genGlyphsInfo(tt, outputType, glyphsType, glyphsTable, withGlyphs)

  # sys.exit(1)

  return info
+
+
+# ————————————————————————————————————————————————————————————————————————
+# main
+
def main():
  """CLI entry point: emit a JSON description of one or more font files,
  keyed by font id, to stdout or to the -out file."""
  argparser = argparse.ArgumentParser(description='Generate JSON describing fonts')

  argparser.add_argument('-out', dest='outfile', metavar='<file>', type=str,
    help='Write JSON to <file>. Writes to stdout if not specified')

  argparser.add_argument('-pretty', dest='prettyJson', action='store_const',
    const=True, default=False,
    help='Generate pretty JSON with linebreaks and indentation')

  argparser.add_argument('-with-all-glyphs', dest='withGlyphs', action='store_const',
    const=True, default=False,
    help='Include glyph information on all glyphs.')

  argparser.add_argument('-with-glyphs', dest='withGlyphs', metavar='glyphname[,glyphname ...]',
    type=str,
    help='Include glyph information on specific glyphs')

  argparser.add_argument('-as-glyphlist', dest='asGlyphList',
    action='store_const', const=True, default=False,
    help='Only generate a list of glyphs and their unicode mappings.')

  argparser.add_argument('fontpaths', metavar='<path>', type=str, nargs='+',
    help='TrueType or OpenType font files')

  args = argparser.parse_args()

  fonts = {}
  outputType = OUTPUT_TYPE_COMPLETE
  if args.asGlyphList:
    outputType = OUTPUT_TYPE_GLYPHLIST

  n = 0
  for fontpath in args.fontpaths:
    if n > 0:
      # workaround for a bug in fontTools.misc.sstruct where it keeps a global
      # internal cache that mixes up values for different fonts.
      # NOTE(review): reload() is a Python 2 builtin; on Python 3 this would
      # need importlib.reload.
      reload(sstruct)
    font = genFontInfo(fontpath, outputType=outputType, withGlyphs=args.withGlyphs)
    fonts[font['id']] = font
    n += 1

  ostream = sys.stdout
  if args.outfile is not None:
    ostream = open(args.outfile, 'w')


  if args.prettyJson:
    json.dump(fonts, ostream, sort_keys=True, indent=2, separators=(',', ': '))
  else:
    # Compact separators keep the output small for machine consumption.
    json.dump(fonts, ostream, separators=(',', ':'))


  if ostream is not sys.stdout:
    ostream.close()
+
+
+
+# "name" table name identifiers
+_NAME_IDS = {
+ # TrueType & OpenType
+ 0: 'copyright',
+ 1: 'familyName',
+ 2: 'subfamilyName',
+ 3: 'fontId',
+ 4: 'fullName',
+ 5: 'version', # e.g. 'Version <number>.<number>'
+ 6: 'postscriptName',
+ 7: 'trademark',
+ 8: 'manufacturerName',
+ 9: 'designer',
+ 10: 'description',
+ 11: 'vendorURL',
+ 12: 'designerURL',
+ 13: 'licenseDescription',
+ 14: 'licenseURL',
+ 15: 'RESERVED',
+ 16: 'typoFamilyName',
+ 17: 'typoSubfamilyName',
+ 18: 'macCompatibleFullName', # Mac only (FOND)
+ 19: 'sampleText',
+
+ # OpenType
+ 20: 'postScriptCIDName',
+ 21: 'wwsFamilyName',
+ 22: 'wwsSubfamilyName',
+ 23: 'lightBackgoundPalette',
+ 24: 'darkBackgoundPalette',
+ 25: 'variationsPostScriptNamePrefix',
+
+ # 26-255: Reserved for future expansion
+ # 256-32767: Font-specific names (layout features and settings, variations, track names, etc.)
+}
+
# Script entry point.
if __name__ == '__main__':
  main()
diff --git a/misc/gen-glyphinfo.py b/misc/gen-glyphinfo.py
new file mode 100755
index 000000000..cec90b048
--- /dev/null
+++ b/misc/gen-glyphinfo.py
@@ -0,0 +1,245 @@
+#!/usr/bin/env python
+# encoding: utf8
+#
+# Grab http://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt
+#
+from __future__ import print_function
+import os, sys, json, re
+from argparse import ArgumentParser
+from robofab.objects.objectsRF import OpenFont
+from collections import OrderedDict
+from unicode_util import parseUnicodeDataFile
+
+
+# Regex matching "default" glyph names, like "uni2043" and "u01C5"
+uniNameRe = re.compile(r'^u(?:ni)([0-9A-F]{4,8})$')
+
+
+def unicodeForDefaultGlyphName(glyphName):
+ m = uniNameRe.match(glyphName)
+ if m is not None:
+ try:
+ return int(m.group(1), 16)
+ except:
+ pass
+ return None
+
+
+def loadAGL(filename): # -> { 2126: 'Omega', ... }
+ m = {}
+ with open(filename, 'r') as f:
+ for line in f:
+ # Omega;2126
+ # dalethatafpatah;05D3 05B2 # higher-level combinations; ignored
+ line = line.strip()
+ if len(line) > 0 and line[0] != '#':
+ name, uc = tuple([c.strip() for c in line.split(';')])
+ if uc.find(' ') == -1:
+ # it's a 1:1 mapping
+ m[int(uc, 16)] = name
+ return m
+
+
+def loadLocalNamesDB(fonts, agl, diacriticComps):
+ uc2names = None # { 2126: ['Omega', ...], ...}
+ allNames = OrderedDict() # {'Omega':True, ...}
+
+ for font in fonts:
+ _uc2names = font.getCharacterMapping() # { 2126: ['Omega', ...], ...}
+ if uc2names is None:
+ uc2names = _uc2names
+ else:
+ for uc, _names in _uc2names.iteritems():
+ names = uc2names.setdefault(uc, [])
+ for name in _names:
+ if name not in names:
+ names.append(name)
+ for g in font:
+ allNames.setdefault(g.name, True)
+
+ # agl { 2126: 'Omega', ...} -> { 'Omega': [2126, ...], ...}
+ aglName2Ucs = {}
+ for uc, name in agl.iteritems():
+ aglName2Ucs.setdefault(name, []).append(uc)
+
+ for glyphName, comp in diacriticComps.iteritems():
+ aglUCs = aglName2Ucs.get(glyphName)
+ if aglUCs is None:
+ uc = unicodeForDefaultGlyphName(glyphName)
+ if uc is not None:
+ glyphName2 = agl.get(uc)
+ if glyphName2 is not None:
+ glyphName = glyphName2
+ names = uc2names.setdefault(uc, [])
+ if glyphName not in names:
+ names.append(glyphName)
+ allNames.setdefault(glyphName, True)
+ else:
+ allNames.setdefault(glyphName, True)
+ for uc in aglUCs:
+ names = uc2names.get(uc, [])
+ if glyphName not in names:
+ names.append(glyphName)
+ uc2names[uc] = names
+
+ name2ucs = {} # { 'Omega': [2126, ...], ...}
+ for uc, names in uc2names.iteritems():
+ for name in names:
+ name2ucs.setdefault(name, set()).add(uc)
+
+ return uc2names, name2ucs, allNames
+
+
+def canonicalGlyphName(glyphName, uc2names):
+ uc = unicodeForDefaultGlyphName(glyphName)
+ if uc is not None:
+ names = uc2names.get(uc)
+ if names is not None and len(names) > 0:
+ return names[0]
+ return glyphName
+
+
+def parseGlyphComposition(composite):
+ c = composite.split("=")
+ d = c[1].split("/")
+ glyphName = d[0]
+ if len(d) == 1:
+ offset = [0, 0]
+ else:
+ offset = [int(i) for i in d[1].split(",")]
+ accentString = c[0]
+ accents = accentString.split("+")
+ baseName = accents.pop(0)
+ accentNames = [i.split(":") for i in accents]
+ return (glyphName, baseName, accentNames, offset)
+
+
+def loadGlyphCompositions(filename): # { glyphName => (baseName, accentNames, offset) }
+ compositions = OrderedDict()
+ with open(filename, 'r') as f:
+ for line in f:
+ line = line.strip()
+ if len(line) > 0 and line[0] != '#':
+ glyphName, baseName, accentNames, offset = parseGlyphComposition(line)
+ compositions[glyphName] = (baseName, accentNames, offset)
+ return compositions
+
+
+def rgbaToCSSColor(r=0, g=0, b=0, a=1):
+ R,G,B = int(r * 255), int(g * 255), int(b * 255)
+ if a == 1:
+ return '#%02x%02x%02x' % (R,G,B)
+ else:
+ return 'rgba(%d,%d,%d,%f)' % (R,G,B,a)
+
+
+def unicodeName(cp):
+ if cp is not None and len(cp.name):
+ if cp.name[0] == '<':
+ return '[' + cp.categoryName + ']'
+ elif len(cp.name):
+ return cp.name
+ return None
+
+
+def main():
+ argparser = ArgumentParser(
+ description='Generate info on name, unicodes and color mark for all glyphs')
+
+ argparser.add_argument(
+ '-ucd', dest='ucdFile', metavar='<file>', type=str,
+ help='UnicodeData.txt file from http://www.unicode.org/')
+
+ argparser.add_argument(
+ 'fontPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts to update')
+
+ args = argparser.parse_args()
+ markLibKey = 'com.typemytype.robofont.mark'
+
+ fontPaths = []
+ for fontPath in args.fontPaths:
+ fontPath = fontPath.rstrip('/ ')
+ if 'regular' or 'Regular' in fontPath:
+ fontPaths = [fontPath] + fontPaths
+ else:
+ fontPaths.append(fontPath)
+
+ fonts = [OpenFont(fontPath) for fontPath in args.fontPaths]
+
+ agl = loadAGL('src/glyphlist.txt') # { 2126: 'Omega', ... }
+ diacriticComps = loadGlyphCompositions('src/diacritics.txt')
+ uc2names, name2ucs, allNames = loadLocalNamesDB(fonts, agl, diacriticComps)
+
+ ucd = {}
+ if args.ucdFile:
+ ucd = parseUnicodeDataFile(args.ucdFile)
+
+ glyphorder = OrderedDict()
+ with open(os.path.join(os.path.dirname(args.fontPaths[0]), 'glyphorder.txt'), 'r') as f:
+ for name in f.read().splitlines():
+ if len(name) and name[0] != '#':
+ glyphorder[name] = True
+
+ for name in diacriticComps.iterkeys():
+ glyphorder[name] = True
+
+ glyphNames = glyphorder.keys()
+ visitedGlyphNames = set()
+ glyphs = []
+
+ for font in fonts:
+ for name, v in glyphorder.iteritems():
+ if name in visitedGlyphNames:
+ continue
+
+ g = None
+ ucs = []
+ try:
+ g = font[name]
+ ucs = g.unicodes
+ except:
+ ucs = name2ucs.get(name)
+ if ucs is None:
+ continue
+
+ color = None
+ if g is not None and markLibKey in g.lib:
+ # TODO: translate from (r,g,b,a) to #RRGGBB (skip A)
+ rgba = g.lib[markLibKey]
+ if isinstance(rgba, list) or isinstance(rgba, tuple):
+ color = rgbaToCSSColor(*rgba)
+ elif name in diacriticComps:
+ color = '<derived>'
+
+ # name[, unicode[, unicodeName[, color]]]
+ if len(ucs):
+ for uc in ucs:
+ ucName = unicodeName(ucd.get(uc))
+
+ if not ucName and uc >= 0xE000 and uc <= 0xF8FF:
+ ucName = '[private use %04X]' % uc
+
+ if color:
+ glyph = [name, uc, ucName, color]
+ elif ucName:
+ glyph = [name, uc, ucName]
+ else:
+ glyph = [name, uc]
+ glyphs.append(glyph)
+ else:
+ glyph = [name, None, None, color] if color else [name]
+ glyphs.append(glyph)
+
+ visitedGlyphNames.add(name)
+
+ print('{"glyphs":[')
+ prefix = ' '
+ for g in glyphs:
+ print(prefix + json.dumps(g))
+ if prefix == ' ':
+ prefix = ', '
+ print(']}')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/misc/gen-glyphorder.py b/misc/gen-glyphorder.py
new file mode 100755
index 000000000..0817e97b0
--- /dev/null
+++ b/misc/gen-glyphorder.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+# encoding: utf8
+from __future__ import print_function
+import os, plistlib
+from collections import OrderedDict
+from argparse import ArgumentParser
+
+
+def parseGlyphComposition(composite):
+ c = composite.split("=")
+ d = c[1].split("/")
+ glyphName = d[0]
+ if len(d) == 1:
+ offset = [0, 0]
+ else:
+ offset = [int(i) for i in d[1].split(",")]
+ accentString = c[0]
+ accents = accentString.split("+")
+ baseName = accents.pop(0)
+ accentNames = [i.split(":") for i in accents]
+ return (glyphName, baseName, accentNames, offset)
+
+
+def loadGlyphCompositions(filename): # { glyphName => (baseName, accentNames, offset) }
+ compositions = OrderedDict()
+ with open(filename, 'r') as f:
+ for line in f:
+ line = line.strip()
+ if len(line) > 0 and line[0] != '#':
+ glyphName, baseName, accentNames, offset = parseGlyphComposition(line)
+ compositions[glyphName] = (baseName, accentNames, offset)
+ return compositions
+
+
+def main():
+ argparser = ArgumentParser(description='Generate glyph order list from UFO files')
+ argparser.add_argument('fontPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO files')
+ args = argparser.parse_args()
+
+ glyphorderUnion = OrderedDict()
+
+ fontPaths = []
+ for fontPath in args.fontPaths:
+ if 'regular' or 'Regular' in fontPath:
+ fontPaths = [fontPath] + fontPaths
+ else:
+ fontPaths.append(fontPath)
+
+ for fontPath in fontPaths:
+ libPlist = plistlib.readPlist(os.path.join(fontPath, 'lib.plist'))
+ if 'public.glyphOrder' in libPlist:
+ for name in libPlist['public.glyphOrder']:
+ glyphorderUnion[name] = True
+
+ # incorporate src/diacritics.txt
+ # diacriticComps = loadGlyphCompositions('src/diacritics.txt')
+ # for glyphName in diacriticComps.iterkeys():
+ # glyphorderUnion[glyphName] = True
+
+ glyphorderUnionNames = glyphorderUnion.keys()
+ print('\n'.join(glyphorderUnionNames))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/misc/gen-kern.py b/misc/gen-kern.py
new file mode 100644
index 000000000..e5a4c4875
--- /dev/null
+++ b/misc/gen-kern.py
@@ -0,0 +1,37 @@
+
+def parseFeaList(s):
+ v = []
+ for e in s.split(' '):
+ if e.find('-') != -1:
+ (a,b) = e.split('-')
+ #print 'split: %s, %s' % (a,chr(ord(a)+1))
+ i = ord(a)
+ end = ord(b)+1
+ while i < end:
+ v.append(chr(i))
+ i += 1
+ else:
+ v.append(e)
+ return v
+
+UC_ROMAN = parseFeaList('A-Z AE AEacute Aacute Abreve Acircumflex Adieresis Agrave Alpha Alphatonos Amacron Aogonek Aogonek.NAV Aring Aringacute Atilde Beta Cacute Ccaron Ccedilla Ccircumflex Chi Dcaron Dcroat Delta Eacute Ebreve Ecaron Ecircumflex Edieresis Edotaccent Egrave Emacron Eng Eogonek Eogonek.NAV Epsilon Epsilontonos Eta Etatonos Eth Gamma Gbreve Gcircumflex Gcommaaccent Germandbls Hbar Hcircumflex IJ Iacute Ibreve Icircumflex Idieresis Igrave Imacron Iogonek Iota Iotadieresis Iotatonos Itilde Jcircumflex Kappa Kcommaaccent Lacute Lambda Lcaron Lcommaaccent Ldot Lslash Nacute Ncaron Ncommaaccent Ntilde Nu OE Oacute Obreve Ocircumflex Odieresis Ograve Ohungarumlaut Omacron Omega Omegatonos Omicron Omicrontonos Oogonek Oogonek.NAV Oslash Oslashacute Otilde Phi Pi Psi Racute Rcaron Rcommaaccent Rho Sacute Scaron Scedilla Scircumflex Sigma Tau Tbar Tcaron Theta Thorn Uacute Ubreve Ucircumflex Udieresis Ugrave Uhungarumlaut Umacron Uogonek Upsilon Upsilondieresis Upsilontonos Uring Utilde Wacute Wcircumflex Wdieresis Wgrave Xi Yacute Ycircumflex Ydieresis Ygrave Zacute Zcaron Zdotaccent Zeta ampersand uni010A uni0120 uni0162 uni0218 uni021A uni037F')
+LC_ROMAN = parseFeaList('a-z ae aeacute aacute abreve acircumflex adieresis agrave alpha alphatonos amacron aogonek aogonek.NAV aring aringacute atilde beta cacute ccaron ccedilla ccircumflex chi dcaron dcroat delta eacute ebreve ecaron ecircumflex edieresis edotaccent egrave emacron eng eogonek eogonek.NAV epsilon epsilontonos eta etatonos eth gamma gbreve gcircumflex gcommaaccent germandbls hbar hcircumflex ij iacute ibreve icircumflex idieresis igrave imacron iogonek iota iotadieresis iotatonos itilde jcircumflex kappa kcommaaccent lacute lambda lcaron lcommaaccent ldot lslash nacute ncaron ncommaaccent ntilde nu oe oacute obreve ocircumflex odieresis ograve ohungarumlaut omacron omega omegatonos omicron omicrontonos oogonek oogonek.NAV oslash oslashacute otilde phi pi psi racute rcaron rcommaaccent rho sacute scaron scedilla scircumflex sigma tau tbar tcaron theta thorn uacute ubreve ucircumflex udieresis ugrave uhungarumlaut umacron uogonek upsilon upsilondieresis upsilontonos uring utilde wacute wcircumflex wdieresis wgrave xi yacute ycircumflex ydieresis ygrave zacute zcaron zdotaccent zeta ampersand uni010B uni0121 uni0163 uni0219 uni021B uni03F3')
+
+UC_AF = parseFeaList('A-F')
+LC_AF = parseFeaList('a-f')
+
+LNUM = parseFeaList('zero one two three four five six seven eight nine')
+
+HEXNUM = LNUM + UC_AF + LC_AF
+ALL = UC_ROMAN + LC_ROMAN + LNUM
+
+glyphs = HEXNUM
+for g in glyphs:
+ print ' <key>%s</key><dict>' % g
+ for g in glyphs:
+ print ' <key>%s</key><integer>-256</integer>' % g
+ print ' </dict>'
+
+# print ', '.join(LC_ROMAN)
+
+
diff --git a/misc/gen-num-pairs.js b/misc/gen-num-pairs.js
new file mode 100644
index 000000000..9dbb92090
--- /dev/null
+++ b/misc/gen-num-pairs.js
@@ -0,0 +1,10 @@
+
+const chars = '0 1 2 3 4 5 6 7 8 9 A B C D E F a b c d e f'.split(' ')
+
+for (let c1 of chars) {
+ let s = []
+ for (let c2 of chars) {
+ s.push(c1 + c2)
+ }
+ console.log(s.join(' '))
+}
diff --git a/misc/glyf-props.py b/misc/glyf-props.py
new file mode 100755
index 000000000..8783a422d
--- /dev/null
+++ b/misc/glyf-props.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+# encoding: utf8
+from __future__ import print_function
+import os, sys
+from argparse import ArgumentParser
+from robofab.objects.objectsRF import OpenFont
+
+
+dryRun = False
+
+def renameProps(font, renames):
+ for g in font:
+ for currname, newname in renames:
+ if currname in g.lib:
+ if newname in g.lib:
+ raise Exception('property %r already exist in glyph %r' % (newname, g))
+ g.lib[newname] = g.lib[currname]
+ del g.lib[currname]
+
+
+def main():
+ argparser = ArgumentParser(
+ description='Operate on UFO glyf "lib" properties')
+
+ argparser.add_argument(
+ '-dry', dest='dryRun', action='store_const', const=True, default=False,
+ help='Do not modify anything, but instead just print what would happen.')
+
+ argparser.add_argument(
+ '-m', dest='renameProps', metavar='<currentName>=<newName>[,...]', type=str,
+ help='Rename properties')
+
+ argparser.add_argument(
+ 'fontPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts to update')
+
+ args = argparser.parse_args()
+ dryRun = args.dryRun
+
+ renames = []
+ if args.renameProps:
+ renames = [tuple(s.split('=')) for s in args.renameProps.split(',')]
+ # TODO: verify data structure
+ print('renaming properties:')
+ for rename in renames:
+ print(' %r => %r' % rename)
+
+ # Strip trailing slashes from font paths and iterate
+ for fontPath in [s.rstrip('/ ') for s in args.fontPaths]:
+ font = OpenFont(fontPath)
+
+ if len(renames):
+ print('Renaming properties in %s' % fontPath)
+ renameProps(font, renames)
+
+ if dryRun:
+ print('Saving changes to %s (dry run)' % fontPath)
+ if not dryRun:
+ print('Saving changes to %s' % fontPath)
+ font.save()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/misc/mac-tmp-disk-mount.sh b/misc/mac-tmp-disk-mount.sh
new file mode 100755
index 000000000..f752c54f6
--- /dev/null
+++ b/misc/mac-tmp-disk-mount.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -e
+cd "$(dirname "$0")/.."
+
+# Create if needed
+if [[ ! -f build/tmp.sparseimage ]]; then
+ echo "Creating sparse disk image with case-sensitive file system build/tmp.sparseimage"
+ mkdir -p build
+ hdiutil create build/tmp.sparseimage \
+ -size 1g \
+ -type SPARSE \
+ -fs JHFS+X \
+ -volname tmp
+fi
+
+# Mount if needed
+if ! (diskutil info build/tmp >/dev/null); then
+ echo "Mounting sparse disk image with case-sensitive file system at build/tmp"
+ hdiutil attach build/tmp.sparseimage \
+ -readwrite \
+ -mountpoint "$(pwd)/build/tmp" \
+ -nobrowse \
+ -noautoopen \
+ -noidmereveal
+fi
diff --git a/misc/mac-tmp-disk-unmount.sh b/misc/mac-tmp-disk-unmount.sh
new file mode 100755
index 000000000..d0dcff925
--- /dev/null
+++ b/misc/mac-tmp-disk-unmount.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+set -e
+cd "$(dirname "$0")/.."
+
+diskutil unmount build/tmp
diff --git a/misc/notify b/misc/notify
new file mode 100755
index 000000000..ab10e4e8e
--- /dev/null
+++ b/misc/notify
@@ -0,0 +1,41 @@
+#!/bin/bash
+#
+# Shows macOS desktop notifications when a command completes.
+# Depending on exit status of the command, a different notification message is shown.
+#
+# Examples:
+# misc/notify make -j 8 >/dev/null
+# Make all font styles in all formats without printing detailed messages
+#
+# misc/notify make Regular
+# Make the regular style in all formats
+#
+
+HAS_NOTIFIER=true
+if ! (which terminal-notifier >/dev/null); then
+ HAS_NOTIFIER=false
+ echo "$0: terminal-notifier not found in PATH (will not notify)" >&2
+ echo "$0: You can install through: brew install terminal-notifier"
+fi
+
+CMDS="$@"
+"$@"
+STATUS=$?
+
+if $HAS_NOTIFIER; then
+ if [[ $STATUS -eq 0 ]]; then
+ terminal-notifier \
+ -title "$1 ✅" \
+ -message "$CMDS" \
+ -activate com.apple.Terminal \
+ -timeout 8 >/dev/null &
+ else
+ terminal-notifier \
+ -title "$1 failed ❌" \
+ -message "$CMDS => $STATUS" \
+ -activate com.apple.Terminal \
+ -timeout 20 >/dev/null &
+ fi
+fi
+
+exit $STATUS
diff --git a/misc/pylib/fontbuild/Build.py b/misc/pylib/fontbuild/Build.py
new file mode 100644
index 000000000..5046f9f91
--- /dev/null
+++ b/misc/pylib/fontbuild/Build.py
@@ -0,0 +1,300 @@
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import ConfigParser
+import os
+import sys
+
+from booleanOperations import BooleanOperationManager
+from cu2qu.ufo import fonts_to_quadratic
+from fontTools.misc.transform import Transform
+from robofab.world import OpenFont
+from ufo2ft import compileOTF, compileTTF
+
+from fontbuild.decomposeGlyph import decomposeGlyph
+from fontbuild.features import readFeatureFile, writeFeatureFile
+from fontbuild.generateGlyph import generateGlyph
+from fontbuild.instanceNames import setInfoRF
+from fontbuild.italics import italicizeGlyph
+from fontbuild.markFeature import RobotoFeatureCompiler, RobotoKernWriter
+from fontbuild.mitreGlyph import mitreGlyph
+from fontbuild.mix import Mix,Master,narrowFLGlyph
+
+
+class FontProject:
+
+ def __init__(self, basefont, basedir, configfile, buildTag=''):
+ self.basefont = basefont
+ self.basedir = basedir
+ self.config = ConfigParser.RawConfigParser()
+ self.configfile = os.path.join(self.basedir, configfile)
+ self.config.read(self.configfile)
+ self.buildTag = buildTag
+
+ self.diacriticList = [
+ line.strip() for line in self.openResource("diacriticfile")
+ if not line.startswith("#")]
+ self.adobeGlyphList = dict(
+ line.split(";") for line in self.openResource("agl_glyphlistfile")
+ if not line.startswith("#"))
+ self.glyphOrder = self.openResource("glyphorder")
+
+ # map exceptional glyph names in Roboto to names in the AGL
+ roboNames = (
+ ('Obar', 'Ocenteredtilde'), ('obar', 'obarred'),
+ ('eturn', 'eturned'), ('Iota1', 'Iotaafrican'))
+ for roboName, aglName in roboNames:
+ self.adobeGlyphList[roboName] = self.adobeGlyphList[aglName]
+
+ self.builddir = "out"
+ self.decompose = self.config.get("glyphs","decompose").split()
+ self.predecompose = self.config.get("glyphs","predecompose").split()
+ self.lessItalic = self.config.get("glyphs","lessitalic").split()
+ self.deleteList = self.config.get("glyphs","delete").split()
+ self.noItalic = self.config.get("glyphs","noitalic").split()
+
+ self.buildOTF = False
+ self.compatible = False
+ self.generatedFonts = []
+
+ def openResource(self, name):
+ with open(os.path.join(
+ self.basedir, self.config.get("res", name))) as resourceFile:
+ resource = resourceFile.read()
+ return resource.splitlines()
+
+ def generateOutputPath(self, font, ext):
+ family = font.info.familyName.replace(" ", "")
+ style = font.info.styleName.replace(" ", "")
+ path = os.path.join(self.basedir, self.builddir, family + ext.upper())
+ if not os.path.exists(path):
+ os.makedirs(path)
+ return os.path.join(path, "%s-%s.%s" % (family, style, ext))
+
+ def generateFont(self, mix, names, italic=False, swapSuffixes=None, stemWidth=185,
+ italicMeanYCenter=-825, italicNarrowAmount=1):
+
+ n = names.split("/")
+ log("---------------------\n%s %s\n----------------------" %(n[0],n[1]))
+ log(">> Mixing masters")
+ if isinstance( mix, Mix):
+ f = mix.generateFont(self.basefont)
+ else:
+ f = mix.copy()
+
+ if italic == True:
+ log(">> Italicizing")
+ i = 0
+ for g in f:
+ i += 1
+ if i % 10 == 0: print g.name
+
+ if g.name == "uniFFFD":
+ continue
+
+ decomposeGlyph(f, g)
+ removeGlyphOverlap(g)
+
+ if g.name in self.lessItalic:
+ italicizeGlyph(f, g, 9, stemWidth=stemWidth,
+ meanYCenter=italicMeanYCenter,
+ narrowAmount=italicNarrowAmount)
+ elif g.name not in self.noItalic:
+ italicizeGlyph(f, g, 10, stemWidth=stemWidth,
+ meanYCenter=italicMeanYCenter,
+ narrowAmount=italicNarrowAmount)
+ if g.width != 0:
+ g.width += 10
+
+ # set the oblique flag in fsSelection
+ f.info.openTypeOS2Selection.append(9)
+
+ if swapSuffixes != None:
+ for swap in swapSuffixes:
+ swapList = [g.name for g in f if g.name.endswith(swap)]
+ for gname in swapList:
+ print gname
+ swapContours(f, gname.replace(swap,""), gname)
+ for gname in self.predecompose:
+ if f.has_key(gname):
+ decomposeGlyph(f, f[gname])
+
+ log(">> Generating glyphs")
+ generateGlyphs(f, self.diacriticList, self.adobeGlyphList)
+ log(">> Copying features")
+ readFeatureFile(f, self.basefont.features.text)
+ log(">> Decomposing")
+ for g in f:
+ if len(g.components) > 0:
+ decomposeGlyph(f, g)
+ # for gname in self.decompose:
+ # if f.has_key(gname):
+ # decomposeGlyph(f, f[gname])
+
+ copyrightHolderName = ''
+ if self.config.has_option('main', 'copyrightHolderName'):
+ copyrightHolderName = self.config.get('main', 'copyrightHolderName')
+
+ def getcfg(name, fallback=''):
+ if self.config.has_option('main', name):
+ return self.config.get('main', name)
+ else:
+ return fallback
+
+ setInfoRF(f, n, {
+ 'foundry': getcfg('foundry'),
+ 'foundryURL': getcfg('foundryURL'),
+ 'designer': getcfg('designer'),
+ 'copyrightHolderName': getcfg('copyrightHolderName'),
+ 'build': self.buildTag,
+ 'version': getcfg('version'),
+ 'license': getcfg('license'),
+ 'licenseURL': getcfg('licenseURL'),
+ })
+
+ if not self.compatible:
+ cleanCurves(f)
+ deleteGlyphs(f, self.deleteList)
+
+ log(">> Generating font files")
+ ufoName = self.generateOutputPath(f, "ufo")
+ f.save(ufoName)
+ self.generatedFonts.append(ufoName)
+
+ if self.buildOTF:
+ log(">> Generating OTF file")
+ newFont = OpenFont(ufoName)
+ otfName = self.generateOutputPath(f, "otf")
+ saveOTF(newFont, otfName, self.glyphOrder)
+
+ def generateTTFs(self):
+ """Build TTF for each font generated since last call to generateTTFs."""
+
+ fonts = [OpenFont(ufo) for ufo in self.generatedFonts]
+ self.generatedFonts = []
+
+ log(">> Converting curves to quadratic")
+ # using a slightly higher max error (e.g. 0.0025 em), dots will have
+ # fewer control points and look noticeably different
+ max_err = 0.001
+ if self.compatible:
+ fonts_to_quadratic(fonts, max_err_em=max_err, dump_stats=True, reverse_direction=True)
+ else:
+ for font in fonts:
+ fonts_to_quadratic([font], max_err_em=max_err, dump_stats=True, reverse_direction=True)
+
+ log(">> Generating TTF files")
+ for font in fonts:
+ ttfName = self.generateOutputPath(font, "ttf")
+ log(os.path.basename(ttfName))
+ saveOTF(font, ttfName, self.glyphOrder, truetype=True)
+
+
+def transformGlyphMembers(g, m):
+ g.width = int(g.width * m.a)
+ g.Transform(m)
+ for a in g.anchors:
+ p = Point(a.p)
+ p.Transform(m)
+ a.p = p
+ for c in g.components:
+ # Assumes that components have also been individually transformed
+ p = Point(0,0)
+ d = Point(c.deltas[0])
+ d.Transform(m)
+ p.Transform(m)
+ d1 = d - p
+ c.deltas[0].x = d1.x
+ c.deltas[0].y = d1.y
+ s = Point(c.scale)
+ s.Transform(m)
+ #c.scale = s
+
+
+def swapContours(f,gName1,gName2):
+ try:
+ g1 = f[gName1]
+ g2 = f[gName2]
+ except KeyError:
+ log("swapGlyphs failed for %s %s" % (gName1, gName2))
+ return
+ g3 = g1.copy()
+
+ while g1.contours:
+ g1.removeContour(0)
+ for contour in g2.contours:
+ g1.appendContour(contour)
+ g1.width = g2.width
+
+ while g2.contours:
+ g2.removeContour(0)
+ for contour in g3.contours:
+ g2.appendContour(contour)
+ g2.width = g3.width
+
+
+def log(msg):
+ print msg
+
+
+def generateGlyphs(f, glyphNames, glyphList={}):
+ log(">> Generating diacritics")
+ glyphnames = [gname for gname in glyphNames if not gname.startswith("#") and gname != ""]
+
+ for glyphName in glyphNames:
+ generateGlyph(f, glyphName, glyphList)
+
+def cleanCurves(f):
+ log(">> Removing overlaps")
+ for g in f:
+ removeGlyphOverlap(g)
+
+ # log(">> Mitring sharp corners")
+ # for g in f:
+ # mitreGlyph(g, 3., .7)
+
+ # log(">> Converting curves to quadratic")
+ # for g in f:
+ # glyphCurvesToQuadratic(g)
+
+
+def deleteGlyphs(f, deleteList):
+ for name in deleteList:
+ if f.has_key(name):
+ f.removeGlyph(name)
+
+
+def removeGlyphOverlap(glyph):
+ """Remove overlaps in contours from a glyph."""
+ #TODO(jamesgk) verify overlaps exist first, as per library's recommendation
+ manager = BooleanOperationManager()
+ contours = glyph.contours
+ glyph.clearContours()
+ manager.union(contours, glyph.getPointPen())
+
+
+def saveOTF(font, destFile, glyphOrder, truetype=False):
+ """Save a RoboFab font as an OTF binary using ufo2fdk."""
+
+ if truetype:
+ otf = compileTTF(font, featureCompilerClass=RobotoFeatureCompiler,
+ kernWriter=RobotoKernWriter, glyphOrder=glyphOrder,
+ convertCubics=False,
+ useProductionNames=False)
+ else:
+ otf = compileOTF(font, featureCompilerClass=RobotoFeatureCompiler,
+ kernWriter=RobotoKernWriter, glyphOrder=glyphOrder,
+ useProductionNames=False)
+ otf.save(destFile)
diff --git a/misc/pylib/fontbuild/LICENSE b/misc/pylib/fontbuild/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/misc/pylib/fontbuild/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/misc/pylib/fontbuild/ORIGIN.txt b/misc/pylib/fontbuild/ORIGIN.txt
new file mode 100644
index 000000000..1b0a3cf79
--- /dev/null
+++ b/misc/pylib/fontbuild/ORIGIN.txt
@@ -0,0 +1 @@
+https://github.com/google/roboto/tree/master/scripts/lib/fontbuild
diff --git a/misc/pylib/fontbuild/__init__.py b/misc/pylib/fontbuild/__init__.py
new file mode 100644
index 000000000..4ed720308
--- /dev/null
+++ b/misc/pylib/fontbuild/__init__.py
@@ -0,0 +1,6 @@
+"""
+fontbuild
+
+A collection of font production tools written for FontLab
+"""
+version = "0.1" \ No newline at end of file
diff --git a/misc/pylib/fontbuild/alignpoints.py b/misc/pylib/fontbuild/alignpoints.py
new file mode 100644
index 000000000..f49f24d95
--- /dev/null
+++ b/misc/pylib/fontbuild/alignpoints.py
@@ -0,0 +1,173 @@
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import math
+
+import numpy as np
+from numpy.linalg import lstsq
+
+
+def alignCorners(glyph, va, subsegments):
+ out = va.copy()
+ # for i,c in enumerate(subsegments):
+ # segmentCount = len(glyph.contours[i].segments) - 1
+ # n = len(c)
+ # for j,s in enumerate(c):
+ # if j < segmentCount:
+ # seg = glyph.contours[i].segments[j]
+ # if seg.type == "line":
+ # subIndex = subsegmentIndex(i,j,subsegments)
+ # out[subIndex] = alignPoints(va[subIndex])
+
+ for i,c in enumerate(subsegments):
+ segmentCount = len(glyph.contours[i].segments)
+ n = len(c)
+ for j,s in enumerate(c):
+ if j < segmentCount - 1:
+ segType = glyph.contours[i].segments[j].type
+ segnextType = glyph.contours[i].segments[j+1].type
+ next = j+1
+ elif j == segmentCount -1 and s[1] > 3:
+ segType = glyph.contours[i].segments[j].type
+ segNextType = "line"
+ next = j+1
+ elif j == segmentCount:
+ segType = "line"
+ segnextType = glyph.contours[i].segments[1].type
+ if glyph.name == "J":
+ print s[1]
+ print segnextType
+ next = 1
+ else:
+ break
+ if segType == "line" and segnextType == "line":
+ subIndex = subsegmentIndex(i,j,subsegments)
+ pts = va[subIndex]
+ ptsnext = va[subsegmentIndex(i,next,subsegments)]
+ # out[subIndex[-1]] = (out[subIndex[-1]] - 500) * 3 + 500 #findCorner(pts, ptsnext)
+ # print subIndex[-1], subIndex, subsegmentIndex(i,next,subsegments)
+ try:
+ out[subIndex[-1]] = findCorner(pts, ptsnext)
+ except:
+ pass
+ # print glyph.name, "Can't find corner: parallel lines"
+ return out
+
+
+def subsegmentIndex(contourIndex, segmentIndex, subsegments):
+    """Return the flat point indices belonging to one original segment.
+
+    subsegments holds, per contour, [cumulativeIndex, count] pairs; the
+    last pair's first entry equals the contour's total point count.
+    Indices wrap around the contour and are offset by the point counts
+    of all preceding contours.
+    """
+    # This whole thing is so dumb. Need a better data model for subsegments
+
+    # points of all contours before this one shift the flat indices
+    contourOffset = 0
+    for i,c in enumerate(subsegments):
+        if i == contourIndex:
+            break
+        contourOffset += c[-1][0]
+    n = subsegments[contourIndex][-1][0]
+    # print contourIndex, contourOffset, n
+    startIndex = subsegments[contourIndex][segmentIndex-1][0]
+    segmentCount = subsegments[contourIndex][segmentIndex][1]
+    # NOTE(review): endIndex is computed but never used here; it mirrors
+    # the bookkeeping in SubsegmentsToCurvesPen._curveToOne.
+    endIndex = (startIndex + segmentCount + 1) % (n)
+
+    indices = np.array([(startIndex + i) % (n) + contourOffset for i in range(segmentCount + 1)])
+    return indices
+
+
+def alignPoints(pts, start=None, end=None):
+ if start == None or end == None:
+ start, end = fitLine(pts)
+ out = pts.copy()
+ for i,p in enumerate(pts):
+ out[i] = nearestPoint(start, end, p)
+ return out
+
+
+def findCorner(pp, nn):
+    """Intersect the lines fitted through point runs pp and nn.
+
+    Asserts when either run is too short to fit (< 4 points) or when the
+    two fitted lines are (near-)parallel; alignCorners catches this and
+    keeps the original corner point.
+    """
+    if len(pp) < 4 or len(nn) < 4:
+        assert 0, "line too short to fit"
+    pStart,pEnd = fitLine(pp)
+    nStart,nEnd = fitLine(nn)
+    prev = pEnd - pStart
+    next = nEnd - nStart
+    # print int(np.arctan2(prev[1],prev[0]) / math.pi * 180),
+    # print int(np.arctan2(next[1],next[0]) / math.pi * 180)
+    # if lines are parallel, return simple average of end and start points
+    if np.dot(prev / np.linalg.norm(prev),
+              next / np.linalg.norm(next)) > .999999:
+        # print "parallel lines", np.arctan2(prev[1],prev[0]), np.arctan2(next[1],next[0])
+        # print prev, next
+        assert 0, "parallel lines"
+        # NOTE(review): "glyph" is not defined in this scope, so this
+        # branch would raise NameError if it ever ran; per the comment it
+        # only exists as a python2 -OO workaround and is believed dead.
+        if glyph.name is None:
+            # Never happens, but here to fix a bug in Python 2.7 with -OO
+            print ''
+    return lineIntersect(pStart, pEnd, nStart, nEnd)
+
+
+def lineIntersect((x1,y1),(x2,y2),(x3,y3),(x4,y4)):
+    """Intersection point of line (x1,y1)-(x2,y2) with (x3,y3)-(x4,y4).
+
+    Standard determinant formulation (Python 2 tuple-parameter syntax).
+    For parallel lines det == 0: a warning is printed and the divisions
+    below then raise ZeroDivisionError.
+    """
+    x12 = x1 - x2
+    x34 = x3 - x4
+    y12 = y1 - y2
+    y34 = y3 - y4
+
+    # determinant of the direction vectors; zero means parallel
+    det = x12 * y34 - y12 * x34
+    if det == 0:
+        print "parallel!"
+
+    a = x1 * y2 - y1 * x2
+    b = x3 * y4 - y3 * x4
+
+    x = (a * x34 - b * x12) / det
+    y = (a * y34 - b * y12) / det
+
+    return (x,y)
+
+
+def fitLineLSQ(pts):
+    "returns a line fit with least squares. Fails for vertical lines"
+    # solves [x 1] . [slope, intercept]^T = y for all points
+    # NOTE(review): not referenced elsewhere in this module; fitLine
+    # below is the variant actually used.
+    n = len(pts)
+    a = np.ones((n,2))
+    for i in range(n):
+        a[i,0] = pts[i,0]
+    line = lstsq(a,pts[:,1])[0]
+    return line
+
+
+def fitLine(pts):
+    """returns a start vector and direction vector
+    Assumes points segments that already form a somewhat smooth line
+    """
+    n = len(pts)
+    if n < 1:
+        return (0,0),(0,0)
+    # unit direction of each consecutive point pair
+    a = np.zeros((n-1,2))
+    for i in range(n-1):
+        v = pts[i] - pts[i+1]
+        a[i] = v / np.linalg.norm(v)
+    # average while dropping the first and last samples
+    # NOTE(review): for n <= 3 the [1:-1] slices are empty and np.mean
+    # yields nan; findCorner guards its inputs with len >= 4 -- confirm
+    # other callers do too.
+    direction = np.mean(a[1:-1], axis=0)
+    start = np.mean(pts[1:-1], axis=0)
+    return start, start+direction
+
+
+def nearestPoint(a,b,c):
+    "nearest point to point c on line a_b"
+    # orthogonal projection of c onto the infinite line through a and b
+    magnitude = np.linalg.norm(b-a)
+    if magnitude == 0:
+        raise Exception, "Line segment cannot be 0 length"
+    return (b-a) * np.dot((c-a) / magnitude, (b-a) / magnitude) + a
+
+
+# pts = np.array([[1,1],[2,2],[3,3],[4,4]])
+# pts2 = np.array([[1,0],[2,0],[3,0],[4,0]])
+# print alignPoints(pts2, start = pts[0], end = pts[0]+pts[0])
+# # print findCorner(pts,pts2)
diff --git a/misc/pylib/fontbuild/anchors.py b/misc/pylib/fontbuild/anchors.py
new file mode 100644
index 000000000..a617b2f51
--- /dev/null
+++ b/misc/pylib/fontbuild/anchors.py
@@ -0,0 +1,77 @@
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def getGlyph(gname, font):
+ return font[gname] if font.has_key(gname) else None
+
+
+def getComponentByName(f, g, componentName):
+    # Return the first component of glyph g whose base glyph is
+    # componentName; falls through to an implicit None when absent.
+    for c in g.components:
+        if c.baseGlyph == componentName:
+            return c
+
+def getAnchorByName(g,anchorName):
+ for a in g.anchors:
+ if a.name == anchorName:
+ return a
+
+def moveMarkAnchors(f, g, anchorName, accentName, dx, dy):
+    """Reposition g's mark anchors after an accent has been attached.
+
+    Copies the accent's mkmktop_acc / mkmkbottom_acc anchor, shifted by
+    (dx, dy), onto g as "top" / "bottom" so further marks can stack.
+    Anchor names other than top/bottom/bottomu are ignored.
+    """
+    if "top"==anchorName:
+        anchors = f[accentName].anchors
+        for anchor in anchors:
+            if "mkmktop_acc" == anchor.name:
+                # replace any existing "top" anchor on the glyph
+                for anc in g.anchors:
+                    if anc.name == "top":
+                        g.removeAnchor(anc)
+                        break
+                g.appendAnchor("top", (anchor.x + int(dx), anchor.y + int(dy)))
+
+    elif anchorName in ["bottom", "bottomu"]:
+        anchors = f[accentName].anchors
+        for anchor in anchors:
+            if "mkmkbottom_acc" == anchor.name:
+                # replace any existing "bottom" anchor on the glyph
+                for anc in g.anchors:
+                    if anc.name == "bottom":
+                        g.removeAnchor(anc)
+                        break
+                x = anchor.x + int(dx)
+                # horizontally align with the accent's "top" anchor if present
+                for anc in anchors:
+                    if "top" == anc.name:
+                        x = anc.x + int(dx)
+                g.appendAnchor("bottom", (x, anchor.y + int(dy)))
+
+
+def alignComponentToAnchor(f,glyphName,baseName,accentName,anchorName):
+    """Offset glyphName's accentName component so that the accent's
+    "_<anchorName>" mark anchor lands on baseName's anchorName anchor.
+
+    Returns silently when any glyph or anchor is missing.
+    """
+    g = getGlyph(glyphName,f)
+    base = getGlyph(baseName,f)
+    accent = getGlyph(accentName,f)
+    if g == None or base == None or accent == None:
+        return
+    a1 = getAnchorByName(base,anchorName)
+    a2 = getAnchorByName(accent,"_" + anchorName)
+    if a1 == None or a2 == None:
+        return
+    offset = (a1.x - a2.x, a1.y - a2.y)
+    c = getComponentByName(f, g, accentName)
+    # NOTE(review): c is None when g has no such component; the next line
+    # would then raise AttributeError -- confirm callers guarantee it.
+    c.offset = offset
+    moveMarkAnchors(f, g, anchorName, accentName, offset[0], offset[1])
+
+
+def alignComponentsToAnchors(f,glyphName,baseName,accentNames):
+    """Align each (accentName, anchorName) pair of accentNames onto
+    baseName within glyphName.  Single-element entries are skipped
+    (no anchor name given).
+    """
+    for a in accentNames:
+        if len(a) == 1:
+            continue
+        alignComponentToAnchor(f,glyphName,baseName,a[0],a[1])
+
diff --git a/misc/pylib/fontbuild/convertCurves.py b/misc/pylib/fontbuild/convertCurves.py
new file mode 100644
index 000000000..b6efd5ca2
--- /dev/null
+++ b/misc/pylib/fontbuild/convertCurves.py
@@ -0,0 +1,102 @@
+#! /usr/bin/env python
+#
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Converts a cubic bezier curve to a quadratic spline with
+exactly two off curve points.
+
+"""
+
+import numpy
+from numpy import array,cross,dot
+from fontTools.misc import bezierTools
+from robofab.objects.objectsRF import RSegment
+
+def replaceSegments(contour, segments):
+    # Replace a contour's segments wholesale: remove every existing
+    # segment, then re-append the given ones with type, points and
+    # smooth flag preserved.
+    while len(contour):
+        contour.removeSegment(0)
+    for s in segments:
+        contour.appendSegment(s.type, [(p.x, p.y) for p in s.points], s.smooth)
+
+def calcIntersect(a,b,c,d):
+    """Intersection of line a-b with line c-d (numpy vectors).
+
+    Parallel lines raise FloatingPointError via numpy.seterr.
+    NOTE(review): the seterr call changes numpy error handling
+    process-wide as a side effect of calling this function.
+    """
+    numpy.seterr(all='raise')
+    e = b-a
+    f = d-c
+    # p is e rotated 90 degrees; dot(f, p) == 0 means parallel lines
+    p = array([-e[1], e[0]])
+    try:
+        h = dot((a-c),p) / dot(f,p)
+    except:
+        # log the offending points before re-raising
+        print a,b,c,d
+        raise
+    return c + dot(f,h)
+
+def simpleConvertToQuadratic(p0,p1,p2,p3):
+    # NOTE(review): appears unfinished -- it computes the off-curve point
+    # but returns None, and nothing visible in this module calls it.
+    # Also, array(i.x, i.y) passes i.y as numpy's dtype argument;
+    # presumably array([i.x, i.y]) was intended.
+    p = [array(i.x,i.y) for i in [p0,p1,p2,p3]]
+    off = calcIntersect(p[0],p[1],p[2],p[3])
+
+# correction factor applied to the off-curve points in convertToQuadratic;
+# currently 0, i.e. disabled
+# OFFCURVE_VECTOR_CORRECTION = -.015
+OFFCURVE_VECTOR_CORRECTION = 0
+
+def convertToQuadratic(p0,p1,p2,p3):
+    """Approximate one cubic segment with two quadratic halves.
+
+    Splits the cubic at t=.5 and derives one off-curve point per half by
+    intersecting that half's end tangents.  Returns (on1, off1, off2, on2),
+    or the original four points when an intersection fails (parallel
+    tangents).
+    """
+    # TODO: test for accuracy and subdivide further if needed
+    p = [(i.x,i.y) for i in [p0,p1,p2,p3]]
+    # if p[0][0] == p[1][0] and p[0][0] == p[2][0] and p[0][0] == p[2][0] and p[0][0] == p[3][0]:
+    #     return (p[0],p[1],p[2],p[3])
+    # if p[0][1] == p[1][1] and p[0][1] == p[2][1] and p[0][1] == p[2][1] and p[0][1] == p[3][1]:
+    #     return (p[0],p[1],p[2],p[3])
+    seg1,seg2 = bezierTools.splitCubicAtT(p[0], p[1], p[2], p[3], .5)
+    pts1 = [array([i[0], i[1]]) for i in seg1]
+    pts2 = [array([i[0], i[1]]) for i in seg2]
+    on1 = seg1[0]
+    on2 = seg2[3]
+    try:
+        off1 = calcIntersect(pts1[0], pts1[1], pts1[2], pts1[3])
+        off2 = calcIntersect(pts2[0], pts2[1], pts2[2], pts2[3])
+    except:
+        # parallel tangents: keep the cubic's own points unchanged
+        return (p[0],p[1],p[2],p[3])
+    # OFFCURVE_VECTOR_CORRECTION is 0, so these are currently no-ops
+    off1 = (on1 - off1) * OFFCURVE_VECTOR_CORRECTION + off1
+    off2 = (on2 - off2) * OFFCURVE_VECTOR_CORRECTION + off2
+    return (on1,off1,off2,on2)
+
+def cubicSegmentToQuadratic(c,sid):
+    """Convert the "curve" segment at index sid of contour c into a
+    'qcurve' RSegment.  Prints a warning and returns None for any other
+    segment type.  Coordinates are truncated to int.
+    """
+
+    segment = c[sid]
+    if (segment.type != "curve"):
+        print "Segment type not curve"
+        return
+
+    #pSegment,junk = getPrevAnchor(c,sid)
+    pSegment = c[sid-1] #assumes that a curve type will always be proceeded by another point on the same contour
+    points = convertToQuadratic(pSegment.points[-1],segment.points[0],
+                                segment.points[1],segment.points[2])
+    # points[0] is the incoming on-curve point and is dropped here
+    return RSegment(
+        'qcurve', [[int(i) for i in p] for p in points[1:]], segment.smooth)
+
+def glyphCurvesToQuadratic(g):
+    """Convert every cubic ("curve") segment of glyph g to quadratic,
+    rewriting each contour in place via replaceSegments."""
+
+    for c in g:
+        segments = []
+        for i in range(len(c)):
+            s = c[i]
+            if s.type == "curve":
+                try:
+                    segments.append(cubicSegmentToQuadratic(c, i))
+                except Exception:
+                    # report which glyph/segment failed before re-raising
+                    print g.name, i
+                    raise
+            else:
+                segments.append(s)
+        replaceSegments(c, segments)
diff --git a/misc/pylib/fontbuild/curveFitPen.py b/misc/pylib/fontbuild/curveFitPen.py
new file mode 100644
index 000000000..f7c0caed9
--- /dev/null
+++ b/misc/pylib/fontbuild/curveFitPen.py
@@ -0,0 +1,422 @@
+#! /opt/local/bin/pythonw2.7
+#
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+__all__ = ["SubsegmentPen","SubsegmentsToCurvesPen", "segmentGlyph", "fitGlyph"]
+
+
+from fontTools.pens.basePen import BasePen
+import numpy as np
+from numpy import array as v
+from numpy.linalg import norm
+from robofab.pens.adapterPens import GuessSmoothPointPen
+from robofab.pens.pointPen import BasePointToSegmentPen
+
+
+class SubsegmentsToCurvesPointPen(BasePointToSegmentPen):
+    """Point-pen adapter around SubsegmentsToCurvesPen.
+
+    Replays a glyph's contours so that each original segment is rebuilt
+    as a curve fitted to the matching points of a previously flattened
+    ("subsegment") glyph; see fitGlyph below.
+    """
+    def __init__(self, glyph, subsegmentGlyph, subsegments):
+        BasePointToSegmentPen.__init__(self)
+        self.glyph = glyph
+        self.subPen = SubsegmentsToCurvesPen(None, glyph.getPen(), subsegmentGlyph, subsegments)
+
+    def setMatchTangents(self, b):
+        # flag consumed by SubsegmentsToCurvesPen._curveToOne
+        self.subPen.matchTangents = b
+
+    def _flushContour(self, segments):
+        #
+        # adapted from robofab.pens.adapterPens.rfUFOPointPen
+        #
+        assert len(segments) >= 1
+        # if we only have one point and it has a name, we must have an anchor
+        first = segments[0]
+        segmentType, points = first
+        pt, smooth, name, kwargs = points[0]
+        if len(segments) == 1 and name != None:
+            self.glyph.appendAnchor(name, pt)
+            return
+        else:
+            segmentType, points = segments[-1]
+            movePt, smooth, name, kwargs = points[-1]
+            if smooth:
+                # last point is smooth, set pen to start smooth
+                self.subPen.setLastSmooth(True)
+            if segmentType == 'line':
+                del segments[-1]
+
+        self.subPen.moveTo(movePt)
+
+        # do the rest of the segments
+        for segmentType, points in segments:
+            isSmooth = True in [smooth for pt, smooth, name, kwargs in points]
+            pp = [pt for pt, smooth, name, kwargs in points]
+            if segmentType == "line":
+                assert len(pp) == 1
+                if isSmooth:
+                    self.subPen.smoothLineTo(pp[0])
+                else:
+                    self.subPen.lineTo(pp[0])
+            elif segmentType == "curve":
+                assert len(pp) == 3
+                if isSmooth:
+                    self.subPen.smoothCurveTo(*pp)
+                else:
+                    self.subPen.curveTo(*pp)
+            elif segmentType == "qcurve":
+                assert 0, "qcurve not supported"
+            else:
+                assert 0, "illegal segmentType: %s" % segmentType
+        self.subPen.closePath()
+
+    def addComponent(self, glyphName, transform):
+        self.subPen.addComponent(glyphName, transform)
+
+
+class SubsegmentsToCurvesPen(BasePen):
+    """Pen that redraws segments as curves fitted to subsegment points.
+
+    For each incoming line/curve it looks up the matching run of points
+    in the flattened subsegmentGlyph and emits a fitted replacement to
+    otherPen.
+
+    NOTE(review): self.matchTangents is not initialised here; it is
+    assigned externally (SubsegmentsToCurvesPointPen.setMatchTangents)
+    before drawing -- confirm all callers do so.
+    """
+    def __init__(self, glyphSet, otherPen, subsegmentGlyph, subsegments):
+        BasePen.__init__(self, None)
+        self.otherPen = otherPen
+        self.ssglyph = subsegmentGlyph
+        self.subsegments = subsegments
+        self.contourIndex = -1
+        self.segmentIndex = -1
+        self.lastPoint = (0,0)
+        self.lastSmooth = False
+        self.nextSmooth = False
+
+    def setLastSmooth(self, b):
+        self.lastSmooth = b
+
+    def _moveTo(self, (x, y)):
+        self.contourIndex += 1
+        self.segmentIndex = 0
+        self.startPoint = (x,y)
+        # start at the matching point of the flattened glyph, not at (x,y)
+        p = self.ssglyph.contours[self.contourIndex][0].points[0]
+        self.otherPen.moveTo((p.x, p.y))
+        self.lastPoint = (x,y)
+
+    def _lineTo(self, (x, y)):
+        self.segmentIndex += 1
+        index = self.subsegments[self.contourIndex][self.segmentIndex][0]
+        p = self.ssglyph.contours[self.contourIndex][index].points[0]
+        self.otherPen.lineTo((p.x, p.y))
+        self.lastPoint = (x,y)
+        self.lastSmooth = False
+
+    def smoothLineTo(self, (x, y)):
+        self.lineTo((x,y))
+        self.lastSmooth = True
+
+    def smoothCurveTo(self, (x1, y1), (x2, y2), (x3, y3)):
+        self.nextSmooth = True
+        self.curveTo((x1, y1), (x2, y2), (x3, y3))
+        self.nextSmooth = False
+        self.lastSmooth = True
+
+    def _curveToOne(self, (x1, y1), (x2, y2), (x3, y3)):
+        # collect the flattened points that belong to this curve segment
+        self.segmentIndex += 1
+        c = self.ssglyph.contours[self.contourIndex]
+        n = len(c)
+        startIndex = (self.subsegments[self.contourIndex][self.segmentIndex-1][0])
+        segmentCount = (self.subsegments[self.contourIndex][self.segmentIndex][1])
+        endIndex = (startIndex + segmentCount + 1) % (n)
+
+        indices = [(startIndex + i) % (n) for i in range(segmentCount + 1)]
+        points = np.array([(c[i].points[0].x, c[i].points[0].y) for i in indices])
+        # neighbouring points supply the tangent directions at both ends
+        prevPoint = (c[(startIndex - 1)].points[0].x, c[(startIndex - 1)].points[0].y)
+        nextPoint = (c[(endIndex) % n].points[0].x, c[(endIndex) % n].points[0].y)
+        prevTangent = prevPoint - points[0]
+        nextTangent = nextPoint - points[-1]
+
+        tangent1 = points[1] - points[0]
+        tangent3 = points[-2] - points[-1]
+        prevTangent /= np.linalg.norm(prevTangent)
+        nextTangent /= np.linalg.norm(nextTangent)
+        tangent1 /= np.linalg.norm(tangent1)
+        tangent3 /= np.linalg.norm(tangent3)
+
+        tangent1, junk = self.smoothTangents(tangent1, prevTangent, self.lastSmooth)
+        tangent3, junk = self.smoothTangents(tangent3, nextTangent, self.nextSmooth)
+        if self.matchTangents == True:
+            # fit with tangent constraints, then force the control points
+            # exactly onto the tangent directions
+            cp = fitBezier(points, tangent1, tangent3)
+            cp[1] = norm(cp[1] - cp[0]) * tangent1 / norm(tangent1) + cp[0]
+            cp[2] = norm(cp[2] - cp[3]) * tangent3 / norm(tangent3) + cp[3]
+        else:
+            cp = fitBezier(points)
+        # if self.ssglyph.name == 'r':
+        #     print "-----------"
+        #     print self.lastSmooth, self.nextSmooth
+        #     print "%i %i : %i %i \n %i %i : %i %i \n %i %i : %i %i"%(x1,y1, cp[1,0], cp[1,1], x2,y2, cp[2,0], cp[2,1], x3,y3, cp[3,0], cp[3,1])
+        self.otherPen.curveTo((cp[1,0], cp[1,1]), (cp[2,0], cp[2,1]), (cp[3,0], cp[3,1]))
+        self.lastPoint = (x3, y3)
+        self.lastSmooth = False
+
+    def smoothTangents(self,t1,t2,forceSmooth = False):
+        # when the two directions are (nearly) opposite, average them so
+        # the joint becomes smooth
+        if forceSmooth or (abs(t1.dot(t2)) > .95 and norm(t1-t2) > 1):
+            # print t1,t2,
+            t1 = (t1 - t2) / 2
+            t2 = -t1
+            # print t1,t2
+        return t1 / norm(t1), t2 / norm(t2)
+
+    def _closePath(self):
+        self.otherPen.closePath()
+
+    def _endPath(self):
+        self.otherPen.endPath()
+
+    def addComponent(self, glyphName, transformation):
+        self.otherPen.addComponent(glyphName, transformation)
+
+
+class SubsegmentPointPen(BasePointToSegmentPen):
+ def __init__(self, glyph, resolution):
+ BasePointToSegmentPen.__init__(self)
+ self.glyph = glyph
+ self.resolution = resolution
+ self.subPen = SubsegmentPen(None, glyph.getPen())
+
+ def getSubsegments(self):
+ return self.subPen.subsegments[:]
+
+ def _flushContour(self, segments):
+ #
+ # adapted from robofab.pens.adapterPens.rfUFOPointPen
+ #
+ assert len(segments) >= 1
+ # if we only have one point and it has a name, we must have an anchor
+ first = segments[0]
+ segmentType, points = first
+ pt, smooth, name, kwargs = points[0]
+ if len(segments) == 1 and name != None:
+ self.glyph.appendAnchor(name, pt)
+ return
+ else:
+ segmentType, points = segments[-1]
+ movePt, smooth, name, kwargs = points[-1]
+ if segmentType == 'line':
+ del segments[-1]
+
+ self.subPen.moveTo(movePt)
+
+ # do the rest of the segments
+ for segmentType, points in segments:
+ points = [pt for pt, smooth, name, kwargs in points]
+ if segmentType == "line":
+ assert len(points) == 1
+ self.subPen.lineTo(points[0])
+ elif segmentType == "curve":
+ assert len(points) == 3
+ self.subPen.curveTo(*points)
+ elif segmentType == "qcurve":
+ assert 0, "qcurve not supported"
+ else:
+ assert 0, "illegal segmentType: %s" % segmentType
+ self.subPen.closePath()
+
+ def addComponent(self, glyphName, transform):
+ self.subPen.addComponent(glyphName, transform)
+
+
+class SubsegmentPen(BasePen):
+    """Pen that redraws a glyph as runs of short line segments.
+
+    Each line/curve is subdivided into steps of roughly `resolution`
+    units; self.subsegments records, per contour, one
+    [cumulativePointIndex, stepCount] pair per original segment.
+    """
+
+    def __init__(self, glyphSet, otherPen, resolution=25):
+        BasePen.__init__(self,glyphSet)
+        self.resolution = resolution
+        self.otherPen = otherPen
+        self.subsegments = []
+        self.startContour = (0,0)
+        self.contourIndex = -1
+
+    def _moveTo(self, (x, y)):
+        # start a new contour and its bookkeeping
+        self.contourIndex += 1
+        self.segmentIndex = 0
+        self.subsegments.append([])
+        self.subsegmentCount = 0
+        self.subsegments[self.contourIndex].append([self.subsegmentCount, 0])
+        self.startContour = (x,y)
+        self.lastPoint = (x,y)
+        self.otherPen.moveTo((x,y))
+
+    def _lineTo(self, (x, y)):
+        # emit `count` evenly spaced points along the line (at least one)
+        count = self.stepsForSegment((x,y),self.lastPoint)
+        if count < 1:
+            count = 1
+        self.subsegmentCount += count
+        self.subsegments[self.contourIndex].append([self.subsegmentCount, count])
+        for i in range(1,count+1):
+            x1 = self.lastPoint[0] + (x - self.lastPoint[0]) * i/float(count)
+            y1 = self.lastPoint[1] + (y - self.lastPoint[1]) * i/float(count)
+            self.otherPen.lineTo((x1,y1))
+        self.lastPoint = (x,y)
+
+    def _curveToOne(self, (x1, y1), (x2, y2), (x3, y3)):
+        # flatten the cubic with at least two steps
+        count = self.stepsForSegment((x3,y3),self.lastPoint)
+        if count < 2:
+            count = 2
+        self.subsegmentCount += count
+        self.subsegments[self.contourIndex].append([self.subsegmentCount,count])
+        x = self.renderCurve((self.lastPoint[0],x1,x2,x3),count)
+        y = self.renderCurve((self.lastPoint[1],y1,y2,y3),count)
+        assert len(x) == count
+        if (x3 == self.startContour[0] and y3 == self.startContour[1]):
+            # drop the final point when the curve closes the contour
+            count -= 1
+        for i in range(count):
+            self.otherPen.lineTo((x[i],y[i]))
+        self.lastPoint = (x3,y3)
+
+    def _closePath(self):
+        # make the closing line explicit so it gets subdivided too
+        if not (self.lastPoint[0] == self.startContour[0] and self.lastPoint[1] == self.startContour[1]):
+            self._lineTo(self.startContour)
+
+        # round values used by otherPen (a RoboFab SegmentToPointPen) to decide
+        # whether to delete duplicate points at start and end of contour
+        #TODO(jamesgk) figure out why we have to do this hack, then remove it
+        c = self.otherPen.contour
+        for i in [0, -1]:
+            c[i] = [[round(n, 5) for n in c[i][0]]] + list(c[i][1:])
+
+        self.otherPen.closePath()
+
+    def _endPath(self):
+        self.otherPen.endPath()
+
+    def addComponent(self, glyphName, transformation):
+        self.otherPen.addComponent(glyphName, transformation)
+
+    def stepsForSegment(self, p1, p2):
+        # number of subdivision steps: euclidean endpoint distance / resolution
+        dist = np.linalg.norm(v(p1) - v(p2))
+        out = int(dist / self.resolution)
+        return out
+
+    def renderCurve(self,p,count):
+        # evaluate the cubic at `count` parameter steps using forward
+        # differencing (no per-step power evaluation)
+        curvePoints = []
+        t = 1.0 / float(count)
+        temp = t * t
+
+        f = p[0]
+        fd = 3 * (p[1] - p[0]) * t
+        fdd_per_2 = 3 * (p[0] - 2 * p[1] + p[2]) * temp
+        fddd_per_2 = 3 * (3 * (p[1] - p[2]) + p[3] - p[0]) * temp * t
+
+        fddd = fddd_per_2 + fddd_per_2
+        fdd = fdd_per_2 + fdd_per_2
+        fddd_per_6 = fddd_per_2 * (1.0 / 3)
+
+        for i in range(count):
+            f = f + fd + fdd_per_2 + fddd_per_6
+            fd = fd + fdd + fddd_per_2
+            fdd = fdd + fddd
+            fdd_per_2 = fdd_per_2 + fddd_per_2
+            curvePoints.append(f)
+
+        return curvePoints
+
+
+def fitBezierSimple(pts):
+    """Least-squares cubic bezier through pts.
+
+    Points are parameterised by cumulative chord length; the end points
+    are weighted (x100) to pin the curve ends.  Returns the four control
+    points.  NOTE(review): not referenced elsewhere in this module;
+    fitBezier below is the variant actually used.
+    """
+    T = [np.linalg.norm(pts[i]-pts[i-1]) for i in range(1,len(pts))]
+    tsum = np.sum(T)
+    T = [0] + T
+    T = [np.sum(T[0:i+1])/tsum for i in range(len(pts))]
+    T = [[t**3, t**2, t, 1] for t in T]
+    T = np.array(T)
+    # cubic bezier basis matrix
+    M = np.array([[-1, 3, -3, 1],
+                  [ 3, -6, 3, 0],
+                  [-3, 3, 0, 0],
+                  [ 1, 0, 0, 0]])
+    T = T.dot(M)
+    T = np.concatenate((T, np.array([[100,0,0,0], [0,0,0,100]])))
+    # pts = np.vstack((pts, pts[0] * 100, pts[-1] * 100))
+    C = np.linalg.lstsq(T, pts)
+    return C[0]
+
+
+def subdivideLineSegment(pts):
+ out = [pts[0]]
+ for i in range(1, len(pts)):
+ out.append(pts[i-1] + (pts[i] - pts[i-1]) * .5)
+ out.append(pts[i])
+ return np.array(out)
+
+
+def fitBezier(pts,tangent0=None,tangent3=None):
+ if len(pts < 4):
+ pts = subdivideLineSegment(pts)
+ T = [np.linalg.norm(pts[i]-pts[i-1]) for i in range(1,len(pts))]
+ tsum = np.sum(T)
+ T = [0] + T
+ T = [np.sum(T[0:i+1])/tsum for i in range(len(pts))]
+ T = [[t**3, t**2, t, 1] for t in T]
+ T = np.array(T)
+ M = np.array([[-1, 3, -3, 1],
+ [ 3, -6, 3, 0],
+ [-3, 3, 0, 0],
+ [ 1, 0, 0, 0]])
+ T = T.dot(M)
+ n = len(pts)
+ pout = pts.copy()
+ pout[:,0] -= (T[:,0] * pts[0,0]) + (T[:,3] * pts[-1,0])
+ pout[:,1] -= (T[:,0] * pts[0,1]) + (T[:,3] * pts[-1,1])
+
+ TT = np.zeros((n*2,4))
+ for i in range(n):
+ for j in range(2):
+ TT[i*2,j*2] = T[i,j+1]
+ TT[i*2+1,j*2+1] = T[i,j+1]
+ pout = pout.reshape((n*2,1),order="C")
+
+ if tangent0 != None and tangent3 != None:
+ tangentConstraintsT = np.array([
+ [tangent0[1], -tangent0[0], 0, 0],
+ [0, 0, tangent3[1], -tangent3[0]]
+ ])
+ tangentConstraintsP = np.array([
+ [pts[0][1] * -tangent0[0] + pts[0][0] * tangent0[1]],
+ [pts[-1][1] * -tangent3[0] + pts[-1][0] * tangent3[1]]
+ ])
+ TT = np.concatenate((TT, tangentConstraintsT * 1000))
+ pout = np.concatenate((pout, tangentConstraintsP * 1000))
+ C = np.linalg.lstsq(TT,pout)[0].reshape((2,2))
+ return np.array([pts[0], C[0], C[1], pts[-1]])
+
+
+def segmentGlyph(glyph,resolution=50):
+    """Return (flattenedCopy, subsegments) for glyph.
+
+    The copy has every contour redrawn as short line segments via
+    SubsegmentPointPen at the given resolution; subsegments is the
+    bookkeeping consumed later by fitGlyph.
+    """
+    g1 = glyph.copy()
+    g1.clear()
+    dp = SubsegmentPointPen(g1, resolution)
+    glyph.drawPoints(dp)
+    return g1, dp.getSubsegments()
+
+
+def fitGlyph(glyph, subsegmentGlyph, subsegmentIndices, matchTangents=True):
+    """Fit curves of `glyph` to the flattened subsegmentGlyph.
+
+    Redraws `glyph` through SubsegmentsToCurvesPointPen so each original
+    segment becomes a curve fitted to the corresponding subsegment
+    points.  The advance width is copied from subsegmentGlyph.
+    """
+    outGlyph = glyph.copy()
+    outGlyph.clear()
+    fitPen = SubsegmentsToCurvesPointPen(outGlyph, subsegmentGlyph, subsegmentIndices)
+    fitPen.setMatchTangents(matchTangents)
+    # smoothPen = GuessSmoothPointPen(fitPen)
+    glyph.drawPoints(fitPen)
+    outGlyph.width = subsegmentGlyph.width
+    return outGlyph
+
+
+if __name__ == '__main__':
+    # ad-hoc smoke test of the forward-differencing curve renderer
+    p = SubsegmentPen(None, None)
+    pts = np.array([
+        [0,0],
+        [.5,.5],
+        [.5,.5],
+        [1,1]
+    ])
+    print np.array(p.renderCurve(pts,10)) * 10
diff --git a/misc/pylib/fontbuild/decomposeGlyph.py b/misc/pylib/fontbuild/decomposeGlyph.py
new file mode 100644
index 000000000..0470fa60b
--- /dev/null
+++ b/misc/pylib/fontbuild/decomposeGlyph.py
@@ -0,0 +1,23 @@
+def decomposeGlyph(font, glyph):
+    """Moves the components of a glyph to its outline."""
+    # copy (possibly nested) components in as scaled/offset contours,
+    # then drop the component references
+    if len(glyph.components):
+        deepCopyContours(font, glyph, glyph, (0, 0), (1, 1))
+        glyph.clearComponents()
+
+
+def deepCopyContours(font, parent, component, offset, scale):
+    """Copy contours to parent from component, including nested components."""
+
+    # recurse first so nested components accumulate their combined
+    # offset and scale
+    for nested in component.components:
+        deepCopyContours(
+            font, parent, font[nested.baseGlyph],
+            (offset[0] + nested.offset[0], offset[1] + nested.offset[1]),
+            (scale[0] * nested.scale[0], scale[1] * nested.scale[1]))
+
+    # the top-level call passes the glyph itself as `component`; only
+    # actual components contribute contours
+    if component == parent:
+        return
+    for contour in component:
+        contour = contour.copy()
+        contour.scale(scale)
+        contour.move(offset)
+        parent.appendContour(contour)
diff --git a/misc/pylib/fontbuild/features.py b/misc/pylib/fontbuild/features.py
new file mode 100755
index 000000000..fe6eca012
--- /dev/null
+++ b/misc/pylib/fontbuild/features.py
@@ -0,0 +1,189 @@
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import re
+
+from feaTools import parser
+from feaTools.writers.fdkSyntaxWriter import FDKSyntaxFeatureWriter
+
+
class FilterFeatureWriter(FDKSyntaxFeatureWriter):
    """Feature writer to detect invalid references and duplicate definitions."""

    def __init__(self, refs=None, name=None, isFeature=False):
        """Initializes the set of known references, empty by default.

        Args:
            refs: set of valid glyph/class names; rules referencing names
                outside this set are dropped with a warning.
            name: name of the feature/lookup being written, or None.
            isFeature: True for a feature block, False for a lookup block.
        """
        # Fix: the original default was the mutable `refs=set()`, evaluated
        # once at definition time, so every writer created with the default
        # shared one set and names leaked between unrelated writers.
        self.refs = set() if refs is None else refs
        self.featureNames = set()
        self.lookupNames = set()
        self.tableNames = set()
        self.languageSystems = set()
        super(FilterFeatureWriter, self).__init__(
            name=name, isFeature=isFeature)

        # error to print when undefined reference is found in glyph class
        self.classErr = ('Undefined reference "%s" removed from glyph class '
                         'definition %s.')

        # error to print when undefined reference is found in sub or pos rule
        subErr = ['Substitution rule with undefined reference "%s" removed']
        if self._name:
            subErr.append(" from ")
            subErr.append("feature" if self._isFeature else "lookup")
            subErr.append(' "%s"' % self._name)
            subErr.append(".")
        self.subErr = "".join(subErr)
        self.posErr = self.subErr.replace("Substitution", "Positioning")

    def _subwriter(self, name, isFeature):
        """Use this class for nested expressions e.g. in feature definitions."""
        # Shares self.refs deliberately, so nested blocks see the same names.
        return FilterFeatureWriter(self.refs, name, isFeature)

    def _flattenRefs(self, refs, flatRefs):
        """Flatten a list of references."""
        for ref in refs:
            if type(ref) == list:
                self._flattenRefs(ref, flatRefs)
            elif ref != "'":  # ignore contextual class markings
                flatRefs.append(ref)

    def _checkRefs(self, refs, errorMsg):
        """Check a list of references found in a sub or pos rule.

        Returns True when all references are known; otherwise prints
        errorMsg for the first unknown one and returns False.
        """
        flatRefs = []
        self._flattenRefs(refs, flatRefs)
        for ref in flatRefs:
            # trailing apostrophes should be ignored
            if ref[-1] == "'":
                ref = ref[:-1]
            if ref not in self.refs:
                print(errorMsg % ref)
                # insert an empty instruction so that we can't end up with an
                # empty block, which is illegal syntax
                super(FilterFeatureWriter, self).rawText(";")
                return False
        return True

    def classDefinition(self, name, contents):
        """Check that contents are valid, then add name to known references."""
        if name in self.refs:
            return
        newContents = []
        for ref in contents:
            if ref not in self.refs and ref != "-":
                print(self.classErr % (ref, name))
            else:
                newContents.append(ref)
        self.refs.add(name)
        super(FilterFeatureWriter, self).classDefinition(name, newContents)

    def gsubType1(self, target, replacement):
        """Check a sub rule with one-to-one replacement."""
        if self._checkRefs([target, replacement], self.subErr):
            super(FilterFeatureWriter, self).gsubType1(target, replacement)

    def gsubType4(self, target, replacement):
        """Check a sub rule with many-to-one replacement."""
        if self._checkRefs([target, replacement], self.subErr):
            super(FilterFeatureWriter, self).gsubType4(target, replacement)

    def gsubType6(self, precedingContext, target, trailingContext, replacement):
        """Check a sub rule with contextual replacement."""
        refs = [precedingContext, target, trailingContext, replacement]
        if self._checkRefs(refs, self.subErr):
            super(FilterFeatureWriter, self).gsubType6(
                precedingContext, target, trailingContext, replacement)

    def gposType1(self, target, value):
        """Check a single positioning rule."""
        if self._checkRefs([target], self.posErr):
            super(FilterFeatureWriter, self).gposType1(target, value)

    def gposType2(self, target, value, needEnum=False):
        """Check a pair positioning rule."""
        if self._checkRefs(target, self.posErr):
            super(FilterFeatureWriter, self).gposType2(target, value, needEnum)

    # these rules may contain references, but they aren't present in Roboto
    def gsubType3(self, target, replacement):
        raise NotImplementedError

    def feature(self, name):
        """Adds a feature definition only once."""
        if name not in self.featureNames:
            self.featureNames.add(name)
            return super(FilterFeatureWriter, self).feature(name)
        # we must return a new writer even if we don't add it to this one
        return FDKSyntaxFeatureWriter(name, True)

    def lookup(self, name):
        """Adds a lookup block only once."""
        if name not in self.lookupNames:
            self.lookupNames.add(name)
            return super(FilterFeatureWriter, self).lookup(name)
        # we must return a new writer even if we don't add it to this one
        return FDKSyntaxFeatureWriter(name, False)

    def languageSystem(self, langTag, scriptTag):
        """Adds a language system instruction only once."""
        system = (langTag, scriptTag)
        if system not in self.languageSystems:
            self.languageSystems.add(system)
            super(FilterFeatureWriter, self).languageSystem(langTag, scriptTag)

    def table(self, name, data):
        """Adds a table only once."""
        if name in self.tableNames:
            return
        self.tableNames.add(name)
        self._instructions.append("table %s {" % name)
        self._instructions.extend(["  %s %s;" % line for line in data])
        self._instructions.append("} %s;" % name)
+
+
def compileFeatureRE(name):
    """Compiles a feature-matching regex."""

    # this is the pattern used internally by feaTools:
    # https://github.com/typesupply/feaTools/blob/master/Lib/feaTools/parser.py
    # Splice the feature name into the template at the same two positions
    # the original insert(2, ...)/insert(6, ...) calls targeted.
    template = list(parser.featureContentRE)
    pieces = template[:2] + [name] + template[2:5] + [name] + template[5:]
    return re.compile("".join(pieces))
+
+
def updateFeature(font, name, value):
    """Add a feature definition, or replace existing one."""
    pattern = compileFeatureRE(name)
    existing = font.features.text
    if pattern.search(existing):
        # Replace the current definition in place.
        font.features.text = pattern.sub(value, existing)
    else:
        # No prior definition: append it.
        font.features.text = existing + "\n" + value
+
+
def readFeatureFile(font, text, prepend=True):
    """Incorporate valid definitions from feature text into font."""
    # Seed the writer with the font's glyph names as valid references.
    writer = FilterFeatureWriter(set(font.keys()))
    existing = font.features.text
    combined = text + existing if prepend else existing + text
    parser.parseFeatures(writer, combined)
    font.features.text = writer.write()
+
+
def writeFeatureFile(font, path):
    """Write the font's features to an external file.

    Args:
        font: font object exposing `features.text` (a string).
        path: destination path; overwritten if it exists.
    """
    # `with` guarantees the handle is closed even if write() raises,
    # unlike the original open()/write()/close() sequence.
    with open(path, "w") as fout:
        fout.write(font.features.text)
diff --git a/misc/pylib/fontbuild/generateGlyph.py b/misc/pylib/fontbuild/generateGlyph.py
new file mode 100644
index 000000000..465f940a9
--- /dev/null
+++ b/misc/pylib/fontbuild/generateGlyph.py
@@ -0,0 +1,97 @@
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import re
+from string import find
+
+from anchors import alignComponentsToAnchors, getAnchorByName
+
+
def parseComposite(composite):
    """Parse a glyph-composition rule.

    A rule looks like "base+accent:anchor+...=glyphName[/xoff,yoff]".
    Returns (glyphName, baseName, accentNames, offset) where accentNames
    is a list of [accentName, anchorName] pairs and offset is [x, y]
    (defaulting to [0, 0] when no "/x,y" suffix is present).
    """
    parts = composite.split("=")
    recipe, target = parts[0], parts[1]

    targetParts = target.split("/")
    glyphName = targetParts[0]
    if len(targetParts) > 1:
        offset = [int(value) for value in targetParts[1].split(",")]
    else:
        offset = [0, 0]

    pieces = recipe.split("+")
    baseName = pieces[0]
    accentNames = [piece.split(":") for piece in pieces[1:]]
    return (glyphName, baseName, accentNames, offset)
+
+
def copyMarkAnchors(f, g, srcname, width):
    """Propagate mark-attachment anchors from base glyph `srcname` onto
    composite glyph `g`.

    `width` is added to the x position of the copied diacritic anchors
    (callers pass the composite's extra advance, see generateGlyph).
    """
    for anchor in f[srcname].anchors:
        # Diacritic-stacking anchors are shifted by the width delta.
        if anchor.name in ("top_dd", "bottom_dd", "top0315"):
            g.appendAnchor(anchor.name, (anchor.x + width, anchor.y))

        # Remember where the base's "top" was, without clobbering an
        # existing parent_top on the composite.
        if ("top" == anchor.name and
                not any(a.name == "parent_top" for a in g.anchors)):
            g.appendAnchor("parent_top", anchor.position)

        # Copy "bottom" only if the composite doesn't define one itself.
        if ("bottom" == anchor.name and
                not any(a.name == "bottom" for a in g.anchors)):
            g.appendAnchor("bottom", anchor.position)

    # If the composite already has a "top" anchor, keep it.
    if any(a.name == "top" for a in g.anchors):
        return

    # Otherwise fall back to the recorded parent_top position.
    anchor_parent_top = getAnchorByName(g, "parent_top")
    if anchor_parent_top is not None:
        g.appendAnchor("top", anchor_parent_top.position)
+
+
def generateGlyph(f, gname, glyphList={}):
    """Create a glyph in font `f` from composition rule `gname`.

    Args:
        f: robofab font.
        gname: composition rule string (see parseComposite).
        glyphList: glyph name -> hex codepoint mapping used to assign
            unicode values.  NOTE: shared mutable default; it is only
            read here, so the sharing is harmless.
    """
    glyphName, baseName, accentNames, offset = parseComposite(gname)

    # Fix: has_key() is Python-2-only; membership test is the portable
    # equivalent on robofab font objects.
    if glyphName in f:
        print('Existing glyph "%s" found in font, ignoring composition rule '
              '"%s"' % (glyphName, gname))
        return

    if baseName.find("_") != -1:
        # Underscore-joined base: a ligature built by abutting components.
        g = f.newGlyph(glyphName)
        for componentName in baseName.split("_"):
            g.appendComponent(componentName, (g.width, 0))
            g.width += f[componentName].width
        setUnicodeValue(g, glyphList)

    else:
        try:
            f.compileGlyph(glyphName, baseName, accentNames)
        except KeyError as e:
            print('KeyError raised for composition rule "%s", likely "%s" '
                  'anchor not found in glyph "%s"' % (gname, e, baseName))
            return
        g = f[glyphName]
        setUnicodeValue(g, glyphList)
        # Offset sum is the total advance-width adjustment.
        copyMarkAnchors(f, g, baseName, offset[1] + offset[0])
        if len(accentNames) > 0:
            alignComponentsToAnchors(f, glyphName, baseName, accentNames)
        if offset[0] != 0 or offset[1] != 0:
            g.width += offset[1] + offset[0]
            g.move((offset[0], 0), anchors=False)
+
+
def setUnicodeValue(glyph, glyphList):
    """Try to ensure glyph has a unicode value -- used by FDK to make OTFs.

    Args:
        glyph: glyph object with `name` and a writable `unicode` attribute.
        glyphList: mapping of glyph name -> hex codepoint string.
    """
    if glyph.name in glyphList:
        glyph.unicode = int(glyphList[glyph.name], 16)
    else:
        # Fix: raw string -- "\d" in a plain literal is an invalid escape
        # sequence (DeprecationWarning, eventually an error, on Python 3).
        uvNameMatch = re.match(r"uni([\dA-F]{4})$", glyph.name)
        if uvNameMatch:
            glyph.unicode = int(uvNameMatch.group(1), 16)
diff --git a/misc/pylib/fontbuild/instanceNames.py b/misc/pylib/fontbuild/instanceNames.py
new file mode 100644
index 000000000..cf87ba719
--- /dev/null
+++ b/misc/pylib/fontbuild/instanceNames.py
@@ -0,0 +1,232 @@
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from datetime import date
+import re
+from random import randint
+import string
+
class InstanceNames:
    "Class that allows easy setting of FontLab name fields. TODO: Add proper italic flags"

    # Class-level defaults; setNames()/setInfoRF() overwrite some of these
    # on the class itself before instantiating.
    foundry = ""
    foundryURL = ""
    copyrightHolderName = ""
    build = ""
    version = "1.0"
    year = date.today().year
    designer = ""
    designerURL = ""
    license = ""
    licenseURL = ""

    def __init__(self,names):
        # `names` is either a "family/style/shortstyle/abbrev" string or an
        # already-split sequence of those four fields.
        if type(names) == type(" "):
            names = names.split("/")
        #print names
        self.longfamily = names[0]
        self.longstyle = names[1]
        self.shortstyle = names[2]
        self.subfamilyAbbrev = names[3]

        # Derived name fields parsed out of the style string.
        self.width = self._getWidth()
        self.italic = self._getItalic()
        self.weight = self._getWeight()
        self.fullname = "%s %s" %(self.longfamily, self.longstyle)
        self.postscript = re.sub(' ','', self.longfamily) + "-" + re.sub(' ','',self.longstyle)

        # Non-"Regular" subfamilies get the first style word folded into
        # the (4-style-map) family name.
        if self.subfamilyAbbrev != "" and self.subfamilyAbbrev != None and self.subfamilyAbbrev != "Rg":
            self.shortfamily = "%s %s" %(self.longfamily, self.longstyle.split()[0])
        else:
            self.shortfamily = self.longfamily

    def setRFNames(self,f, version=1, versionMinor=0):
        """Write the computed names into a RoboFab font's info object."""
        f.info.familyName = self.longfamily
        f.info.styleName = self.longstyle
        f.info.styleMapFamilyName = self.shortfamily
        f.info.styleMapStyleName = self.shortstyle.lower()
        f.info.versionMajor = version
        f.info.versionMinor = versionMinor
        f.info.year = self.year
        if len(self.copyrightHolderName) > 0:
            f.info.copyright = "Copyright %s %s" % (self.year, self.copyrightHolderName)
        f.info.trademark = "%s is a trademark of %s." %(self.longfamily, self.foundry.rstrip('.'))

        if len(self.designer) > 0:
            f.info.openTypeNameDesigner = self.designer
        if len(self.designerURL) > 0:
            f.info.openTypeNameDesignerURL = self.designerURL
        f.info.openTypeNameManufacturer = self.foundry
        f.info.openTypeNameManufacturerURL = self.foundryURL
        f.info.openTypeNameLicense = self.license
        f.info.openTypeNameLicenseURL = self.licenseURL
        f.info.openTypeNameVersion = "Version %i.%i" %(version, versionMinor)

        # Unique ID (name ID 3) includes the build tag when present.
        if self.build is not None and len(self.build):
            f.info.openTypeNameUniqueID = "%s:%s:%s" %(self.fullname, self.build, self.year)
        else:
            f.info.openTypeNameUniqueID = "%s:%s" %(self.fullname, self.year)

        # f.info.openTypeNameDescription = ""
        # f.info.openTypeNameCompatibleFullName = ""
        # f.info.openTypeNameSampleText = ""
        if (self.subfamilyAbbrev != "Rg"):
            f.info.openTypeNamePreferredFamilyName = self.longfamily
            f.info.openTypeNamePreferredSubfamilyName = self.longstyle

        f.info.openTypeOS2WeightClass = self._getWeightCode(self.weight)
        f.info.macintoshFONDName = re.sub(' ','',self.longfamily) + " " + re.sub(' ','',self.longstyle)
        f.info.postscriptFontName = f.info.macintoshFONDName.replace(" ", "-")
        # NOTE(review): italic angle is hard-coded to -12 degrees here.
        if self.italic:
            f.info.italicAngle = -12.0


    def setFLNames(self,flFont):
        """Write the computed names into a FontLab font, including the
        full OpenType name table records."""

        from FL import NameRecord

        flFont.family_name = self.shortfamily
        flFont.mac_compatible = self.fullname
        flFont.style_name = self.longstyle
        flFont.full_name = self.fullname
        flFont.font_name = self.postscript
        flFont.font_style = self._getStyleCode()
        flFont.menu_name = self.shortfamily
        flFont.apple_name = re.sub(' ','',self.longfamily) + " " + re.sub(' ','',self.longstyle)
        flFont.fond_id = randint(1000,9999)
        flFont.pref_family_name = self.longfamily
        flFont.pref_style_name = self.longstyle
        flFont.weight = self.weight
        flFont.weight_code = self._getWeightCode(self.weight)
        flFont.width = self.width
        # self.italic is a substring match result (""/"Italic"/...), so
        # len() doubles as a truthiness test here.
        if len(self.italic):
            flFont.italic_angle = -12

        # Name records: pairs of (platform 1 / Mac) and (platform 3 /
        # Windows, enc 1, lang 1033) entries for each name ID.
        fn = flFont.fontnames
        fn.clean()
        #fn.append(NameRecord(0,1,0,0, "Font data copyright %s %s" %(self.foundry, self.year) ))
        #fn.append(NameRecord(0,3,1,1033, "Font data copyright %s %s" %(self.foundry, self.year) ))
        copyrightHolderName = self.copyrightHolderName if len(self.copyrightHolderName) > 0 else self.foundry
        fn.append(NameRecord(0,1,0,0, "Copyright %s %s" %(self.year, copyrightHolderName) ))
        fn.append(NameRecord(0,3,1,1033, "Copyright %s %s" %(self.year, copyrightHolderName) ))
        fn.append(NameRecord(1,1,0,0, self.longfamily ))
        fn.append(NameRecord(1,3,1,1033, self.shortfamily ))
        fn.append(NameRecord(2,1,0,0, self.longstyle ))
        fn.append(NameRecord(2,3,1,1033, self.longstyle ))
        #fn.append(NameRecord(3,1,0,0, "%s:%s:%s" %(self.foundry, self.longfamily, self.year) ))
        #fn.append(NameRecord(3,3,1,1033, "%s:%s:%s" %(self.foundry, self.longfamily, self.year) ))
        fn.append(NameRecord(3,1,0,0, "%s:%s:%s" %(self.foundry, self.fullname, self.year) ))
        fn.append(NameRecord(3,3,1,1033, "%s:%s:%s" %(self.foundry, self.fullname, self.year) ))
        fn.append(NameRecord(4,1,0,0, self.fullname ))
        fn.append(NameRecord(4,3,1,1033, self.fullname ))
        if len(self.build) > 0:
            fn.append(NameRecord(5,1,0,0, "Version %s%s; %s" %(self.version, self.build, self.year) ))
            fn.append(NameRecord(5,3,1,1033, "Version %s%s; %s" %(self.version, self.build, self.year) ))
        else:
            fn.append(NameRecord(5,1,0,0, "Version %s; %s" %(self.version, self.year) ))
            fn.append(NameRecord(5,3,1,1033, "Version %s; %s" %(self.version, self.year) ))
        fn.append(NameRecord(6,1,0,0, self.postscript ))
        fn.append(NameRecord(6,3,1,1033, self.postscript ))
        fn.append(NameRecord(7,1,0,0, "%s is a trademark of %s." %(self.longfamily, self.foundry) ))
        fn.append(NameRecord(7,3,1,1033, "%s is a trademark of %s." %(self.longfamily, self.foundry) ))
        fn.append(NameRecord(9,1,0,0, self.foundry ))
        fn.append(NameRecord(9,3,1,1033, self.foundry ))
        fn.append(NameRecord(11,1,0,0, self.foundryURL ))
        fn.append(NameRecord(11,3,1,1033, self.foundryURL ))
        fn.append(NameRecord(12,1,0,0, self.designer ))
        fn.append(NameRecord(12,3,1,1033, self.designer ))
        fn.append(NameRecord(13,1,0,0, self.license ))
        fn.append(NameRecord(13,3,1,1033, self.license ))
        fn.append(NameRecord(14,1,0,0, self.licenseURL ))
        fn.append(NameRecord(14,3,1,1033, self.licenseURL ))
        # Preferred family/subfamily (IDs 16/17) only for non-Regular.
        if (self.subfamilyAbbrev != "Rg"):
            fn.append(NameRecord(16,3,1,1033, self.longfamily ))
            fn.append(NameRecord(17,3,1,1033, self.longstyle))
        #else:
            #fn.append(NameRecord(17,3,1,1033,""))
        #fn.append(NameRecord(18,1,0,0, re.sub("Italic","It", self.fullname)))

    def _getSubstyle(self, regex):
        """Return the first regex match found in the style name, or ""."""
        substyle = re.findall(regex, self.longstyle)
        if len(substyle) > 0:
            return substyle[0]
        else:
            return ""

    def _getItalic(self):
        # Matched substring ("" when upright).
        return self._getSubstyle(r"Italic|Oblique|Obliq")

    def _getWeight(self):
        # Weight keyword from the style name; defaults to "Regular".
        w = self._getSubstyle(r"Extrabold|Superbold|Super|Fat|Black|Bold|Semibold|Demibold|Medium|Light|Thin")
        if w == "":
            w = "Regular"
        return w

    def _getWidth(self):
        # Width keyword from the style name; defaults to "Normal".
        w = self._getSubstyle(r"Condensed|Extended|Narrow|Wide")
        if w == "":
            w = "Normal"
        return w

    def _getStyleCode(self):
        """FontLab font_style bit code derived from the style-map style."""
        #print "shortstyle:", self.shortstyle
        styleCode = 0
        if self.shortstyle == "Bold":
            styleCode = 32
        if self.shortstyle == "Italic":
            styleCode = 1
        if self.shortstyle == "Bold Italic":
            styleCode = 33
        if self.longstyle == "Regular":
            styleCode = 64
        return styleCode

    def _getWeightCode(self,weight):
        """OS/2 usWeightClass for a weight keyword (400 when unknown)."""
        if weight == "Thin":
            return 250
        elif weight == "Light":
            return 300
        elif weight == "Bold":
            return 700
        elif weight == "Medium":
            return 500
        elif weight == "Semibold":
            return 600
        elif weight == "Black":
            return 900
        elif weight == "Fat":
            return 900

        return 400
+
def setNames(f,names,foundry="",version="1.0",build=""):
    """Apply naming fields to a FontLab font from a names spec string.

    NOTE(review): these assignments mutate class-level state shared by
    every InstanceNames instance created afterwards.
    """
    InstanceNames.foundry = foundry
    InstanceNames.version = version
    InstanceNames.build = build
    i = InstanceNames(names)
    i.setFLNames(f)
+
+
def setInfoRF(f, names, attrs={}):
    """Set RoboFab font info from a names spec plus attribute overrides.

    Each attrs entry is set on the InstanceNames instance; a 'version'
    value of the form "major.minor" (or just "major") additionally
    selects the version numbers passed to setRFNames.

    NOTE: attrs defaults to a shared mutable dict; it is only read here.
    """
    i = InstanceNames(names)
    version, versionMinor = (1, 0)
    # Fix: dict.iteritems() is Python-2-only; items() behaves identically
    # for this read-only traversal on both Python versions.
    for k, v in attrs.items():
        if k == 'version':
            if v.find('.') != -1:
                version, versionMinor = [int(num) for num in v.split(".")]
            else:
                version = int(v)
        setattr(i, k, v)
    i.setRFNames(f, version=version, versionMinor=versionMinor)
diff --git a/misc/pylib/fontbuild/italics.py b/misc/pylib/fontbuild/italics.py
new file mode 100644
index 000000000..91e658c74
--- /dev/null
+++ b/misc/pylib/fontbuild/italics.py
@@ -0,0 +1,308 @@
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import math
+
+from fontTools.misc.transform import Transform
+import numpy as np
+from numpy.linalg import norm
+from scipy.sparse.linalg import cg
+from scipy.ndimage.filters import gaussian_filter1d as gaussian
+from scipy.cluster.vq import vq, whiten
+
+from fontbuild.alignpoints import alignCorners
+from fontbuild.curveFitPen import fitGlyph, segmentGlyph
+
+
def italicizeGlyph(f, g, angle=10, stemWidth=185, meanYCenter=-825, narrowAmount=1):
    """Slant glyph `g` of font `f` by `angle` degrees, correcting stems.

    NOTE(review): slope uses tanh of the angle in radians rather than tan;
    the two are close for small angles and this matches skewMesh below --
    confirm before "fixing".
    """
    unic = g.unicode #save unicode

    glyph = f[g.name]
    slope = np.tanh(math.pi * angle / 180)

    # determine how far on the x axis the glyph should slide
    # to compensate for the slant.
    # meanYCenter:
    # -600 is a magic number that assumes a 2048 unit em square,
    # and -825 for a 2816 unit em square. (UPM*0.29296875)
    m = Transform(1, 0, slope, 1, 0, 0)
    xoffset, junk = m.transformPoint((0, meanYCenter))
    m = Transform(narrowAmount, 0, slope, 1, xoffset, 0)

    # Only run the mesh-based italicizer when there are outlines;
    # components/anchors/width are transformed either way below.
    if len(glyph) > 0:
        g2 = italicize(f[g.name], angle, xoffset=xoffset, stemWidth=stemWidth)
        f.insertGlyph(g2, g.name)

    transformFLGlyphMembers(f[g.name], m)

    # insertGlyph can drop supplementary-plane codepoints; restore them.
    if unic > 0xFFFF: #restore unicode
        g.unicode = unic
+
+
def italicize(glyph, angle=12, stemWidth=180, xoffset=-50):
    """Return an italicized copy of `glyph`.

    Works on a subdivided mesh of the outline: skews it, then corrects
    stem thickness and corner geometry, and finally refits curves onto
    the original segment structure.  Python 2 only (tuple-parameter
    lambdas below).
    """
    CURVE_CORRECTION_WEIGHT = .03
    CORNER_WEIGHT = 10

    # decompose the glyph into smaller segments
    ga, subsegments = segmentGlyph(glyph,25)
    va, e = glyphToMesh(ga)
    n = len(va)
    # Per-vertex outline direction and a corner measure (dot product of
    # incoming/outgoing directions; small value = sharp corner).
    grad = mapEdges(lambda a,(p,n): normalize(p-a), va, e)
    cornerWeights = mapEdges(lambda a,(p,n): normalize(p-a).dot(normalize(a-n)), grad, e)[:,0].reshape((-1,1))
    smooth = np.ones((n,1)) * CURVE_CORRECTION_WEIGHT

    controlPoints = findControlPointsInMesh(glyph, va, subsegments)
    smooth[controlPoints > 0] = 1
    smooth[cornerWeights < .6] = CORNER_WEIGHT
    # smooth[cornerWeights >= .9999] = 1

    out = va.copy()
    # NOTE(review): hascurves is computed but not used below.
    hascurves = False
    for c in glyph.contours:
        for s in c.segments:
            if s.type == "curve":
                hascurves = True
                break
        if hascurves:
            break
    if stemWidth > 100:
        # Heavier weights: pre-correct contour detail in skewed space.
        outCorrected = skewMesh(recompose(skewMesh(out, angle * 1.6), grad, e, smooth=smooth), -angle * 1.6)
        # out = copyMeshDetails(va, out, e, 6)
    else:
        outCorrected = out

    # create a transform for italicizing
    normals = edgeNormals(out, e)
    center = va + normals * stemWidth * .4
    if stemWidth > 130:
        center[:, 0] = va[:, 0] * .7 + center[:,0] * .3
    centerSkew = skewMesh(center.dot(np.array([[.97,0],[0,1]])), angle * .9)

    # apply the transform
    out = outCorrected + (centerSkew - center)
    out[:,1] = outCorrected[:,1]

    # make some corrections
    smooth = np.ones((n,1)) * .1
    out = alignCorners(glyph, out, subsegments)
    out = copyMeshDetails(skewMesh(va, angle), out, e, 7, smooth=smooth)
    # grad = mapEdges(lambda a,(p,n): normalize(p-a), skewMesh(outCorrected, angle*.9), e)
    # out = recompose(out, grad, e, smooth=smooth)

    out = skewMesh(out, angle * .1)
    out[:,0] += xoffset
    # out[:,1] = outCorrected[:,1]
    # Pin baseline points back to y = 0.
    out[va[:,1] == 0, 1] = 0
    gOut = meshToGlyph(out, ga)
    # gOut.width *= .97
    # gOut.width += 10
    # return gOut

    # recompose the glyph into original segments
    return fitGlyph(glyph, gOut, subsegments)
+
+
def transformFLGlyphMembers(g, m, transformAnchors = True):
    """Apply transform `m` to a glyph's width, component offsets and
    (optionally) anchors -- everything except the outline itself.
    """
    # g.transform(m)
    # m[0] is the x scale; widths scale horizontally only.
    g.width = g.width * m[0]
    p = m.transformPoint((0,0))
    for c in g.components:
        # Offset transforms relative to the transformed origin.
        d = m.transformPoint(c.offset)
        c.offset = (d[0] - p[0], d[1] - p[1])
    if transformAnchors:
        for a in g.anchors:
            aa = m.transformPoint((a.x,a.y))
            # NOTE(review): only x is updated; y is left untouched
            # (see the commented alternatives kept from the original).
            a.x = aa[0]
            # a.x,a.y = (aa[0] - p[0], aa[1] - p[1])
            # a.x = a.x - m[4]
+
+
def glyphToMesh(g):
    """Convert a glyph's on-curve points to (points, edges).

    Returns an (n, 2) array of point coordinates and a dict mapping each
    global point index to its [previous, next] neighbours within the same
    contour (contours are closed cycles).  Contours with fewer than two
    points are skipped.
    """
    points = []
    edges = {}
    offset = 0  # running index base of the current contour
    for c in g.contours:
        if len(c) < 2:
            continue
        for i,prev,next in rangePrevNext(len(c)):
            # points[0] of a segment is its on-curve point here.
            points.append((c[i].points[0].x, c[i].points[0].y))
            edges[i + offset] = np.array([prev + offset, next + offset], dtype=int)
        offset += len(c)
    return np.array(points), edges
+
+
def meshToGlyph(points, g):
    """Write mesh coordinates back into a copy of glyph `g`.

    Inverse of glyphToMesh: `points` must be in the same order that
    glyphToMesh produced (same contours, short contours skipped).
    """
    g1 = g.copy()
    j = 0  # index into `points`, running across contours
    for c in g1.contours:
        if len(c) < 2:
            continue
        for i in range(len(c)):
            c[i].points[0].x = points[j][0]
            c[i].points[0].y = points[j][1]
            j += 1
    return g1
+
+
def quantizeGradient(grad, book=None):
    """Snap each gradient vector to the nearest codebook direction.

    Args:
        grad: (n, 2) array of direction vectors.
        book: optional codebook of candidate directions; defaults to the
            four axis-aligned unit vectors.
    """
    # Fix: `book == None` compares elementwise when a NumPy array is
    # passed, and the resulting boolean array has no truth value.
    if book is None:
        book = np.array([(1,0),(0,1),(0,-1),(-1,0)])
    indexArray = vq(whiten(grad), book)[0]
    out = book[indexArray]
    for i,v in enumerate(out):
        out[i] = normalize(v)
    return out
+
+
def findControlPointsInMesh(glyph, va, subsegments):
    """Mark mesh vertices that begin a straight (line) segment.

    Returns an (n, 1) array with 1 at indices of subsegment start points
    whose originating glyph segment is of type "line", 0 elsewhere.
    """
    controlPointIndices = np.zeros((len(va),1))
    index = 0  # running vertex index across all subsegments
    for i,c in enumerate(subsegments):
        segmentCount = len(glyph.contours[i].segments) - 1
        for j,s in enumerate(c):
            if j < segmentCount:
                if glyph.contours[i].segments[j].type == "line":
                    controlPointIndices[index] = 1
            # s[1] is the number of mesh vertices this subsegment spans.
            index += s[1]
    return controlPointIndices
+
+
def recompose(v, grad, e, smooth=1, P=None, distance=None):
    """Solve for vertex positions whose edge directions match `grad`.

    Least-squares reconstruction (conjugate gradient per coordinate) of a
    mesh from target gradients, regularized toward the input positions
    `v` with weight `smooth`.  P and distance may be precomputed and
    passed in to amortize repeated calls.
    """
    n = len(v)
    # Fix: comparing a NumPy array to None with == broadcasts elementwise
    # and then fails the truth test; `is None` is the correct check.
    if distance is None:
        # py3-safe lambda (original used a py2 tuple parameter).
        distance = mapEdges(lambda a, pn: norm(pn[0] - a), v, e)
    if P is None:
        P = mP(v, e)
        P += np.identity(n) * smooth
    f = v.copy()
    for i, (prv, nxt) in e.items():
        f[i] = (grad[nxt] * distance[nxt] - grad[i] * distance[i])
    out = v.copy()
    f += v * smooth
    for i in range(len(out[0, :])):
        out[:, i] = cg(P, f[:, i])[0]
    return out
+
+
def mP(v, e):
    """Build the n x n system matrix for recompose().

    Diagonal entries are 2; each neighbour of vertex i contributes
    -2/len(edges) -- a Laplacian-like matrix over the edge graph.
    """
    n = len(v)
    M = np.zeros((n, n))
    # Fix: dict.iteritems() is Python-2-only; items() is equivalent here.
    for i, edges in e.items():
        w = -2 / float(len(edges))
        for index in edges:
            M[i, index] = w
        M[i, i] = 2
    return M
+
+
def normalize(v):
    """Return v scaled to unit length; a zero vector is returned as-is."""
    length = np.linalg.norm(v)
    return v if length == 0 else v / length
+
+
def mapEdges(func, v, e, *args):
    """Map `func` over the mesh, one call per vertex.

    Returns a copy of `v` where entry i is
    func(v[i], [v[j] for j in e[i]], *args); `v` itself is not modified.
    """
    b = v.copy()
    # Fix: dict.iteritems() is Python-2-only; items() is equivalent here.
    for i, edges in e.items():
        b[i] = func(v[i], [v[j] for j in edges], *args)
    return b
+
+
def getNormal(a,b,c):
    "Assumes TT winding direction"
    # Perpendiculars of the two edge directions (roll swaps x/y for a
    # 2-vector; the y negation completes the 90-degree rotation for
    # TrueType winding).
    p = np.roll(normalize(b - a), 1)
    n = -np.roll(normalize(c - a), 1)
    p[1] *= -1
    n[1] *= -1
    # print p, n, normalize((p + n) * .5)
    # Average of the two edge normals, renormalized.
    return normalize((p + n) * .5)
+
+
def edgeNormals(v, e):
    """Per-vertex outline normals.

    Assumes a mesh where each vertex has at least two edges.
    """
    # Fix: the original lambda used a Python-2 tuple parameter
    # `lambda a,(p,n): ...`, which is a SyntaxError on Python 3.
    return mapEdges(lambda a, pn: getNormal(a, pn[0], pn[1]), v, e)
+
+
def rangePrevNext(count):
    """Rows of (index, previous, next) for a cyclic range of `count` items."""
    idx = np.arange(count, dtype=int)
    prevIdx = np.roll(idx, 1)
    nextIdx = np.roll(idx, -1)
    return np.vstack((idx, prevIdx, nextIdx)).T
+
+
def skewMesh(v, angle):
    """Skew mesh points horizontally by `angle` degrees.

    NOTE(review): uses tanh of the angle rather than tan, matching the
    rest of this module (see italicizeGlyph) -- close for small angles.
    """
    # Fix: the original computed np.tanh([...]) -- a 1-element array --
    # which makes np.array([[1,0],[slope,1]]) a ragged nested sequence
    # (an error on modern NumPy).  A scalar slope is equivalent.
    slope = np.tanh(math.pi * angle / 180)
    return v.dot(np.array([[1, 0], [slope, 1]]))
+
+
def labelConnected(e):
    """Label each vertex with the index of its connected contour.

    Relies on vertices being numbered consecutively per contour (as
    glyphToMesh produces them): a "next" pointer that goes backwards
    marks the end of a contour cycle.
    """
    label = 0
    labels = np.zeros((len(e), 1))
    # Fix: dict.iteritems() is Python-2-only; items() is equivalent here
    # (and insertion-ordered on Python 3.7+, matching construction order).
    for i, (prev, next) in e.items():
        labels[i] = label
        if next <= i:
            label += 1
    return labels
+
+
def copyGradDetails(a, b, e, scale=15):
    """Blend the high-frequency detail of gradient `b` into gradient `a`,
    processed per connected contour.

    For each contour: smooth(b) + (a - smooth(a)), using a wrapped
    1-D Gaussian of width `scale` along the contour.
    """
    labels = labelConnected(e)
    out = a.astype(float).copy()
    # Fix: labels is a float column vector, so labels[-1]+1 is a float
    # array -- range() requires a real integer (TypeError on Python 3).
    # (Also removed the unused local `n = len(a)`.)
    for i in range(int(labels[-1]) + 1):
        mask = (labels == i).flatten()
        out[mask, :] = (gaussian(b[mask, :], scale, mode="wrap", axis=0)
                        + a[mask, :]
                        - gaussian(a[mask, :], scale, mode="wrap", axis=0))
    return out
+
+
def copyMeshDetails(va, vb, e, scale=5, smooth=.01):
    """Transfer the local gradient detail of mesh `va` onto mesh `vb`,
    then reconstruct positions from the blended gradients."""
    # Fix: py3-safe lambdas -- the originals used Python-2 tuple
    # parameters (`lambda a,(p,n): ...`), a SyntaxError on Python 3.
    gradA = mapEdges(lambda a, pn: normalize(pn[0] - a), va, e)
    gradB = mapEdges(lambda a, pn: normalize(pn[0] - a), vb, e)
    grad = copyGradDetails(gradA, gradB, e, scale)
    grad = mapEdges(lambda a, pn: normalize(a), grad, e)
    return recompose(vb, grad, e, smooth=smooth)
+
+
def condenseGlyph(glyph, scale=.8, stemWidth=185):
    """Return a horizontally condensed copy of `glyph`.

    Scales the outline mesh in x, compensates stems via the outline
    normals, reconstructs smooth contours, and refits curves onto the
    original segment structure.  Python 2 only (tuple-parameter lambda).
    """
    ga, subsegments = segmentGlyph(glyph, 25)
    va, e = glyphToMesh(ga)
    n = len(va)

    normals = edgeNormals(va,e)
    # Scale x by `scale`; y is untouched.
    cn = va.dot(np.array([[scale, 0],[0,1]]))
    grad = mapEdges(lambda a,(p,n): normalize(p-a), cn, e)
    # ograd = mapEdges(lambda a,(p,n): normalize(p-a), va, e)

    # Push contours outward along normals to restore stem thickness
    # lost in the horizontal scale.
    cn[:,0] -= normals[:,0] * stemWidth * .5 * (1 - scale)
    out = recompose(cn, grad, e, smooth=.5)
    # out = recompose(out, grad, e, smooth=.1)
    out = recompose(out, grad, e, smooth=.01)

    # cornerWeights = mapEdges(lambda a,(p,n): normalize(p-a).dot(normalize(a-n)), grad, e)[:,0].reshape((-1,1))
    # smooth = np.ones((n,1)) * .1
    # smooth[cornerWeights < .6] = 10
    #
    # grad2 = quantizeGradient(grad).astype(float)
    # grad2 = copyGradDetails(grad, grad2, e, scale=10)
    # grad2 = mapEdges(lambda a,e: normalize(a), grad2, e)
    # out = recompose(out, grad2, e, smooth=smooth)
    out[:,0] += 15
    # Restore exact original y coordinates.
    out[:,1] = va[:,1]
    # out = recompose(out, grad, e, smooth=.5)
    gOut = meshToGlyph(out, ga)
    gOut = fitGlyph(glyph, gOut, subsegments)
    for i,seg in enumerate(gOut):
        gOut[i].points[0].y = glyph[i].points[0].y
    return gOut
diff --git a/misc/pylib/fontbuild/markFeature.py b/misc/pylib/fontbuild/markFeature.py
new file mode 100755
index 000000000..42cafe4c7
--- /dev/null
+++ b/misc/pylib/fontbuild/markFeature.py
@@ -0,0 +1,55 @@
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from ufo2ft.kernFeatureWriter import KernFeatureWriter
+from ufo2ft.makeotfParts import FeatureOTFCompiler
+
+
class RobotoFeatureCompiler(FeatureOTFCompiler):
    """FeatureOTFCompiler configured with Roboto's anchor-pair scheme."""

    def precompile(self):
        # Generated mark/kern features replace any hand-written ones.
        self.overwriteFeatures = True

    def setupAnchorPairs(self):
        # [base anchor, mark anchor] pairs used for the mark feature.
        self.anchorPairs = [
            ["top", "_marktop"],
            ["bottom", "_markbottom"],
            ["top_dd", "_marktop_dd"],
            ["bottom_dd", "_markbottom_dd"],
            ["rhotichook", "_markrhotichook"],
            ["top0315", "_marktop0315"],
            ["parent_top", "_markparent_top"],
            ["parenthesses.w1", "_markparenthesses.w1"],
            ["parenthesses.w2", "_markparenthesses.w2"],
            ["parenthesses.w3", "_markparenthesses.w3"]]

        # Mark-to-mark attachment pairs.
        self.mkmkAnchorPairs = [
            ["mkmktop", "_marktop"],
            ["mkmkbottom_acc", "_markbottom"],

            # By providing a pair with accent anchor _bottom and no base anchor,
            # we designate all glyphs with _bottom as accents (so that they will
            # be used as base glyphs for mkmk features) without generating any
            # positioning rules actually using this anchor (which is instead
            # used to generate composite glyphs). This is all for consistency
            # with older roboto versions.
            ["", "_bottom"],
        ]

        # No ligature anchors are used.
        self.ligaAnchorPairs = []
+
+
class RobotoKernWriter(KernFeatureWriter):
    """KernFeatureWriter matching Roboto's kerning-class naming, where
    left/right classes are suffixed _L/_R instead of ufo2ft's defaults."""
    leftFeaClassRe = r"@_(.+)_L$"
    rightFeaClassRe = r"@_(.+)_R$"
diff --git a/misc/pylib/fontbuild/mitreGlyph.py b/misc/pylib/fontbuild/mitreGlyph.py
new file mode 100644
index 000000000..d0834ed84
--- /dev/null
+++ b/misc/pylib/fontbuild/mitreGlyph.py
@@ -0,0 +1,111 @@
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Mitre Glyph:
+
+mitreSize : Length of the segment created by the mitre. The default is 4.
+maxAngle : Maximum angle in radians at which segments will be mitred. The default is .9 (about 50 degrees).
+ Works for both inside and outside angles
+
+"""
+
+import math
+from robofab.objects.objectsRF import RPoint, RSegment
+from fontbuild.convertCurves import replaceSegments
+
def getTangents(contours):
    """For every segment end point, return its pair of tangent vectors.

    Returns a flat list (contours concatenated, one entry per segment)
    of (incomingTangent, outgoingTangent) vectors at the segment's final
    on-curve point.
    """
    tmap = []
    for c in contours:
        clen = len(c)
        for i in range(clen):
            s = c[i]
            p = s.points[-1]          # on-curve end point of this segment
            ns = c[(i + 1) % clen]    # next segment (cyclic)
            ps = c[(clen + i - 1) % clen]  # previous segment (cyclic)
            # Outgoing direction: first control point of a curve, else
            # the next segment's end point.
            np = ns.points[1] if ns.type == 'curve' else ns.points[-1]
            # Incoming direction.
            # NOTE(review): s.points[2] on a cubic segment looks like it
            # may coincide with s.points[-1] depending on robofab's point
            # layout -- confirm against RSegment before changing.
            pp = s.points[2] if s.type == 'curve' else ps.points[-1]
            tmap.append((pp - p, np - p))
    return tmap
+
def normalizeVector(p):
    """Return p scaled to unit length, or RPoint(0, 0) for a zero vector."""
    length = getMagnitude(p)
    if length == 0:
        return RPoint(0, 0)
    return p * (1 / length)
+
def getMagnitude(p):
    """Euclidean length of a vector-like point (reads p.x and p.y)."""
    return math.sqrt(p.x * p.x + p.y * p.y)
+
def getDistance(v1, v2):
    """Euclidean distance between two points."""
    delta = RPoint(v1.x - v2.x, v1.y - v2.y)
    return getMagnitude(delta)
+
def getAngle(v1, v2):
    """Signed angle from v2 to v1, normalized into [0, 2*pi)."""
    raw = math.atan2(v1.y, v1.x) - math.atan2(v2.y, v2.x)
    return (raw + (2 * math.pi)) % (2 * math.pi)
+
def angleDiff(a, b):
    """Smallest absolute difference between two angles, in [0, pi]."""
    wrapped = abs(a - b) % (math.pi * 2)
    return math.pi - abs(wrapped - math.pi)
+
def getAngle2(v1, v2):
    """Unsigned angle between two vectors, in [0, pi]."""
    angle1 = math.atan2(v1.y, v1.x)
    angle2 = math.atan2(v2.y, v2.x)
    return abs(angleDiff(angle1, angle2))
+
def getMitreOffset(n,v1,v2,mitreSize=4,maxAngle=.9):
    """Compute the two corner offsets for mitring a corner.

    Args:
        n: the segment being considered (unused here; kept for the caller's
            signature).
        v1, v2: incoming/outgoing tangent vectors at the corner.
        mitreSize: length of the flat created by the mitre.
        maxAngle: only corners sharper than this (radians) are mitred.

    Returns (offset1, offset2) RPoints, or None when the corner should
    not be mitred.
    """

    # dont mitre if segment is too short
    if abs(getMagnitude(v1)) < mitreSize * 2 or abs(getMagnitude(v2)) < mitreSize * 2:
        return
    angle = getAngle2(v2,v1)
    v1 = normalizeVector(v1)
    v2 = normalizeVector(v2)
    # Parallel tangents: no corner to mitre.
    if v1.x == v2.x and v1.y == v2.y:
        return


    # only mitre corners sharper than maxAngle
    if angle > maxAngle:
        return

    # Offsets are rounded to integer coordinates along each tangent.
    radius = mitreSize / abs(getDistance(v1,v2))
    offset1 = RPoint(round(v1.x * radius), round(v1.y * radius))
    offset2 = RPoint(round(v2.x * radius), round(v2.y * radius))
    return offset1, offset2
+
def mitreGlyph(g,mitreSize,maxAngle):
    """Mitre every sufficiently sharp corner of glyph `g` in place.

    For each mitred corner the original segment end is pulled back along
    the incoming tangent and a short line segment is inserted along the
    outgoing tangent.
    """
    if g == None:
        return

    tangents = getTangents(g.contours)
    sid = -1  # global segment index into `tangents`
    for c in g.contours:
        segments = []
        needsMitring = False
        for s in c:
            sid += 1
            v1, v2 = tangents[sid]
            off = getMitreOffset(s,v1,v2,mitreSize,maxAngle)
            s1 = s.copy()
            if off != None:
                offset1, offset2 = off
                # New corner point along the outgoing tangent.
                p2 = s.points[-1] + offset2
                s2 = RSegment('line', [(p2.x, p2.y)])
                # Pull the original end point back along the incoming tangent.
                s1.points[0] += offset1
                segments.append(s1)
                segments.append(s2)
                needsMitring = True
            else:
                segments.append(s1)
        if needsMitring:
            replaceSegments(c, segments)
diff --git a/misc/pylib/fontbuild/mix.py b/misc/pylib/fontbuild/mix.py
new file mode 100644
index 000000000..5e5388b3e
--- /dev/null
+++ b/misc/pylib/fontbuild/mix.py
@@ -0,0 +1,360 @@
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from numpy import array, append
+import copy
+import json
+from robofab.objects.objectsRF import RPoint, RGlyph
+from robofab.world import OpenFont
+from decomposeGlyph import decomposeGlyph
+
+
class FFont:
    """Font wrapper for floating point operations.

    Holds a RoboFab font's glyphs (as FGlyph), stem hints and kerning in
    plain python/numpy containers so they can be interpolated numerically
    and written back afterwards.
    """

    def __init__(self, f=None):
        self.glyphs = {}   # glyph name -> FGlyph
        self.hstems = []
        self.vstems = []
        self.kerning = {}
        if isinstance(f, FFont):
            # Copy-construct from another FFont (deep-copies the glyphs).
            for key, g in f.glyphs.iteritems():
                self.glyphs[key] = g.copy()
            self.hstems = list(f.hstems)
            self.vstems = list(f.vstems)
            self.kerning = dict(f.kerning)
        elif f != None:
            self.copyFromFont(f)

    def copyFromFont(self, f):
        """Populate this FFont from the RoboFab font *f*."""
        for g in f:
            self.glyphs[g.name] = FGlyph(g)
        self.hstems = [s for s in f.info.postscriptStemSnapH]
        self.vstems = [s for s in f.info.postscriptStemSnapV]
        self.kerning = f.kerning.asDict()

    def copyToFont(self, f):
        """Write glyphs, stem hints and kerning back into the RoboFab font
        *f*. A glyph that fails to copy is reported and skipped so one bad
        glyph does not abort the whole export."""
        for g in f:
            try:
                gF = self.glyphs[g.name]
                gF.copyToGlyph(g)
            except Exception:
                # Narrowed from a bare `except:`; also fixed the missing
                # space before the glyph name in the message.
                print("Copy to glyph failed for " + g.name)
        f.info.postscriptStemSnapH = self.hstems
        f.info.postscriptStemSnapV = self.vstems
        for pair in self.kerning:
            f.kerning[pair] = self.kerning[pair]

    def getGlyph(self, gname):
        """Return the FGlyph named *gname*, or None when absent."""
        return self.glyphs.get(gname)

    def setGlyph(self, gname, glyph):
        self.glyphs[gname] = glyph

    def addDiff(self, b, c):
        """Return a new FFont where each glyph is self + (b - c).

        Glyphs whose outlines are incompatible are reported and keep the
        original (copied) shape.
        """
        newFont = FFont(self)
        for key, g in newFont.glyphs.iteritems():
            gB = b.getGlyph(key)
            gC = c.getGlyph(key)
            try:
                newFont.glyphs[key] = g.addDiff(gB, gC)
            except Exception:
                print("Add diff failed for '%s'" % key)
        return newFont
+
class FGlyph:
    """Temporary floating-point-compatible glyph data structure.

    All coordinates (advance width, component scales/offsets, anchors and
    contour points) are flattened into two parallel numpy float arrays,
    dataX and dataY. The width/components/anchors/contours attributes
    store *indices* into those arrays, so glyph arithmetic (+, -, *) is
    plain vector math on dataX/dataY.
    """

    def __init__(self, g=None):
        self.contours = []    # per contour: [(xIndex, yIndex), ...] per point
        self.width = 0.       # becomes the dataX index of the advance width
        self.components = []  # [(xIndex, yIndex), ...]; scale at +0, offset at +1
        self.anchors = []     # [(xIndex, yIndex), ...]
        # NOTE: self.name / dataX / dataY are set by copyFromGlyph or copy.
        if g != None:
            self.copyFromGlyph(g)

    def copyFromGlyph(self, g):
        # Flatten glyph *g* into index lists plus two coordinate arrays.
        self.name = g.name
        valuesX = []
        valuesY = []
        # The advance width is stored first, so self.width is index 0.
        self.width = len(valuesX)
        valuesX.append(g.width)
        for c in g.components:
            # Per component: scale appended first, then offset, so index i
            # addresses the scale and i+1 the offset.
            self.components.append((len(valuesX), len(valuesY)))
            valuesX.append(c.scale[0])
            valuesY.append(c.scale[1])
            valuesX.append(c.offset[0])
            valuesY.append(c.offset[1])

        for a in g.anchors:
            self.anchors.append((len(valuesX), len(valuesY)))
            valuesX.append(a.x)
            valuesY.append(a.y)

        for i in range(len(g)):
            self.contours.append([])
            for j in range(len(g[i].points)):
                self.contours[i].append((len(valuesX), len(valuesY)))
                valuesX.append(g[i].points[j].x)
                valuesY.append(g[i].points[j].y)

        self.dataX = array(valuesX, dtype=float)
        self.dataY = array(valuesY, dtype=float)

    def copyToGlyph(self, g):
        # Write values back into a structurally identical RoboFab glyph.
        # Offsets, anchors and points are rounded to ints; component scales
        # stay floats.
        g.width = self._derefX(self.width)
        if len(g.components) == len(self.components):
            for i in range(len(self.components)):
                g.components[i].scale = (self._derefX(self.components[i][0] + 0, asInt=False),
                                         self._derefY(self.components[i][1] + 0, asInt=False))
                g.components[i].offset = (self._derefX(self.components[i][0] + 1),
                                          self._derefY(self.components[i][1] + 1))
        if len(g.anchors) == len(self.anchors):
            for i in range(len(self.anchors)):
                g.anchors[i].x = self._derefX(self.anchors[i][0])
                g.anchors[i].y = self._derefY(self.anchors[i][1])
        for i in range(len(g)):
            for j in range(len(g[i].points)):
                g[i].points[j].x = self._derefX(self.contours[i][j][0])
                g[i].points[j].y = self._derefY(self.contours[i][j][1])

    def isCompatible(self, g):
        # Glyphs interpolate only when their flattened arrays and contour
        # counts have identical lengths.
        return (len(self.dataX) == len(g.dataX) and
                len(self.dataY) == len(g.dataY) and
                len(g.contours) == len(self.contours))

    def __add__(self, g):
        # Pointwise sum of two compatible glyphs.
        if self.isCompatible(g):
            newGlyph = self.copy()
            newGlyph.dataX = self.dataX + g.dataX
            newGlyph.dataY = self.dataY + g.dataY
            return newGlyph
        else:
            print "Add failed for '%s'" %(self.name)
            raise Exception

    def __sub__(self, g):
        # Pointwise difference of two compatible glyphs.
        if self.isCompatible(g):
            newGlyph = self.copy()
            newGlyph.dataX = self.dataX - g.dataX
            newGlyph.dataY = self.dataY - g.dataY
            return newGlyph
        else:
            print "Subtract failed for '%s'" %(self.name)
            raise Exception

    def __mul__(self, scalar):
        # Uniform scale of all coordinates (both axes).
        newGlyph = self.copy()
        newGlyph.dataX = self.dataX * scalar
        newGlyph.dataY = self.dataY * scalar
        return newGlyph

    def scaleX(self, scalar):
        # Horizontal-only scale. Component scale entries (at index
        # components[i][0]) are restored afterwards so only offsets and
        # outlines stretch, not the referenced components themselves.
        newGlyph = self.copy()
        if len(self.dataX) > 0:
            newGlyph.dataX = self.dataX * scalar
            for i in range(len(newGlyph.components)):
                newGlyph.dataX[newGlyph.components[i][0]] = self.dataX[newGlyph.components[i][0]]
        return newGlyph

    def shift(self, ammount):
        # Horizontal translation; component scale entries are restored for
        # the same reason as in scaleX.
        newGlyph = self.copy()
        newGlyph.dataX = self.dataX + ammount
        for i in range(len(newGlyph.components)):
            newGlyph.dataX[newGlyph.components[i][0]] = self.dataX[newGlyph.components[i][0]]
        return newGlyph

    def interp(self, g, v):
        """Interpolate towards glyph *g* by factor *v* (an RPoint: v.x for
        the x axis, v.y for the y axis). Raises when incompatible."""
        gF = self.copy()
        if not self.isCompatible(g):
            print "Interpolate failed for '%s'; outlines incompatible" %(self.name)
            raise Exception

        gF.dataX += (g.dataX - gF.dataX) * v.x
        gF.dataY += (g.dataY - gF.dataY) * v.y
        return gF

    def copy(self):
        # Shallow-copies the index lists (they are never mutated) and
        # deep-copies the coordinate arrays (which are).
        ng = FGlyph()
        ng.contours = list(self.contours)
        ng.width = self.width
        ng.components = list(self.components)
        ng.anchors = list(self.anchors)
        ng.dataX = self.dataX.copy()
        ng.dataY = self.dataY.copy()
        ng.name = self.name
        return ng

    def _derefX(self, id, asInt=True):
        # Fetch dataX[id]; rounded to int by default.
        val = self.dataX[id]
        return int(round(val)) if asInt else val

    def _derefY(self, id, asInt=True):
        # Fetch dataY[id]; rounded to int by default.
        val = self.dataY[id]
        return int(round(val)) if asInt else val

    def addDiff(self, gB, gC):
        """Return self + (gB - gC)."""
        newGlyph = self + (gB - gC)
        return newGlyph
+
+
+
class Master:
    """One interpolation master: a font plus its interpolation position.

    *font* may be a RoboFab font object, a UFO path (str), an FFont, or a
    Mix. *v* is the master's position: a scalar (applied to both axes) or
    an RPoint. *kernlist* optionally names a whitespace-separated kerning
    file ("<left> <right> <value>" per line).
    """

    def __init__(self, font=None, v=0, kernlist=None, overlay=None):
        if isinstance(font, FFont):
            self.font = None
            self.ffont = font
        elif isinstance(font, str):
            self.openFont(font, overlay)
        elif isinstance(font, Mix):
            # NOTE(review): this branch never sets self.ffont; accessing
            # master.ffont on a Mix-backed master would fail -- confirm
            # callers only use .font here (Mix.getFGlyph/getKerning do).
            self.font = font
        else:
            self.font = font
            self.ffont = FFont(font)
        # Scalar positions apply to both axes.
        if isinstance(v, float) or isinstance(v, int):
            self.v = RPoint(v, v)
        else:
            self.v = v
        if kernlist != None:
            # Fix: close the file (with-statement) and skip blank lines,
            # which previously raised IndexError on k[0].
            with open(kernlist) as f:
                kerns = [line.strip().split() for line in f]

            self.kernlist = [{'left': k[0], 'right': k[1], 'value': k[2]}
                             for k in kerns
                             if k and not k[0].startswith("#")]
            # TODO implement class based kerning / external kerning file

    def openFont(self, path, overlayPath=None):
        """Open the UFO at *path*; decompose glyphs that mix contours and
        components; optionally merge in all glyphs from *overlayPath*."""
        self.font = OpenFont(path)
        for g in self.font:
            # Mixed contour+component glyphs are flattened so the numeric
            # glyph model does not have to handle both at once.
            if len(g) > 0 and len(g.components) > 0:
                decomposeGlyph(self.font, self.font[g.name])

        if overlayPath != None:
            overlayFont = OpenFont(overlayPath)
            font = self.font
            for overlayGlyph in overlayFont:
                font.insertGlyph(overlayGlyph)

        self.ffont = FFont(self.font)
+
+
class Mix:
    """A weighted mix of two Masters.

    *v* is the mix factor: a scalar (applied to both axes) or an RPoint
    with separate x/y factors.
    """

    def __init__(self, masters, v):
        self.masters = masters
        if isinstance(v, float) or isinstance(v, int):
            self.v = RPoint(v, v)
        else:
            self.v = v

    def getFGlyph(self, master, gname):
        """Return *master*'s glyph for *gname*, recursing when the master
        itself wraps a Mix."""
        if isinstance(master.font, Mix):
            # Bug fix: previously referenced an undefined global `font`,
            # raising NameError for nested mixes.
            return master.font.mixGlyphs(gname)
        return master.ffont.getGlyph(gname)

    def getGlyphMasters(self, gname):
        """Return (first, last) master glyphs for *gname*.

        Only the two-master case is supported; with more than two masters
        this falls through and returns None.
        """
        masters = self.masters
        if len(masters) <= 2:
            return self.getFGlyph(masters[0], gname), self.getFGlyph(masters[-1], gname)

    def generateFFont(self):
        """Build a new FFont with every glyph and the kerning mixed."""
        ffont = FFont(self.masters[0].ffont)
        for key, g in ffont.glyphs.iteritems():
            ffont.glyphs[key] = self.mixGlyphs(key)
        ffont.kerning = self.mixKerns()
        return ffont

    def generateFont(self, baseFont):
        """Return a copy of *baseFont* with mixed outlines and kerning.

        Glyphs that could not be mixed are marked (g.mark = True).
        """
        newFont = baseFont.copy()
        # self.mixStems(newFont) todo _ fix stems code
        for g in newFont:
            gF = self.mixGlyphs(g.name)
            if gF == None:
                g.mark = True
            elif isinstance(gF, RGlyph):
                newFont[g.name] = gF.copy()
            else:
                gF.copyToGlyph(g)

        newFont.kerning.clear()
        newFont.kerning.update(self.mixKerns() or {})
        return newFont

    def mixGlyphs(self, gname):
        """Interpolate the two master glyphs for *gname*; on failure fall
        back to a copy of the first master's glyph (or None)."""
        gA, gB = self.getGlyphMasters(gname)
        try:
            return gA.interp(gB, self.v)
        except Exception:
            print("mixglyph failed for %s" % (gname))
            if gA != None:
                return gA.copy()

    def getKerning(self, master):
        """Return *master*'s kerning dict, recursing into nested mixes."""
        if isinstance(master.font, Mix):
            return master.font.mixKerns()
        return master.ffont.kerning

    def mixKerns(self):
        """Mix the kerning of the first and last master."""
        masters = self.masters
        kA, kB = self.getKerning(masters[0]), self.getKerning(masters[-1])
        return interpolateKerns(kA, kB, self.v)
+
+
def narrowFLGlyph(g, gThin, factor=.75):
    """Narrow glyph *g* in place by applying the delta between *gThin* and
    a horizontally condensed copy of it (scaled by *factor*)."""
    gF = FGlyph(g)
    if not isinstance(gThin, FGlyph):
        gThin = FGlyph(gThin)
    gCondensed = gThin.scaleX(factor)
    try:
        gNarrow = gF + (gCondensed - gThin)
        gNarrow.copyToGlyph(g)
    except Exception:
        # Incompatible outlines: leave the glyph unchanged and report it.
        print("No dice for: " + g.name)
+
def interpolate(a, b, v, e=0):
    """Interpolate from *a* to *b* by factor *v*.

    With e == 0 the result is plain linear interpolation. A non-zero *e*
    blends the linear result towards a cubic-eased one (e == 1 is fully
    cubic).
    """
    if e == 0:
        return a + (b - a) * v
    eased = (b - a) * v * v * v + a   # cubic easing
    linear = a + (b - a) * v          # linear easing
    return linear + (eased - linear) * e
+
def interpolateKerns(kA, kB, v):
    """Interpolate two kerning dicts by v.x.

    Pairs present in only one master are interpolated against 0. (An
    older version of this code simply took the first master's values; see
    https://github.com/google/roboto/issues/213 for the history.)
    """
    kerns = {}
    for pair, val in kA.items():
        kerns[pair] = interpolate(val, kB.get(pair, 0), v.x)
    for pair, val in kB.items():
        mirrored = interpolate(val, kA.get(pair, 0), 1 - v.x)
        if pair not in kerns:
            kerns[pair] = mirrored
        else:
            # Both directions must agree (sanity check).
            assert abs(kerns[pair] - mirrored) < 1e-6
    return kerns
diff --git a/misc/restore-diacritics-kerning.py b/misc/restore-diacritics-kerning.py
new file mode 100644
index 000000000..d0fe3100f
--- /dev/null
+++ b/misc/restore-diacritics-kerning.py
@@ -0,0 +1,431 @@
+#!/usr/bin/env python
+# encoding: utf8
+#
+# This script was used specifically to re-introduce a bunch of kerning values
+# that where lost in an old kerning cleanup that failed to account for
+# automatically composed glyphs defined in diacritics.txt.
+#
+# Steps:
+# 1. git diff 10e15297b 10e15297b^ > 10e15297b.diff
+# 2. edit 10e15297b.diff and remove the python script add
+# 3. fetch copies of kerning.plist and groups.plist from before the loss change
+# bold-groups.plist
+# bold-kerning.plist
+# regular-groups.plist
+# regular-kerning.plist
+# 4. run this script
+#
+from __future__ import print_function
+import os, sys, plistlib, json
+from collections import OrderedDict
+from ConfigParser import RawConfigParser
+from argparse import ArgumentParser
+from fontTools import ttLib
+from robofab.objects.objectsRF import OpenFont
+
+
+srcFontPaths = ['src/Interface-Regular.ufo', 'src/Interface-Bold.ufo']
+
+
def getTTGlyphList(font): # -> { 'Omega': [2126, ...], ... }
    """Build a glyph-name -> [codepoint, ...] map from a compiled font.

    *font* may be a filename (opened via fontTools) or an already-open
    TTFont-like object. Returns (mapping, font). Raises when the font has
    no cmap table.
    """
    if isinstance(font, str):
        font = ttLib.TTFont(font)

    if 'cmap' not in font:
        raise Exception('missing cmap table')

    # Prefer the highest-format Unicode cmap subtable.
    # platformID: 0=unicode, 1=mac, 2=(reserved), 3=microsoft
    best = None
    bestFormat = 0
    for subtable in font['cmap'].tables:
        if subtable.platformID == 0 and subtable.format > bestFormat:
            best = subtable
            bestFormat = subtable.format

    gl = {}
    if best is not None:
        for codepoint, glyphname in best.cmap.items():
            gl.setdefault(glyphname, []).append(codepoint)

    return gl, font
+
+
def parseAGL(filename): # -> { 2126: 'Omega', ... }
    """Parse an Adobe Glyph List file into {codepoint: glyph name}.

    Comment/empty lines and multi-codepoint ("higher-level combination")
    entries are skipped; only 1:1 mappings are kept.
    """
    mapping = {}
    with open(filename, 'r') as f:
        for rawLine in f:
            # e.g. "Omega;2126" kept, "dalethatafpatah;05D3 05B2" skipped
            line = rawLine.strip()
            if not line or line.startswith('#'):
                continue
            name, uc = tuple(part.strip() for part in line.split(';'))
            if ' ' not in uc:
                mapping[int(uc, 16)] = name
    return mapping
+
+
def parseGlyphComposition(composite):
    """Parse one diacritics.txt composition line.

    Format: "base+accent1:pos1+...=glyphName[/offsX,offsY]"
    Returns (glyphName, baseName, [[accentName, pos], ...], [offsX, offsY]);
    the offset defaults to [0, 0] when absent.
    """
    sides = composite.split("=")
    target = sides[1].split("/")
    glyphName = target[0]
    offset = [0, 0] if len(target) == 1 else [int(v) for v in target[1].split(",")]
    parts = sides[0].split("+")
    baseName = parts[0]
    accentNames = [a.split(":") for a in parts[1:]]
    return (glyphName, baseName, accentNames, offset)
+
+
def loadGlyphCompositions(filename):
    """Load a diacritics.txt file into an ordered map
    {glyphName: (baseName, accentNames, offset)}; see
    parseGlyphComposition for the per-line format."""
    compositions = OrderedDict()
    with open(filename, 'r') as f:
        for rawLine in f:
            line = rawLine.strip()
            if not line or line.startswith('#'):
                continue
            glyphName, baseName, accentNames, offset = parseGlyphComposition(line)
            compositions[glyphName] = (baseName, accentNames, offset)
    return compositions
+
+
def loadNamesFromDiff(diffFilename):
    """Extract glyph names from a git diff of kerning/groups plists.

    Scans added lines (those starting with "+\\t") and collects the text
    content of the first XML-ish element on each line (e.g. the name in
    "<key>Aacute</key>"), skipping pure integers (kerning values) and
    group names (starting with "@"). Returns a set of names.
    """
    with open(diffFilename, 'r') as f:
        addedLines = [s.strip() for s in f.read().splitlines() if s.startswith('+\t')]
    # The original code also filtered lines starting with '<int'/'<arr'/'</',
    # but after the '+\t' filter every element starts with '+', so those
    # checks were dead and have been removed.
    namesInDiff = set()
    for s in addedLines:
        p = s.find('>')
        if p == -1:
            continue
        p2 = s.find('<', p + 1)
        if p2 == -1:
            continue
        name = s[p + 1:p2]
        try:
            int(name)  # numeric content, e.g. "<integer>-96</integer>"
            continue
        except ValueError:
            pass
        if not name.startswith('@'):
            namesInDiff.add(name)
    return namesInDiff
+
+
def loadGroups(filename):
    """Load a UFO groups.plist.

    Returns (groups, nameMap): groups is the raw plist
    {groupName: [glyphName, ...]}, nameMap the inverse index
    {glyphName: set(groupName)}.
    """
    groups = plistlib.readPlist(filename)
    nameMap = {}
    for groupName, glyphNames in groups.iteritems():
        for glyphName in glyphNames:
            if glyphName not in nameMap:
                nameMap[glyphName] = set()
            nameMap[glyphName].add(groupName)
    return groups, nameMap
+
+
def loadKerning(filename):
    """Load a UFO kerning.plist and build lookup indexes.

    The plist maps left-hand names to {right-hand name: value} dicts
    (names starting with "@" are groups). Returns
    (kerning, leftIndex, rightIndex, rightGroupIndex):
      kerning         -- the raw plist dict
      leftIndex       -- {left glyph name: <ref to its right-hand dict>}
                         (left *glyphs* only, not groups)
      rightIndex      -- {right glyph name: [(leftName, value), ...]}
      rightGroupIndex -- {right @group name: [(leftName, value), ...]}
    """
    kerning = plistlib.readPlist(filename)

    leftIndex = {}
    rightIndex = {}
    rightGroupIndex = {}

    for leftName, right in kerning.iteritems():
        if leftName[0] != '@':
            leftIndex[leftName] = right

        for rightName, kernVal in right.iteritems():
            entry = (leftName, kernVal)
            if rightName[0] == '@':
                rightGroupIndex.setdefault(rightName, []).append(entry)
            else:
                rightIndex.setdefault(rightName, []).append(entry)

    return kerning, leftIndex, rightIndex, rightGroupIndex
+
+
def loadAltNamesDB(agl, fontFilename):
    """Combine glyph names from a compiled font's cmap with AGL names.

    Returns (uc2names, name2ucs):
      uc2names -- {codepoint: [name, ...]}
      name2ucs -- {name: [codepoint, ...]}
    """
    # Names straight from the compiled font: {'Omega': [2126, ...], ...}
    name2ucs, _ = getTTGlyphList(fontFilename)

    # Invert into codepoint -> names.
    uc2names = {}
    for name, ucs in name2ucs.iteritems():
        for uc in ucs:
            uc2names.setdefault(uc, []).append(name)

    # Merge in the AGL entries ({2126: 'Omega', ...}).
    for uc, name in agl.iteritems():
        name2ucs.setdefault(name, []).append(uc)
        uc2names.setdefault(uc, []).append(name)

    return uc2names, name2ucs
+
+
def loadLocalNamesDB(agl, diacriticComps): # { 2126: ['Omega', ...], ...}
    """Build codepoint<->name maps from the local UFO sources.

    Merges the character mappings of every font in srcFontPaths, then
    adds names of automatically-composed diacritic glyphs (resolved via
    the AGL) so they are addressable even if absent from the UFOs.
    Returns (uc2names, name2ucs); name2ucs values are sets.
    """
    uc2names = None

    # Union of the character mappings of all source fonts.
    for fontPath in srcFontPaths:
        font = OpenFont(fontPath)
        if uc2names is None:
            uc2names = font.getCharacterMapping() # { 2126: ['Omega', ...], ...}
        else:
            for uc, names in font.getCharacterMapping().iteritems():
                names2 = uc2names.get(uc, [])
                for name in names:
                    if name not in names2:
                        names2.append(name)
                uc2names[uc] = names2

    # agl { 2126: 'Omega', ...} -> { 'Omega': [2126, ...], ...}
    aglName2Ucs = {}
    for uc, name in agl.iteritems():
        aglName2Ucs.setdefault(name, []).append(uc)

    # Make composed diacritic glyph names reachable from their codepoints.
    for glyphName, comp in diacriticComps.iteritems():
        for uc in aglName2Ucs.get(glyphName, []):
            names = uc2names.get(uc, [])
            if glyphName not in names:
                names.append(glyphName)
            uc2names[uc] = names

    # Invert into name -> set(codepoints).
    name2ucs = {}
    for uc, names in uc2names.iteritems():
        for name in names:
            name2ucs.setdefault(name, set()).add(uc)

    return uc2names, name2ucs
+
+
+def _canonicalGlyphName(name, localName2ucs, localUc2Names, altName2ucs):
+ ucs = localName2ucs.get(name)
+ if ucs:
+ return name, list(ucs)[0]
+ ucs = altName2ucs.get(name)
+ if ucs:
+ for uc in ucs:
+ localNames = localUc2Names.get(uc)
+ if localNames and len(localNames):
+ return localNames[0], uc
+ return None, None
+
+
def main():
    """Re-introduce kerning for auto-composed diacritic glyphs that an old
    cleanup erroneously removed; see the file header for the workflow and
    the required "-groups.plist"/"-kerning.plist" snapshot files."""
    argparser = ArgumentParser(description='Restore lost kerning')

    argparser.add_argument(
        '-dry', dest='dryRun', action='store_const', const=True, default=False,
        help='Do not modify anything, but instead just print what would happen.')

    argparser.add_argument(
        'srcFont', metavar='<fontfile>', type=str,
        help='TrueType, OpenType or UFO fonts to gather glyph info from')

    argparser.add_argument(
        'diffFile', metavar='<diffile>', type=str, help='Diff file')

    args = argparser.parse_args()

    dryRun = args.dryRun

    agl = parseAGL('src/glyphlist.txt')
    diacriticComps = loadGlyphCompositions('src/diacritics.txt')

    # Name databases: "alt" = names from the compiled font plus the AGL,
    # "local" = names present in the UFO sources (incl. composed glyphs).
    altUc2names, altName2ucs = loadAltNamesDB(agl, args.srcFont)
    localUc2Names, localName2ucs = loadLocalNamesDB(agl, diacriticComps)

    canonicalGlyphName = lambda name: _canonicalGlyphName(
        name, localName2ucs, localUc2Names, altName2ucs)

    # Composed diacritic glyphs whose kerning was dropped by the bad commit.
    deletedNames = loadNamesFromDiff(args.diffFile) # 10e15297b.diff
    deletedDiacriticNames = OrderedDict()

    for glyphName, comp in diacriticComps.iteritems():
        if glyphName in deletedNames:
            deletedDiacriticNames[glyphName] = comp

    for fontPath in srcFontPaths:
        addedGroupNames = set()

        # Pick the matching pre-loss snapshot files (file header, step 3).
        oldFilenamePrefix = 'regular'
        if fontPath.find('Bold') != -1:
            oldFilenamePrefix = 'bold'
        oldGroups, oldNameToGroups = loadGroups(
            oldFilenamePrefix + '-groups.plist')
        oldKerning, oldLIndex, oldRIndex, oldRGroupIndex = loadKerning(
            oldFilenamePrefix + '-kerning.plist')
        # lIndex : { name => <ref to plist right-hand side dict> }
        # rIndex : { name => [(left-hand-side-name, kernVal), ...] }

        currGroupFilename = os.path.join(fontPath, 'groups.plist')
        currKerningFilename = os.path.join(fontPath, 'kerning.plist')
        currGroups, currNameToGroups = loadGroups(currGroupFilename)
        currKerning, currLIndex, currRIndex, currRGroupIndex = loadKerning(currKerningFilename)

        for glyphName, comp in deletedDiacriticNames.iteritems():
            oldGroupMemberships = oldNameToGroups.get(glyphName)
            localGlyphName, localUc = canonicalGlyphName(glyphName)

            if localGlyphName is None:
                # glyph does no longer exist -- ignore
                print('[IGNORE]', glyphName)
                continue

            if oldGroupMemberships:
                # The glyph used to be kerned via groups: make sure each of
                # those groups exists again and contains the glyph.
                for oldGroupName in oldGroupMemberships:
                    currGroup = currGroups.get(oldGroupName) # None|[glyphname, ...]
                    if currGroup is not None:
                        if localGlyphName not in currGroup:
                            currGroup.append(localGlyphName)
                    else:
                        # group does not currently exist
                        if currNameToGroups.get(localGlyphName):
                            raise Exception('TODO: case where glyph is in some current groups, but not the' +
                                'original-named group')
                        print('[ADD group]', oldGroupName, '=> [', localGlyphName, ']')
                        currGroups[oldGroupName] = [localGlyphName]
                        addedGroupNames.add(oldGroupName)
                        if oldGroupName in oldRGroupIndex:
                            print('TODO: effects of oldGroupName being in oldRGroupIndex:',
                                oldRGroupIndex[oldGroupName])

            else: # if not oldGroupMemberships
                # The glyph was kerned individually.
                ucs = localName2ucs.get(glyphName)
                if not ucs:
                    raise Exception(
                        'TODO non-group, non-local name ' + glyphName + ' -- lookup in alt names')

                asLeft = oldLIndex.get(glyphName)     # glyph as left-hand side
                atRightOf = oldRIndex.get(glyphName)  # glyph as right-hand side

                if asLeft:
                    # Restore the glyph's left-hand entry, remapping the
                    # right-hand names/groups to their current equivalents.
                    currKern = currKerning.get(localGlyphName)
                    if currKern is None:
                        rightValues = {}
                        for rightName, kernValue in asLeft.iteritems():
                            if rightName[0] == '@':
                                currGroup = currGroups.get(rightName)
                                if currGroup and localGlyphName not in currGroup:
                                    rightValues[rightName] = kernValue
                            else:
                                localName, localUc = canonicalGlyphName(rightName)
                                if localName:
                                    rightValues[localName] = kernValue
                        if len(rightValues) > 0:
                            print('[ADD currKerning]', localGlyphName, '=>', rightValues)
                            currKerning[localGlyphName] = rightValues

                if atRightOf:
                    # Restore entries where the glyph appeared on the right.
                    for parentLeftName, kernVal in atRightOf:
                        if parentLeftName[0] == '@':
                            if parentLeftName in currGroups:
                                k = currKerning.get(parentLeftName)
                                if k:
                                    if localGlyphName not in k:
                                        print('[UPDATE currKerning g]',
                                            parentLeftName, '+= {', localGlyphName, ':', kernVal, '}')
                                        k[localGlyphName] = kernVal
                                else:
                                    print('TODO: left-group is NOT in currKerning; left-group', parentLeftName)
                        else:
                            localParentLeftGlyphName, _ = canonicalGlyphName(parentLeftName)
                            if localParentLeftGlyphName:
                                k = currKerning.get(localParentLeftGlyphName)
                                if k:
                                    if localGlyphName not in k:
                                        print('[UPDATE currKerning i]',
                                            localParentLeftGlyphName, '+= {', localGlyphName, ':', kernVal, '}')
                                        k[localGlyphName] = kernVal
                                else:
                                    print('[ADD currKerning i]',
                                        localParentLeftGlyphName, '=> {', localGlyphName, ':', kernVal, '}')
                                    currKerning[localParentLeftGlyphName] = {localGlyphName: kernVal}

        # Newly re-created groups need their kerning entries restored too.
        for groupName in addedGroupNames:
            print('————————————————————————————————————————————')
            print('re-introduce group', groupName, 'to kerning')

            # The group as a left-hand side.
            oldRKern = oldKerning.get(groupName)
            if oldRKern is not None:
                newRKern = {}
                for oldRightName, kernVal in oldRKern.iteritems():
                    if oldRightName[0] == '@':
                        if oldRightName in currGroups:
                            newRKern[oldRightName] = kernVal
                        else:
                            # Note: (oldRightName in addedGroupNames) should always be False here
                            # as we would have added it to currGroups already.
                            print('[DROP group]', oldRightName, kernVal)
                            # NOTE(review): this branch is the `else` of the
                            # membership test, so the del below is dead code.
                            if oldRightName in currGroups:
                                del currGroups[oldRightName]
                    else:
                        localGlyphName, _ = canonicalGlyphName(oldRightName)
                        if localGlyphName:
                            newRKern[localGlyphName] = kernVal
                            print('localGlyphName', localGlyphName)

                if len(newRKern):
                    print('[ADD currKerning g]', groupName, newRKern)
                    currKerning[groupName] = newRKern

            # The group as a right-hand side.
            # oldRGroupIndex : { group-name => [(left-hand-side-name, kernVal), ...] }
            oldLKern = oldRGroupIndex.get(groupName)
            if oldLKern:
                for oldRightName, kernVal in oldLKern:
                    if oldRightName[0] == '@':
                        if oldRightName in currGroups:
                            k = currKerning.get(oldRightName)
                            if k is not None:
                                print('[UPDATE kerning g]', oldRightName, '+= {', groupName, ':', kernVal, '}')
                                k[groupName] = kernVal
                            else:
                                currKerning[oldRightName] = {groupName: kernVal}
                                print('[ADD kerning g]', oldRightName, '= {', groupName, ':', kernVal, '}')
                    else:
                        localGlyphName, _ = canonicalGlyphName(oldRightName)
                        if localGlyphName:
                            k = currKerning.get(localGlyphName)
                            if k is not None:
                                print('[UPDATE kerning i]', localGlyphName, '+= {', groupName, ':', kernVal, '}')
                                k[groupName] = kernVal
                            else:
                                currKerning[localGlyphName] = {groupName: kernVal}
                                print('[ADD kerning i]', localGlyphName, '= {', groupName, ':', kernVal, '}')

        print('Write', currGroupFilename)
        if not dryRun:
            plistlib.writePlist(currGroups, currGroupFilename)

        print('Write', currKerningFilename)
        if not dryRun:
            plistlib.writePlist(currKerning, currKerningFilename)

        # end: for fontPath
+
+main()
diff --git a/misc/rewrite-glyphorder.py b/misc/rewrite-glyphorder.py
new file mode 100755
index 000000000..3da0c1699
--- /dev/null
+++ b/misc/rewrite-glyphorder.py
@@ -0,0 +1,305 @@
+#!/usr/bin/env python
+# encoding: utf8
+from __future__ import print_function
+import os, sys, plistlib, json, re
+from collections import OrderedDict
+from argparse import ArgumentParser
+from ConfigParser import RawConfigParser
+from fontTools import ttLib
+from robofab.objects.objectsRF import OpenFont
+
+
+# Regex matching "default" glyph names, like "uni2043" and "u01C5"
+uniNameRe = re.compile(r'^u(?:ni)([0-9A-F]{4,8})$')
+
+
class PList:
    """Lazily-loaded wrapper around a plist file.

    The file is parsed on first access and only written back when save()
    is called explicitly.
    """

    def __init__(self, filename):
        self.filename = filename
        self.plist = None  # parsed plist dict, or None when not yet loaded

    def load(self):
        self.plist = plistlib.readPlist(self.filename)

    def save(self):
        # Only write back when data was actually loaded or assigned.
        if self.plist is not None:
            plistlib.writePlist(self.plist, self.filename)

    def _data(self):
        # Parse the file on first access.
        if self.plist is None:
            self.load()
        return self.plist

    def get(self, k, defaultValue=None):
        return self._data().get(k, defaultValue)

    def __getitem__(self, k):
        return self._data()[k]

    def __setitem__(self, k, v):
        self._data()[k] = v

    def __delitem__(self, k):
        del self._data()[k]
+
+
def parseAGL(filename): # -> { 2126: 'Omega', ... }
    """Read an Adobe Glyph List file; return {codepoint: glyph name}.

    Only 1:1 mappings are kept: comment/empty lines and multi-codepoint
    ("higher-level combination") entries are ignored.
    """
    result = {}
    with open(filename, 'r') as f:
        lines = f.read().splitlines()
    for rawLine in lines:
        # e.g. "Omega;2126" kept, "dalethatafpatah;05D3 05B2" skipped
        line = rawLine.strip()
        if len(line) == 0 or line[0] == '#':
            continue
        name, uc = tuple(s.strip() for s in line.split(';'))
        if uc.find(' ') != -1:
            continue
        result[int(uc, 16)] = name
    return result
+
+
def revCharMap(ucToNames):
    """Invert a codepoint->name(s) map.

    {2126: ['Omega', 'Omegagr']} -> {'Omega': 2126, 'Omegagr': 2126}
    {2126: 'Omega'} -> {'Omega': 2126}

    The value shape (list of names vs a single name) is detected from the
    first entry. When two codepoints map to the same name, the last one
    iterated wins.
    """
    m = {}
    if len(ucToNames) == 0:
        return m

    # Use .values()/.items() rather than the Python 2-only iter* variants
    # so this works under both Python 2 and 3 (the file already imports
    # print_function for forward compatibility).
    lists = True
    for v in ucToNames.values():
        lists = not isinstance(v, str)
        break

    if lists:
        for uc, names in ucToNames.items():
            for name in names:
                m[name] = uc
    else:
        for uc, name in ucToNames.items():
            m[name] = uc

    return m
+
+
def loadJSONGlyphOrder(jsonFilename):
    """Load a glyph-order list from a JSON file ('-' reads stdin).

    Expected shape: [["Omega", 2126], [".notdef", null], ...]. Raises when
    the top level, or the first element, is not a list.
    """
    if jsonFilename == '-':
        gol = json.load(sys.stdin)
    else:
        with open(jsonFilename, 'r') as f:
            gol = json.load(f)
    if not isinstance(gol, list):
        raise Exception('expected [[string, int|null]')
    # Spot-check only the first element's shape.
    for v in gol:
        if not isinstance(v, list):
            raise Exception('expected [[string, int|null]]')
        break
    return gol
+
+
def loadTTGlyphOrder(font):
    """Return [(glyphName, codepoint-or-None), ...] in the font's glyph
    order.

    *font* may be a filename (opened via fontTools) or an open TTFont-like
    object; codepoints are taken from the best (highest-format) Unicode
    cmap subtable. Raises when the font has no cmap table.
    """
    if isinstance(font, str):
        font = ttLib.TTFont(font)

    if 'cmap' not in font:
        raise Exception('missing cmap table')

    # platformID: 0=unicode, 1=mac, 2=(reserved), 3=microsoft
    best = None
    bestFormat = 0
    for subtable in font['cmap'].tables:
        if subtable.platformID == 0 and subtable.format > bestFormat:
            best = subtable
            bestFormat = subtable.format

    ucmap = {}
    if best is not None:
        for codepoint, glyphname in best.cmap.items():
            ucmap[glyphname] = codepoint

    return [(name, ucmap.get(name)) for name in font.getGlyphOrder()]
+
+
def loadSrcGlyphOrder(jsonFilename, fontFilename): # -> [ ('Omegagreek', 2126|None), ...]
    """Load the source glyph order from a JSON file or a compiled font
    (JSON takes precedence). Returns None when neither is given."""
    if jsonFilename:
        return loadJSONGlyphOrder(jsonFilename)
    if fontFilename:
        # Tolerate trailing slashes/spaces on the font path.
        return loadTTGlyphOrder(fontFilename.rstrip('/ '))
    return None
+
+
def loadUFOGlyphNames(ufoPath):
    """Load glyph order and character maps from a UFO.

    Returns (gol, ucToNames, nameToUc, libPlist) where gol is an
    OrderedDict {glyphName: codepoint-or-None} in glyph order, and
    libPlist is the lazily-loaded lib.plist wrapper (kept for saving the
    updated order later).
    """
    font = OpenFont(ufoPath)

    libPlist = PList(os.path.join(ufoPath, 'lib.plist'))
    orderedNames = libPlist['public.glyphOrder'] # [ 'Omega', ...]

    # append any glyphs that are missing in orderedNames
    allNames = set(font.keys())
    for name in orderedNames:
        allNames.discard(name)
    for name in allNames:
        orderedNames.append(name)

    ucToNames = font.getCharacterMapping() # { 2126: [ 'Omega', ...], ...}
    nameToUc = revCharMap(ucToNames) # { 'Omega': 2126, ...}

    gol = OrderedDict() # { glyphName: codepoint-or-None }, in glyph order
    for name in orderedNames:
        gol[name] = nameToUc.get(name)

    return gol, ucToNames, nameToUc, libPlist
+
+
def saveUFOGlyphOrder(libPlist, orderedNames, dryRun):
    """Store *orderedNames* as the UFO's glyph order.

    Updates public.glyphOrder and, when present, the first RoboFont
    'glyphList' sort descriptor so RoboFont displays the same order.
    Writes lib.plist back unless *dryRun* is set.
    """
    libPlist['public.glyphOrder'] = orderedNames

    roboSort = libPlist.get('com.typemytype.robofont.sort')
    if roboSort is not None:
        # lib['com.typemytype.robofont.sort'] has schema
        # [ { type: "glyphList", ascending: [glyphname, ...] }, ...]
        for i, ent in enumerate(roboSort):
            if isinstance(ent, dict) and ent.get('type') == 'glyphList':
                roboSort[i] = {'type': 'glyphList', 'ascending': orderedNames}
                break

    print('Writing', libPlist.filename)
    if not dryRun:
        libPlist.save()
+
+
def getConfigResFile(config, basedir, name):
    """Resolve the resource file config[res][name] relative to *basedir*,
    falling back to *basedir*'s parent directory. Returns the path of the
    first existing file, or None when neither exists."""
    filename = os.path.join(basedir, config.get("res", name))
    if os.path.isfile(filename):
        return filename
    parentdir = os.path.dirname(basedir)
    filename = os.path.join(parentdir, config.get("res", name))
    if os.path.isfile(filename):
        return filename
    return None
+
+
def main():
    """Rewrite the glyph order of one or more UFOs to match a source order
    from a JSON file or a compiled font, then append any remaining glyphs
    in their original order."""
    argparser = ArgumentParser(description='Rewrite glyph order of UFO fonts')

    argparser.add_argument(
        '-dry', dest='dryRun', action='store_const', const=True, default=False,
        help='Do not modify anything, but instead just print what would happen.')

    argparser.add_argument(
        '-src-json', dest='srcJSONFile', metavar='<file>', type=str,
        help='JSON file to read glyph order from.' +
            ' Should be a list e.g. [["Omega", 2126], [".notdef", null], ...]')

    argparser.add_argument(
        '-src-font', dest='srcFontFile', metavar='<file>', type=str,
        help='TrueType or OpenType font to read glyph order from.')

    argparser.add_argument(
        '-out', dest='outFile', metavar='<file>', type=str,
        help='Write each name per line to <file>')

    argparser.add_argument(
        'dstFontsPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts to update')

    args = argparser.parse_args()
    dryRun = args.dryRun

    if args.srcJSONFile and args.srcFontFile:
        argparser.error('Both -src-json and -src-font specified -- please provide only one.')

    srcGol = loadSrcGlyphOrder(args.srcJSONFile, args.srcFontFile)
    if srcGol is None:
        argparser.error('No source provided (-src-* argument missing)')

    # Load Adobe Glyph List database (resolved via fontbuild.cfg next to
    # the first destination font).
    srcDir = os.path.dirname(args.dstFontsPaths[0])
    config = RawConfigParser(dict_type=OrderedDict)
    config.read(os.path.join(srcDir, 'fontbuild.cfg'))
    aglUcToName = parseAGL(getConfigResFile(config, srcDir, 'agl_glyphlistfile'))
    aglNameToUc = revCharMap(aglUcToName)

    # Union of the resulting glyph orders of all processed fonts (-out).
    glyphorderUnion = OrderedDict()

    for dstFontPath in args.dstFontsPaths:
        glyphOrder, ucToNames, nameToUc, libPlist = loadUFOGlyphNames(dstFontPath)

        newGol = OrderedDict()
        for name, uc in srcGol:

            if uc is None:
                # if there's no unicode associated, derive from name if possible
                m = uniNameRe.match(name)
                if m:
                    try:
                        uc = int(m.group(1), 16)
                    except:
                        pass
                if uc is None:
                    uc = aglNameToUc.get(name)

            # has same glyph mapped to same unicode
            names = ucToNames.get(uc)
            if names is not None:
                for name in names:
                    newGol[name] = uc
                continue

            # has same name in dst?
            uc2 = glyphOrder.get(name)
            if uc2 is not None:
                newGol[name] = uc2
                continue

            # Try AGL[uc] -> name == name
            if uc is not None:
                name2 = aglUcToName.get(uc)
                if name2 is not None:
                    uc2 = glyphOrder.get(name2)
                    if uc2 is not None:
                        newGol[name2] = uc2
                        continue

            # else: ignore glyph name in srcGol not found in target

        # add remaining glyphs from original glyph order
        for name, uc in glyphOrder.iteritems():
            if name not in newGol:
                newGol[name] = uc

        orderedNames = []
        for name in newGol.iterkeys():
            orderedNames.append(name)
            glyphorderUnion[name] = True

        saveUFOGlyphOrder(libPlist, orderedNames, dryRun)

    if args.outFile:
        print('Write', args.outFile)
        glyphorderUnionNames = glyphorderUnion.keys()
        if not dryRun:
            with open(args.outFile, 'w') as f:
                f.write('\n'.join(glyphorderUnionNames) + '\n')


if __name__ == '__main__':
    main()
diff --git a/misc/rf-scripts/AdjustWidth.py b/misc/rf-scripts/AdjustWidth.py
new file mode 100644
index 000000000..c3d381f68
--- /dev/null
+++ b/misc/rf-scripts/AdjustWidth.py
@@ -0,0 +1,53 @@
+#
+# This script changes the width of all glyphs by applying a multiplier.
+# It keeps the contours centered as glyphs get wider or tighter.
+#
+from mojo.roboFont import version
+from math import ceil, floor
+
+if __name__ == "__main__":
+ font = CurrentFont()
+ print "Resizing glyph margins for %r" % font
+
+ # how much to add or remove from each glyph's margin
+ A = -16
+
+ if font is not None:
+ for g in font:
+ # skip glyphs
+ if g.name in ('c', 'e', 'o', 'r', 'j'):
+ continue
+
+ if g.width < 2:
+ print '"%s": ["ignore", "zero-width"],' % (g.name)
+ continue
+
+ if g.box is None:
+ print '"%s": ["ignore", "empty"],' % (g.name)
+ continue
+
+ if g.width % 16 != 0:
+ print '"%s": ["ignore", "misaligned"],' % (g.name)
+ continue
+
+ if g.leftMargin <= 0 or g.rightMargin <= 0:
+ print '"%s": ["ignore", "zero-or-negative"],' % (g.name)
+ continue
+
+ leftMargin = int(max(0, g.leftMargin + A))
+ rightMargin = int(max(0, g.rightMargin + A))
+
+ #print '"%s": ["update", %g, %g],' % (g.name, leftMargin, rightMargin)
+ if 'interface.spaceadjust' in g.lib:
+ g.lib['interface.width-adjustments'].append(A)
+ else:
+ g.lib['interface.width-adjustments'] = [A]
+ # order of assignment is probably important
+ g.rightMargin = int(rightMargin)
+ g.leftMargin = int(leftMargin)
+
+ font.update()
+ else:
+ print "No fonts open"
+
+ print "Done"
diff --git a/misc/rf-scripts/ChangeUPM.py b/misc/rf-scripts/ChangeUPM.py
new file mode 100644
index 000000000..f7617353a
--- /dev/null
+++ b/misc/rf-scripts/ChangeUPM.py
@@ -0,0 +1,107 @@
+# Change upm
+# Jens Kutilek 2013-01-02
+
+from mojo.roboFont import version
+
+def scalePoints(glyph, factor):
+ if version == "1.4":
+ # stupid workaround for bug in RoboFont 1.4
+ for contour in glyph:
+ for point in contour.points:
+ point.x *= factor
+ point.y *= factor
+ glyph.width *= factor
+ else:
+ glyph *= factor
+
+def scaleGlyph(glyph, factor, scaleWidth=True, roundCoordinates=True):
+  """Scale a glyph's outlines by `factor`.
+
+  Components are detached before scaling and re-attached afterwards with
+  their offsets scaled (but their own scale factors untouched), so that
+  component references are not scaled twice. When scaleWidth is False the
+  original advance width is restored at the end.
+  """
+  if not(scaleWidth):
+    oldWidth = glyph.width
+  if len(glyph.components) == 0:
+    scalePoints(glyph, factor)
+    if roundCoordinates:
+      glyph.round()
+  else:
+    # save components
+    # this may be a tad too convoluted ...
+    components = []
+    for i in range(len(glyph.components)):
+      components.append(glyph.components[i])
+    for c in components:
+      glyph.removeComponent(c)
+    scalePoints(glyph, factor)
+    if roundCoordinates:
+      glyph.round()
+    # restore components
+    for i in range(len(components)):
+      # offsets are scaled and rounded; the component's own transform scale
+      # is preserved as-is
+      newOffset = (int(round(components[i].offset[0] * factor)),
+        int(round(components[i].offset[1] * factor)))
+      glyph.appendComponent(components[i].baseGlyph, newOffset, components[i].scale)
+  if not(scaleWidth):
+    # restore width
+    glyph.width = oldWidth
+
+
+def changeUPM(font, factor, roundCoordinates=True):
+
+ # Glyphs
+ for g in font:
+ scaleGlyph(g, factor)
+ for guide in g.guides:
+ # another thing that doesn't work in RoboFont 1.4 - 1.5.1
+ guide.x *= factor
+ guide.y *= factor
+
+ # Glyph layers
+ mainLayer = "foreground"
+ for layerName in font.layerOrder:
+ if layerName != mainLayer:
+ for g in font:
+ g.flipLayers(mainLayer, layerName)
+ scaleGlyph(g, factor, scaleWidth=False)
+ g.flipLayers(layerName, mainLayer)
+
+ # Kerning
+ if font.kerning:
+ font.kerning.scale(factor)
+ if roundCoordinates:
+ if not version in ["1.4", "1.5", "1.5.1"]:
+ font.kerning.round(1)
+ else:
+ print "WARNING: kerning values cannot be rounded to integer in this RoboFont version"
+
+ # TODO: Change positioning feature code?
+
+ # Vertical dimensions
+ font.info.descender = int(round(font.info.descender * factor))
+ font.info.xHeight = int(round(font.info.xHeight * factor))
+ font.info.capHeight = int(round(font.info.capHeight * factor))
+ font.info.ascender = int(round(font.info.ascender * factor))
+
+ # Finally set new UPM
+ font.info.unitsPerEm = newUpm
+
+ font.update()
+
+if __name__ == "__main__":
+  from robofab.interface.all.dialogs import AskString
+
+  print "Change Units Per Em"
+
+  if CurrentFont() is not None:
+    oldUpm = CurrentFont().info.unitsPerEm
+    newUpm = CurrentFont().info.unitsPerEm
+    # NOTE(review): the bare except keeps newUpm == oldUpm when the dialog is
+    # cancelled or returns a non-integer, which makes the run a no-op below —
+    # but it also hides unrelated errors; consider catching ValueError only.
+    try:
+      newUpm = int(AskString("New units per em size?", oldUpm))
+    except:
+      pass
+    if newUpm == oldUpm:
+      print " Not changing upm size."
+    else:
+      # scale everything by the ratio of new to old UPM
+      factor = float(newUpm) / oldUpm
+      print " Scaling all font measurements by", factor
+      changeUPM(CurrentFont(), factor)
+  else:
+    print " Open a font first to change upm, please."
+
+  print " Done."
diff --git a/misc/rf-scripts/GridAdjust.py b/misc/rf-scripts/GridAdjust.py
new file mode 100644
index 000000000..f14550b4a
--- /dev/null
+++ b/misc/rf-scripts/GridAdjust.py
@@ -0,0 +1,83 @@
+#
+# This script changes the width of any glyph which width is not an even multiple of 256.
+# For glyphs that are updated, the shape(s) inside the glyph are centered as well.
+#
+from mojo.roboFont import version
+from math import ceil, floor
+
+if __name__ == "__main__":
+ font = CurrentFont()
+ print "Fitting glyphs to EM grid at 256 %r" % font
+
+ # Strategy to use for centering a glyph when resizing its EM:
+ # "center" Ignore existing margins and center in EM at on integer units.
+ # "adjust-margins" Attempt to retain existing margins w/o centering inside EM.
+ centeringStrategy = 'center'
+
+ if font is not None:
+ for g in font:
+ # only consider adjusting the listed glyphs
+ # if g.unicode not in (0x212B, 0x005A, 0x0387):
+ # continue
+
+ if g.width < 2:
+ # ignore zero width glyph
+ # print 'ignoring %r -- zero width' % g
+ continue
+
+ if g.width % 256 == 0:
+ # ignore already aligned glyph
+ # print 'ignoring %r -- already aligned' % g
+ continue
+
+ width = g.width
+ if g.rightMargin < 128:
+ width = ceil(width / 256) * 256
+ else:
+ width = round(width / 256) * 256
+
+ # center glyph in EM
+ leftMargin = g.leftMargin
+ rightMargin = g.rightMargin
+
+ if centeringStrategy == 'adjust-margins':
+ # Adjust margins to place the glyph in the center while retaining original
+ # left/right margins.
+ widthDelta = width - g.width
+ leftMargin = g.leftMargin + int(floor(widthDelta / 2))
+ rightMargin = g.rightMargin + int(ceil(widthDelta / 2))
+ elif centeringStrategy == 'center':
+ # Read g.box (effective bounds of the glyph) and truly center the
+ # glyph, but we could run the risk of losing some intentionally-left or right
+ # aligned glyph, e.g. "|x |" -> "| x |"
+ if g.box is not None:
+ xMin, yMin, xMax, yMax = g.box
+ graphicWidth = xMax - xMin
+ leftMargin = round((width - graphicWidth) / 2)
+ else:
+ print 'Unexpected centeringStrategy value'
+ break
+
+ # log message
+ uniname = ''
+ if g.unicode is not None:
+ uniname = ' U+%04X' % g.unicode
+ print 'Adjusting "%s"%s from %g to %g' % (g.name, uniname, g.width, width)
+
+ # write changes to glyph
+ g.lib['interface.gridadjust.original'] = repr({
+ "rightMargin": g.rightMargin,
+ "leftMargin": g.leftMargin,
+ "width": g.width,
+ })
+
+ # order of assignment is probably important
+ g.rightMargin = int(rightMargin)
+ g.leftMargin = int(leftMargin)
+ g.width = int(width)
+
+ font.update()
+ else:
+ print "No fonts open"
+
+ print "Done"
diff --git a/misc/rf-scripts/RemoveLocalGuides.py b/misc/rf-scripts/RemoveLocalGuides.py
new file mode 100644
index 000000000..05e1a05b7
--- /dev/null
+++ b/misc/rf-scripts/RemoveLocalGuides.py
@@ -0,0 +1,15 @@
+#
+# Removes local guides from all glyphs
+#
+if __name__ == "__main__":
+ font = CurrentFont()
+ print "Removing local guides from all glyphs of %r" % font
+ if font is not None:
+ for g in font:
+ if 'com.typemytype.robofont.guides' in g.lib:
+ del(g.lib['com.typemytype.robofont.guides'])
+ font.update()
+ else:
+ print "No fonts open"
+
+ print "Done"
diff --git a/misc/rf-scripts/StripGlyphs.py b/misc/rf-scripts/StripGlyphs.py
new file mode 100644
index 000000000..12bc2ab88
--- /dev/null
+++ b/misc/rf-scripts/StripGlyphs.py
@@ -0,0 +1,384 @@
+#
+# Removes unused glyphs
+#
+from mojo.roboFont import version
+
+SC_ROMAN = [
+ "A.smcp",
+ "B.smcp",
+ "C.smcp",
+ "D.smcp",
+ "E.smcp",
+ "F.smcp",
+ "G.smcp",
+ "H.smcp",
+ "I.smcp",
+ "J.smcp",
+ "K.smcp",
+ "L.smcp",
+ "M.smcp",
+ "N.smcp",
+ "O.smcp",
+ "P.smcp",
+ "Q.smcp",
+ "R.smcp",
+ "S.smcp",
+ "T.smcp",
+ "U.smcp",
+ "V.smcp",
+ "W.smcp",
+ "X.smcp",
+ "Y.smcp",
+ "Z.smcp",
+ "AE.smcp",
+ "AEacute.smcp",
+ "Aacute.smcp",
+ "Abreve.smcp",
+ "Acircumflex.smcp",
+ "Adieresis.smcp",
+ "Agrave.smcp",
+ "Alpha.smcp",
+ "Alphatonos.smcp",
+ "Amacron.smcp",
+ "Aogonek.smcp",
+ "Aogonek.smcp.NAV",
+ "Aring.smcp",
+ "Aringacute.smcp",
+ "Atilde.smcp",
+ "Beta.smcp",
+ "Cacute.smcp",
+ "Ccaron.smcp",
+ "Ccedilla.smcp",
+ "Ccircumflex.smcp",
+ "Chi.smcp",
+ "Dcaron.smcp",
+ "Dcroat.smcp",
+ "Delta.smcp",
+ "Eacute.smcp",
+ "Ebreve.smcp",
+ "Ecaron.smcp",
+ "Ecircumflex.smcp",
+ "Edieresis.smcp",
+ "Edotaccent.smcp",
+ "Egrave.smcp",
+ "Emacron.smcp",
+ "Eng.smcp",
+ "Eogonek.smcp",
+ "Eogonek.smcp.NAV",
+ "Epsilon.smcp",
+ "Epsilontonos.smcp",
+ "Eta.smcp",
+ "Etatonos.smcp",
+ "Eth.smcp",
+ "Gamma.smcp",
+ "Gbreve.smcp",
+ "Gcircumflex.smcp",
+ "Gcommaaccent.smcp",
+ "Germandbls.smcp",
+ "Hbar.smcp",
+ "Hcircumflex.smcp",
+ "IJ.smcp",
+ "Iacute.smcp",
+ "Ibreve.smcp",
+ "Icircumflex.smcp",
+ "Idieresis.smcp",
+ "Igrave.smcp",
+ "Imacron.smcp",
+ "Iogonek.smcp",
+ "Iota.smcp",
+ "Iotadieresis.smcp",
+ "Iotatonos.smcp",
+ "Itilde.smcp",
+ "Jcircumflex.smcp",
+ "Kappa.smcp",
+ "Kcommaaccent.smcp",
+ "Lacute.smcp",
+ "Lambda.smcp",
+ "Lcaron.smcp",
+ "Lcommaaccent.smcp",
+ "Ldot.smcp",
+ "Lslash.smcp",
+ "Nacute.smcp",
+ "Ncaron.smcp",
+ "Ncommaaccent.smcp",
+ "Ntilde.smcp",
+ "Nu.smcp",
+ "OE.smcp",
+ "Oacute.smcp",
+ "Obreve.smcp",
+ "Ocircumflex.smcp",
+ "Odieresis.smcp",
+ "Ograve.smcp",
+ "Ohungarumlaut.smcp",
+ "Omacron.smcp",
+ "Omega.smcp",
+ "Omegatonos.smcp",
+ "Omicron.smcp",
+ "Omicrontonos.smcp",
+ "Oogonek.smcp",
+ "Oogonek.smcp.NAV",
+ "Oslash.smcp",
+ "Oslashacute.smcp",
+ "Otilde.smcp",
+ "Phi.smcp",
+ "Pi.smcp",
+ "Psi.smcp",
+ "Racute.smcp",
+ "Rcaron.smcp",
+ "Rcommaaccent.smcp",
+ "Rho.smcp",
+ "Sacute.smcp",
+ "Scaron.smcp",
+ "Scedilla.smcp",
+ "Scircumflex.smcp",
+ "Sigma.smcp",
+ "Tau.smcp",
+ "Tbar.smcp",
+ "Tcaron.smcp",
+ "Theta.smcp",
+ "Thorn.smcp",
+ "Uacute.smcp",
+ "Ubreve.smcp",
+ "Ucircumflex.smcp",
+ "Udieresis.smcp",
+ "Ugrave.smcp",
+ "Uhungarumlaut.smcp",
+ "Umacron.smcp",
+ "Uogonek.smcp",
+ "Upsilon.smcp",
+ "Upsilondieresis.smcp",
+ "Upsilontonos.smcp",
+ "Uring.smcp",
+ "Utilde.smcp",
+ "Wacute.smcp",
+ "Wcircumflex.smcp",
+ "Wdieresis.smcp",
+ "Wgrave.smcp",
+ "Xi.smcp",
+ "Yacute.smcp",
+ "Ycircumflex.smcp",
+ "Ydieresis.smcp",
+ "Ygrave.smcp",
+ "Zacute.smcp",
+ "Zcaron.smcp",
+ "Zdotaccent.smcp",
+ "Zeta.smcp",
+ "ampersand.smcp",
+ "uni010A.smcp",
+ "uni0120.smcp",
+ "uni0162.smcp",
+ "Scommaaccent.smcp",
+ "Tcommaaccent.smcp",
+ "uni037F.smcp"
+]
+
+
+SC_SET1 = [
+ "zero.smcp",
+ "one.smcp",
+ "two.smcp",
+ "three.smcp",
+ "four.smcp",
+ "five.smcp",
+ "six.smcp",
+ "seven.smcp",
+ "eight.smcp",
+ "nine.smcp",
+ "Euro.smcp",
+ "Idotaccent.smcp",
+ "Mu.smcp",
+ "dollar.smcp",
+ "lira.smcp",
+ "sterling.smcp",
+ "uni0401.smcp",
+ "uni0402.smcp",
+ "uni0403.smcp",
+ "uni0404.smcp",
+ "uni0405.smcp",
+ "uni0406.smcp",
+ "uni0407.smcp",
+ "uni0408.smcp",
+ "uni0409.smcp",
+ "uni040A.smcp",
+ "uni040B.smcp",
+ "uni040C.smcp",
+ "uni040E.smcp",
+ "uni040F.smcp",
+ "uni0410.smcp",
+ "uni0411.smcp",
+ "uni0412.smcp",
+ "uni0413.smcp",
+ "uni0414.smcp",
+ "uni0415.smcp",
+ "uni0416.smcp",
+ "uni0417.smcp",
+ "uni0418.smcp",
+ "uni0419.smcp",
+ "uni041A.smcp",
+ "uni041B.smcp",
+ "uni041C.smcp",
+ "uni041D.smcp",
+ "uni041E.smcp",
+ "uni041F.smcp",
+ "uni0420.smcp",
+ "uni0421.smcp",
+ "uni0422.smcp",
+ "uni0423.smcp",
+ "uni0424.smcp",
+ "uni0425.smcp",
+ "uni0426.smcp",
+ "uni0427.smcp",
+ "uni0428.smcp",
+ "uni0429.smcp",
+ "uni042A.smcp",
+ "uni042B.smcp",
+ "uni042C.smcp",
+ "uni042D.smcp",
+ "uni042E.smcp",
+ "uni042F.smcp",
+ "uni0490.smcp",
+ "uni0492.smcp",
+ "uni0496.smcp",
+ "uni0498.smcp",
+ "uni049A.smcp",
+ "uni049C.smcp",
+ "uni04A0.smcp",
+ "uni04A2.smcp",
+ "uni04A8.smcp",
+ "uni04AA.smcp",
+ "uni04AE.smcp",
+ "uni04B0.smcp",
+ "uni04B2.smcp",
+ "uni04B4.smcp",
+ "uni04B8.smcp",
+ "uni04BA.smcp",
+ "uni04BC.smcp",
+ "uni04BE.smcp",
+ "uni04D8.smcp",
+ "uni04E0.smcp",
+ "uni04E2.smcp",
+ "uni04E8.smcp",
+ "uni04EE.smcp",
+ "uni20B4.smcp",
+ "uni20B8.smcp",
+ "uni20BD.smcp",
+ "uni2116.smcp",
+ "yen.smcp"
+]
+
+
+SC_SET2 = [
+ "I.smcp",
+ "Sigma.smcp",
+ "Mu.smcp",
+ "uni0410.smcp",
+ "uni0411.smcp",
+ "uni0412.smcp",
+ "uni0413.smcp",
+ "uni0414.smcp",
+ "uni0415.smcp",
+ "uni0416.smcp",
+ "uni0417.smcp",
+ "uni0418.smcp",
+ "uni0419.smcp",
+ "uni041A.smcp",
+ "uni041B.smcp",
+ "uni041C.smcp",
+ "uni041D.smcp",
+ "uni041E.smcp",
+ "uni041F.smcp",
+ "uni0420.smcp",
+ "uni0421.smcp",
+ "uni0422.smcp",
+ "uni0423.smcp",
+ "uni0424.smcp",
+ "uni0425.smcp",
+ "uni0426.smcp",
+ "uni0427.smcp",
+ "uni0428.smcp",
+ "uni0429.smcp",
+ "uni042A.smcp",
+ "uni042B.smcp",
+ "uni042C.smcp",
+ "uni042D.smcp",
+ "uni042E.smcp",
+ "uni042F.smcp",
+ "uni0401.smcp",
+ "uni0402.smcp",
+ "uni0403.smcp",
+ "uni0404.smcp",
+ "uni0405.smcp",
+ "uni0406.smcp",
+ "uni0407.smcp",
+ "uni0408.smcp",
+ "uni0409.smcp",
+ "uni040A.smcp",
+ "uni040B.smcp",
+ "uni040C.smcp",
+ "uni040E.smcp",
+ "uni040F.smcp",
+ "uni0490.smcp",
+ "uni0492.smcp",
+ "uni0496.smcp",
+ "uni0498.smcp",
+ "uni049A.smcp",
+ "uni049C.smcp",
+ "uni04A0.smcp",
+ "uni04A2.smcp",
+ "uni04A8.smcp",
+ "uni04AA.smcp",
+ "uni04AE.smcp",
+ "uni04B0.smcp",
+ "uni04B2.smcp",
+ "uni04B4.smcp",
+ "uni04B8.smcp",
+ "uni04BA.smcp",
+ "uni04BC.smcp",
+ "uni04BE.smcp",
+ "uni04D8.smcp",
+ "uni04E0.smcp",
+ "uni04E2.smcp",
+ "uni04E8.smcp",
+ "uni04EE.smcp"
+]
+
+
+STRIP_NAME_SET = set(SC_ROMAN).union(SC_SET1).union(SC_SET2)
+
+STRIP_SUFFIXES = (
+ '.smcp',
+ '.unic',
+ '.alt',
+ '.alt2',
+ '.ss06',
+ '.ss07',
+ '.onum',
+ '.pnum',
+ '.tnum'
+)
+
+def hasStripSuffix(g):
+ name = g.name
+ for suffix in STRIP_SUFFIXES:
+ if str.endswith(name, suffix):
+ return True
+ return False
+
+if __name__ == "__main__":
+ font = CurrentFont()
+ if font is not None:
+ for g in font:
+ if g.name in STRIP_NAME_SET or hasStripSuffix(g):
+
+ if g.unicode is not None:
+ # glyph maps to a codepoint -- keep it
+ continue
+
+ print 'Removing "%s"' % g.name
+
+ font.removeGlyph(g.name)
+ font.update()
+ else:
+ print "No fonts open"
+
+ print "Done"
diff --git a/misc/rf-scripts/ZeroWidth.py b/misc/rf-scripts/ZeroWidth.py
new file mode 100644
index 000000000..a9277d09c
--- /dev/null
+++ b/misc/rf-scripts/ZeroWidth.py
@@ -0,0 +1,26 @@
+#
+# NOTE(review): the original header was copied from AdjustWidth.py and did
+# not describe this script -- no multiplier is applied here. What it does:
+# clamp negative left/right glyph margins up to zero for every glyph in the
+# current font.
+#
+from mojo.roboFont import version
+from math import ceil, floor
+
+if __name__ == "__main__":
+  font = CurrentFont()
+  print "Resizing glyph margins for %r" % font
+
+  if font is not None:
+    for g in font:
+      leftMargin = g.leftMargin
+      rightMargin = g.rightMargin
+
+      # only touch glyphs that have at least one negative margin
+      if leftMargin < 0 or rightMargin < 0:
+        g.rightMargin = int(max(0, rightMargin))
+        g.leftMargin = int(max(0, leftMargin))
+        print("adjust %s" % g.name)
+
+    font.update()
+  else:
+    print "No fonts open"
+
+  print "Done"
diff --git a/misc/stems.txt b/misc/stems.txt
new file mode 100644
index 000000000..0cedc8909
--- /dev/null
+++ b/misc/stems.txt
@@ -0,0 +1,25 @@
+
+================================================================================================
+Regular
+••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
+
+Horizontal:
+ 220 A B D E F G H L P R T Z two three(center) four five seven
+ 200 a e f t z minus
+
+Vertical:
+ 248 B D E F G H I J K L N P R T U Y one four
+ 236 a b d f g h i j k l m n p q r t u
+ 232 M
+
+
+
+================================================================================================
+Bold
+••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••••
+
+Horizontal:
+ 380 ?
+
+Vertical:
+ 464 ?
diff --git a/misc/svgsync.py b/misc/svgsync.py
new file mode 100755
index 000000000..84e425194
--- /dev/null
+++ b/misc/svgsync.py
@@ -0,0 +1,435 @@
+#!/usr/bin/env python
+# encoding: utf8
+#
+# Sync glyph shapes between SVG and UFO, creating a bridge between UFO and Figma.
+#
+import os
+import sys
+import argparse
+import re
+from xml.dom.minidom import parseString as xmlparseString
+
+# from robofab.world import world, RFont, RGlyph, OpenFont, NewFont
+from robofab.objects.objectsRF import RFont, RGlyph, OpenFont, NewFont, RContour
+from robofab.objects.objectsBase import MOVE, LINE, CORNER, CURVE, QCURVE, OFFCURVE
+
+font = None # RFont
+ufopath = ''
+svgdir = ''
+effectiveAscender = 0
+
+
+def num(s):
+ return int(s) if s.find('.') == -1 else float(s)
+
+
+def glyphToSVGPath(g, yMul):
+  """Serialize the contours of glyph `g` into an SVG path-data string.
+
+  `yMul` multiplies every y coordinate (callers pass -1 to flip from font
+  coordinates into SVG's y-down space). Glyphs with components are first
+  flattened by decomposing into a temporary '__svgsync' glyph.
+  """
+  # 'Y'/'X' are placeholders resolved below: 'X' (off-curve) starts a cubic
+  # 'C' unless one is already open; 'Y' (on-curve curve point) ends it with
+  # no explicit command letter.
+  commands = {'move':'M','line':'L','curve':'Y','offcurve':'X','offCurve':'X'}
+  svg = ''
+  contours = []
+  if len(g.components):
+    font.newGlyph('__svgsync')
+    new = font['__svgsync']
+    new.width = g.width
+    new.appendGlyph(g)
+    new.decompose()
+    g = new
+  if len(g):
+    for c in range(len(g)):
+      contours.append(g[c])
+  for i in range(len(contours)):
+    c = contours[i]
+    contour = end = ''
+    curve = False
+    points = c.points
+    # rotate leading off-curve points to the back so the contour starts
+    # on-curve (at most two can lead a cubic segment)
+    if points[0].type == 'offCurve':
+      points.append(points.pop(0))
+    if points[0].type == 'offCurve':
+      points.append(points.pop(0))
+    for x in range(len(points)):
+      p = points[x]
+      command = commands[str(p.type)]
+      if command == 'X':
+        if curve == True:
+          command = ''
+        else:
+          command = 'C'
+          curve = True
+      if command == 'Y':
+        command = ''
+        curve = False
+      if x == 0:
+        command = 'M'
+      if p.type == 'curve':
+        # remember the closing on-curve point; appended after the loop so
+        # the wrapped-around curve ends where the contour started
+        end = ' ' + str(p.x) + ' ' + str(p.y * yMul)
+      contour += ' ' + command + str(p.x) + ' ' + str(p.y * yMul)
+    svg += ' ' + contour + end + 'z'
+  # clean up the temporary decomposition glyph, if one was created
+  if font.has_key('__svgsync'):
+    font.removeGlyph('__svgsync')
+  return svg.strip()
+
+
+def maybeAddMove(contour, x, y, smooth):
+ if len(contour.segments) == 0:
+ contour.appendSegment(MOVE, [(x, y)], smooth=smooth)
+
+
+
+svgPathDataRegEx = re.compile(r'(?:([A-Z])\s*|)([0-9\.\-\+eE]+)')
+
+
+def drawSVGPath(g, d, tr):
+  """Draw SVG path data `d` into glyph `g` through its pen.
+
+  `tr` is the (x, y) translation accumulated from the SVG node tree.
+  Only absolute M/C/L commands plus a trailing 'z' are supported; y values
+  are flipped back into font coordinates and shifted by unitsPerEm.
+  Raises on any other path command.
+  """
+  yMul = -1
+  xOffs = tr[0]
+  yOffs = -(font.info.unitsPerEm - tr[1])
+
+  # each 'M' begins a new subpath
+  for pathd in d.split('M'):
+    pathd = pathd.strip()
+    # print 'pathd', pathd
+    if len(pathd) == 0:
+      continue
+    i = 0
+    closePath = False
+    if pathd[-1] == 'z':
+      closePath = True
+      pathd = pathd[0:-1]
+
+    # tokenize into "C123"-style command+number or bare-number strings
+    pv = []
+    for m in svgPathDataRegEx.finditer('M' + pathd):
+      if m.group(1) is not None:
+        pv.append(m.group(1) + m.group(2))
+      else:
+        pv.append(m.group(2))
+
+    initX = 0
+    initY = 0
+
+    pen = g.getPen()
+
+    # cursor `i` walks the token list; each command consumes its own args
+    while i < len(pv):
+      pd = pv[i]; i += 1
+      cmd = pd[0]
+      x = num(pd[1:]) + xOffs
+      y = (num(pv[i]) + yOffs) * yMul; i += 1
+
+      if cmd == 'M':
+        # print cmd, x, y, '/', num(pv[i-2][1:])
+        initX = x
+        initY = y
+        pen.moveTo((x, y))
+        continue
+
+      if cmd == 'C':
+        # Bezier curve: "C x1 y1, x2 y2, x y"
+        x1 = x
+        y1 = y
+        x2 = num(pv[i]) + xOffs; i += 1
+        y2 = (num(pv[i]) + yOffs) * yMul; i += 1
+        x = num(pv[i]) + xOffs; i += 1
+        y = (num(pv[i]) + yOffs) * yMul; i += 1
+        pen.curveTo((x1, y1), (x2, y2), (x, y))
+        # print cmd, x1, y1, x2, y2, x, y
+
+      elif cmd == 'L':
+        pen.lineTo((x, y))
+
+      else:
+        raise Exception('unexpected SVG path command %r' % cmd)
+
+    if closePath:
+      pen.closePath()
+    else:
+      pen.endPath()
+    # print 'path ended. closePath:', closePath
+
+
+def glyphToSVG(g):
+ width = g.width
+ height = font.info.unitsPerEm
+
+ d = {
+ 'name': g.name,
+ 'width': width,
+ 'height': effectiveAscender - font.info.descender,
+ 'effectiveAscender': effectiveAscender,
+ 'leftMargin': g.leftMargin,
+ 'rightMargin': g.rightMargin,
+ 'glyphSVGPath': glyphToSVGPath(g, -1),
+ 'ascender': font.info.ascender,
+ 'descender': font.info.descender,
+ 'baselineOffset': height + font.info.descender,
+ 'unitsPerEm': font.info.unitsPerEm,
+ }
+
+ # for kv in d.iteritems():
+ # if kv[0] == 'glyphSVGPath':
+ # print ' %s: ...' % kv[0]
+ # else:
+ # print ' %s: %r' % kv
+
+ svg = '''
+<svg xmlns="http://www.w3.org/2000/svg" width="%(width)d" height="%(height)d">
+ <g id="%(name)s">
+ <path d="%(glyphSVGPath)s" transform="translate(0 %(effectiveAscender)d)" />
+ <rect x="0" y="0" width="%(width)d" height="%(height)d" fill="" stroke="black" />
+ </g>
+</svg>
+ ''' % d
+ # print svg
+ return svg.strip()
+
+
+def _findPathNodes(n, paths, defs, uses, isDef=False):
+  """Recursively collect <path>, <use> and <defs> entries under node `n`.
+
+  Direct <path> nodes are appended to `paths`; paths inside a <defs>
+  subtree go into `defs` keyed by their id attribute; <use> nodes are
+  recorded in `uses` keyed by the id they reference, for later inlining.
+  """
+  for cn in n.childNodes:
+    if cn.nodeName == 'path':
+      if isDef:
+        defs[cn.getAttribute('id')] = cn
+      else:
+        paths.append(cn)
+    elif cn.nodeName == 'use':
+      # strip the leading '#' from the href to get the referenced id
+      uses[cn.getAttribute('xlink:href').lstrip('#')] = {'useNode': cn, 'targetNode': None}
+    elif cn.nodeName == 'defs':
+      _findPathNodes(cn, paths, defs, uses, isDef=True)
+    elif not isinstance(cn, basestring) and cn.childNodes and len(cn.childNodes) > 0:
+      # recurse into any other element that has children (e.g. <g>)
+      _findPathNodes(cn, paths, defs, uses, isDef)
+  # return translate
+
+
+def findPathNodes(n, isDef=False):
+  """Return all effective <path> nodes under `n`, resolving <use>/<defs>.
+
+  Figma-style exports reference defs via <use>; each referenced <path> def
+  is spliced into the tree in place of its <use> node (inheriting the use
+  node's attributes except xlink:href) so callers see plain paths.
+  """
+  paths = []
+  defs = {}
+  uses = {}
+  # <g id="Canvas" transform="translate(-3677 -24988)">
+  #   <g id="six 2">
+  #     <g id="six">
+  #       <g id="Vector">
+  #         <use xlink:href="#path0_fill" transform="translate(3886 25729)"/>
+  # ...
+  # <defs>
+  #  <path id="path0_fill" ...
+  #
+  _findPathNodes(n, paths, defs, uses)
+
+  # flatten uses & defs
+  for k in uses.keys():
+    dfNode = defs.get(k)
+    if dfNode is not None:
+      v = uses[k]
+      v['targetNode'] = dfNode
+      if dfNode.nodeName == 'path':
+        useNode = v['useNode']
+        useNode.parentNode.replaceChild(dfNode, useNode)
+        attrs = useNode.attributes
+        # NOTE: inner loop variable `k` shadows the outer `k`; harmless here
+        # since the outer loop iterates a pre-built key list, but fragile.
+        for k in attrs.keys():
+          if k != 'xlink:href':
+            dfNode.setAttribute(k, attrs[k])
+        paths.append(dfNode)
+
+    else:
+      # referenced def was never found; drop the stale entry
+      del defs[k]
+
+  return paths
+
+
+def nodeTranslation(path, x=0, y=0):
+  """Accumulate translate(x, y) transforms from `path` up through its parents.
+
+  Returns the summed (x, y) offset. Only "translate(...)" transforms are
+  handled; any other non-empty transform raises.
+  """
+  tr = path.getAttribute('transform')
+  if tr is not None:
+    # minidom may hand back an attribute node rather than a string
+    if not isinstance(tr, basestring):
+      tr = tr.value
+    if len(tr) > 0:
+      m = re.match(r"translate\s*\(\s*(?P<x>[\-\d\.eE]+)[\s,]*(?P<y>[\-\d\.eE]+)\s*\)", tr)
+      if m is not None:
+        x += num(m.group('x'))
+        y += num(m.group('y'))
+      else:
+        raise Exception('Unable to handle transform="%s"' % tr)
+      # m = re.match(r"matrix\s*\(\s*(?P<a>[\-\d\.eE]+)[\s,]*(?P<b>[\-\d\.eE]+)[\s,]*(?P<c>[\-\d\.eE]+)[\s,]*(?P<d>[\-\d\.eE]+)[\s,]*(?P<e>[\-\d\.eE]+)[\s,]*(?P<f>[\-\d\.eE]+)[\s,]*", tr)
+      # if m is not None:
+      #   a, b, c = num(m.group('a')), num(m.group('b')), num(m.group('c'))
+      #   d, e, f = num(m.group('d')), num(m.group('e')), num(m.group('f'))
+      #   # matrix -1 0 0 -1 -660.719 31947
+      #   print 'matrix', a, b, c, d, e, f
+      #   # matrix(-1 0 -0 -1 -2553 31943)
+  # walk up and add the parents' translations too
+  pn = path.parentNode
+  if pn is not None and pn.nodeName != '#document':
+    x, y = nodeTranslation(pn, x, y)
+  return (x, y)
+
+
+def glyphUpdateFromSVG(g, svgCode):
+ doc = xmlparseString(svgCode)
+ svg = doc.documentElement
+ paths = findPathNodes(svg)
+ if len(paths) == 0:
+ raise Exception('no <path> found in SVG')
+ path = paths[0]
+ if len(paths) != 1:
+ for p in paths:
+ id = p.getAttribute('id')
+ if id is not None and id.find('stroke') == -1:
+ path = p
+ break
+
+ tr = nodeTranslation(path)
+ d = path.getAttribute('d')
+ g.clearContours()
+ drawSVGPath(g, d, tr)
+
+
+def stat(path):
+ try:
+ return os.stat(path)
+ except OSError as e:
+ return None
+
+
+def writeFile(file, s):
+  """Write string `s` to the path `file`, truncating any existing content."""
+  with open(file, 'w') as f:
+    f.write(s)
+
+
+def writeFileAndMkDirsIfNeeded(file, s):
+ try:
+ writeFile(file, s)
+ except IOError as e:
+ if e.errno == 2:
+ os.makedirs(os.path.dirname(file))
+ writeFile(file, s)
+
+
+def syncGlyphUFOToSVG(glyphname, svgFile, mtime):
+  """Render the UFO glyph `glyphname` to `svgFile` and stamp it with `mtime`.
+
+  The mtime comes from the source .glif file, so later runs can compare
+  file timestamps (see syncGlyph) to pick the sync direction.
+  """
+  print glyphname + ': UFO -> SVG'
+  g = font.getGlyph(glyphname)
+  svg = glyphToSVG(g)
+  writeFileAndMkDirsIfNeeded(svgFile, svg)
+  # copy the .glif mtime onto the generated SVG
+  os.utime(svgFile, (mtime, mtime))
+  print 'write', svgFile
+
+
+def syncGlyphSVGToUFO(glyphname, svgFile):
+ print glyphname + ': SVG -> UFO'
+ svg = ''
+ with open(svgFile, 'r') as f:
+ svg = f.read()
+ g = font.getGlyph(glyphname)
+ glyphUpdateFromSVG(g, svg)
+
+
+def findGlifFile(glyphname):
+  """Locate the .glif file for `glyphname` inside the UFO's glyphs directory.
+
+  Probes several candidate spellings with inserted/appended underscores --
+  presumably to cope with UFO glif filename mangling of reserved or
+  case-colliding names (TODO confirm against the UFO spec). Returns
+  (path, stat_result) on success, or ('', None) when nothing matches.
+  """
+  # glyphname.glif
+  # glyphname_.glif
+  # glyphname__.glif
+  # glyphname___.glif
+  for underscoreCount in range(0, 5):
+    fn = os.path.join(ufopath, 'glyphs', glyphname + ('_' * underscoreCount) + '.glif')
+    st = stat(fn)
+    if st is not None:
+      return fn, st
+
+  if glyphname.find('.') != -1:
+    # glyph_.name.glif
+    # glyph__.name.glif
+    # glyph___.name.glif
+    for underscoreCount in range(0, 5):
+      nv = glyphname.split('.')
+      nv[0] = nv[0] + ('_' * underscoreCount)
+      ns = '.'.join(nv)
+      fn = os.path.join(ufopath, 'glyphs', ns + '.glif')
+      st = stat(fn)
+      if st is not None:
+        return fn, st
+
+  if glyphname.find('_') != -1:
+    # glyph_name.glif
+    # glyph_name_.glif
+    # glyph_name__.glif
+    # glyph__name.glif
+    # glyph__name_.glif
+    # glyph__name__.glif
+    # glyph___name.glif
+    # glyph___name_.glif
+    # glyph___name__.glif
+    for x in range(0, 4):
+      for y in range(0, 5):
+        ns = glyphname.replace('_', '__' + ('_' * x))
+        fn = os.path.join(ufopath, 'glyphs', ns + ('_' * y) + '.glif')
+        st = stat(fn)
+        if st is not None:
+          return fn, st
+
+  return ('', None)
+
+
+def syncGlyph(glyphname):
+  """Sync one glyph between UFO and SVG; the newer file wins.
+
+  Compares the mtimes of the glyph's .glif and .svg files and regenerates
+  the older side from the newer one. Returns (glifPath, svgMtime) when the
+  UFO side changed -- the caller re-stamps the .glif after saving -- or
+  (None, 0) when the UFO did not change. Raises if neither file exists.
+  """
+  glyphFile, glyphStat = findGlifFile(glyphname)
+
+  svgFile = os.path.join(svgdir, glyphname + '.svg')
+  svgStat = stat(svgFile)
+
+  if glyphStat is None and svgStat is None:
+    raise Exception("glyph %r doesn't exist in UFO or SVG directory" % glyphname)
+
+  # a missing file is treated as mtime 0, i.e. always older
+  c = cmp(
+    0 if glyphStat is None else glyphStat.st_mtime,
+    0 if svgStat is None else svgStat.st_mtime
+  )
+  if c < 0:
+    syncGlyphSVGToUFO(glyphname, svgFile)
+    return (glyphFile, svgStat.st_mtime) # glif file in UFO change + it's new mtime
+  elif c > 0:
+    syncGlyphUFOToSVG(glyphname, svgFile, glyphStat.st_mtime)
+  # else:
+  #   print glyphname + ': up to date'
+
+  return (None, 0) # UFO did not change
+
+
+# ————————————————————————————————————————————————————————————————————————
+# main
+#
+# Parse CLI arguments, open the UFO, sync every requested glyph, and when
+# any .glif was regenerated from SVG, save the font and restore the
+# recorded mtimes on the rewritten .glif files.
+
+argparser = argparse.ArgumentParser(description='Convert UFO glyphs to SVG')
+
+argparser.add_argument('--svgdir', dest='svgdir', metavar='<dir>', type=str,
+  default='',
+  help='Write SVG files to <dir>. If not specified, SVG files are' +
+    ' written to: {dirname(<ufopath>)/svg/<familyname>/<style>')
+
+argparser.add_argument('ufopath', metavar='<ufopath>', type=str,
+  help='Path to UFO packages')
+
+argparser.add_argument('glyphs', metavar='<glyphname>', type=str, nargs='*',
+  help='Glyphs to convert. Converts all if none specified.')
+
+args = argparser.parse_args()
+
+ufopath = args.ufopath.rstrip('/')
+
+font = OpenFont(ufopath)
+# use the larger of ascender and UPM as the vertical extent for SVG output
+effectiveAscender = max(font.info.ascender, font.info.unitsPerEm)
+
+# default SVG output directory: <ufo parent>/svg/<family>/<style>
+svgdir = args.svgdir
+if len(svgdir) == 0:
+  svgdir = os.path.join(
+    os.path.dirname(ufopath),
+    'svg',
+    font.info.familyName,
+    font.info.styleName
+  )
+
+print 'sync %s (%s)' % (font.info.familyName, font.info.styleName)
+
+glyphnames = args.glyphs if len(args.glyphs) else font.keys()
+
+modifiedGlifFiles = []
+for glyphname in glyphnames:
+  glyphFile, mtime = syncGlyph(glyphname)
+  if glyphFile is not None:
+    modifiedGlifFiles.append((glyphFile, mtime))
+
+if len(modifiedGlifFiles) > 0:
+  print 'Saving font'
+  font.save()
+  # font.save() rewrote the .glif files; re-stamp them with the source SVG
+  # mtimes so subsequent runs see them as up to date
+  for glyphFile, mtime in modifiedGlifFiles:
+    os.utime(glyphFile, (mtime, mtime))
+    print 'write', glyphFile
+
diff --git a/misc/svgsync2.py b/misc/svgsync2.py
new file mode 100755
index 000000000..992d6d314
--- /dev/null
+++ b/misc/svgsync2.py
@@ -0,0 +1,626 @@
+#!/usr/bin/env python
+# encoding: utf8
+#
+# Sync glyph shapes between SVG and UFO, creating a bridge between UFO and Figma.
+#
+import os
+import sys
+import argparse
+import re
+from StringIO import StringIO
+from hashlib import sha256
+from xml.dom.minidom import parseString as xmlparseString
+from svgpathtools import svg2paths, parse_path, Path, Line, CubicBezier
+from base64 import b64encode
+
+# from robofab.world import world, RFont, RGlyph, OpenFont, NewFont
+from robofab.objects.objectsRF import RFont, RGlyph, OpenFont, NewFont, RContour
+from robofab.objects.objectsBase import MOVE, LINE, CORNER, CURVE, QCURVE, OFFCURVE
+
+font = None # RFont
+ufopath = ''
+svgdir = ''
+effectiveAscender = 0
+
+
+def num(s):
+ return int(s) if s.find('.') == -1 else float(s)
+
+
+def glyphToSVGPath(g, yMul=-1):
+ commands = {'move':'M','line':'L','curve':'Y','offcurve':'X','offCurve':'X'}
+ svg = ''
+ contours = []
+ if len(g.components):
+ font.newGlyph('__svgsync')
+ new = font['__svgsync']
+ new.width = g.width
+ new.appendGlyph(g)
+ new.decompose()
+ g = new
+ if len(g):
+ for c in range(len(g)):
+ contours.append(g[c])
+ for i in range(len(contours)):
+ c = contours[i]
+ contour = end = ''
+ curve = False
+ points = c.points
+ if points[0].type == 'offCurve':
+ points.append(points.pop(0))
+ if points[0].type == 'offCurve':
+ points.append(points.pop(0))
+ for x in range(len(points)):
+ p = points[x]
+ command = commands[str(p.type)]
+ if command == 'X':
+ if curve == True:
+ command = ''
+ else:
+ command = 'C'
+ curve = True
+ if command == 'Y':
+ command = ''
+ curve = False
+ if x == 0:
+ command = 'M'
+ if p.type == 'curve':
+ end = ' ' + str(p.x) + ' ' + str(p.y * yMul)
+ contour += ' ' + command + str(p.x) + ' ' + str(p.y * yMul)
+ svg += ' ' + contour + end + 'z'
+ if font.has_key('__svgsync'):
+ font.removeGlyph('__svgsync')
+ return svg.strip()
+
+
+def vec2(x, y):
+ return float(x) + float(y) * 1j
+
+
+def glyphToPaths(g, yMul=-1):
+ paths = []
+ contours = []
+ yOffs = -font.info.unitsPerEm
+
+ # decompose components
+ if len(g.components):
+ font.newGlyph('__svgsync')
+ ng = font['__svgsync']
+ ng.width = g.width
+ ng.appendGlyph(g)
+ ng.decompose()
+ g = ng
+
+ for c in g:
+ curve = False
+ points = c.points
+ path = Path()
+ currentPos = 0j
+ controlPoints = []
+
+ for x in range(len(points)):
+ p = points[x]
+ # print 'p#' + str(x) + '.type = ' + repr(p.type)
+
+ if p.type == 'move':
+ currentPos = vec2(p.x, (p.y + yOffs) * yMul)
+ elif p.type == 'offcurve':
+ controlPoints.append(p)
+ elif p.type == 'curve':
+ pos = vec2(p.x, (p.y + yOffs) * yMul)
+ if len(controlPoints) == 2:
+ cp1, cp2 = controlPoints
+ path.append(CubicBezier(
+ currentPos,
+ vec2(cp1.x, (cp1.y + yOffs) * yMul),
+ vec2(cp2.x, (cp2.y + yOffs) * yMul),
+ pos))
+ else:
+ if len(controlPoints) != 1:
+ raise Exception('unexpected number of control points for curve')
+ cp = controlPoints[0]
+ path.append(QuadraticBezier(currentPos, vec2(cp.x, (cp.y + yOffs) * yMul), pos))
+ currentPos = pos
+ controlPoints = []
+ elif p.type == 'line':
+ pos = vec2(p.x, (p.y + yOffs) * yMul)
+ path.append(Line(currentPos, pos))
+ currentPos = pos
+
+ paths.append(path)
+
+ if font.has_key('__svgsync'):
+ font.removeGlyph('__svgsync')
+
+ return paths
+
+
+def maybeAddMove(contour, x, y, smooth):
+ if len(contour.segments) == 0:
+ contour.appendSegment(MOVE, [(x, y)], smooth=smooth)
+
+
+
+svgPathDataRegEx = re.compile(r'(?:([A-Z])\s*|)([0-9\.\-\+eE]+)')
+
+
+def drawSVGPath(g, d, tr):
+ yMul = -1
+ xOffs = tr[0]
+ yOffs = -(font.info.unitsPerEm - tr[1])
+
+ for pathd in d.split('M'):
+ pathd = pathd.strip()
+ # print 'pathd', pathd
+ if len(pathd) == 0:
+ continue
+ i = 0
+ closePath = False
+ if pathd[-1] == 'z':
+ closePath = True
+ pathd = pathd[0:-1]
+
+ pv = []
+ for m in svgPathDataRegEx.finditer('M' + pathd):
+ if m.group(1) is not None:
+ pv.append(m.group(1) + m.group(2))
+ else:
+ pv.append(m.group(2))
+
+ initX = 0
+ initY = 0
+
+ pen = g.getPen()
+
+ while i < len(pv):
+ pd = pv[i]; i += 1
+ cmd = pd[0]
+ x = num(pd[1:]) + xOffs
+ y = (num(pv[i]) + yOffs) * yMul; i += 1
+
+ if cmd == 'M':
+ # print cmd, x, y, '/', num(pv[i-2][1:])
+ initX = x
+ initY = y
+ pen.moveTo((x, y))
+ continue
+
+ if cmd == 'C':
+ # Bezier curve: "C x1 y1, x2 y2, x y"
+ x1 = x
+ y1 = y
+ x2 = num(pv[i]) + xOffs; i += 1
+ y2 = (num(pv[i]) + yOffs) * yMul; i += 1
+ x = num(pv[i]) + xOffs; i += 1
+ y = (num(pv[i]) + yOffs) * yMul; i += 1
+ pen.curveTo((x1, y1), (x2, y2), (x, y))
+ # print cmd, x1, y1, x2, y2, x, y
+
+ elif cmd == 'L':
+ pen.lineTo((x, y))
+
+ else:
+ raise Exception('unexpected SVG path command %r' % cmd)
+
+ if closePath:
+ pen.closePath()
+ else:
+ pen.endPath()
+ # print 'path ended. closePath:', closePath
+
+
+def glyphToSVG(g, path, hash):
+ width = g.width
+ height = font.info.unitsPerEm
+
+ d = {
+ 'name': g.name,
+ 'width': width,
+ 'height': effectiveAscender - font.info.descender,
+ 'effectiveAscender': effectiveAscender,
+ 'leftMargin': g.leftMargin,
+ 'rightMargin': g.rightMargin,
+ 'd': path.d(use_closed_attrib=True),
+ 'ascender': font.info.ascender,
+ 'descender': font.info.descender,
+ 'baselineOffset': height + font.info.descender,
+ 'unitsPerEm': font.info.unitsPerEm,
+ 'hash': hash,
+ }
+
+ svg = '''
+<svg xmlns="http://www.w3.org/2000/svg" width="%(width)d" height="%(height)d" data-svgsync-hash="%(hash)s">
+ <g id="%(name)s">
+ <path d="%(d)s" transform="translate(0 %(effectiveAscender)d)" />
+ <rect x="0" y="0" width="%(width)d" height="%(height)d" fill="" stroke="black" />
+ </g>
+</svg>
+ ''' % d
+ # print svg
+ return svg.strip()
+
+
+def _findPathNodes(n, paths, defs, uses, isDef=False):
+ for cn in n.childNodes:
+ if cn.nodeName == 'path':
+ if isDef:
+ defs[cn.getAttribute('id')] = cn
+ else:
+ paths.append(cn)
+ elif cn.nodeName == 'use':
+ uses[cn.getAttribute('xlink:href').lstrip('#')] = {'useNode': cn, 'targetNode': None}
+ elif cn.nodeName == 'defs':
+ _findPathNodes(cn, paths, defs, uses, isDef=True)
+ elif not isinstance(cn, basestring) and cn.childNodes and len(cn.childNodes) > 0:
+ _findPathNodes(cn, paths, defs, uses, isDef)
+ # return translate
+
+
+def findPathNodes(n, isDef=False):
+ paths = []
+ defs = {}
+ uses = {}
+ # <g id="Canvas" transform="translate(-3677 -24988)">
+ # <g id="six 2">
+ # <g id="six">
+ # <g id="Vector">
+ # <use xlink:href="#path0_fill" transform="translate(3886 25729)"/>
+ # ...
+ # <defs>
+ # <path id="path0_fill" ...
+ #
+ _findPathNodes(n, paths, defs, uses)
+
+ # flatten uses & defs
+ for k in uses.keys():
+ dfNode = defs.get(k)
+ if dfNode is not None:
+ v = uses[k]
+ v['targetNode'] = dfNode
+ if dfNode.nodeName == 'path':
+ useNode = v['useNode']
+ useNode.parentNode.replaceChild(dfNode, useNode)
+ attrs = useNode.attributes
+ for k in attrs.keys():
+ if k != 'xlink:href':
+ dfNode.setAttribute(k, attrs[k])
+ paths.append(dfNode)
+
+ else:
+ del defs[k]
+
+ return paths
+
+
+def nodeTranslation(path, x=0, y=0):
+ tr = path.getAttribute('transform')
+ if tr is not None:
+ if not isinstance(tr, basestring):
+ tr = tr.value
+ if len(tr) > 0:
+ m = re.match(r"translate\s*\(\s*(?P<x>[\-\d\.eE]+)[\s,]*(?P<y>[\-\d\.eE]+)\s*\)", tr)
+ if m is not None:
+ x += num(m.group('x'))
+ y += num(m.group('y'))
+ else:
+ raise Exception('Unable to handle transform="%s"' % tr)
+ # m = re.match(r"matrix\s*\(\s*(?P<a>[\-\d\.eE]+)[\s,]*(?P<b>[\-\d\.eE]+)[\s,]*(?P<c>[\-\d\.eE]+)[\s,]*(?P<d>[\-\d\.eE]+)[\s,]*(?P<e>[\-\d\.eE]+)[\s,]*(?P<f>[\-\d\.eE]+)[\s,]*", tr)
+ # if m is not None:
+ # a, b, c = num(m.group('a')), num(m.group('b')), num(m.group('c'))
+ # d, e, f = num(m.group('d')), num(m.group('e')), num(m.group('f'))
+ # # matrix -1 0 0 -1 -660.719 31947
+ # print 'matrix', a, b, c, d, e, f
+ # # matrix(-1 0 -0 -1 -2553 31943)
+ pn = path.parentNode
+ if pn is not None and pn.nodeName != '#document':
+ x, y = nodeTranslation(pn, x, y)
+ return (x, y)
+
+
+def glyphUpdateFromSVG(g, svgCode):
+ doc = xmlparseString(svgCode)
+ svg = doc.documentElement
+ paths = findPathNodes(svg)
+ if len(paths) == 0:
+ raise Exception('no <path> found in SVG')
+ path = paths[0]
+ if len(paths) != 1:
+ for p in paths:
+ id = p.getAttribute('id')
+ if id is not None and id.find('stroke') == -1:
+ path = p
+ break
+
+ tr = nodeTranslation(path)
+ d = path.getAttribute('d')
+ g.clearContours()
+ drawSVGPath(g, d, tr)
+
+
+def stat(path):
+ try:
+ return os.stat(path)
+ except OSError as e:
+ return None
+
+
+def writeFile(file, s):
+ with open(file, 'w') as f:
+ f.write(s)
+
+
+def writeFileAndMkDirsIfNeeded(file, s):
+ try:
+ writeFile(file, s)
+ except IOError as e:
+ if e.errno == 2:
+ os.makedirs(os.path.dirname(file))
+ writeFile(file, s)
+
+
+def findSvgSyncHashInSVG(svgCode):
+ # with open(svgFile, 'r') as f:
+ # svgCode = f.readline(512)
+ r = re.compile(r'^\s*<svg[^>]+data-svgsync-hash="([^"]*)".+')
+ m = r.match(svgCode)
+ if m is not None:
+ return m.group(1)
+ return None
+
+
+def computeSVGHashFromSVG(g):
+ # h = sha256()
+ return 'abc123'
+
+
+def encodePath(o, path):
+ o.write(path.d())
+
+
+def hashPaths(paths):
+ h = sha256()
+ for path in paths:
+ h.update(path.d()+';')
+ return b64encode(h.digest(), '-_')
+
+
+def svgGetPaths(svgCode):
+ doc = xmlparseString(svgCode)
+ svg = doc.documentElement
+ paths = findPathNodes(svg)
+ isFigmaSVG = svgCode.find('Figma</desc>') != -1
+
+ if len(paths) == 0:
+ return paths, (0,0)
+
+ paths2 = []
+ for path in paths:
+ id = path.getAttribute('id')
+ if not isFigmaSVG or (id is None or id.find('stroke') == -1):
+ tr = nodeTranslation(path)
+ d = path.getAttribute('d')
+ paths2.append((d, tr))
+
+ return paths2, isFigmaSVG
+
+
+def translatePath(path, trX, trY):
+ pass
+
+
+def parseSVG(svgFile):
+ svgCode = None
+ with open(svgFile, 'r') as f:
+ svgCode = f.read()
+
+ existingSvgHash = findSvgSyncHashInSVG(svgCode)
+ print 'hash in SVG file:', existingSvgHash
+
+ svgPathDefs, isFigmaSVG = svgGetPaths(svgCode)
+ paths = []
+ for pathDef, tr in svgPathDefs:
+ print 'pathDef:', pathDef, 'tr:', tr
+ path = parse_path(pathDef)
+ if tr[0] != 0 or tr[1] != 0:
+ path = path.translated(vec2(*tr))
+ paths.append(path)
+
+ return paths, existingSvgHash
+
+
+def syncGlyphUFOToSVG(g, glyphFile, svgFile, mtime, hasSvgFile):
+ # # Let's print out the first path object and the color it was in the SVG
+ # # We'll see it is composed of two CubicBezier objects and, in the SVG file it
+ # # came from, it was red
+ # paths, attributes, svg_attributes = svg2paths(svgFile, return_svg_attributes=True)
+ # print('svg_attributes:', repr(svg_attributes))
+ # # redpath = paths[0]
+ # # redpath_attribs = attributes[0]
+ # print(paths)
+ # print(attributes)
+ # wsvg(paths, attributes=attributes, svg_attributes=svg_attributes, filename=svgFile + '-x.svg')
+
+ # existingSVGHash = readSVGHash(svgFile)
+ svgPaths = None
+ existingSVGHash = None
+ if hasSvgFile:
+ svgPaths, existingSVGHash = parseSVG(svgFile)
+ print 'existingSVGHash:', existingSVGHash
+ print 'svgPaths:\n', '\n'.join([p.d() for p in svgPaths])
+ svgHash = hashPaths(svgPaths)
+ print 'hash(SVG-glyph) =>', svgHash
+
+ # computedSVGHash = computeSVGHashFromSVG(svgFile)
+ # print 'computeSVGHashFromSVG:', computedSVGHash
+
+ ufoPaths = glyphToPaths(g)
+ print 'ufoPaths:\n', '\n'.join([p.d() for p in ufoPaths])
+ ufoGlyphHash = hashPaths(ufoPaths)
+ print 'hash(UFO-glyph) =>', ufoGlyphHash
+
+ # svg = glyphToSVG(g, ufoGlyphHash)
+
+ # with open('/Users/rsms/src/interface/_local/svgPaths.txt', 'w') as f:
+ # f.write(svgPaths[0].d())
+ # with open('/Users/rsms/src/interface/_local/ufoPaths.txt', 'w') as f:
+ # f.write(ufoPaths[0].d())
+ # print svgPaths[0].d() == ufoPaths[0].d()
+
+ # svgHash = hashPaths()
+ # print 'hash(UFO-glyph) =>', pathHash
+
+ sys.exit(1)
+ if pathHash == existingSVGHash:
+ return (None, 0) # did not change
+
+ svg = glyphToSVG(g, pathHash)
+ sys.exit(1)
+
+ writeFileAndMkDirsIfNeeded(svgFile, svg)
+ os.utime(svgFile, (mtime, mtime))
+ print 'svgsync write', svgFile
+
+ g.lib['svgsync.hash'] = pathHash
+ return (glyphFile, mtime)
+
+
+def syncGlyphSVGToUFO(glyphname, svgFile):
+ print glyphname + ': SVG -> UFO'
+ sys.exit(1)
+ svg = ''
+ with open(svgFile, 'r') as f:
+ svg = f.read()
+ g = font.getGlyph(glyphname)
+ glyphUpdateFromSVG(g, svg)
+
+
+def findGlifFile(glyphname):
+ # glyphname.glif
+ # glyphname_.glif
+ # glyphname__.glif
+ # glyphname___.glif
+ for underscoreCount in range(0, 5):
+ fn = os.path.join(ufopath, 'glyphs', glyphname + ('_' * underscoreCount) + '.glif')
+ st = stat(fn)
+ if st is not None:
+ return fn, st
+
+ if glyphname.find('.') != -1:
+ # glyph_.name.glif
+ # glyph__.name.glif
+ # glyph___.name.glif
+ for underscoreCount in range(0, 5):
+ nv = glyphname.split('.')
+ nv[0] = nv[0] + ('_' * underscoreCount)
+ ns = '.'.join(nv)
+ fn = os.path.join(ufopath, 'glyphs', ns + '.glif')
+ st = stat(fn)
+ if st is not None:
+ return fn, st
+
+ if glyphname.find('_') != -1:
+ # glyph_name.glif
+ # glyph_name_.glif
+ # glyph_name__.glif
+ # glyph__name.glif
+ # glyph__name_.glif
+ # glyph__name__.glif
+ # glyph___name.glif
+ # glyph___name_.glif
+ # glyph___name__.glif
+ for x in range(0, 4):
+ for y in range(0, 5):
+ ns = glyphname.replace('_', '__' + ('_' * x))
+ fn = os.path.join(ufopath, 'glyphs', ns + ('_' * y) + '.glif')
+ st = stat(fn)
+ if st is not None:
+ return fn, st
+
+ return ('', None)
+
+
+def syncGlyph(glyphname, createSVG=False): # => (glyphname, mtime) or (None, 0) if noop
+ glyphFile, glyphStat = findGlifFile(glyphname)
+
+ svgFile = os.path.join(svgdir, glyphname + '.svg')
+ svgStat = stat(svgFile)
+
+ if glyphStat is None and svgStat is None:
+ raise Exception("glyph %r doesn't exist in UFO or SVG directory" % glyphname)
+
+ c = cmp(
+ 0 if glyphStat is None else glyphStat.st_mtime,
+ 0 if svgStat is None else svgStat.st_mtime
+ )
+
+ g = font.getGlyph(glyphname)
+ ufoPathHash = g.lib['svgsync.hash'] if 'svgsync.hash' in g.lib else None
+ print '[syncGlyph] g.lib["svgsync.hash"] =', ufoPathHash
+
+ c = 1 # XXX DEBUG
+
+ if c < 0:
+ syncGlyphSVGToUFO(glyphname, svgFile)
+ return (glyphFile, svgStat.st_mtime) # glif file in UFO change + it's new mtime
+ elif c > 0 and (svgStat is not None or createSVG):
+ print glyphname + ': UFO -> SVG'
+ return syncGlyphUFOToSVG(
+ g,
+ glyphFile,
+ svgFile,
+ glyphStat.st_mtime,
+ hasSvgFile=svgStat is not None
+ )
+
+ return (None, 0) # UFO did not change
+
+
+# ————————————————————————————————————————————————————————————————————————
+# main
+
+argparser = argparse.ArgumentParser(description='Convert UFO glyphs to SVG')
+
+argparser.add_argument('--svgdir', dest='svgdir', metavar='<dir>', type=str,
+ default='',
+ help='Write SVG files to <dir>. If not specified, SVG files are' +
+ ' written to: {dirname(<ufopath>)/svg/<familyname>/<style>')
+
+argparser.add_argument('ufopath', metavar='<ufopath>', type=str,
+ help='Path to UFO packages')
+
+argparser.add_argument('glyphs', metavar='<glyphname>', type=str, nargs='*',
+ help='Glyphs to convert. Converts all if none specified.')
+
+args = argparser.parse_args()
+
+ufopath = args.ufopath.rstrip('/')
+
+font = OpenFont(ufopath)
+effectiveAscender = max(font.info.ascender, font.info.unitsPerEm)
+
+svgdir = args.svgdir
+if len(svgdir) == 0:
+ svgdir = os.path.join(
+ os.path.dirname(ufopath),
+ 'svg',
+ font.info.familyName,
+ font.info.styleName
+ )
+
+print 'svgsync sync %s (%s)' % (font.info.familyName, font.info.styleName)
+
+createSVGs = len(args.glyphs) > 0
+glyphnames = args.glyphs if len(args.glyphs) else font.keys()
+
+modifiedGlifFiles = []
+for glyphname in glyphnames:
+ glyphFile, mtime = syncGlyph(glyphname, createSVG=createSVGs)
+ if glyphFile is not None:
+ modifiedGlifFiles.append((glyphFile, mtime))
+
+if len(modifiedGlifFiles) > 0:
+ font.save()
+ for glyphFile, mtime in modifiedGlifFiles:
+ os.utime(glyphFile, (mtime, mtime))
+ print 'svgsync write', glyphFile
+
diff --git a/misc/ttf2woff/.gitignore b/misc/ttf2woff/.gitignore
new file mode 100644
index 000000000..b257e4c92
--- /dev/null
+++ b/misc/ttf2woff/.gitignore
@@ -0,0 +1,9 @@
+*.o
+*.d
+*.core
+*.obj
+*.exe
+*~
+.DS_Store
+
+ttf2woff
diff --git a/misc/ttf2woff/Makefile b/misc/ttf2woff/Makefile
new file mode 100644
index 000000000..ea8e739e0
--- /dev/null
+++ b/misc/ttf2woff/Makefile
@@ -0,0 +1,68 @@
+# gmake
+
+NAME = ttf2woff
+VERSION = 0.14
+BINDIR = /usr/local/bin
+PKG=$(NAME)-$(VERSION)
+FILES_TTF2WOFF := Makefile ttf2woff.c ttf2woff.h genwoff.c genttf.c readttf.c readttc.c readwoff.c optimize.c \
+ comp-zlib.c comp-zopfli.c compat.c ttf2woff.rc zopfli.diff
+FILES_ZOPFLI := zopfli.h symbols.h \
+ $(patsubst %,%.h,zlib_container deflate lz77 blocksplitter squeeze hash cache tree util katajainen) \
+ $(patsubst %,%.c,zlib_container deflate lz77 blocksplitter squeeze hash cache tree util katajainen)
+FILES += $(FILES_TTF2WOFF) $(addprefix zopfli/,$(FILES_ZOPFLI))
+
+ZOPFLI = 1
+
+OBJ := ttf2woff.o readttf.o readttc.o readwoff.o genwoff.o genttf.o optimize.o
+ifeq ($(ZOPFLI),)
+OBJ += comp-zlib.o
+else
+OBJ += comp-zopfli.o
+LDFLAGS += -lm
+endif
+
+CFLAGS ?= -O2 -g
+LDFLAGS += -lz
+
+# eg. make WIN32=1 CC=mingw32-gcc RC=mingw32-windres
+ifdef WIN32
+EXE = .exe
+CFLAGS += -DNO_ERRWARN
+OBJ += compat.o rc.o
+endif
+
+ttf2woff$(EXE): $(OBJ)
+ $(CC) -o $@ $(OBJ) $(LDFLAGS)
+
+ttf2woff.o: ttf2woff.c ttf2woff.h Makefile
+ $(CC) $(CFLAGS) -DVERSION=$(VERSION) -c ttf2woff.c
+
+comp-zopfli.o: comp-zopfli.c ttf2woff.h $(addprefix zopfli/,$(FILES_ZOPFLI))
+ $(CC) $(CFLAGS) -c comp-zopfli.c
+
+rc.o: ttf2woff.rc Makefile
+ $(RC) $(DEF) -DVERNUMS=`echo $(VERSION) | sed 's/\\./,/g; s/[^0-9,]//g'` -DVERSION=$(VERSION) -o $@ ttf2woff.rc
+
+install: ttf2woff
+ install -s $< $(BINDIR)
+
+clean:
+ rm -f ttf2woff $(addsuffix .o,$(basename $(filter %.c,$(FILES_TTF2WOFF))))
+
+dist:
+ ln -s . $(PKG)
+ tar czf $(PKG).tar.gz --group=root --owner=root $(addprefix $(PKG)/, $(FILES)); \
+ rm $(PKG)
+
+.PHONY: install clean dist zopfli zopfli.diff
+
+
+# git://github.com/google/zopfli.git
+ZOPFLI_SRC = zopfli-src
+zopfli: $(addprefix $(ZOPFLI_SRC)/src/zopfli/,$(FILES_ZOPFLI))
+ @install -d zopfli
+ cp -pf $^ zopfli
+ patch -p3 -dzopfli <zopfli.diff
+
+zopfli.diff:
+ diff -u --minimal $(ZOPFLI_SRC)/src/zopfli zopfli >$@; true
diff --git a/misc/ttf2woff/comp-zlib.c b/misc/ttf2woff/comp-zlib.c
new file mode 100644
index 000000000..2dd4cefc1
--- /dev/null
+++ b/misc/ttf2woff/comp-zlib.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2013 Jan Bobrowski <jb@wizard.ae.krakow.pl>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <stdlib.h>
+#include <zlib.h>
+#include "ttf2woff.h"
+
+char *copression_by = "zlib";
+
+int zlib_compress(struct buf *out, struct buf *inp)
+{
+ u8 *b;
+ int v;
+ uLongf len;
+
+ len = inp->len;
+ b = my_alloc(inp->len);
+
+ v = compress2(b,&len, inp->ptr,inp->len, 9);
+
+ if(v==Z_OK && REALLY_SMALLER(len, inp->len)) {
+ out->ptr = b;
+ out->len = len;
+ return 1;
+ } else {
+ my_free(b);
+ return 0;
+ }
+}
diff --git a/misc/ttf2woff/comp-zopfli.c b/misc/ttf2woff/comp-zopfli.c
new file mode 100644
index 000000000..3c20498d0
--- /dev/null
+++ b/misc/ttf2woff/comp-zopfli.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2013 Jan Bobrowski <jb@wizard.ae.krakow.pl>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <stdlib.h>
+#include "ttf2woff.h"
+
+#include "zopfli/zlib_container.c"
+#include "zopfli/deflate.c"
+#include "zopfli/lz77.c"
+#include "zopfli/blocksplitter.c"
+#include "zopfli/squeeze.c"
+#include "zopfli/hash.c"
+#include "zopfli/cache.c"
+#include "zopfli/tree.c"
+#include "zopfli/util.c"
+#include "zopfli/katajainen.c"
+
+#define adler32 zlib_adler32
+#include <zlib.h>
+
+char *copression_by = "zopfli";
+
+int zlib_compress(struct buf *out, struct buf *inp)
+{
+ ZopfliOptions opt = {0};
+ u8 *b=0;
+ size_t sz=0;
+
+ opt.numiterations = 15;
+ ZopfliZlibCompress(&opt, inp->ptr, inp->len, &b, &sz);
+
+ if(REALLY_SMALLER(sz, inp->len)) {
+
+ /* Trust, but verify */
+ uLong tmpl = inp->len;
+ Bytef *tmpb = my_alloc(inp->len);
+ int v = uncompress(tmpb, &tmpl, b, sz);
+ if(v!=Z_OK || tmpl!=inp->len)
+ errx(3,"Zopfli error");
+ my_free(tmpb);
+
+ out->ptr = b;
+ out->len = sz;
+ return 1;
+ } else {
+ free(b);
+ return 0;
+ }
+}
diff --git a/misc/ttf2woff/compat.c b/misc/ttf2woff/compat.c
new file mode 100644
index 000000000..55d2cee72
--- /dev/null
+++ b/misc/ttf2woff/compat.c
@@ -0,0 +1,43 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdarg.h>
+
+static void er(int s, int e, char *f, va_list *va)
+{
+// fprintf(stderr, "%s: ", getexecname());
+ if(f) vfprintf(stderr, f, *va);
+ va_end(*va);
+ if(e >= 0) fprintf(stderr, ": %s", strerror(e));
+ putc('\n', stderr);
+ if(s >= 0) exit(s);
+}
+
+void err(int s, char *f, ...)
+{
+ va_list va;
+ va_start(va, f);
+ er(s, errno, f, &va);
+}
+
+void errx(int s, char *f, ...)
+{
+ va_list va;
+ va_start(va, f);
+ er(s, -1, f, &va);
+}
+
+void warn(char *f, ...)
+{
+ va_list va;
+ va_start(va, f);
+ er(-1, errno, f, &va);
+}
+
+void warnx(char *f, ...)
+{
+ va_list va;
+ va_start(va, f);
+ er(-1, -1, f, &va);
+}
diff --git a/misc/ttf2woff/genttf.c b/misc/ttf2woff/genttf.c
new file mode 100644
index 000000000..ad87be45b
--- /dev/null
+++ b/misc/ttf2woff/genttf.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2013 Jan Bobrowski <jb@wizard.ae.krakow.pl>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <assert.h>
+#include "ttf2woff.h"
+
+u8 *put_ttf_header(u8 buf[12], struct ttf *ttf)
+{
+ u8 *p = buf;
+ int n = ttf->ntables;
+ p = p32(p, ttf->flavor);
+ p = p16(p, n);
+ while(n & n-1) n &= n-1;
+ p = p16(p, n<<4);
+ p = p16(p, ffs(n)-1);
+ p = p16(p, ttf->ntables-n << 4);
+ return p;
+}
+
+void gen_ttf(struct buf *out, struct ttf *ttf)
+{
+ unsigned sfnt_size;
+ u8 *buf, *p;
+ int i;
+
+ sfnt_size = 12 + 16*ttf->ntables;
+ for(i=0; i<ttf->ntables; i++) {
+ struct table *t = ttf->tab_pos[i];
+ t->pos = sfnt_size; // remember offset in output file
+ sfnt_size += t->buf.len+3 & ~3;
+ }
+
+ buf = my_alloc(sfnt_size);
+ p = put_ttf_header(buf, ttf);
+
+ for(i=0; i<ttf->ntables; i++) {
+ struct table *t = &ttf->tables[i];
+ p = p32(p, t->tag);
+ p = p32(p, t->csum);
+ p = p32(p, t->pos);
+ p = p32(p, t->buf.len);
+ }
+
+ for(i=0; i<ttf->ntables; i++) {
+ struct table *t = ttf->tab_pos[i];
+ unsigned sz = t->buf.len;
+ p = append(p, t->buf.ptr, sz);
+ while(sz&3) *p++=0, sz++;
+ }
+
+ assert(p == buf+sfnt_size);
+
+ out->ptr = buf;
+ out->len = sfnt_size;
+}
diff --git a/misc/ttf2woff/genwoff.c b/misc/ttf2woff/genwoff.c
new file mode 100644
index 000000000..7b6b746a7
--- /dev/null
+++ b/misc/ttf2woff/genwoff.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2013 Jan Bobrowski <jb@wizard.ae.krakow.pl>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include "ttf2woff.h"
+
+#define MIN_COMPR 16
+
+void gen_woff(struct buf *out, struct ttf *ttf)
+{
+ unsigned woff_size, sfnt_size;
+ struct buf meta_comp={0};
+ u32 meta_off, priv_off;
+ u8 *buf, *p;
+ int i;
+
+ woff_size = 44 + 20*ttf->ntables;
+ sfnt_size = 12 + 16*ttf->ntables;
+ for(i=0; i<ttf->ntables; i++) {
+ struct table *t = ttf->tab_pos[i];
+ t->pos = woff_size; // remember offset in output file
+ t->zbuf = t->buf;
+ if(t->buf.len >= MIN_COMPR)
+ zlib_compress(&t->zbuf, &t->buf);
+ sfnt_size += t->buf.len+3 & ~3;
+ woff_size += t->zbuf.len+3 & ~3;
+ }
+
+ meta_off = 0;
+ if(ttf->woff_meta.len >= MIN_COMPR) {
+ meta_comp = ttf->woff_meta;
+ zlib_compress(&meta_comp, &ttf->woff_meta);
+ meta_off = woff_size;
+ woff_size += meta_comp.len;
+ }
+
+ priv_off = 0;
+ if(ttf->woff_priv.len) {
+ priv_off = woff_size;
+ woff_size += ttf->woff_priv.len;
+ }
+
+ buf = my_alloc(woff_size);
+
+ p32(buf, 0x774F4646);
+ p32(buf+4, ttf->flavor);
+ p32(buf+8, woff_size);
+ p16(buf+12, ttf->ntables);
+ p16(buf+14, 0);
+ p32(buf+16, sfnt_size);
+ p32(buf+20, 0); // version ?
+ p32(buf+24, meta_off);
+ p32(buf+28, meta_comp.len); // meta len
+ p32(buf+32, ttf->woff_meta.len); // meta orig len
+ p32(buf+36, priv_off);
+ p32(buf+40, ttf->woff_priv.len);
+
+ p = buf + 44;
+ for(i=0; i<ttf->ntables; i++) {
+ struct table *t = &ttf->tables[i];
+ p32(p, t->tag);
+ p32(p+4, t->pos);
+ p32(p+8, t->zbuf.len);
+ p32(p+12, t->buf.len);
+ p32(p+16, t->csum);
+ p += 20;
+ }
+
+ for(i=0; i<ttf->ntables; i++) {
+ struct table *t = ttf->tab_pos[i];
+ u32 sz = t->zbuf.len;
+ p = append(p, t->zbuf.ptr, sz);
+ while(sz&3) *p++=0, sz++;
+// if(t->zbuf.ptr != t->buf.ptr)
+// my_free(t->zbuf.ptr);
+ }
+
+ if(meta_comp.len)
+ p = append(p, meta_comp.ptr, meta_comp.len);
+
+ if(ttf->woff_priv.len)
+ p = append(p, ttf->woff_priv.ptr, ttf->woff_priv.len);
+
+ assert(p == buf+woff_size);
+
+ out->ptr = buf;
+ out->len = woff_size;
+}
diff --git a/misc/ttf2woff/optimize.c b/misc/ttf2woff/optimize.c
new file mode 100644
index 000000000..c85f88455
--- /dev/null
+++ b/misc/ttf2woff/optimize.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright (C) 2013 Jan Bobrowski <jb@wizard.ae.krakow.pl>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+#include "ttf2woff.h"
+
+struct table *find_table(struct ttf *ttf, char tag[4])
+{
+ u32 tg = g32((u8*)tag);
+ int i;
+ for(i=0; i<ttf->ntables; i++)
+ if(ttf->tables[i].tag == tg)
+ return &ttf->tables[i];
+ return 0;
+}
+
+static void replace_table(struct table *t, u8 *p, int l)
+{
+ if(t->free_buf)
+ t->buf.ptr = my_free(t->buf.ptr);
+ t->free_buf = 1;
+ t->modified = 1;
+ t->buf.ptr = p;
+ t->buf.len = l;
+}
+
+static void optimized(struct table *t, struct buf new)
+{
+ if(g.verbose)
+ echo("Optimized %s table: %u > %u (%d bytes)", t->name, t->buf.len, new.len, new.len-t->buf.len);
+ replace_table(t, new.ptr, new.len);
+}
+
+static void optimize_loca(struct ttf *ttf)
+{
+ struct table *head, *loca, *glyf;
+ struct buf new;
+ int i,n;
+
+ head = find_table(ttf, "head");
+ loca = find_table(ttf, "loca");
+ glyf = find_table(ttf, "glyf");
+
+ if(!head || !loca || !glyf)
+ return;
+
+ if(head->buf.len<54 || g16(head->buf.ptr+50)!=1)
+ return;
+
+ if(loca->buf.len&3 || loca->buf.len<4)
+ return;
+
+ // we have 32-bit loca table
+
+ if(glyf->buf.len != g32(loca->buf.ptr+loca->buf.len-4))
+ return;
+
+ if(glyf->buf.len >= 1<<16)
+ return;
+
+ n = loca->buf.len>>2;
+ new.len = 2*n;
+ new.ptr = my_alloc(new.len);
+ for(i=0;i<n;i++) {
+ u32 o = g32(loca->buf.ptr+4*i);
+ if(o&1) {
+ echo("Bad offset in loca");
+ my_free(new.ptr);
+ return;
+ }
+ p16(new.ptr+2*i, o>>1);
+ }
+
+ optimized(loca, new);
+
+ p16(head->buf.ptr+50, 0);
+ head->modified = 1;
+}
+
+static int overlap(struct buf a, struct buf b)
+{
+ int o = a.len<b.len ? a.len : b.len;
+ while(o) {
+ if(memcmp(a.len-o+a.ptr, b.ptr, o)==0)
+ break;
+ o--;
+ }
+ return o;
+}
+
+static u8 *bufbuf(struct buf a, struct buf b)
+{
+ u8 *p=a.ptr, *e=a.ptr+a.len-b.len;
+ while(p<=e) {
+ if(memcmp(p,b.ptr,b.len)==0)
+ return p;
+ p++;
+ }
+ return 0;
+}
+
+static int name_cmp_len(const void *va, const void *vb) {
+ struct buf a = *(struct buf*)va;
+ struct buf b = *(struct buf*)vb;
+ int d = a.len - b.len;
+ if(!d) d = memcmp(a.ptr, b.ptr, a.len);
+ return d;
+}
+
+static void optimize_name(struct ttf *ttf)
+{
+ struct table *name = find_table(ttf, "name");
+ struct buf str, new;
+ struct buf *ent;
+ u8 *p;
+ int count,n,i;
+
+ if(!name || name->buf.len<6+2*12+1 || g16(name->buf.ptr))
+ return;
+
+ n = g16(name->buf.ptr+4); // stringOffset
+ if(name->buf.len < n)
+ goto corrupted;
+
+ str.ptr = name->buf.ptr+n;
+ str.len = name->buf.len-n;
+
+ count = g16(name->buf.ptr+2);
+ if(name->buf.len < 6+12*count) {
+corrupted:
+ echo("Name table corrupted");
+ return;
+ }
+
+ n = count;
+ ent = my_alloc(n * sizeof *ent);
+
+ p = name->buf.ptr+6;
+ for(i=0; i<n; i++) {
+ unsigned l = g16(p+8);
+ unsigned o = g16(p+10);
+ if(o+l > str.len) {
+ echo("Bad string location in name table");
+ my_free(ent);
+ return;
+ }
+ if(l) {
+ ent[i].ptr = str.ptr + o;
+ ent[i].len = l;
+ }
+ p += 12;
+ }
+
+ qsort(ent, n, sizeof *ent, name_cmp_len);
+
+ for(;;) {
+ int j,mo,mi,mj;
+ struct buf a, b, c;
+
+ mo = 0;
+ for(j=0;j<n;j++) for(i=1;i<n;i++) if(i!=j) {
+ int o;
+ a = ent[i];
+ b = ent[j];
+ if(bufbuf(a,b))
+ goto remove_b;
+ o = overlap(a,b);
+ if(o > mo) {
+ mo = o;
+ mi = i;
+ mj = j;
+ }
+ }
+ if(!mo)
+ break;
+
+ a = ent[mi];
+ b = ent[mj];
+ c.len = a.len + b.len - mo;
+ c.ptr = my_alloc(c.len);
+ p = append(c.ptr, a.ptr, a.len);
+ append(p, b.ptr+mo, b.len-mo);
+ if(a.ptr<str.ptr || a.ptr>=str.ptr+str.len)
+ my_free(a.ptr);
+
+ i = mi<mj ? mi : mj;
+ j = mi<mj ? mj : mi;
+ ent[i] = c;
+
+remove_b:
+ if(b.ptr<str.ptr || b.ptr>=str.ptr+str.len)
+ my_free(b.ptr);
+ n--;
+ while(j < n) ent[j]=ent[j+1], j++;
+ }
+
+ {
+ int sz = 6 + 12*count;
+ for(i=0;i<n;i++)
+ sz += ent[i].len;
+
+ if(sz >= name->buf.len) {
+ my_free(ent);
+ return;
+ }
+
+ new.len = sz;
+ new.ptr = my_alloc(sz);
+
+ p = new.ptr + 6 + 12*count;
+ for(i=0;i<n;i++) {
+ struct buf a = ent[i];
+ memcpy(p,a.ptr,a.len); p+=a.len;
+ if(a.ptr<str.ptr || a.ptr>=str.ptr+str.len)
+ my_free(a.ptr);
+ }
+ assert(p == new.ptr+new.len);
+ }
+
+ my_free(ent);
+
+ memcpy(new.ptr, name->buf.ptr, 6+12*count);
+ p16(new.ptr+4,6+12*count);
+
+ {
+ struct buf newstr;
+
+ newstr.ptr = new.ptr + 6+12*count;
+ newstr.len = new.len - 6+12*count;
+
+ p = new.ptr + 6 + 10;
+ for(i=0;i<count;i++) {
+ struct buf a = {str.ptr+g16(p), g16(p-2)};
+ u8 *s = bufbuf(newstr, a);
+ assert(s);
+ p16(p, s-newstr.ptr);
+ p += 12;
+ }
+ }
+
+#ifndef NDEBUG
+ for(i=0; i<count; i++) {
+ u8 *p0 = name->buf.ptr;
+ u8 *p1 = new.ptr;
+ p0 += g16(p0+4) + g16(p0+6+12*i+10);
+ p1 += g16(p1+4) + g16(p1+6+12*i+10);
+ assert(!memcmp(p0,p1,g16(new.ptr+6+12*i+8)));
+ }
+#endif
+
+ optimized(name, new);
+}
+
+static void optimize_hmtx(struct ttf *ttf)
+{
+ struct table *hhea, *hmtx;
+ struct buf buf;
+ u8 *p, *q;
+ int nlhm,adv,n;
+
+ hhea = find_table(ttf, "hhea");
+ hmtx = find_table(ttf, "hmtx");
+
+ if(!hhea || !hmtx || hhea->buf.len < 36 || g32(hhea->buf.ptr)!=0x10000)
+ return;
+
+ nlhm = g16(hhea->buf.ptr + 34);
+ buf = hmtx->buf;
+
+ if(!nlhm || buf.len&1 || buf.len < 4*nlhm) {
+ return;
+ }
+
+ if(nlhm<2)
+ return;
+
+ p = buf.ptr + 4*(nlhm-1);
+ adv = g16(p);
+
+ for(n=nlhm; n>1; n--) {
+ p -= 4;
+ if(adv != g16(p))
+ break;
+ }
+ if(n < nlhm) {
+ struct buf new;
+ int i, nent = (buf.len>>1) - nlhm;
+
+ new.len = 2*nent + 2*n;
+ new.ptr = my_alloc(new.len);
+ p = append(new.ptr, buf.ptr, n<<2);
+ q = buf.ptr + (n<<2);
+ for(i=n; i<nlhm; i++) {
+ p = p16(p, g16(q+2));
+ q += 4;
+ }
+ p = append(p, q, buf.ptr+buf.len-q);
+ assert(p == new.ptr+new.len);
+
+ optimized(hmtx, new);
+
+ p16(hhea->buf.ptr+34, n);
+ hhea->modified = 1;
+ }
+}
+
+void optimize(struct ttf *ttf)
+{
+ optimize_loca(ttf);
+ optimize_name(ttf);
+ optimize_hmtx(ttf);
+}
diff --git a/misc/ttf2woff/readttc.c b/misc/ttf2woff/readttc.c
new file mode 100644
index 000000000..aad7ba0bd
--- /dev/null
+++ b/misc/ttf2woff/readttc.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2013 Jan Bobrowski <jb@wizard.ae.krakow.pl>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include "ttf2woff.h"
+
+/* Parse a TTC (TrueType Collection) header and load font number `fontn`
+ * (0-based) from it by delegating to read_ttf() at that font's offset.
+ * Exits via BAD_FONT/errx on malformed input or an out-of-range index. */
+void read_ttc(struct ttf *ttf, u8 *data, size_t length, int fontn)
+{
+ unsigned n, o;
+
+ /* minimum: 16-byte TTC header, one offset, one 12+16-byte font dir */
+ if(length < 16+12+16) BAD_FONT;
+
+ n = g32(data+8); /* number of fonts in the collection */
+ if(length < 16+(4+12+16)*n) BAD_FONT;
+
+ if(fontn<0 || fontn>=n)
+ errx(1, "No font #%d in collection",fontn);
+
+ o = g32(data+12+4*fontn); /* byte offset of the selected font */
+ if(o >= length) BAD_FONT;
+
+ read_ttf(ttf, data, length, o);
+}
diff --git a/misc/ttf2woff/readttf.c b/misc/ttf2woff/readttf.c
new file mode 100644
index 000000000..87716c1c9
--- /dev/null
+++ b/misc/ttf2woff/readttf.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2013 Jan Bobrowski <jb@wizard.ae.krakow.pl>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include "ttf2woff.h"
+
+/* Parse an sfnt (TTF/OTF) font starting at `start` within data[0..length).
+ * Fills ttf->flavor, ttf->ntables and the ttf->tables array; table buffers
+ * point into `data` (not copied). Exits via BAD_FONT on malformed input. */
+void read_ttf(struct ttf *ttf, u8 *data, size_t length, unsigned start)
+{
+ int i;
+ u8 *p;
+
+ /* must hold the 12-byte offset table + one 16-byte dir entry
+ (the '+' before 12 is a unary plus) */
+ if(length-start<+12+16)
+ BAD_FONT;
+
+ ttf->flavor = g32(data+start);
+ // XXX check type 'true', or ...
+ ttf->ntables = g16(data+start+4);
+
+ if(!ttf->ntables || length-start<=12+16*ttf->ntables)
+ BAD_FONT;
+
+ alloc_tables(ttf);
+
+ p = data+start+12; /* first table directory entry */
+ for(i=0; i<ttf->ntables; i++) {
+ struct table *t = &ttf->tables[i];
+ u32 off=g32(p+8), len=g32(p+12);
+ /* overflow-safe bounds check on the table extent */
+ if((off|len)>length || off+len>length)
+ BAD_FONT;
+ t->tag = g32(p);
+ t->csum = g32(p+4);
+ t->pos = off;
+ t->buf.ptr = data + off; /* borrowed, not copied */
+ t->buf.len = len;
+ name_table(t);
+
+// echo("%5X %5X %s", off, len, t->name);
+
+ p += 16;
+ }
+}
diff --git a/misc/ttf2woff/readwoff.c b/misc/ttf2woff/readwoff.c
new file mode 100644
index 000000000..566eaea3e
--- /dev/null
+++ b/misc/ttf2woff/readwoff.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2013 Jan Bobrowski <jb@wizard.ae.krakow.pl>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include <zlib.h>
+#include "ttf2woff.h"
+
+/* Return a buffer with the table data at p[0..len). If len equals the
+ * declared original length the data is uncompressed and used in place
+ * (no copy, no allocation); otherwise it is zlib-inflated into a fresh
+ * allocation of orig_len bytes. Exits on any zlib failure. */
+static struct buf get_or_inflate(u8 *p, size_t len, size_t orig_len)
+{
+ struct buf buf;
+ uLongf blen;
+ char *m;
+ int v;
+
+ if(len == orig_len) {
+ buf.ptr = p; /* stored uncompressed: borrow the input */
+ buf.len = len;
+ return buf;
+ }
+
+ buf.len = orig_len;
+ buf.ptr = my_alloc(orig_len);
+
+ blen = buf.len;
+ v = uncompress(buf.ptr, &blen, p, len);
+ switch(v) {
+ case Z_OK:
+ if(blen==buf.len)
+ return buf;
+ /* Z_OK with a short result falls through on purpose and
+ reports a length error. NOTE(review): the message text
+ "BAD_FONT uncompressed length" looks like an accidental
+ macro-name paste ("Bad uncompressed length"?) — confirm. */
+ case Z_MEM_ERROR: m = "BAD_FONT uncompressed length"; break;
+ case Z_DATA_ERROR: m = "Data corrupted"; ; break;
+ default: m = "Error";
+ }
+
+ errx(3, "zlib: %s", m);
+}
+
+/* Parse a WOFF 1.0 file: header, optional metadata block (inflated if
+ * compressed), private data block, and the table directory. Table data
+ * is inflated per-table via get_or_inflate(); t->free_buf marks buffers
+ * that may own a fresh allocation. Exits via BAD_FONT on bad input. */
+void read_woff(struct ttf *ttf, u8 *data, size_t length)
+{
+ u8 *p;
+ int i;
+
+ /* 44-byte WOFF header + at least one 20-byte directory entry */
+ if(length<=44+20) BAD_FONT;
+
+ ttf->flavor = g32(data+4); /* sfnt flavor of the packed font */
+
+ if(g32(data+8) != length) BAD_FONT; /* totalSize must match */
+
+ ttf->ntables = g16(data+12);
+ if(!ttf->ntables) BAD_FONT;
+
+ {
+ /* optional XML metadata: metaOffset/metaLength/metaOrigLength */
+ u32 len=g32(data+28), off;
+ ttf->woff_meta.len = 0;
+ if(len) {
+ off = g32(data+24);
+ if((off|len)>length || off+len>length)
+ BAD_FONT;
+ ttf->woff_meta = get_or_inflate(data+off, len, g32(data+32));
+ }
+ }
+
+ /* optional private data: privOffset/privLength (never compressed) */
+ ttf->woff_priv.len = g32(data+40);
+ ttf->woff_priv.ptr = ttf->woff_priv.len ? data+g32(data+36) : 0;
+
+ alloc_tables(ttf);
+
+ p = data+44; /* first table directory entry */
+ for(i=0; i<ttf->ntables; i++) {
+ struct table *t = &ttf->tables[i];
+ u32 off=g32(p+4), len=g32(p+8);
+ /* overflow-safe bounds check on the compressed extent */
+ if((off|len)>length || off+len>length)
+ BAD_FONT;
+ t->tag = g32(p);
+ t->csum = g32(p+16);
+ t->pos = off;
+ t->free_buf = 1; /* buffer may be a fresh inflate allocation */
+ t->buf = get_or_inflate(data+off, len, g32(p+12));
+ name_table(t);
+ p += 20;
+ }
+}
diff --git a/misc/ttf2woff/ttf2woff.c b/misc/ttf2woff/ttf2woff.c
new file mode 100644
index 000000000..f1f2f80db
--- /dev/null
+++ b/misc/ttf2woff/ttf2woff.c
@@ -0,0 +1,523 @@
+/*
+ * Copyright (C) 2013 Jan Bobrowski <jb@wizard.ae.krakow.pl>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <getopt.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <strings.h>
+#include <errno.h>
+#include "ttf2woff.h"
+
+#ifndef O_BINARY
+#define O_BINARY 0
+#endif
+
+struct flags g;
+
+/* printf-style message plus newline; goes to stdout normally, or to
+ * stderr when stdout is already carrying the converted font ("-"). */
+void echo(char *f, ...)
+{
+ FILE *o = g.stdout_used ? stderr : stdout;
+ va_list va;
+ va_start(va, f);
+ vfprintf(o, f, va);
+ va_end(va);
+ fputc('\n',o);
+}
+
+/* malloc() that exits the program instead of returning NULL. */
+void *my_alloc(size_t sz)
+{
+ void *p = malloc(sz);
+ if(!p) errx(1,"Out of memory");
+ return p;
+}
+
+/* free() returning NULL so callers can write `p = my_free(p);`. */
+void *my_free(void *p)
+{
+ free(p);
+ return 0;
+}
+
+/* realloc() that exits the program instead of returning NULL. */
+void *my_realloc(void *p, size_t sz)
+{
+ p = realloc(p, sz);
+ if(!p) errx(1,"Out of memory");
+ return p;
+}
+
+/* Read an entire file into a freshly allocated buffer. A path of "-"
+ * means standard input (fd 0, the initial value). Regular files are
+ * read in one gulp using st_size; zero-size stat results (pipes, some
+ * special files) fall back to incremental 64 KiB-step reads capped at
+ * 64 MiB. Exits on any I/O error or truncation. */
+static struct buf read_file(char *path)
+{
+ struct buf file = {0};
+ int v, fd = 0; /* fd 0 = stdin unless a real path is opened */
+
+ if(path[0]!='-' || path[1]) {
+ fd = open(path, O_RDONLY|O_BINARY);
+ if(fd<0)
+ err(1, "%s", path);
+ }
+
+ {
+ struct stat st;
+ if(fstat(fd, &st) < 0)
+ err(1, "fstat");
+ file.len = st.st_size;
+ }
+
+ if(file.len) {
+ /* size known up front: single allocation, single read */
+ file.ptr = malloc(file.len);
+ v = read(fd, file.ptr, file.len);
+ if(v < file.len) {
+ if(v<0) err(1, "read");
+ errx(1, "Truncated");
+ }
+ } else {
+ /* size unknown (pipe/stdin): grow the buffer as data arrives */
+ size_t alen = 0; /* current allocation size */
+ file.ptr = 0;
+ for(;;) {
+ if(file.len == alen) {
+ if(alen > 64<<20)
+ errx(1,"Too much data - aborting");
+ alen += 1<<16;
+ file.ptr = my_realloc(file.ptr, alen);
+ }
+ v = read(fd, file.ptr+file.len, alen-file.len);
+ if(v<=0) {
+ if(v) err(1, "read");
+ break; /* EOF */
+ }
+ file.len += v;
+ }
+ }
+ if(fd) close(fd); /* keep stdin open */
+
+ return file;
+}
+
+/* Create a temporary file next to `pt` named "<pt>.N" (N = 0..999),
+ * using O_EXCL to claim a free name. Returns the open fd and stores
+ * the malloc'd name in *pnm (caller later rename()s it over `pt`). */
+static int open_temporary(char *pt, char **pnm)
+{
+ int l = strlen(pt);
+ char *nm = malloc(l+5); /* room for ".999" + NUL */
+ char *p = nm + l;
+ int i, fd;
+
+ memcpy(nm, pt, l);
+ *p++ = '.';
+ for(i=0;;) {
+ sprintf(p, "%d", i);
+ fd = open(nm, O_WRONLY|O_TRUNC|O_CREAT|O_BINARY|O_EXCL, 0666);
+ if(fd>=0)
+ break;
+ if(errno!=EEXIST)
+ err(1, "%s", nm);
+ if(++i>999)
+ errx(1, "Can't create temporary file");
+ }
+ *pnm = nm;
+ return fd;
+}
+
+/* Allocate and zero ttf->tables for ttf->ntables entries. */
+void alloc_tables(struct ttf *ttf)
+{
+ int sz = ttf->ntables*sizeof *ttf->tables;
+ ttf->tables = my_alloc(sz);
+ memset(ttf->tables, 0, sz);
+}
+
+/* Fill t->name with the printable ASCII characters of t->tag (big-endian
+ * order); spaces and non-printables are dropped, result is NUL-terminated. */
+void name_table(struct table *t) {
+ char *d = t->name;
+ int i;
+ for(i=24; i>=0; i-=8) {
+ char c = t->tag>>i;
+ if(c>' ' && c<127)
+ *d++ = c;
+ }
+ *d = 0;
+}
+
+/* OpenType table checksum: sum of big-endian u32 words over p[0..n),
+ * with a partial trailing word treated as zero-padded. The unrolled
+ * byte-at-a-time loop breaks out as soon as n bytes are consumed, so
+ * it never reads past the buffer. */
+static u32 calc_csum(u8 *p, size_t n)
+{
+ u32 s=0;
+ if(n) for(;;) {
+ s += p[0]<<24;
+ if(!--n) break;
+ s += p[1]<<16;
+ if(!--n) break;
+ s += p[2]<<8;
+ if(!--n) break;
+ s += p[3];
+ if(!--n) break;
+ p += 4;
+ }
+ return s;
+}
+
+/* Big-endian table tags used by recalc_checksums(). */
+enum {
+ tag_head = 0x68656164, /* 'head' */
+ tag_DSIG = 0x44534947 /* 'DSIG' */
+};
+
+/* Recompute every table checksum and the whole-font checkSumAdjustment
+ * in head. If the font was modified (or a checksum had to change) any
+ * non-empty digital signature is replaced by an empty DSIG stub, since
+ * the signature would no longer verify. */
+static void recalc_checksums(struct ttf *ttf)
+{
+ u8 h[12];
+ u32 font_csum, off;
+ int i, modified;
+ struct table *head = 0;
+ struct table *DSIG = 0;
+
+ modified = ttf->modified;
+ for(i=0; i<ttf->ntables; i++) {
+ struct table *t = ttf->tab_pos[i];
+ u8 *p = t->buf.ptr;
+ u32 csum;
+
+ if(t->tag == tag_DSIG && t->buf.len>8)
+ DSIG = t; /* non-trivial signature present */
+
+ if(t->tag != tag_head)
+ csum = calc_csum(p, t->buf.len);
+ else {
+ /* head is summed with checkSumAdjustment (bytes 8-11)
+ treated as zero, per the sfnt spec */
+ head = t;
+ csum = calc_csum(p, 8);
+ csum += calc_csum(p+12, t->buf.len-12);
+ }
+ modified |= t->modified;
+ if(csum != t->csum) {
+ modified = 1;
+ t->csum = csum;
+ if(!t->modified)
+ echo("Corrected checksum of table %s", t->name);
+ }
+ }
+
+ if(modified && DSIG) {
+remove_signature:
+ if(DSIG->free_buf)
+ free(DSIG->buf.ptr);
+ DSIG->buf.len = 8;
+ DSIG->buf.ptr = (u8*)"\0\0\0\1\0\0\0"; // empty DSIG
+ DSIG->free_buf = 0;
+ DSIG->csum = calc_csum(DSIG->buf.ptr, DSIG->buf.len);
+ DSIG = 0;
+ if(g.verbose)
+ echo("Digital signature removed");
+ }
+
+ /* whole-font checksum: header + each directory entry's four words
+ (tag, csum, offset, length) + each table's own data sum (== csum,
+ hence t->csum is added twice on purpose) */
+ put_ttf_header(h, ttf);
+ font_csum = calc_csum(h, 12);
+
+ off = 12 + 16*ttf->ntables;
+ for(i=0; i<ttf->ntables; i++) {
+ struct table *t = ttf->tab_pos[i];
+ font_csum += t->tag + t->csum + off + t->buf.len;
+ font_csum += t->csum;
+ off += t->buf.len+3 & ~3; /* tables are 4-byte aligned */
+ }
+
+ if(!head || head->buf.len<16)
+ errx(1, "No head table");
+
+ {
+ u8 *p = head->buf.ptr + 8; /* checkSumAdjustment field */
+ font_csum = 0xB1B0AFBA - font_csum;
+ if(font_csum != g32(p)) {
+ if(DSIG)
+ goto remove_signature; /* fixing head invalidates DSIG */
+ p32(p, font_csum);
+ if(!modified)
+ echo("Corrected checkSumAdjustment");
+ }
+ }
+}
+
+/* Print usage text to `f`: short form when y==0 (bad invocation), full
+ * help with version and option list when y!=0 (-h). Always returns 1 so
+ * callers can `return usage(...)` as an exit status. */
+static int usage(FILE *f, int y)
+{
+ if(!y) {
+ fprintf(f, "usage:"
+ "\tttf2woff [-v] font.ttf [font.woff]\n"
+ "\tttf2woff [-v] font.woff [font.ttf]\n"
+ "\tttf2woff [-v] -i font\n"
+ "\tttf2woff -h\n");
+ } else {
+ fprintf(f,"TTF2WOFF "STR(VERSION)" by Jan Bobrowski\n"
+ "usage:\n"
+ " ttf2woff [-v] [-O|-S] [-t type] [-X table]... [-m file] [-p file] [-u font] input [output]\n"
+ " ttf2woff -i [-v] [-O|-S] [-X table]... [-m file] [-p file] file\n"
+ " ttf2woff -l input\n"
+ " -v be verbose\n"
+ " -i in place modification\n"
+ " -O optimize (default unless signed)\n"
+ " -S don't optimize\n"
+ " -t fmt output format: woff, ttf\n"
+ " -u num font number in collection (TTC), 0-based\n"
+ " -m xml metadata\n"
+ " -p priv private data\n"
+ " -X tag remove table\n"
+ " -l list tables\n"
+ "Use `-' to indicate standard input/output.\n"
+ "Skip output for dry run.\n"
+ "Compressor: %s.\n",
+ copression_by); /* sic: identifier is spelled this way in ttf2woff.h */
+ }
+ return 1;
+}
+
+/* Map a case-insensitive format name ("ttf"/"otf"/"woff") to an fmt_*
+ * constant; fmt_UNKNOWN for anything else. */
+static int type_by_name(char *s)
+{
+ if(strcasecmp(s,"TTF")==0 || strcasecmp(s,"OTF")==0) return fmt_TTF;
+ if(strcasecmp(s,"WOFF")==0) return fmt_WOFF;
+ return fmt_UNKNOWN;
+}
+
+/* qsort comparator: order table pointers by original file position. */
+static int cmp_tab_pos(const void *a, const void *b) {
+ return (*(struct table**)a)->pos - (*(struct table**)b)->pos;
+}
+
+/* Entry point: parse options, read the input font (TTF/WOFF/TTC),
+ * optionally strip tables and attach WOFF metadata/private data,
+ * optimize, fix checksums, and write the result (file, stdout, or
+ * in-place via a temporary file). Returns 0 on success. */
+int main(int argc, char *argv[])
+{
+ struct ttf ttf = {0};
+ char *iname, *itype_name, *oname, *otype_name, *mname=0, *pname=0;
+ struct buf input, output;
+ struct buf xtab = {0}; /* -X names packed as NUL-separated strings */
+ int i, v, itype, fontn;
+
+ g.otype = fmt_UNKNOWN;
+ g.dryrun = 1; // no output
+ g.mayoptim = 1;
+ fontn = 0;
+
+ /* option loop; -1 from getopt exits via the goto below */
+ for(;;) switch(getopt(argc, argv, "vt:u:SOX:lm:p:ihV")) {
+ case 'v': g.verbose = 1; break;
+ case 'l': g.listonly = 1; break;
+ case 'i': g.inplace = 1; break;
+ case 't':
+ v = type_by_name(optarg);
+ if(v==fmt_UNKNOWN)
+ errx(1, "Unsupported font type: %s", optarg);
+ g.otype = v;
+ break;
+ case 'u':
+ fontn = atoi(optarg);
+ break;
+ case 'S':
+ g.mayoptim = g.optimize = 0;
+ break;
+ case 'O':
+ g.mayoptim = g.optimize = 1;
+ break;
+ case 'X':
+ /* append table name (with its NUL) to the removal list */
+ v = strlen(optarg) + 1;
+ xtab.ptr = my_realloc(xtab.ptr, xtab.len+v);
+ strcpy(xtab.ptr+xtab.len, optarg);
+ xtab.len += v;
+ break;
+ case 'm': mname = optarg; break;
+ case 'p': pname = optarg; break;
+ case '?':
+ if(optopt!='?')
+ break;
+ /* `-?` is treated as an explicit help request: fall through */
+ case 'h': return usage(stdout,1);
+ case 'V': printf(STR(VERSION)"\n"); return 0;
+ case -1: goto gotopt;
+ }
+gotopt:
+
+ if(optind==argc)
+ return usage(stderr,0);
+
+ iname = argv[optind++];
+ oname = 0;
+
+ if(g.inplace) {
+ if(iname[0]=='-' && !iname[1])
+ errx(1, "-i is not compatible with -");
+ g.dryrun = 0;
+ if(optind < argc)
+ warnx("Too many args");
+ }
+
+ if(optind < argc) {
+ g.dryrun = 0;
+ oname = argv[optind++];
+ if(optind < argc)
+ warnx("Too many args");
+ if(oname[0]=='-' && !oname[1]) {
+ oname = 0; /* write to stdout */
+ g.stdout_used = 1;
+ } else if(g.otype==fmt_UNKNOWN) {
+ /* infer output format from the file extension */
+ char *p = strrchr(oname, '.');
+ if(p)
+ g.otype = type_by_name(p+1);
+ }
+ }
+
+ input = read_file(iname);
+
+ if(input.len < 28)
+ errx(1,"File too short");
+
+ /* detect input format by its 4-byte magic */
+ itype = fmt_UNKNOWN;
+ if(g32(input.ptr) == g32("wOFF")) {
+ read_woff(&ttf, input.ptr, input.len);
+ itype_name = "WOFF";
+ itype = fmt_WOFF;
+ } else if(g32(input.ptr) == g32("ttcf")) {
+ if(g.inplace)
+ errx(1, "Can't optimize collection");
+ read_ttc(&ttf, input.ptr, input.len, fontn);
+ itype_name = "TTC";
+ } else if(g32(input.ptr) == g32("wOF2")) {
+ errx(1, "WOFF2 is not supported");
+ } else {
+ read_ttf(&ttf, input.ptr, input.len, 0);
+ itype_name = "TTF";
+ itype = fmt_TTF;
+ }
+
+ if(g.inplace)
+ g.otype = itype; /* in-place keeps the input format */
+
+ if(g.otype==fmt_UNKNOWN || g.otype==fmt_WOFF) {
+ g.otype = fmt_WOFF; /* WOFF is the default output format */
+ if(mname)
+ ttf.woff_meta = read_file(mname);
+ if(pname)
+ ttf.woff_priv = read_file(pname);
+ }
+
+ // all read
+
+ /* process -X removals: WOFF metadata/private blocks or sfnt tables */
+ if(xtab.len) {
+ char *p=xtab.ptr, *e=p+xtab.len;
+ for(; p<e; p=strchr(p,0)+1) {
+ struct table *t;
+ struct buf *b;
+ if(strcmp(p,"metadata")==0) {
+ b = &ttf.woff_meta;
+rm_meta:
+ if(b->len) {
+ b->len = 0;
+ ttf.modified_meta = 1;
+ }
+ continue;
+ }
+ if(strcmp(p,"private")==0) {
+ b = &ttf.woff_priv;
+ goto rm_meta;
+ }
+ for(i=0; i<ttf.ntables; i++) {
+ t = &ttf.tables[i];
+ if(strcmp(t->name, p)==0)
+ goto rm_tab;
+ }
+ echo("Table %s not found", p);
+ if(0) {
+rm_tab:
+ /* close the gap in the (sorted-by-name) table array */
+ memmove(t, t+1, (char*)(ttf.tables+ttf.ntables) - (char*)(t+1));
+ ttf.ntables--;
+ ttf.modified = 1;
+ if(g.verbose)
+ echo("Table %s removed", p);
+ }
+ }
+ free(xtab.ptr);
+ }
+
+ /* build the file-position-ordered view of the tables */
+ ttf.tab_pos = malloc(ttf.ntables * sizeof *ttf.tab_pos);
+ for(i=0; i<ttf.ntables; i++)
+ ttf.tab_pos[i] = &ttf.tables[i];
+ qsort(ttf.tab_pos, ttf.ntables, sizeof *ttf.tab_pos, cmp_tab_pos);
+
+ if(g.listonly) {
+ unsigned size = 12 + 16*ttf.ntables;
+ for(i=0; i<ttf.ntables; i++) {
+ struct table *t = ttf.tab_pos[i];
+ size += t->buf.len;
+ echo("%-4s %6u", t->name, t->buf.len);
+ }
+ echo("%-4s %6u", "", size);
+ return 0;
+ }
+
+ /* an intact signature disables optimization unless -O forces it */
+ if(!ttf.modified) {
+ struct table *t = find_table(&ttf, "DSIG");
+ if(t && t->buf.len>8)
+ g.mayoptim = g.optimize;
+ }
+
+ if(g.mayoptim)
+ optimize(&ttf);
+
+ recalc_checksums(&ttf);
+
+ switch(g.otype) {
+ case fmt_TTF:
+ gen_ttf(&output, &ttf);
+ otype_name = "TTF";
+ break;
+ case fmt_WOFF:
+ gen_woff(&output, &ttf);
+ otype_name = "WOFF";
+ break;
+ }
+
+ if(g.verbose || g.dryrun)
+ echo("input: %s %u bytes, output: %s %u bytes (%.1f%%)",
+ itype_name, input.len, otype_name, output.len, 100.*output.len/input.len);
+
+ if(g.dryrun)
+ return 0;
+
+ /* in-place with nothing changed and no size win: leave file alone */
+ if(g.inplace && !ttf.modified && !ttf.modified_meta) {
+ if(output.len >= input.len) {
+ if(g.verbose)
+ echo("Not modified");
+ return 0;
+ }
+ }
+
+ {
+ u8 *p=output.ptr, *e=p+output.len;
+ int fd = 1; /* default: stdout */
+
+ if(g.inplace)
+ fd = open_temporary(iname, &oname);
+ else if(oname) {
+ fd = open(oname, O_WRONLY|O_TRUNC|O_CREAT|O_BINARY, 0666);
+ if(fd<0) err(1, "%s", oname);
+ }
+
+ /* write the whole buffer, tolerating short writes */
+ do {
+ v = write(fd, p, e-p);
+ if(v<=0) {
+ if(v) err(1, "write");
+ errx(1, "Short write");
+ }
+ p += v;
+ } while(p < e);
+
+ close(fd);
+ }
+
+ if(g.inplace) {
+#ifdef WIN32
+ unlink(iname); /* Windows rename() won't replace an existing file */
+#endif
+ v = rename(oname, iname);
+ if(v<0) {
+ warn("Rename %s to %s", oname, iname);
+ unlink(oname);
+ return 1;
+ }
+// free(oname);
+ }
+
+ return 0;
+}
diff --git a/misc/ttf2woff/ttf2woff.h b/misc/ttf2woff/ttf2woff.h
new file mode 100644
index 000000000..23facdd59
--- /dev/null
+++ b/misc/ttf2woff/ttf2woff.h
@@ -0,0 +1,94 @@
+/* ttf2woff.h - shared types, byte-order helpers and prototypes. */
+#include <sys/types.h>
+#include <string.h>
+
+#pragma clang diagnostic ignored "-Wshift-op-parentheses"
+#pragma clang diagnostic ignored "-Wpointer-sign"
+
+#ifndef NO_ERRWARN
+#include <err.h>
+#else
+/* minimal fallbacks for platforms without <err.h> */
+void err(int,char*,...);
+void errx(int,char*,...);
+void warn(char*,...);
+void warnx(char*,...);
+#endif
+
+/* font container formats */
+enum {
+ fmt_UNKNOWN=0,
+ fmt_TTF,
+ fmt_WOFF
+};
+
+/* global option flags, set once in main() */
+extern struct flags {
+ unsigned otype:8; /* requested output format (fmt_*) */
+ unsigned stdout_used:1; /* font goes to stdout; echo() uses stderr */
+ unsigned verbose:1;
+ unsigned mayoptim:1; /* optimization currently allowed */
+ unsigned optimize:1; /* -O/-S explicit user choice */
+ unsigned dryrun:1;
+ unsigned inplace:1;
+ unsigned listonly:1;
+} g;
+
+void echo(char *, ...);
+
+typedef unsigned char u8;
+typedef unsigned int u32;
+
+/* big-endian 16/32-bit get/put; the p* variants return the advanced
+ pointer so calls can be chained */
+static inline int g16(u8 *p) {return p[0]<<8 | p[1];}
+static inline u32 g32(u8 *p) {return (u32)p[0]<<24 | p[1]<<16 | p[2]<<8 | p[3];}
+static inline u8 *p16(u8 *p, int v) {p[0]=v>>8; p[1]=v; return p+2;}
+static inline u8 *p32(u8 *p, u32 v) {p[0]=v>>24; p[1]=v>>16; p[2]=v>>8; p[3]=v; return p+4;}
+static inline u8 *append(u8 *d, u8 *s, size_t n) {u8 *p=d+n; memcpy(d,s,n); return p;}
+
+/* a counted byte range; ownership depends on context (see table.free_buf) */
+struct buf {
+ u8 *ptr;
+ unsigned len;
+};
+
+/* one sfnt table */
+struct table {
+ u32 tag; /* big-endian tag value */
+ unsigned modified:1;
+ unsigned free_buf:1; /* buf.ptr is a private allocation */
+ struct buf buf; /* table data */
+ u32 csum; /* table checksum */
+ u32 pos; /* position in the source file */
+ char name[8]; /* printable tag, see name_table() */
+ struct buf zbuf; /* compressed form (WOFF output) */
+};
+
+struct ttf {
+ u32 flavor; /* sfnt version / flavor word */
+ int ntables;
+ unsigned modified:1;
+ unsigned modified_meta:1; // WOFF meta & priv
+ struct table *tables; // sorted by name
+ struct table **tab_pos; // sorted by file pos
+ struct buf woff_meta, woff_priv;
+};
+
+void alloc_tables(struct ttf *ttf);
+void name_table(struct table *t);
+u8 *put_ttf_header(u8 buf[12], struct ttf *ttf);
+struct table *find_table(struct ttf *ttf, char tag[4]);
+void optimize(struct ttf *ttf);
+
+void read_ttf(struct ttf *ttf, u8 *data, size_t length, unsigned offset);
+void read_ttc(struct ttf *ttf, u8 *data, size_t length, int fontn);
+void read_woff(struct ttf *ttf, u8 *data, size_t length);
+void gen_woff(struct buf *out, struct ttf *ttf);
+void gen_ttf(struct buf *out, struct ttf *ttf);
+
+#define BAD_FONT errx(2, "Bad font (%s:%d)",__FILE__,__LINE__)
+
+int zlib_compress(struct buf *out, struct buf *inp);
+/* NOTE(review): "copression_by" is misspelled but used consistently
+ (defined in comp-*.c) - renaming would touch several files */
+extern char *copression_by;
+
+#define _STR(X) #X
+#define STR(X) _STR(X)
+
+/* true if A is smaller than B after rounding both up to 4-byte alignment */
+#define REALLY_SMALLER(A,B) (((A)+3&~3)<((B)+3&~3))
+
+void *my_alloc(size_t sz);
+void *my_free(void *p);
+void *my_realloc(void *p, size_t sz);
diff --git a/misc/ttf2woff/ttf2woff.rc b/misc/ttf2woff/ttf2woff.rc
new file mode 100644
index 000000000..68d4bc1a5
--- /dev/null
+++ b/misc/ttf2woff/ttf2woff.rc
@@ -0,0 +1,39 @@
+// Windows version resource for ttf2woff.exe.
+// VERNUMS is supplied by the build as up to four comma-separated numbers;
+// FOUR() pads it with zeros to the FILEVERSION a,b,c,d form.
+#include <winver.h>
+#define _STR(S) #S
+#define STR(S) _STR(S)
+#define Z "\0"
+#define _FOUR(A,B,C,D,E...) A,B,C,D
+#define FOUR(A...) _FOUR(A,0,0,0)
+
+#ifdef __GNUC__
+VS_VERSION_INFO VERSIONINFO
+#else
+VS_VERSION_INFO VERSIONINFO MOVEABLE IMPURE LOADONCALL DISCARDABLE
+#endif
+ FILEVERSION FOUR(VERNUMS)
+ PRODUCTVERSION FOUR(VERNUMS)
+ FILEFLAGS 0
+ FILEOS VOS__WINDOWS32
+ FILETYPE VFT_APP
+ FILESUBTYPE 0
+BEGIN
+ BLOCK "StringFileInfo"
+ BEGIN
+ BLOCK "00000000"
+ BEGIN
+ VALUE "FileDescription", "WOFF font converter" Z
+ VALUE "FileVersion", STR(VERSION) Z
+ VALUE "InternalName", "ttf2woff" Z
+ VALUE "LegalCopyright", "GPL" Z
+ VALUE "OriginalFilename", "ttf2woff.exe" Z
+ VALUE "ProductName", "TTF2WOFF" Z
+ VALUE "ProductVersion", STR(VERSION) Z
+ VALUE "URL", "http://wizard.ae.krakow.pl/~jb/ttf2woff/" Z
+ VALUE "Author", "Jan Bobrowski" Z
+ END
+ END
+ BLOCK "VarFileInfo"
+ BEGIN
+ VALUE "Translation", 0,0
+ END
+END
diff --git a/misc/ttf2woff/zopfli/blocksplitter.c b/misc/ttf2woff/zopfli/blocksplitter.c
new file mode 100644
index 000000000..161783d89
--- /dev/null
+++ b/misc/ttf2woff/zopfli/blocksplitter.c
@@ -0,0 +1,332 @@
+/*
+Copyright 2011 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author: lode.vandevenne@gmail.com (Lode Vandevenne)
+Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
+*/
+
+#include "blocksplitter.h"
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "deflate.h"
+#include "squeeze.h"
+#include "tree.h"
+#include "util.h"
+
+/*
+The "f" for the FindMinimum function below.
+i: the current parameter of f(i)
+context: for your implementation
+*/
+typedef double FindMinimumFun(size_t i, void* context);
+
+/*
+Finds minimum of function f(i) where i is of type size_t, f(i) is of type
+double, i is in range start-end (excluding end).
+Outputs the minimum value in *smallest and returns the index of this value.
+*/
+static size_t FindMinimum(FindMinimumFun f, void* context,
+ size_t start, size_t end, double* smallest) {
+ if (end - start < 1024) {
+ /* small range: exhaustive scan */
+ double best = ZOPFLI_LARGE_FLOAT;
+ size_t result = start;
+ size_t i;
+ for (i = start; i < end; i++) {
+ double v = f(i, context);
+ if (v < best) {
+ best = v;
+ result = i;
+ }
+ }
+ *smallest = best;
+ return result;
+ } else {
+ /* Try to find minimum faster by recursively checking multiple points. */
+#define NUM 9 /* Good value: 9. */
+ size_t i;
+ size_t p[NUM]; /* probe positions */
+ double vp[NUM]; /* f() at each probe */
+ size_t besti;
+ double best;
+ double lastbest = ZOPFLI_LARGE_FLOAT;
+ size_t pos = start;
+
+ for (;;) {
+ if (end - start <= NUM) break;
+
+ /* evaluate NUM evenly spaced probes in (start, end) */
+ for (i = 0; i < NUM; i++) {
+ p[i] = start + (i + 1) * ((end - start) / (NUM + 1));
+ vp[i] = f(p[i], context);
+ }
+ besti = 0;
+ best = vp[0];
+ for (i = 1; i < NUM; i++) {
+ if (vp[i] < best) {
+ best = vp[i];
+ besti = i;
+ }
+ }
+ if (best > lastbest) break; /* no longer improving */
+
+ /* narrow the range to the neighbors of the best probe */
+ start = besti == 0 ? start : p[besti - 1];
+ end = besti == NUM - 1 ? end : p[besti + 1];
+
+ pos = p[besti];
+ lastbest = best;
+ }
+ *smallest = lastbest;
+ return pos;
+#undef NUM
+ }
+}
+
+/*
+Returns estimated cost of a block in bits. It includes the size to encode the
+tree and the size to encode all literal, length and distance symbols and their
+extra bits.
+
+lz77: lz77 lit/lengths and distances
+lstart: start of block
+lend: end of block (not inclusive)
+*/
+static double EstimateCost(const ZopfliLZ77Store* lz77,
+ size_t lstart, size_t lend) {
+ return ZopfliCalculateBlockSizeAutoType(lz77, lstart, lend);
+}
+
+/* Context for SplitCost: the lz77 data and the [start, end) range that a
+ candidate split point divides. */
+typedef struct SplitCostContext {
+ const ZopfliLZ77Store* lz77;
+ size_t start;
+ size_t end;
+} SplitCostContext;
+
+
+/*
+Gets the cost which is the sum of the cost of the left and the right section
+of the data.
+type: FindMinimumFun
+*/
+static double SplitCost(size_t i, void* context) {
+ SplitCostContext* c = (SplitCostContext*)context;
+ return EstimateCost(c->lz77, c->start, i) + EstimateCost(c->lz77, i, c->end);
+}
+
+/* Insert `value` into the dynamic array *out keeping it sorted: append,
+ then shift larger elements right to make room at the correct slot. */
+static void AddSorted(size_t value, size_t** out, size_t* outsize) {
+ size_t i;
+ ZOPFLI_APPEND_DATA(value, out, outsize);
+ for (i = 0; i + 1 < *outsize; i++) {
+ if ((*out)[i] > value) {
+ size_t j;
+ for (j = *outsize - 1; j > i; j--) {
+ (*out)[j] = (*out)[j - 1];
+ }
+ (*out)[i] = value;
+ break;
+ }
+ }
+}
+
+/*
+Prints the block split points as decimal and hex values in the terminal.
+*/
+static void PrintBlockSplitPoints(const ZopfliLZ77Store* lz77,
+ const size_t* lz77splitpoints,
+ size_t nlz77points) {
+ size_t* splitpoints = 0;
+ size_t npoints = 0;
+ size_t i;
+ /* The input is given as lz77 indices, but we want to see the uncompressed
+ index values. */
+ size_t pos = 0;
+ if (nlz77points > 0) {
+ for (i = 0; i < lz77->size; i++) {
+ /* a literal advances 1 byte, a match advances its length */
+ size_t length = lz77->dists[i] == 0 ? 1 : lz77->litlens[i];
+ if (lz77splitpoints[npoints] == i) {
+ ZOPFLI_APPEND_DATA(pos, &splitpoints, &npoints);
+ if (npoints == nlz77points) break;
+ }
+ pos += length;
+ }
+ }
+ assert(npoints == nlz77points);
+
+ fprintf(stderr, "block split points: ");
+ for (i = 0; i < npoints; i++) {
+ fprintf(stderr, "%d ", (int)splitpoints[i]);
+ }
+ fprintf(stderr, "(hex:");
+ for (i = 0; i < npoints; i++) {
+ fprintf(stderr, " %x", (int)splitpoints[i]);
+ }
+ fprintf(stderr, ")\n");
+
+ free(splitpoints);
+}
+
+/*
+Finds next block to try to split, the largest of the available ones.
+The largest is chosen to make sure that if only a limited amount of blocks is
+requested, their sizes are spread evenly.
+lz77size: the size of the LZ77 data, which is the size of the done array here.
+done: array indicating which blocks starting at that position are no longer
+ splittable (splitting them increases rather than decreases cost).
+splitpoints: the splitpoints found so far.
+npoints: the amount of splitpoints found so far.
+lstart: output variable, giving start of block.
+lend: output variable, giving end of block.
+returns 1 if a block was found, 0 if no block found (all are done).
+*/
+static int FindLargestSplittableBlock(
+ size_t lz77size, const unsigned char* done,
+ const size_t* splitpoints, size_t npoints,
+ size_t* lstart, size_t* lend) {
+ size_t longest = 0;
+ int found = 0;
+ size_t i;
+ /* the npoints splitpoints partition the data into npoints+1 blocks */
+ for (i = 0; i <= npoints; i++) {
+ size_t start = i == 0 ? 0 : splitpoints[i - 1];
+ size_t end = i == npoints ? lz77size - 1 : splitpoints[i];
+ if (!done[start] && end - start > longest) {
+ *lstart = start;
+ *lend = end;
+ found = 1;
+ longest = end - start;
+ }
+ }
+ return found;
+}
+
+/* Repeatedly split the largest still-splittable block at the point that
+ minimizes the estimated deflate cost, until no split helps, blocks get
+ too small, or maxblocks is reached. Split points are lz77 indices
+ appended (sorted) to *splitpoints/*npoints. */
+void ZopfliBlockSplitLZ77(const ZopfliOptions* options,
+ const ZopfliLZ77Store* lz77, size_t maxblocks,
+ size_t** splitpoints, size_t* npoints) {
+ size_t lstart, lend;
+ size_t i;
+ size_t llpos = 0;
+ size_t numblocks = 1;
+ unsigned char* done; /* done[start] = block at `start` can't improve */
+ double splitcost, origcost;
+
+ if (lz77->size < 10) return; /* This code fails on tiny files. */
+
+ done = (unsigned char*)malloc(lz77->size);
+ if (!done) exit(-1); /* Allocation failed. */
+ for (i = 0; i < lz77->size; i++) done[i] = 0;
+
+ lstart = 0;
+ lend = lz77->size;
+ for (;;) {
+ SplitCostContext c;
+
+ if (maxblocks > 0 && numblocks >= maxblocks) {
+ break;
+ }
+
+ /* find the cheapest split point inside the current block */
+ c.lz77 = lz77;
+ c.start = lstart;
+ c.end = lend;
+ assert(lstart < lend);
+ llpos = FindMinimum(SplitCost, &c, lstart + 1, lend, &splitcost);
+
+ assert(llpos > lstart);
+ assert(llpos < lend);
+
+ origcost = EstimateCost(lz77, lstart, lend);
+
+ if (splitcost > origcost || llpos == lstart + 1 || llpos == lend) {
+ done[lstart] = 1; /* splitting here doesn't pay off */
+ } else {
+ AddSorted(llpos, splitpoints, npoints);
+ numblocks++;
+ }
+
+ if (!FindLargestSplittableBlock(
+ lz77->size, done, *splitpoints, *npoints, &lstart, &lend)) {
+ break; /* No further split will probably reduce compression. */
+ }
+
+ if (lend - lstart < 10) {
+ break;
+ }
+ }
+
+ if (options->verbose) {
+ PrintBlockSplitPoints(lz77, *splitpoints, *npoints);
+ }
+
+ free(done);
+}
+
+/* Split uncompressed input in[instart..inend) into blocks: run a greedy
+ LZ77 pass, split in lz77 space via ZopfliBlockSplitLZ77, then map the
+ lz77 split indices back to byte positions in the input. */
+void ZopfliBlockSplit(const ZopfliOptions* options,
+ const unsigned char* in, size_t instart, size_t inend,
+ size_t maxblocks, size_t** splitpoints, size_t* npoints) {
+ size_t pos = 0;
+ size_t i;
+ ZopfliBlockState s;
+ size_t* lz77splitpoints = 0;
+ size_t nlz77points = 0;
+ ZopfliLZ77Store store;
+ ZopfliHash hash;
+ ZopfliHash* h = &hash;
+
+ ZopfliInitLZ77Store(in, &store);
+ ZopfliInitBlockState(options, instart, inend, 0, &s);
+ ZopfliAllocHash(ZOPFLI_WINDOW_SIZE, h);
+
+ *npoints = 0;
+ *splitpoints = 0;
+
+ /* Unintuitively, using a simple LZ77 method here instead of ZopfliLZ77Optimal
+ results in better blocks. */
+ ZopfliLZ77Greedy(&s, in, instart, inend, &store, h);
+
+ ZopfliBlockSplitLZ77(options,
+ &store, maxblocks,
+ &lz77splitpoints, &nlz77points);
+
+ /* Convert LZ77 positions to positions in the uncompressed input. */
+ pos = instart;
+ if (nlz77points > 0) {
+ for (i = 0; i < store.size; i++) {
+ /* a literal advances 1 byte, a match advances its length */
+ size_t length = store.dists[i] == 0 ? 1 : store.litlens[i];
+ if (lz77splitpoints[*npoints] == i) {
+ ZOPFLI_APPEND_DATA(pos, splitpoints, npoints);
+ if (*npoints == nlz77points) break;
+ }
+ pos += length;
+ }
+ }
+ assert(*npoints == nlz77points);
+
+ free(lz77splitpoints);
+ ZopfliCleanBlockState(&s);
+ ZopfliCleanLZ77Store(&store);
+ ZopfliCleanHash(h);
+}
+
+/* Trivial splitter: emit a split point every `blocksize` bytes starting
+ at instart; `in` itself is unused (kept for a uniform signature). */
+void ZopfliBlockSplitSimple(const unsigned char* in,
+ size_t instart, size_t inend,
+ size_t blocksize,
+ size_t** splitpoints, size_t* npoints) {
+ size_t i = instart;
+ while (i < inend) {
+ ZOPFLI_APPEND_DATA(i, splitpoints, npoints);
+ i += blocksize;
+ }
+ (void)in;
+}
diff --git a/misc/ttf2woff/zopfli/blocksplitter.h b/misc/ttf2woff/zopfli/blocksplitter.h
new file mode 100644
index 000000000..d1d622f1b
--- /dev/null
+++ b/misc/ttf2woff/zopfli/blocksplitter.h
@@ -0,0 +1,73 @@
+/*
+Copyright 2011 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author: lode.vandevenne@gmail.com (Lode Vandevenne)
+Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
+*/
+
+/*
+Functions to choose good boundaries for block splitting. Deflate allows encoding
+the data in multiple blocks, with a separate Huffman tree for each block. The
+Huffman tree itself requires some bytes to encode, so by choosing certain
+blocks, you can either hurt, or enhance compression. These functions choose good
+ones that enhance it.
+*/
+
+#ifndef ZOPFLI_BLOCKSPLITTER_H_
+#define ZOPFLI_BLOCKSPLITTER_H_
+
+#include <stdlib.h>
+
+#include "lz77.h"
+#include "zopfli.h"
+
+
+/*
+Does blocksplitting on LZ77 data.
+The output splitpoints are indices in the LZ77 data.
+maxblocks: set a limit to the amount of blocks. Set to 0 to mean no limit.
+*/
+void ZopfliBlockSplitLZ77(const ZopfliOptions* options,
+ const ZopfliLZ77Store* lz77, size_t maxblocks,
+ size_t** splitpoints, size_t* npoints);
+
+/*
+Does blocksplitting on uncompressed data.
+The output splitpoints are indices in the uncompressed bytes.
+
+options: general program options.
+in: uncompressed input data
+instart: where to start splitting
+inend: where to end splitting (not inclusive)
+maxblocks: maximum amount of blocks to split into, or 0 for no limit
+splitpoints: dynamic array to put the resulting split point coordinates into.
+ The coordinates are indices in the input array.
+npoints: pointer to amount of splitpoints, for the dynamic array. The amount of
+ blocks is the amount of splitpoints + 1.
+*/
+void ZopfliBlockSplit(const ZopfliOptions* options,
+ const unsigned char* in, size_t instart, size_t inend,
+ size_t maxblocks, size_t** splitpoints, size_t* npoints);
+
+/*
+Divides the input into equal blocks, does not even take LZ77 lengths into
+account.
+*/
+void ZopfliBlockSplitSimple(const unsigned char* in,
+ size_t instart, size_t inend,
+ size_t blocksize,
+ size_t** splitpoints, size_t* npoints);
+
+#endif /* ZOPFLI_BLOCKSPLITTER_H_ */
diff --git a/misc/ttf2woff/zopfli/cache.c b/misc/ttf2woff/zopfli/cache.c
new file mode 100644
index 000000000..f5559c32e
--- /dev/null
+++ b/misc/ttf2woff/zopfli/cache.c
@@ -0,0 +1,125 @@
+/*
+Copyright 2011 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author: lode.vandevenne@gmail.com (Lode Vandevenne)
+Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
+*/
+
+#include "cache.h"
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#ifdef ZOPFLI_LONGEST_MATCH_CACHE
+
+void ZopfliInitCache(size_t blocksize, ZopfliLongestMatchCache* lmc) {
+ size_t i;
+ lmc->length = (unsigned short*)malloc(sizeof(unsigned short) * blocksize);
+ lmc->dist = (unsigned short*)malloc(sizeof(unsigned short) * blocksize);
+ /* Rather large amount of memory. */
+ lmc->sublen = (unsigned char*)malloc(ZOPFLI_CACHE_LENGTH * 3 * blocksize);
+ if(lmc->sublen == NULL) {
+ fprintf(stderr,
+ "Error: Out of memory. Tried allocating %lu bytes of memory.\n",
+ ZOPFLI_CACHE_LENGTH * 3 * blocksize);
+ exit (EXIT_FAILURE);
+ }
+
+ /* length > 0 and dist 0 is invalid combination, which indicates on purpose
+ that this cache value is not filled in yet. */
+ for (i = 0; i < blocksize; i++) lmc->length[i] = 1;
+ for (i = 0; i < blocksize; i++) lmc->dist[i] = 0;
+ for (i = 0; i < ZOPFLI_CACHE_LENGTH * blocksize * 3; i++) lmc->sublen[i] = 0;
+}
+
+void ZopfliCleanCache(ZopfliLongestMatchCache* lmc) {
+ free(lmc->length);
+ free(lmc->dist);
+ free(lmc->sublen);
+}
+
+void ZopfliSublenToCache(const unsigned short* sublen,
+ size_t pos, size_t length,
+ ZopfliLongestMatchCache* lmc) {
+ size_t i;
+ size_t j = 0;
+ unsigned bestlength = 0;
+ unsigned char* cache;
+
+#if ZOPFLI_CACHE_LENGTH == 0
+ return;
+#endif
+
+ cache = &lmc->sublen[ZOPFLI_CACHE_LENGTH * pos * 3];
+ if (length < 3) return;
+ for (i = 3; i <= length; i++) {
+ if (i == length || sublen[i] != sublen[i + 1]) {
+ cache[j * 3] = i - 3;
+ cache[j * 3 + 1] = sublen[i] % 256;
+ cache[j * 3 + 2] = (sublen[i] >> 8) % 256;
+ bestlength = i;
+ j++;
+ if (j >= ZOPFLI_CACHE_LENGTH) break;
+ }
+ }
+ if (j < ZOPFLI_CACHE_LENGTH) {
+ assert(bestlength == length);
+ cache[(ZOPFLI_CACHE_LENGTH - 1) * 3] = bestlength - 3;
+ } else {
+ assert(bestlength <= length);
+ }
+ assert(bestlength == ZopfliMaxCachedSublen(lmc, pos, length));
+}
+
+void ZopfliCacheToSublen(const ZopfliLongestMatchCache* lmc,
+ size_t pos, size_t length,
+ unsigned short* sublen) {
+ size_t i, j;
+ unsigned maxlength = ZopfliMaxCachedSublen(lmc, pos, length);
+ unsigned prevlength = 0;
+ unsigned char* cache;
+#if ZOPFLI_CACHE_LENGTH == 0
+ return;
+#endif
+ if (length < 3) return;
+ cache = &lmc->sublen[ZOPFLI_CACHE_LENGTH * pos * 3];
+ for (j = 0; j < ZOPFLI_CACHE_LENGTH; j++) {
+ unsigned length = cache[j * 3] + 3;
+ unsigned dist = cache[j * 3 + 1] + 256 * cache[j * 3 + 2];
+ for (i = prevlength; i <= length; i++) {
+ sublen[i] = dist;
+ }
+ if (length == maxlength) break;
+ prevlength = length + 1;
+ }
+}
+
+/*
+Returns the length up to which could be stored in the cache.
+*/
+unsigned ZopfliMaxCachedSublen(const ZopfliLongestMatchCache* lmc,
+ size_t pos, size_t length) {
+ unsigned char* cache;
+#if ZOPFLI_CACHE_LENGTH == 0
+ return 0;
+#endif
+ cache = &lmc->sublen[ZOPFLI_CACHE_LENGTH * pos * 3];
+ (void)length;
+ if (cache[1] == 0 && cache[2] == 0) return 0; /* No sublen cached. */
+ return cache[(ZOPFLI_CACHE_LENGTH - 1) * 3] + 3;
+}
+
+#endif /* ZOPFLI_LONGEST_MATCH_CACHE */
diff --git a/misc/ttf2woff/zopfli/cache.h b/misc/ttf2woff/zopfli/cache.h
new file mode 100644
index 000000000..5ca0c5015
--- /dev/null
+++ b/misc/ttf2woff/zopfli/cache.h
@@ -0,0 +1,66 @@
+/*
+Copyright 2011 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author: lode.vandevenne@gmail.com (Lode Vandevenne)
+Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
+*/
+
+/*
+The cache that speeds up ZopfliFindLongestMatch of lz77.c.
+*/
+
+#ifndef ZOPFLI_CACHE_H_
+#define ZOPFLI_CACHE_H_
+
+#include "util.h"
+
+#ifdef ZOPFLI_LONGEST_MATCH_CACHE
+
+/*
+Cache used by ZopfliFindLongestMatch to remember previously found length/dist
+values.
+This is needed because the squeeze runs will ask these values multiple times for
+the same position.
+Uses large amounts of memory, since it has to remember the distance belonging
+to every possible shorter-than-the-best length (the so called "sublen" array).
+*/
+typedef struct ZopfliLongestMatchCache {
+ unsigned short* length;
+ unsigned short* dist;
+ unsigned char* sublen;
+} ZopfliLongestMatchCache;
+
+/* Initializes the ZopfliLongestMatchCache. */
+void ZopfliInitCache(size_t blocksize, ZopfliLongestMatchCache* lmc);
+
+/* Frees up the memory of the ZopfliLongestMatchCache. */
+void ZopfliCleanCache(ZopfliLongestMatchCache* lmc);
+
+/* Stores sublen array in the cache. */
+void ZopfliSublenToCache(const unsigned short* sublen,
+ size_t pos, size_t length,
+ ZopfliLongestMatchCache* lmc);
+
+/* Extracts sublen array from the cache. */
+void ZopfliCacheToSublen(const ZopfliLongestMatchCache* lmc,
+ size_t pos, size_t length,
+ unsigned short* sublen);
+/* Returns the length up to which could be stored in the cache. */
+unsigned ZopfliMaxCachedSublen(const ZopfliLongestMatchCache* lmc,
+ size_t pos, size_t length);
+
+#endif /* ZOPFLI_LONGEST_MATCH_CACHE */
+
+#endif /* ZOPFLI_CACHE_H_ */
diff --git a/misc/ttf2woff/zopfli/deflate.c b/misc/ttf2woff/zopfli/deflate.c
new file mode 100644
index 000000000..60e0df144
--- /dev/null
+++ b/misc/ttf2woff/zopfli/deflate.c
@@ -0,0 +1,933 @@
+/*
+Copyright 2011 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author: lode.vandevenne@gmail.com (Lode Vandevenne)
+Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
+*/
+
+#include "deflate.h"
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "blocksplitter.h"
+#include "squeeze.h"
+#include "symbols.h"
+#include "tree.h"
+
+/*
+bp = bitpointer, always in range [0, 7].
+The outsize is number of necessary bytes to encode the bits.
+Given the value of bp and the amount of bytes, the amount of bits represented
+is not simply bytesize * 8 + bp because even representing one bit requires a
+whole byte. It is: (bp == 0) ? (bytesize * 8) : ((bytesize - 1) * 8 + bp)
+*/
+static void AddBit(int bit,
+ unsigned char* bp, unsigned char** out, size_t* outsize) {
+ if (*bp == 0) ZOPFLI_APPEND_DATA(0, out, outsize);
+ (*out)[*outsize - 1] |= bit << *bp;
+ *bp = (*bp + 1) & 7;
+}
+
+static void AddBits(unsigned symbol, unsigned length,
+ unsigned char* bp, unsigned char** out, size_t* outsize) {
+ /* TODO(lode): make more efficient (add more bits at once). */
+ unsigned i;
+ for (i = 0; i < length; i++) {
+ unsigned bit = (symbol >> i) & 1;
+ if (*bp == 0) ZOPFLI_APPEND_DATA(0, out, outsize);
+ (*out)[*outsize - 1] |= bit << *bp;
+ *bp = (*bp + 1) & 7;
+ }
+}
+
+/*
+Adds bits, like AddBits, but the order is inverted. The deflate specification
+uses both orders in one standard.
+*/
+static void AddHuffmanBits(unsigned symbol, unsigned length,
+ unsigned char* bp, unsigned char** out,
+ size_t* outsize) {
+ /* TODO(lode): make more efficient (add more bits at once). */
+ unsigned i;
+ for (i = 0; i < length; i++) {
+ unsigned bit = (symbol >> (length - i - 1)) & 1;
+ if (*bp == 0) ZOPFLI_APPEND_DATA(0, out, outsize);
+ (*out)[*outsize - 1] |= bit << *bp;
+ *bp = (*bp + 1) & 7;
+ }
+}
+
+/*
+Ensures there are at least 2 distance codes to support buggy decoders.
+Zlib 1.2.1 and below have a bug where it fails if there isn't at least 1
+distance code (with length > 0), even though it's valid according to the
+deflate spec to have 0 distance codes. On top of that, some mobile phones
+require at least two distance codes. To support these decoders too (but
+potentially at the cost of a few bytes), add dummy code lengths of 1.
+References to this bug can be found in the changelog of
+Zlib 1.2.2 and here: http://www.jonof.id.au/forum/index.php?topic=515.0.
+
+d_lengths: the 32 lengths of the distance codes.
+*/
+static void PatchDistanceCodesForBuggyDecoders(unsigned* d_lengths) {
+#if 0
+ int num_dist_codes = 0; /* Amount of non-zero distance codes */
+ int i;
+ for (i = 0; i < 30 /* Ignore the two unused codes from the spec */; i++) {
+ if (d_lengths[i]) num_dist_codes++;
+ if (num_dist_codes >= 2) return; /* Two or more codes is fine. */
+ }
+
+ if (num_dist_codes == 0) {
+ d_lengths[0] = d_lengths[1] = 1;
+ } else if (num_dist_codes == 1) {
+ d_lengths[d_lengths[0] ? 1 : 0] = 1;
+ }
+#endif
+}
+
+/*
+Encodes the Huffman tree and returns how many bits its encoding takes. If out
+is a null pointer, only returns the size and runs faster.
+*/
+static size_t EncodeTree(const unsigned* ll_lengths,
+ const unsigned* d_lengths,
+ int use_16, int use_17, int use_18,
+ unsigned char* bp,
+ unsigned char** out, size_t* outsize) {
+ unsigned lld_total; /* Total amount of literal, length, distance codes. */
+ /* Runlength encoded version of lengths of litlen and dist trees. */
+ unsigned* rle = 0;
+ unsigned* rle_bits = 0; /* Extra bits for rle values 16, 17 and 18. */
+ size_t rle_size = 0; /* Size of rle array. */
+ size_t rle_bits_size = 0; /* Should have same value as rle_size. */
+ unsigned hlit = 29; /* 286 - 257 */
+ unsigned hdist = 29; /* 32 - 1, but gzip does not like hdist > 29.*/
+ unsigned hclen;
+ unsigned hlit2;
+ size_t i, j;
+ size_t clcounts[19];
+ unsigned clcl[19]; /* Code length code lengths. */
+ unsigned clsymbols[19];
+ /* The order in which code length code lengths are encoded as per deflate. */
+ static const unsigned order[19] = {
+ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15
+ };
+ int size_only = !out;
+ size_t result_size = 0;
+
+ for(i = 0; i < 19; i++) clcounts[i] = 0;
+
+ /* Trim zeros. */
+ while (hlit > 0 && ll_lengths[257 + hlit - 1] == 0) hlit--;
+ while (hdist > 0 && d_lengths[1 + hdist - 1] == 0) hdist--;
+ hlit2 = hlit + 257;
+
+ lld_total = hlit2 + hdist + 1;
+
+ for (i = 0; i < lld_total; i++) {
+ /* This is an encoding of a huffman tree, so now the length is a symbol */
+ unsigned char symbol = i < hlit2 ? ll_lengths[i] : d_lengths[i - hlit2];
+ unsigned count = 1;
+ if(use_16 || (symbol == 0 && (use_17 || use_18))) {
+ for (j = i + 1; j < lld_total && symbol ==
+ (j < hlit2 ? ll_lengths[j] : d_lengths[j - hlit2]); j++) {
+ count++;
+ }
+ }
+ i += count - 1;
+
+ /* Repetitions of zeroes */
+ if (symbol == 0 && count >= 3) {
+ if (use_18) {
+ while (count >= 11) {
+ unsigned count2 = count > 138 ? 138 : count;
+ if (!size_only) {
+ ZOPFLI_APPEND_DATA(18, &rle, &rle_size);
+ ZOPFLI_APPEND_DATA(count2 - 11, &rle_bits, &rle_bits_size);
+ }
+ clcounts[18]++;
+ count -= count2;
+ }
+ }
+ if (use_17) {
+ while (count >= 3) {
+ unsigned count2 = count > 10 ? 10 : count;
+ if (!size_only) {
+ ZOPFLI_APPEND_DATA(17, &rle, &rle_size);
+ ZOPFLI_APPEND_DATA(count2 - 3, &rle_bits, &rle_bits_size);
+ }
+ clcounts[17]++;
+ count -= count2;
+ }
+ }
+ }
+
+ /* Repetitions of any symbol */
+ if (use_16 && count >= 4) {
+ count--; /* Since the first one is hardcoded. */
+ clcounts[symbol]++;
+ if (!size_only) {
+ ZOPFLI_APPEND_DATA(symbol, &rle, &rle_size);
+ ZOPFLI_APPEND_DATA(0, &rle_bits, &rle_bits_size);
+ }
+ while (count >= 3) {
+ unsigned count2 = count > 6 ? 6 : count;
+ if (!size_only) {
+ ZOPFLI_APPEND_DATA(16, &rle, &rle_size);
+ ZOPFLI_APPEND_DATA(count2 - 3, &rle_bits, &rle_bits_size);
+ }
+ clcounts[16]++;
+ count -= count2;
+ }
+ }
+
+ /* No or insufficient repetition */
+ clcounts[symbol] += count;
+ while (count > 0) {
+ if (!size_only) {
+ ZOPFLI_APPEND_DATA(symbol, &rle, &rle_size);
+ ZOPFLI_APPEND_DATA(0, &rle_bits, &rle_bits_size);
+ }
+ count--;
+ }
+ }
+
+ ZopfliCalculateBitLengths(clcounts, 19, 7, clcl);
+ if (!size_only) ZopfliLengthsToSymbols(clcl, 19, 7, clsymbols);
+
+ hclen = 15;
+ /* Trim zeros. */
+ while (hclen > 0 && clcounts[order[hclen + 4 - 1]] == 0) hclen--;
+
+ if (!size_only) {
+ AddBits(hlit, 5, bp, out, outsize);
+ AddBits(hdist, 5, bp, out, outsize);
+ AddBits(hclen, 4, bp, out, outsize);
+
+ for (i = 0; i < hclen + 4; i++) {
+ AddBits(clcl[order[i]], 3, bp, out, outsize);
+ }
+
+ for (i = 0; i < rle_size; i++) {
+ unsigned symbol = clsymbols[rle[i]];
+ AddHuffmanBits(symbol, clcl[rle[i]], bp, out, outsize);
+ /* Extra bits. */
+ if (rle[i] == 16) AddBits(rle_bits[i], 2, bp, out, outsize);
+ else if (rle[i] == 17) AddBits(rle_bits[i], 3, bp, out, outsize);
+ else if (rle[i] == 18) AddBits(rle_bits[i], 7, bp, out, outsize);
+ }
+ }
+
+ result_size += 14; /* hlit, hdist, hclen bits */
+ result_size += (hclen + 4) * 3; /* clcl bits */
+ for(i = 0; i < 19; i++) {
+ result_size += clcl[i] * clcounts[i];
+ }
+ /* Extra bits. */
+ result_size += clcounts[16] * 2;
+ result_size += clcounts[17] * 3;
+ result_size += clcounts[18] * 7;
+
+ /* Note: in case of "size_only" these are null pointers so no effect. */
+ free(rle);
+ free(rle_bits);
+
+ return result_size;
+}
+
+static void AddDynamicTree(const unsigned* ll_lengths,
+ const unsigned* d_lengths,
+ unsigned char* bp,
+ unsigned char** out, size_t* outsize) {
+ int i;
+ int best = 0;
+ size_t bestsize = 0;
+
+ for(i = 0; i < 8; i++) {
+ size_t size = EncodeTree(ll_lengths, d_lengths,
+ i & 1, i & 2, i & 4,
+ 0, 0, 0);
+ if (bestsize == 0 || size < bestsize) {
+ bestsize = size;
+ best = i;
+ }
+ }
+
+ EncodeTree(ll_lengths, d_lengths,
+ best & 1, best & 2, best & 4,
+ bp, out, outsize);
+}
+
+/*
+Gives the exact size of the tree, in bits, as it will be encoded in DEFLATE.
+*/
+static size_t CalculateTreeSize(const unsigned* ll_lengths,
+ const unsigned* d_lengths) {
+ size_t result = 0;
+ int i;
+
+ for(i = 0; i < 8; i++) {
+ size_t size = EncodeTree(ll_lengths, d_lengths,
+ i & 1, i & 2, i & 4,
+ 0, 0, 0);
+ if (result == 0 || size < result) result = size;
+ }
+
+ return result;
+}
+
+/*
+Adds all lit/len and dist codes from the lists as huffman symbols. Does not add
+end code 256. expected_data_size is the uncompressed block size, used for
+assert, but you can set it to 0 to not do the assertion.
+*/
+static void AddLZ77Data(const ZopfliLZ77Store* lz77,
+ size_t lstart, size_t lend,
+ size_t expected_data_size,
+ const unsigned* ll_symbols, const unsigned* ll_lengths,
+ const unsigned* d_symbols, const unsigned* d_lengths,
+ unsigned char* bp,
+ unsigned char** out, size_t* outsize) {
+ size_t testlength = 0;
+ size_t i;
+
+ for (i = lstart; i < lend; i++) {
+ unsigned dist = lz77->dists[i];
+ unsigned litlen = lz77->litlens[i];
+ if (dist == 0) {
+ assert(litlen < 256);
+ assert(ll_lengths[litlen] > 0);
+ AddHuffmanBits(ll_symbols[litlen], ll_lengths[litlen], bp, out, outsize);
+ testlength++;
+ } else {
+ unsigned lls = ZopfliGetLengthSymbol(litlen);
+ unsigned ds = ZopfliGetDistSymbol(dist);
+ assert(litlen >= 3 && litlen <= 288);
+ assert(ll_lengths[lls] > 0);
+ assert(d_lengths[ds] > 0);
+ AddHuffmanBits(ll_symbols[lls], ll_lengths[lls], bp, out, outsize);
+ AddBits(ZopfliGetLengthExtraBitsValue(litlen),
+ ZopfliGetLengthExtraBits(litlen),
+ bp, out, outsize);
+ AddHuffmanBits(d_symbols[ds], d_lengths[ds], bp, out, outsize);
+ AddBits(ZopfliGetDistExtraBitsValue(dist),
+ ZopfliGetDistExtraBits(dist),
+ bp, out, outsize);
+ testlength += litlen;
+ }
+ }
+ assert(expected_data_size == 0 || testlength == expected_data_size);
+}
+
+static void GetFixedTree(unsigned* ll_lengths, unsigned* d_lengths) {
+ size_t i;
+ for (i = 0; i < 144; i++) ll_lengths[i] = 8;
+ for (i = 144; i < 256; i++) ll_lengths[i] = 9;
+ for (i = 256; i < 280; i++) ll_lengths[i] = 7;
+ for (i = 280; i < 288; i++) ll_lengths[i] = 8;
+ for (i = 0; i < 32; i++) d_lengths[i] = 5;
+}
+
+/*
+Same as CalculateBlockSymbolSize, but for block size smaller than histogram
+size.
+*/
+static size_t CalculateBlockSymbolSizeSmall(const unsigned* ll_lengths,
+ const unsigned* d_lengths,
+ const ZopfliLZ77Store* lz77,
+ size_t lstart, size_t lend) {
+ size_t result = 0;
+ size_t i;
+ for (i = lstart; i < lend; i++) {
+ assert(i < lz77->size);
+ assert(lz77->litlens[i] < 259);
+ if (lz77->dists[i] == 0) {
+ result += ll_lengths[lz77->litlens[i]];
+ } else {
+ int ll_symbol = ZopfliGetLengthSymbol(lz77->litlens[i]);
+ int d_symbol = ZopfliGetDistSymbol(lz77->dists[i]);
+ result += ll_lengths[ll_symbol];
+ result += d_lengths[d_symbol];
+ result += ZopfliGetLengthSymbolExtraBits(ll_symbol);
+ result += ZopfliGetDistSymbolExtraBits(d_symbol);
+ }
+ }
+ result += ll_lengths[256]; /*end symbol*/
+ return result;
+}
+
+/*
+Same as CalculateBlockSymbolSize, but with the histogram provided by the caller.
+*/
+static size_t CalculateBlockSymbolSizeGivenCounts(const size_t* ll_counts,
+ const size_t* d_counts,
+ const unsigned* ll_lengths,
+ const unsigned* d_lengths,
+ const ZopfliLZ77Store* lz77,
+ size_t lstart, size_t lend) {
+ size_t result = 0;
+ size_t i;
+ if (lstart + ZOPFLI_NUM_LL * 3 > lend) {
+ return CalculateBlockSymbolSizeSmall(
+ ll_lengths, d_lengths, lz77, lstart, lend);
+ } else {
+ for (i = 0; i < 256; i++) {
+ result += ll_lengths[i] * ll_counts[i];
+ }
+ for (i = 257; i < 286; i++) {
+ result += ll_lengths[i] * ll_counts[i];
+ result += ZopfliGetLengthSymbolExtraBits(i) * ll_counts[i];
+ }
+ for (i = 0; i < 30; i++) {
+ result += d_lengths[i] * d_counts[i];
+ result += ZopfliGetDistSymbolExtraBits(i) * d_counts[i];
+ }
+ result += ll_lengths[256]; /*end symbol*/
+ return result;
+ }
+}
+
+/*
+Calculates size of the part after the header and tree of an LZ77 block, in bits.
+*/
+static size_t CalculateBlockSymbolSize(const unsigned* ll_lengths,
+ const unsigned* d_lengths,
+ const ZopfliLZ77Store* lz77,
+ size_t lstart, size_t lend) {
+ if (lstart + ZOPFLI_NUM_LL * 3 > lend) {
+ return CalculateBlockSymbolSizeSmall(
+ ll_lengths, d_lengths, lz77, lstart, lend);
+ } else {
+ size_t ll_counts[ZOPFLI_NUM_LL];
+ size_t d_counts[ZOPFLI_NUM_D];
+ ZopfliLZ77GetHistogram(lz77, lstart, lend, ll_counts, d_counts);
+ return CalculateBlockSymbolSizeGivenCounts(
+ ll_counts, d_counts, ll_lengths, d_lengths, lz77, lstart, lend);
+ }
+}
+
+static size_t AbsDiff(size_t x, size_t y) {
+ if (x > y)
+ return x - y;
+ else
+ return y - x;
+}
+
+/*
+Changes the population counts in a way that the consequent Huffman tree
+compression, especially its rle-part, will be more likely to compress this data
+more efficiently. length contains the size of the histogram.
+*/
+void OptimizeHuffmanForRle(int length, size_t* counts) {
+ int i, k, stride;
+ size_t symbol, sum, limit;
+ int* good_for_rle;
+
+ /* 1) We don't want to touch the trailing zeros. We may break the
+ rules of the format by adding more data in the distance codes. */
+ for (; length >= 0; --length) {
+ if (length == 0) {
+ return;
+ }
+ if (counts[length - 1] != 0) {
+ /* Now counts[0..length - 1] does not have trailing zeros. */
+ break;
+ }
+ }
+ /* 2) Let's mark all population counts that already can be encoded
+ with an rle code.*/
+ good_for_rle = (int*)malloc(length * sizeof(int));
+ for (i = 0; i < length; ++i) good_for_rle[i] = 0;
+
+ /* Let's not spoil any of the existing good rle codes.
+ Mark any seq of 0's that is longer than 5 as a good_for_rle.
+ Mark any seq of non-0's that is longer than 7 as a good_for_rle.*/
+ symbol = counts[0];
+ stride = 0;
+ for (i = 0; i < length + 1; ++i) {
+ if (i == length || counts[i] != symbol) {
+ if ((symbol == 0 && stride >= 5) || (symbol != 0 && stride >= 7)) {
+ for (k = 0; k < stride; ++k) {
+ good_for_rle[i - k - 1] = 1;
+ }
+ }
+ stride = 1;
+ if (i != length) {
+ symbol = counts[i];
+ }
+ } else {
+ ++stride;
+ }
+ }
+
+ /* 3) Let's replace those population counts that lead to more rle codes. */
+ stride = 0;
+ limit = counts[0];
+ sum = 0;
+ for (i = 0; i < length + 1; ++i) {
+ if (i == length || good_for_rle[i]
+ /* Heuristic for selecting the stride ranges to collapse. */
+ || AbsDiff(counts[i], limit) >= 4) {
+ if (stride >= 4 || (stride >= 3 && sum == 0)) {
+ /* The stride must end, collapse what we have, if we have enough (4). */
+ int count = (sum + stride / 2) / stride;
+ if (count < 1) count = 1;
+ if (sum == 0) {
+ /* Don't make an all zeros stride to be upgraded to ones. */
+ count = 0;
+ }
+ for (k = 0; k < stride; ++k) {
+ /* We don't want to change value at counts[i],
+ that is already belonging to the next stride. Thus - 1. */
+ counts[i - k - 1] = count;
+ }
+ }
+ stride = 0;
+ sum = 0;
+ if (i < length - 3) {
+ /* All interesting strides have a count of at least 4,
+ at least when non-zeros. */
+ limit = (counts[i] + counts[i + 1] +
+ counts[i + 2] + counts[i + 3] + 2) / 4;
+ } else if (i < length) {
+ limit = counts[i];
+ } else {
+ limit = 0;
+ }
+ }
+ ++stride;
+ if (i != length) {
+ sum += counts[i];
+ }
+ }
+
+ free(good_for_rle);
+}
+
+/*
+Tries out OptimizeHuffmanForRle for this block, if the result is smaller,
+uses it, otherwise keeps the original. Returns size of encoded tree and data in
+bits, not including the 3-bit block header.
+*/
+static double TryOptimizeHuffmanForRle(
+ const ZopfliLZ77Store* lz77, size_t lstart, size_t lend,
+ const size_t* ll_counts, const size_t* d_counts,
+ unsigned* ll_lengths, unsigned* d_lengths) {
+ size_t ll_counts2[ZOPFLI_NUM_LL];
+ size_t d_counts2[ZOPFLI_NUM_D];
+ unsigned ll_lengths2[ZOPFLI_NUM_LL];
+ unsigned d_lengths2[ZOPFLI_NUM_D];
+ double treesize;
+ double datasize;
+ double treesize2;
+ double datasize2;
+
+ treesize = CalculateTreeSize(ll_lengths, d_lengths);
+ datasize = CalculateBlockSymbolSizeGivenCounts(ll_counts, d_counts,
+ ll_lengths, d_lengths, lz77, lstart, lend);
+
+ memcpy(ll_counts2, ll_counts, sizeof(ll_counts2));
+ memcpy(d_counts2, d_counts, sizeof(d_counts2));
+ OptimizeHuffmanForRle(ZOPFLI_NUM_LL, ll_counts2);
+ OptimizeHuffmanForRle(ZOPFLI_NUM_D, d_counts2);
+ ZopfliCalculateBitLengths(ll_counts2, ZOPFLI_NUM_LL, 15, ll_lengths2);
+ ZopfliCalculateBitLengths(d_counts2, ZOPFLI_NUM_D, 15, d_lengths2);
+ PatchDistanceCodesForBuggyDecoders(d_lengths2);
+
+ treesize2 = CalculateTreeSize(ll_lengths2, d_lengths2);
+ datasize2 = CalculateBlockSymbolSizeGivenCounts(ll_counts, d_counts,
+ ll_lengths2, d_lengths2, lz77, lstart, lend);
+
+ if (treesize2 + datasize2 < treesize + datasize) {
+ memcpy(ll_lengths, ll_lengths2, sizeof(ll_lengths2));
+ memcpy(d_lengths, d_lengths2, sizeof(d_lengths2));
+ return treesize2 + datasize2;
+ }
+ return treesize + datasize;
+}
+
+/*
+Calculates the bit lengths for the symbols for dynamic blocks. Chooses bit
+lengths that give the smallest size of tree encoding + encoding of all the
+symbols to have smallest output size. This are not necessarily the ideal Huffman
+bit lengths. Returns size of encoded tree and data in bits, not including the
+3-bit block header.
+*/
+static double GetDynamicLengths(const ZopfliLZ77Store* lz77,
+ size_t lstart, size_t lend,
+ unsigned* ll_lengths, unsigned* d_lengths) {
+ size_t ll_counts[ZOPFLI_NUM_LL];
+ size_t d_counts[ZOPFLI_NUM_D];
+
+ ZopfliLZ77GetHistogram(lz77, lstart, lend, ll_counts, d_counts);
+ ll_counts[256] = 1; /* End symbol. */
+ ZopfliCalculateBitLengths(ll_counts, ZOPFLI_NUM_LL, 15, ll_lengths);
+ ZopfliCalculateBitLengths(d_counts, ZOPFLI_NUM_D, 15, d_lengths);
+ PatchDistanceCodesForBuggyDecoders(d_lengths);
+ return TryOptimizeHuffmanForRle(
+ lz77, lstart, lend, ll_counts, d_counts, ll_lengths, d_lengths);
+}
+
+double ZopfliCalculateBlockSize(const ZopfliLZ77Store* lz77,
+ size_t lstart, size_t lend, int btype) {
+ unsigned ll_lengths[ZOPFLI_NUM_LL];
+ unsigned d_lengths[ZOPFLI_NUM_D];
+
+ double result = 3; /* bfinal and btype bits */
+
+ if (btype == 0) {
+ size_t length = ZopfliLZ77GetByteRange(lz77, lstart, lend);
+ size_t rem = length % 65535;
+ size_t blocks = length / 65535 + (rem ? 1 : 0);
+ /* An uncompressed block must actually be split into multiple blocks if it's
+ larger than 65535 bytes long. Eeach block header is 5 bytes: 3 bits,
+ padding, LEN and NLEN (potential less padding for first one ignored). */
+ return blocks * 5 * 8 + length * 8;
+ } if (btype == 1) {
+ GetFixedTree(ll_lengths, d_lengths);
+ result += CalculateBlockSymbolSize(
+ ll_lengths, d_lengths, lz77, lstart, lend);
+ } else {
+ result += GetDynamicLengths(lz77, lstart, lend, ll_lengths, d_lengths);
+ }
+
+ return result;
+}
+
+double ZopfliCalculateBlockSizeAutoType(const ZopfliLZ77Store* lz77,
+ size_t lstart, size_t lend) {
+ double uncompressedcost = ZopfliCalculateBlockSize(lz77, lstart, lend, 0);
+ /* Don't do the expensive fixed cost calculation for larger blocks that are
+ unlikely to use it. */
+ double fixedcost = (lz77->size > 1000) ?
+ uncompressedcost : ZopfliCalculateBlockSize(lz77, lstart, lend, 1);
+ double dyncost = ZopfliCalculateBlockSize(lz77, lstart, lend, 2);
+ return (uncompressedcost < fixedcost && uncompressedcost < dyncost)
+ ? uncompressedcost
+ : (fixedcost < dyncost ? fixedcost : dyncost);
+}
+
+/* Since an uncompressed block can be max 65535 in size, it actually adds
+multible blocks if needed. */
+static void AddNonCompressedBlock(const ZopfliOptions* options, int final,
+ const unsigned char* in, size_t instart,
+ size_t inend,
+ unsigned char* bp,
+ unsigned char** out, size_t* outsize) {
+ size_t pos = instart;
+ (void)options;
+ for (;;) {
+ size_t i;
+ unsigned short blocksize = 65535;
+ unsigned short nlen;
+ int currentfinal;
+
+ if (pos + blocksize > inend) blocksize = inend - pos;
+ currentfinal = pos + blocksize >= inend;
+
+ nlen = ~blocksize;
+
+ AddBit(final && currentfinal, bp, out, outsize);
+ /* BTYPE 00 */
+ AddBit(0, bp, out, outsize);
+ AddBit(0, bp, out, outsize);
+
+ /* Any bits of input up to the next byte boundary are ignored. */
+ *bp = 0;
+
+ ZOPFLI_APPEND_DATA(blocksize % 256, out, outsize);
+ ZOPFLI_APPEND_DATA((blocksize / 256) % 256, out, outsize);
+ ZOPFLI_APPEND_DATA(nlen % 256, out, outsize);
+ ZOPFLI_APPEND_DATA((nlen / 256) % 256, out, outsize);
+
+ for (i = 0; i < blocksize; i++) {
+ ZOPFLI_APPEND_DATA(in[pos + i], out, outsize);
+ }
+
+ if (currentfinal) break;
+ pos += blocksize;
+ }
+}
+
+/*
+Adds a deflate block with the given LZ77 data to the output.
+options: global program options
+btype: the block type, must be 1 or 2
+final: whether to set the "final" bit on this block, must be the last block
+litlens: literal/length array of the LZ77 data, in the same format as in
+ ZopfliLZ77Store.
+dists: distance array of the LZ77 data, in the same format as in
+ ZopfliLZ77Store.
+lstart: where to start in the LZ77 data
+lend: where to end in the LZ77 data (not inclusive)
+expected_data_size: the uncompressed block size, used for assert, but you can
+ set it to 0 to not do the assertion.
+bp: output bit pointer
+out: dynamic output array to append to
+outsize: dynamic output array size
+*/
+static void AddLZ77Block(const ZopfliOptions* options, int btype, int final,
+ const ZopfliLZ77Store* lz77,
+ size_t lstart, size_t lend,
+ size_t expected_data_size,
+ unsigned char* bp,
+ unsigned char** out, size_t* outsize) {
+ unsigned ll_lengths[ZOPFLI_NUM_LL];
+ unsigned d_lengths[ZOPFLI_NUM_D];
+ unsigned ll_symbols[ZOPFLI_NUM_LL];
+ unsigned d_symbols[ZOPFLI_NUM_D];
+ size_t detect_block_size = *outsize;
+ size_t compressed_size;
+ size_t uncompressed_size = 0;
+ size_t i;
+ if (btype == 0) {
+ size_t length = ZopfliLZ77GetByteRange(lz77, lstart, lend);
+ size_t pos = lstart == lend ? 0 : lz77->pos[lstart];
+ size_t end = pos + length;
+ AddNonCompressedBlock(options, final,
+ lz77->data, pos, end, bp, out, outsize);
+ return;
+ }
+
+ AddBit(final, bp, out, outsize);
+ AddBit(btype & 1, bp, out, outsize);
+ AddBit((btype & 2) >> 1, bp, out, outsize);
+
+ if (btype == 1) {
+ /* Fixed block. */
+ GetFixedTree(ll_lengths, d_lengths);
+ } else {
+ /* Dynamic block. */
+ unsigned detect_tree_size;
+ assert(btype == 2);
+
+ GetDynamicLengths(lz77, lstart, lend, ll_lengths, d_lengths);
+
+ detect_tree_size = *outsize;
+ AddDynamicTree(ll_lengths, d_lengths, bp, out, outsize);
+ if (options->verbose) {
+ fprintf(stderr, "treesize: %d\n", (int)(*outsize - detect_tree_size));
+ }
+ }
+
+ ZopfliLengthsToSymbols(ll_lengths, ZOPFLI_NUM_LL, 15, ll_symbols);
+ ZopfliLengthsToSymbols(d_lengths, ZOPFLI_NUM_D, 15, d_symbols);
+
+ detect_block_size = *outsize;
+ AddLZ77Data(lz77, lstart, lend, expected_data_size,
+ ll_symbols, ll_lengths, d_symbols, d_lengths,
+ bp, out, outsize);
+ /* End symbol. */
+ AddHuffmanBits(ll_symbols[256], ll_lengths[256], bp, out, outsize);
+
+ for (i = lstart; i < lend; i++) {
+ uncompressed_size += lz77->dists[i] == 0 ? 1 : lz77->litlens[i];
+ }
+ compressed_size = *outsize - detect_block_size;
+ if (options->verbose) {
+ fprintf(stderr, "compressed block size: %d (%dk) (unc: %d)\n",
+ (int)compressed_size, (int)(compressed_size / 1024),
+ (int)(uncompressed_size));
+ }
+}
+
+static void AddLZ77BlockAutoType(const ZopfliOptions* options, int final,
+ const ZopfliLZ77Store* lz77,
+ size_t lstart, size_t lend,
+ size_t expected_data_size,
+ unsigned char* bp,
+ unsigned char** out, size_t* outsize) {
+ double uncompressedcost = ZopfliCalculateBlockSize(lz77, lstart, lend, 0);
+ double fixedcost = ZopfliCalculateBlockSize(lz77, lstart, lend, 1);
+ double dyncost = ZopfliCalculateBlockSize(lz77, lstart, lend, 2);
+
+ /* Whether to perform the expensive calculation of creating an optimal block
+ with fixed huffman tree to check if smaller. Only do this for small blocks or
+ blocks which already are pretty good with fixed huffman tree. */
+ int expensivefixed = (lz77->size < 1000) || fixedcost <= dyncost * 1.1;
+
+ ZopfliLZ77Store fixedstore;
+ if (lstart == lend) {
+ /* Smallest empty block is represented by fixed block */
+ AddBits(final, 1, bp, out, outsize);
+ AddBits(1, 2, bp, out, outsize); /* btype 01 */
+ AddBits(0, 7, bp, out, outsize); /* end symbol has code 0000000 */
+ return;
+ }
+ ZopfliInitLZ77Store(lz77->data, &fixedstore);
+ if (expensivefixed) {
+ /* Recalculate the LZ77 with ZopfliLZ77OptimalFixed */
+ size_t instart = lz77->pos[lstart];
+ size_t inend = instart + ZopfliLZ77GetByteRange(lz77, lstart, lend);
+
+ ZopfliBlockState s;
+ ZopfliInitBlockState(options, instart, inend, 1, &s);
+ ZopfliLZ77OptimalFixed(&s, lz77->data, instart, inend, &fixedstore);
+ fixedcost = ZopfliCalculateBlockSize(&fixedstore, 0, fixedstore.size, 1);
+ ZopfliCleanBlockState(&s);
+ }
+
+ if (uncompressedcost < fixedcost && uncompressedcost < dyncost) {
+ AddLZ77Block(options, 0, final, lz77, lstart, lend,
+ expected_data_size, bp, out, outsize);
+ } else if (fixedcost < dyncost) {
+ if (expensivefixed) {
+ AddLZ77Block(options, 1, final, &fixedstore, 0, fixedstore.size,
+ expected_data_size, bp, out, outsize);
+ } else {
+ AddLZ77Block(options, 1, final, lz77, lstart, lend,
+ expected_data_size, bp, out, outsize);
+ }
+ } else {
+ AddLZ77Block(options, 2, final, lz77, lstart, lend,
+ expected_data_size, bp, out, outsize);
+ }
+
+ ZopfliCleanLZ77Store(&fixedstore);
+}
+
+/*
+Deflate a part, to allow ZopfliDeflate() to use multiple master blocks if
+needed.
+It is possible to call this function multiple times in a row, shifting
+instart and inend to next bytes of the data. If instart is larger than 0, then
+previous bytes are used as the initial dictionary for LZ77.
+This function will usually output multiple deflate blocks. If final is 1, then
+the final bit will be set on the last block.
+*/
+void ZopfliDeflatePart(const ZopfliOptions* options, int btype, int final,
+ const unsigned char* in, size_t instart, size_t inend,
+ unsigned char* bp, unsigned char** out,
+ size_t* outsize) {
+ size_t i;
+ /* byte coordinates rather than lz77 index */
+ size_t* splitpoints_uncompressed = 0;
+ size_t npoints = 0;
+ size_t* splitpoints = 0;
+ double totalcost = 0;
+ ZopfliLZ77Store lz77;
+
+ /* If btype=2 is specified, it tries all block types. If a lesser btype is
+ given, then however it forces that one. Neither of the lesser types needs
+ block splitting as they have no dynamic huffman trees. */
+ if (btype == 0) {
+ AddNonCompressedBlock(options, final, in, instart, inend, bp, out, outsize);
+ return;
+ } else if (btype == 1) {
+ ZopfliLZ77Store store;
+ ZopfliBlockState s;
+ ZopfliInitLZ77Store(in, &store);
+ ZopfliInitBlockState(options, instart, inend, 1, &s);
+
+ ZopfliLZ77OptimalFixed(&s, in, instart, inend, &store);
+ AddLZ77Block(options, btype, final, &store, 0, store.size, 0,
+ bp, out, outsize);
+
+ ZopfliCleanBlockState(&s);
+ ZopfliCleanLZ77Store(&store);
+ return;
+ }
+
+
+ if (options->blocksplitting) {
+ ZopfliBlockSplit(options, in, instart, inend,
+ options->blocksplittingmax,
+ &splitpoints_uncompressed, &npoints);
+ splitpoints = (size_t*)malloc(sizeof(*splitpoints) * npoints);
+ }
+
+ ZopfliInitLZ77Store(in, &lz77);
+
+ for (i = 0; i <= npoints; i++) {
+ size_t start = i == 0 ? instart : splitpoints_uncompressed[i - 1];
+ size_t end = i == npoints ? inend : splitpoints_uncompressed[i];
+ ZopfliBlockState s;
+ ZopfliLZ77Store store;
+ ZopfliInitLZ77Store(in, &store);
+ ZopfliInitBlockState(options, start, end, 1, &s);
+ ZopfliLZ77Optimal(&s, in, start, end, options->numiterations, &store);
+ totalcost += ZopfliCalculateBlockSizeAutoType(&store, 0, store.size);
+
+ ZopfliAppendLZ77Store(&store, &lz77);
+ if (i < npoints) splitpoints[i] = lz77.size;
+
+ ZopfliCleanBlockState(&s);
+ ZopfliCleanLZ77Store(&store);
+ }
+
+ /* Second block splitting attempt */
+ if (options->blocksplitting && npoints > 1) {
+ size_t* splitpoints2 = 0;
+ size_t npoints2 = 0;
+ double totalcost2 = 0;
+
+ ZopfliBlockSplitLZ77(options, &lz77,
+ options->blocksplittingmax, &splitpoints2, &npoints2);
+
+ for (i = 0; i <= npoints2; i++) {
+ size_t start = i == 0 ? 0 : splitpoints2[i - 1];
+ size_t end = i == npoints2 ? lz77.size : splitpoints2[i];
+ totalcost2 += ZopfliCalculateBlockSizeAutoType(&lz77, start, end);
+ }
+
+ if (totalcost2 < totalcost) {
+ free(splitpoints);
+ splitpoints = splitpoints2;
+ npoints = npoints2;
+ } else {
+ free(splitpoints2);
+ }
+ }
+
+ for (i = 0; i <= npoints; i++) {
+ size_t start = i == 0 ? 0 : splitpoints[i - 1];
+ size_t end = i == npoints ? lz77.size : splitpoints[i];
+ AddLZ77BlockAutoType(options, i == npoints && final,
+ &lz77, start, end, 0,
+ bp, out, outsize);
+ }
+
+ ZopfliCleanLZ77Store(&lz77);
+ free(splitpoints);
+ free(splitpoints_uncompressed);
+}
+
+void ZopfliDeflate(const ZopfliOptions* options, int btype, int final,
+ const unsigned char* in, size_t insize,
+ unsigned char* bp, unsigned char** out, size_t* outsize) {
+ size_t offset = *outsize;
+#if ZOPFLI_MASTER_BLOCK_SIZE == 0
+ ZopfliDeflatePart(options, btype, final, in, 0, insize, bp, out, outsize);
+#else
+ size_t i = 0;
+ do {
+ int masterfinal = (i + ZOPFLI_MASTER_BLOCK_SIZE >= insize);
+ int final2 = final && masterfinal;
+ size_t size = masterfinal ? insize - i : ZOPFLI_MASTER_BLOCK_SIZE;
+ ZopfliDeflatePart(options, btype, final2,
+ in, i, i + size, bp, out, outsize);
+ i += size;
+ } while (i < insize);
+#endif
+ if (options->verbose) {
+ fprintf(stderr,
+ "Original Size: %lu, Deflate: %lu, Compression: %f%% Removed\n",
+ (unsigned long)insize, (unsigned long)(*outsize - offset),
+ 100.0 * (double)(insize - (*outsize - offset)) / (double)insize);
+ }
+}
diff --git a/misc/ttf2woff/zopfli/deflate.h b/misc/ttf2woff/zopfli/deflate.h
new file mode 100644
index 000000000..fcd9ddc0f
--- /dev/null
+++ b/misc/ttf2woff/zopfli/deflate.h
@@ -0,0 +1,92 @@
+/*
+Copyright 2011 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author: lode.vandevenne@gmail.com (Lode Vandevenne)
+Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
+*/
+
+#ifndef ZOPFLI_DEFLATE_H_
+#define ZOPFLI_DEFLATE_H_
+
+/*
+Functions to compress according to the DEFLATE specification, using the
+"squeeze" LZ77 compression backend.
+*/
+
+#include "lz77.h"
+#include "zopfli.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+Compresses according to the deflate specification and append the compressed
+result to the output.
+This function will usually output multiple deflate blocks. If final is 1, then
+the final bit will be set on the last block.
+
+options: global program options
+btype: the deflate block type. Use 2 for best compression.
+ -0: non compressed blocks (00)
+ -1: blocks with fixed tree (01)
+ -2: blocks with dynamic tree (10)
+final: whether this is the last section of the input, sets the final bit to the
+ last deflate block.
+in: the input bytes
+insize: number of input bytes
+bp: bit pointer for the output array. This must initially be 0, and for
+ consecutive calls must be reused (it can have values from 0-7). This is
+ because deflate appends blocks as bit-based data, rather than on byte
+ boundaries.
+out: pointer to the dynamic output array to which the result is appended. Must
+ be freed after use.
+outsize: pointer to the dynamic output array size.
+*/
+void ZopfliDeflate(const ZopfliOptions* options, int btype, int final,
+ const unsigned char* in, size_t insize,
+ unsigned char* bp, unsigned char** out, size_t* outsize);
+
+/*
+Like ZopfliDeflate, but allows to specify start and end byte with instart and
+inend. Only that part is compressed, but earlier bytes are still used for the
+back window.
+*/
+void ZopfliDeflatePart(const ZopfliOptions* options, int btype, int final,
+ const unsigned char* in, size_t instart, size_t inend,
+ unsigned char* bp, unsigned char** out,
+ size_t* outsize);
+
+/*
+Calculates block size in bits.
+litlens: lz77 lit/lengths
+dists: lz77 distances
+lstart: start of block
+lend: end of block (not inclusive)
+*/
+double ZopfliCalculateBlockSize(const ZopfliLZ77Store* lz77,
+ size_t lstart, size_t lend, int btype);
+
+/*
+Calculates block size in bits, automatically using the best btype.
+*/
+double ZopfliCalculateBlockSizeAutoType(const ZopfliLZ77Store* lz77,
+ size_t lstart, size_t lend);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif /* ZOPFLI_DEFLATE_H_ */
diff --git a/misc/ttf2woff/zopfli/hash.c b/misc/ttf2woff/zopfli/hash.c
new file mode 100644
index 000000000..3025d1e29
--- /dev/null
+++ b/misc/ttf2woff/zopfli/hash.c
@@ -0,0 +1,143 @@
+/*
+Copyright 2011 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author: lode.vandevenne@gmail.com (Lode Vandevenne)
+Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
+*/
+
+#include "hash.h"
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#define HASH_SHIFT 5
+#define HASH_MASK 32767
+
+void ZopfliAllocHash(size_t window_size, ZopfliHash* h) {
+ h->head = (int*)malloc(sizeof(*h->head) * 65536);
+ h->prev = (unsigned short*)malloc(sizeof(*h->prev) * window_size);
+ h->hashval = (int*)malloc(sizeof(*h->hashval) * window_size);
+
+#ifdef ZOPFLI_HASH_SAME
+ h->same = (unsigned short*)malloc(sizeof(*h->same) * window_size);
+#endif
+
+#ifdef ZOPFLI_HASH_SAME_HASH
+ h->head2 = (int*)malloc(sizeof(*h->head2) * 65536);
+ h->prev2 = (unsigned short*)malloc(sizeof(*h->prev2) * window_size);
+ h->hashval2 = (int*)malloc(sizeof(*h->hashval2) * window_size);
+#endif
+}
+
+void ZopfliResetHash(size_t window_size, ZopfliHash* h) {
+ size_t i;
+
+ h->val = 0;
+ for (i = 0; i < 65536; i++) {
+ h->head[i] = -1; /* -1 indicates no head so far. */
+ }
+ for (i = 0; i < window_size; i++) {
+ h->prev[i] = i; /* If prev[j] == j, then prev[j] is uninitialized. */
+ h->hashval[i] = -1;
+ }
+
+#ifdef ZOPFLI_HASH_SAME
+ for (i = 0; i < window_size; i++) {
+ h->same[i] = 0;
+ }
+#endif
+
+#ifdef ZOPFLI_HASH_SAME_HASH
+ h->val2 = 0;
+ for (i = 0; i < 65536; i++) {
+ h->head2[i] = -1;
+ }
+ for (i = 0; i < window_size; i++) {
+ h->prev2[i] = i;
+ h->hashval2[i] = -1;
+ }
+#endif
+}
+
+void ZopfliCleanHash(ZopfliHash* h) {
+ free(h->head);
+ free(h->prev);
+ free(h->hashval);
+
+#ifdef ZOPFLI_HASH_SAME_HASH
+ free(h->head2);
+ free(h->prev2);
+ free(h->hashval2);
+#endif
+
+#ifdef ZOPFLI_HASH_SAME
+ free(h->same);
+#endif
+}
+
+/*
+Update the sliding hash value with the given byte. All calls to this function
+must be made on consecutive input characters. Since the hash value exists out
+of multiple input bytes, a few warmups with this function are needed initially.
+*/
+static void UpdateHashValue(ZopfliHash* h, unsigned char c) {
+ h->val = (((h->val) << HASH_SHIFT) ^ (c)) & HASH_MASK;
+}
+
+void ZopfliUpdateHash(const unsigned char* array, size_t pos, size_t end,
+ ZopfliHash* h) {
+ unsigned short hpos = pos & ZOPFLI_WINDOW_MASK;
+#ifdef ZOPFLI_HASH_SAME
+ size_t amount = 0;
+#endif
+
+ UpdateHashValue(h, pos + ZOPFLI_MIN_MATCH <= end ?
+ array[pos + ZOPFLI_MIN_MATCH - 1] : 0);
+ h->hashval[hpos] = h->val;
+ if (h->head[h->val] != -1 && h->hashval[h->head[h->val]] == h->val) {
+ h->prev[hpos] = h->head[h->val];
+ }
+ else h->prev[hpos] = hpos;
+ h->head[h->val] = hpos;
+
+#ifdef ZOPFLI_HASH_SAME
+ /* Update "same". */
+ if (h->same[(pos - 1) & ZOPFLI_WINDOW_MASK] > 1) {
+ amount = h->same[(pos - 1) & ZOPFLI_WINDOW_MASK] - 1;
+ }
+ while (pos + amount + 1 < end &&
+ array[pos] == array[pos + amount + 1] && amount < (unsigned short)(-1)) {
+ amount++;
+ }
+ h->same[hpos] = amount;
+#endif
+
+#ifdef ZOPFLI_HASH_SAME_HASH
+ h->val2 = ((h->same[hpos] - ZOPFLI_MIN_MATCH) & 255) ^ h->val;
+ h->hashval2[hpos] = h->val2;
+ if (h->head2[h->val2] != -1 && h->hashval2[h->head2[h->val2]] == h->val2) {
+ h->prev2[hpos] = h->head2[h->val2];
+ }
+ else h->prev2[hpos] = hpos;
+ h->head2[h->val2] = hpos;
+#endif
+}
+
+void ZopfliWarmupHash(const unsigned char* array, size_t pos, size_t end,
+ ZopfliHash* h) {
+ UpdateHashValue(h, array[pos + 0]);
+ if (pos + 1 < end) UpdateHashValue(h, array[pos + 1]);
+}
diff --git a/misc/ttf2woff/zopfli/hash.h b/misc/ttf2woff/zopfli/hash.h
new file mode 100644
index 000000000..e59c1d46f
--- /dev/null
+++ b/misc/ttf2woff/zopfli/hash.h
@@ -0,0 +1,73 @@
+/*
+Copyright 2011 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author: lode.vandevenne@gmail.com (Lode Vandevenne)
+Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
+*/
+
+/*
+The hash for ZopfliFindLongestMatch of lz77.c.
+*/
+
+#ifndef ZOPFLI_HASH_H_
+#define ZOPFLI_HASH_H_
+
+#include "util.h"
+
+typedef struct ZopfliHash {
+ int* head; /* Hash value to index of its most recent occurrence. */
+ unsigned short* prev; /* Index to index of prev. occurrence of same hash. */
+ int* hashval; /* Index to hash value at this index. */
+ int val; /* Current hash value. */
+
+#ifdef ZOPFLI_HASH_SAME_HASH
+ /* Fields with similar purpose as the above hash, but for the second hash with
+ a value that is calculated differently. */
+ int* head2; /* Hash value to index of its most recent occurrence. */
+ unsigned short* prev2; /* Index to index of prev. occurrence of same hash. */
+ int* hashval2; /* Index to hash value at this index. */
+ int val2; /* Current hash value. */
+#endif
+
+#ifdef ZOPFLI_HASH_SAME
+  unsigned short* same; /* Amount of repetitions of same byte after this. */
+#endif
+} ZopfliHash;
+
+/* Allocates ZopfliHash memory. */
+void ZopfliAllocHash(size_t window_size, ZopfliHash* h);
+
+/* Resets all fields of ZopfliHash. */
+void ZopfliResetHash(size_t window_size, ZopfliHash* h);
+
+/* Frees ZopfliHash memory. */
+void ZopfliCleanHash(ZopfliHash* h);
+
+/*
+Updates the hash values based on the current position in the array. All calls
+to this must be made for consecutive bytes.
+*/
+void ZopfliUpdateHash(const unsigned char* array, size_t pos, size_t end,
+ ZopfliHash* h);
+
+/*
+Prepopulates hash:
+Fills in the initial values in the hash, before ZopfliUpdateHash can be used
+correctly.
+*/
+void ZopfliWarmupHash(const unsigned char* array, size_t pos, size_t end,
+ ZopfliHash* h);
+
+#endif /* ZOPFLI_HASH_H_ */
diff --git a/misc/ttf2woff/zopfli/katajainen.c b/misc/ttf2woff/zopfli/katajainen.c
new file mode 100644
index 000000000..145901755
--- /dev/null
+++ b/misc/ttf2woff/zopfli/katajainen.c
@@ -0,0 +1,262 @@
+/*
+Copyright 2011 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author: lode.vandevenne@gmail.com (Lode Vandevenne)
+Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
+*/
+
+/*
+Bounded package merge algorithm, based on the paper
+"A Fast and Space-Economical Algorithm for Length-Limited Coding
+Jyrki Katajainen, Alistair Moffat, Andrew Turpin".
+*/
+
+#include "katajainen.h"
+#include <assert.h>
+#include <stdlib.h>
+#include <limits.h>
+
+typedef struct Node Node;
+
+/*
+Nodes forming chains. Also used to represent leaves.
+*/
+struct Node {
+ size_t weight; /* Total weight (symbol count) of this chain. */
+ Node* tail; /* Previous node(s) of this chain, or 0 if none. */
+ int count; /* Leaf symbol index, or number of leaves before this chain. */
+};
+
+/*
+Memory pool for nodes.
+*/
+typedef struct NodePool {
+ Node* next; /* Pointer to a free node in the pool. */
+} NodePool;
+
+/*
+Initializes a chain node with the given values and marks it as in use.
+*/
+static void InitNode(size_t weight, int count, Node* tail, Node* node) {
+ node->weight = weight;
+ node->count = count;
+ node->tail = tail;
+}
+
+/*
+Performs a Boundary Package-Merge step. Puts a new chain in the given list. The
+new chain is, depending on the weights, a leaf or a combination of two chains
+from the previous list.
+lists: The lists of chains.
+maxbits: Number of lists.
+leaves: The leaves, one per symbol.
+numsymbols: Number of leaves.
+pool: the node memory pool.
+index: The index of the list in which a new chain or leaf is required.
+*/
+static void BoundaryPM(Node* (*lists)[2], Node* leaves, int numsymbols,
+ NodePool* pool, int index) {
+ Node* newchain;
+ Node* oldchain;
+ int lastcount = lists[index][1]->count; /* Count of last chain of list. */
+
+ if (index == 0 && lastcount >= numsymbols) return;
+
+ newchain = pool->next++;
+ oldchain = lists[index][1];
+
+ /* These are set up before the recursive calls below, so that there is a list
+ pointing to the new node, to let the garbage collection know it's in use. */
+ lists[index][0] = oldchain;
+ lists[index][1] = newchain;
+
+ if (index == 0) {
+ /* New leaf node in list 0. */
+ InitNode(leaves[lastcount].weight, lastcount + 1, 0, newchain);
+ } else {
+ size_t sum = lists[index - 1][0]->weight + lists[index - 1][1]->weight;
+ if (lastcount < numsymbols && sum > leaves[lastcount].weight) {
+ /* New leaf inserted in list, so count is incremented. */
+ InitNode(leaves[lastcount].weight, lastcount + 1, oldchain->tail,
+ newchain);
+ } else {
+ InitNode(sum, lastcount, lists[index - 1][1], newchain);
+ /* Two lookahead chains of previous list used up, create new ones. */
+ BoundaryPM(lists, leaves, numsymbols, pool, index - 1);
+ BoundaryPM(lists, leaves, numsymbols, pool, index - 1);
+ }
+ }
+}
+
+static void BoundaryPMFinal(Node* (*lists)[2],
+ Node* leaves, int numsymbols, NodePool* pool, int index) {
+ int lastcount = lists[index][1]->count; /* Count of last chain of list. */
+
+ size_t sum = lists[index - 1][0]->weight + lists[index - 1][1]->weight;
+
+ if (lastcount < numsymbols && sum > leaves[lastcount].weight) {
+ Node* newchain = pool->next;
+ Node* oldchain = lists[index][1]->tail;
+
+ lists[index][1] = newchain;
+ newchain->count = lastcount + 1;
+ newchain->tail = oldchain;
+ } else {
+ lists[index][1]->tail = lists[index - 1][1];
+ }
+}
+
+/*
+Initializes each list with as lookahead chains the two leaves with lowest
+weights.
+*/
+static void InitLists(
+ NodePool* pool, const Node* leaves, int maxbits, Node* (*lists)[2]) {
+ int i;
+ Node* node0 = pool->next++;
+ Node* node1 = pool->next++;
+ InitNode(leaves[0].weight, 1, 0, node0);
+ InitNode(leaves[1].weight, 2, 0, node1);
+ for (i = 0; i < maxbits; i++) {
+ lists[i][0] = node0;
+ lists[i][1] = node1;
+ }
+}
+
+/*
+Converts result of boundary package-merge to the bitlengths. The result in the
+last chain of the last list contains the amount of active leaves in each list.
+chain: Chain to extract the bit length from (last chain from last list).
+*/
+static void ExtractBitLengths(Node* chain, Node* leaves, unsigned* bitlengths) {
+ int counts[16] = {0};
+ unsigned end = 16;
+ unsigned ptr = 15;
+ unsigned value = 1;
+ Node* node;
+ int val;
+
+ for (node = chain; node; node = node->tail) {
+ counts[--end] = node->count;
+ }
+
+ val = counts[15];
+ while (ptr >= end) {
+ for (; val > counts[ptr - 1]; val--) {
+ bitlengths[leaves[val - 1].count] = value;
+ }
+ ptr--;
+ value++;
+ }
+}
+
+/*
+Comparator for sorting the leaves. Has the function signature for qsort.
+*/
+static int LeafComparator(const void* a, const void* b) {
+ return ((const Node*)a)->weight - ((const Node*)b)->weight;
+}
+
+int ZopfliLengthLimitedCodeLengths(
+ const size_t* frequencies, int n, int maxbits, unsigned* bitlengths) {
+ NodePool pool;
+ int i;
+ int numsymbols = 0; /* Amount of symbols with frequency > 0. */
+ int numBoundaryPMRuns;
+ Node* nodes;
+
+ /* Array of lists of chains. Each list requires only two lookahead chains at
+     a time, so each list is an array of two Node*'s. */
+ Node* (*lists)[2];
+
+ /* One leaf per symbol. Only numsymbols leaves will be used. */
+ Node* leaves = (Node*)malloc(n * sizeof(*leaves));
+
+ /* Initialize all bitlengths at 0. */
+ for (i = 0; i < n; i++) {
+ bitlengths[i] = 0;
+ }
+
+ /* Count used symbols and place them in the leaves. */
+ for (i = 0; i < n; i++) {
+ if (frequencies[i]) {
+ leaves[numsymbols].weight = frequencies[i];
+ leaves[numsymbols].count = i; /* Index of symbol this leaf represents. */
+ numsymbols++;
+ }
+ }
+
+ /* Check special cases and error conditions. */
+ if ((1 << maxbits) < numsymbols) {
+ free(leaves);
+ return 1; /* Error, too few maxbits to represent symbols. */
+ }
+ if (numsymbols == 0) {
+ free(leaves);
+ return 0; /* No symbols at all. OK. */
+ }
+ if (numsymbols == 1) {
+ bitlengths[leaves[0].count] = 1;
+ free(leaves);
+ return 0; /* Only one symbol, give it bitlength 1, not 0. OK. */
+ }
+ if (numsymbols == 2) {
+ bitlengths[leaves[0].count]++;
+ bitlengths[leaves[1].count]++;
+ free(leaves);
+ return 0;
+ }
+
+ /* Sort the leaves from lightest to heaviest. Add count into the same
+ variable for stable sorting. */
+ for (i = 0; i < numsymbols; i++) {
+ if (leaves[i].weight >=
+ ((size_t)1 << (sizeof(leaves[0].weight) * CHAR_BIT - 9))) {
+ free(leaves);
+ return 1; /* Error, we need 9 bits for the count. */
+ }
+ leaves[i].weight = (leaves[i].weight << 9) | leaves[i].count;
+ }
+ qsort(leaves, numsymbols, sizeof(Node), LeafComparator);
+ for (i = 0; i < numsymbols; i++) {
+ leaves[i].weight >>= 9;
+ }
+
+ if (numsymbols - 1 < maxbits) {
+ maxbits = numsymbols - 1;
+ }
+
+ /* Initialize node memory pool. */
+ nodes = (Node*)malloc(maxbits * 2 * numsymbols * sizeof(Node));
+ pool.next = nodes;
+
+ lists = (Node* (*)[2])malloc(maxbits * sizeof(*lists));
+ InitLists(&pool, leaves, maxbits, lists);
+
+ /* In the last list, 2 * numsymbols - 2 active chains need to be created. Two
+ are already created in the initialization. Each BoundaryPM run creates one. */
+ numBoundaryPMRuns = 2 * numsymbols - 4;
+ for (i = 0; i < numBoundaryPMRuns - 1; i++) {
+ BoundaryPM(lists, leaves, numsymbols, &pool, maxbits - 1);
+ }
+ BoundaryPMFinal(lists, leaves, numsymbols, &pool, maxbits - 1);
+
+ ExtractBitLengths(lists[maxbits - 1][1], leaves, bitlengths);
+
+ free(lists);
+ free(leaves);
+ free(nodes);
+ return 0; /* OK. */
+}
diff --git a/misc/ttf2woff/zopfli/katajainen.h b/misc/ttf2woff/zopfli/katajainen.h
new file mode 100644
index 000000000..5927350d6
--- /dev/null
+++ b/misc/ttf2woff/zopfli/katajainen.h
@@ -0,0 +1,42 @@
+/*
+Copyright 2011 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author: lode.vandevenne@gmail.com (Lode Vandevenne)
+Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
+*/
+
+#ifndef ZOPFLI_KATAJAINEN_H_
+#define ZOPFLI_KATAJAINEN_H_
+
+#include <string.h>
+
+/*
+Outputs minimum-redundancy length-limited code bitlengths for symbols with the
+given counts. The bitlengths are limited by maxbits.
+
+The output is tailored for DEFLATE: symbols that never occur, get a bit length
+of 0, and if only a single symbol occurs at least once, its bitlength will be 1,
+and not 0 as would theoretically be needed for a single symbol.
+
+frequencies: The amount of occurrences of each symbol.
+n: The amount of symbols.
+maxbits: Maximum bit length, inclusive.
+bitlengths: Output, the bitlengths for the symbol prefix codes.
+return: 0 for OK, non-0 for error.
+*/
+int ZopfliLengthLimitedCodeLengths(
+ const size_t* frequencies, int n, int maxbits, unsigned* bitlengths);
+
+#endif /* ZOPFLI_KATAJAINEN_H_ */
diff --git a/misc/ttf2woff/zopfli/lz77.c b/misc/ttf2woff/zopfli/lz77.c
new file mode 100644
index 000000000..9df899dd0
--- /dev/null
+++ b/misc/ttf2woff/zopfli/lz77.c
@@ -0,0 +1,630 @@
+/*
+Copyright 2011 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author: lode.vandevenne@gmail.com (Lode Vandevenne)
+Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
+*/
+
+#include "lz77.h"
+#include "symbols.h"
+#include "util.h"
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+void ZopfliInitLZ77Store(const unsigned char* data, ZopfliLZ77Store* store) {
+ store->size = 0;
+ store->litlens = 0;
+ store->dists = 0;
+ store->pos = 0;
+ store->data = data;
+ store->ll_symbol = 0;
+ store->d_symbol = 0;
+ store->ll_counts = 0;
+ store->d_counts = 0;
+}
+
+void ZopfliCleanLZ77Store(ZopfliLZ77Store* store) {
+ free(store->litlens);
+ free(store->dists);
+ free(store->pos);
+ free(store->ll_symbol);
+ free(store->d_symbol);
+ free(store->ll_counts);
+ free(store->d_counts);
+}
+
+static size_t CeilDiv(size_t a, size_t b) {
+ return (a + b - 1) / b;
+}
+
+void ZopfliCopyLZ77Store(
+ const ZopfliLZ77Store* source, ZopfliLZ77Store* dest) {
+ size_t i;
+ size_t llsize = ZOPFLI_NUM_LL * CeilDiv(source->size, ZOPFLI_NUM_LL);
+ size_t dsize = ZOPFLI_NUM_D * CeilDiv(source->size, ZOPFLI_NUM_D);
+ ZopfliCleanLZ77Store(dest);
+ ZopfliInitLZ77Store(source->data, dest);
+ dest->litlens =
+ (unsigned short*)malloc(sizeof(*dest->litlens) * source->size);
+ dest->dists = (unsigned short*)malloc(sizeof(*dest->dists) * source->size);
+ dest->pos = (size_t*)malloc(sizeof(*dest->pos) * source->size);
+ dest->ll_symbol =
+ (unsigned short*)malloc(sizeof(*dest->ll_symbol) * source->size);
+ dest->d_symbol =
+ (unsigned short*)malloc(sizeof(*dest->d_symbol) * source->size);
+ dest->ll_counts = (size_t*)malloc(sizeof(*dest->ll_counts) * llsize);
+ dest->d_counts = (size_t*)malloc(sizeof(*dest->d_counts) * dsize);
+
+ /* Allocation failed. */
+ if (!dest->litlens || !dest->dists) exit(-1);
+ if (!dest->pos) exit(-1);
+ if (!dest->ll_symbol || !dest->d_symbol) exit(-1);
+ if (!dest->ll_counts || !dest->d_counts) exit(-1);
+
+ dest->size = source->size;
+ for (i = 0; i < source->size; i++) {
+ dest->litlens[i] = source->litlens[i];
+ dest->dists[i] = source->dists[i];
+ dest->pos[i] = source->pos[i];
+ dest->ll_symbol[i] = source->ll_symbol[i];
+ dest->d_symbol[i] = source->d_symbol[i];
+ }
+ for (i = 0; i < llsize; i++) {
+ dest->ll_counts[i] = source->ll_counts[i];
+ }
+ for (i = 0; i < dsize; i++) {
+ dest->d_counts[i] = source->d_counts[i];
+ }
+}
+
+/*
+Appends the length and distance to the LZ77 arrays of the ZopfliLZ77Store.
+context must be a ZopfliLZ77Store*.
+*/
+void ZopfliStoreLitLenDist(unsigned short length, unsigned short dist,
+ size_t pos, ZopfliLZ77Store* store) {
+ size_t i;
+ /* Needed for using ZOPFLI_APPEND_DATA multiple times. */
+ size_t origsize = store->size;
+ size_t llstart = ZOPFLI_NUM_LL * (origsize / ZOPFLI_NUM_LL);
+ size_t dstart = ZOPFLI_NUM_D * (origsize / ZOPFLI_NUM_D);
+
+  /* Every time the index wraps around, a new cumulative histogram is made: we're
+ keeping one histogram value per LZ77 symbol rather than a full histogram for
+ each to save memory. */
+ if (origsize % ZOPFLI_NUM_LL == 0) {
+ size_t llsize = origsize;
+ for (i = 0; i < ZOPFLI_NUM_LL; i++) {
+ ZOPFLI_APPEND_DATA(
+ origsize == 0 ? 0 : store->ll_counts[origsize - ZOPFLI_NUM_LL + i],
+ &store->ll_counts, &llsize);
+ }
+ }
+ if (origsize % ZOPFLI_NUM_D == 0) {
+ size_t dsize = origsize;
+ for (i = 0; i < ZOPFLI_NUM_D; i++) {
+ ZOPFLI_APPEND_DATA(
+ origsize == 0 ? 0 : store->d_counts[origsize - ZOPFLI_NUM_D + i],
+ &store->d_counts, &dsize);
+ }
+ }
+
+ ZOPFLI_APPEND_DATA(length, &store->litlens, &store->size);
+ store->size = origsize;
+ ZOPFLI_APPEND_DATA(dist, &store->dists, &store->size);
+ store->size = origsize;
+ ZOPFLI_APPEND_DATA(pos, &store->pos, &store->size);
+ assert(length < 259);
+
+ if (dist == 0) {
+ store->size = origsize;
+ ZOPFLI_APPEND_DATA(length, &store->ll_symbol, &store->size);
+ store->size = origsize;
+ ZOPFLI_APPEND_DATA(0, &store->d_symbol, &store->size);
+ store->ll_counts[llstart + length]++;
+ } else {
+ store->size = origsize;
+ ZOPFLI_APPEND_DATA(ZopfliGetLengthSymbol(length),
+ &store->ll_symbol, &store->size);
+ store->size = origsize;
+ ZOPFLI_APPEND_DATA(ZopfliGetDistSymbol(dist),
+ &store->d_symbol, &store->size);
+ store->ll_counts[llstart + ZopfliGetLengthSymbol(length)]++;
+ store->d_counts[dstart + ZopfliGetDistSymbol(dist)]++;
+ }
+}
+
+void ZopfliAppendLZ77Store(const ZopfliLZ77Store* store,
+ ZopfliLZ77Store* target) {
+ size_t i;
+ for (i = 0; i < store->size; i++) {
+ ZopfliStoreLitLenDist(store->litlens[i], store->dists[i],
+ store->pos[i], target);
+ }
+}
+
+size_t ZopfliLZ77GetByteRange(const ZopfliLZ77Store* lz77,
+ size_t lstart, size_t lend) {
+ size_t l = lend - 1;
+ if (lstart == lend) return 0;
+ return lz77->pos[l] + ((lz77->dists[l] == 0) ?
+ 1 : lz77->litlens[l]) - lz77->pos[lstart];
+}
+
+static void ZopfliLZ77GetHistogramAt(const ZopfliLZ77Store* lz77, size_t lpos,
+ size_t* ll_counts, size_t* d_counts) {
+ /* The real histogram is created by using the histogram for this chunk, but
+ all superfluous values of this chunk subtracted. */
+ size_t llpos = ZOPFLI_NUM_LL * (lpos / ZOPFLI_NUM_LL);
+ size_t dpos = ZOPFLI_NUM_D * (lpos / ZOPFLI_NUM_D);
+ size_t i;
+ for (i = 0; i < ZOPFLI_NUM_LL; i++) {
+ ll_counts[i] = lz77->ll_counts[llpos + i];
+ }
+ for (i = lpos + 1; i < llpos + ZOPFLI_NUM_LL && i < lz77->size; i++) {
+ ll_counts[lz77->ll_symbol[i]]--;
+ }
+ for (i = 0; i < ZOPFLI_NUM_D; i++) {
+ d_counts[i] = lz77->d_counts[dpos + i];
+ }
+ for (i = lpos + 1; i < dpos + ZOPFLI_NUM_D && i < lz77->size; i++) {
+ if (lz77->dists[i] != 0) d_counts[lz77->d_symbol[i]]--;
+ }
+}
+
+void ZopfliLZ77GetHistogram(const ZopfliLZ77Store* lz77,
+ size_t lstart, size_t lend,
+ size_t* ll_counts, size_t* d_counts) {
+ size_t i;
+ if (lstart + ZOPFLI_NUM_LL * 3 > lend) {
+ memset(ll_counts, 0, sizeof(*ll_counts) * ZOPFLI_NUM_LL);
+ memset(d_counts, 0, sizeof(*d_counts) * ZOPFLI_NUM_D);
+ for (i = lstart; i < lend; i++) {
+ ll_counts[lz77->ll_symbol[i]]++;
+ if (lz77->dists[i] != 0) d_counts[lz77->d_symbol[i]]++;
+ }
+ } else {
+ /* Subtract the cumulative histograms at the end and the start to get the
+ histogram for this range. */
+ ZopfliLZ77GetHistogramAt(lz77, lend - 1, ll_counts, d_counts);
+ if (lstart > 0) {
+ size_t ll_counts2[ZOPFLI_NUM_LL];
+ size_t d_counts2[ZOPFLI_NUM_D];
+ ZopfliLZ77GetHistogramAt(lz77, lstart - 1, ll_counts2, d_counts2);
+
+ for (i = 0; i < ZOPFLI_NUM_LL; i++) {
+ ll_counts[i] -= ll_counts2[i];
+ }
+ for (i = 0; i < ZOPFLI_NUM_D; i++) {
+ d_counts[i] -= d_counts2[i];
+ }
+ }
+ }
+}
+
+void ZopfliInitBlockState(const ZopfliOptions* options,
+ size_t blockstart, size_t blockend, int add_lmc,
+ ZopfliBlockState* s) {
+ s->options = options;
+ s->blockstart = blockstart;
+ s->blockend = blockend;
+#ifdef ZOPFLI_LONGEST_MATCH_CACHE
+ if (add_lmc) {
+ s->lmc = (ZopfliLongestMatchCache*)malloc(sizeof(ZopfliLongestMatchCache));
+ ZopfliInitCache(blockend - blockstart, s->lmc);
+ } else {
+ s->lmc = 0;
+ }
+#endif
+}
+
+void ZopfliCleanBlockState(ZopfliBlockState* s) {
+#ifdef ZOPFLI_LONGEST_MATCH_CACHE
+ if (s->lmc) {
+ ZopfliCleanCache(s->lmc);
+ free(s->lmc);
+ }
+#endif
+}
+
/*
Heuristic score of a match: normally just the length, but lowered by one when
the distance is very long, to compensate for the many extra bits such
distances cost.

This is only a rough guide for the greedy LZ77 pass; precise cost models run
later, and making this heuristic more accurate can actually hurt compression.

Direct effects:
-discourages length-3 matches paired with a long distance (only matters when
 length == 3).
-slightly improves the choice between the two candidates in lazy matching.

Indirect effects:
-shifts block split points (when splitting-first is used) unpredictably.
-shapes the first zopfli run, so it influences how close that run starts to
 the optimum.
*/
static int GetLengthScore(int length, int distance) {
  /* From 1024 on, a distance needs 9+ extra bits; empirically this cutoff is
     the sweet spot on tested files. */
  if (distance > 1024) {
    return length - 1;
  }
  return length;
}
+
/*
Sanity check: asserts that the length bytes at pos equal the length bytes
located dist positions earlier, i.e. that (dist, length) is a valid LZ77
match for data. Produces no result; with NDEBUG the asserts vanish and only
a harmless scan remains.
*/
void ZopfliVerifyLenDist(const unsigned char* data, size_t datasize, size_t pos,
                         unsigned short dist, unsigned short length) {

  /* TODO(lode): make this only run in a debug compile, it's for assert only. */
  size_t i;

  assert(pos + length <= datasize);
  for (i = 0; i < length; i++) {
    if (data[pos - dist + i] != data[pos + i]) {
      /* Re-evaluate inside assert so a failure reports the exact comparison;
         break keeps the (already failed) scan from continuing. */
      assert(data[pos - dist + i] == data[pos + i]);
      break;
    }
  }
}
+
/*
Finds how long the match of scan and match is. Can be used to find how many
bytes starting from scan, and from match, are equal. Returns the first byte
after scan which no longer equals the corresponding byte after match.
scan is the position to compare
match is the earlier position to compare.
end is the last possible byte, beyond which to stop looking.
safe_end is a few (8) bytes before end, for comparing multiple bytes at once.

NOTE(review): the word-at-a-time branches read through casted size_t /
unsigned int pointers — technically unaligned, type-punned access. This is
inherited as-is; it works on the common targets but is not strictly portable.
*/
static const unsigned char* GetMatch(const unsigned char* scan,
                                     const unsigned char* match,
                                     const unsigned char* end,
                                     const unsigned char* safe_end) {

  if (sizeof(size_t) == 8) {
    /* 8 checks at once per array bounds check (size_t is 64-bit). */
    while (scan < safe_end && *((size_t*)scan) == *((size_t*)match)) {
      scan += 8;
      match += 8;
    }
  } else if (sizeof(unsigned int) == 4) {
    /* 4 checks at once per array bounds check (unsigned int is 32-bit). */
    while (scan < safe_end
        && *((unsigned int*)scan) == *((unsigned int*)match)) {
      scan += 4;
      match += 4;
    }
  } else {
    /* do 8 checks at once per array bounds check. */
    while (scan < safe_end && *scan == *match && *++scan == *++match
          && *++scan == *++match && *++scan == *++match
          && *++scan == *++match && *++scan == *++match
          && *++scan == *++match && *++scan == *++match) {
      scan++; match++;
    }
  }

  /* The remaining few bytes, one at a time up to end. */
  while (scan != end && *scan == *match) {
    scan++; match++;
  }

  return scan;
}
+
+#ifdef ZOPFLI_LONGEST_MATCH_CACHE
/*
Gets distance, length and sublen values from the cache if possible.
Returns 1 if it got the values from the cache, 0 if not.
Updates the limit value to a smaller one if possible with more limited
information from the cache.
*/
static int TryGetFromLongestMatchCache(ZopfliBlockState* s,
    size_t pos, size_t* limit,
    unsigned short* sublen, unsigned short* distance, unsigned short* length) {
  /* The LMC cache starts at the beginning of the block rather than the
     beginning of the whole array. */
  size_t lmcpos = pos - s->blockstart;

  /* Length > 0 and dist 0 is invalid combination, which indicates on purpose
     that this cache value is not filled in yet. */
  unsigned char cache_available = s->lmc && (s->lmc->length[lmcpos] == 0 ||
      s->lmc->dist[lmcpos] != 0);
  /* The cached answer is only usable if it was computed under a limit at
     least as permissive as the caller's, or the sublen cache covers it. */
  unsigned char limit_ok_for_cache = cache_available &&
      (*limit == ZOPFLI_MAX_MATCH || s->lmc->length[lmcpos] <= *limit ||
       (sublen && ZopfliMaxCachedSublen(s->lmc,
           lmcpos, s->lmc->length[lmcpos]) >= *limit));

  if (s->lmc && limit_ok_for_cache && cache_available) {
    if (!sublen || s->lmc->length[lmcpos]
        <= ZopfliMaxCachedSublen(s->lmc, lmcpos, s->lmc->length[lmcpos])) {
      *length = s->lmc->length[lmcpos];
      if (*length > *limit) *length = *limit;
      if (sublen) {
        ZopfliCacheToSublen(s->lmc, lmcpos, *length, sublen);
        *distance = sublen[*length];
        if (*limit == ZOPFLI_MAX_MATCH && *length >= ZOPFLI_MIN_MATCH) {
          /* Cross-check: sublen cache and dist cache must agree. */
          assert(sublen[*length] == s->lmc->dist[lmcpos]);
        }
      } else {
        *distance = s->lmc->dist[lmcpos];
      }
      return 1;
    }
    /* Can't use much of the cache, since the "sublens" need to be calculated,
       but at least we already know when to stop. */
    *limit = s->lmc->length[lmcpos];
  }

  return 0;
}
+
/*
Stores the found sublen, distance and length in the longest match cache, if
possible. Only a full-limit search (limit == ZOPFLI_MAX_MATCH) with sublen
data is cached, and only into a slot not already filled.
*/
static void StoreInLongestMatchCache(ZopfliBlockState* s,
    size_t pos, size_t limit,
    const unsigned short* sublen,
    unsigned short distance, unsigned short length) {
  /* The LMC cache starts at the beginning of the block rather than the
     beginning of the whole array. */
  size_t lmcpos = pos - s->blockstart;

  /* Length > 0 and dist 0 is invalid combination, which indicates on purpose
     that this cache value is not filled in yet. */
  unsigned char cache_available = s->lmc && (s->lmc->length[lmcpos] == 0 ||
      s->lmc->dist[lmcpos] != 0);

  if (s->lmc && limit == ZOPFLI_MAX_MATCH && sublen && !cache_available) {
    /* Empty slots are encoded as length 1 / dist 0. */
    assert(s->lmc->length[lmcpos] == 1 && s->lmc->dist[lmcpos] == 0);
    /* A too-short match is stored as "no match" (0/0). */
    s->lmc->dist[lmcpos] = length < ZOPFLI_MIN_MATCH ? 0 : distance;
    s->lmc->length[lmcpos] = length < ZOPFLI_MIN_MATCH ? 0 : length;
    assert(!(s->lmc->length[lmcpos] == 1 && s->lmc->dist[lmcpos] == 0));
    ZopfliSublenToCache(sublen, lmcpos, length, s->lmc);
  }
}
+#endif
+
/*
Finds the longest LZ77 match for array[pos] within the sliding window, by
walking the hash chain of positions with the same hash value. Outputs the
best distance and length found (length 0/1 means: emit a literal). If sublen
is non-null it receives, per length, the smallest distance achieving it.
The longest match cache is consulted first and updated afterwards when
enabled. See the declaration in lz77.h for the parameter contract.
*/
void ZopfliFindLongestMatch(ZopfliBlockState* s, const ZopfliHash* h,
    const unsigned char* array,
    size_t pos, size_t size, size_t limit,
    unsigned short* sublen, unsigned short* distance, unsigned short* length) {
  unsigned short hpos = pos & ZOPFLI_WINDOW_MASK, p, pp;
  unsigned short bestdist = 0;
  unsigned short bestlength = 1;
  const unsigned char* scan;
  const unsigned char* match;
  const unsigned char* arrayend;
  const unsigned char* arrayend_safe;
#if ZOPFLI_MAX_CHAIN_HITS < ZOPFLI_WINDOW_SIZE
  int chain_counter = ZOPFLI_MAX_CHAIN_HITS;  /* For quitting early. */
#endif

  unsigned dist = 0;  /* Not unsigned short on purpose: can reach
                         ZOPFLI_WINDOW_SIZE, which overflows 16 bits. */

  /* Local copies; may be switched to the second hash below. */
  int* hhead = h->head;
  unsigned short* hprev = h->prev;
  int* hhashval = h->hashval;
  int hval = h->val;

#ifdef ZOPFLI_LONGEST_MATCH_CACHE
  if (TryGetFromLongestMatchCache(s, pos, &limit, sublen, distance, length)) {
    assert(pos + *length <= size);
    return;
  }
#endif

  assert(limit <= ZOPFLI_MAX_MATCH);
  assert(limit >= ZOPFLI_MIN_MATCH);
  assert(pos < size);

  if (size - pos < ZOPFLI_MIN_MATCH) {
    /* The rest of the code assumes there are at least ZOPFLI_MIN_MATCH bytes to
       try. */
    *length = 0;
    *distance = 0;
    return;
  }

  /* Never match past the end of the data. */
  if (pos + limit > size) {
    limit = size - pos;
  }
  arrayend = &array[pos] + limit;
  arrayend_safe = arrayend - 8;  /* For GetMatch's 8-bytes-at-a-time loop. */

  assert(hval < 65536);

  pp = hhead[hval];  /* During the whole loop, p == hprev[pp]. */
  p = hprev[pp];

  assert(pp == hpos);

  /* Window positions wrap, so the distance formula handles p > pp. */
  dist = p < pp ? pp - p : ((ZOPFLI_WINDOW_SIZE - p) + pp);

  /* Go through all distances. */
  while (dist < ZOPFLI_WINDOW_SIZE) {
    unsigned short currentlength = 0;

    assert(p < ZOPFLI_WINDOW_SIZE);
    assert(p == hprev[pp]);
    assert(hhashval[p] == hval);

    if (dist > 0) {
      assert(pos < size);
      assert(dist <= pos);
      scan = &array[pos];
      match = &array[pos - dist];

      /* Testing the byte at position bestlength first, goes slightly faster:
         a candidate can only improve if it matches at least that far. */
      if (pos + bestlength >= size
          || *(scan + bestlength) == *(match + bestlength)) {

#ifdef ZOPFLI_HASH_SAME
        /* Skip ahead over a known run of identical bytes. */
        unsigned short same0 = h->same[pos & ZOPFLI_WINDOW_MASK];
        if (same0 > 2 && *scan == *match) {
          unsigned short same1 = h->same[(pos - dist) & ZOPFLI_WINDOW_MASK];
          unsigned short same = same0 < same1 ? same0 : same1;
          if (same > limit) same = limit;
          scan += same;
          match += same;
        }
#endif
        scan = GetMatch(scan, match, arrayend, arrayend_safe);
        currentlength = scan - &array[pos];  /* The found length. */
      }

      if (currentlength > bestlength) {
        if (sublen) {
          /* Record this dist as the smallest for every newly reached length. */
          unsigned short j;
          for (j = bestlength + 1; j <= currentlength; j++) {
            sublen[j] = dist;
          }
        }
        bestdist = dist;
        bestlength = currentlength;
        if (currentlength >= limit) break;
      }
    }


#ifdef ZOPFLI_HASH_SAME_HASH
    /* Switch to the other hash once this will be more efficient. */
    if (hhead != h->head2 && bestlength >= h->same[hpos] &&
        h->val2 == h->hashval2[p]) {
      /* Now use the hash that encodes the length and first byte. */
      hhead = h->head2;
      hprev = h->prev2;
      hhashval = h->hashval2;
      hval = h->val2;
    }
#endif

    pp = p;
    p = hprev[p];
    if (p == pp) break;  /* Uninited prev value: end of the chain. */

    dist += p < pp ? pp - p : ((ZOPFLI_WINDOW_SIZE - p) + pp);

#if ZOPFLI_MAX_CHAIN_HITS < ZOPFLI_WINDOW_SIZE
    chain_counter--;
    if (chain_counter <= 0) break;
#endif
  }

#ifdef ZOPFLI_LONGEST_MATCH_CACHE
  StoreInLongestMatchCache(s, pos, limit, sublen, bestdist, bestlength);
#endif

  assert(bestlength <= limit);

  *distance = bestdist;
  *length = bestlength;
  assert(pos + *length <= size);
}
+
/*
Greedy LZ77 pass (gzip-like, with optional one-step lazy matching) over
in[instart..inend). Results are appended to store. Data before instart (up to
one window) is used only to warm up the hash so earlier bytes can serve as
dictionary. h is caller-provided scratch space, reset here.
*/
void ZopfliLZ77Greedy(ZopfliBlockState* s, const unsigned char* in,
                      size_t instart, size_t inend,
                      ZopfliLZ77Store* store, ZopfliHash* h) {
  size_t i = 0, j;
  unsigned short leng;
  unsigned short dist;
  int lengthscore;
  size_t windowstart = instart > ZOPFLI_WINDOW_SIZE
      ? instart - ZOPFLI_WINDOW_SIZE : 0;
  unsigned short dummysublen[259];  /* Required by FindLongestMatch for
                                       caching; values unused here. */

#ifdef ZOPFLI_LAZY_MATCHING
  /* Lazy matching: hold back one match to see if the next byte offers a
     better one. */
  unsigned prev_length = 0;
  unsigned prev_match = 0;
  int prevlengthscore;
  int match_available = 0;  /* 1 while a held-back match is pending. */
#endif

  if (instart == inend) return;

  ZopfliResetHash(ZOPFLI_WINDOW_SIZE, h);
  ZopfliWarmupHash(in, windowstart, inend, h);
  for (i = windowstart; i < instart; i++) {
    ZopfliUpdateHash(in, i, inend, h);
  }

  for (i = instart; i < inend; i++) {
    ZopfliUpdateHash(in, i, inend, h);

    ZopfliFindLongestMatch(s, h, in, i, inend, ZOPFLI_MAX_MATCH, dummysublen,
                           &dist, &leng);
    lengthscore = GetLengthScore(leng, dist);

#ifdef ZOPFLI_LAZY_MATCHING
    /* Lazy matching. */
    prevlengthscore = GetLengthScore(prev_length, prev_match);
    if (match_available) {
      match_available = 0;
      if (lengthscore > prevlengthscore + 1) {
        /* Current match beats the held one: emit the skipped byte as a
           literal and possibly hold the current match in turn. */
        ZopfliStoreLitLenDist(in[i - 1], 0, i - 1, store);
        if (lengthscore >= ZOPFLI_MIN_MATCH && leng < ZOPFLI_MAX_MATCH) {
          match_available = 1;
          prev_length = leng;
          prev_match = dist;
          continue;
        }
      } else {
        /* Add previous to output. */
        leng = prev_length;
        dist = prev_match;
        lengthscore = prevlengthscore;
        /* Add to output. */
        ZopfliVerifyLenDist(in, inend, i - 1, dist, leng);
        ZopfliStoreLitLenDist(leng, dist, i - 1, store);
        /* Starts at 2 because the match began one byte back. */
        for (j = 2; j < leng; j++) {
          assert(i < inend);
          i++;
          ZopfliUpdateHash(in, i, inend, h);
        }
        continue;
      }
    }
    else if (lengthscore >= ZOPFLI_MIN_MATCH && leng < ZOPFLI_MAX_MATCH) {
      /* Hold this match back one byte to compare with the next one. */
      match_available = 1;
      prev_length = leng;
      prev_match = dist;
      continue;
    }
    /* End of lazy matching. */
#endif

    /* Add to output. */
    if (lengthscore >= ZOPFLI_MIN_MATCH) {
      ZopfliVerifyLenDist(in, inend, i, dist, leng);
      ZopfliStoreLitLenDist(leng, dist, i, store);
    } else {
      leng = 1;
      ZopfliStoreLitLenDist(in[i], 0, i, store);
    }
    /* Advance over the matched bytes, keeping the hash up to date. */
    for (j = 1; j < leng; j++) {
      assert(i < inend);
      i++;
      ZopfliUpdateHash(in, i, inend, h);
    }
  }
}
diff --git a/misc/ttf2woff/zopfli/lz77.h b/misc/ttf2woff/zopfli/lz77.h
new file mode 100644
index 000000000..dc8597abf
--- /dev/null
+++ b/misc/ttf2woff/zopfli/lz77.h
@@ -0,0 +1,142 @@
+/*
+Copyright 2011 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author: lode.vandevenne@gmail.com (Lode Vandevenne)
+Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
+*/
+
+/*
+Functions for basic LZ77 compression and utilities for the "squeeze" LZ77
+compression.
+*/
+
+#ifndef ZOPFLI_LZ77_H_
+#define ZOPFLI_LZ77_H_
+
+#include <stdlib.h>
+
+#include "cache.h"
+#include "hash.h"
+#include "zopfli.h"
+
/*
Stores lit/length and dist pairs for LZ77.
Parameter litlens: Contains the literal symbols or length values.
Parameter dists: Contains the distances. A value is 0 to indicate that there is
no dist and the corresponding litlens value is a literal instead of a length.
Parameter size: The size of both the litlens and dists arrays.
The memory can best be managed by using ZopfliInitLZ77Store to initialize it,
ZopfliCleanLZ77Store to destroy it, and ZopfliStoreLitLenDist to append values.
*/
typedef struct ZopfliLZ77Store {
  unsigned short* litlens;  /* Lit or len. */
  unsigned short* dists;  /* If 0: indicates literal in corresponding litlens,
      if > 0: length in corresponding litlens, this is the distance. */
  size_t size;

  const unsigned char* data;  /* original data */
  size_t* pos;  /* position in data where this LZ77 command begins */

  /* Precomputed deflate symbol for each litlens/dists entry. */
  unsigned short* ll_symbol;
  unsigned short* d_symbol;

  /* Cumulative histograms wrapping around per chunk. Each chunk has the amount
     of distinct symbols as length, so using 1 value per LZ77 symbol, we have a
     precise histogram at every N symbols, and the rest can be calculated by
     looping through the actual symbols of this chunk. */
  size_t* ll_counts;
  size_t* d_counts;
} ZopfliLZ77Store;
+
+void ZopfliInitLZ77Store(const unsigned char* data, ZopfliLZ77Store* store);
+void ZopfliCleanLZ77Store(ZopfliLZ77Store* store);
+void ZopfliCopyLZ77Store(const ZopfliLZ77Store* source, ZopfliLZ77Store* dest);
+void ZopfliStoreLitLenDist(unsigned short length, unsigned short dist,
+ size_t pos, ZopfliLZ77Store* store);
+void ZopfliAppendLZ77Store(const ZopfliLZ77Store* store,
+ ZopfliLZ77Store* target);
+/* Gets the amount of raw bytes that this range of LZ77 symbols spans. */
+size_t ZopfliLZ77GetByteRange(const ZopfliLZ77Store* lz77,
+ size_t lstart, size_t lend);
+/* Gets the histogram of lit/len and dist symbols in the given range, using the
+cumulative histograms, so faster than adding one by one for large range. Does
+not add the one end symbol of value 256. */
+void ZopfliLZ77GetHistogram(const ZopfliLZ77Store* lz77,
+ size_t lstart, size_t lend,
+ size_t* ll_counts, size_t* d_counts);
+
/*
Some state information for compressing a block.
This is currently a bit under-used (with mainly only the longest match cache),
but is kept for easy future expansion.
Initialize with ZopfliInitBlockState, release with ZopfliCleanBlockState.
*/
typedef struct ZopfliBlockState {
  const ZopfliOptions* options;

#ifdef ZOPFLI_LONGEST_MATCH_CACHE
  /* Cache for length/distance pairs found so far. Null when the state was
     created with add_lmc == 0. */
  ZopfliLongestMatchCache* lmc;
#endif

  /* The start (inclusive) and end (not inclusive) of the current block. */
  size_t blockstart;
  size_t blockend;
} ZopfliBlockState;
+
+void ZopfliInitBlockState(const ZopfliOptions* options,
+ size_t blockstart, size_t blockend, int add_lmc,
+ ZopfliBlockState* s);
+void ZopfliCleanBlockState(ZopfliBlockState* s);
+
+/*
+Finds the longest match (length and corresponding distance) for LZ77
+compression.
+Even when not using "sublen", it can be more efficient to provide an array,
+because only then the caching is used.
+array: the data
+pos: position in the data to find the match for
+size: size of the data
+limit: limit length to maximum this value (default should be 258). This allows
+ finding a shorter dist for that length (= less extra bits). Must be
+ in the range [ZOPFLI_MIN_MATCH, ZOPFLI_MAX_MATCH].
+sublen: output array of 259 elements, or null. Has, for each length, the
+ smallest distance required to reach this length. Only 256 of its 259 values
+ are used, the first 3 are ignored (the shortest length is 3. It is purely
+ for convenience that the array is made 3 longer).
+*/
+void ZopfliFindLongestMatch(
+ ZopfliBlockState *s, const ZopfliHash* h, const unsigned char* array,
+ size_t pos, size_t size, size_t limit,
+ unsigned short* sublen, unsigned short* distance, unsigned short* length);
+
+/*
+Verifies if length and dist are indeed valid, only used for assertion.
+*/
+void ZopfliVerifyLenDist(const unsigned char* data, size_t datasize, size_t pos,
+ unsigned short dist, unsigned short length);
+
+/*
+Does LZ77 using an algorithm similar to gzip, with lazy matching, rather than
+with the slow but better "squeeze" implementation.
+The result is placed in the ZopfliLZ77Store.
+If instart is larger than 0, it uses values before instart as starting
+dictionary.
+*/
+void ZopfliLZ77Greedy(ZopfliBlockState* s, const unsigned char* in,
+ size_t instart, size_t inend,
+ ZopfliLZ77Store* store, ZopfliHash* h);
+
+#endif /* ZOPFLI_LZ77_H_ */
diff --git a/misc/ttf2woff/zopfli/squeeze.c b/misc/ttf2woff/zopfli/squeeze.c
new file mode 100644
index 000000000..d7cd9bf48
--- /dev/null
+++ b/misc/ttf2woff/zopfli/squeeze.c
@@ -0,0 +1,560 @@
+/*
+Copyright 2011 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author: lode.vandevenne@gmail.com (Lode Vandevenne)
+Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
+*/
+
+#include "squeeze.h"
+
+#include <assert.h>
+#include <math.h>
+#include <stdio.h>
+
+#include "blocksplitter.h"
+#include "deflate.h"
+#include "symbols.h"
+#include "tree.h"
+#include "util.h"
+
/* Symbol statistics driving the squeeze cost model: observed frequencies
   plus the entropy-derived bit length of each symbol. */
typedef struct SymbolStats {
  /* The literal and length symbols. */
  size_t litlens[ZOPFLI_NUM_LL];
  /* The 32 unique dist symbols, not the 32768 possible dists. */
  size_t dists[ZOPFLI_NUM_D];

  /* Length of each lit/len symbol in bits. */
  double ll_symbols[ZOPFLI_NUM_LL];
  /* Length of each dist symbol in bits. */
  double d_symbols[ZOPFLI_NUM_D];
} SymbolStats;
+
+/* Sets everything to 0. */
+static void InitStats(SymbolStats* stats) {
+ memset(stats->litlens, 0, ZOPFLI_NUM_LL * sizeof(stats->litlens[0]));
+ memset(stats->dists, 0, ZOPFLI_NUM_D * sizeof(stats->dists[0]));
+
+ memset(stats->ll_symbols, 0, ZOPFLI_NUM_LL * sizeof(stats->ll_symbols[0]));
+ memset(stats->d_symbols, 0, ZOPFLI_NUM_D * sizeof(stats->d_symbols[0]));
+}
+
+static void CopyStats(SymbolStats* source, SymbolStats* dest) {
+ memcpy(dest->litlens, source->litlens,
+ ZOPFLI_NUM_LL * sizeof(dest->litlens[0]));
+ memcpy(dest->dists, source->dists, ZOPFLI_NUM_D * sizeof(dest->dists[0]));
+
+ memcpy(dest->ll_symbols, source->ll_symbols,
+ ZOPFLI_NUM_LL * sizeof(dest->ll_symbols[0]));
+ memcpy(dest->d_symbols, source->d_symbols,
+ ZOPFLI_NUM_D * sizeof(dest->d_symbols[0]));
+}
+
/* Blends the symbol frequencies of two stat sets into result:
   result = (size_t)(stats1 * w1 + stats2 * w2), per symbol. Only the
   integer frequencies are combined; ll_symbols/d_symbols are untouched.
   (The original comment said "Adds the bit lengths", which was inaccurate.) */
static void AddWeighedStatFreqs(const SymbolStats* stats1, double w1,
                                const SymbolStats* stats2, double w2,
                                SymbolStats* result) {
  size_t i;
  for (i = 0; i < ZOPFLI_NUM_LL; i++) {
    result->litlens[i] =
        (size_t) (stats1->litlens[i] * w1 + stats2->litlens[i] * w2);
  }
  for (i = 0; i < ZOPFLI_NUM_D; i++) {
    result->dists[i] =
        (size_t) (stats1->dists[i] * w1 + stats2->dists[i] * w2);
  }
  result->litlens[256] = 1;  /* End symbol: must always count exactly once. */
}
+
/* State of the multiply-with-carry pseudo-random generator below. */
typedef struct RanState {
  unsigned int m_w, m_z;
} RanState;

/* Seeds the generator with fixed constants, so runs are deterministic. */
static void InitRanState(RanState* state) {
  state->m_w = 1;
  state->m_z = 2;
}

/* Get random number: "Multiply-With-Carry" generator of G. Marsaglia */
static unsigned int Ran(RanState* state) {
  state->m_z = 36969 * (state->m_z & 65535) + (state->m_z >> 16);
  state->m_w = 18000 * (state->m_w & 65535) + (state->m_w >> 16);
  return (state->m_z << 16) + state->m_w;  /* 32-bit result. */
}
+
/* With probability ~1/3 per entry, replaces freqs[i] with the frequency of a
   randomly chosen symbol. Used to perturb the stats out of a local optimum. */
static void RandomizeFreqs(RanState* state, size_t* freqs, int n) {
  int i;
  for (i = 0; i < n; i++) {
    if ((Ran(state) >> 4) % 3 == 0) freqs[i] = freqs[Ran(state) % n];
  }
}

/* Randomizes both frequency tables of stats; the end symbol (256) is forced
   back to count 1 so it remains encodable. */
static void RandomizeStatFreqs(RanState* state, SymbolStats* stats) {
  RandomizeFreqs(state, stats->litlens, ZOPFLI_NUM_LL);
  RandomizeFreqs(state, stats->dists, ZOPFLI_NUM_D);
  stats->litlens[256] = 1;  /* End symbol. */
}
+
+static void ClearStatFreqs(SymbolStats* stats) {
+ size_t i;
+ for (i = 0; i < ZOPFLI_NUM_LL; i++) stats->litlens[i] = 0;
+ for (i = 0; i < ZOPFLI_NUM_D; i++) stats->dists[i] = 0;
+}
+
/*
Function that calculates a cost based on a model for the given LZ77 symbol.
litlen: means literal symbol if dist is 0, length otherwise.
context: opaque model data (e.g. a SymbolStats*), or unused.
Returns the (possibly fractional) number of bits needed to emit the symbol.
*/
typedef double CostModelFun(unsigned litlen, unsigned dist, void* context);
+
/*
Cost model that exactly matches the fixed Huffman tree of deflate
(type: CostModelFun). Returns the bit cost of one symbol: a literal when
dist == 0, otherwise a length/distance pair.
*/
static double GetCostFixed(unsigned litlen, unsigned dist, void* unused) {
  (void)unused;
  if (dist == 0) {
    /* Fixed-tree literals: 8 bits for values 0..143, 9 bits above. */
    return litlen <= 143 ? 8 : 9;
  }
  {
    int cost = 5;  /* Every fixed dist symbol has length 5. */
    /* Length symbols up to 279 take 7 bits in the fixed tree, the rest 8. */
    cost += ZopfliGetLengthSymbol(litlen) <= 279 ? 7 : 8;
    /* Plus the extra bits of both the length and the distance. */
    cost += ZopfliGetLengthExtraBits(litlen);
    cost += ZopfliGetDistExtraBits(dist);
    return cost;
  }
}
+
+/*
+Cost model based on symbol statistics.
+type: CostModelFun
+*/
+static double GetCostStat(unsigned litlen, unsigned dist, void* context) {
+ SymbolStats* stats = (SymbolStats*)context;
+ if (dist == 0) {
+ return stats->ll_symbols[litlen];
+ } else {
+ int lsym = ZopfliGetLengthSymbol(litlen);
+ int lbits = ZopfliGetLengthExtraBits(litlen);
+ int dsym = ZopfliGetDistSymbol(dist);
+ int dbits = ZopfliGetDistExtraBits(dist);
+ return lbits + dbits + stats->ll_symbols[lsym] + stats->d_symbols[dsym];
+ }
+}
+
/*
Finds the minimum possible cost this cost model can return for valid length and
distance symbols. Probes every length (with the cheapest-looking dist 1) and
every distinct dist symbol (with the shortest length 3), then evaluates the
model once on the combination of both winners.
*/
static double GetCostModelMinCost(CostModelFun* costmodel, void* costcontext) {
  double mincost;
  int bestlength = 0;  /* length that has lowest cost in the cost model */
  int bestdist = 0;  /* distance that has lowest cost in the cost model */
  int i;
  /*
  Table of distances that have a different distance symbol in the deflate
  specification. Each value is the first distance that has a new symbol. Only
  different symbols affect the cost model so only these need to be checked.
  See RFC 1951 section 3.2.5. Compressed blocks (length and distance codes).
  */
  static const int dsymbols[30] = {
    1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513,
    769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577
  };

  mincost = ZOPFLI_LARGE_FLOAT;
  for (i = 3; i < 259; i++) {
    double c = costmodel(i, 1, costcontext);
    if (c < mincost) {
      bestlength = i;
      mincost = c;
    }
  }

  mincost = ZOPFLI_LARGE_FLOAT;
  for (i = 0; i < 30; i++) {
    double c = costmodel(3, dsymbols[i], costcontext);
    if (c < mincost) {
      bestdist = dsymbols[i];
      mincost = c;
    }
  }

  /* Combine the independently best length and dist into one final cost. */
  return costmodel(bestlength, bestdist, costcontext);
}
+
+static size_t min(size_t a, size_t b) {
+ return a < b ? a : b;
+}
+
/*
Performs the forward pass for "squeeze". Gets the most optimal length to reach
every byte from a previous byte, using cost calculations.
s: the ZopfliBlockState
in: the input data array
instart: where to start
inend: where to stop (not inclusive)
costmodel: function to calculate the cost of some lit/len/dist pair.
costcontext: abstract context for the costmodel function
length_array: output array of size (inend - instart) which will receive the best
  length to reach this byte from a previous byte.
h: caller-provided hash scratch space, reset here.
costs: caller-provided working array of blocksize + 1 floats, overwritten.
returns the cost that was, according to the costmodel, needed to get to the end.
*/
static double GetBestLengths(ZopfliBlockState *s,
                             const unsigned char* in,
                             size_t instart, size_t inend,
                             CostModelFun* costmodel, void* costcontext,
                             unsigned short* length_array,
                             ZopfliHash* h, float* costs) {
  /* Best cost to get here so far. */
  size_t blocksize = inend - instart;
  size_t i = 0, k, kend;
  unsigned short leng;
  unsigned short dist;
  unsigned short sublen[259];
  size_t windowstart = instart > ZOPFLI_WINDOW_SIZE
      ? instart - ZOPFLI_WINDOW_SIZE : 0;
  double result;
  double mincost = GetCostModelMinCost(costmodel, costcontext);
  double mincostaddcostj;

  if (instart == inend) return 0;

  ZopfliResetHash(ZOPFLI_WINDOW_SIZE, h);
  ZopfliWarmupHash(in, windowstart, inend, h);
  for (i = windowstart; i < instart; i++) {
    ZopfliUpdateHash(in, i, inend, h);
  }

  for (i = 1; i < blocksize + 1; i++) costs[i] = ZOPFLI_LARGE_FLOAT;
  costs[0] = 0;  /* Because it's the start. */
  length_array[0] = 0;

  for (i = instart; i < inend; i++) {
    size_t j = i - instart;  /* Index in the costs array and length_array. */
    ZopfliUpdateHash(in, i, inend, h);

#ifdef ZOPFLI_SHORTCUT_LONG_REPETITIONS
    /* If we're in a long repetition of the same character and have more than
       ZOPFLI_MAX_MATCH characters before and after our position. */
    if (h->same[i & ZOPFLI_WINDOW_MASK] > ZOPFLI_MAX_MATCH * 2
        && i > instart + ZOPFLI_MAX_MATCH + 1
        && i + ZOPFLI_MAX_MATCH * 2 + 1 < inend
        && h->same[(i - ZOPFLI_MAX_MATCH) & ZOPFLI_WINDOW_MASK]
            > ZOPFLI_MAX_MATCH) {
      double symbolcost = costmodel(ZOPFLI_MAX_MATCH, 1, costcontext);
      /* Set the length to reach each one to ZOPFLI_MAX_MATCH, and the cost to
         the cost corresponding to that length. Doing this, we skip
         ZOPFLI_MAX_MATCH values to avoid calling ZopfliFindLongestMatch. */
      for (k = 0; k < ZOPFLI_MAX_MATCH; k++) {
        costs[j + ZOPFLI_MAX_MATCH] = costs[j] + symbolcost;
        length_array[j + ZOPFLI_MAX_MATCH] = ZOPFLI_MAX_MATCH;
        i++;
        j++;
        ZopfliUpdateHash(in, i, inend, h);
      }
    }
#endif

    ZopfliFindLongestMatch(s, h, in, i, inend, ZOPFLI_MAX_MATCH, sublen,
                           &dist, &leng);

    /* Literal: relax the cost of reaching the next byte via a 1-byte step. */
    if (i + 1 <= inend) {
      double newCost = costmodel(in[i], 0, costcontext) + costs[j];
      assert(newCost >= 0);
      if (newCost < costs[j + 1]) {
        costs[j + 1] = newCost;
        length_array[j + 1] = 1;
      }
    }
    /* Lengths: relax every reachable match length with its cheapest dist. */
    kend = min(leng, inend - i);
    mincostaddcostj = mincost + costs[j];
    for (k = 3; k <= kend; k++) {
      double newCost;

      /* Calling the cost model is expensive, avoid this if we are already at
         the minimum possible cost that it can return. */
      if (costs[j + k] <= mincostaddcostj) continue;

      newCost = costmodel(k, sublen[k], costcontext) + costs[j];
      assert(newCost >= 0);
      if (newCost < costs[j + k]) {
        assert(k <= ZOPFLI_MAX_MATCH);
        costs[j + k] = newCost;
        length_array[j + k] = k;
      }
    }
  }

  assert(costs[blocksize] >= 0);
  result = costs[blocksize];

  return result;
}
+
/*
Calculates the optimal path of lz77 lengths to use, from the calculated
length_array. The length_array must contain the optimal length to reach that
byte. The path will be filled with the lengths to use, so its data size will be
the amount of lz77 symbols. path/pathsize are a growable array managed with
ZOPFLI_APPEND_DATA; *path may be reallocated.
*/
static void TraceBackwards(size_t size, const unsigned short* length_array,
                           unsigned short** path, size_t* pathsize) {
  size_t index = size;
  if (size == 0) return;
  /* Walk from the end to the start, following the optimal step lengths. */
  for (;;) {
    ZOPFLI_APPEND_DATA(length_array[index], path, pathsize);
    assert(length_array[index] <= index);
    assert(length_array[index] <= ZOPFLI_MAX_MATCH);
    assert(length_array[index] != 0);
    index -= length_array[index];
    if (index == 0) break;
  }

  /* Mirror result: the walk produced the path back-to-front. */
  for (index = 0; index < *pathsize / 2; index++) {
    unsigned short temp = (*path)[index];
    (*path)[index] = (*path)[*pathsize - index - 1];
    (*path)[*pathsize - index - 1] = temp;
  }
}
+
/*
Replays a path of step lengths over in[instart..inend), emitting the
corresponding literals and length/dist pairs into store. For each match step
the distance is recovered by re-running ZopfliFindLongestMatch limited to
that length. The hash h is reset and kept in sync along the way.
*/
static void FollowPath(ZopfliBlockState* s,
                       const unsigned char* in, size_t instart, size_t inend,
                       unsigned short* path, size_t pathsize,
                       ZopfliLZ77Store* store, ZopfliHash *h) {
  size_t i, j, pos = 0;
  size_t windowstart = instart > ZOPFLI_WINDOW_SIZE
      ? instart - ZOPFLI_WINDOW_SIZE : 0;

  size_t total_length_test = 0;  /* Only for the assert bookkeeping below. */

  if (instart == inend) return;

  ZopfliResetHash(ZOPFLI_WINDOW_SIZE, h);
  ZopfliWarmupHash(in, windowstart, inend, h);
  for (i = windowstart; i < instart; i++) {
    ZopfliUpdateHash(in, i, inend, h);
  }

  pos = instart;
  for (i = 0; i < pathsize; i++) {
    unsigned short length = path[i];
    unsigned short dummy_length;
    unsigned short dist;
    assert(pos < inend);

    ZopfliUpdateHash(in, pos, inend, h);

    /* Add to output. */
    if (length >= ZOPFLI_MIN_MATCH) {
      /* Get the distance by recalculating longest match. The found length
         should match the length from the path. */
      ZopfliFindLongestMatch(s, h, in, pos, inend, length, 0,
                             &dist, &dummy_length);
      assert(!(dummy_length != length && length > 2 && dummy_length > 2));
      ZopfliVerifyLenDist(in, inend, pos, dist, length);
      ZopfliStoreLitLenDist(length, dist, pos, store);
      total_length_test += length;
    } else {
      /* Step of 1 or 2: emit a single literal byte. */
      length = 1;
      ZopfliStoreLitLenDist(in[pos], 0, pos, store);
      total_length_test++;
    }


    assert(pos + length <= inend);
    /* Keep the hash current over the bytes covered by this step. */
    for (j = 1; j < length; j++) {
      ZopfliUpdateHash(in, pos + j, inend, h);
    }

    pos += length;
  }
}
+
/* Recomputes the per-symbol bit lengths (ll_symbols, d_symbols) from the
   current frequencies via entropy. */
static void CalculateStatistics(SymbolStats* stats) {
  ZopfliCalculateEntropy(stats->litlens, ZOPFLI_NUM_LL, stats->ll_symbols);
  ZopfliCalculateEntropy(stats->dists, ZOPFLI_NUM_D, stats->d_symbols);
}
+
/* Accumulates the symbol frequencies of all LZ77 entries in the store into
   stats (adding to whatever counts are already there), forces the end symbol
   to 1, and refreshes the entropy bit lengths. */
static void GetStatistics(const ZopfliLZ77Store* store, SymbolStats* stats) {
  size_t i;
  for (i = 0; i < store->size; i++) {
    if (store->dists[i] == 0) {
      /* dist 0: the litlens entry is a literal byte. */
      stats->litlens[store->litlens[i]]++;
    } else {
      /* Otherwise it is a length; count its symbol and the dist symbol. */
      stats->litlens[ZopfliGetLengthSymbol(store->litlens[i])]++;
      stats->dists[ZopfliGetDistSymbol(store->dists[i])]++;
    }
  }
  stats->litlens[256] = 1;  /* End symbol. */

  CalculateStatistics(stats);
}
+
/*
Does a single run for ZopfliLZ77Optimal. For good compression, repeated runs
with updated statistics should be performed.
s: the block state
in: the input data array
instart: where to start
inend: where to stop (not inclusive)
path: pointer to dynamically allocated memory to store the path
pathsize: pointer to the size of the dynamic path array
length_array: array of size (inend - instart) used to store lengths
costmodel: function to use as the cost model for this squeeze run
costcontext: abstract context for the costmodel function
store: place to output the LZ77 data
h, costs: caller-provided scratch buffers, reused across runs
returns the cost that was, according to the costmodel, needed to get to the end.
  This is not the actual cost.
*/
static double LZ77OptimalRun(ZopfliBlockState* s,
    const unsigned char* in, size_t instart, size_t inend,
    unsigned short** path, size_t* pathsize,
    unsigned short* length_array, CostModelFun* costmodel,
    void* costcontext, ZopfliLZ77Store* store,
    ZopfliHash* h, float* costs) {
  /* Forward pass: best-cost step lengths into length_array. */
  double cost = GetBestLengths(s, in, instart, inend, costmodel,
                costcontext, length_array, h, costs);
  /* Discard the previous run's path before tracing a new one. */
  free(*path);
  *path = 0;
  *pathsize = 0;
  TraceBackwards(inend - instart, length_array, path, pathsize);
  /* Replay the path to produce the actual LZ77 symbols. */
  FollowPath(s, in, instart, inend, *path, *pathsize, store, h);
  assert(cost < ZOPFLI_LARGE_FLOAT);
  return cost;
}
+
/*
Iterative optimal-parse LZ77: seeds statistics with a greedy pass, then runs
the squeeze forward/backward pass numiterations times, each time re-deriving
the cost model from the previous run's output. The cheapest result (by actual
deflate block size) is kept in store. After the size stabilizes, the
frequencies are randomized to escape local optima.
*/
void ZopfliLZ77Optimal(ZopfliBlockState *s,
                       const unsigned char* in, size_t instart, size_t inend,
                       int numiterations,
                       ZopfliLZ77Store* store) {
  /* Dist to get to here with smallest cost. */
  size_t blocksize = inend - instart;
  unsigned short* length_array =
      (unsigned short*)malloc(sizeof(unsigned short) * (blocksize + 1));
  unsigned short* path = 0;
  size_t pathsize = 0;
  ZopfliLZ77Store currentstore;
  ZopfliHash hash;
  ZopfliHash* h = &hash;
  SymbolStats stats, beststats, laststats;
  int i;
  float* costs = (float*)malloc(sizeof(float) * (blocksize + 1));
  double cost;
  double bestcost = ZOPFLI_LARGE_FLOAT;
  double lastcost = 0;
  /* Try randomizing the costs a bit once the size stabilizes. */
  RanState ran_state;
  int lastrandomstep = -1;  /* Iteration of the last randomization, or -1. */

  if (!costs) exit(-1);  /* Allocation failed. */
  if (!length_array) exit(-1);  /* Allocation failed. */

  InitRanState(&ran_state);
  InitStats(&stats);
  ZopfliInitLZ77Store(in, &currentstore);
  ZopfliAllocHash(ZOPFLI_WINDOW_SIZE, h);

  /* Do regular deflate, then loop multiple shortest path runs, each time using
     the statistics of the previous run. */

  /* Initial run. */
  ZopfliLZ77Greedy(s, in, instart, inend, &currentstore, h);
  GetStatistics(&currentstore, &stats);

  /* Repeat statistics with each time the cost model from the previous stat
     run. */
  for (i = 0; i < numiterations; i++) {
    ZopfliCleanLZ77Store(&currentstore);
    ZopfliInitLZ77Store(in, &currentstore);
    LZ77OptimalRun(s, in, instart, inend, &path, &pathsize,
                   length_array, GetCostStat, (void*)&stats,
                   &currentstore, h, costs);
    /* Judge by the real dynamic-tree block size, not the model's estimate. */
    cost = ZopfliCalculateBlockSize(&currentstore, 0, currentstore.size, 2);
    if (s->options->verbose_more || (s->options->verbose && cost < bestcost)) {
      fprintf(stderr, "Iteration %d: %d bit\n", i, (int) cost);
    }
    if (cost < bestcost) {
      /* Copy to the output store. */
      ZopfliCopyLZ77Store(&currentstore, store);
      CopyStats(&stats, &beststats);
      bestcost = cost;
    }
    CopyStats(&stats, &laststats);
    ClearStatFreqs(&stats);
    GetStatistics(&currentstore, &stats);
    if (lastrandomstep != -1) {
      /* This makes it converge slower but better. Do it only once the
         randomness kicks in so that if the user does few iterations, it gives a
         better result sooner. */
      AddWeighedStatFreqs(&stats, 1.0, &laststats, 0.5, &stats);
      CalculateStatistics(&stats);
    }
    if (i > 5 && cost == lastcost) {
      /* Converged: restart from the best stats seen, with noise added. */
      CopyStats(&beststats, &stats);
      RandomizeStatFreqs(&ran_state, &stats);
      CalculateStatistics(&stats);
      lastrandomstep = i;
    }
    lastcost = cost;
  }

  free(length_array);
  free(path);
  free(costs);
  ZopfliCleanLZ77Store(&currentstore);
  ZopfliCleanHash(h);
}
+
/*
Single squeeze run using the fixed-tree cost model. One pass suffices here:
the fixed tree is known in advance, so there is no cost-model feedback loop.
The block range of s is retargeted to [instart, inend).
*/
void ZopfliLZ77OptimalFixed(ZopfliBlockState *s,
                            const unsigned char* in,
                            size_t instart, size_t inend,
                            ZopfliLZ77Store* store)
{
  /* Dist to get to here with smallest cost. */
  size_t blocksize = inend - instart;
  unsigned short* length_array =
      (unsigned short*)malloc(sizeof(unsigned short) * (blocksize + 1));
  unsigned short* path = 0;
  size_t pathsize = 0;
  ZopfliHash hash;
  ZopfliHash* h = &hash;
  float* costs = (float*)malloc(sizeof(float) * (blocksize + 1));

  if (!costs) exit(-1);  /* Allocation failed. */
  if (!length_array) exit(-1);  /* Allocation failed. */

  ZopfliAllocHash(ZOPFLI_WINDOW_SIZE, h);

  s->blockstart = instart;
  s->blockend = inend;

  /* Shortest path for fixed tree This one should give the shortest possible
     result for fixed tree, no repeated runs are needed since the tree is known. */
  LZ77OptimalRun(s, in, instart, inend, &path, &pathsize,
                 length_array, GetCostFixed, 0, store, h, costs);

  free(length_array);
  free(path);
  free(costs);
  ZopfliCleanHash(h);
}
diff --git a/misc/ttf2woff/zopfli/squeeze.h b/misc/ttf2woff/zopfli/squeeze.h
new file mode 100644
index 000000000..48bb7753d
--- /dev/null
+++ b/misc/ttf2woff/zopfli/squeeze.h
@@ -0,0 +1,61 @@
+/*
+Copyright 2011 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author: lode.vandevenne@gmail.com (Lode Vandevenne)
+Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
+*/
+
+/*
+The squeeze functions do enhanced LZ77 compression by optimal parsing with a
+cost model, rather than greedily choosing the longest length or using a single
+step of lazy matching like regular implementations.
+
+Since the cost model is based on the Huffman tree that can only be calculated
+after the LZ77 data is generated, there is a chicken and egg problem, and
+multiple runs are done with updated cost models to converge to a better
+solution.
+*/
+
+#ifndef ZOPFLI_SQUEEZE_H_
+#define ZOPFLI_SQUEEZE_H_
+
+#include "lz77.h"
+
+/*
+Calculates lit/len and dist pairs for given data.
+If instart is larger than 0, it uses values before instart as starting
+dictionary.
+*/
+void ZopfliLZ77Optimal(ZopfliBlockState *s,
+ const unsigned char* in, size_t instart, size_t inend,
+ int numiterations,
+ ZopfliLZ77Store* store);
+
+/*
+Does the same as ZopfliLZ77Optimal, but optimized for the fixed tree of the
+deflate standard.
+The fixed tree never gives the best compression. But this gives the best
+possible LZ77 encoding possible with the fixed tree.
+This does not create or output any fixed tree, only LZ77 data optimized for
+using with a fixed tree.
+If instart is larger than 0, it uses values before instart as starting
+dictionary.
+*/
+void ZopfliLZ77OptimalFixed(ZopfliBlockState *s,
+ const unsigned char* in,
+ size_t instart, size_t inend,
+ ZopfliLZ77Store* store);
+
+#endif /* ZOPFLI_SQUEEZE_H_ */
diff --git a/misc/ttf2woff/zopfli/symbols.h b/misc/ttf2woff/zopfli/symbols.h
new file mode 100644
index 000000000..b49df06c7
--- /dev/null
+++ b/misc/ttf2woff/zopfli/symbols.h
@@ -0,0 +1,239 @@
+/*
+Copyright 2016 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author: lode.vandevenne@gmail.com (Lode Vandevenne)
+Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
+*/
+
+/*
+Utilities for using the lz77 symbols of the deflate spec.
+*/
+
+#ifndef ZOPFLI_SYMBOLS_H_
+#define ZOPFLI_SYMBOLS_H_
+
+/* __has_builtin available in clang */
+#ifdef __has_builtin
+# if __has_builtin(__builtin_clz)
+# define ZOPFLI_HAS_BUILTIN_CLZ
+# endif
+/* __builtin_clz available beginning with GCC 3.4 */
+#elif __GNUC__ * 100 + __GNUC_MINOR__ >= 304
+# define ZOPFLI_HAS_BUILTIN_CLZ
+#endif
+
+/* Gets the amount of extra bits for the given dist, cfr. the DEFLATE spec. */
+static int ZopfliGetDistExtraBits(int dist) {
+#ifdef ZOPFLI_HAS_BUILTIN_CLZ
+ if (dist < 5) return 0;
+ return (31 ^ __builtin_clz(dist - 1)) - 1; /* log2(dist - 1) - 1 */
+#else
+ if (dist < 5) return 0;
+ else if (dist < 9) return 1;
+ else if (dist < 17) return 2;
+ else if (dist < 33) return 3;
+ else if (dist < 65) return 4;
+ else if (dist < 129) return 5;
+ else if (dist < 257) return 6;
+ else if (dist < 513) return 7;
+ else if (dist < 1025) return 8;
+ else if (dist < 2049) return 9;
+ else if (dist < 4097) return 10;
+ else if (dist < 8193) return 11;
+ else if (dist < 16385) return 12;
+ else return 13;
+#endif
+}
+
+/* Gets value of the extra bits for the given dist, cfr. the DEFLATE spec. */
+static int ZopfliGetDistExtraBitsValue(int dist) {
+#ifdef ZOPFLI_HAS_BUILTIN_CLZ
+ if (dist < 5) {
+ return 0;
+ } else {
+ int l = 31 ^ __builtin_clz(dist - 1); /* log2(dist - 1) */
+ return (dist - (1 + (1 << l))) & ((1 << (l - 1)) - 1);
+ }
+#else
+ if (dist < 5) return 0;
+ else if (dist < 9) return (dist - 5) & 1;
+ else if (dist < 17) return (dist - 9) & 3;
+ else if (dist < 33) return (dist - 17) & 7;
+ else if (dist < 65) return (dist - 33) & 15;
+ else if (dist < 129) return (dist - 65) & 31;
+ else if (dist < 257) return (dist - 129) & 63;
+ else if (dist < 513) return (dist - 257) & 127;
+ else if (dist < 1025) return (dist - 513) & 255;
+ else if (dist < 2049) return (dist - 1025) & 511;
+ else if (dist < 4097) return (dist - 2049) & 1023;
+ else if (dist < 8193) return (dist - 4097) & 2047;
+ else if (dist < 16385) return (dist - 8193) & 4095;
+ else return (dist - 16385) & 8191;
+#endif
+}
+
+/* Gets the symbol for the given dist, cfr. the DEFLATE spec. */
+static int ZopfliGetDistSymbol(int dist) {
+#ifdef ZOPFLI_HAS_BUILTIN_CLZ
+ if (dist < 5) {
+ return dist - 1;
+ } else {
+ int l = (31 ^ __builtin_clz(dist - 1)); /* log2(dist - 1) */
+ int r = ((dist - 1) >> (l - 1)) & 1;
+ return l * 2 + r;
+ }
+#else
+ if (dist < 193) {
+ if (dist < 13) { /* dist 0..13. */
+ if (dist < 5) return dist - 1;
+ else if (dist < 7) return 4;
+ else if (dist < 9) return 5;
+ else return 6;
+ } else { /* dist 13..193. */
+ if (dist < 17) return 7;
+ else if (dist < 25) return 8;
+ else if (dist < 33) return 9;
+ else if (dist < 49) return 10;
+ else if (dist < 65) return 11;
+ else if (dist < 97) return 12;
+ else if (dist < 129) return 13;
+ else return 14;
+ }
+ } else {
+ if (dist < 2049) { /* dist 193..2049. */
+ if (dist < 257) return 15;
+ else if (dist < 385) return 16;
+ else if (dist < 513) return 17;
+ else if (dist < 769) return 18;
+ else if (dist < 1025) return 19;
+ else if (dist < 1537) return 20;
+ else return 21;
+ } else { /* dist 2049..32768. */
+ if (dist < 3073) return 22;
+ else if (dist < 4097) return 23;
+ else if (dist < 6145) return 24;
+ else if (dist < 8193) return 25;
+ else if (dist < 12289) return 26;
+ else if (dist < 16385) return 27;
+ else if (dist < 24577) return 28;
+ else return 29;
+ }
+ }
+#endif
+}
+
+/* Gets the amount of extra bits for the given length, cfr. the DEFLATE spec. */
+static int ZopfliGetLengthExtraBits(int l) {
+ static const int table[259] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0
+ };
+ return table[l];
+}
+
+/* Gets value of the extra bits for the given length, cfr. the DEFLATE spec. */
+static int ZopfliGetLengthExtraBitsValue(int l) {
+ static const int table[259] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 0,
+ 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5,
+ 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2,
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+ 29, 30, 31, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 0, 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 0
+ };
+ return table[l];
+}
+
+/*
+Gets the symbol for the given length, cfr. the DEFLATE spec.
+Returns the symbol in the range [257-285] (inclusive)
+*/
+static int ZopfliGetLengthSymbol(int l) {
+ static const int table[259] = {
+ 0, 0, 0, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 265, 266, 266, 267, 267, 268, 268,
+ 269, 269, 269, 269, 270, 270, 270, 270,
+ 271, 271, 271, 271, 272, 272, 272, 272,
+ 273, 273, 273, 273, 273, 273, 273, 273,
+ 274, 274, 274, 274, 274, 274, 274, 274,
+ 275, 275, 275, 275, 275, 275, 275, 275,
+ 276, 276, 276, 276, 276, 276, 276, 276,
+ 277, 277, 277, 277, 277, 277, 277, 277,
+ 277, 277, 277, 277, 277, 277, 277, 277,
+ 278, 278, 278, 278, 278, 278, 278, 278,
+ 278, 278, 278, 278, 278, 278, 278, 278,
+ 279, 279, 279, 279, 279, 279, 279, 279,
+ 279, 279, 279, 279, 279, 279, 279, 279,
+ 280, 280, 280, 280, 280, 280, 280, 280,
+ 280, 280, 280, 280, 280, 280, 280, 280,
+ 281, 281, 281, 281, 281, 281, 281, 281,
+ 281, 281, 281, 281, 281, 281, 281, 281,
+ 281, 281, 281, 281, 281, 281, 281, 281,
+ 281, 281, 281, 281, 281, 281, 281, 281,
+ 282, 282, 282, 282, 282, 282, 282, 282,
+ 282, 282, 282, 282, 282, 282, 282, 282,
+ 282, 282, 282, 282, 282, 282, 282, 282,
+ 282, 282, 282, 282, 282, 282, 282, 282,
+ 283, 283, 283, 283, 283, 283, 283, 283,
+ 283, 283, 283, 283, 283, 283, 283, 283,
+ 283, 283, 283, 283, 283, 283, 283, 283,
+ 283, 283, 283, 283, 283, 283, 283, 283,
+ 284, 284, 284, 284, 284, 284, 284, 284,
+ 284, 284, 284, 284, 284, 284, 284, 284,
+ 284, 284, 284, 284, 284, 284, 284, 284,
+ 284, 284, 284, 284, 284, 284, 284, 285
+ };
+ return table[l];
+}
+
+/* Gets the amount of extra bits for the given length symbol. */
+static int ZopfliGetLengthSymbolExtraBits(int s) {
+ static const int table[29] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+ 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0
+ };
+ return table[s - 257];
+}
+
+/* Gets the amount of extra bits for the given distance symbol. */
+static int ZopfliGetDistSymbolExtraBits(int s) {
+ static const int table[30] = {
+ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 9, 9, 10, 10, 11, 11, 12, 12, 13, 13
+ };
+ return table[s];
+}
+
+#endif /* ZOPFLI_SYMBOLS_H_ */
diff --git a/misc/ttf2woff/zopfli/tree.c b/misc/ttf2woff/zopfli/tree.c
new file mode 100644
index 000000000..c4575119d
--- /dev/null
+++ b/misc/ttf2woff/zopfli/tree.c
@@ -0,0 +1,101 @@
+/*
+Copyright 2011 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author: lode.vandevenne@gmail.com (Lode Vandevenne)
+Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
+*/
+
+#include "tree.h"
+
+#include <assert.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "katajainen.h"
+#include "util.h"
+
+void ZopfliLengthsToSymbols(const unsigned* lengths, size_t n, unsigned maxbits,
+ unsigned* symbols) {
+ size_t* bl_count = (size_t*)malloc(sizeof(size_t) * (maxbits + 1));
+ size_t* next_code = (size_t*)malloc(sizeof(size_t) * (maxbits + 1));
+ unsigned bits, i;
+ unsigned code;
+
+ for (i = 0; i < n; i++) {
+ symbols[i] = 0;
+ }
+
+ /* 1) Count the number of codes for each code length. Let bl_count[N] be the
+ number of codes of length N, N >= 1. */
+ for (bits = 0; bits <= maxbits; bits++) {
+ bl_count[bits] = 0;
+ }
+ for (i = 0; i < n; i++) {
+ assert(lengths[i] <= maxbits);
+ bl_count[lengths[i]]++;
+ }
+ /* 2) Find the numerical value of the smallest code for each code length. */
+ code = 0;
+ bl_count[0] = 0;
+ for (bits = 1; bits <= maxbits; bits++) {
+ code = (code + bl_count[bits-1]) << 1;
+ next_code[bits] = code;
+ }
+ /* 3) Assign numerical values to all codes, using consecutive values for all
+ codes of the same length with the base values determined at step 2. */
+ for (i = 0; i < n; i++) {
+ unsigned len = lengths[i];
+ if (len != 0) {
+ symbols[i] = next_code[len];
+ next_code[len]++;
+ }
+ }
+
+ free(bl_count);
+ free(next_code);
+}
+
+void ZopfliCalculateEntropy(const size_t* count, size_t n, double* bitlengths) {
+ static const double kInvLog2 = 1.4426950408889; /* 1.0 / log(2.0) */
+ unsigned sum = 0;
+ unsigned i;
+ double log2sum;
+ for (i = 0; i < n; ++i) {
+ sum += count[i];
+ }
+ log2sum = (sum == 0 ? log(n) : log(sum)) * kInvLog2;
+ for (i = 0; i < n; ++i) {
+ /* When the count of the symbol is 0, but its cost is requested anyway, it
+ means the symbol will appear at least once anyway, so give it the cost as if
+ its count is 1.*/
+ if (count[i] == 0) bitlengths[i] = log2sum;
+ else bitlengths[i] = log2sum - log(count[i]) * kInvLog2;
+ /* Depending on compiler and architecture, the above subtraction of two
+ floating point numbers may give a negative result very close to zero
+ instead of zero (e.g. -5.973954e-17 with gcc 4.1.2 on Ubuntu 11.4). Clamp
+ it to zero. These floating point imprecisions do not affect the cost model
+ significantly so this is ok. */
+ if (bitlengths[i] < 0 && bitlengths[i] > -1e-5) bitlengths[i] = 0;
+ assert(bitlengths[i] >= 0);
+ }
+}
+
+void ZopfliCalculateBitLengths(const size_t* count, size_t n, int maxbits,
+ unsigned* bitlengths) {
+ int error = ZopfliLengthLimitedCodeLengths(count, n, maxbits, bitlengths);
+ (void) error;
+ assert(!error);
+}
diff --git a/misc/ttf2woff/zopfli/tree.h b/misc/ttf2woff/zopfli/tree.h
new file mode 100644
index 000000000..4d6f46975
--- /dev/null
+++ b/misc/ttf2woff/zopfli/tree.h
@@ -0,0 +1,51 @@
+/*
+Copyright 2011 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author: lode.vandevenne@gmail.com (Lode Vandevenne)
+Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
+*/
+
+/*
+Utilities for creating and using Huffman trees.
+*/
+
+#ifndef ZOPFLI_TREE_H_
+#define ZOPFLI_TREE_H_
+
+#include <string.h>
+
+/*
+Calculates the bitlengths for the Huffman tree, based on the counts of each
+symbol.
+*/
+void ZopfliCalculateBitLengths(const size_t* count, size_t n, int maxbits,
+ unsigned *bitlengths);
+
+/*
+Converts a series of Huffman tree bitlengths, to the bit values of the symbols.
+*/
+void ZopfliLengthsToSymbols(const unsigned* lengths, size_t n, unsigned maxbits,
+ unsigned* symbols);
+
+/*
+Calculates the entropy of each symbol, based on the counts of each symbol. The
+result is similar to the result of ZopfliCalculateBitLengths, but with the
+actual theoritical bit lengths according to the entropy. Since the resulting
+values are fractional, they cannot be used to encode the tree specified by
+DEFLATE.
+*/
+void ZopfliCalculateEntropy(const size_t* count, size_t n, double* bitlengths);
+
+#endif /* ZOPFLI_TREE_H_ */
diff --git a/misc/ttf2woff/zopfli/util.c b/misc/ttf2woff/zopfli/util.c
new file mode 100644
index 000000000..428961c46
--- /dev/null
+++ b/misc/ttf2woff/zopfli/util.c
@@ -0,0 +1,35 @@
+/*
+Copyright 2011 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author: lode.vandevenne@gmail.com (Lode Vandevenne)
+Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
+*/
+
+#include "util.h"
+
+#include "zopfli.h"
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+void ZopfliInitOptions(ZopfliOptions* options) {
+ options->verbose = 0;
+ options->verbose_more = 0;
+ options->numiterations = 15;
+ options->blocksplitting = 1;
+ options->blocksplittinglast = 0;
+ options->blocksplittingmax = 15;
+}
diff --git a/misc/ttf2woff/zopfli/util.h b/misc/ttf2woff/zopfli/util.h
new file mode 100644
index 000000000..4b73504f9
--- /dev/null
+++ b/misc/ttf2woff/zopfli/util.h
@@ -0,0 +1,158 @@
+/*
+Copyright 2011 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author: lode.vandevenne@gmail.com (Lode Vandevenne)
+Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
+*/
+
+/*
+Several utilities, including: #defines to try different compression results,
+basic deflate specification values and generic program options.
+*/
+
+#ifndef ZOPFLI_UTIL_H_
+#define ZOPFLI_UTIL_H_
+
+#include <string.h>
+#include <stdlib.h>
+
+/* Minimum and maximum length that can be encoded in deflate. */
+#define ZOPFLI_MAX_MATCH 258
+#define ZOPFLI_MIN_MATCH 3
+
+/* Number of distinct literal/length and distance symbols in DEFLATE */
+#define ZOPFLI_NUM_LL 288
+#define ZOPFLI_NUM_D 32
+
+/*
+The window size for deflate. Must be a power of two. This should be 32768, the
+maximum possible by the deflate spec. Anything less hurts compression more than
+speed.
+*/
+#define ZOPFLI_WINDOW_SIZE 32768
+
+/*
+The window mask used to wrap indices into the window. This is why the
+window size must be a power of two.
+*/
+#define ZOPFLI_WINDOW_MASK (ZOPFLI_WINDOW_SIZE - 1)
+
+/*
+A block structure of huge, non-smart, blocks to divide the input into, to allow
+operating on huge files without exceeding memory, such as the 1GB wiki9 corpus.
+The whole compression algorithm, including the smarter block splitting, will
+be executed independently on each huge block.
+Dividing into huge blocks hurts compression, but not much relative to the size.
+Set it to 0 to disable master blocks.
+*/
+#define ZOPFLI_MASTER_BLOCK_SIZE 1000000
+
+/*
+Used to initialize costs for example
+*/
+#define ZOPFLI_LARGE_FLOAT 1e30
+
+/*
+For longest match cache. max 256. Uses huge amounts of memory but makes it
+faster. Uses this many times three bytes per single byte of the input data.
+This is so because longest match finding has to find the exact distance
+that belongs to each length for the best lz77 strategy.
+Good values: e.g. 5, 8.
+*/
+#define ZOPFLI_CACHE_LENGTH 8
+
+/*
+limit the max hash chain hits for this hash value. This has an effect only
+on files where the hash value is the same very often. On these files, this
+gives worse compression (the value should ideally be 32768, which is the
+ZOPFLI_WINDOW_SIZE, while zlib uses 4096 even for best level), but makes it
+faster on some specific files.
+Good value: e.g. 8192.
+*/
+#define ZOPFLI_MAX_CHAIN_HITS 8192
+
+/*
+Whether to use the longest match cache for ZopfliFindLongestMatch. This cache
+consumes a lot of memory but speeds it up. No effect on compression size.
+*/
+#define ZOPFLI_LONGEST_MATCH_CACHE
+
+/*
+Enable to remember amount of successive identical bytes in the hash chain for
+finding longest match
+required for ZOPFLI_HASH_SAME_HASH and ZOPFLI_SHORTCUT_LONG_REPETITIONS
+This has no effect on the compression result, and enabling it increases speed.
+*/
+#define ZOPFLI_HASH_SAME
+
+/*
+Switch to a faster hash based on the info from ZOPFLI_HASH_SAME once the
+best length so far is long enough. This is way faster for files with lots of
+identical bytes, on which the compressor is otherwise too slow. Regular files
+are unaffected or maybe a tiny bit slower.
+This has no effect on the compression result, only on speed.
+*/
+#define ZOPFLI_HASH_SAME_HASH
+
+/*
+Enable this, to avoid slowness for files which are a repetition of the same
+character more than a multiple of ZOPFLI_MAX_MATCH times. This should not affect
+the compression result.
+*/
+#define ZOPFLI_SHORTCUT_LONG_REPETITIONS
+
+/*
+Whether to use lazy matching in the greedy LZ77 implementation. This gives a
+better result of ZopfliLZ77Greedy, but the effect this has on the optimal LZ77
+varies from file to file.
+*/
+#define ZOPFLI_LAZY_MATCHING
+
+/*
+Appends value to dynamically allocated memory, doubling its allocation size
+whenever needed.
+
+value: the value to append, type T
+data: pointer to the dynamic array to append to, type T**
+size: pointer to the size of the array to append to, type size_t*. This is the
+size that you consider the array to be, not the internal allocation size.
+Precondition: allocated size of data is at least a power of two greater than or
+equal than *size.
+*/
+#ifdef __cplusplus /* C++ cannot assign void* from malloc to *data */
+#define ZOPFLI_APPEND_DATA(/* T */ value, /* T** */ data, /* size_t* */ size) {\
+ if (!((*size) & ((*size) - 1))) {\
+ /*double alloc size if it's a power of two*/\
+ void** data_void = reinterpret_cast<void**>(data);\
+ *data_void = (*size) == 0 ? malloc(sizeof(**data))\
+ : realloc((*data), (*size) * 2 * sizeof(**data));\
+ }\
+ (*data)[(*size)] = (value);\
+ (*size)++;\
+}
+#else /* C gives problems with strict-aliasing rules for (void**) cast */
+#define ZOPFLI_APPEND_DATA(/* T */ value, /* T** */ data, /* size_t* */ size) {\
+ if (!((*size) & ((*size) - 1))) {\
+ /*double alloc size if it's a power of two*/\
+ (*data) = (*size) == 0 ? malloc(sizeof(**data))\
+ : realloc((*data), (*size) * 2 * sizeof(**data));\
+ }\
+ (*data)[(*size)] = (value);\
+ (*size)++;\
+}
+#endif
+
+
+#endif /* ZOPFLI_UTIL_H_ */
diff --git a/misc/ttf2woff/zopfli/zlib_container.c b/misc/ttf2woff/zopfli/zlib_container.c
new file mode 100644
index 000000000..130ffc793
--- /dev/null
+++ b/misc/ttf2woff/zopfli/zlib_container.c
@@ -0,0 +1,79 @@
+/*
+Copyright 2013 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author: lode.vandevenne@gmail.com (Lode Vandevenne)
+Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
+*/
+
+#include "zlib_container.h"
+#include "util.h"
+
+#include <stdio.h>
+
+#include "deflate.h"
+
+
+/* Calculates the adler32 checksum of the data */
+static unsigned adler32(const unsigned char* data, size_t size)
+{
+ static const unsigned sums_overflow = 5550;
+ unsigned s1 = 1;
+ unsigned s2 = 1 >> 16;
+
+ while (size > 0) {
+ size_t amount = size > sums_overflow ? sums_overflow : size;
+ size -= amount;
+ while (amount > 0) {
+ s1 += (*data++);
+ s2 += s1;
+ amount--;
+ }
+ s1 %= 65521;
+ s2 %= 65521;
+ }
+
+ return (s2 << 16) | s1;
+}
+
+void ZopfliZlibCompress(const ZopfliOptions* options,
+ const unsigned char* in, size_t insize,
+ unsigned char** out, size_t* outsize) {
+ unsigned char bitpointer = 0;
+ unsigned checksum = adler32(in, (unsigned)insize);
+ unsigned cmf = 120; /* CM 8, CINFO 7. See zlib spec.*/
+ unsigned flevel = 3;
+ unsigned fdict = 0;
+ unsigned cmfflg = 256 * cmf + fdict * 32 + flevel * 64;
+ unsigned fcheck = 31 - cmfflg % 31;
+ cmfflg += fcheck;
+
+ ZOPFLI_APPEND_DATA(cmfflg / 256, out, outsize);
+ ZOPFLI_APPEND_DATA(cmfflg % 256, out, outsize);
+
+ ZopfliDeflate(options, 2 /* dynamic block */, 1 /* final */,
+ in, insize, &bitpointer, out, outsize);
+
+ ZOPFLI_APPEND_DATA((checksum >> 24) % 256, out, outsize);
+ ZOPFLI_APPEND_DATA((checksum >> 16) % 256, out, outsize);
+ ZOPFLI_APPEND_DATA((checksum >> 8) % 256, out, outsize);
+ ZOPFLI_APPEND_DATA(checksum % 256, out, outsize);
+
+ if (options->verbose) {
+ fprintf(stderr,
+ "Original Size: %d, Zlib: %d, Compression: %f%% Removed\n",
+ (int)insize, (int)*outsize,
+ 100.0 * (double)(insize - *outsize) / (double)insize);
+ }
+}
diff --git a/misc/ttf2woff/zopfli/zlib_container.h b/misc/ttf2woff/zopfli/zlib_container.h
new file mode 100644
index 000000000..9ddfb9c1d
--- /dev/null
+++ b/misc/ttf2woff/zopfli/zlib_container.h
@@ -0,0 +1,50 @@
+/*
+Copyright 2013 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author: lode.vandevenne@gmail.com (Lode Vandevenne)
+Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
+*/
+
+#ifndef ZOPFLI_ZLIB_H_
+#define ZOPFLI_ZLIB_H_
+
+/*
+Functions to compress according to the Zlib specification.
+*/
+
+#include "zopfli.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+Compresses according to the zlib specification and append the compressed
+result to the output.
+
+options: global program options
+out: pointer to the dynamic output array to which the result is appended. Must
+ be freed after use.
+outsize: pointer to the dynamic output array size.
+*/
+void ZopfliZlibCompress(const ZopfliOptions* options,
+ const unsigned char* in, size_t insize,
+ unsigned char** out, size_t* outsize);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif /* ZOPFLI_ZLIB_H_ */
diff --git a/misc/ttf2woff/zopfli/zopfli.h b/misc/ttf2woff/zopfli/zopfli.h
new file mode 100644
index 000000000..c079662aa
--- /dev/null
+++ b/misc/ttf2woff/zopfli/zopfli.h
@@ -0,0 +1,94 @@
+/*
+Copyright 2011 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Author: lode.vandevenne@gmail.com (Lode Vandevenne)
+Author: jyrki.alakuijala@gmail.com (Jyrki Alakuijala)
+*/
+
+#ifndef ZOPFLI_ZOPFLI_H_
+#define ZOPFLI_ZOPFLI_H_
+
+#include <stddef.h>
+#include <stdlib.h> /* for size_t */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+Options used throughout the program.
+*/
+typedef struct ZopfliOptions {
+ /* Whether to print output */
+ int verbose;
+
+ /* Whether to print more detailed output */
+ int verbose_more;
+
+ /*
+ Maximum amount of times to rerun forward and backward pass to optimize LZ77
+ compression cost. Good values: 10, 15 for small files, 5 for files over
+ several MB in size or it will be too slow.
+ */
+ int numiterations;
+
+ /*
+ If true, splits the data in multiple deflate blocks with optimal choice
+ for the block boundaries. Block splitting gives better compression. Default:
+ true (1).
+ */
+ int blocksplitting;
+
+ /*
+ No longer used, left for compatibility.
+ */
+ int blocksplittinglast;
+
+ /*
+ Maximum amount of blocks to split into (0 for unlimited, but this can give
+ extreme results that hurt compression on some files). Default value: 15.
+ */
+ int blocksplittingmax;
+} ZopfliOptions;
+
+/* Initializes options with default values. */
+void ZopfliInitOptions(ZopfliOptions* options);
+
+/* Output format */
+typedef enum {
+ ZOPFLI_FORMAT_GZIP,
+ ZOPFLI_FORMAT_ZLIB,
+ ZOPFLI_FORMAT_DEFLATE
+} ZopfliFormat;
+
+/*
+Compresses according to the given output format and appends the result to the
+output.
+
+options: global program options
+output_type: the output format to use
+out: pointer to the dynamic output array to which the result is appended. Must
+ be freed after use
+outsize: pointer to the dynamic output array size
+*/
+void ZopfliCompress(const ZopfliOptions* options, ZopfliFormat output_type,
+ const unsigned char* in, size_t insize,
+ unsigned char** out, size_t* outsize);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif /* ZOPFLI_ZOPFLI_H_ */
diff --git a/misc/ufo-color-glyphs.py b/misc/ufo-color-glyphs.py
new file mode 100755
index 000000000..9ec050853
--- /dev/null
+++ b/misc/ufo-color-glyphs.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+# encoding: utf8
+#
+# Grab http://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt
+#
+from __future__ import print_function
+import os, sys
+from argparse import ArgumentParser
+from robofab.objects.objectsRF import OpenFont
+from unicode_util import parseUnicodeDataFile, MainCategories as UniMainCategories
+
+lightBlueColor = (0.86, 0.92, 0.97, 1.0)
+lightTealColor = (0.8, 0.94, 0.95, 1.0)
+lightYellowColor = (0.97, 0.95, 0.83, 1.0)
+lightPurpleColor = (0.93, 0.9, 0.98, 1.0)
+lightGreyColor = (0.94, 0.94, 0.94, 1.0)
+mediumGreyColor = (0.87, 0.87, 0.87, 1.0)
+lightGreenColor = (0.89, 0.96, 0.92, 1.0)
+mediumGreenColor = (0.77, 0.95, 0.76, 1.0)
+lightRedColor = (0.98, 0.89, 0.89, 1.0)
+lightOrangeColor = (1.0, 0.89, 0.82, 1.0)
+redColor = (1, 0.3, 0.3, 1)
+
+colorsByGlyphName = [
+ (set('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'), lightBlueColor), # light blue 1
+]
+
+colorsByUCMainCategory = {
+ # UniMainCategories.Letter: (1, 1, 1, 1),
+ UniMainCategories.Mark: lightRedColor,
+ UniMainCategories.Punctuation: lightGreyColor,
+ UniMainCategories.Format: lightGreyColor,
+ UniMainCategories.Number: lightGreenColor,
+ UniMainCategories.Symbol: lightTealColor,
+ UniMainCategories.Separator: lightPurpleColor,
+ UniMainCategories.Control: redColor,
+ UniMainCategories.Surrogate: redColor,
+ UniMainCategories.PrivateUse: lightYellowColor,
+ UniMainCategories.Unassigned: lightYellowColor,
+ UniMainCategories.Other: lightOrangeColor,
+}
+
+
+def colorForGlyph(name, unicodes, ucd):
+ for nameSet, color in colorsByGlyphName:
+ if name in nameSet:
+ return color
+
+ for uc in unicodes:
+ cp = ucd.get(uc)
+ if cp is None:
+ continue
+ return colorsByUCMainCategory.get(cp.mainCategory)
+
+ if len(unicodes) == 0:
+ if name.find('.cn') != -1:
+ # pure component
+ return mediumGreenColor
+ else:
+ # precomposed
+ return mediumGreyColor
+
+ return None
+
+
+def main():
+ argparser = ArgumentParser(
+ description='Set robofont color marks on glyphs based on unicode categories')
+
+ argparser.add_argument(
+ '-dry', dest='dryRun', action='store_const', const=True, default=False,
+ help='Do not modify anything, but instead just print what would happen.')
+
+ argparser.add_argument(
+ '-ucd', dest='ucdFile', metavar='<file>', type=str,
+ help='UnicodeData.txt file from http://www.unicode.org/')
+
+ argparser.add_argument(
+ 'fontPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts to update')
+
+ args = argparser.parse_args()
+ dryRun = args.dryRun
+ markLibKey = 'com.typemytype.robofont.mark'
+
+ ucd = {}
+ if args.ucdFile:
+ ucd = parseUnicodeDataFile(args.ucdFile)
+
+ for fontPath in args.fontPaths:
+ font = OpenFont(fontPath)
+ for g in font:
+ rgba = colorForGlyph(g.name, g.unicodes, ucd)
+ if rgba is None:
+ if markLibKey in g.lib:
+ del g.lib[markLibKey]
+ else:
+ g.lib[markLibKey] = [float(n) for n in rgba]
+
+ print('Write', fontPath)
+ if not dryRun:
+ font.save()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/misc/ufocompile b/misc/ufocompile
new file mode 100755
index 000000000..2f84593c4
--- /dev/null
+++ b/misc/ufocompile
@@ -0,0 +1,163 @@
+#!/usr/bin/env python
+import os
+import sys
+import argparse
+import logging
+import subprocess
+
+from robofab.objects.objectsRF import RPoint
+from robofab.world import OpenFont
+from fontbuild.Build import FontProject
+from fontbuild.mix import Master
+from fontbuild.mix import Mix
+
# Family name used in the style strings passed to generateFont below.
FAMILYNAME = "Interface"
# Repository root: parent of the directory containing this script.
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
+
def extractSpecializedGlyphs(masterFont):
  """Remove '<name>.specz.<category>' glyphs from masterFont and return them.

  Glyphs whose name contains '.specz.' are style-specific overrides
  ("specializations"). They are stripped out of the master's glyph stores
  and returned as { category: { baseName: glyph } } so that mixing code can
  substitute them when generating the matching style.
  """
  specializationSuffix = '.specz.'
  glyphSpecializations = {}   # category -> { base name -> specialized glyph }
  specializedGlyphNames = []  # original '.specz.' names to delete afterwards

  font = masterFont.font
  for g in font:
    p = g.name.find(specializationSuffix)
    if p == -1:
      continue
    name = g.name[:p]
    category = g.name[p + len(specializationSuffix):]
    g2 = g.copy()
    g2.name = name
    if name in font:
      # Inherit the unicode mapping from the glyph being specialized.
      g2.unicodes = font[name].unicodes
    # setdefault replaces the original `if not category in ...` two-branch
    # dict initialization.
    glyphSpecializations.setdefault(category, {})[name] = g2
    specializedGlyphNames.append(g.name)

  # Delete the specialized source glyphs from both glyph stores
  # (deferred so we do not mutate `font` while iterating it).
  ffont = masterFont.ffont
  for name in specializedGlyphNames:
    del ffont.glyphs[name]
    font.removeGlyph(name)

  return glyphSpecializations
+
+
def readVersionControlTag(dir):
  """Return the abbreviated git commit hash for `dir`, or '' if unavailable.

  Best-effort: a missing git binary or a directory that is not a git
  checkout yields '' rather than raising.
  """
  try:
    return subprocess.check_output(
      ['git', '-C', dir, 'rev-parse', '--short', 'HEAD'],
      stderr=subprocess.STDOUT,  # keep git's "fatal: ..." noise off stderr
      shell=False).strip()
  except (subprocess.CalledProcessError, OSError):
    # Narrowed from a bare `except:` so unrelated errors (e.g.
    # KeyboardInterrupt) are no longer silently swallowed.
    return ''
+
+
# Silence warnings from fontTools.misc.fixedTools that are harmless and
# caused by the ufo2ft module.
logging.getLogger('fontTools.misc.fixedTools').setLevel(logging.ERROR)

# Default output directory is <repo>/build/tmp.
default_out_dir = os.path.join(BASEDIR,'build','tmp')
srcDir = os.path.join(BASEDIR, 'src')

argparser = argparse.ArgumentParser(description='Build TTF and OTF font files from UFO sources.')

argparser.add_argument('styles', metavar='<style>', type=str, nargs='*',
                       help='Build specific styles. Omit to build all.')

argparser.add_argument('--otf', dest='otf', action='store_const',
                       const=True, default=False,
                       help='Build OTF files')

argparser.add_argument('--no-ttf', dest='no_ttf', action='store_const',
                       const=True, default=False,
                       help='Do not build TTF files')

argparser.add_argument('--out', dest='out', metavar='<dir>', type=str,
                       default=default_out_dir,
                       help='Write output to <dir> instead of the default (%r)' % default_out_dir)

args = argparser.parse_args()
# Style names are matched case-insensitively; no styles given means "all".
styles = [s.lower() for s in args.styles]
ALL = len(styles) == 0

# version control tag (abbreviated git commit hash), if any
buildTag = readVersionControlTag(BASEDIR)

# Load masters. Regular is always needed; Bold is only loaded when a style
# derived from it was requested.
rg = Master("%s/src/Interface-Regular.ufo" % BASEDIR)

bd = None
if ALL or 'bold' in styles or 'bolditalic' in styles or 'medium' in styles or 'mediumitalic' in styles:
  bd = Master("%s/src/Interface-Bold.ufo" % BASEDIR)

# th = None
# thFont = None
# if ALL or 'thin' in styles or 'thinitalic' in styles or 'light' in styles or 'lightitalic' in styles:
#   th = Master("%s/src/Interface-Thin.ufo" % BASEDIR)

# Pull '.specz.' override glyphs out of the Regular master (see
# extractSpecializedGlyphs above) so Mix2 can substitute them per style.
glyphSpecializations = extractSpecializedGlyphs(rg)
+
+
+class Mix2(Mix):
+ def __init__(self, masters, v, glyphSpecializations=None):
+ Mix.__init__(self, masters, v)
+ self.glyphSpecializations = glyphSpecializations
+
+ def mixGlyphs(self, gname):
+ if self.glyphSpecializations is not None:
+ specializedGlyph = self.glyphSpecializations.get(gname)
+ if specializedGlyph is not None:
+ print 'mixglyph using specialized', gname
+ return specializedGlyph
+ return Mix.mixGlyphs(self, gname)
+
+
# Configure the build project. buildTag (the abbreviated git revision) is
# handed to FontProject — presumably it ends up in the generated font
# metadata; confirm in fontbuild.Build.FontProject.
proj = FontProject(rg.font, BASEDIR, os.path.join(srcDir,'fontbuild.cfg'), buildTag=buildTag)
proj.builddir = args.out

if args.otf:
  proj.buildOTF = True


# One generateFont call per requested style. NOTE(review): the second
# argument looks like "family/style name/sub-style/abbreviation" — confirm
# against FontProject.generateFont. Italic styles pass stemWidth,
# italicMeanYCenter and italicNarrowAmount through to the italicizing code.
if ALL or 'regular' in styles:
  proj.generateFont(rg.font, "%s/Regular/Regular/Rg" % FAMILYNAME)

if ALL or 'regularitalic' in styles:
  proj.generateFont(rg.font, "%s/Regular Italic/Italic/Rg" % FAMILYNAME,
    italic=True, stemWidth=232, italicMeanYCenter=-825, italicNarrowAmount=1)

# Medium mixes the Regular and Bold masters at v=0.35 (presumably 35% of
# the way toward Bold — confirm Mix semantics), with the 'medium' glyph
# specializations substituted in.
if ALL or 'medium' in styles:
  proj.generateFont(
    Mix2([rg, bd], 0.35, glyphSpecializations['medium']),
    "%s/Medium/Regular/Rg" % FAMILYNAME)

if ALL or 'mediumitalic' in styles:
  proj.generateFont(
    Mix2([rg, bd], 0.35, glyphSpecializations['medium']),
    "%s/Medium Italic/Italic/Rg" % FAMILYNAME,
    italic=True, stemWidth=256, italicMeanYCenter=-825, italicNarrowAmount=1)

if ALL or 'bold' in styles:
  proj.generateFont(bd.font, "%s/Bold/Bold/Rg" % FAMILYNAME)

if ALL or 'bolditalic' in styles:
  proj.generateFont(bd.font, "%s/Bold Italic/Bold Italic/Rg" % FAMILYNAME,
    italic=True, stemWidth=290, italicMeanYCenter=-825, italicNarrowAmount=1)

# if ALL or 'light' in styles:
#   proj.generateFont(Mix([th, rg], 0.45), "%s/Light/Regular/Lt" % FAMILYNAME)

# if ALL or 'lightitalic' in styles:
#   proj.generateFont(rgFont, "%s/Light Italic/Italic/Lt" % FAMILYNAME,
#     italic=True, stemWidth=120)

# proj.generateFont(th.font, "%s/Thin/Regular/Th" % FAMILYNAME)
# proj.generateFont(th.font, "%s/Thin Italic/Italic/Th" % FAMILYNAME,
#   italic=True, stemWidth=80)

# generate TTFs (on by default; disabled with --no-ttf)
if args.no_ttf == False:
  proj.generateTTFs()
diff --git a/misc/unicode_util.py b/misc/unicode_util.py
new file mode 100644
index 000000000..18196e87e
--- /dev/null
+++ b/misc/unicode_util.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python
+# encoding: utf8
+
class MainCategories:
  """Symbolic names for the main (top-level) Unicode general categories."""
  Letter = 'Letter'
  Mark = 'Mark'
  Number = 'Number'
  Punctuation = 'Punctuation'
  Symbol = 'Symbol'
  Separator = 'Separator'
  Control = 'Control'
  Format = 'Format'
  Surrogate = 'Surrogate'
  PrivateUse = 'Private_Use'
  Unassigned = 'Unassigned'
  Other = 'Other'

# Two-letter Unicode general-category code -> (long name, main category).
# See http://unicode.org/reports/tr44/#General_Category_Values
GeneralCategories = {
  'Lu': ('Uppercase_Letter', MainCategories.Letter),
  'Ll': ('Lowercase_Letter', MainCategories.Letter),
  'Lt': ('Titlecase_Letter', MainCategories.Letter),
  'LC': ('Cased_Letter', MainCategories.Letter),
  'Lm': ('Modifier_Letter', MainCategories.Letter),
  'Lo': ('Other_Letter', MainCategories.Letter),
  'L':  ('Letter', MainCategories.Letter),
  'Mn': ('Nonspacing_Mark', MainCategories.Mark),
  'Mc': ('Spacing_Mark', MainCategories.Mark),
  'Me': ('Enclosing_Mark', MainCategories.Mark),
  'M':  ('Mark', MainCategories.Mark),
  'Nd': ('Decimal_Number', MainCategories.Number),
  'Nl': ('Letter_Number', MainCategories.Number),
  'No': ('Other_Number', MainCategories.Number),
  'N':  ('Number', MainCategories.Number),
  'Pc': ('Connector_Punctuation', MainCategories.Punctuation),
  'Pd': ('Dash_Punctuation', MainCategories.Punctuation),
  'Ps': ('Open_Punctuation', MainCategories.Punctuation),
  'Pe': ('Close_Punctuation', MainCategories.Punctuation),
  'Pi': ('Initial_Punctuation', MainCategories.Punctuation),
  'Pf': ('Final_Punctuation', MainCategories.Punctuation),
  'Po': ('Other_Punctuation', MainCategories.Punctuation),
  'P':  ('Punctuation', MainCategories.Punctuation),
  'Sm': ('Math_Symbol', MainCategories.Symbol),
  'Sc': ('Currency_Symbol', MainCategories.Symbol),
  'Sk': ('Modifier_Symbol', MainCategories.Symbol),
  'So': ('Other_Symbol', MainCategories.Symbol),
  'S':  ('Symbol', MainCategories.Symbol),
  'Zs': ('Space_Separator', MainCategories.Separator),
  'Zl': ('Line_Separator', MainCategories.Separator),
  'Zp': ('Paragraph_Separator', MainCategories.Separator),
  'Z':  ('Separator', MainCategories.Separator),
  'Cc': ('Control', MainCategories.Control),
  'Cf': ('Format', MainCategories.Format),
  'Cs': ('Surrogate', MainCategories.Surrogate),
  'Co': ('Private_Use', MainCategories.PrivateUse),
  'Cn': ('Unassigned', MainCategories.Unassigned),
  'C':  ('Other', MainCategories.Other),
}


class Codepoint:
  """One record (line) of UnicodeData.txt.

  Only the fields this module needs are retained. Numeric fields are kept
  as the raw strings from the file ('' when absent).
  """
  def __init__(self, v):
    # v is the raw semicolon-separated field list of one UnicodeData.txt
    # line; may raise ValueError/IndexError on malformed input.
    self.codePoint = int(v[0], 16)
    self.name = v[1]

    self.category = v[2]  # two-letter general category code, e.g. 'Lu'
    longName, mainCategory = GeneralCategories.get(
      self.category, ('', MainCategories.Other))
    self.categoryName = longName
    self.mainCategory = mainCategory

    self.decDigitValue = v[6]  # decimal digit value as string, '' if none
    self.numValue = v[8]       # numeric value as string, '' if none

  # Category predicates. Changed `is` -> `==`: identity comparison against
  # string constants only worked because the same module-level objects were
  # shared; equality is the correct operation.
  def isLetter(self): return self.mainCategory == MainCategories.Letter
  def isMark(self): return self.mainCategory == MainCategories.Mark
  def isNumber(self): return self.mainCategory == MainCategories.Number
  def isPunctuation(self): return self.mainCategory == MainCategories.Punctuation
  def isSymbol(self): return self.mainCategory == MainCategories.Symbol
  def isSeparator(self): return self.mainCategory == MainCategories.Separator
  def isControl(self): return self.mainCategory == MainCategories.Control
  def isFormat(self): return self.mainCategory == MainCategories.Format
  def isSurrogate(self): return self.mainCategory == MainCategories.Surrogate
  def isPrivateUse(self): return self.mainCategory == MainCategories.PrivateUse
  def isUnassigned(self): return self.mainCategory == MainCategories.Unassigned
  def isOther(self): return self.mainCategory == MainCategories.Other


# http://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt
def parseUnicodeDataFile(ucdFile):
  """Parse a UnicodeData.txt file into { codePoint:int => Codepoint }.

  Blank and malformed lines are skipped. See
  http://unicode.org/reports/tr44/#UnicodeData.txt for the field layout,
  e.g. "001D;<control>;Cc;0;B;;;;;N;INFORMATION SEPARATOR THREE;;;;".
  """
  ucd = {}
  with open(ucdFile, 'r') as f:
    for line in f:
      # Strip the trailing newline first: lines read from a file are never
      # empty strings, so the original `len(line) == 0` test could not fire.
      line = line.strip()
      if not line or line.startswith('#'):
        continue
      v = line.split(';')
      if len(v) < 10:
        continue
      try:
        cp = Codepoint(v)
      except (ValueError, IndexError):
        # Narrowed from a bare `except: pass`: only field-parsing problems
        # mean "skip this line"; anything else is a real bug.
        continue
      ucd[cp.codePoint] = cp
  return ucd