author     Rasmus Andersson <rasmus@notion.se>    2018-09-03 22:55:49 +0300
committer  Rasmus Andersson <rasmus@notion.se>    2018-09-03 22:55:49 +0300
commit     c833e252c925e8dd68108660710ca835d95daa6f (patch)
tree       6b2e28264ed45efd7f054e453b622098d0d875b8 /misc/tools
parent     8c1a4c181ef12000179dfec541f1af87e9b03122 (diff)
download   inter-c833e252c925e8dd68108660710ca835d95daa6f.tar.xz
Major overhaul, moving from UFO2 to Glyphs and UFO3, plus a brand new and much simpler fontbuild
Diffstat (limited to 'misc/tools')
-rwxr-xr-x  misc/tools/cleanup_kerning.py             354
-rwxr-xr-x  misc/tools/download-count.py               19
-rwxr-xr-x  misc/tools/enrich-glypnames.py            650
-rwxr-xr-x  misc/tools/fixup-diacritics.py            167
-rwxr-xr-x  misc/tools/fixup-features.py              335
-rwxr-xr-x  misc/tools/fixup-kerning.py               362
-rwxr-xr-x  misc/tools/fontinfo.py                    506
-rwxr-xr-x  misc/tools/gen-glyphinfo.py               263
-rwxr-xr-x  misc/tools/gen-glyphorder.py              102
-rw-r--r--  misc/tools/gen-kern.py                     37
-rwxr-xr-x  misc/tools/gen-metrics-and-svgs.py        449
-rw-r--r--  misc/tools/gen-num-pairs.js                10
-rwxr-xr-x  misc/tools/gen-tnum.py                     77
-rwxr-xr-x  misc/tools/glyf-props.py                   63
-rwxr-xr-x  misc/tools/glyphcheck.py                   45
-rwxr-xr-x  misc/tools/kernsample.py                  235
-rw-r--r--  misc/tools/restore-diacritics-kerning.py  431
-rwxr-xr-x  misc/tools/rewrite-glyphorder.py          305
-rwxr-xr-x  misc/tools/rmglyph.py                     548
-rwxr-xr-x  misc/tools/show-changes.py                115
-rwxr-xr-x  misc/tools/svgsync.py                     438
-rwxr-xr-x  misc/tools/svgsync2.py                    626
-rwxr-xr-x  misc/tools/ufo-color-glyphs.py            105
-rw-r--r--  misc/tools/unicode_util.py                104
-rwxr-xr-x  misc/tools/versionize-css.py               36
25 files changed, 6382 insertions, 0 deletions
diff --git a/misc/tools/cleanup_kerning.py b/misc/tools/cleanup_kerning.py
new file mode 100755
index 000000000..e9dce5771
--- /dev/null
+++ b/misc/tools/cleanup_kerning.py
@@ -0,0 +1,354 @@
+#!/usr/bin/env python
+# encoding: utf8
+from __future__ import print_function
+import os, sys, plistlib, re
+from collections import OrderedDict
+from ConfigParser import RawConfigParser
+from argparse import ArgumentParser
+from fontTools import ttLib
+from robofab.objects.objectsRF import OpenFont
+
+
+# Regex matching "default" glyph names, like "uni2043" and "u01C5"
+uniNameRe = re.compile(r'^u(?:ni)?([0-9A-F]{4,8})$')
+
+
+def unicodeForDefaultGlyphName(glyphName):
+ m = uniNameRe.match(glyphName)
+ if m is not None:
+ try:
+ return int(m.group(1), 16)
+ except:
+ pass
+ return None
+
+
+def canonicalGlyphName(glyphName, uc2names):
+ uc = unicodeForDefaultGlyphName(glyphName)
+ if uc is not None:
+ names = uc2names.get(uc)
+ if names is not None and len(names) > 0:
+ return names[0]
+ return glyphName
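+
+# Example: canonicalGlyphName('uni2126', {0x2126: ['Omega', 'Omegagreek']}) -> 'Omega';
+# names that are not "default" names are returned unchanged.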
+
+
+
+def parseGlyphComposition(composite):
+ c = composite.split("=")
+ d = c[1].split("/")
+ glyphName = d[0]
+ if len(d) == 1:
+ offset = [0, 0]
+ else:
+ offset = [int(i) for i in d[1].split(",")]
+ accentString = c[0]
+ accents = accentString.split("+")
+ baseName = accents.pop(0)
+ accentNames = [i.split(":") for i in accents]
+ return (glyphName, baseName, accentNames, offset)
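+
+# Example: parseGlyphComposition("a+acute:top=aacute/0,40")
+#          -> ('aacute', 'a', [['acute', 'top']], [0, 40])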
+
+
+def loadGlyphCompositions(filename): # { glyphName => (baseName, accentNames, offset) }
+ compositions = OrderedDict()
+ with open(filename, 'r') as f:
+ for line in f:
+ line = line.strip()
+ if len(line) > 0 and line[0] != '#':
+ glyphName, baseName, accentNames, offset = parseGlyphComposition(line)
+ compositions[glyphName] = (baseName, accentNames, offset)
+ return compositions
+
+
+def loadAGL(filename): # -> { 2126: 'Omega', ... }
+ m = {}
+ with open(filename, 'r') as f:
+ for line in f:
+ # Omega;2126
+ # dalethatafpatah;05D3 05B2 # higher-level combinations; ignored
+ line = line.strip()
+ if len(line) > 0 and line[0] != '#':
+ name, uc = tuple([c.strip() for c in line.split(';')])
+ if uc.find(' ') == -1:
+ # it's a 1:1 mapping
+ m[int(uc, 16)] = name
+ return m
+
+
+def loadLocalNamesDB(fonts, agl, diacriticComps):
+ uc2names = None # { 2126: ['Omega', ...], ...}
+ allNames = set() # set('Omega', ...)
+
+ for font in fonts:
+ _uc2names = font.getCharacterMapping() # { 2126: ['Omega', ...], ...}
+ if uc2names is None:
+ uc2names = _uc2names
+ else:
+ for uc, _names in _uc2names.iteritems():
+ names = uc2names.setdefault(uc, [])
+ for name in _names:
+ if name not in names:
+ names.append(name)
+ for g in font:
+ allNames.add(g.name)
+
+ # agl { 2126: 'Omega', ...} -> { 'Omega': [2126, ...], ...}
+ aglName2Ucs = {}
+ for uc, name in agl.iteritems():
+ aglName2Ucs.setdefault(name, []).append(uc)
+
+ for glyphName, comp in diacriticComps.iteritems():
+ aglUCs = aglName2Ucs.get(glyphName)
+ if aglUCs is None:
+ uc = unicodeForDefaultGlyphName(glyphName)
+ if uc is not None:
+ glyphName2 = agl.get(uc)
+ if glyphName2 is not None:
+ glyphName = glyphName2
+ names = uc2names.setdefault(uc, [])
+ if glyphName not in names:
+ names.append(glyphName)
+ allNames.add(glyphName)
+ else:
+ allNames.add(glyphName)
+ for uc in aglUCs:
+ names = uc2names.get(uc, [])
+ if glyphName not in names:
+ names.append(glyphName)
+ uc2names[uc] = names
+
+ name2ucs = {} # { 'Omega': [2126, ...], ...}
+ for uc, names in uc2names.iteritems():
+ for name in names:
+ name2ucs.setdefault(name, set()).add(uc)
+
+ return uc2names, name2ucs, allNames
+
+
+# def getNameToGroupsMap(groups): # => { glyphName => set(groupName) }
+# nameMap = {}
+# for groupName, glyphNames in groups.iteritems():
+# for glyphName in glyphNames:
+# nameMap.setdefault(glyphName, set()).add(groupName)
+# return nameMap
+
+
+# def inspectKerning(kerning):
+# leftIndex = {} # { glyph-name => <ref to plist right-hand side dict> }
+# rightIndex = {} # { glyph-name => [(left-hand-side-name, kernVal), ...] }
+# rightGroupIndex = {} # { group-name => [(left-hand-side-name, kernVal), ...] }
+# for leftName, right in kerning.iteritems():
+# if leftName[0] != '@':
+# leftIndex[leftName] = right
+# for rightName, kernVal in right.iteritems():
+# if rightName[0] != '@':
+# rightIndex.setdefault(rightName, []).append((leftName, kernVal))
+# else:
+# rightGroupIndex.setdefault(rightName, []).append((leftName, kernVal))
+# return leftIndex, rightIndex, rightGroupIndex
+
+
+class RefTracker:
+ def __init__(self):
+ self.refs = {}
+
+ def incr(self, name):
+ self.refs[name] = self.refs.get(name, 0) + 1
+
+ def decr(self, name): # => bool hasNoRefs
+ r = self.refs.get(name)
+
+ if r is None:
+ raise Exception('decr untracked ref ' + repr(name))
+
+ if r < 1:
+ raise Exception('decr already zero ref ' + repr(name))
+
+ if r == 1:
+ del self.refs[name]
+ return True
+
+ self.refs[name] = r - 1
+
+ def __contains__(self, name):
+ return name in self.refs
+
+
+def main(argv=None):
+ argparser = ArgumentParser(description='Remove unused kerning')
+
+ argparser.add_argument(
+ '-dry', dest='dryRun', action='store_const', const=True, default=False,
+ help='Do not modify anything, but instead just print what would happen.')
+
+ argparser.add_argument(
+ 'fontPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts to update')
+
+ args = argparser.parse_args(argv)
+ dryRun = args.dryRun
+
+ agl = loadAGL('src/glyphlist.txt') # { 2126: 'Omega', ... }
+ diacriticComps = loadGlyphCompositions('src/diacritics.txt') # {glyphName => (baseName, a, o)}
+
+ for fontPath in args.fontPaths:
+ print(fontPath)
+
+ groupsFilename = os.path.join(fontPath, 'groups.plist')
+ kerningFilename = os.path.join(fontPath, 'kerning.plist')
+
+ groups = plistlib.readPlist(groupsFilename) # { groupName => [glyphName] }
+ kerning = plistlib.readPlist(kerningFilename) # { leftName => {rightName => kernVal} }
+
+ font = OpenFont(fontPath)
+ uc2names, name2ucs, allNames = loadLocalNamesDB([font], agl, diacriticComps)
+
+ # start with eliminating non-existent glyphs from groups and completely
+ # eliminate groups with all-dead glyphs.
+ eliminatedGroups = set()
+ for groupName, glyphNames in list(groups.items()):
+ glyphNames2 = []
+ for name in glyphNames:
+ if name in allNames:
+ glyphNames2.append(name)
+ else:
+ name2 = canonicalGlyphName(name, uc2names)
+ if name2 != name and name2 in allNames:
+ print('group: rename glyph', name, '->', name2)
+ glyphNames2.append(name2)
+
+ if len(glyphNames2) == 0:
+ print('group: eliminate', groupName)
+ eliminatedGroups.add(groupName)
+ del groups[groupName]
+ elif len(glyphNames2) != len(glyphNames):
+ print('group: shrink', groupName)
+ groups[groupName] = glyphNames2
+
+ # now eliminate kerning
+ groupRefs = RefTracker() # tracks group references, so we can eliminate unreachable ones
+
+ for leftName, right in list(kerning.items()):
+ leftIsGroup = leftName[0] == '@'
+
+ if leftIsGroup:
+ if leftName in eliminatedGroups:
+ print('kerning: eliminate LHS', leftName)
+ del kerning[leftName]
+ continue
+ groupRefs.incr(leftName)
+ else:
+ if leftName not in allNames:
+ print('kerning: eliminate LHS', leftName)
+ del kerning[leftName]
+ continue
+
+ right2 = {}
+ for rightName, kernVal in right.iteritems():
+ rightIsGroup = rightName[0] == '@'
+ if rightIsGroup:
+          if rightName in eliminatedGroups:
+ print('kerning: eliminate RHS group', rightName)
+ else:
+ groupRefs.incr(rightName)
+ right2[rightName] = kernVal
+ else:
+ if rightName not in allNames:
+ # maybe an unnamed glyph?
+ rightName2 = canonicalGlyphName(rightName, uc2names)
+ if rightName2 != rightName:
+ print('kerning: rename & update RHS glyph', rightName, '->', rightName2)
+ right2[rightName2] = kernVal
+ else:
+ print('kerning: eliminate RHS glyph', rightName)
+ else:
+ right2[rightName] = kernVal
+
+ if len(right2) == 0:
+ print('kerning: eliminate LHS', leftName)
+ del kerning[leftName]
+ if leftIsGroup:
+ groupRefs.decr(leftName)
+ else:
+ kerning[leftName] = right2
+
+ # eliminate any unreferenced groups
+ for groupName, glyphNames in list(groups.items()):
+ if not groupName in groupRefs:
+ print('group: eliminate unreferenced group', groupName)
+ del groups[groupName]
+
+
+ # verify that there are no conflicting kerning pairs
+ pairs = {} # { key => [...] }
+ conflictingPairs = set()
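+  # A key is "<leftGlyph>+<rightGlyph>" after group expansion, e.g. "T+o"; a key produced
+  # by more than one (group or glyph) pair marks a conflict.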
+
+ for leftName, right in kerning.iteritems():
+ # expand LHS group -> names
+ topLeftName = leftName
+ for leftName in groups[leftName] if leftName[0] == '@' else [leftName]:
+ if leftName not in allNames:
+ raise Exception('unknown LHS glyph name ' + repr(leftName))
+ keyPrefix = leftName + '+'
+ for rightName, kernVal in right.iteritems():
+ # expand RHS group -> names
+ topRightName = rightName
+ for rightName in groups[rightName] if rightName[0] == '@' else [rightName]:
+ if rightName not in allNames:
+ raise Exception('unknown RHS glyph name ' + repr(rightName))
+ # print(leftName, '+', rightName, '=>', kernVal)
+ key = keyPrefix + rightName
+ isConflict = key in pairs
+ pairs.setdefault(key, []).append(( topLeftName, topRightName, kernVal ))
+ if isConflict:
+ conflictingPairs.add(key)
+
+ # # resolve pair conflicts by preferring pairs defined via group kerning
+ # for key in conflictingPairs:
+ # pairs = pairs[key]
+ # print('kerning: conflicting pairs %r: %r' % (key, pairs))
+ # bestPair = None
+ # redundantPairs = []
+ # for pair in pairs:
+ # leftName, rightName, kernVal = pair
+ # if bestPair is None:
+ # bestPair = pair
+ # else:
+ # bestLeftName, bestRightName, _ = bestPair
+ # bestScore = 0
+ # score = 0
+ # if bestLeftName[0] == '@': bestScore += 1
+ # if bestRightName[0] == '@': bestScore += 1
+ # if leftName[0] == '@': score += 1
+ # if rightName[0] == '@': score += 1
+ # if bestScore == 2:
+ # # doesn't get better than this
+ # break
+ # elif score > bestScore:
+ # redundantPairs.append(bestPair)
+ # bestPair = pair
+ # else:
+ # redundantPairs.append(pair)
+ # print('- keeping', bestPair)
+ # print('- eliminating', redundantPairs)
+ # for redundantPairs
+
+
+ # # eliminate any unreferenced groups
+ # for groupName, glyphNames in list(groups.items()):
+ # if not groupName in groupRefs:
+ # print('group: eliminate unreferenced group', groupName)
+ # del groups[groupName]
+
+
+ print('Write', groupsFilename)
+ if not dryRun:
+ plistlib.writePlist(groups, groupsFilename)
+
+ print('Write', kerningFilename)
+ if not dryRun:
+ plistlib.writePlist(kerning, kerningFilename)
+
+ # [end] for fontPath in args.fontPaths
+
+
+if __name__ == '__main__':
+ main()
diff --git a/misc/tools/download-count.py b/misc/tools/download-count.py
new file mode 100755
index 000000000..c70532490
--- /dev/null
+++ b/misc/tools/download-count.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+# encoding: utf8
+from __future__ import print_function
+import os, sys, json, urllib2
+
+f = urllib2.urlopen('https://api.github.com/repos/rsms/inter/releases')
+releases = json.load(f)
+
+countTotal = 0
+
+for release in releases:
+ if len(release['assets']) > 0:
+ count = release['assets'][0]['download_count']
+ countTotal += count
+ print('%s: %d' % (release['tag_name'], count))
+ else:
+ print('%s: (missing)' % release['tag_name'])
+
+print('Total: %d' % countTotal)
diff --git a/misc/tools/enrich-glypnames.py b/misc/tools/enrich-glypnames.py
new file mode 100755
index 000000000..b4c401217
--- /dev/null
+++ b/misc/tools/enrich-glypnames.py
@@ -0,0 +1,650 @@
+#!/usr/bin/env python
+# encoding: utf8
+from __future__ import print_function
+import os
+import sys
+import argparse
+import json
+import plistlib
+import re
+from collections import OrderedDict
+from textwrap import TextWrapper
+from StringIO import StringIO
+from ConfigParser import RawConfigParser
+from fontTools import ttLib
+from robofab.objects.objectsRF import RFont, OpenFont
+
+# from feaTools import parser as feaParser
+# from feaTools.parser import parseFeatures
+# from feaTools import FDKSyntaxFeatureWriter
+# from fontbuild.features import updateFeature, compileFeatureRE
+
+# Regex matching "default" glyph names, like "uni2043" and "u01C5"
+uniNameRe = re.compile(r'^u(?:ni)?[0-9A-F]{4,8}$')
+
+
+def defaultGlyphName(uc):
+ return 'uni%04X' % uc
+
+def defaultGlyphName2(uc):
+ return 'u%04X' % uc
+
+
+def isDefaultGlyphName(name):
+ return True if uniNameRe.match(name) else False
+
+
+def isDefaultGlyphNameForUnicode(name, uc):
+ return name == defaultGlyphName(uc) or name == defaultGlyphName2(uc)
+
+
+def getFirstNonDefaultGlyphName(uc, names):
+ for name in names:
+ if not isDefaultGlyphNameForUnicode(name, uc):
+ return name
+ return None
+
+
+def getTTGlyphList(font): # -> { 'Omega': [2126, ...], ... }
+ if isinstance(font, str):
+ font = ttLib.TTFont(font)
+
+ if not 'cmap' in font:
+ raise Exception('missing cmap table')
+
+ gl = {}
+ bestCodeSubTable = None
+ bestCodeSubTableFormat = 0
+
+ for st in font['cmap'].tables:
+ if st.platformID == 0: # 0=unicode, 1=mac, 2=(reserved), 3=microsoft
+ if st.format > bestCodeSubTableFormat:
+ bestCodeSubTable = st
+ bestCodeSubTableFormat = st.format
+
+ if bestCodeSubTable is not None:
+ for cp, glyphname in bestCodeSubTable.cmap.items():
+ if glyphname in gl:
+ gl[glyphname].append(cp)
+ else:
+ gl[glyphname] = [cp]
+
+ return gl, font
+
+
+def getUFOGlyphList(font): # -> { 'Omega': [2126, ...], ... }
+ # Note: font.getCharacterMapping() returns {2126:['Omega', ...], ...}
+ gl = {}
+ for g in font:
+ ucv = g.unicodes
+ if len(ucv) > 0:
+ gl[g.name] = ucv
+ return gl
+
+
+def appendNames(uc2names, extraUc2names, uc, name, isDestination):
+ if uc in uc2names:
+ names = uc2names[uc]
+ if name not in names:
+ names.append(name)
+ elif isDestination:
+ uc2names[uc] = [name]
+ else:
+ if uc in extraUc2names:
+ names = extraUc2names[uc]
+ if name not in names:
+ names.append(name)
+ else:
+ extraUc2names[uc] = [name]
+
+
+def buildGlyphNames(dstFonts, srcFonts, glyphOrder, fallbackGlyphNames):
+ # fallbackGlyphNames: { 2126: 'Omega', ...}
+ uc2names = {} # { 2126: ['Omega', 'Omegagreek', ...], ...}
+ extraUc2names = {} # { 2126: ['Omega', 'Omegagreek', ...], ...}
+ # -- codepoints in Nth fonts, not found in first font
+ name2ucsv = [] # [ { 'Omega': [2126, ...] }, ... ] -- same order as fonts
+
+ fontIndex = 0
+ for font in dstFonts + srcFonts:
+ gl = None
+ if isinstance(font, RFont):
+ print('Inspecting', font.info.familyName, font.info.styleName)
+ gl = getUFOGlyphList(font)
+ else:
+ print('Inspecting', font)
+ gl, font = getTTGlyphList(font)
+
+ name2ucsv.append(gl)
+
+ isDestination = fontIndex < len(dstFonts)
+
+ for name, unicodes in gl.iteritems():
+ # if len(uc2names) > 100: break
+ for uc in unicodes:
+ appendNames(uc2names, extraUc2names, uc, name, isDestination)
+ if isDestination:
+ fallbackName = fallbackGlyphNames.get(uc)
+ if fallbackName is not None:
+ appendNames(uc2names, extraUc2names, uc, fallbackName, isDestination)
+
+ fontIndex += 1
+
+ # for name in glyphOrder:
+ # if len(name) > 7 and name.startswith('uni') and name.find('.') == -1 and name.find('_') == -1:
+ # try:
+ # print('name: %r, %r' % (name, name[3:]))
+ # uc = int(name[3:], 16)
+ # appendNames(uc2names, extraUc2names, uc, name, isDestination=True)
+ # except:
+ # print()
+ # pass
+
+ return uc2names, extraUc2names, name2ucsv
+
+
+def renameStrings(listofstrs, newNames):
+ v = []
+ for s in listofstrs:
+ s2 = newNames.get(s)
+ if s2 is not None:
+ s = s2
+ v.append(s)
+ return v
+
+
+def renameUFOLib(ufoPath, newNames, dryRun=False, print=print):
+ filename = os.path.join(ufoPath, 'lib.plist')
+ plist = plistlib.readPlist(filename)
+
+ glyphOrder = plist.get('public.glyphOrder')
+ if glyphOrder is not None:
+ plist['public.glyphOrder'] = renameStrings(glyphOrder, newNames)
+
+ roboSort = plist.get('com.typemytype.robofont.sort')
+ if roboSort is not None:
+ for entry in roboSort:
+ if isinstance(entry, dict) and entry.get('type') == 'glyphList':
+ asc = entry.get('ascending')
+ desc = entry.get('descending')
+ if asc is not None:
+ entry['ascending'] = renameStrings(asc, newNames)
+ if desc is not None:
+ entry['descending'] = renameStrings(desc, newNames)
+
+ print('Writing', filename)
+ if not dryRun:
+ plistlib.writePlist(plist, filename)
+
+
+def renameUFOGroups(ufoPath, newNames, dryRun=False, print=print):
+ filename = os.path.join(ufoPath, 'groups.plist')
+
+ plist = None
+ try:
+ plist = plistlib.readPlist(filename)
+ except:
+ return
+
+ didChange = False
+
+ for groupName, glyphNames in plist.items():
+ for i in range(len(glyphNames)):
+ name = glyphNames[i]
+ if name in newNames:
+ didChange = True
+ glyphNames[i] = newNames[name]
+
+ if didChange:
+ print('Writing', filename)
+ if not dryRun:
+ plistlib.writePlist(plist, filename)
+
+
+def renameUFOKerning(ufoPath, newNames, dryRun=False, print=print):
+ filename = os.path.join(ufoPath, 'kerning.plist')
+
+ plist = None
+ try:
+ plist = plistlib.readPlist(filename)
+ except:
+ return
+
+ didChange = False
+
+ newPlist = {}
+ for leftName, right in plist.items():
+ if leftName in newNames:
+ didChange = True
+ leftName = newNames[leftName]
+ newRight = {}
+    for rightName, kernValue in right.items():
+ if rightName in newNames:
+ didChange = True
+ rightName = newNames[rightName]
+ newRight[rightName] = kernValue
+    newPlist[leftName] = newRight
+
+ if didChange:
+ print('Writing', filename)
+ if not dryRun:
+ plistlib.writePlist(newPlist, filename)
+
+
+def subFeaName(m, newNames, state):
+ try:
+ int(m[3], 16)
+ except:
+ return m[0]
+
+ name = m[2]
+
+ if name in newNames:
+ # print('sub %r => %r' % (m[0], m[1] + newNames[name] + m[4]))
+ if name == 'uni0402':
+ print('sub %r => %r' % (m[0], m[1] + newNames[name] + m[4]))
+ state['didChange'] = True
+ return m[1] + newNames[name] + m[4]
+
+ return m[0]
+
+
+FEA_TOK = 'tok'
+FEA_SEP = 'sep'
+FEA_END = 'end'
+
+def feaTokenizer(feaText):
+ separators = set('; \t\r\n,[]\'"')
+ tokStartIndex = -1
+ sepStartIndex = -1
+
+ for i in xrange(len(feaText)):
+ ch = feaText[i]
+ if ch in separators:
+ if tokStartIndex != -1:
+ yield (FEA_TOK, feaText[tokStartIndex:i])
+ tokStartIndex = -1
+ if sepStartIndex == -1:
+ sepStartIndex = i
+ else:
+ if sepStartIndex != -1:
+ yield (FEA_SEP, feaText[sepStartIndex:i])
+ sepStartIndex = -1
+ if tokStartIndex == -1:
+ tokStartIndex = i
+
+ if sepStartIndex != -1 and tokStartIndex != -1:
+ yield (FEA_END, feaText[min(sepStartIndex, tokStartIndex):])
+ elif sepStartIndex != -1:
+ yield (FEA_END, feaText[sepStartIndex:])
+ elif tokStartIndex != -1:
+ yield (FEA_END, feaText[tokStartIndex:])
+ else:
+ yield (FEA_END, '')
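+
+# Example: feaTokenizer("sub a by b;") yields ('tok', 'sub'), ('sep', ' '), ('tok', 'a'),
+# ('sep', ' '), ('tok', 'by'), ('sep', ' '), ('tok', 'b') and finally ('end', ';').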
+
+
+def renameUFOFeatures(font, ufoPath, newNames, dryRun=False, print=print):
+ filename = os.path.join(ufoPath, 'features.fea')
+
+ feaText = ''
+ try:
+ with open(filename, 'r') as f:
+ feaText = f.read()
+ except:
+ return
+
+ didChange = False
+ feaText2 = ''
+
+ for t, v in feaTokenizer(feaText):
+ if t is FEA_TOK and len(v) > 6 and v.startswith('uni'):
+ if v in newNames:
+ # print('sub', v, newNames[v])
+ didChange = True
+ v = newNames[v]
+ feaText2 += v
+
+ feaText = feaText2
+
+ if didChange:
+ print('Writing', filename)
+ if not dryRun:
+ with open(filename, 'w') as f:
+ f.write(feaText)
+ print(
+ 'Important: you need to manually verify that', filename, 'looks okay.',
+ 'We did an optimistic update which is not perfect.'
+ )
+
+ # classes = feaParser.classDefinitionRE.findall(feaText)
+ # for precedingMark, className, classContent in classes:
+ # content = feaParser.classContentRE.findall(classContent)
+ # print('class', className, content)
+
+ # didChange = False
+ # content2 = []
+ # for name in content:
+ # if name in newNames:
+ # didChange = True
+ # content2.append(newNames[name])
+ # if didChange:
+ # print('content2', content2)
+ # feaText = feaParser.classDefinitionRE.sub('', feaText)
+
+ # featureTags = feaParser.feature_findAll_RE.findall(feaText)
+ # for precedingMark, featureTag in featureTags:
+ # print('feat', featureTag)
+
+
+def renameUFODetails(font, ufoPath, newNames, dryRun=False, print=print):
+ renameUFOLib(ufoPath, newNames, dryRun, print)
+ renameUFOGroups(ufoPath, newNames, dryRun, print)
+ renameUFOKerning(ufoPath, newNames, dryRun, print)
+ renameUFOFeatures(font, ufoPath, newNames, dryRun, print)
+
+
+def readLines(filename):
+ with open(filename, 'r') as f:
+ return f.read().strip().splitlines()
+
+
+def readGlyphOrderFile(filename):
+ names = []
+ for line in readLines(filename):
+ line = line.lstrip()
+ if len(line) > 0 and line[0] != '#':
+ names.append(line)
+ return names
+
+
+def renameGlyphOrderFile(filename, newNames, dryRun=False, print=print):
+ lines = []
+ didRename = False
+ for line in readLines(filename):
+ line = line.lstrip()
+ if len(line) > 0 and line[0] != '#':
+ newName = newNames.get(line)
+ if newName is not None:
+ didRename = True
+ line = newName
+ lines.append(line)
+ if didRename:
+ print('Writing', filename)
+ if not dryRun:
+ with open(filename, 'w') as f:
+ f.write('\n'.join(lines))
+
+
+def parseGlyphComposition(composite):
+ c = composite.split("=")
+ d = c[1].split("/")
+ glyphName = d[0]
+ if len(d) == 1:
+ offset = [0, 0]
+ else:
+ offset = [int(i) for i in d[1].split(",")]
+ accentString = c[0]
+ accents = accentString.split("+")
+ baseName = accents.pop(0)
+ accentNames = [i.split(":") for i in accents]
+ return (glyphName, baseName, accentNames, offset)
+
+
+def fmtGlyphComposition(glyphName, baseName, accentNames, offset):
+ # glyphName = 'uni03D3'
+ # baseName = 'uni03D2'
+ # accentNames = [['tonos', 'top'], ['acute', 'top']]
+ # offset = [100, 0]
+ # => "uni03D2+tonos:top+acute:top=uni03D3/100,0"
+ s = baseName
+ for accentNameTuple in accentNames:
+ s += '+' + accentNameTuple[0]
+ if len(accentNameTuple) > 1:
+ s += ':' + accentNameTuple[1]
+ s += '=' + glyphName
+ if offset[0] != 0 or offset[1] != 0:
+ s += '/%d,%d' % tuple(offset)
+ return s
+
+
+def renameDiacriticsFile(filename, newNames, dryRun=False, print=print):
+ lines = []
+ didRename = False
+ for line in readLines(filename):
+ line = line.strip()
+ if len(line) > 0 and line[0] != '#':
+ glyphName, baseName, accentNames, offset = parseGlyphComposition(line)
+
+ # rename
+ glyphName = newNames.get(glyphName, glyphName)
+ baseName = newNames.get(baseName, baseName)
+ for accentTuple in accentNames:
+ accentTuple[0] = newNames.get(accentTuple[0], accentTuple[0])
+
+ line2 = fmtGlyphComposition(glyphName, baseName, accentNames, offset)
+
+ if line != line2:
+ line = line2
+ didRename = True
+ # print(line, '=>', line2)
+
+ lines.append(line)
+
+ if didRename:
+ print('Writing', filename)
+ if not dryRun:
+ with open(filename, 'w') as f:
+ f.write('\n'.join(lines))
+
+
+def configFindResFile(config, basedir, name):
+ fn = os.path.join(basedir, config.get("res", name))
+ if not os.path.isfile(fn):
+ basedir = os.path.dirname(basedir)
+ fn = os.path.join(basedir, config.get("res", name))
+ if not os.path.isfile(fn):
+ fn = None
+ return fn
+
+
+def renameConfigFile(config, filename, newNames, dryRun=False, print=print):
+ wrapper = TextWrapper()
+ wrapper.width = 80
+ wrapper.break_long_words = False
+ wrapper.break_on_hyphens = False
+
+ wrap = lambda names: '\n'.join(wrapper.wrap(' '.join(names)))
+
+ didRename = False
+ for propertyName, values in config.items('glyphs'):
+ glyphNames = values.split()
+ # print(propertyName, glyphNames)
+    propChanged = False
+    for i, name in enumerate(glyphNames):
+      if name in newNames:
+        propChanged = True
+        glyphNames[i] = newNames[name]
+    if propChanged:
+      config.set('glyphs', propertyName, wrap(glyphNames)+'\n')
+      didRename = True
+
+ # config.set(section, option, value)
+ if didRename:
+ s = StringIO()
+ config.write(s)
+ s = s.getvalue()
+ s = re.sub(r'\n(\w+)\s+=\s*', '\n\\1: ', s, flags=re.M)
+ s = re.sub(r'((?:^|\n)\[[^\]]*\])', '\\1\n', s, flags=re.M)
+ s = re.sub(r'\n\t\n', '\n\n', s, flags=re.M)
+ s = s.strip() + '\n'
+ print('Writing', filename)
+ if not dryRun:
+ with open(filename, 'w') as f:
+ f.write(s)
+
+
+def parseAGL(filename): # -> { 2126: 'Omega', ... }
+ m = {}
+ for line in readLines(filename):
+ # Omega;2126
+ # dalethatafpatah;05D3 05B2 # higher-level combinations; ignored
+ line = line.strip()
+ if len(line) > 0 and line[0] != '#':
+ name, uc = tuple([c.strip() for c in line.split(';')])
+ if uc.find(' ') == -1:
+ # it's a 1:1 mapping
+ m[int(uc, 16)] = name
+ return m
+
+
+def main():
+ argparser = argparse.ArgumentParser(description='Enrich UFO glyphnames')
+
+ argparser.add_argument(
+ '-dry', dest='dryRun', action='store_const', const=True, default=False,
+ help='Do not modify anything, but instead just print what would happen.')
+
+ argparser.add_argument(
+ '-list-missing', dest='listMissing', action='store_const', const=True, default=False,
+ help='List glyphs with unicodes found in source files but missing in any of the target UFOs.')
+
+ argparser.add_argument(
+ '-list-unnamed', dest='listUnnamed', action='store_const', const=True, default=False,
+ help="List glyphs with unicodes in target UFOs that don't have symbolic names.")
+
+ argparser.add_argument(
+ '-backfill-agl', dest='backfillWithAgl', action='store_const', const=True, default=False,
+ help="Use glyphnames from Adobe Glyph List for any glyphs that no names in any of"+
+ " the input font files")
+
+ argparser.add_argument(
+ '-src', dest='srcFonts', metavar='<fontfile>', type=str, nargs='*',
+ help='TrueType, OpenType or UFO fonts to gather glyph info from. '+
+ 'Names found in earlier-listed fonts are prioritized over later listings.')
+
+ argparser.add_argument(
+ 'dstFonts', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts to update')
+
+ args = argparser.parse_args()
+
+ # Load UFO fonts
+ dstFonts = []
+ dstFontPaths = {} # keyed by RFont object
+ srcDir = None
+ for fn in args.dstFonts:
+ fn = fn.rstrip('/')
+ font = OpenFont(fn)
+ dstFonts.append(font)
+ dstFontPaths[font] = fn
+ srcDir2 = os.path.dirname(fn)
+ if srcDir is None:
+ srcDir = srcDir2
+ elif srcDir != srcDir2:
+      raise Exception('All <ufofile>s must be rooted in the same directory')
+
+ # load fontbuild configuration
+ config = RawConfigParser(dict_type=OrderedDict)
+ configFilename = os.path.join(srcDir, 'fontbuild.cfg')
+ config.read(configFilename)
+ glyphOrderFile = configFindResFile(config, srcDir, 'glyphorder')
+ diacriticsFile = configFindResFile(config, srcDir, 'diacriticfile')
+ glyphOrder = readGlyphOrderFile(glyphOrderFile)
+
+ fallbackGlyphNames = {} # { 2126: 'Omega', ... }
+ if args.backfillWithAgl:
+ fallbackGlyphNames = parseAGL(configFindResFile(config, srcDir, 'agl_glyphlistfile'))
+
+ # find glyph names
+ uc2names, extraUc2names, name2ucsv = buildGlyphNames(
+ dstFonts,
+    args.srcFonts or [],  # -src is optional
+ glyphOrder,
+ fallbackGlyphNames
+ )
+ # Note: name2ucsv has same order as parameters to buildGlyphNames
+
+ if args.listMissing:
+ print('# Missing glyphs: (found in -src but not in any <ufofile>)')
+ for uc, names in extraUc2names.iteritems():
+ print('U+%04X\t%s' % (uc, ', '.join(names)))
+ return
+
+ elif args.listUnnamed:
+ print('# Unnamed glyphs:')
+ unnamed = set()
+ for name in glyphOrder:
+ if len(name) > 7 and name.startswith('uni'):
+ unnamed.add(name)
+ for gl in name2ucsv[:len(dstFonts)]:
+ for name, ucs in gl.iteritems():
+ for uc in ucs:
+ if isDefaultGlyphNameForUnicode(name, uc):
+ unnamed.add(name)
+ break
+ for name in unnamed:
+ print(name)
+ return
+
+ printDry = lambda *args: print(*args)
+ if args.dryRun:
+ printDry = lambda *args: print('[dry-run]', *args)
+
+ newNames = {}
+ renameGlyphsQueue = {} # keyed by RFont object
+
+ for font in dstFonts:
+ renameGlyphsQueue[font] = {}
+
+ for uc, names in uc2names.iteritems():
+ if len(names) < 2:
+ continue
+ dstGlyphName = names[0]
+ if isDefaultGlyphNameForUnicode(dstGlyphName, uc):
+ newGlyphName = getFirstNonDefaultGlyphName(uc, names[1:])
+ # if newGlyphName is None:
+ # # if we found no symbolic name, check in fallback list
+ # newGlyphName = fallbackGlyphNames.get(uc)
+ # if newGlyphName is not None:
+ # printDry('Using fallback %s' % newGlyphName)
+ if newGlyphName is not None:
+ printDry('Rename %s -> %s' % (dstGlyphName, newGlyphName))
+ for font in dstFonts:
+ if dstGlyphName in font:
+ renameGlyphsQueue[font][dstGlyphName] = newGlyphName
+ newNames[dstGlyphName] = newGlyphName
+
+ if len(newNames) == 0:
+ printDry('No changes')
+ return
+
+ # rename component instances
+ for font in dstFonts:
+ componentMap = font.getReverseComponentMapping()
+ for currName, newName in renameGlyphsQueue[font].iteritems():
+ for depName in componentMap.get(currName, []):
+ depG = font[depName]
+ for c in depG.components:
+ if c.baseGlyph == currName:
+ c.baseGlyph = newName
+ c.setChanged()
+
+ # rename glyphs
+ for font in dstFonts:
+ for currName, newName in renameGlyphsQueue[font].iteritems():
+ font[currName].name = newName
+
+ # save fonts and update font data
+ for font in dstFonts:
+ fontPath = dstFontPaths[font]
+ printDry('Saving %d glyphs in %s' % (len(newNames), fontPath))
+ if not args.dryRun:
+ font.save()
+ renameUFODetails(font, fontPath, newNames, dryRun=args.dryRun, print=printDry)
+
+ # update resource files
+ renameGlyphOrderFile(glyphOrderFile, newNames, dryRun=args.dryRun, print=printDry)
+ renameDiacriticsFile(diacriticsFile, newNames, dryRun=args.dryRun, print=printDry)
+ renameConfigFile(config, configFilename, newNames, dryRun=args.dryRun, print=printDry)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/misc/tools/fixup-diacritics.py b/misc/tools/fixup-diacritics.py
new file mode 100755
index 000000000..2453e7f3c
--- /dev/null
+++ b/misc/tools/fixup-diacritics.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python
+# encoding: utf8
+from __future__ import print_function
+import os, sys, plistlib, re
+from collections import OrderedDict
+from ConfigParser import RawConfigParser
+from argparse import ArgumentParser
+from robofab.objects.objectsRF import OpenFont
+
+
+# Regex matching "default" glyph names, like "uni2043" and "u01C5"
+uniNameRe = re.compile(r'^u(?:ni)?([0-9A-F]{4,8})$')
+
+
+def unicodeForDefaultGlyphName(glyphName):
+ m = uniNameRe.match(glyphName)
+ if m is not None:
+ try:
+ return int(m.group(1), 16)
+ except:
+ pass
+ return None
+
+
+def canonicalGlyphName(glyphName, uc2names):
+ uc = unicodeForDefaultGlyphName(glyphName)
+ if uc is not None:
+ names = uc2names.get(uc)
+ if names is not None and len(names) > 0:
+ return names[0]
+ return glyphName
+
+
+
+def parseGlyphComposition(composite):
+ c = composite.split("=")
+ d = c[1].split("/")
+ glyphName = d[0]
+ if len(d) == 1:
+ offset = [0, 0]
+ else:
+ offset = [int(i) for i in d[1].split(",")]
+ accentString = c[0]
+ accents = accentString.split("+")
+ baseName = accents.pop(0)
+ accentNames = [i.split(":") for i in accents]
+ return (glyphName, baseName, accentNames, offset)
+
+
+def fmtGlyphComposition(glyphName, baseName, accentNames, offset):
+ # glyphName = 'uni03D3'
+ # baseName = 'uni03D2'
+ # accentNames = [['tonos', 'top'], ['acute', 'top']]
+ # offset = [100, 0]
+ # => "uni03D2+tonos:top+acute:top=uni03D3/100,0"
+ s = baseName
+ for accentNameTuple in accentNames:
+ s += '+' + accentNameTuple[0]
+ if len(accentNameTuple) > 1:
+ s += ':' + accentNameTuple[1]
+ s += '=' + glyphName
+ if offset[0] != 0 or offset[1] != 0:
+ s += '/%d,%d' % tuple(offset)
+ return s
+
+
+def loadGlyphCompositions(filename): # { glyphName => (baseName, accentNames, offset) }
+ compositions = OrderedDict()
+ with open(filename, 'r') as f:
+ for line in f:
+ line = line.strip()
+ if len(line) > 0 and line[0] != '#':
+ glyphName, baseName, accentNames, offset = parseGlyphComposition(line)
+ compositions[glyphName] = (baseName, accentNames, offset)
+ return compositions
+
+
+def loadAGL(filename): # -> { 2126: 'Omega', ... }
+ m = {}
+ with open(filename, 'r') as f:
+ for line in f:
+ # Omega;2126
+ # dalethatafpatah;05D3 05B2 # higher-level combinations; ignored
+ line = line.strip()
+ if len(line) > 0 and line[0] != '#':
+ name, uc = tuple([c.strip() for c in line.split(';')])
+ if uc.find(' ') == -1:
+ # it's a 1:1 mapping
+ m[int(uc, 16)] = name
+ return m
+
+
+def loadFontGlyphs(font):
+ uc2names = {} # { 2126: ['Omega', ...], ...}
+ name2ucs = {} # { 'Omega': [2126, ...], '.notdef': [], ...}
+ for g in font:
+ name = g.name
+ ucs = g.unicodes
+ name2ucs[name] = ucs
+ for uc in ucs:
+ names = uc2names.setdefault(uc, [])
+ if name not in names:
+ names.append(name)
+ return uc2names, name2ucs
+
+
+def main():
+ argparser = ArgumentParser(description='Fixup diacritic names')
+
+ argparser.add_argument(
+ '-dry', dest='dryRun', action='store_const', const=True, default=False,
+ help='Do not modify anything, but instead just print what would happen.')
+
+ argparser.add_argument(
+ 'fontPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts')
+
+ args = argparser.parse_args()
+ dryRun = args.dryRun
+
+ uc2names = {}
+ name2ucs = {}
+
+ for fontPath in args.fontPaths:
+ font = OpenFont(fontPath)
+ _uc2names, _name2ucs = loadFontGlyphs(font)
+ for uc, _names in _uc2names.iteritems():
+ names = uc2names.setdefault(uc, [])
+ for name in _names:
+ if name not in names:
+ names.append(name)
+ for name, _ucs in _name2ucs.iteritems():
+ ucs = name2ucs.setdefault(name, [])
+ for uc in _ucs:
+ if uc not in ucs:
+ ucs.append(uc)
+
+ agl = loadAGL('src/glyphlist.txt') # { 2126: 'Omega', ... }
+
+ diacriticsFilename = 'src/diacritics.txt'
+ diacriticComps = loadGlyphCompositions(diacriticsFilename) # {glyphName => (baseName, a, o)}
+
+ for glyphName, comp in list(diacriticComps.items()):
+ if glyphName not in name2ucs:
+ uc = unicodeForDefaultGlyphName(glyphName)
+ if uc is not None:
+ aglName = agl.get(uc)
+ if aglName is not None:
+ if aglName in diacriticComps:
+ raise Exception('composing same glyph with different names:', aglName, glyphName)
+ print('rename', glyphName, '->', aglName, '(U+%04X)' % uc)
+ del diacriticComps[glyphName]
+ diacriticComps[aglName] = comp
+
+ lines = []
+ for glyphName, comp in diacriticComps.iteritems():
+ lines.append(fmtGlyphComposition(glyphName, *comp))
+ # print('\n'.join(lines))
+ print('Write', diacriticsFilename)
+ if not dryRun:
+ with open(diacriticsFilename, 'w') as f:
+ for line in lines:
+ f.write(line + '\n')
+
+
+
+
+main()
diff --git a/misc/tools/fixup-features.py b/misc/tools/fixup-features.py
new file mode 100755
index 000000000..dd4c2658f
--- /dev/null
+++ b/misc/tools/fixup-features.py
@@ -0,0 +1,335 @@
+#!/usr/bin/env python
+# encoding: utf8
+from __future__ import print_function
+import os, sys, plistlib, re
+from collections import OrderedDict
+from ConfigParser import RawConfigParser
+from argparse import ArgumentParser
+from robofab.objects.objectsRF import OpenFont
+from fontTools.feaLib.parser import Parser as FeaParser
+from fontTools.feaLib.builder import Builder as FeaBuilder
+from fontTools.ttLib import TTFont
+
+
+# Regex matching "default" glyph names, like "uni2043" and "u01C5"
+uniNameRe = re.compile(r'^u(?:ni)?([0-9A-F]{4,8})$')
+
+
+def unicodeForDefaultGlyphName(glyphName):
+ m = uniNameRe.match(glyphName)
+ if m is not None:
+ try:
+ return int(m.group(1), 16)
+ except:
+ pass
+ return None
+
+
+def canonicalGlyphName(glyphName, uc2names):
+ uc = unicodeForDefaultGlyphName(glyphName)
+ if uc is not None:
+ names = uc2names.get(uc)
+ if names is not None and len(names) > 0:
+ return names[0]
+ return glyphName
+
+
+
+def parseGlyphComposition(composite):
+ c = composite.split("=")
+ d = c[1].split("/")
+ glyphName = d[0]
+ if len(d) == 1:
+ offset = [0, 0]
+ else:
+ offset = [int(i) for i in d[1].split(",")]
+ accentString = c[0]
+ accents = accentString.split("+")
+ baseName = accents.pop(0)
+ accentNames = [i.split(":") for i in accents]
+ return (glyphName, baseName, accentNames, offset)
+
+
+def loadGlyphCompositions(filename): # { glyphName => (baseName, accentNames, offset) }
+ compositions = OrderedDict()
+ with open(filename, 'r') as f:
+ for line in f:
+ line = line.strip()
+ if len(line) > 0 and line[0] != '#':
+ glyphName, baseName, accentNames, offset = parseGlyphComposition(line)
+ compositions[glyphName] = (baseName, accentNames, offset)
+ return compositions
+
+
+def loadAGL(filename): # -> { 2126: 'Omega', ... }
+ m = {}
+ with open(filename, 'r') as f:
+ for line in f:
+ # Omega;2126
+ # dalethatafpatah;05D3 05B2 # higher-level combinations; ignored
+ line = line.strip()
+ if len(line) > 0 and line[0] != '#':
+ name, uc = tuple([c.strip() for c in line.split(';')])
+ if uc.find(' ') == -1:
+ # it's a 1:1 mapping
+ m[int(uc, 16)] = name
+ return m
+
+
+def loadLocalNamesDB(fonts, agl, diacriticComps):
+ uc2names = None # { 2126: ['Omega', ...], ...}
+ allNames = set() # set('Omega', ...)
+
+ for font in fonts:
+ _uc2names = font.getCharacterMapping() # { 2126: ['Omega', ...], ...}
+ if uc2names is None:
+ uc2names = _uc2names
+ else:
+ for uc, _names in _uc2names.iteritems():
+ names = uc2names.setdefault(uc, [])
+ for name in _names:
+ if name not in names:
+ names.append(name)
+ for g in font:
+ allNames.add(g.name)
+
+ # agl { 2126: 'Omega', ...} -> { 'Omega': [2126, ...], ...}
+ aglName2Ucs = {}
+ for uc, name in agl.iteritems():
+ aglName2Ucs.setdefault(name, []).append(uc)
+
+ for glyphName, comp in diacriticComps.iteritems():
+ aglUCs = aglName2Ucs.get(glyphName)
+ if aglUCs is None:
+ uc = unicodeForDefaultGlyphName(glyphName)
+ if uc is not None:
+ glyphName2 = agl.get(uc)
+ if glyphName2 is not None:
+ glyphName = glyphName2
+ names = uc2names.setdefault(uc, [])
+ if glyphName not in names:
+ names.append(glyphName)
+ allNames.add(glyphName)
+ else:
+ allNames.add(glyphName)
+ for uc in aglUCs:
+ names = uc2names.get(uc, [])
+ if glyphName not in names:
+ names.append(glyphName)
+ uc2names[uc] = names
+
+ name2ucs = {} # { 'Omega': [2126, ...], ...}
+ for uc, names in uc2names.iteritems():
+ for name in names:
+ name2ucs.setdefault(name, set()).add(uc)
+
+ return uc2names, name2ucs, allNames
+
+
+
+includeRe = re.compile(r'^include\(([^\)]+)\);\s*$')
+
+
+def loadFeaturesFile(filepath):
+ print('read', filepath)
+ lines = []
+ with open(filepath, 'r') as f:
+ for line in f:
+ m = includeRe.match(line)
+ if m is not None:
+ includedFilename = m.group(1)
+ includedPath = os.path.normpath(os.path.join(os.path.dirname(filepath), includedFilename))
+ lines = lines + loadFeaturesFile(includedPath)
+ else:
+ lines.append(line)
+ return lines
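+
+# Example: a line "include(kern.fea);" is replaced inline by the contents of kern.fea,
+# resolved relative to the including file; nested includes are expanded recursively.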
+
+
+def main():
+ argparser = ArgumentParser(description='Fixup features.fea')
+
+ argparser.add_argument(
+ '-dry', dest='dryRun', action='store_const', const=True, default=False,
+ help='Do not modify anything, but instead just print what would happen.')
+
+ argparser.add_argument(
+ 'fontPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts to update')
+
+ args = argparser.parse_args()
+ dryRun = args.dryRun
+
+ agl = loadAGL('src/glyphlist.txt') # { 2126: 'Omega', ... }
+ diacriticComps = loadGlyphCompositions('src/diacritics.txt') # {glyphName => (baseName, a, o)}
+
+ # collect glyph names
+ fonts = [OpenFont(fontPath) for fontPath in args.fontPaths]
+ uc2names, name2ucs, allNames = loadLocalNamesDB(fonts, agl, diacriticComps)
+
+ includeRe = re.compile(r'^include\(([^\)]+)\);\s*$')
+
+  # open features.fea (read from the first UFO; includes are resolved relative to it)
+  featuresFilename = os.path.join(args.fontPaths[0], 'features.fea')
+  featuresLines = loadFeaturesFile(featuresFilename)
+
+ classDefRe = re.compile(r'^@([^\s=]+)\s*=\s*\[([^\]]+)\]\s*;\s*$')
+ subRe = re.compile(r'^\s*sub\s+(.+)(\'?)\s+by\s+(.+)\s*;\s*$')
+ sub2Re = re.compile(r'^\s*sub\s+([^\[]+)\s+\[\s*([^\]]+)\s*\](\'?)\s+by\s+(.+)\s*;\s*$')
+ # sub lmidtilde [uni1ABB uni1ABD uni1ABE]' by uni1ABE.w2;
+ # sub lmidtilde uni1ABC' by uni1ABC.w2;
+ spacesRe = re.compile(r'[\s\r\n]+')
+
+ classDefs = {}
+ featuresLines2 = []
+
+ for line in featuresLines:
+ clsM = classDefRe.match(line)
+ if clsM is not None:
+ clsName = clsM.group(1)
+ names = spacesRe.split(clsM.group(2).strip())
+ if clsName in classDefs:
+ raise Exception('duplicate class definition ' + clsName)
+ # print('classdef', clsName, ' '.join(names))
+ # print('classdef', clsName)
+ names2 = []
+ for name in names:
+ if name == '-':
+ # e.g. A - Z
+ names2.append(name)
+ continue
+ if name[0] != '@':
+ canonName = canonicalGlyphName(name, uc2names)
+ if canonName != name:
+ # print('renaming ' + name + ' -> ' + canonName)
+ names2.append(canonName)
+ elif name not in allNames:
+ print('skipping unknown glyph ' + name)
+ else:
+ names2.append(name)
+ else:
+ raise Exception('todo: class-ref ' + name + ' in class-def ' + clsName)
+ classDefs[clsName] = names2
+ line = '@%s = [ %s ];' % (clsName, ' '.join(names2))
+ featuresLines2.append(line)
+ continue
+
+
+ # sub2M = sub2Re.match(line)
+ # if sub2M is not None:
+ # findNames1 = spacesRe.split(sub2M.group(1))
+ # findNames2 = spacesRe.split(sub2M.group(2))
+ # apos = sub2M.group(3)
+ # rightName = sub2M.group(4)
+ # print('TODO: sub2', findNames1, findNames2, apos, rightName)
+ # featuresLines2.append(line)
+ # continue
+
+
+ sub2M = sub2Re.match(line)
+ subM = None
+ if sub2M is None:
+ subM = subRe.match(line)
+ if subM is not None or sub2M is not None:
+ findNamesStr = ''
+ findNamesHasBrackets = False
+ findNames = []
+
+ findNamesBStr = ''
+ findNamesBHasBrackets = False
+ findNamesB = []
+
+ newNamesStr = ''
+ newNamesHasBrackets = False
+ newNames = []
+
+ apos0 = ''
+
+ if subM is not None:
+ findNamesStr = subM.group(1)
+ apos0 = subM.group(2)
+ newNamesStr = subM.group(3)
+ else: # sub2M
+ findNamesStr = sub2M.group(1)
+ findNamesBStr = sub2M.group(2)
+ apos0 = sub2M.group(3)
+ newNamesStr = sub2M.group(4)
+
+ if newNamesStr[0] == '[':
+ newNamesHasBrackets = True
+ newNamesStr = newNamesStr.strip('[ ]')
+ newNames = spacesRe.split(newNamesStr)
+
+ if findNamesStr[0] == '[':
+ findNamesHasBrackets = True
+ findNamesStr = findNamesStr.strip('[ ]')
+ findNames = spacesRe.split(findNamesStr)
+
+ if findNamesBStr != '':
+ if findNamesBStr[0] == '[':
+ findNamesBHasBrackets = True
+ findNamesBStr = findNamesBStr.strip('[ ]')
+ findNamesB = spacesRe.split(findNamesBStr)
+
+
+ names22 = []
+ for names in [findNames, findNamesB, newNames]:
+ names2 = []
+ for name in names:
+ if name[0] == '@':
+ clsName = name[1:].rstrip("'")
+ if clsName not in classDefs:
+ raise Exception('sub: missing target class ' + clsName + ' at\n' + line)
+ names2.append(name)
+ else:
+ apos = name[-1] == "'"
+ if apos:
+ name = name[:-1]
+ if name not in allNames:
+ canonName = canonicalGlyphName(name, uc2names)
+ if canonName != name:
+ print('renaming ' + name + ' -> ' + canonName)
+ name = canonName
+ else:
+ raise Exception('TODO: unknown name', name)
+ # if we remove names, we also need to remove subs (that become empty), and so on.
+ if apos:
+ name += "'"
+ names2.append(name)
+ names22.append(names2)
+
+ findNames2, findNamesB2, newNames2 = names22
+
+ findNamesStr = ' '.join(findNames2)
+ if findNamesHasBrackets: findNamesStr = '[' + findNamesStr + ']'
+
+ if findNamesBStr != '':
+ findNamesBStr = ' '.join(findNamesB2)
+ if findNamesBHasBrackets: findNamesBStr = '[' + findNamesBStr + ']'
+
+ newNamesStr = ' '.join(newNames2)
+ if newNamesHasBrackets: newNamesStr = '[' + newNamesStr + ']'
+
+ if subM is not None:
+ line = ' sub %s%s by %s;' % (findNamesStr, apos0, newNamesStr)
+ else:
+ # if subM is None:
+ # sub bbar [uni1ABB uni1ABD uni1ABE]' by uni1ABE.w2;
+ line = ' sub %s [%s]%s by %s;' % (findNamesStr, findNamesBStr, apos0, newNamesStr)
+
+ featuresLines2.append(line)
+
+
+  print('Write', featuresFilename + '2')
+ if not dryRun:
+ with open(featuresFilename + '2', 'w') as f:
+ for line in featuresLines2:
+ f.write(line + '\n')
+
+ # FeaParser(featuresFilename + '2', allNames).parse()
+
+ # font = TTFont('build/dist-unhinted/Inter-UI-Regular.otf')
+ # FeaBuilder(font, featuresFilename + '2').build()
+
+
+
+
+
+main()
diff --git a/misc/tools/fixup-kerning.py b/misc/tools/fixup-kerning.py
new file mode 100755
index 000000000..fc4ce8071
--- /dev/null
+++ b/misc/tools/fixup-kerning.py
@@ -0,0 +1,362 @@
+#!/usr/bin/env python
+# encoding: utf8
+from __future__ import print_function
+import os, sys, plistlib, json
+from collections import OrderedDict
+from ConfigParser import RawConfigParser
+from argparse import ArgumentParser
+from fontTools import ttLib
+from robofab.objects.objectsRF import OpenFont
+
+
+def getTTCharMap(font): # -> { 2126: 'Omegagreek', ...}
+ if isinstance(font, str):
+ font = ttLib.TTFont(font)
+
+ if not 'cmap' in font:
+ raise Exception('missing cmap table')
+
+ gl = {}
+ bestCodeSubTable = None
+ bestCodeSubTableFormat = 0
+
+ for st in font['cmap'].tables:
+ if st.platformID == 0: # 0=unicode, 1=mac, 2=(reserved), 3=microsoft
+ if st.format > bestCodeSubTableFormat:
+ bestCodeSubTable = st
+ bestCodeSubTableFormat = st.format
+
+ if bestCodeSubTable is not None:
+ for cp, glyphname in bestCodeSubTable.cmap.items():
+ if cp in gl:
+ raise Exception('duplicate unicode-to-glyphname mapping: U+%04X => %r and %r' % (
+ cp, glyphname, gl[cp]))
+ gl[cp] = glyphname
+
+ return gl
+
+
+def revCharMap(ucToNames):
+ # {2126:['Omega','Omegagr']} -> {'Omega':2126, 'Omegagr':2126}
+ # {2126:'Omega'} -> {'Omega':2126}
+ m = {}
+ if len(ucToNames) == 0:
+ return m
+
+ lists = True
+ for v in ucToNames.itervalues():
+ lists = not isinstance(v, str)
+ break
+
+ if lists:
+ for uc, names in ucToNames.iteritems():
+ for name in names:
+ m[name] = uc
+ else:
+ for uc, name in ucToNames.iteritems():
+ m[name] = uc
+
+ return m
+
+
+def getGlyphNameDifferenceMap(srcCharMap, dstCharMap, dstRevCharMap):
+ m = {} # { 'Omegagreek': 'Omega', ... }
+ for uc, srcName in srcCharMap.iteritems():
+ dstNames = dstCharMap.get(uc)
+ if dstNames is not None and len(dstNames) > 0:
+ if len(dstNames) != 1:
+ print('warning: ignoring multi-glyph map for U+%04X in source font' % uc)
+ dstName = dstNames[0]
+ if srcName != dstName and srcName not in dstRevCharMap:
+ # Only include names that differ. also, The `srcName not in dstRevCharMap` condition
+ # makes sure that we don't rename glyphs that are already valid.
+ m[srcName] = dstName
+ return m
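+
+# Example: srcCharMap {0x2126: 'Omegagreek'}, dstCharMap {0x2126: ['Omega']} and a
+# dstRevCharMap that lacks 'Omegagreek' produce {'Omegagreek': 'Omega'}.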
+
+
+def fixupGroups(fontPath, dstGlyphNames, srcToDstMap, dryRun, stats):
+ filename = os.path.join(fontPath, 'groups.plist')
+ groups = plistlib.readPlist(filename)
+ groups2 = {}
+ glyphToGroups = {}
+
+ for groupName, glyphNames in groups.iteritems():
+ glyphNames2 = []
+ for glyphName in glyphNames:
+ if glyphName in srcToDstMap:
+ gn2 = srcToDstMap[glyphName]
+ stats.renamedGlyphs[glyphName] = gn2
+ glyphName = gn2
+ if glyphName in dstGlyphNames:
+ glyphNames2.append(glyphName)
+ glyphToGroups[glyphName] = glyphToGroups.get(glyphName, []) + [groupName]
+ else:
+ stats.removedGlyphs.add(glyphName)
+ if len(glyphNames2) > 0:
+ groups2[groupName] = glyphNames2
+ else:
+ stats.removedGroups.add(groupName)
+
+ print('Writing', filename)
+ if not dryRun:
+ plistlib.writePlist(groups2, filename)
+
+ return groups2, glyphToGroups
+
+
+def fixupKerning(fontPath, dstGlyphNames, srcToDstMap, groups, glyphToGroups, dryRun, stats):
+ filename = os.path.join(fontPath, 'kerning.plist')
+ kerning = plistlib.readPlist(filename)
+ kerning2 = {}
+ groupPairs = {} # { "lglyphname+lglyphname": ("lgroupname"|"", "rgroupname"|"", 123) }
+ # pairs = {} # { "name+name" => 123 }
+
+ for leftName, right in kerning.items():
+ leftIsGroup = leftName[0] == '@'
+ leftGroupNames = None
+
+ if leftIsGroup:
+ # left is a group
+ if leftName not in groups:
+ # dead group -- skip
+ stats.removedGroups.add(leftName)
+ continue
+ leftGroupNames = groups[leftName]
+ else:
+ if leftName in srcToDstMap:
+ leftName2 = srcToDstMap[leftName]
+ stats.renamedGlyphs[leftName] = leftName2
+ leftName = leftName2
+ if leftName not in dstGlyphNames:
+ # dead glyphname -- skip
+ stats.removedGlyphs.add(leftName)
+ continue
+
+ right2 = {}
+ rightGroupNamesAndValues = []
+ for rightName, kerningValue in right.iteritems():
+ rightIsGroup = rightName[0] == '@'
+ if rightIsGroup:
+ if leftIsGroup and leftGroupNames is None:
+ leftGroupNames = [leftName]
+ if rightName in groups:
+ right2[rightName] = kerningValue
+ rightGroupNamesAndValues.append((groups[rightName], rightName, kerningValue))
+ else:
+ stats.removedGroups.add(rightName)
+ else:
+ if rightName in srcToDstMap:
+ rightName2 = srcToDstMap[rightName]
+ stats.renamedGlyphs[rightName] = rightName2
+ rightName = rightName2
+ if rightName in dstGlyphNames:
+ right2[rightName] = kerningValue
+ if leftIsGroup:
+ rightGroupNamesAndValues.append(([rightName], '', kerningValue))
+ else:
+ stats.removedGlyphs.add(rightName)
+
+ if len(right2):
+ kerning2[leftName] = right2
+
+ # update groupPairs
+      lgroupname = leftName if leftIsGroup else ''
+ if leftIsGroup:
+ for lname in leftGroupNames:
+ kPrefix = lname + '+'
+ for rnames, rgroupname, kernv in rightGroupNamesAndValues:
+ for rname in rnames:
+ k = kPrefix + rname
+ v = (lgroupname, rgroupname, kernv)
+ if k in groupPairs:
+ raise Exception('duplicate group pair %s: %r and %r' % (k, groupPairs[k], v))
+ groupPairs[k] = v
+
+ elif leftIsGroup:
+ stats.removedGroups.add(leftName)
+ else:
+ stats.removedGlyphs.add(leftName)
+
+ # print('groupPairs:', groupPairs)
+
+ # remove individual pairs that are already represented through groups
+ kerning = kerning2
+ kerning2 = {}
+ for leftName, right in kerning.items():
+ leftIsGroup = leftName[0] == '@'
+ # leftNames = groups[leftName] if leftIsGroup else [leftName]
+
+ if not leftIsGroup:
+ right2 = {}
+ for rightName, kernVal in right.iteritems():
+ rightIsGroup = rightName[0] == '@'
+ if not rightIsGroup:
+ k = leftName + '+' + rightName
+ if k in groupPairs:
+ groupPair = groupPairs[k]
+ print(('simplify individual pair %r: kern %r (individual) -> %r (group)') % (
+ k, kernVal, groupPair[2]))
+ stats.simplifiedKerningPairs.add(k)
+ else:
+ right2[rightName] = kernVal
+ else:
+ right2[rightName] = kernVal
+ else:
+ # TODO, probably
+ right2 = right
+
+ kerning2[leftName] = right2
+
+ print('Writing', filename)
+ if not dryRun:
+ plistlib.writePlist(kerning2, filename)
+
+ return kerning2
+
+
+def loadJSONCharMap(filename):
+ m = None
+ if filename == '-':
+ m = json.load(sys.stdin)
+ else:
+ with open(filename, 'r') as f:
+ m = json.load(f)
+ if not isinstance(m, dict):
+ raise Exception('json root is not a dict')
+  # JSON object keys are always strings, so convert them to integer code points.
+  m2 = {}
+  for k, v in m.iteritems():
+    try:
+      k = int(k)
+    except ValueError:
+      raise Exception('json dict key is not a number')
+    if not isinstance(v, basestring):
+      raise Exception('json dict value is not a string')
+    m2[k] = v
+  return m2
+
+
+class Stats:
+ def __init__(self):
+ self.removedGroups = set()
+ self.removedGlyphs = set()
+ self.simplifiedKerningPairs = set()
+ self.renamedGlyphs = {}
+
+
+def configFindResFile(config, basedir, name):
+ fn = os.path.join(basedir, config.get("res", name))
+ if not os.path.isfile(fn):
+ basedir = os.path.dirname(basedir)
+ fn = os.path.join(basedir, config.get("res", name))
+ if not os.path.isfile(fn):
+ fn = None
+ return fn
+
+
+def main():
+ jsonSchemaDescr = '{[unicode:int]: glyphname:string, ...}'
+
+ argparser = ArgumentParser(
+ description='Rename glyphnames in UFO kerning and remove unused groups and glyphnames.')
+
+ argparser.add_argument(
+ '-dry', dest='dryRun', action='store_const', const=True, default=False,
+ help='Do not modify anything, but instead just print what would happen.')
+
+ argparser.add_argument(
+ '-no-stats', dest='noStats', action='store_const', const=True, default=False,
+ help='Do not print statistics at the end.')
+
+ argparser.add_argument(
+ '-save-stats', dest='saveStatsPath', metavar='<file>', type=str,
+ help='Write detailed statistics to JSON file.')
+
+ argparser.add_argument(
+ '-src-json', dest='srcJSONFile', metavar='<file>', type=str,
+ help='JSON file to read glyph names from.'+
+ ' Expected schema: ' + jsonSchemaDescr + ' (e.g. {2126: "Omega"})')
+
+ argparser.add_argument(
+ '-src-font', dest='srcFontFile', metavar='<file>', type=str,
+ help='TrueType or OpenType font to read glyph names from.')
+
+ argparser.add_argument(
+ 'dstFontsPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts to update')
+
+ args = argparser.parse_args()
+ dryRun = args.dryRun
+
+ if args.srcJSONFile and args.srcFontFile:
+ argparser.error('Both -src-json and -src-font specified -- please provide only one.')
+
+ # Strip trailing slashes from font paths
+ args.dstFontsPaths = [s.rstrip('/ ') for s in args.dstFontsPaths]
+
+ # Load source char map
+ srcCharMap = None
+ if args.srcJSONFile:
+ try:
+ srcCharMap = loadJSONCharMap(args.srcJSONFile)
+ except Exception as err:
+ argparser.error('Invalid JSON: Expected schema %s (%s)' % (jsonSchemaDescr, err))
+ elif args.srcFontFile:
+ srcCharMap = getTTCharMap(args.srcFontFile.rstrip('/ ')) # -> { 2126: 'Omegagreek', ...}
+ else:
+ argparser.error('No source provided (-src-* argument missing)')
+ if len(srcCharMap) == 0:
+ print('Empty character map', file=sys.stderr)
+ sys.exit(1)
+
+ # Find project source dir
+ srcDir = ''
+ for dstFontPath in args.dstFontsPaths:
+ s = os.path.dirname(dstFontPath)
+ if not srcDir:
+ srcDir = s
+ elif srcDir != s:
+ raise Exception('All <ufofile>s must be rooted in the same directory')
+
+ # Load font project config
+ # load fontbuild configuration
+ config = RawConfigParser(dict_type=OrderedDict)
+ configFilename = os.path.join(srcDir, 'fontbuild.cfg')
+ config.read(configFilename)
+ diacriticsFile = configFindResFile(config, srcDir, 'diacriticfile')
+
+ for dstFontPath in args.dstFontsPaths:
+ dstFont = OpenFont(dstFontPath)
+ dstCharMap = dstFont.getCharacterMapping() # -> { 2126: [ 'Omega', ...], ...}
+ dstRevCharMap = revCharMap(dstCharMap) # { 'Omega': 2126, ...}
+ srcToDstMap = getGlyphNameDifferenceMap(srcCharMap, dstCharMap, dstRevCharMap)
+
+ stats = Stats()
+
+ groups, glyphToGroups = fixupGroups(dstFontPath, dstRevCharMap, srcToDstMap, dryRun, stats)
+ fixupKerning(dstFontPath, dstRevCharMap, srcToDstMap, groups, glyphToGroups, dryRun, stats)
+
+ # stats
+ if args.saveStatsPath or not args.noStats:
+ if not args.noStats:
+ print('stats for %s:' % dstFontPath)
+ print(' Deleted %d groups and %d glyphs.' % (
+ len(stats.removedGroups), len(stats.removedGlyphs)))
+ print(' Renamed %d glyphs.' % len(stats.renamedGlyphs))
+ print(' Simplified %d kerning pairs.' % len(stats.simplifiedKerningPairs))
+ if args.saveStatsPath:
+ statsObj = {
+        'deletedGroups': sorted(stats.removedGroups),
+        'deletedGlyphs': sorted(stats.removedGlyphs),
+        'simplifiedKerningPairs': sorted(stats.simplifiedKerningPairs),
+        'renamedGlyphs': stats.renamedGlyphs,
+ }
+ f = sys.stdout
+ try:
+ if args.saveStatsPath != '-':
+ f = open(args.saveStatsPath, 'w')
+ print('Writing stats to', args.saveStatsPath)
+        json.dump(statsObj, f, indent=2, separators=(',', ': '))
+ finally:
+ if f is not sys.stdout:
+ f.close()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/misc/tools/fontinfo.py b/misc/tools/fontinfo.py
new file mode 100755
index 000000000..0f406a14d
--- /dev/null
+++ b/misc/tools/fontinfo.py
@@ -0,0 +1,506 @@
+#!/usr/bin/env python
+# encoding: utf8
+#
+# Generates JSON-encoded information about fonts
+#
+import os
+import sys
+import argparse
+import json
+import re
+from base64 import b64encode
+
+from fontTools import ttLib
+from fontTools.misc import sstruct
+from fontTools.ttLib.tables._h_e_a_d import headFormat
+from fontTools.ttLib.tables._h_h_e_a import hheaFormat
+from fontTools.ttLib.tables._m_a_x_p import maxpFormat_0_5, maxpFormat_1_0_add
+from fontTools.ttLib.tables._p_o_s_t import postFormat
+from fontTools.ttLib.tables.O_S_2f_2 import OS2_format_1, OS2_format_2, OS2_format_5, panoseFormat
+from fontTools.ttLib.tables._m_e_t_a import table__m_e_t_a
+# from robofab.world import world, RFont, RGlyph, OpenFont, NewFont
+# from robofab.objects.objectsRF import RFont, RGlyph, OpenFont, NewFont, RContour
+
+_NAME_IDS = {}
+
+
+panoseWeights = [
+ 'Any', # 0
+ 'No Fit', # 1
+ 'Very Light', # 2
+ 'Light', # 3
+ 'Thin', # 4
+ 'Book', # 5
+ 'Medium', # 6
+ 'Demi', # 7
+ 'Bold', # 8
+ 'Heavy', # 9
+ 'Black', # 10
+ 'Extra Black', # 11
+]
+
+panoseProportion = [
+ 'Any', # 0
+ 'No fit', # 1
+ 'Old Style/Regular', # 2
+ 'Modern', # 3
+ 'Even Width', # 4
+ 'Extended', # 5
+ 'Condensed', # 6
+ 'Very Extended', # 7
+ 'Very Condensed', # 8
+ 'Monospaced', # 9
+]
+
+os2WidthClass = [
+ None,
+ 'Ultra-condensed', # 1
+ 'Extra-condensed', # 2
+ 'Condensed', # 3
+ 'Semi-condensed', # 4
+ 'Medium (normal)', # 5
+ 'Semi-expanded', # 6
+ 'Expanded', # 7
+ 'Extra-expanded', # 8
+ 'Ultra-expanded', # 9
+]
+
+os2WeightClass = {
+ 100: 'Thin',
+ 200: 'Extra-light (Ultra-light)',
+ 300: 'Light',
+ 400: 'Normal (Regular)',
+ 500: 'Medium',
+ 600: 'Semi-bold (Demi-bold)',
+ 700: 'Bold',
+ 800: 'Extra-bold (Ultra-bold)',
+ 900: 'Black (Heavy)',
+}
+
+
+def num(s):
+ return int(s) if s.find('.') == -1 else float(s)
+
+
+def tableNamesToDict(table, names):
+ t = {}
+ for name in names:
+ if name.find('reserved') == 0:
+ continue
+ t[name] = getattr(table, name)
+ return t
+
+
+def sstructTableToDict(table, format):
+ _, names, _ = sstruct.getformat(format)
+ return tableNamesToDict(table, names)
+
+
+OUTPUT_TYPE_COMPLETE = 'complete'
+OUTPUT_TYPE_GLYPHLIST = 'glyphlist'
+
+
+GLYPHS_TYPE_UNKNOWN = '?'
+GLYPHS_TYPE_TT = 'tt'
+GLYPHS_TYPE_CFF = 'cff'
+
+def getGlyphsType(tt):
+ if 'CFF ' in tt:
+ return GLYPHS_TYPE_CFF
+ elif 'glyf' in tt:
+ return GLYPHS_TYPE_TT
+ return GLYPHS_TYPE_UNKNOWN
+
+
+class GlyphInfo:
+ def __init__(self, g, name, unicodes, type, glyphTable):
+ self._type = type # GLYPHS_TYPE_*
+ self._glyphTable = glyphTable
+
+ self.name = name
+ self.width = g.width
+ self.lsb = g.lsb
+ self.unicodes = unicodes
+
+ if g.height is not None:
+ self.tsb = g.tsb
+ self.height = g.height
+ else:
+ self.tsb = 0
+ self.height = 0
+
+ self.numContours = 0
+ self.contoursBBox = (0,0,0,0) # xMin, yMin, xMax, yMax
+ self.hasHints = False
+
+ if self._type is GLYPHS_TYPE_CFF:
+ self._addCFFInfo()
+ elif self._type is GLYPHS_TYPE_TT:
+ self._addTTInfo()
+
+ def _addTTInfo(self):
+ g = self._glyphTable[self.name]
+ self.numContours = g.numberOfContours
+ if g.numberOfContours:
+      self.contoursBBox = (g.xMin,g.yMin,g.xMax,g.yMax)
+ self.hasHints = hasattr(g, "program")
+
+ def _addCFFInfo(self):
+ # TODO: parse CFF dict tree
+ pass
+
+ @classmethod
+ def structKeys(cls, type):
+ v = [
+ 'name',
+ 'unicodes',
+ 'width',
+ 'lsb',
+ 'height',
+ 'tsb',
+ 'hasHints',
+ ]
+ if type is GLYPHS_TYPE_TT:
+ v += (
+ 'numContours',
+ 'contoursBBox',
+ )
+ return v
+
+ def structValues(self):
+ v = [
+ self.name,
+ self.unicodes,
+ self.width,
+ self.lsb,
+ self.height,
+ self.tsb,
+ self.hasHints,
+ ]
+ if self._type is GLYPHS_TYPE_TT:
+ v += (
+ self.numContours,
+ self.contoursBBox,
+ )
+ return v
+
+
+# exported convenience function
+def GenGlyphList(font, withGlyphs=None):
+ if isinstance(font, str):
+ font = ttLib.TTFont(font)
+  return genGlyphsInfo(font, OUTPUT_TYPE_GLYPHLIST, withGlyphs=withGlyphs)
+
+
+def genGlyphsInfo(tt, outputType, glyphsType=GLYPHS_TYPE_UNKNOWN, glyphsTable=None, withGlyphs=None):
+ unicodeMap = {}
+
+ glyphnameFilter = None
+ if isinstance(withGlyphs, str):
+ glyphnameFilter = withGlyphs.split(',')
+
+ if 'cmap' in tt:
+ # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6cmap.html
+ bestCodeSubTable = None
+ bestCodeSubTableFormat = 0
+ for st in tt['cmap'].tables:
+ if st.platformID == 0: # 0=unicode, 1=mac, 2=(reserved), 3=microsoft
+ if st.format > bestCodeSubTableFormat:
+ bestCodeSubTable = st
+ bestCodeSubTableFormat = st.format
+    if bestCodeSubTable is not None:
+      for cp, glyphname in bestCodeSubTable.cmap.items():
+        if glyphname in unicodeMap:
+          unicodeMap[glyphname].append(cp)
+        else:
+          unicodeMap[glyphname] = [cp]
+
+ glyphValues = []
+
+ glyphnames = tt.getGlyphOrder() if glyphnameFilter is None else glyphnameFilter
+
+ if outputType is OUTPUT_TYPE_GLYPHLIST:
+ glyphValues = []
+ for glyphname in glyphnames:
+ v = [glyphname]
+ if glyphname in unicodeMap:
+ v += unicodeMap[glyphname]
+ glyphValues.append(v)
+ return glyphValues
+
+ glyphset = tt.getGlyphSet(preferCFF=glyphsType is GLYPHS_TYPE_CFF)
+
+ for glyphname in glyphnames:
+ unicodes = unicodeMap[glyphname] if glyphname in unicodeMap else []
+ try:
+ g = glyphset[glyphname]
+ except KeyError:
+ raise Exception('no such glyph "'+glyphname+'"')
+ gi = GlyphInfo(g, glyphname, unicodes, glyphsType, glyphsTable)
+ glyphValues.append(gi.structValues())
+
+ return {
+ 'keys': GlyphInfo.structKeys(glyphsType),
+ 'values': glyphValues,
+ }
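+  # In the complete output mode the returned dict has the shape (illustrative):
+  #   {'keys': ['name', 'unicodes', 'width', ...], 'values': [['A', [65], ...], ...]}
+  # i.e. one positional value list per glyph, ordered like GlyphInfo.structKeys().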
+
+
+def copyDictEntry(srcD, srcName, dstD, dstName):
+ try:
+ dstD[dstName] = srcD[srcName]
+ except:
+ pass
+
+
+def addCFFFontInfo(tt, info, cffTable):
+ d = cffTable.rawDict
+
+ nameDict = None
+ if 'name' not in info:
+ nameDict = {}
+ info['name'] = nameDict
+ else:
+ nameDict = info['name']
+
+ copyDictEntry(d, 'Weight', nameDict, 'weight')
+ copyDictEntry(d, 'version', nameDict, 'version')
+
+
+def genFontInfo(fontpath, outputType, withGlyphs=True):
+ tt = ttLib.TTFont(fontpath) # lazy=True
+ info = {
+ 'id': fontpath,
+ }
+
+ # for tableName in tt.keys():
+ # print 'table', tableName
+
+ nameDict = {}
+ if 'name' in tt:
+ nameDict = {}
+ for rec in tt['name'].names:
+ k = _NAME_IDS[rec.nameID] if rec.nameID in _NAME_IDS else ('#%d' % rec.nameID)
+ nameDict[k] = rec.toUnicode()
+ if 'fontId' in nameDict:
+ info['id'] = nameDict['fontId']
+
+ if 'postscriptName' in nameDict:
+ info['name'] = nameDict['postscriptName']
+ elif 'familyName' in nameDict:
+ info['name'] = nameDict['familyName'].replace(' ', '')
+ if 'subfamilyName' in nameDict:
+ info['name'] += '-' + nameDict['subfamilyName'].replace(' ', '')
+
+ if 'version' in nameDict:
+ version = nameDict['version']
+ v = re.split(r'[\s;]+', version)
+ if v and len(v) > 0:
+ version = v[0]
+ info['version'] = version
+
+ if outputType is not OUTPUT_TYPE_GLYPHLIST:
+ if len(nameDict):
+ info['names'] = nameDict
+
+ if 'head' in tt:
+ head = sstructTableToDict(tt['head'], headFormat)
+ if 'macStyle' in head:
+ s = []
+ v = head['macStyle']
+ if isinstance(v, int):
+ if v & 0b00000001: s.append('Bold')
+ if v & 0b00000010: s.append('Italic')
+ if v & 0b00000100: s.append('Underline')
+ if v & 0b00001000: s.append('Outline')
+ if v & 0b00010000: s.append('Shadow')
+ if v & 0b00100000: s.append('Condensed')
+ if v & 0b01000000: s.append('Extended')
+ head['macStyle_raw'] = head['macStyle']
+ head['macStyle'] = s
+ info['head'] = head
+
+ if 'hhea' in tt:
+ info['hhea'] = sstructTableToDict(tt['hhea'], hheaFormat)
+
+ if 'post' in tt:
+ info['post'] = sstructTableToDict(tt['post'], postFormat)
+
+ if 'OS/2' in tt:
+ t = tt['OS/2']
+ os2 = None
+ if t.version == 1:
+ os2 = sstructTableToDict(t, OS2_format_1)
+ elif t.version in (2, 3, 4):
+ os2 = sstructTableToDict(t, OS2_format_2)
+ elif t.version == 5:
+ os2 = sstructTableToDict(t, OS2_format_5)
+ os2['usLowerOpticalPointSize'] /= 20
+ os2['usUpperOpticalPointSize'] /= 20
+
+ if 'panose' in os2:
+ panose = {}
+ for k,v in sstructTableToDict(os2['panose'], panoseFormat).iteritems():
+ if k[0:1] == 'b' and k[1].isupper():
+ k = k[1].lower() + k[2:]
+ # bFooBar => fooBar
+ if k == 'weight' and isinstance(v, int) and v < len(panoseWeights):
+ panose['weightName'] = panoseWeights[v]
+ elif k == 'proportion' and isinstance(v, int) and v < len(panoseProportion):
+ panose['proportionName'] = panoseProportion[v]
+ panose[k] = v
+ os2['panose'] = panose
+
+ if 'usWidthClass' in os2:
+ v = os2['usWidthClass']
+ if isinstance(v, int) and v > 0 and v < len(os2WidthClass):
+ os2['usWidthClassName'] = os2WidthClass[v]
+
+ if 'usWeightClass' in os2:
+ v = os2['usWeightClass']
+ name = os2WeightClass.get(os2['usWeightClass'])
+ if name:
+ os2['usWeightClassName'] = name
+
+ info['os/2'] = os2
+
+ if 'meta' in tt:
+ meta = {}
+ for k,v in tt['meta'].data.iteritems():
+ try:
+ v.decode('utf8')
+ meta[k] = v
+ except:
+ meta[k] = 'data:;base64,' + b64encode(v)
+ info['meta'] = meta
+
+ # if 'maxp' in tt:
+ # table = tt['maxp']
+ # _, names, _ = sstruct.getformat(maxpFormat_0_5)
+ # if table.tableVersion != 0x00005000:
+ # _, names_1_0, _ = sstruct.getformat(maxpFormat_1_0_add)
+ # names += names_1_0
+ # info['maxp'] = tableNamesToDict(table, names)
+
+ glyphsType = getGlyphsType(tt)
+ glyphsTable = None
+ if glyphsType is GLYPHS_TYPE_CFF:
+ cff = tt["CFF "].cff
+ cffDictIndex = cff.topDictIndex
+ if len(cffDictIndex) > 1:
+ sys.stderr.write(
+ 'warning: multi-font CFF table is unsupported. Only reporting first table.\n'
+ )
+ cffTable = cffDictIndex[0]
+ if outputType is not OUTPUT_TYPE_GLYPHLIST:
+ addCFFFontInfo(tt, info, cffTable)
+ elif glyphsType is GLYPHS_TYPE_TT:
+ glyphsTable = tt["glyf"]
+ # print 'glyphs type:', glyphsType, 'flavor:', tt.flavor, 'sfntVersion:', tt.sfntVersion
+
+  if (withGlyphs is not False or outputType is OUTPUT_TYPE_GLYPHLIST) and withGlyphs != '':
+ info['glyphs'] = genGlyphsInfo(tt, outputType, glyphsType, glyphsTable, withGlyphs)
+
+ # sys.exit(1)
+
+ return info
+
+
+# ————————————————————————————————————————————————————————————————————————
+# main
+
+def main():
+ argparser = argparse.ArgumentParser(description='Generate JSON describing fonts')
+
+ argparser.add_argument('-out', dest='outfile', metavar='<file>', type=str,
+ help='Write JSON to <file>. Writes to stdout if not specified')
+
+ argparser.add_argument('-pretty', dest='prettyJson', action='store_const',
+ const=True, default=False,
+ help='Generate pretty JSON with linebreaks and indentation')
+
+ argparser.add_argument('-with-all-glyphs', dest='withGlyphs', action='store_const',
+ const=True, default=False,
+ help='Include glyph information on all glyphs.')
+
+ argparser.add_argument('-with-glyphs', dest='withGlyphs', metavar='glyphname[,glyphname ...]',
+ type=str,
+ help='Include glyph information on specific glyphs')
+
+ argparser.add_argument('-as-glyphlist', dest='asGlyphList',
+ action='store_const', const=True, default=False,
+ help='Only generate a list of glyphs and their unicode mappings.')
+
+ argparser.add_argument('fontpaths', metavar='<path>', type=str, nargs='+',
+ help='TrueType or OpenType font files')
+
+ args = argparser.parse_args()
+
+ fonts = []
+ outputType = OUTPUT_TYPE_COMPLETE
+ if args.asGlyphList:
+ outputType = OUTPUT_TYPE_GLYPHLIST
+
+ n = 0
+ for fontpath in args.fontpaths:
+ if n > 0:
+ # workaround for a bug in fontTools.misc.sstruct where it keeps a global
+ # internal cache that mixes up values for different fonts.
+ reload(sstruct)
+ font = genFontInfo(fontpath, outputType=outputType, withGlyphs=args.withGlyphs)
+ fonts.append(font)
+ n += 1
+
+ ostream = sys.stdout
+ if args.outfile is not None:
+ ostream = open(args.outfile, 'w')
+
+
+ if args.prettyJson:
+ json.dump(fonts, ostream, sort_keys=True, indent=2, separators=(',', ': '))
+    ostream.write('\n')
+ else:
+ json.dump(fonts, ostream, separators=(',', ':'))
+
+
+ if ostream is not sys.stdout:
+ ostream.close()
+
+
+
+# "name" table name identifiers
+_NAME_IDS = {
+ # TrueType & OpenType
+ 0: 'copyright',
+ 1: 'familyName',
+ 2: 'subfamilyName',
+ 3: 'fontId',
+ 4: 'fullName',
+ 5: 'version', # e.g. 'Version <number>.<number>'
+ 6: 'postscriptName',
+ 7: 'trademark',
+ 8: 'manufacturerName',
+ 9: 'designer',
+ 10: 'description',
+ 11: 'vendorURL',
+ 12: 'designerURL',
+ 13: 'licenseDescription',
+ 14: 'licenseURL',
+ 15: 'RESERVED',
+ 16: 'typoFamilyName',
+ 17: 'typoSubfamilyName',
+ 18: 'macCompatibleFullName', # Mac only (FOND)
+ 19: 'sampleText',
+
+ # OpenType
+ 20: 'postScriptCIDName',
+ 21: 'wwsFamilyName',
+ 22: 'wwsSubfamilyName',
+  23: 'lightBackgroundPalette',
+  24: 'darkBackgroundPalette',
+ 25: 'variationsPostScriptNamePrefix',
+
+ # 26-255: Reserved for future expansion
+ # 256-32767: Font-specific names (layout features and settings, variations, track names, etc.)
+}
+
+if __name__ == '__main__':
+ main()
diff --git a/misc/tools/gen-glyphinfo.py b/misc/tools/gen-glyphinfo.py
new file mode 100755
index 000000000..4fdf73fae
--- /dev/null
+++ b/misc/tools/gen-glyphinfo.py
@@ -0,0 +1,263 @@
+#!/usr/bin/env python
+# encoding: utf8
+#
+# Grab http://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt
+#
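+# Illustrative invocation (paths are hypothetical, not part of this commit):
+#
+#   misc/tools/gen-glyphinfo.py -ucd UnicodeData.txt src/Inter-UI-Regular.ufo > glyphinfo.json
+#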
+from __future__ import print_function
+import os, sys, json, re
+from argparse import ArgumentParser
+from robofab.objects.objectsRF import OpenFont
+from collections import OrderedDict
+from unicode_util import parseUnicodeDataFile
+from ConfigParser import RawConfigParser
+
+
+BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
+
+
+# Regex matching "default" glyph names, like "uni2043" and "u01C5"
+uniNameRe = re.compile(r'^u(?:ni)([0-9A-F]{4,8})$')
+
+
+def unicodeForDefaultGlyphName(glyphName):
+ m = uniNameRe.match(glyphName)
+ if m is not None:
+ try:
+ return int(m.group(1), 16)
+ except:
+ pass
+ return None
+
+
+def loadAGL(filename): # -> { 2126: 'Omega', ... }
+ m = {}
+ with open(filename, 'r') as f:
+ for line in f:
+ # Omega;2126
+ # dalethatafpatah;05D3 05B2 # higher-level combinations; ignored
+ line = line.strip()
+ if len(line) > 0 and line[0] != '#':
+ name, uc = tuple([c.strip() for c in line.split(';')])
+ if uc.find(' ') == -1:
+ # it's a 1:1 mapping
+ m[int(uc, 16)] = name
+ return m
+
+
+def loadLocalNamesDB(fonts, agl, diacriticComps):
+ uc2names = None # { 2126: ['Omega', ...], ...}
+ allNames = OrderedDict() # {'Omega':True, ...}
+
+ for font in fonts:
+ _uc2names = font.getCharacterMapping() # { 2126: ['Omega', ...], ...}
+ if uc2names is None:
+ uc2names = _uc2names
+ else:
+ for uc, _names in _uc2names.iteritems():
+ names = uc2names.setdefault(uc, [])
+ for name in _names:
+ if name not in names:
+ names.append(name)
+ for g in font:
+ allNames.setdefault(g.name, True)
+
+ # agl { 2126: 'Omega', ...} -> { 'Omega': [2126, ...], ...}
+ aglName2Ucs = {}
+ for uc, name in agl.iteritems():
+ aglName2Ucs.setdefault(name, []).append(uc)
+
+ for glyphName, comp in diacriticComps.iteritems():
+ aglUCs = aglName2Ucs.get(glyphName)
+ if aglUCs is None:
+ uc = unicodeForDefaultGlyphName(glyphName)
+ if uc is not None:
+ glyphName2 = agl.get(uc)
+ if glyphName2 is not None:
+ glyphName = glyphName2
+ names = uc2names.setdefault(uc, [])
+ if glyphName not in names:
+ names.append(glyphName)
+ allNames.setdefault(glyphName, True)
+ else:
+ allNames.setdefault(glyphName, True)
+ for uc in aglUCs:
+ names = uc2names.get(uc, [])
+ if glyphName not in names:
+ names.append(glyphName)
+ uc2names[uc] = names
+
+ name2ucs = {} # { 'Omega': [2126, ...], ...}
+ for uc, names in uc2names.iteritems():
+ for name in names:
+ name2ucs.setdefault(name, set()).add(uc)
+
+ return uc2names, name2ucs, allNames
+
+
+def canonicalGlyphName(glyphName, uc2names):
+ uc = unicodeForDefaultGlyphName(glyphName)
+ if uc is not None:
+ names = uc2names.get(uc)
+ if names is not None and len(names) > 0:
+ return names[0]
+ return glyphName
+
+
+def parseGlyphComposition(composite):
+ c = composite.split("=")
+ d = c[1].split("/")
+ glyphName = d[0]
+ if len(d) == 1:
+ offset = [0, 0]
+ else:
+ offset = [int(i) for i in d[1].split(",")]
+ accentString = c[0]
+ accents = accentString.split("+")
+ baseName = accents.pop(0)
+ accentNames = [i.split(":") for i in accents]
+ return (glyphName, baseName, accentNames, offset)
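+# For a hypothetical diacritics.txt entry "A+grave:top=Agrave" the parser above returns
+# ('Agrave', 'A', [['grave', 'top']], [0, 0]); an explicit offset like "=Agrave/10,0"
+# would yield offset [10, 0].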
+
+
+def loadGlyphCompositions(filename): # { glyphName => (baseName, accentNames, offset) }
+ compositions = OrderedDict()
+ with open(filename, 'r') as f:
+ for line in f:
+ line = line.strip()
+ if len(line) > 0 and line[0] != '#':
+ glyphName, baseName, accentNames, offset = parseGlyphComposition(line)
+ compositions[glyphName] = (baseName, accentNames, offset)
+ return compositions
+
+
+def rgbaToCSSColor(r=0, g=0, b=0, a=1):
+ R,G,B = int(r * 255), int(g * 255), int(b * 255)
+ if a == 1:
+ return '#%02x%02x%02x' % (R,G,B)
+ else:
+ return 'rgba(%d,%d,%d,%f)' % (R,G,B,a)
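+# e.g. rgbaToCSSColor(1, 0, 0) -> '#ff0000'; rgbaToCSSColor(1, 0, 0, 0.5) -> 'rgba(255,0,0,0.500000)'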
+
+
+def unicodeName(cp):
+ if cp is not None and len(cp.name):
+ if cp.name[0] == '<':
+ return '[' + cp.categoryName + ']'
+ elif len(cp.name):
+    return cp.name
+
+
+def main():
+ argparser = ArgumentParser(
+ description='Generate info on name, unicodes and color mark for all glyphs')
+
+ argparser.add_argument(
+ '-ucd', dest='ucdFile', metavar='<file>', type=str,
+ help='UnicodeData.txt file from http://www.unicode.org/')
+
+ argparser.add_argument(
+ 'fontPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts to update')
+
+ args = argparser.parse_args()
+ markLibKey = 'com.typemytype.robofont.mark'
+
+ srcDir = os.path.join(BASEDIR, 'src')
+
+ # load fontbuild config
+ config = RawConfigParser(dict_type=OrderedDict)
+ configFilename = os.path.join(srcDir, 'fontbuild.cfg')
+ config.read(configFilename)
+ deleteNames = set()
+ for sectionName, value in config.items('glyphs'):
+ if sectionName == 'delete':
+ deleteNames = set(value.split())
+
+ fontPaths = []
+ for fontPath in args.fontPaths:
+ fontPath = fontPath.rstrip('/ ')
+    if 'regular' in fontPath or 'Regular' in fontPath:
+ fontPaths = [fontPath] + fontPaths
+ else:
+ fontPaths.append(fontPath)
+
+  fonts = [OpenFont(fontPath) for fontPath in fontPaths]
+
+ agl = loadAGL(os.path.join(srcDir, 'glyphlist.txt')) # { 2126: 'Omega', ... }
+ diacriticComps = loadGlyphCompositions(os.path.join(srcDir, 'diacritics.txt'))
+ uc2names, name2ucs, allNames = loadLocalNamesDB(fonts, agl, diacriticComps)
+
+ ucd = {}
+ if args.ucdFile:
+ ucd = parseUnicodeDataFile(args.ucdFile)
+
+ glyphorder = OrderedDict()
+ with open(os.path.join(os.path.dirname(args.fontPaths[0]), 'glyphorder.txt'), 'r') as f:
+ for name in f.read().splitlines():
+ if len(name) and name[0] != '#':
+ glyphorder[name] = True
+
+ for name in diacriticComps.iterkeys():
+ glyphorder[name] = True
+
+ glyphNames = glyphorder.keys()
+ visitedGlyphNames = set()
+ glyphs = []
+
+ for font in fonts:
+ for name, v in glyphorder.iteritems():
+ if name in deleteNames:
+ continue
+ if name in visitedGlyphNames:
+ continue
+
+ g = None
+ ucs = []
+ try:
+ g = font[name]
+ ucs = g.unicodes
+ except:
+ ucs = name2ucs.get(name)
+ if ucs is None:
+ continue
+
+ color = None
+ if g is not None and markLibKey in g.lib:
+ # TODO: translate from (r,g,b,a) to #RRGGBB (skip A)
+ rgba = g.lib[markLibKey]
+ if isinstance(rgba, list) or isinstance(rgba, tuple):
+ color = rgbaToCSSColor(*rgba)
+ elif name in diacriticComps:
+ color = '<derived>'
+
+ # name[, unicode[, unicodeName[, color]]]
+ if len(ucs):
+ for uc in ucs:
+ ucName = unicodeName(ucd.get(uc))
+
+ if not ucName and uc >= 0xE000 and uc <= 0xF8FF:
+ ucName = '[private use %04X]' % uc
+
+ if color:
+ glyph = [name, uc, ucName, color]
+ elif ucName:
+ glyph = [name, uc, ucName]
+ else:
+ glyph = [name, uc]
+
+ glyphs.append(glyph)
+ else:
+ glyph = [name, None, None, color] if color else [name]
+ glyphs.append(glyph)
+
+ visitedGlyphNames.add(name)
+
+ print('{"glyphs":[')
+ prefix = ' '
+ for g in glyphs:
+ print(prefix + json.dumps(g))
+ if prefix == ' ':
+ prefix = ', '
+ print(']}')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/misc/tools/gen-glyphorder.py b/misc/tools/gen-glyphorder.py
new file mode 100755
index 000000000..3d44fdd16
--- /dev/null
+++ b/misc/tools/gen-glyphorder.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+# encoding: utf8
+from __future__ import print_function
+import os, plistlib, sys
+from collections import OrderedDict
+from argparse import ArgumentParser
+from ConfigParser import RawConfigParser
+
+
+BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
+
+
+def parseGlyphComposition(composite):
+ c = composite.split("=")
+ d = c[1].split("/")
+ glyphName = d[0]
+ if len(d) == 1:
+ offset = [0, 0]
+ else:
+ offset = [int(i) for i in d[1].split(",")]
+ accentString = c[0]
+ accents = accentString.split("+")
+ baseName = accents.pop(0)
+ accentNames = [i.split(":") for i in accents]
+ return (glyphName, baseName, accentNames, offset)
+
+
+def loadGlyphCompositions(filename): # { glyphName => (baseName, accentNames, offset) }
+ compositions = OrderedDict()
+ with open(filename, 'r') as f:
+ for line in f:
+ line = line.strip()
+ if len(line) > 0 and line[0] != '#':
+ glyphName, baseName, accentNames, offset = parseGlyphComposition(line)
+ compositions[glyphName] = (baseName, accentNames, offset)
+ return compositions
+
+
+def main():
+ argparser = ArgumentParser(description='Generate glyph order list from UFO files')
+ argparser.add_argument('fontPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO files')
+ args = argparser.parse_args()
+
+ srcDir = os.path.join(BASEDIR, 'src')
+
+ # load fontbuild config
+ config = RawConfigParser(dict_type=OrderedDict)
+ config.read(os.path.join(srcDir, 'fontbuild.cfg'))
+ deleteNames = set(config.get('glyphs', 'delete').split())
+
+ fontPaths = []
+ for fontPath in args.fontPaths:
+    if 'regular' in fontPath or 'Regular' in fontPath:
+ fontPaths = [fontPath] + fontPaths
+ else:
+ fontPaths.append(fontPath)
+
+  fontPath0 = fontPaths[0]
+  libPlist = plistlib.readPlist(os.path.join(fontPath0, 'lib.plist'))
+ glyphOrder = libPlist['public.glyphOrder']
+ glyphNameSet = set(glyphOrder)
+
+ nameLists = []
+ indexOffset = 0
+ index = -1
+
+ for fontPath in fontPaths[1:]:
+ libPlist = plistlib.readPlist(os.path.join(fontPath, 'lib.plist'))
+ if 'public.glyphOrder' in libPlist:
+ names = libPlist['public.glyphOrder']
+ numInserted = 0
+ for i in range(len(names)):
+ name = names[i]
+ if name not in glyphNameSet:
+ if i > 0 and names[i-1] in glyphNameSet:
+ # find position of prev glyph
+ index = glyphOrder.index(names[i-1]) + 1
+ elif index != -1:
+ index += 1
+ else:
+ index = min(len(glyphOrder), i - indexOffset)
+
+ glyphOrder.insert(index, name)
+ numInserted += 1
+ glyphNameSet.add(name)
+
+ indexOffset += numInserted
+
+ # add any composed glyphs to the end
+ diacriticComps = loadGlyphCompositions(os.path.join(srcDir, 'diacritics.txt'))
+ for name in diacriticComps.keys():
+ if name not in glyphNameSet:
+ glyphOrder.append(name)
+
+ # filter out deleted glyphs
+ glyphOrder = [n for n in glyphOrder if n not in deleteNames]
+
+ print('\n'.join(glyphOrder))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/misc/tools/gen-kern.py b/misc/tools/gen-kern.py
new file mode 100644
index 000000000..e5a4c4875
--- /dev/null
+++ b/misc/tools/gen-kern.py
@@ -0,0 +1,37 @@
+
+def parseFeaList(s):
+ v = []
+ for e in s.split(' '):
+ if e.find('-') != -1:
+ (a,b) = e.split('-')
+ #print 'split: %s, %s' % (a,chr(ord(a)+1))
+ i = ord(a)
+ end = ord(b)+1
+ while i < end:
+ v.append(chr(i))
+ i += 1
+ else:
+ v.append(e)
+ return v
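+# Range entries expand by code point: e.g. parseFeaList('A-C zero') -> ['A', 'B', 'C', 'zero'].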
+
+UC_ROMAN = parseFeaList('A-Z AE AEacute Aacute Abreve Acircumflex Adieresis Agrave Alpha Alphatonos Amacron Aogonek Aogonek.NAV Aring Aringacute Atilde Beta Cacute Ccaron Ccedilla Ccircumflex Chi Dcaron Dcroat Delta Eacute Ebreve Ecaron Ecircumflex Edieresis Edotaccent Egrave Emacron Eng Eogonek Eogonek.NAV Epsilon Epsilontonos Eta Etatonos Eth Gamma Gbreve Gcircumflex Gcommaaccent Germandbls Hbar Hcircumflex IJ Iacute Ibreve Icircumflex Idieresis Igrave Imacron Iogonek Iota Iotadieresis Iotatonos Itilde Jcircumflex Kappa Kcommaaccent Lacute Lambda Lcaron Lcommaaccent Ldot Lslash Nacute Ncaron Ncommaaccent Ntilde Nu OE Oacute Obreve Ocircumflex Odieresis Ograve Ohungarumlaut Omacron Omega Omegatonos Omicron Omicrontonos Oogonek Oogonek.NAV Oslash Oslashacute Otilde Phi Pi Psi Racute Rcaron Rcommaaccent Rho Sacute Scaron Scedilla Scircumflex Sigma Tau Tbar Tcaron Theta Thorn Uacute Ubreve Ucircumflex Udieresis Ugrave Uhungarumlaut Umacron Uogonek Upsilon Upsilondieresis Upsilontonos Uring Utilde Wacute Wcircumflex Wdieresis Wgrave Xi Yacute Ycircumflex Ydieresis Ygrave Zacute Zcaron Zdotaccent Zeta ampersand uni010A uni0120 uni0162 uni0218 uni021A uni037F')
+LC_ROMAN = parseFeaList('a-z ae aeacute aacute abreve acircumflex adieresis agrave alpha alphatonos amacron aogonek aogonek.NAV aring aringacute atilde beta cacute ccaron ccedilla ccircumflex chi dcaron dcroat delta eacute ebreve ecaron ecircumflex edieresis edotaccent egrave emacron eng eogonek eogonek.NAV epsilon epsilontonos eta etatonos eth gamma gbreve gcircumflex gcommaaccent germandbls hbar hcircumflex ij iacute ibreve icircumflex idieresis igrave imacron iogonek iota iotadieresis iotatonos itilde jcircumflex kappa kcommaaccent lacute lambda lcaron lcommaaccent ldot lslash nacute ncaron ncommaaccent ntilde nu oe oacute obreve ocircumflex odieresis ograve ohungarumlaut omacron omega omegatonos omicron omicrontonos oogonek oogonek.NAV oslash oslashacute otilde phi pi psi racute rcaron rcommaaccent rho sacute scaron scedilla scircumflex sigma tau tbar tcaron theta thorn uacute ubreve ucircumflex udieresis ugrave uhungarumlaut umacron uogonek upsilon upsilondieresis upsilontonos uring utilde wacute wcircumflex wdieresis wgrave xi yacute ycircumflex ydieresis ygrave zacute zcaron zdotaccent zeta ampersand uni010B uni0121 uni0163 uni0219 uni021B uni03F3')
+
+UC_AF = parseFeaList('A-F')
+LC_AF = parseFeaList('a-f')
+
+LNUM = parseFeaList('zero one two three four five six seven eight nine')
+
+HEXNUM = LNUM + UC_AF + LC_AF
+ALL = UC_ROMAN + LC_ROMAN + LNUM
+
+glyphs = HEXNUM
+for g1 in glyphs:
+  print ' <key>%s</key><dict>' % g1
+  for g2 in glyphs:
+    print '  <key>%s</key><integer>-256</integer>' % g2
+  print ' </dict>'
+
+# print ', '.join(LC_ROMAN)
+
+
diff --git a/misc/tools/gen-metrics-and-svgs.py b/misc/tools/gen-metrics-and-svgs.py
new file mode 100755
index 000000000..ac100eb1c
--- /dev/null
+++ b/misc/tools/gen-metrics-and-svgs.py
@@ -0,0 +1,449 @@
+#!/usr/bin/env python
+# encoding: utf8
+#
+# Generate SVG glyphs, metrics and kerning data for the docs glyphs page from a UFO.
+#
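+# Illustrative invocation (the UFO path is hypothetical):
+#
+#   misc/tools/gen-metrics-and-svgs.py -scale 0.1 src/Inter-UI-Regular.ufo
+#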
+from __future__ import print_function
+import os, sys, argparse, re, json, plistlib
+from math import ceil, floor
+from robofab.objects.objectsRF import OpenFont
+from collections import OrderedDict
+from fontbuild.generateGlyph import generateGlyph
+from ConfigParser import RawConfigParser
+
+
+BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
+
+font = None # RFont
+ufopath = ''
+effectiveAscender = 0
+scale = 0.1
+agl = None
+
+
+def num(s):
+ return int(s) if s.find('.') == -1 else float(s)
+
+
+def parseGlyphComposition(composite):
+ c = composite.split("=")
+ d = c[1].split("/")
+ glyphName = d[0]
+ if len(d) == 1:
+ offset = [0, 0]
+ else:
+ offset = [int(i) for i in d[1].split(",")]
+ accentString = c[0]
+ accents = accentString.split("+")
+ baseName = accents.pop(0)
+ accentNames = [i.split(":") for i in accents]
+ return (glyphName, baseName, accentNames, offset)
+
+
+def loadGlyphCompositions(filename): # { glyphName => (baseName, accentNames, offset, rawline) }
+ compositions = OrderedDict()
+ with open(filename, 'r') as f:
+ for line in f:
+ line = line.strip()
+ if len(line) > 0 and line[0] != '#':
+ glyphName, baseName, accentNames, offset = parseGlyphComposition(line)
+ compositions[glyphName] = (baseName, accentNames, offset, line)
+ return compositions
+
+
+def loadAGL(filename): # -> { 2126: 'Omega', ... }
+ m = {}
+ with open(filename, 'r') as f:
+ for line in f:
+ # Omega;2126
+ # dalethatafpatah;05D3 05B2 # higher-level combinations; ignored
+ line = line.strip()
+ if len(line) > 0 and line[0] != '#':
+ name, uc = tuple([c.strip() for c in line.split(';')])
+ if uc.find(' ') == -1:
+ # it's a 1:1 mapping
+ m[int(uc, 16)] = name
+ return m
+
+def decomposeGlyph(font, glyph):
+ """Moves the components of a glyph to its outline."""
+ if len(glyph.components):
+ deepCopyContours(font, glyph, glyph, (0, 0), (1, 1))
+ glyph.clearComponents()
+
+
+def deepCopyContours(font, parent, component, offset, scale):
+ """Copy contours to parent from component, including nested components."""
+
+ for nested in component.components:
+ deepCopyContours(
+ font, parent, font[nested.baseGlyph],
+ (offset[0] + nested.offset[0], offset[1] + nested.offset[1]),
+ (scale[0] * nested.scale[0], scale[1] * nested.scale[1]))
+
+ if component == parent:
+ return
+ for contour in component:
+ contour = contour.copy()
+ contour.scale(scale)
+ contour.move(offset)
+ parent.appendContour(contour)
+
+
+
+def glyphToSVGPath(g, yMul):
+ commands = {'move':'M','line':'L','curve':'Y','offcurve':'X','offCurve':'X'}
+ svg = ''
+ contours = []
+
+ if len(g.components):
+ decomposeGlyph(g.getParent(), g) # mutates g
+
+ if len(g):
+ for c in range(len(g)):
+ contours.append(g[c])
+
+ for i in range(len(contours)):
+ c = contours[i]
+ contour = end = ''
+ curve = False
+ points = c.points
+ if points[0].type == 'offCurve':
+ points.append(points.pop(0))
+ if points[0].type == 'offCurve':
+ points.append(points.pop(0))
+ for x in range(len(points)):
+ p = points[x]
+ command = commands[str(p.type)]
+ if command == 'X':
+ if curve == True:
+ command = ''
+ else:
+ command = 'C'
+ curve = True
+ if command == 'Y':
+ command = ''
+ curve = False
+ if x == 0:
+ command = 'M'
+ if p.type == 'curve':
+ end = ' %g %g' % (p.x * scale, (p.y * yMul) * scale)
+ contour += ' %s%g %g' % (command, p.x * scale, (p.y * yMul) * scale)
+ svg += ' ' + contour + end + 'z'
+
+ if font.has_key('__svgsync'):
+ font.removeGlyph('__svgsync')
+ return svg.strip()
+
+
+def svgWidth(g):
+ box = g.box
+ xoffs = box[0]
+ width = box[2] - box[0]
+ return width, xoffs
+
+
+def glyphToSVG(g):
+ width, xoffs = svgWidth(g)
+
+ svg = '''
+<svg id="svg-%(name)s" xmlns="http://www.w3.org/2000/svg" width="%(width)d" height="%(height)d">
+<path d="%(glyphSVGPath)s" transform="translate(%(xoffs)g %(yoffs)g)"/>
+</svg>
+ ''' % {
+ 'name': g.name,
+ 'width': int(ceil(width * scale)),
+ 'height': int(ceil((effectiveAscender - font.info.descender) * scale)),
+ 'xoffs': -(xoffs * scale),
+ 'yoffs': effectiveAscender * scale,
+ # 'leftMargin': g.leftMargin * scale,
+ # 'rightMargin': g.rightMargin * scale,
+ 'glyphSVGPath': glyphToSVGPath(g, -1),
+ # 'ascender': font.info.ascender * scale,
+ # 'descender': font.info.descender * scale,
+ # 'baselineOffset': (font.info.unitsPerEm + font.info.descender) * scale,
+ # 'unitsPerEm': font.info.unitsPerEm,
+
+ # 'margin': [g.leftMargin * scale, g.rightMargin * scale],
+ }
+
+ # (width, advance, left, right)
+ info = (width, g.width, g.leftMargin, g.rightMargin)
+
+ return svg.strip(), info
+
+
+def stat(path):
+ try:
+ return os.stat(path)
+ except OSError as e:
+ return None
+
+
+def writeFile(file, s):
+ with open(file, 'w') as f:
+ f.write(s)
+
+
+def writeFileAndMkDirsIfNeeded(file, s):
+ try:
+ writeFile(file, s)
+ except IOError as e:
+ if e.errno == 2:
+ os.makedirs(os.path.dirname(file))
+ writeFile(file, s)
+
+
+
+def findGlifFile(glyphname):
+ # glyphname.glif
+ # glyphname_.glif
+ # glyphname__.glif
+ # glyphname___.glif
+ for underscoreCount in range(0, 5):
+ fn = os.path.join(ufopath, 'glyphs', glyphname + ('_' * underscoreCount) + '.glif')
+ st = stat(fn)
+ if st is not None:
+ return fn, st
+
+ if glyphname.find('.') != -1:
+ # glyph_.name.glif
+ # glyph__.name.glif
+ # glyph___.name.glif
+ for underscoreCount in range(0, 5):
+ nv = glyphname.split('.')
+ nv[0] = nv[0] + ('_' * underscoreCount)
+ ns = '.'.join(nv)
+ fn = os.path.join(ufopath, 'glyphs', ns + '.glif')
+ st = stat(fn)
+ if st is not None:
+ return fn, st
+
+ if glyphname.find('_') != -1:
+ # glyph_name.glif
+ # glyph_name_.glif
+ # glyph_name__.glif
+ # glyph__name.glif
+ # glyph__name_.glif
+ # glyph__name__.glif
+ # glyph___name.glif
+ # glyph___name_.glif
+ # glyph___name__.glif
+ for x in range(0, 4):
+ for y in range(0, 5):
+ ns = glyphname.replace('_', '__' + ('_' * x))
+ fn = os.path.join(ufopath, 'glyphs', ns + ('_' * y) + '.glif')
+ st = stat(fn)
+ if st is not None:
+ return fn, st
+
+ return ('', None)
+
+
+usedSVGNames = set()
+
+def genGlyph(glyphName, generateFrom, force):
+ # generateFrom = (baseName, accentNames, offset, rawline)
+ if generateFrom is not None:
+ generateGlyph(font, generateFrom[3], agl)
+
+ g = font.getGlyph(glyphName)
+
+ return glyphToSVG(g)
+
+
+def genGlyphIDs(glyphnames):
+ nameToIdMap = {}
+ idToNameMap = {}
+ nextId = 0
+ for name in glyphnames:
+ nameToIdMap[name] = nextId
+ idToNameMap[nextId] = name
+ nextId += 1
+ return nameToIdMap, idToNameMap
+
+
+def genKerningInfo(font, glyphnames, nameToIdMap):
+ kerning = font.kerning
+
+ # load groups
+ filename = os.path.join(font.path, 'groups.plist')
+ groups = plistlib.readPlist(filename)
+
+ pairs = []
+ for kt in kerning.keys():
+ v = kerning[kt]
+ leftname, rightname = kt
+ leftnames = []
+ rightnames = []
+
+ if leftname[0] == '@':
+ leftnames = groups[leftname]
+ else:
+ leftnames = [leftname]
+
+ if rightname[0] == '@':
+ rightnames = groups[rightname]
+ else:
+ rightnames = [rightname]
+
+ for lname in leftnames:
+ for rname in rightnames:
+ lnameId = nameToIdMap.get(lname)
+ rnameId = nameToIdMap.get(rname)
+        if lnameId is not None and rnameId is not None:
+ pairs.append([lnameId, rnameId, v])
+
+ # print('pairs: %r' % pairs)
+ return pairs
+
+
+def fmtJsonDict(d):
+ keys = sorted(d.keys())
+ s = '{'
+ delim = '\n'
+ delimNth = ',\n'
+ for k in keys:
+ v = d[k]
+ s += delim + json.dumps(str(k)) + ':' + json.dumps(v)
+ delim = delimNth
+ return s + '}'
+
+
+def fmtJsonList(d):
+ s = '['
+ delim = '\n'
+ delimNth = ',\n'
+  for t in d:
+ s += delim + json.dumps(t, separators=(',',':'))
+ delim = delimNth
+ return s + ']'
+
+# ————————————————————————————————————————————————————————————————————————
+# main
+
+argparser = argparse.ArgumentParser(description='Generate SVG glyphs from UFO')
+
+argparser.add_argument('-scale', dest='scale', metavar='<scale>', type=str,
+ default='',
+ help='Scale glyph. Should be a number in the range (0-1]. Defaults to %g' % scale)
+
+argparser.add_argument(
+ '-f', '-force', dest='force', action='store_const', const=True, default=False,
+  help='Generate glyphs even though they appear to be up to date.')
+
+argparser.add_argument('ufopath', metavar='<ufopath>', type=str,
+ help='Path to UFO packages')
+
+argparser.add_argument('glyphs', metavar='<glyphname>', type=str, nargs='*',
+ help='Only generate specific glyphs.')
+
+
+args = argparser.parse_args()
+
+srcDir = os.path.join(BASEDIR, 'src')
+
+# load fontbuild config
+config = RawConfigParser(dict_type=OrderedDict)
+configFilename = os.path.join(srcDir, 'fontbuild.cfg')
+config.read(configFilename)
+deleteNames = set()
+for sectionName, value in config.items('glyphs'):
+ if sectionName == 'delete':
+ deleteNames = set(value.split())
+
+if len(args.scale):
+ scale = float(args.scale)
+
+ufopath = args.ufopath.rstrip('/')
+
+font = OpenFont(ufopath)
+effectiveAscender = max(font.info.ascender, font.info.unitsPerEm)
+
+# print('\n'.join(font.keys()))
+# sys.exit(0)
+
+agl = loadAGL(os.path.join(srcDir, 'glyphlist.txt')) # { 2126: 'Omega', ... }
+
+deleteNames.add('.notdef')
+deleteNames.add('.null')
+
+glyphnames = args.glyphs if len(args.glyphs) else font.keys()
+glyphnameSet = set(glyphnames)
+generatedGlyphNames = set()
+
+diacriticComps = loadGlyphCompositions(os.path.join(srcDir, 'diacritics.txt'))
+for glyphName, comp in diacriticComps.iteritems():
+ if glyphName not in glyphnameSet:
+ generatedGlyphNames.add(glyphName)
+ glyphnames.append(glyphName)
+ glyphnameSet.add(glyphName)
+
+glyphnames = [gn for gn in glyphnames if gn not in deleteNames]
+glyphnames.sort()
+
+nameToIdMap, idToNameMap = genGlyphIDs(glyphnames)
+
+glyphMetrics = {}
+
+# jsonLines = []
+svgLines = []
+for glyphname in glyphnames:
+ generateFrom = None
+ if glyphname in generatedGlyphNames:
+ generateFrom = diacriticComps[glyphname]
+ svg, metrics = genGlyph(glyphname, generateFrom, force=args.force)
+ # metrics: (width, advance, left, right)
+ glyphMetrics[nameToIdMap[glyphname]] = metrics
+ svgLines.append(svg.replace('\n', ''))
+
+# print('{\n' + ',\n'.join(jsonLines) + '\n}')
+
+svgtext = '\n'.join(svgLines)
+# print(svgtext)
+
+glyphsHtmlFilename = os.path.join(BASEDIR, 'docs', 'glyphs', 'index.html')
+
+html = ''
+with open(glyphsHtmlFilename, 'r') as f:
+ html = f.read()
+
+startMarker = '<div id="svgs">'
+startPos = html.find(startMarker)
+
+endMarker = '</div><!--END-SVGS'
+endPos = html.find(endMarker, startPos + len(startMarker))
+
+relfilename = os.path.relpath(glyphsHtmlFilename, os.getcwd())
+
+if startPos == -1 or endPos == -1:
+ msg = 'Could not find `<div id="svgs">...</div><!--END-SVGS` in %s'
+ print(msg % relfilename, file=sys.stderr)
+ sys.exit(1)
+
+for name in glyphnames:
+ if name == 'zero.tnum.slash':
+ print('FOUND zero.tnum.slash')
+
+kerning = genKerningInfo(font, glyphnames, nameToIdMap)
+metaJson = '{\n'
+metaJson += '"nameids":' + fmtJsonDict(idToNameMap) + ',\n'
+metaJson += '"metrics":' + fmtJsonDict(glyphMetrics) + ',\n'
+metaJson += '"kerning":' + fmtJsonList(kerning) + '\n'
+metaJson += '}'
+# metaHtml = '<script>var fontMetaData = ' + metaJson + ';</script>'
+
+html = html[:startPos + len(startMarker)] + '\n' + svgtext + '\n' + html[endPos:]
+
+print('write', relfilename)
+with open(glyphsHtmlFilename, 'w') as f:
+ f.write(html)
+
+# JSON
+jsonFilename = os.path.join(BASEDIR, 'docs', 'glyphs', 'metrics.json')
+jsonFilenameRel = os.path.relpath(jsonFilename, os.getcwd())
+print('write', jsonFilenameRel)
+with open(jsonFilename, 'w') as f:
+ f.write(metaJson)
+
+metaJson
\ No newline at end of file
diff --git a/misc/tools/gen-num-pairs.js b/misc/tools/gen-num-pairs.js
new file mode 100644
index 000000000..9dbb92090
--- /dev/null
+++ b/misc/tools/gen-num-pairs.js
@@ -0,0 +1,10 @@
+
+const chars = '0 1 2 3 4 5 6 7 8 9 A B C D E F a b c d e f'.split(' ')
+
+for (let c1 of chars) {
+ let s = []
+ for (let c2 of chars) {
+ s.push(c1 + c2)
+ }
+ console.log(s.join(' '))
+}
diff --git a/misc/tools/gen-tnum.py b/misc/tools/gen-tnum.py
new file mode 100755
index 000000000..015201bcc
--- /dev/null
+++ b/misc/tools/gen-tnum.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+# encoding: utf8
+from __future__ import print_function
+import os, sys
+from argparse import ArgumentParser
+from robofab.objects.objectsRF import OpenFont
+from math import ceil, floor
+
+dryRun = False
+numNames = [
+ 'zero','one','two','three','four','five','six','seven','eight','nine'
+]
+
+
+def main():
+ argparser = ArgumentParser(
+ description='Generate tabular number glyphs from regular number glyphs')
+
+ argparser.add_argument(
+ '-dry', dest='dryRun', action='store_const', const=True, default=False,
+ help='Do not modify anything, but instead just print what would happen.')
+
+ argparser.add_argument(
+ 'fontPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts')
+
+ args = argparser.parse_args()
+ dryRun = args.dryRun
+
+ # Strip trailing slashes from font paths and iterate
+ for fontPath in [s.rstrip('/ ') for s in args.fontPaths]:
+ fontName = os.path.basename(fontPath)
+ font = OpenFont(fontPath)
+
+ # Find widest glyph
+ width = 0
+ for name in numNames:
+ g = font[name]
+ width = max(width, g.width)
+
+ print('[%s] tnums width:' % fontName, width)
+
+ # Create tnum glyphs
+ for name in numNames:
+ g = font[name]
+
+ tnum = font.newGlyph(name + '.tnum')
+ tnum.width = width
+
+ # calculate component x-offset
+ xoffs = 0
+ if g.width != width:
+ print('[%s] gen (adjust width)' % fontName, tnum.name)
+ # center shape, ignoring existing margins
+ # xMin, yMin, xMax, yMax = g.box
+ # graphicWidth = xMax - xMin
+ # leftMargin = round((width - graphicWidth) / 2)
+ # xoffs = leftMargin - g.leftMargin
+
+ # adjust margins
+ widthDelta = width - g.width
+        leftMargin = g.leftMargin + int(floor(widthDelta / 2.0))
+        rightMargin = g.rightMargin + int(ceil(widthDelta / 2.0))
+ xoffs = leftMargin - g.leftMargin
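+        # Example with hypothetical values: if the widest digit is 4 units wider than
+        # this one, widthDelta is 4, both margins grow by 2 and the component is
+        # shifted right by xoffs = 2.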
+ else:
+ print('[%s] gen (same width)' % fontName, tnum.name)
+
+ tnum.appendComponent(name, (xoffs, 0))
+
+ if dryRun:
+ print('[%s] save [dry run]' % fontName)
+ else:
+ print('[%s] save' % fontName)
+ font.save()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/misc/tools/glyf-props.py b/misc/tools/glyf-props.py
new file mode 100755
index 000000000..8783a422d
--- /dev/null
+++ b/misc/tools/glyf-props.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+# encoding: utf8
+from __future__ import print_function
+import os, sys
+from argparse import ArgumentParser
+from robofab.objects.objectsRF import OpenFont
+
+
+dryRun = False
+
+def renameProps(font, renames):
+ for g in font:
+ for currname, newname in renames:
+ if currname in g.lib:
+ if newname in g.lib:
+ raise Exception('property %r already exist in glyph %r' % (newname, g))
+ g.lib[newname] = g.lib[currname]
+ del g.lib[currname]
+
+
+def main():
+ argparser = ArgumentParser(
+ description='Operate on UFO glyf "lib" properties')
+
+ argparser.add_argument(
+ '-dry', dest='dryRun', action='store_const', const=True, default=False,
+ help='Do not modify anything, but instead just print what would happen.')
+
+ argparser.add_argument(
+ '-m', dest='renameProps', metavar='<currentName>=<newName>[,...]', type=str,
+ help='Rename properties')
+
+ argparser.add_argument(
+ 'fontPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts to update')
+
+ args = argparser.parse_args()
+ dryRun = args.dryRun
+
+ renames = []
+ if args.renameProps:
+ renames = [tuple(s.split('=')) for s in args.renameProps.split(',')]
+ # TODO: verify data structure
+ print('renaming properties:')
+ for rename in renames:
+ print(' %r => %r' % rename)
+
+ # Strip trailing slashes from font paths and iterate
+ for fontPath in [s.rstrip('/ ') for s in args.fontPaths]:
+ font = OpenFont(fontPath)
+
+ if len(renames):
+ print('Renaming properties in %s' % fontPath)
+ renameProps(font, renames)
+
+ if dryRun:
+ print('Saving changes to %s (dry run)' % fontPath)
+ if not dryRun:
+ print('Saving changes to %s' % fontPath)
+ font.save()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/misc/tools/glyphcheck.py b/misc/tools/glyphcheck.py
new file mode 100755
index 000000000..755de686f
--- /dev/null
+++ b/misc/tools/glyphcheck.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+# encoding: utf8
+import sys, argparse
+from fontTools import ttLib
+
+
+def main():
+ argparser = argparse.ArgumentParser(description='Check glyph names')
+
+ argparser.add_argument('fontfiles', metavar='<path>', type=str, nargs='+',
+ help='TrueType or OpenType font files')
+
+ args = argparser.parse_args()
+
+ nmissing = 0
+
+ matchnames = set()
+ for line in sys.stdin:
+ line = line.strip()
+ if len(line) > 0 and line[0] != '#':
+ for line2 in line.split():
+ line2 = line2.strip()
+ if len(line2) > 0:
+ matchnames.add(line2)
+
+ for fontfile in args.fontfiles:
+ font = ttLib.TTFont(fontfile)
+ glyphnames = set(font.getGlyphOrder())
+
+ # for name in glyphnames:
+ # if not name in matchnames:
+ # print('%s missing in input' % name)
+
+ for name in matchnames:
+ if not name in glyphnames:
+ print('%s missing in font' % name)
+ nmissing = nmissing + 1
+
+
+ if nmissing == 0:
+ print('all glyphs found')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/misc/tools/kernsample.py b/misc/tools/kernsample.py
new file mode 100755
index 000000000..7e1fbe0f8
--- /dev/null
+++ b/misc/tools/kernsample.py
@@ -0,0 +1,235 @@
+#!/usr/bin/env python
+# encoding: utf8
+from __future__ import print_function
+import os, sys, plistlib
+from collections import OrderedDict
+from argparse import ArgumentParser
+from robofab.objects.objectsRF import OpenFont
+
+RIGHT = 1
+LEFT = 2
+
+
+def mapGroups(groups): # => { glyphname => set(groupname, ...), ... }
+ m = OrderedDict()
+ for groupname, glyphnames in groups.iteritems():
+ for glyphname in glyphnames:
+ m.setdefault(glyphname, set()).add(groupname)
+ return m
+
+
+def fmtGlyphname(glyphname, glyph=None):
+ if glyph is not None and len(glyphname) == 1 and ord(glyphname[0]) == glyph.unicode:
+ # literal, e.g. "A"
+ return glyphname
+ else:
+    # named, e.g. "/Omega "
+ return '/' + glyphname + ' '
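+# e.g. fmtGlyphname('A', font['A']) -> 'A' (assuming A maps to U+0041), while
+# fmtGlyphname('Omega', font['Omega']) -> '/Omega ' (glyph-name escape plus a trailing space).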
+
+
+def printPairs(font, baseSide, baseSideGlyph, otherSideNames, args):
+ out = []
+ if args.formatAsUnicode:
+ base = '\\u%04X' % baseSideGlyph.unicode
+ for otherName in otherSideNames:
+ if otherName in font:
+ otherGlyph = font[otherName]
+ if otherGlyph.unicode is not None:
+ if baseSide == LEFT:
+ out.append('%s\\u%04X' % (base, otherGlyph.unicode))
+ else:
+ out.append('\\u%04X%s' % (otherGlyph.unicode, base))
+ else:
+ base = fmtGlyphname(baseSideGlyph.name, baseSideGlyph)
+ prefix_uc = ''
+ prefix_lc = ''
+ suffix_uc = ''
+ suffix_lc = ''
+
+ if args.noPrefixAutocase:
+ prefix_uc = args.prefix
+ prefix_lc = args.prefix
+ suffix_uc = args.suffix
+ suffix_lc = args.suffix
+ else:
+ if args.prefix and len(args.prefix) > 0:
+ s = unicode(args.prefix)
+ if s[0].isupper():
+ prefix_uc = args.prefix
+ prefix_lc = args.prefix.lower()
+ else:
+ prefix_uc = args.prefix.upper()
+ prefix_lc = args.prefix
+
+ if args.suffix and len(args.suffix) > 0:
+ s = unicode(args.suffix)
+ if s[0].isupper():
+ suffix_uc = args.suffix
+ suffix_lc = args.suffix.lower()
+ else:
+ suffix_uc = args.suffix.upper()
+ suffix_lc = args.suffix
+
+ for otherName in otherSideNames:
+ if otherName in font:
+ otherGlyph = None
+ if len(otherName) == 1:
+ otherGlyph = font[otherName]
+ prefix = prefix_lc
+ suffix = suffix_lc
+ if unicode(otherName[0]).isupper():
+ prefix = prefix_uc
+ suffix = suffix_uc
+ if baseSide == LEFT:
+ out.append('%s%s%s%s' % (
+ prefix, base, fmtGlyphname(otherName, otherGlyph), suffix
+ ))
+ else:
+ out.append('%s%s%s%s' % (
+ prefix, fmtGlyphname(otherName, otherGlyph), base, suffix
+ ))
+
+ print(' '.join(out))
+
+
+def samplesForGlyphnameR(font, groups, groupmap, kerning, glyphname, args):
+ rightGlyph = font[glyphname]
+ includeAll = args.includeAllInGroup
+ leftnames = set()
+
+ _addLeftnames(groups, kerning, glyphname, leftnames, includeAll)
+
+ if glyphname in groupmap:
+ for groupname in groupmap[glyphname]:
+ if groupname.find('_RIGHT_') != -1:
+ _addLeftnames(groups, kerning, groupname, leftnames, includeAll)
+
+ leftnames = sorted(leftnames)
+ printPairs(font, RIGHT, rightGlyph, leftnames, args)
+
+
+def _addLeftnames(groups, kerning, glyphname, leftnames, includeAll=True):
+ # kerning : { leftName => {rightName => kernVal} }
+ for leftname, kern in kerning.iteritems():
+ if glyphname in kern:
+ if leftname[0] == '@':
+ for leftname2 in groups[leftname]:
+ leftnames.add(leftname2)
+ if not includeAll:
+ # TODO: in this case, pick the one leftname that has the highest
+ # ranking in glyphorder
+ break
+ else:
+ leftnames.add(leftname)
+
+
+def samplesForGlyphnameL(font, groups, groupmap, kerning, glyphname, args):
+ leftGlyph = font[glyphname]
+ includeAll = args.includeAllInGroup
+ rightnames = set()
+
+ _addRightnames(groups, kerning, glyphname, rightnames, includeAll)
+
+ if glyphname in groupmap:
+ for groupname in groupmap[glyphname]:
+ if groupname.find('_LEFT_') != -1 or groupname.find('_RIGHT_') == -1:
+ _addRightnames(groups, kerning, groupname, rightnames, includeAll)
+
+ rightnames = sorted(rightnames)
+ printPairs(font, LEFT, leftGlyph, rightnames, args)
+
+
+def _addRightnames(groups, kerning, leftname, rightnames, includeAll=True):
+ if leftname in kerning:
+ for rightname in kerning[leftname]:
+ if rightname[0] == '@':
+ for rightname2 in groups[rightname]:
+ rightnames.add(rightname2)
+ if not includeAll:
+ # TODO: in this case, pick the one rightname that has the highest
+ # ranking in glyphorder
+ break
+ else:
+ rightnames.add(rightname)
+
+
+def main():
+ argparser = ArgumentParser(
+ description='Generate kerning samples by providing the left-hand side glyph')
+
+ argparser.add_argument(
+ '-u', dest='formatAsUnicode', action='store_const', const=True, default=False,
+ help='Format output as unicode escape sequences instead of glyphnames. ' +
+      'E.g. "\\u2126" instead of "/Omega"')
+
+ argparser.add_argument(
+ '-prefix', dest='prefix', metavar='<text>', type=str,
+    help='Text to prepend to each pair')
+
+ argparser.add_argument(
+ '-suffix', dest='suffix', metavar='<text>', type=str,
+ help='Text to append after each pair')
+
+ argparser.add_argument(
+ '-no-prefix-autocase', dest='noPrefixAutocase',
+ action='store_const', const=True, default=False,
+ help='Do not convert -prefix and -suffix to match case')
+
+ argparser.add_argument(
+ '-all-in-groups', dest='includeAllInGroup',
+ action='store_const', const=True, default=False,
+ help='Include all glyphs for groups rather than just the first glyph listed.')
+
+ argparser.add_argument(
+ '-left', dest='asLeft',
+ action='store_const', const=True, default=False,
+ help='Only include pairs where the glyphnames are on the left side.')
+
+ argparser.add_argument(
+ '-right', dest='asRight',
+ action='store_const', const=True, default=False,
+ help='Only include pairs where the glyphnames are on the right side.'+
+    ' When neither -left nor -right is provided, include all pairs.')
+
+ argparser.add_argument(
+ 'fontPath', metavar='<ufofile>', type=str, help='UFO font source')
+
+ argparser.add_argument(
+ 'glyphnames', metavar='<glyphname>', type=str, nargs='+',
+ help='Name of glyphs to generate samples for. '+
+ 'You can also provide a Unicode code point using the syntax "U+XXXX"')
+
+ args = argparser.parse_args()
+
+ font = OpenFont(args.fontPath)
+
+ groupsFilename = os.path.join(args.fontPath, 'groups.plist')
+ kerningFilename = os.path.join(args.fontPath, 'kerning.plist')
+
+ groups = plistlib.readPlist(groupsFilename) # { groupName => [glyphName] }
+ kerning = plistlib.readPlist(kerningFilename) # { leftName => {rightName => kernVal} }
+ groupmap = mapGroups(groups) # { glyphname => set(groupname, ...), ... }
+
+ if not args.asLeft and not args.asRight:
+ args.asLeft = True
+ args.asRight = True
+
+ # expand any unicode codepoints
+ glyphnames = []
+ for glyphname in args.glyphnames:
+ if len(glyphname) > 2 and glyphname[:2] == 'U+':
+ cp = int(glyphname[2:], 16)
+ ucmap = font.getCharacterMapping() # { 2126: ['Omega', ...], ...}
+ for glyphname2 in ucmap[cp]:
+ glyphnames.append(glyphname2)
+ else:
+ glyphnames.append(glyphname)
+
+ for glyphname in glyphnames:
+ if args.asLeft:
+ samplesForGlyphnameL(font, groups, groupmap, kerning, glyphname, args)
+ if args.asRight:
+ samplesForGlyphnameR(font, groups, groupmap, kerning, glyphname, args)
+
+
+if __name__ == '__main__':
+  main()
diff --git a/misc/tools/restore-diacritics-kerning.py b/misc/tools/restore-diacritics-kerning.py
new file mode 100644
index 000000000..6fd8c1601
--- /dev/null
+++ b/misc/tools/restore-diacritics-kerning.py
@@ -0,0 +1,431 @@
+#!/usr/bin/env python
+# encoding: utf8
+#
+# This script was used specifically to re-introduce a bunch of kerning values
+# that were lost in an old kerning cleanup that failed to account for
+# automatically composed glyphs defined in diacritics.txt.
+#
+# Steps:
+# 1. git diff 10e15297b 10e15297b^ > 10e15297b.diff
+# 2. edit 10e15297b.diff and remove the python script add
+# 3. fetch copies of kerning.plist and groups.plist from before the loss change
+# bold-groups.plist
+# bold-kerning.plist
+# regular-groups.plist
+# regular-kerning.plist
+# 4. run this script
+#
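+# Illustrative invocation for step 4 (the source-font path is hypothetical):
+#
+#   misc/tools/restore-diacritics-kerning.py old/Inter-UI-Regular.ttf 10e15297b.diff
+#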
+from __future__ import print_function
+import os, sys, plistlib, json
+from collections import OrderedDict
+from ConfigParser import RawConfigParser
+from argparse import ArgumentParser
+from fontTools import ttLib
+from robofab.objects.objectsRF import OpenFont
+
+
+srcFontPaths = ['src/Inter-UI-Regular.ufo', 'src/Inter-UI-Bold.ufo']
+
+
+def getTTGlyphList(font): # -> { 'Omega': [2126, ...], ... }
+ if isinstance(font, str):
+ font = ttLib.TTFont(font)
+
+ if not 'cmap' in font:
+ raise Exception('missing cmap table')
+
+ gl = {}
+ bestCodeSubTable = None
+ bestCodeSubTableFormat = 0
+
+ for st in font['cmap'].tables:
+ if st.platformID == 0: # 0=unicode, 1=mac, 2=(reserved), 3=microsoft
+ if st.format > bestCodeSubTableFormat:
+ bestCodeSubTable = st
+ bestCodeSubTableFormat = st.format
+
+ if bestCodeSubTable is not None:
+ for cp, glyphname in bestCodeSubTable.cmap.items():
+ if glyphname in gl:
+ gl[glyphname].append(cp)
+ else:
+ gl[glyphname] = [cp]
+
+ return gl, font
+
+
+def parseAGL(filename): # -> { 2126: 'Omega', ... }
+ m = {}
+ with open(filename, 'r') as f:
+ for line in f:
+ # Omega;2126
+ # dalethatafpatah;05D3 05B2 # higher-level combinations; ignored
+ line = line.strip()
+ if len(line) > 0 and line[0] != '#':
+ name, uc = tuple([c.strip() for c in line.split(';')])
+ if uc.find(' ') == -1:
+ # it's a 1:1 mapping
+ m[int(uc, 16)] = name
+ return m
+
+
+def parseGlyphComposition(composite):
+ c = composite.split("=")
+ d = c[1].split("/")
+ glyphName = d[0]
+ if len(d) == 1:
+ offset = [0, 0]
+ else:
+ offset = [int(i) for i in d[1].split(",")]
+ accentString = c[0]
+ accents = accentString.split("+")
+ baseName = accents.pop(0)
+ accentNames = [i.split(":") for i in accents]
+ return (glyphName, baseName, accentNames, offset)
+
+
+def loadGlyphCompositions(filename):
+ compositions = OrderedDict()
+ with open(filename, 'r') as f:
+ for line in f:
+ line = line.strip()
+ if len(line) > 0 and line[0] != '#':
+ glyphName, baseName, accentNames, offset = parseGlyphComposition(line)
+ compositions[glyphName] = (baseName, accentNames, offset)
+ return compositions
+
+
+def loadNamesFromDiff(diffFilename):
+ with open(diffFilename, 'r') as f:
+ diffLines = [s.strip() for s in f.read().splitlines() if s.startswith('+\t')]
+ diffLines = [s for s in diffLines if not s.startswith('<int')]
+ namesInDiff = set()
+ for s in diffLines:
+ if s.startswith('<int') or s.startswith('<arr') or s.startswith('</'):
+ continue
+ p = s.find('>')
+ if p != -1:
+ p2 = s.find('<', p+1)
+ if p2 != -1:
+ name = s[p+1:p2]
+ try:
+ int(name)
+ except:
+ if not name.startswith('@'):
+ namesInDiff.add(s[p+1:p2])
+ return namesInDiff
+
+
+def loadGroups(filename):
+ groups = plistlib.readPlist(filename)
+ nameMap = {} # { glyphName => set(groupName) }
+ for groupName, glyphNames in groups.iteritems():
+ for glyphName in glyphNames:
+ nameMap.setdefault(glyphName, set()).add(groupName)
+ return groups, nameMap
+
+
+def loadKerning(filename):
+ kerning = plistlib.readPlist(filename)
+ # <dict>
+ # <key>@KERN_LEFT_A</key>
+ # <dict>
+ # <key>@KERN_RIGHT_C</key>
+ # <integer>-96</integer>
+
+ leftIndex = {} # { glyph-name => <ref to plist right-hand side dict> }
+ rightIndex = {} # { glyph-name => [(left-hand-side-name, kernVal), ...] }
+ rightGroupIndex = {} # { group-name => [(left-hand-side-name, kernVal), ...] }
+
+ for leftName, right in kerning.iteritems():
+ if leftName[0] != '@':
+ leftIndex[leftName] = right
+
+ for rightName, kernVal in right.iteritems():
+ if rightName[0] != '@':
+ rightIndex.setdefault(rightName, []).append((leftName, kernVal))
+ else:
+ rightGroupIndex.setdefault(rightName, []).append((leftName, kernVal))
+
+ return kerning, leftIndex, rightIndex, rightGroupIndex
+
+
+def loadAltNamesDB(agl, fontFilename):
+ uc2names = {} # { 2126: ['Omega', ...], ...}
+ name2ucs = {} # { 'Omega': [2126, ...], ...}
+
+ name2ucs, _ = getTTGlyphList(fontFilename)
+ # -> { 'Omega': [2126, ...], ... }
+ for name, ucs in name2ucs.iteritems():
+ for uc in ucs:
+ uc2names.setdefault(uc, []).append(name)
+
+ for uc, name in agl.iteritems():
+ name2ucs.setdefault(name, []).append(uc)
+ uc2names.setdefault(uc, []).append(name)
+ # -> { 2126: 'Omega', ... }
+
+ return uc2names, name2ucs
+
+
+def loadLocalNamesDB(agl, diacriticComps): # { 2126: ['Omega', ...], ...}
+ uc2names = None
+
+ for fontPath in srcFontPaths:
+ font = OpenFont(fontPath)
+ if uc2names is None:
+ uc2names = font.getCharacterMapping() # { 2126: ['Omega', ...], ...}
+ else:
+ for uc, names in font.getCharacterMapping().iteritems():
+ names2 = uc2names.get(uc, [])
+ for name in names:
+ if name not in names2:
+ names2.append(name)
+ uc2names[uc] = names2
+
+ # agl { 2126: 'Omega', ...} -> { 'Omega': [2126, ...], ...}
+ aglName2Ucs = {}
+ for uc, name in agl.iteritems():
+ aglName2Ucs.setdefault(name, []).append(uc)
+
+ for glyphName, comp in diacriticComps.iteritems():
+ for uc in aglName2Ucs.get(glyphName, []):
+ names = uc2names.get(uc, [])
+ if glyphName not in names:
+ names.append(glyphName)
+ uc2names[uc] = names
+
+ name2ucs = {}
+ for uc, names in uc2names.iteritems():
+ for name in names:
+ name2ucs.setdefault(name, set()).add(uc)
+
+ return uc2names, name2ucs
+
+
+def _canonicalGlyphName(name, localName2ucs, localUc2Names, altName2ucs):
+ ucs = localName2ucs.get(name)
+ if ucs:
+ return name, list(ucs)[0]
+ ucs = altName2ucs.get(name)
+ if ucs:
+ for uc in ucs:
+ localNames = localUc2Names.get(uc)
+ if localNames and len(localNames):
+ return localNames[0], uc
+ return None, None
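+# Resolution order: if the name exists in the current (local) fonts, return it
+# together with one of its code points; otherwise look the name up among the
+# alternative (source-font/AGL) names and map its code point back to whatever
+# the local fonts call that glyph. (None, None) means the glyph is gone.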
+
+
+def main():
+ argparser = ArgumentParser(description='Restore lost kerning')
+
+ argparser.add_argument(
+ '-dry', dest='dryRun', action='store_const', const=True, default=False,
+ help='Do not modify anything, but instead just print what would happen.')
+
+ argparser.add_argument(
+ 'srcFont', metavar='<fontfile>', type=str,
+ help='TrueType, OpenType or UFO fonts to gather glyph info from')
+
+ argparser.add_argument(
+ 'diffFile', metavar='<diffile>', type=str, help='Diff file')
+
+ args = argparser.parse_args()
+
+ dryRun = args.dryRun
+
+ agl = parseAGL('src/glyphlist.txt')
+ diacriticComps = loadGlyphCompositions('src/diacritics.txt')
+
+ altUc2names, altName2ucs = loadAltNamesDB(agl, args.srcFont)
+ localUc2Names, localName2ucs = loadLocalNamesDB(agl, diacriticComps)
+
+ canonicalGlyphName = lambda name: _canonicalGlyphName(
+ name, localName2ucs, localUc2Names, altName2ucs)
+
+ deletedNames = loadNamesFromDiff(args.diffFile) # 10e15297b.diff
+ deletedDiacriticNames = OrderedDict()
+
+ for glyphName, comp in diacriticComps.iteritems():
+ if glyphName in deletedNames:
+ deletedDiacriticNames[glyphName] = comp
+
+
+ for fontPath in srcFontPaths:
+ addedGroupNames = set()
+
+ oldFilenamePrefix = 'regular'
+ if fontPath.find('Bold') != -1:
+ oldFilenamePrefix = 'bold'
+ oldGroups, oldNameToGroups = loadGroups(
+ oldFilenamePrefix + '-groups.plist')
+ oldKerning, oldLIndex, oldRIndex, oldRGroupIndex = loadKerning(
+ oldFilenamePrefix + '-kerning.plist')
+ # lIndex : { name => <ref to plist right-hand side dict> }
+ # rIndex : { name => [(left-hand-side-name, kernVal), ...] }
+
+ currGroupFilename = os.path.join(fontPath, 'groups.plist')
+ currKerningFilename = os.path.join(fontPath, 'kerning.plist')
+ currGroups, currNameToGroups = loadGroups(currGroupFilename)
+ currKerning, currLIndex, currRIndex, currRGroupIndex = loadKerning(currKerningFilename)
+
+ for glyphName, comp in deletedDiacriticNames.iteritems():
+ oldGroupMemberships = oldNameToGroups.get(glyphName)
+ localGlyphName, localUc = canonicalGlyphName(glyphName)
+
+ # if glyphName != 'dcaron':
+ # continue # XXX DEBUG
+
+ if localGlyphName is None:
+ # glyph no longer exists -- ignore
+ print('[IGNORE]', glyphName)
+ continue
+
+ if oldGroupMemberships:
+ # print('group', localGlyphName,
+ # '=>', localUc,
+ # 'in old group:', oldGroupMemberships, ', curr group:', currGroupMemberships)
+ for oldGroupName in oldGroupMemberships:
+ currGroup = currGroups.get(oldGroupName) # None|[glyphname, ...]
+ # print('GM ', localGlyphName, oldGroupName, len(currGroup) if currGroup else 0)
+ if currGroup is not None:
+ if localGlyphName not in currGroup:
+ # print('[UPDATE group]', oldGroupName, 'append', localGlyphName)
+ currGroup.append(localGlyphName)
+ else:
+ # group does not currently exist
+ if currNameToGroups.get(localGlyphName):
+ raise Exception('TODO: case where glyph is in some current groups, but not the ' +
+ 'original-named group')
+ print('[ADD group]', oldGroupName, '=> [', localGlyphName, ']')
+ currGroups[oldGroupName] = [localGlyphName]
+ addedGroupNames.add(oldGroupName)
+ # if oldGroupName in oldKerning:
+ # print('TODO: effects of oldGroupName being in oldKerning:',
+ # oldKerning[oldGroupName])
+ if oldGroupName in oldRGroupIndex:
+ print('TODO: effects of oldGroupName being in oldRGroupIndex:',
+ oldRGroupIndex[oldGroupName])
+
+ else: # if not oldGroupMemberships
+ ucs = localName2ucs.get(glyphName)
+ if not ucs:
+ raise Exception(
+ 'TODO non-group, non-local name ' + glyphName + ' -- lookup in alt names')
+
+ asLeft = oldLIndex.get(glyphName)
+ atRightOf = oldRIndex.get(glyphName)
+
+ # print('individual', glyphName,
+ # '=>', ', '.join([str(uc) for uc in ucs]),
+ # '\n as left:', asLeft is not None,
+ # '\n at right of:', atRightOf is not None)
+
+ if asLeft:
+ currKern = currKerning.get(localGlyphName)
+ if currKern is None:
+ rightValues = {}
+ for rightName, kernValue in asLeft.iteritems():
+ if rightName[0] == '@':
+ currGroup = currGroups.get(rightName)
+ if currGroup and localGlyphName not in currGroup:
+ rightValues[rightName] = kernValue
+ else:
+ localName, localUc = canonicalGlyphName(rightName)
+ if localName:
+ rightValues[localName] = kernValue
+ if len(rightValues) > 0:
+ print('[ADD currKerning]', localGlyphName, '=>', rightValues)
+ currKerning[localGlyphName] = rightValues
+
+ if atRightOf:
+ for parentLeftName, kernVal in atRightOf:
+ # print('atRightOf:', parentLeftName, kernVal)
+ if parentLeftName[0] == '@':
+ if parentLeftName in currGroups:
+ k = currKerning.get(parentLeftName)
+ if k:
+ if localGlyphName not in k:
+ print('[UPDATE currKerning g]',
+ parentLeftName, '+= {', localGlyphName, ':', kernVal, '}')
+ k[localGlyphName] = kernVal
+ else:
+ print('TODO: left-group is NOT in currKerning; left-group', parentLeftName)
+ else:
+ localParentLeftGlyphName, _ = canonicalGlyphName(parentLeftName)
+ if localParentLeftGlyphName:
+ k = currKerning.get(localParentLeftGlyphName)
+ if k:
+ if localGlyphName not in k:
+ print('[UPDATE currKerning i]',
+ localParentLeftGlyphName, '+= {', localGlyphName, ':', kernVal, '}')
+ k[localGlyphName] = kernVal
+ else:
+ print('[ADD currKerning i]',
+ localParentLeftGlyphName, '=> {', localGlyphName, ':', kernVal, '}')
+ currKerning[localParentLeftGlyphName] = {localGlyphName: kernVal}
+
+
+ for groupName in addedGroupNames:
+ print('————————————————————————————————————————————')
+ print('re-introduce group', groupName, 'to kerning')
+
+ oldRKern = oldKerning.get(groupName)
+ if oldRKern is not None:
+ newRKern = {}
+ for oldRightName, kernVal in oldRKern.iteritems():
+ if oldRightName[0] == '@':
+ if oldRightName in currGroups:
+ newRKern[oldRightName] = kernVal
+ else:
+ # Note: (oldRightName in addedGroupNames) should always be False here
+ # as we would have added it to currGroups already.
+ print('[DROP group]', oldRightName, kernVal)
+ if oldRightName in currGroups:
+ del currGroups[oldRightName]
+ else:
+ localGlyphName, _ = canonicalGlyphName(oldRightName)
+ if localGlyphName:
+ newRKern[localGlyphName] = kernVal
+ print('localGlyphName', localGlyphName)
+
+ if len(newRKern):
+ print('[ADD currKerning g]', groupName, newRKern)
+ currKerning[groupName] = newRKern
+
+ # oldRGroupIndex : { group-name => [(left-hand-side-name, kernVal), ...] }
+ oldLKern = oldRGroupIndex.get(groupName)
+ if oldLKern:
+ for oldRightName, kernVal in oldLKern:
+ if oldRightName[0] == '@':
+ if oldRightName in currGroups:
+ k = currKerning.get(oldRightName)
+ if k is not None:
+ print('[UPDATE kerning g]', oldRightName, '+= {', groupName, ':', kernVal, '}')
+ k[groupName] = kernVal
+ else:
+ currKerning[oldRightName] = {groupName: kernVal}
+ print('[ADD kerning g]', oldRightName, '= {', groupName, ':', kernVal, '}')
+ else:
+ localGlyphName, _ = canonicalGlyphName(oldRightName)
+ if localGlyphName:
+ k = currKerning.get(localGlyphName)
+ if k is not None:
+ print('[UPDATE kerning i]', localGlyphName, '+= {', groupName, ':', kernVal, '}')
+ k[groupName] = kernVal
+ else:
+ currKerning[localGlyphName] = {groupName: kernVal}
+ print('[ADD kerning i]', localGlyphName, '= {', groupName, ':', kernVal, '}')
+
+
+ print('Write', currGroupFilename)
+ if not dryRun:
+ plistlib.writePlist(currGroups, currGroupFilename)
+
+ print('Write', currKerningFilename)
+ if not dryRun:
+ plistlib.writePlist(currKerning, currKerningFilename)
+
+ # end: for fontPath
+
+main()
diff --git a/misc/tools/rewrite-glyphorder.py b/misc/tools/rewrite-glyphorder.py
new file mode 100755
index 000000000..3da0c1699
--- /dev/null
+++ b/misc/tools/rewrite-glyphorder.py
@@ -0,0 +1,305 @@
+#!/usr/bin/env python
+# encoding: utf8
+from __future__ import print_function
+import os, sys, plistlib, json, re
+from collections import OrderedDict
+from argparse import ArgumentParser
+from ConfigParser import RawConfigParser
+from fontTools import ttLib
+from robofab.objects.objectsRF import OpenFont
+
+
+# Regex matching "default" glyph names, like "uni2043" and "u01C5"
+uniNameRe = re.compile(r'^u(?:ni)([0-9A-F]{4,8})$')
+
+
+class PList:
+ def __init__(self, filename):
+ self.filename = filename
+ self.plist = None
+
+ def load(self):
+ self.plist = plistlib.readPlist(self.filename)
+
+ def save(self):
+ if self.plist is not None:
+ plistlib.writePlist(self.plist, self.filename)
+
+ def get(self, k, defaultValue=None):
+ if self.plist is None:
+ self.load()
+ return self.plist.get(k, defaultValue)
+
+ def __getitem__(self, k):
+ if self.plist is None:
+ self.load()
+ return self.plist[k]
+
+ def __setitem__(self, k, v):
+ if self.plist is None:
+ self.load()
+ self.plist[k] = v
+
+ def __delitem__(self, k):
+ if self.plist is None:
+ self.load()
+ del self.plist[k]
+
+
+def parseAGL(filename): # -> { 2126: 'Omega', ... }
+ m = {}
+ with open(filename, 'r') as f:
+ for line in f:
+ # Omega;2126
+ # dalethatafpatah;05D3 05B2 # higher-level combinations; ignored
+ line = line.strip()
+ if len(line) > 0 and line[0] != '#':
+ name, uc = tuple([c.strip() for c in line.split(';')])
+ if uc.find(' ') == -1:
+ # it's a 1:1 mapping
+ m[int(uc, 16)] = name
+ return m
+
+
+def revCharMap(ucToNames):
+ # {2126:['Omega','Omegagr']} -> {'Omega':2126, 'Omegagr':2126}
+ # {2126:'Omega'} -> {'Omega':2126}
+ m = {}
+ if len(ucToNames) == 0:
+ return m
+
+ lists = True
+ for v in ucToNames.itervalues():
+ lists = not isinstance(v, str)
+ break
+
+ if lists:
+ for uc, names in ucToNames.iteritems():
+ for name in names:
+ m[name] = uc
+ else:
+ for uc, name in ucToNames.iteritems():
+ m[name] = uc
+
+ return m
+
+
+def loadJSONGlyphOrder(jsonFilename):
+ gol = None
+ if jsonFilename == '-':
+ gol = json.load(sys.stdin)
+ else:
+ with open(jsonFilename, 'r') as f:
+ gol = json.load(f)
+ if not isinstance(gol, list):
+ raise Exception('expected [[string, int|null]]')
+ if len(gol) > 0:
+ for v in gol:
+ if not isinstance(v, list):
+ raise Exception('expected [[string, int|null]]')
+ break
+ return gol
+
+
+def loadTTGlyphOrder(font):
+ if isinstance(font, str):
+ font = ttLib.TTFont(font)
+
+ if not 'cmap' in font:
+ raise Exception('missing cmap table')
+
+ bestCodeSubTable = None
+ bestCodeSubTableFormat = 0
+
+ for st in font['cmap'].tables:
+ if st.platformID == 0: # 0=unicode, 1=mac, 2=(reserved), 3=microsoft
+ if st.format > bestCodeSubTableFormat:
+ bestCodeSubTable = st
+ bestCodeSubTableFormat = st.format
+
+ ucmap = {}
+ if bestCodeSubTable is not None:
+ for cp, glyphname in bestCodeSubTable.cmap.items():
+ ucmap[glyphname] = cp
+
+ gol = []
+ for name in font.getGlyphOrder():
+ gol.append((name, ucmap.get(name)))
+
+ return gol
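+# Illustrative result (hypothetical font): [('.notdef', None), ('space', 0x20),
+# ('A', 0x41), ...] -- glyph order taken from the font, with code points from the
+# highest-format Unicode cmap subtable (None when a glyph is unmapped).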
+
+
+def loadSrcGlyphOrder(jsonFilename, fontFilename): # -> [ ('Omegagreek', 2126|None), ...]
+ if jsonFilename:
+ return loadJSONGlyphOrder(jsonFilename)
+ elif fontFilename:
+ return loadTTGlyphOrder(fontFilename.rstrip('/ '))
+ return None
+
+
+def loadUFOGlyphNames(ufoPath):
+ font = OpenFont(ufoPath)
+
+ libPlist = PList(os.path.join(ufoPath, 'lib.plist'))
+ orderedNames = libPlist['public.glyphOrder'] # [ 'Omega', ...]
+
+ # append any glyphs that are missing in orderedNames
+ allNames = set(font.keys())
+ for name in orderedNames:
+ allNames.discard(name)
+ for name in allNames:
+ orderedNames.append(name)
+
+ ucToNames = font.getCharacterMapping() # { 2126: [ 'Omega', ...], ...}
+ nameToUc = revCharMap(ucToNames) # { 'Omega': 2126, ...}
+
+ gol = OrderedDict() # OrderedDict{ ('Omega', 2126|None), ...}
+ for name in orderedNames:
+ gol[name] = nameToUc.get(name)
+ # gol.append((name, nameToUc.get(name)))
+
+ return gol, ucToNames, nameToUc, libPlist
+
+
+def saveUFOGlyphOrder(libPlist, orderedNames, dryRun):
+ libPlist['public.glyphOrder'] = orderedNames
+
+ roboSort = libPlist.get('com.typemytype.robofont.sort')
+ if roboSort is not None:
+ # lib['com.typemytype.robofont.sort'] has schema
+ # [ { type: "glyphList", ascending: [glyphname, ...] }, ...]
+ for i in range(len(roboSort)):
+ ent = roboSort[i]
+ if isinstance(ent, dict) and ent.get('type') == 'glyphList':
+ roboSort[i] = {'type':'glyphList', 'ascending':orderedNames}
+ break
+
+ print('Writing', libPlist.filename)
+ if not dryRun:
+ libPlist.save()
+
+
+def getConfigResFile(config, basedir, name):
+ fn = os.path.join(basedir, config.get("res", name))
+ if not os.path.isfile(fn):
+ basedir = os.path.dirname(basedir)
+ fn = os.path.join(basedir, config.get("res", name))
+ if not os.path.isfile(fn):
+ fn = None
+ return fn
+
+
+def main():
+ argparser = ArgumentParser(description='Rewrite glyph order of UFO fonts')
+
+ argparser.add_argument(
+ '-dry', dest='dryRun', action='store_const', const=True, default=False,
+ help='Do not modify anything, but instead just print what would happen.')
+
+ argparser.add_argument(
+ '-src-json', dest='srcJSONFile', metavar='<file>', type=str,
+ help='JSON file to read glyph order from.' +
+ ' Should be a list e.g. [["Omega", 2126], [".notdef", null], ...]')
+
+ argparser.add_argument(
+ '-src-font', dest='srcFontFile', metavar='<file>', type=str,
+ help='TrueType or OpenType font to read glyph order from.')
+
+ argparser.add_argument(
+ '-out', dest='outFile', metavar='<file>', type=str,
+ help='Write each name per line to <file>')
+
+ argparser.add_argument(
+ 'dstFontsPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts to update')
+
+ args = argparser.parse_args()
+ dryRun = args.dryRun
+
+ if args.srcJSONFile and args.srcFontFile:
+ argparser.error('Both -src-json and -src-font specified -- please provide only one.')
+
+ srcGol = loadSrcGlyphOrder(args.srcJSONFile, args.srcFontFile)
+ if srcGol is None:
+ argparser.error('No source provided (-src-* argument missing)')
+
+ # Load Adobe Glyph List database
+ srcDir = os.path.dirname(args.dstFontsPaths[0])
+ config = RawConfigParser(dict_type=OrderedDict)
+ config.read(os.path.join(srcDir, 'fontbuild.cfg'))
+ aglUcToName = parseAGL(getConfigResFile(config, srcDir, 'agl_glyphlistfile'))
+ aglNameToUc = revCharMap(aglUcToName)
+
+ glyphorderUnion = OrderedDict()
+
+ for dstFontPath in args.dstFontsPaths:
+ glyphOrder, ucToNames, nameToUc, libPlist = loadUFOGlyphNames(dstFontPath)
+
+ newGol = OrderedDict()
+ for name, uc in srcGol:
+
+ if uc is None:
+ # if there's no unicode associated, derive from name if possible
+ m = uniNameRe.match(name)
+ if m:
+ try:
+ uc = int(m.group(1), 16)
+ except:
+ pass
+ if uc is None:
+ uc = aglNameToUc.get(name)
+
+ # has same glyph mapped to same unicode
+ names = ucToNames.get(uc)
+ if names is not None:
+ for name in names:
+ # print('U %s U+%04X' % (name, uc))
+ newGol[name] = uc
+ continue
+
+ # has same name in dst?
+ uc2 = glyphOrder.get(name)
+ if uc2 is not None:
+ # print('N %s U+%04X' % (name, uc2))
+ newGol[name] = uc2
+ continue
+
+ # Try AGL[uc] -> name == name
+ if uc is not None:
+ name2 = aglUcToName.get(uc)
+ if name2 is not None:
+ uc2 = glyphOrder.get(name2)
+ if uc2 is not None:
+ # print('A %s U+%04X' % (name2, uc2))
+ newGol[name2] = uc2
+ continue
+
+ # else: ignore glyph name in srcGol not found in target
+ # if uc is None:
+ # print('x %s -' % name)
+ # else:
+ # print('x %s U+%04X' % (name, uc))
+
+
+ # add remaining glyphs from original glyph order
+ for name, uc in glyphOrder.iteritems():
+ if name not in newGol:
+ # print('E %s U+%04X' % (name, uc))
+ newGol[name] = uc
+
+ orderedNames = []
+ for name in newGol.iterkeys():
+ orderedNames.append(name)
+ glyphorderUnion[name] = True
+
+ saveUFOGlyphOrder(libPlist, orderedNames, dryRun)
+
+ if args.outFile:
+ print('Write', args.outFile)
+ glyphorderUnionNames = glyphorderUnion.keys()
+ if not dryRun:
+ with open(args.outFile, 'w') as f:
+ f.write('\n'.join(glyphorderUnionNames) + '\n')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/misc/tools/rmglyph.py b/misc/tools/rmglyph.py
new file mode 100755
index 000000000..7e6d7f408
--- /dev/null
+++ b/misc/tools/rmglyph.py
@@ -0,0 +1,548 @@
+#!/usr/bin/env python
+# encoding: utf8
+from __future__ import print_function
+import os, sys, plistlib, re, subprocess
+from collections import OrderedDict
+from ConfigParser import RawConfigParser
+from argparse import ArgumentParser
+from robofab.objects.objectsRF import OpenFont
+from textwrap import TextWrapper
+from StringIO import StringIO
+import glob
+import cleanup_kerning
+
+
+dryRun = False
+BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
+
+
+def readLines(filename):
+ with open(filename, 'r') as f:
+ return f.read().strip().splitlines()
+
+
+def loadAGL(filename): # -> { 2126: 'Omega', ... }
+ m = {}
+ with open(filename, 'r') as f:
+ for line in f:
+ # Omega;2126
+ # dalethatafpatah;05D3 05B2 # higher-level combinations; ignored
+ line = line.strip()
+ if len(line) > 0 and line[0] != '#':
+ name, uc = tuple([c.strip() for c in line.split(';')])
+ if uc.find(' ') == -1:
+ # it's a 1:1 mapping
+ m[int(uc, 16)] = name
+ return m
+
+
+def decomposeComponentInstances(font, glyph, componentsToDecompose):
+ """Moves the components of a glyph to its outline."""
+ if len(glyph.components):
+ deepCopyContours(font, glyph, glyph, (0, 0), (1, 1), componentsToDecompose)
+ glyph.clearComponents()
+
+
+def deepCopyContours(font, parent, component, offset, scale, componentsToDecompose):
+ """Copy contours to parent from component, including nested components."""
+ for nested in component.components:
+ if componentsToDecompose is None or nested.baseGlyph in componentsToDecompose:
+ deepCopyContours(
+ font, parent, font[nested.baseGlyph],
+ (offset[0] + nested.offset[0], offset[1] + nested.offset[1]),
+ (scale[0] * nested.scale[0], scale[1] * nested.scale[1]),
+ None)
+ component.removeComponent(nested)
+ if component == parent:
+ return
+ for contour in component:
+ contour = contour.copy()
+ contour.scale(scale)
+ contour.move(offset)
+ parent.appendContour(contour)
+
+
+def addGlyphsForCPFont(cp, ucmap, glyphnames):
+ if cp in ucmap:
+ for name in ucmap[cp]:
+ glyphnames.add(name)
+ # else:
+ # print('no glyph for U+%04X' % cp)
+
+
+def getGlyphNamesFont(font, ucmap, glyphs):
+ glyphnames = set()
+ for s in glyphs:
+ if len(s) > 2 and s[:2] == 'U+':
+ p = s.find('-')
+ if p != -1:
+ # range, e.g. "U+1D0A-1DBC"
+ cpStart = int(s[2:p], 16)
+ cpEnd = int(s[p+1:], 16)
+ for cp in range(cpStart, cpEnd+1):
+ addGlyphsForCPFont(cp, ucmap, glyphnames)
+ else:
+ # single code point e.g. "U+1D0A"
+ cp = int(s[2:], 16)
+ addGlyphsForCPFont(cp, ucmap, glyphnames)
+ elif s in font:
+ glyphnames.add(s)
+ return glyphnames
+
+
+def addGlyphsForCPComps(cp, comps, agl, glyphnames):
+ uniName = 'uni%04X' % cp
+ symbolicName = agl.get(cp)
+ if uniName in comps:
+ glyphnames.add(uniName)
+ if symbolicName in comps:
+ glyphnames.add(symbolicName)
+
+
+def getGlyphNamesComps(comps, agl, glyphs):
+ # comps: { glyphName => (baseName, accentNames, offset) ... }
+ # agl: { 2126: 'Omega' ... }
+ glyphnames = set()
+ for s in glyphs:
+ if len(s) > 2 and s[:2] == 'U+':
+ p = s.find('-')
+ if p != -1:
+ # range, e.g. "U+1D0A-1DBC"
+ cpStart = int(s[2:p], 16)
+ cpEnd = int(s[p+1:], 16)
+ for cp in range(cpStart, cpEnd+1):
+ addGlyphsForCPComps(cp, comps, agl, glyphnames)
+ else:
+ # single code point e.g. "U+1D0A"
+ cp = int(s[2:], 16)
+ addGlyphsForCPComps(cp, comps, agl, glyphnames)
+ elif s in comps:
+ glyphnames.add(s)
+ return glyphnames
+
+
+def updateConfigFile(config, filename, rmnames):
+ wrapper = TextWrapper()
+ wrapper.width = 80
+ wrapper.break_long_words = False
+ wrapper.break_on_hyphens = False
+ wrap = lambda names: '\n'.join(wrapper.wrap(' '.join(names)))
+
+ didChange = False
+
+ for propertyName, values in config.items('glyphs'):
+ glyphNames = values.split()
+ propChanged = False
+ glyphNames2 = [name for name in glyphNames if name not in rmnames]
+ if len(glyphNames2) < len(glyphNames):
+ print('[fontbuild.cfg] updating glyphs property', propertyName)
+ config.set('glyphs', propertyName, wrap(glyphNames2)+'\n')
+ didChange = True
+
+ if didChange:
+ s = StringIO()
+ config.write(s)
+ s = s.getvalue()
+ s = re.sub(r'\n(\w+)\s+=\s*', '\n\\1: ', s, flags=re.M)
+ s = re.sub(r'((?:^|\n)\[[^\]]*\])', '\\1\n', s, flags=re.M)
+ s = re.sub(r'\n\t\n', '\n\n', s, flags=re.M)
+ s = s.strip() + '\n'
+ print('Writing', filename)
+ if not dryRun:
+ with open(filename, 'w') as f:
+ f.write(s)
+
+
+def parseGlyphComposition(composite):
+ c = composite.split("=")
+ d = c[1].split("/")
+ glyphName = d[0]
+ if len(d) == 1:
+ offset = [0, 0]
+ else:
+ offset = [int(i) for i in d[1].split(",")]
+ accentString = c[0]
+ accents = accentString.split("+")
+ baseName = accents.pop(0)
+ accentNames = [i.split(":") for i in accents]
+ return (glyphName, baseName, accentNames, offset)
+
+
+def fmtGlyphComposition(glyphName, baseName, accentNames, offset):
+ # glyphName = 'uni03D3'
+ # baseName = 'uni03D2'
+ # accentNames = [['tonos', 'top'], ['acute', 'top']]
+ # offset = [100, 0]
+ # => "uni03D2+tonos:top+acute:top=uni03D3/100,0"
+ s = baseName
+ for accentNameTuple in accentNames:
+ s += '+' + accentNameTuple[0]
+ if len(accentNameTuple) > 1:
+ s += ':' + accentNameTuple[1]
+ s += '=' + glyphName
+ if offset[0] != 0 or offset[1] != 0:
+ s += '/%d,%d' % tuple(offset)
+ return s
+
+
+def loadGlyphCompositions(filename): # { glyphName => (baseName, accentNames, offset) }
+ compositions = OrderedDict()
+ with open(filename, 'r') as f:
+ for line in f:
+ line = line.strip()
+ if len(line) > 0 and line[0] != '#':
+ glyphName, baseName, accentNames, offset = parseGlyphComposition(line)
+ compositions[glyphName] = (baseName, accentNames, offset)
+ return compositions
+
+
+def updateDiacriticsFile(filename, rmnames):
+ lines = []
+ didChange = False
+
+ for line in readLines(filename):
+ line = line.strip()
+ if len(line) == 0 or len(line.lstrip()) == 0 or line.lstrip()[0] == '#':
+ lines.append(line)
+ else:
+ glyphName, baseName, accentNames, offset = parseGlyphComposition(line)
+
+ skipLine = False
+ if baseName in rmnames or glyphName in rmnames:
+ skipLine = True
+ else:
+ for accent in accentNames:
+ name = accent[0]
+ if name in rmnames:
+ skipLine = True
+ break
+
+ if not skipLine:
+ lines.append(line)
+ else:
+ print('[diacritics] removing', line.strip())
+ didChange = True
+
+ if didChange:
+ print('Writing', filename)
+ if not dryRun:
+ with open(filename, 'w') as f:
+ for line in lines:
+ f.write(line + '\n')
+
+
+def configFindResFile(config, basedir, name):
+ fn = os.path.join(basedir, config.get("res", name))
+ if not os.path.isfile(fn):
+ basedir = os.path.dirname(basedir)
+ fn = os.path.join(basedir, config.get("res", name))
+ if not os.path.isfile(fn):
+ fn = None
+ return fn
+
+
+includeRe = re.compile(r'^include\(([^\)]+)\);\s*$')
+tokenSepRe = re.compile(r'([\@A-Za-z0-9_\.]+|[=\-\[\]\(\)\{\};<>\'])')
+spaceRe = re.compile(r'[ \t]+')
+
+
+def loadFeaturesFile(filepath, followIncludes=True):
+ print('read', filepath)
+ lines = []
+ with open(filepath, 'r') as f:
+ for line in f:
+ m = includeRe.match(line)
+ if m is not None:
+ if followIncludes:
+ includedFilename = m.group(1)
+ includedPath = os.path.normpath(os.path.join(os.path.dirname(filepath), includedFilename))
+ lines = lines + loadFeaturesFile(includedPath, followIncludes)
+ else:
+ lines.append(line)
+ return lines
+
+
+def collapseSpace(s):
+ lm = len(s) - len(s.lstrip(' \t'))
+ return s[:lm] + spaceRe.sub(' ', s[lm:])
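+# e.g. collapseSpace('  sub  f  by  f_f;') == '  sub f by f_f;'
+# (leading indentation is preserved; interior runs of spaces/tabs collapse to one)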
+
+def updateFeaturesFile(filename, rmnames):
+ # this is a VERY crude approach that simply tokenizes the input and filters
+ # out strings that seem to be names but are not found in glyphnames.
+
+ lines = []
+ didChange = False
+
+ for line in loadFeaturesFile(filename, followIncludes=False):
+ line = line.rstrip('\r\n ')
+ tokens = tokenSepRe.split(line)
+ tokens2 = [t for t in tokens if t not in rmnames]
+ if len(tokens2) != len(tokens):
+ line = collapseSpace(''.join(tokens2))
+ didChange = True
+ lines.append(line)
+
+ if didChange:
+ print('Write', filename)
+ if not dryRun:
+ with open(filename, 'w') as f:
+ for line in lines:
+ f.write(line + '\n')
+
+ return didChange
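+# Illustrative: with rmnames = {'f_f'}, a feature line like
+#   "  sub f f by f_f;"  becomes  "  sub f f by ;"
+# i.e. tokens are only dropped, which can leave syntactically odd rules --
+# hence the later reminder to hand-edit features.fea.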
+
+
+def grep(filename, names):
+ hasPrintedFilename = False
+ relFilename = os.path.relpath(os.path.abspath(filename), BASEDIR)
+ findCount = 0
+ with open(filename, 'r') as f:
+ lineno = 1
+ for line in f:
+ foundNames = []
+ for name in names:
+ col = line.find(name)
+ if col != -1:
+ foundNames.append((name, lineno, col, line))
+ findCount += 1
+ if len(foundNames):
+ if not hasPrintedFilename:
+ print('%s:' % relFilename)
+ hasPrintedFilename = True
+ for name, lineno, col, line in foundNames:
+ line = line.strip()
+ if len(line) > 50:
+ line = line[:47] + '...'
+ print(' %s\t%d:%d\t%s' % (name, lineno, col, line))
+ lineno += 1
+ return findCount
+
+
+
+def main(argv=None):
+ argparser = ArgumentParser(
+ description='Remove glyphs from all UFOs in src dir')
+
+ argparser.add_argument(
+ '-dry', dest='dryRun', action='store_const', const=True, default=False,
+ help='Do not modify anything, but instead just print what would happen.')
+
+ argparser.add_argument(
+ '-decompose', dest='decompose', action='store_const', const=True, default=False,
+ help='When deleting a glyph which is used as a component by another glyph '+
+ 'which is not being deleted, instead of refusing to delete the glyph, '+
+ 'decompose the component instances in other glyphs.')
+
+ argparser.add_argument(
+ '-ignore-git-state', dest='ignoreGitState', action='store_const', const=True, default=False,
+ help='Skip checking with git if there are changes to the target UFO file.')
+
+ argparser.add_argument(
+ 'glyphs', metavar='<glyph>', type=str, nargs='+',
+ help='Glyph to remove. '+
+ 'Can be a glyphname, '+
+ 'a Unicode code point formatted as "U+<CP>", '+
+ 'or a Unicode code point range formatted as "U+<CP>-<CP>"')
+
+ args = argparser.parse_args(argv)
+ global dryRun
+ dryRun = args.dryRun
+ srcDir = os.path.join(BASEDIR, 'src')
+
+ # check if src font has modifications
+ if not args.ignoreGitState:
+ gitStatus = subprocess.check_output(
+ ['git', '-C', BASEDIR, 'status', '-s', '--',
+ os.path.relpath(os.path.abspath(srcDir), BASEDIR) ],
+ shell=False)
+ gitIsDirty = False
+ gitStatusLines = gitStatus.splitlines()
+ for line in gitStatusLines:
+ if len(line) > 3 and line[:2] != '??':
+ gitIsDirty = True
+ break
+ if gitIsDirty:
+ if len(gitStatusLines) > 5:
+ gitStatusLines = gitStatusLines[:5] + [' ...']
+ print(
+ ("%s has uncommitted changes. It's strongly recommended to run this "+
+ "script on an unmodified UFO path so to allow \"undoing\" any changes. "+
+ "Run with -ignore-git-state to ignore this warning.\n%s") % (
+ srcDir, '\n'.join(gitStatusLines)),
+ file=sys.stderr)
+ sys.exit(1)
+
+ # Find UFO fonts
+ fontPaths = glob.glob(os.path.join(srcDir, '*.ufo'))
+ if len(fontPaths) == 0:
+ print('No UFOs found in', srcDir, file=sys.stderr)
+ sys.exit(1)
+
+ # load fontbuild config
+ config = RawConfigParser(dict_type=OrderedDict)
+ configFilename = os.path.join(srcDir, 'fontbuild.cfg')
+ config.read(configFilename)
+ glyphOrderFile = configFindResFile(config, srcDir, 'glyphorder')
+ diacriticsFile = configFindResFile(config, srcDir, 'diacriticfile')
+ featuresFile = os.path.join(srcDir, 'features.fea')
+
+ # load AGL and diacritics
+ agl = loadAGL(os.path.join(srcDir, 'glyphlist.txt')) # { 2126: 'Omega', ... }
+ comps = loadGlyphCompositions(diacriticsFile)
+ # { glyphName => (baseName, accentNames, offset) }
+
+ # find glyphnames to remove that are composed (removal happens later)
+ rmnamesUnion = getGlyphNamesComps(comps, agl, args.glyphs)
+
+ # find glyphnames to remove from UFOs (and remove them)
+ for fontPath in fontPaths:
+ relFontPath = os.path.relpath(fontPath, BASEDIR)
+ print('Loading glyph data for %s...' % relFontPath)
+ font = OpenFont(fontPath)
+ ucmap = font.getCharacterMapping() # { 2126: [ 'Omega', ...], ...}
+ cnmap = font.getReverseComponentMapping() # { 'A' : ['Aacute', 'Aring'], 'acute' : ['Aacute'] ... }
+
+ glyphnames = getGlyphNamesFont(font, ucmap, args.glyphs)
+
+ if len(glyphnames) == 0:
+ print('None of the glyphs requested exist in', relFontPath, file=sys.stderr)
+
+ print('Preparing to remove %d glyphs — resolving component usage...' % len(glyphnames))
+
+ # Check component usage
+ cnConflicts = {}
+ for gname in glyphnames:
+ cnUses = cnmap.get(gname)
+ if cnUses:
+ extCnUses = [n for n in cnUses if n not in glyphnames]
+ if len(extCnUses) > 0:
+ cnConflicts[gname] = extCnUses
+
+ if len(cnConflicts) > 0:
+ if args.decompose:
+ componentsToDecompose = set()
+ for gname in cnConflicts.keys():
+ componentsToDecompose.add(gname)
+ for gname, dependants in cnConflicts.iteritems():
+ print('decomposing %s in %s' % (gname, ', '.join(dependants)))
+ for depname in dependants:
+ decomposeComponentInstances(font, font[depname], componentsToDecompose)
+ else:
+ print(
+ '\nComponent conflicts.\n\n'+
+ 'Some glyphs to be deleted are used as components in other glyphs.\n'+
+ 'You need to either decompose the components, also delete glyphs\n'+
+ 'using them, or not delete the glyphs at all.\n', file=sys.stderr)
+ for gname, dependants in cnConflicts.iteritems():
+ print('%s used by %s' % (gname, ', '.join(dependants)), file=sys.stderr)
+ sys.exit(1)
+
+ # find orphaned pure-components
+ for gname in glyphnames:
+ try:
+ g = font[gname]
+ except:
+ print('no glyph %r in %s' % (gname, relFontPath), file=sys.stderr)
+ sys.exit(1)
+ useCount = 0
+ for cn in g.components:
+ usedBy = cnmap.get(cn.baseGlyph)
+ if usedBy:
+ usedBy = [name for name in usedBy if name not in glyphnames]
+ if len(usedBy) == 0:
+ cng = font[cn.baseGlyph]
+ if len(cng.unicodes) == 0:
+ print('Note: pure-component %s orphaned' % cn.baseGlyph)
+
+ # remove glyphs from UFO
+ print('Removing %d glyphs' % len(glyphnames))
+
+ libPlistFilename = os.path.join(fontPath, 'lib.plist')
+ libPlist = plistlib.readPlist(libPlistFilename)
+
+ glyphOrder = libPlist.get('public.glyphOrder')
+ if glyphOrder is not None:
+ v = [name for name in glyphOrder if name not in glyphnames]
+ libPlist['public.glyphOrder'] = v
+
+ roboSort = libPlist.get('com.typemytype.robofont.sort')
+ if roboSort is not None:
+ for entry in roboSort:
+ if isinstance(entry, dict) and entry.get('type') == 'glyphList':
+ asc = entry.get('ascending')
+ if asc is not None:
+ entry['ascending'] = [name for name in asc if name not in glyphnames]
+ desc = entry.get('descending')
+ if desc is not None:
+ entry['descending'] = [name for name in desc if name not in glyphnames]
+
+ for gname in glyphnames:
+ font.removeGlyph(gname)
+ rmnamesUnion.add(gname)
+
+ if not dryRun:
+ print('Writing changes to %s' % relFontPath)
+ font.save()
+ plistlib.writePlist(libPlist, libPlistFilename)
+ else:
+ print('Writing changes to %s (dry run)' % relFontPath)
+
+ print('Cleaning up kerning')
+ if dryRun:
+ cleanup_kerning.main(['-dry', fontPath])
+ else:
+ cleanup_kerning.main([fontPath])
+
+ # end for fontPath in fontPaths
+
+
+ # fontbuild config
+ updateDiacriticsFile(diacriticsFile, rmnamesUnion)
+ updateConfigFile(config, configFilename, rmnamesUnion)
+ featuresChanged = updateFeaturesFile(featuresFile, rmnamesUnion)
+
+
+ # TMP for testing fuzzy
+ # rmnamesUnion = set()
+ # featuresChanged = False
+ # with open('_local/rmlog') as f:
+ # for line in f:
+ # line = line.strip()
+ # if len(line):
+ # rmnamesUnion.add(line)
+
+
+ print('\n————————————————————————————————————————————————————\n'+
+ 'Removed %d glyphs:\n %s' % (
+ len(rmnamesUnion), '\n '.join(sorted(rmnamesUnion))))
+
+ print('\n————————————————————————————————————————————————————\n')
+
+ # find possibly-missed instances
+ print('Fuzzy matches:')
+ fuzzyMatchCount = 0
+ fuzzyMatchCount += grep(diacriticsFile, rmnamesUnion)
+ fuzzyMatchCount += grep(configFilename, rmnamesUnion)
+ fuzzyMatchCount += grep(featuresFile, rmnamesUnion)
+ for fontPath in fontPaths:
+ fuzzyMatchCount += grep(os.path.join(fontPath, 'lib.plist'), rmnamesUnion)
+ if fuzzyMatchCount == 0:
+ print(' (none)\n')
+ else:
+ print('You may want to look into those ^\n')
+
+ if featuresChanged:
+ print('You need to manually edit features.\n'+
+ '- git diff src/features.fea\n'+
+ '- $EDITOR %s/features.fea\n' % '/features.fea\n- $EDITOR '.join(fontPaths))
+
+ print(('You need to re-generate %s via\n'+
+ '`make src/glyphorder.txt` (or misc/gen-glyphorder.py)'
+ ) % glyphOrderFile)
+
+ print('\nFinally, you should build the Medium weight and make sure it all '+
+ 'looks good and that no mixglyph failures occur. E.g. `make Medium -j`')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/misc/tools/show-changes.py b/misc/tools/show-changes.py
new file mode 100755
index 000000000..59f79b3e5
--- /dev/null
+++ b/misc/tools/show-changes.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+from __future__ import print_function
+import os, sys, subprocess
+from argparse import ArgumentParser
+from xml.dom.minidom import parse as xmlParseFile
+from collections import OrderedDict
+
+
+def main():
+ opts = ArgumentParser(description='Shows glyph-related changes.')
+
+ opts.add_argument(
+ 'sinceCommit', metavar='<since-commit>', type=str,
+ help='Start commit.')
+
+ opts.add_argument(
+ 'untilCommit', metavar='<until-commit>', type=str, nargs='?',
+ default='HEAD', help='End commit. Defaults to HEAD.')
+
+ opts.add_argument(
+ '-markdown', dest='markdown', action='store_const',
+ const=True, default=False,
+ help='Output text suitable for Markdown (rather than plain text).')
+
+ a = opts.parse_args()
+
+ rootdir = os.path.abspath(os.path.join(
+ os.path.dirname(__file__),
+ os.pardir
+ ))
+
+ try:
+ out = subprocess.check_output(
+ [
+ 'git',
+ '-C', rootdir,
+ 'diff',
+ '--name-status',
+ a.sinceCommit + '..' + a.untilCommit,
+ '--', 'src'
+ ],
+ shell=False
+ ).strip()
+ except Exception as e:
+ print('Did you forget to `git fetch --tags` perhaps?', file=sys.stderr)
+ sys.exit(1)
+
+ ufoPrefix = 'src/Inter-UI-'
+ changes = OrderedDict()
+ deleted = []
+
+ for line in out.split('\n'):
+ changeType, name = line.split('\t')
+ if name.startswith(ufoPrefix) and name.endswith('.glif'):
+ weight = name[len(ufoPrefix):name.find('.ufo/')]
+ filename = os.path.join(rootdir, name)
+ try:
+ doc = xmlParseFile(filename)
+ except:
+ deleted.append('%s/%s' % (weight, os.path.basename(name)))
+ continue
+
+ g = doc.documentElement
+ gname = g.attributes['name'].value
+ unicodes = set([
+ 'U+' + u.attributes['hex'].value
+ for u in g.getElementsByTagName('unicode')
+ ])
+
+ c = changes.get(gname)
+ if c is None:
+ c = {
+ 'unicodes': unicodes,
+ 'weights': [(weight, changeType)]
+ }
+ changes[gname] = c
+ else:
+ c['unicodes'] = c['unicodes'].union(unicodes)
+ c['weights'].append((weight, changeType))
+
+ longestName = 0
+ names = sorted(changes.keys())
+
+ if not a.markdown:
+ # find longest name
+ for name in names:
+ z = len(name)
+ if z > longestName:
+ longestName = z
+
+ for name in names:
+ c = changes[name]
+ weights = [ w[0] for w in c['weights'] ]
+ unicodes = c['unicodes']
+
+ if a.markdown:
+ unicodess = ''
+ if len(unicodes) != 0:
+ unicodess = ' %s' % ', '.join(['`%s`' % s for s in unicodes])
+ weightss = ' & '.join(weights)
+ print('- %s%s _%s_' % (name, unicodess, weightss))
+ else:
+ unicodess = ''
+ if len(unicodes) != 0:
+ unicodess = ' (%s)' % ', '.join(unicodes)
+ weightss = ' & '.join(weights)
+ print('%s%s %s' % (name.ljust(longestName), unicodess, weightss))
+
+ if len(deleted):
+ print('\nDeleted files')
+ for filename in deleted:
+ print('- %s' % filename)
+
+
+main() \ No newline at end of file
diff --git a/misc/tools/svgsync.py b/misc/tools/svgsync.py
new file mode 100755
index 000000000..d8781272b
--- /dev/null
+++ b/misc/tools/svgsync.py
@@ -0,0 +1,438 @@
+#!/usr/bin/env python
+# encoding: utf8
+#
+# Sync glyph shapes between SVG and UFO, creating a bridge between UFO and Figma.
+#
+import os
+import sys
+import argparse
+import re
+from xml.dom.minidom import parseString as xmlparseString
+
+# from robofab.world import world, RFont, RGlyph, OpenFont, NewFont
+from robofab.objects.objectsRF import RFont, RGlyph, OpenFont, NewFont, RContour
+from robofab.objects.objectsBase import MOVE, LINE, CORNER, CURVE, QCURVE, OFFCURVE
+
+font = None # RFont
+ufopath = ''
+svgdir = ''
+effectiveAscender = 0
+
+
+def num(s):
+ return int(s) if s.find('.') == -1 else float(s)
+
+
+def glyphToSVGPath(g, yMul):
+ commands = {'move':'M','line':'L','curve':'Y','offcurve':'X','offCurve':'X'}
+ svg = ''
+ contours = []
+ if len(g.components):
+ font.newGlyph('__svgsync')
+ new = font['__svgsync']
+ new.width = g.width
+ new.appendGlyph(g)
+ new.decompose()
+ g = new
+ if len(g):
+ for c in range(len(g)):
+ contours.append(g[c])
+ for i in range(len(contours)):
+ c = contours[i]
+ contour = end = ''
+ curve = False
+ points = c.points
+ if points[0].type == 'offCurve':
+ points.append(points.pop(0))
+ if points[0].type == 'offCurve':
+ points.append(points.pop(0))
+ for x in range(len(points)):
+ p = points[x]
+ command = commands[str(p.type)]
+ if command == 'X':
+ if curve == True:
+ command = ''
+ else:
+ command = 'C'
+ curve = True
+ if command == 'Y':
+ command = ''
+ curve = False
+ if x == 0:
+ command = 'M'
+ if p.type == 'curve':
+ end = ' ' + str(p.x) + ' ' + str(p.y * yMul)
+ contour += ' ' + command + str(p.x) + ' ' + str(p.y * yMul)
+ svg += ' ' + contour + end + 'z'
+ if font.has_key('__svgsync'):
+ font.removeGlyph('__svgsync')
+ return svg.strip()
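+# Note on the command map above: 'X' and 'Y' are internal placeholders, not SVG
+# commands -- the first off-curve point of a segment is emitted as 'C' and the
+# following off-curve/curve points get no letter, so the resulting path data uses
+# only M/L/C commands (with y flipped by yMul). Glyphs with components are first
+# decomposed into a temporary '__svgsync' glyph, which is removed again afterwards.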
+
+
+def maybeAddMove(contour, x, y, smooth):
+ if len(contour.segments) == 0:
+ contour.appendSegment(MOVE, [(x, y)], smooth=smooth)
+
+
+
+svgPathDataRegEx = re.compile(r'(?:([A-Z])\s*|)([0-9\.\-\+eE]+)')
+
+
+def drawSVGPath(g, d, tr):
+ yMul = -1
+ xOffs = tr[0]
+ yOffs = -(font.info.unitsPerEm - tr[1])
+
+ for pathd in d.split('M'):
+ pathd = pathd.strip()
+ # print 'pathd', pathd
+ if len(pathd) == 0:
+ continue
+ i = 0
+ closePath = False
+ if pathd[-1] == 'z':
+ closePath = True
+ pathd = pathd[0:-1]
+
+ pv = []
+ for m in svgPathDataRegEx.finditer('M' + pathd):
+ if m.group(1) is not None:
+ pv.append(m.group(1) + m.group(2))
+ else:
+ pv.append(m.group(2))
+
+ initX = 0
+ initY = 0
+
+ pen = g.getPen()
+
+ while i < len(pv):
+ pd = pv[i]; i += 1
+ cmd = pd[0]
+ x = num(pd[1:]) + xOffs
+ y = (num(pv[i]) + yOffs) * yMul; i += 1
+
+ if cmd == 'M':
+ # print cmd, x, y, '/', num(pv[i-2][1:])
+ initX = x
+ initY = y
+ pen.moveTo((x, y))
+ continue
+
+ if cmd == 'C':
+ # Bezier curve: "C x1 y1, x2 y2, x y"
+ x1 = x
+ y1 = y
+ x2 = num(pv[i]) + xOffs; i += 1
+ y2 = (num(pv[i]) + yOffs) * yMul; i += 1
+ x = num(pv[i]) + xOffs; i += 1
+ y = (num(pv[i]) + yOffs) * yMul; i += 1
+ pen.curveTo((x1, y1), (x2, y2), (x, y))
+ # print cmd, x1, y1, x2, y2, x, y
+
+ elif cmd == 'L':
+ pen.lineTo((x, y))
+
+ else:
+ raise Exception('unexpected SVG path command %r' % cmd)
+
+ if closePath:
+ pen.closePath()
+ else:
+ pen.endPath()
+ # print 'path ended. closePath:', closePath
+
+
+def glyphToSVG(g):
+ width = g.width
+ height = font.info.unitsPerEm
+
+ d = {
+ 'name': g.name,
+ 'width': width,
+ 'height': effectiveAscender - font.info.descender,
+ 'effectiveAscender': effectiveAscender,
+ 'leftMargin': g.leftMargin,
+ 'rightMargin': g.rightMargin,
+ 'glyphSVGPath': glyphToSVGPath(g, -1),
+ 'ascender': font.info.ascender,
+ 'descender': font.info.descender,
+ 'baselineOffset': height + font.info.descender,
+ 'unitsPerEm': font.info.unitsPerEm,
+ }
+
+ # for kv in d.iteritems():
+ # if kv[0] == 'glyphSVGPath':
+ # print ' %s: ...' % kv[0]
+ # else:
+ # print ' %s: %r' % kv
+
+ svg = '''
+<svg xmlns="http://www.w3.org/2000/svg" width="%(width)d" height="%(height)d">
+ <g id="%(name)s">
+ <path d="%(glyphSVGPath)s" transform="translate(0 %(effectiveAscender)d)" />
+ <rect x="0" y="0" width="%(width)d" height="%(height)d" fill="none" stroke="black" />
+ </g>
+</svg>
+ ''' % d
+ # print svg
+ return svg.strip()
+
+
+def _findPathNodes(n, paths, defs, uses, isDef=False):
+ for cn in n.childNodes:
+ if cn.nodeName == 'path':
+ if isDef:
+ defs[cn.getAttribute('id')] = cn
+ else:
+ paths.append(cn)
+ elif cn.nodeName == 'use':
+ uses[cn.getAttribute('xlink:href').lstrip('#')] = {'useNode': cn, 'targetNode': None}
+ elif cn.nodeName == 'defs':
+ _findPathNodes(cn, paths, defs, uses, isDef=True)
+ elif not isinstance(cn, basestring) and cn.childNodes and len(cn.childNodes) > 0:
+ _findPathNodes(cn, paths, defs, uses, isDef)
+ # return translate
+
+
+def findPathNodes(n, isDef=False):
+ paths = []
+ defs = {}
+ uses = {}
+ # <g id="Canvas" transform="translate(-3677 -24988)">
+ # <g id="six 2">
+ # <g id="six">
+ # <g id="Vector">
+ # <use xlink:href="#path0_fill" transform="translate(3886 25729)"/>
+ # ...
+ # <defs>
+ # <path id="path0_fill" ...
+ #
+ _findPathNodes(n, paths, defs, uses)
+
+ # flatten uses & defs
+ for k in uses.keys():
+ dfNode = defs.get(k)
+ if dfNode is not None:
+ v = uses[k]
+ v['targetNode'] = dfNode
+ if dfNode.nodeName == 'path':
+ useNode = v['useNode']
+ useNode.parentNode.replaceChild(dfNode, useNode)
+ attrs = useNode.attributes
+ for k in attrs.keys():
+ if k != 'xlink:href':
+ dfNode.setAttribute(k, attrs[k])
+ paths.append(dfNode)
+
+ else:
+ del defs[k]
+
+ return paths
+
+
+def nodeTranslation(path, x=0, y=0):
+ tr = path.getAttribute('transform')
+ if tr is not None:
+ if not isinstance(tr, basestring):
+ tr = tr.value
+ if len(tr) > 0:
+ m = re.match(r"translate\s*\(\s*(?P<x>[\-\d\.eE]+)[\s,]*(?P<y>[\-\d\.eE]+)\s*\)", tr)
+ if m is not None:
+ x += num(m.group('x'))
+ y += num(m.group('y'))
+ else:
+ raise Exception('Unable to handle transform="%s"' % tr)
+ # m = re.match(r"matrix\s*\(\s*(?P<a>[\-\d\.eE]+)[\s,]*(?P<b>[\-\d\.eE]+)[\s,]*(?P<c>[\-\d\.eE]+)[\s,]*(?P<d>[\-\d\.eE]+)[\s,]*(?P<e>[\-\d\.eE]+)[\s,]*(?P<f>[\-\d\.eE]+)[\s,]*", tr)
+ # if m is not None:
+ # a, b, c = num(m.group('a')), num(m.group('b')), num(m.group('c'))
+ # d, e, f = num(m.group('d')), num(m.group('e')), num(m.group('f'))
+ # # matrix -1 0 0 -1 -660.719 31947
+ # print 'matrix', a, b, c, d, e, f
+ # # matrix(-1 0 -0 -1 -2553 31943)
+ pn = path.parentNode
+ if pn is not None and pn.nodeName != '#document':
+ x, y = nodeTranslation(pn, x, y)
+ return (x, y)
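+# Illustrative, using the Figma structure sketched above: a path referenced with
+# transform="translate(3886 25729)" under <g transform="translate(-3677 -24988)">
+# accumulates to (209, 741). Only translate() is handled; any other transform raises.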
+
+
+def glyphUpdateFromSVG(g, svgCode):
+ doc = xmlparseString(svgCode)
+ svg = doc.documentElement
+ paths = findPathNodes(svg)
+ if len(paths) == 0:
+ raise Exception('no <path> found in SVG')
+ path = paths[0]
+ if len(paths) != 1:
+ for p in paths:
+ id = p.getAttribute('id')
+ if id is not None and id.find('stroke') == -1:
+ path = p
+ break
+
+ tr = nodeTranslation(path)
+ d = path.getAttribute('d')
+ g.clearContours()
+ drawSVGPath(g, d, tr)
+
+
+def stat(path):
+ try:
+ return os.stat(path)
+ except OSError as e:
+ return None
+
+
+def writeFile(file, s):
+ with open(file, 'w') as f:
+ f.write(s)
+
+
+def writeFileAndMkDirsIfNeeded(file, s):
+ try:
+ writeFile(file, s)
+ except IOError as e:
+ if e.errno == 2:
+ os.makedirs(os.path.dirname(file))
+ writeFile(file, s)
+
+
+def syncGlyphUFOToSVG(glyphname, svgFile, mtime):
+ print glyphname + ': UFO -> SVG'
+ g = font.getGlyph(glyphname)
+ svg = glyphToSVG(g)
+ writeFileAndMkDirsIfNeeded(svgFile, svg)
+ os.utime(svgFile, (mtime, mtime))
+ print 'write', svgFile
+
+
+def syncGlyphSVGToUFO(glyphname, svgFile):
+ print glyphname + ': SVG -> UFO'
+ svg = ''
+ with open(svgFile, 'r') as f:
+ svg = f.read()
+ g = font.getGlyph(glyphname)
+ glyphUpdateFromSVG(g, svg)
+
+
+def findGlifFile(glyphname):
+ # glyphname.glif
+ # glyphname_.glif
+ # glyphname__.glif
+ # glyphname___.glif
+ for underscoreCount in range(0, 5):
+ fn = os.path.join(ufopath, 'glyphs', glyphname + ('_' * underscoreCount) + '.glif')
+ st = stat(fn)
+ if st is not None:
+ return fn, st
+
+ if glyphname.find('.') != -1:
+ # glyph_.name.glif
+ # glyph__.name.glif
+ # glyph___.name.glif
+ for underscoreCount in range(0, 5):
+ nv = glyphname.split('.')
+ nv[0] = nv[0] + ('_' * underscoreCount)
+ ns = '.'.join(nv)
+ fn = os.path.join(ufopath, 'glyphs', ns + '.glif')
+ st = stat(fn)
+ if st is not None:
+ return fn, st
+
+ if glyphname.find('_') != -1:
+ # glyph_name.glif
+ # glyph_name_.glif
+ # glyph_name__.glif
+ # glyph__name.glif
+ # glyph__name_.glif
+ # glyph__name__.glif
+ # glyph___name.glif
+ # glyph___name_.glif
+ # glyph___name__.glif
+ for x in range(0, 4):
+ for y in range(0, 5):
+ ns = glyphname.replace('_', '__' + ('_' * x))
+ fn = os.path.join(ufopath, 'glyphs', ns + ('_' * y) + '.glif')
+ st = stat(fn)
+ if st is not None:
+ return fn, st
+
+ return ('', None)
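+# UFO glyph files may carry extra underscores to avoid filename clashes (e.g. on
+# case-insensitive filesystems), so this probes the plain name plus a few underscore
+# variants and returns the first .glif path that exists, or ('', None) if none do.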
+
+
+def syncGlyph(glyphname):
+ glyphFile, glyphStat = findGlifFile(glyphname)
+
+ svgFile = os.path.join(svgdir, glyphname + '.svg')
+ svgStat = stat(svgFile)
+
+ if glyphStat is None and svgStat is None:
+ raise Exception("glyph %r doesn't exist in UFO or SVG directory" % glyphname)
+
+ c = cmp(
+ 0 if glyphStat is None else glyphStat.st_mtime,
+ 0 if svgStat is None else svgStat.st_mtime
+ )
+ if c < 0:
+ syncGlyphSVGToUFO(glyphname, svgFile)
+ return (glyphFile, svgStat.st_mtime) # glif file in UFO changed + its new mtime
+ elif c > 0:
+ syncGlyphUFOToSVG(glyphname, svgFile, glyphStat.st_mtime)
+ # else:
+ # print glyphname + ': up to date'
+
+ return (None, 0) # UFO did not change
+
+
+# ————————————————————————————————————————————————————————————————————————
+# main
+
+argparser = argparse.ArgumentParser(description='Convert UFO glyphs to SVG')
+
+argparser.add_argument('--svgdir', dest='svgdir', metavar='<dir>', type=str,
+ default='',
+ help='Write SVG files to <dir>. If not specified, SVG files are' +
+ ' written to: dirname(<ufopath>)/svg/<familyname>/<style>')
+
+argparser.add_argument('ufopath', metavar='<ufopath>', type=str,
+ help='Path to UFO packages')
+
+argparser.add_argument('glyphs', metavar='<glyphname>', type=str, nargs='*',
+ help='Glyphs to convert. Converts all if none specified.')
+
+args = argparser.parse_args()
+
+ufopath = args.ufopath.rstrip('/')
+
+font = OpenFont(ufopath)
+effectiveAscender = max(font.info.ascender, font.info.unitsPerEm)
+
+svgdir = args.svgdir
+if len(svgdir) == 0:
+ svgdir = os.path.join(
+ os.path.dirname(ufopath),
+ 'svg',
+ font.info.familyName,
+ font.info.styleName
+ )
+
+print 'sync %s (%s)' % (font.info.familyName, font.info.styleName)
+
+glyphnames = args.glyphs if len(args.glyphs) else font.keys()
+
+modifiedGlifFiles = []
+ignoreGlyphs = set(['.notdef', '.null'])
+for glyphname in glyphnames:
+ if glyphname in ignoreGlyphs:
+ continue
+ glyphFile, mtime = syncGlyph(glyphname)
+ if glyphFile is not None:
+ modifiedGlifFiles.append((glyphFile, mtime))
+
+if len(modifiedGlifFiles) > 0:
+ print 'Saving font'
+ font.save()
+ for glyphFile, mtime in modifiedGlifFiles:
+ os.utime(glyphFile, (mtime, mtime))
+ print 'write', glyphFile
+
diff --git a/misc/tools/svgsync2.py b/misc/tools/svgsync2.py
new file mode 100755
index 000000000..992d6d314
--- /dev/null
+++ b/misc/tools/svgsync2.py
@@ -0,0 +1,626 @@
+#!/usr/bin/env python
+# encoding: utf8
+#
+# Sync glyph shapes between SVG and UFO, creating a bridge between UFO and Figma.
+#
+import os
+import sys
+import argparse
+import re
+from StringIO import StringIO
+from hashlib import sha256
+from xml.dom.minidom import parseString as xmlparseString
+from svgpathtools import svg2paths, parse_path, Path, Line, CubicBezier, QuadraticBezier
+from base64 import b64encode
+
+# from robofab.world import world, RFont, RGlyph, OpenFont, NewFont
+from robofab.objects.objectsRF import RFont, RGlyph, OpenFont, NewFont, RContour
+from robofab.objects.objectsBase import MOVE, LINE, CORNER, CURVE, QCURVE, OFFCURVE
+
+font = None # RFont
+ufopath = ''
+svgdir = ''
+effectiveAscender = 0
+
+
+def num(s):
+ return int(s) if s.find('.') == -1 else float(s)
+
+
+def glyphToSVGPath(g, yMul=-1):
+ commands = {'move':'M','line':'L','curve':'Y','offcurve':'X','offCurve':'X'}
+ svg = ''
+ contours = []
+ if len(g.components):
+ font.newGlyph('__svgsync')
+ new = font['__svgsync']
+ new.width = g.width
+ new.appendGlyph(g)
+ new.decompose()
+ g = new
+ if len(g):
+ for c in range(len(g)):
+ contours.append(g[c])
+ for i in range(len(contours)):
+ c = contours[i]
+ contour = end = ''
+ curve = False
+ points = c.points
+ if points[0].type == 'offCurve':
+ points.append(points.pop(0))
+ if points[0].type == 'offCurve':
+ points.append(points.pop(0))
+ for x in range(len(points)):
+ p = points[x]
+ command = commands[str(p.type)]
+ if command == 'X':
+ if curve == True:
+ command = ''
+ else:
+ command = 'C'
+ curve = True
+ if command == 'Y':
+ command = ''
+ curve = False
+ if x == 0:
+ command = 'M'
+ if p.type == 'curve':
+ end = ' ' + str(p.x) + ' ' + str(p.y * yMul)
+ contour += ' ' + command + str(p.x) + ' ' + str(p.y * yMul)
+ svg += ' ' + contour + end + 'z'
+ if font.has_key('__svgsync'):
+ font.removeGlyph('__svgsync')
+ return svg.strip()
+
+
+def vec2(x, y):
+ return float(x) + float(y) * 1j
+
+
+def glyphToPaths(g, yMul=-1):
+ paths = []
+ contours = []
+ yOffs = -font.info.unitsPerEm
+
+ # decompose components
+ if len(g.components):
+ font.newGlyph('__svgsync')
+ ng = font['__svgsync']
+ ng.width = g.width
+ ng.appendGlyph(g)
+ ng.decompose()
+ g = ng
+
+ for c in g:
+ curve = False
+ points = c.points
+ path = Path()
+ currentPos = 0j
+ controlPoints = []
+
+ for x in range(len(points)):
+ p = points[x]
+ # print 'p#' + str(x) + '.type = ' + repr(p.type)
+
+ if p.type == 'move':
+ currentPos = vec2(p.x, (p.y + yOffs) * yMul)
+ elif p.type == 'offcurve':
+ controlPoints.append(p)
+ elif p.type == 'curve':
+ pos = vec2(p.x, (p.y + yOffs) * yMul)
+ if len(controlPoints) == 2:
+ cp1, cp2 = controlPoints
+ path.append(CubicBezier(
+ currentPos,
+ vec2(cp1.x, (cp1.y + yOffs) * yMul),
+ vec2(cp2.x, (cp2.y + yOffs) * yMul),
+ pos))
+ else:
+ if len(controlPoints) != 1:
+ raise Exception('unexpected number of control points for curve')
+ cp = controlPoints[0]
+ path.append(QuadraticBezier(currentPos, vec2(cp.x, (cp.y + yOffs) * yMul), pos))
+ currentPos = pos
+ controlPoints = []
+ elif p.type == 'line':
+ pos = vec2(p.x, (p.y + yOffs) * yMul)
+ path.append(Line(currentPos, pos))
+ currentPos = pos
+
+ paths.append(path)
+
+ if font.has_key('__svgsync'):
+ font.removeGlyph('__svgsync')
+
+ return paths
+
+
+def maybeAddMove(contour, x, y, smooth):
+ if len(contour.segments) == 0:
+ contour.appendSegment(MOVE, [(x, y)], smooth=smooth)
+
+
+
+svgPathDataRegEx = re.compile(r'(?:([A-Z])\s*|)([0-9\.\-\+eE]+)')
+
+
+def drawSVGPath(g, d, tr):
+ yMul = -1
+ xOffs = tr[0]
+ yOffs = -(font.info.unitsPerEm - tr[1])
+
+ for pathd in d.split('M'):
+ pathd = pathd.strip()
+ # print 'pathd', pathd
+ if len(pathd) == 0:
+ continue
+ i = 0
+ closePath = False
+ if pathd[-1] == 'z':
+ closePath = True
+ pathd = pathd[0:-1]
+
+ pv = []
+ for m in svgPathDataRegEx.finditer('M' + pathd):
+ if m.group(1) is not None:
+ pv.append(m.group(1) + m.group(2))
+ else:
+ pv.append(m.group(2))
+
+ initX = 0
+ initY = 0
+
+ pen = g.getPen()
+
+ while i < len(pv):
+ pd = pv[i]; i += 1
+ cmd = pd[0]
+ x = num(pd[1:]) + xOffs
+ y = (num(pv[i]) + yOffs) * yMul; i += 1
+
+ if cmd == 'M':
+ # print cmd, x, y, '/', num(pv[i-2][1:])
+ initX = x
+ initY = y
+ pen.moveTo((x, y))
+ continue
+
+ if cmd == 'C':
+ # Bezier curve: "C x1 y1, x2 y2, x y"
+ x1 = x
+ y1 = y
+ x2 = num(pv[i]) + xOffs; i += 1
+ y2 = (num(pv[i]) + yOffs) * yMul; i += 1
+ x = num(pv[i]) + xOffs; i += 1
+ y = (num(pv[i]) + yOffs) * yMul; i += 1
+ pen.curveTo((x1, y1), (x2, y2), (x, y))
+ # print cmd, x1, y1, x2, y2, x, y
+
+ elif cmd == 'L':
+ pen.lineTo((x, y))
+
+ else:
+ raise Exception('unexpected SVG path command %r' % cmd)
+
+ if closePath:
+ pen.closePath()
+ else:
+ pen.endPath()
+ # print 'path ended. closePath:', closePath
+
+
+def glyphToSVG(g, path, hash):
+ width = g.width
+ height = font.info.unitsPerEm
+
+ d = {
+ 'name': g.name,
+ 'width': width,
+ 'height': effectiveAscender - font.info.descender,
+ 'effectiveAscender': effectiveAscender,
+ 'leftMargin': g.leftMargin,
+ 'rightMargin': g.rightMargin,
+ 'd': path.d(use_closed_attrib=True),
+ 'ascender': font.info.ascender,
+ 'descender': font.info.descender,
+ 'baselineOffset': height + font.info.descender,
+ 'unitsPerEm': font.info.unitsPerEm,
+ 'hash': hash,
+ }
+
+ svg = '''
+<svg xmlns="http://www.w3.org/2000/svg" width="%(width)d" height="%(height)d" data-svgsync-hash="%(hash)s">
+ <g id="%(name)s">
+ <path d="%(d)s" transform="translate(0 %(effectiveAscender)d)" />
+ <rect x="0" y="0" width="%(width)d" height="%(height)d" fill="" stroke="black" />
+ </g>
+</svg>
+ ''' % d
+ # print svg
+ return svg.strip()
+
+
+def _findPathNodes(n, paths, defs, uses, isDef=False):
+ for cn in n.childNodes:
+ if cn.nodeName == 'path':
+ if isDef:
+ defs[cn.getAttribute('id')] = cn
+ else:
+ paths.append(cn)
+ elif cn.nodeName == 'use':
+ uses[cn.getAttribute('xlink:href').lstrip('#')] = {'useNode': cn, 'targetNode': None}
+ elif cn.nodeName == 'defs':
+ _findPathNodes(cn, paths, defs, uses, isDef=True)
+ elif not isinstance(cn, basestring) and cn.childNodes and len(cn.childNodes) > 0:
+ _findPathNodes(cn, paths, defs, uses, isDef)
+ # return translate
+
+
+def findPathNodes(n, isDef=False):
+ paths = []
+ defs = {}
+ uses = {}
+ # <g id="Canvas" transform="translate(-3677 -24988)">
+ # <g id="six 2">
+ # <g id="six">
+ # <g id="Vector">
+ # <use xlink:href="#path0_fill" transform="translate(3886 25729)"/>
+ # ...
+ # <defs>
+ # <path id="path0_fill" ...
+ #
+ _findPathNodes(n, paths, defs, uses)
+
+ # flatten uses & defs
+ for k in uses.keys():
+ dfNode = defs.get(k)
+ if dfNode is not None:
+ v = uses[k]
+ v['targetNode'] = dfNode
+ if dfNode.nodeName == 'path':
+ useNode = v['useNode']
+ useNode.parentNode.replaceChild(dfNode, useNode)
+ attrs = useNode.attributes
+ for k in attrs.keys():
+ if k != 'xlink:href':
+ dfNode.setAttribute(k, attrs[k])
+ paths.append(dfNode)
+
+ else:
+ del defs[k]
+
+ return paths
+
+
+def nodeTranslation(path, x=0, y=0):
+ tr = path.getAttribute('transform')
+ if tr is not None:
+ if not isinstance(tr, basestring):
+ tr = tr.value
+ if len(tr) > 0:
+ m = re.match(r"translate\s*\(\s*(?P<x>[\-\d\.eE]+)[\s,]*(?P<y>[\-\d\.eE]+)\s*\)", tr)
+ if m is not None:
+ x += num(m.group('x'))
+ y += num(m.group('y'))
+ else:
+ raise Exception('Unable to handle transform="%s"' % tr)
+ # m = re.match(r"matrix\s*\(\s*(?P<a>[\-\d\.eE]+)[\s,]*(?P<b>[\-\d\.eE]+)[\s,]*(?P<c>[\-\d\.eE]+)[\s,]*(?P<d>[\-\d\.eE]+)[\s,]*(?P<e>[\-\d\.eE]+)[\s,]*(?P<f>[\-\d\.eE]+)[\s,]*", tr)
+ # if m is not None:
+ # a, b, c = num(m.group('a')), num(m.group('b')), num(m.group('c'))
+ # d, e, f = num(m.group('d')), num(m.group('e')), num(m.group('f'))
+ # # matrix -1 0 0 -1 -660.719 31947
+ # print 'matrix', a, b, c, d, e, f
+ # # matrix(-1 0 -0 -1 -2553 31943)
+ pn = path.parentNode
+ if pn is not None and pn.nodeName != '#document':
+ x, y = nodeTranslation(pn, x, y)
+ return (x, y)
+
+
+def glyphUpdateFromSVG(g, svgCode):
+ doc = xmlparseString(svgCode)
+ svg = doc.documentElement
+ paths = findPathNodes(svg)
+ if len(paths) == 0:
+ raise Exception('no <path> found in SVG')
+ path = paths[0]
+ if len(paths) != 1:
+ for p in paths:
+ id = p.getAttribute('id')
+ if id is not None and id.find('stroke') == -1:
+ path = p
+ break
+
+ tr = nodeTranslation(path)
+ d = path.getAttribute('d')
+ g.clearContours()
+ drawSVGPath(g, d, tr)
+
+
+def stat(path):
+ try:
+ return os.stat(path)
+ except OSError as e:
+ return None
+
+
+def writeFile(file, s):
+ with open(file, 'w') as f:
+ f.write(s)
+
+
+def writeFileAndMkDirsIfNeeded(file, s):
+ try:
+ writeFile(file, s)
+ except IOError as e:
+ if e.errno == 2:
+ os.makedirs(os.path.dirname(file))
+ writeFile(file, s)
+
+
+def findSvgSyncHashInSVG(svgCode):
+ # with open(svgFile, 'r') as f:
+ # svgCode = f.readline(512)
+ r = re.compile(r'^\s*<svg[^>]+data-svgsync-hash="([^"]*)".+')
+ m = r.match(svgCode)
+ if m is not None:
+ return m.group(1)
+ return None
+
+
+def computeSVGHashFromSVG(g):
+ # h = sha256()
+ return 'abc123'
+
+
+def encodePath(o, path):
+ o.write(path.d())
+
+
+def hashPaths(paths):
+ h = sha256()
+ for path in paths:
+ h.update(path.d()+';')
+ return b64encode(h.digest(), '-_')
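+# The digest is a URL-safe base64 SHA-256 over the concatenated path data; it is
+# embedded as data-svgsync-hash (see glyphToSVG) and read back by
+# findSvgSyncHashInSVG, so outline changes can be detected by content rather than mtime.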
+
+
+def svgGetPaths(svgCode):
+ doc = xmlparseString(svgCode)
+ svg = doc.documentElement
+ paths = findPathNodes(svg)
+ isFigmaSVG = svgCode.find('Figma</desc>') != -1
+
+ if len(paths) == 0:
+ return paths, (0,0)
+
+ paths2 = []
+ for path in paths:
+ id = path.getAttribute('id')
+ if not isFigmaSVG or (id is None or id.find('stroke') == -1):
+ tr = nodeTranslation(path)
+ d = path.getAttribute('d')
+ paths2.append((d, tr))
+
+ return paths2, isFigmaSVG
+
+
+def translatePath(path, trX, trY):
+ pass
+
+
+def parseSVG(svgFile):
+ svgCode = None
+ with open(svgFile, 'r') as f:
+ svgCode = f.read()
+
+ existingSvgHash = findSvgSyncHashInSVG(svgCode)
+ print 'hash in SVG file:', existingSvgHash
+
+ svgPathDefs, isFigmaSVG = svgGetPaths(svgCode)
+ paths = []
+ for pathDef, tr in svgPathDefs:
+ print 'pathDef:', pathDef, 'tr:', tr
+ path = parse_path(pathDef)
+ if tr[0] != 0 or tr[1] != 0:
+ path = path.translated(vec2(*tr))
+ paths.append(path)
+
+ return paths, existingSvgHash
+
+
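+# UFO -> SVG direction: hashes the UFO glyph outline and the outline stored in
+# the existing SVG (if any); when they differ, the intent is to write a fresh
+# SVG for the glyph and record the hash in g.lib['svgsync.hash'].
+# Still work in progress -- note the debug exits below.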
+def syncGlyphUFOToSVG(g, glyphFile, svgFile, mtime, hasSvgFile):
+ # # Let's print out the first path object and the color it was in the SVG
+ # # We'll see it is composed of two CubicBezier objects and, in the SVG file it
+ # # came from, it was red
+ # paths, attributes, svg_attributes = svg2paths(svgFile, return_svg_attributes=True)
+ # print('svg_attributes:', repr(svg_attributes))
+ # # redpath = paths[0]
+ # # redpath_attribs = attributes[0]
+ # print(paths)
+ # print(attributes)
+ # wsvg(paths, attributes=attributes, svg_attributes=svg_attributes, filename=svgFile + '-x.svg')
+
+ # existingSVGHash = readSVGHash(svgFile)
+ svgPaths = None
+ existingSVGHash = None
+ if hasSvgFile:
+ svgPaths, existingSVGHash = parseSVG(svgFile)
+ print 'existingSVGHash:', existingSVGHash
+ print 'svgPaths:\n', '\n'.join([p.d() for p in svgPaths])
+ svgHash = hashPaths(svgPaths)
+ print 'hash(SVG-glyph) =>', svgHash
+
+ # computedSVGHash = computeSVGHashFromSVG(svgFile)
+ # print 'computeSVGHashFromSVG:', computedSVGHash
+
+ ufoPaths = glyphToPaths(g)
+ print 'ufoPaths:\n', '\n'.join([p.d() for p in ufoPaths])
+ ufoGlyphHash = hashPaths(ufoPaths)
+ print 'hash(UFO-glyph) =>', ufoGlyphHash
+
+ # svg = glyphToSVG(g, ufoGlyphHash)
+
+ # with open('/Users/rsms/src/interface/_local/svgPaths.txt', 'w') as f:
+ # f.write(svgPaths[0].d())
+ # with open('/Users/rsms/src/interface/_local/ufoPaths.txt', 'w') as f:
+ # f.write(ufoPaths[0].d())
+ # print svgPaths[0].d() == ufoPaths[0].d()
+
+ # svgHash = hashPaths()
+ # print 'hash(UFO-glyph) =>', pathHash
+
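+  # XXX DEBUG: hard exit while the hashing logic above is being verified;
+  # everything below in this function is currently unreachable (and pathHash
+  # is not defined yet).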
+ sys.exit(1)
+ if pathHash == existingSVGHash:
+ return (None, 0) # did not change
+
+ svg = glyphToSVG(g, pathHash)
+ sys.exit(1)
+
+ writeFileAndMkDirsIfNeeded(svgFile, svg)
+ os.utime(svgFile, (mtime, mtime))
+ print 'svgsync write', svgFile
+
+ g.lib['svgsync.hash'] = pathHash
+ return (glyphFile, mtime)
+
+
+def syncGlyphSVGToUFO(glyphname, svgFile):
+ print glyphname + ': SVG -> UFO'
+ sys.exit(1)
+ svg = ''
+ with open(svgFile, 'r') as f:
+ svg = f.read()
+ g = font.getGlyph(glyphname)
+ glyphUpdateFromSVG(g, svg)
+
+
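+# Locates the .glif file for a glyph inside the UFO's "glyphs" directory.
+# Since UFO filename mangling can append extra underscores (to avoid name
+# collisions), several candidate spellings are probed until one exists.
+# Returns (filename, stat) or ('', None) if no candidate was found.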
+def findGlifFile(glyphname):
+ # glyphname.glif
+ # glyphname_.glif
+ # glyphname__.glif
+ # glyphname___.glif
+ for underscoreCount in range(0, 5):
+ fn = os.path.join(ufopath, 'glyphs', glyphname + ('_' * underscoreCount) + '.glif')
+ st = stat(fn)
+ if st is not None:
+ return fn, st
+
+ if glyphname.find('.') != -1:
+ # glyph_.name.glif
+ # glyph__.name.glif
+ # glyph___.name.glif
+ for underscoreCount in range(0, 5):
+ nv = glyphname.split('.')
+ nv[0] = nv[0] + ('_' * underscoreCount)
+ ns = '.'.join(nv)
+ fn = os.path.join(ufopath, 'glyphs', ns + '.glif')
+ st = stat(fn)
+ if st is not None:
+ return fn, st
+
+ if glyphname.find('_') != -1:
+ # glyph_name.glif
+ # glyph_name_.glif
+ # glyph_name__.glif
+ # glyph__name.glif
+ # glyph__name_.glif
+ # glyph__name__.glif
+ # glyph___name.glif
+ # glyph___name_.glif
+ # glyph___name__.glif
+ for x in range(0, 4):
+ for y in range(0, 5):
+ ns = glyphname.replace('_', '__' + ('_' * x))
+ fn = os.path.join(ufopath, 'glyphs', ns + ('_' * y) + '.glif')
+ st = stat(fn)
+ if st is not None:
+ return fn, st
+
+ return ('', None)
+
+
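+# Decides the sync direction for a single glyph by comparing mtimes: if the
+# SVG file is newer than the .glif, the SVG wins (SVG -> UFO); if the .glif is
+# newer, the UFO wins (UFO -> SVG). Returns the modified .glif path and its
+# new mtime, or (None, 0) when nothing changed.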
+def syncGlyph(glyphname, createSVG=False): # => (glifFile, mtime) or (None, 0) if noop
+ glyphFile, glyphStat = findGlifFile(glyphname)
+
+ svgFile = os.path.join(svgdir, glyphname + '.svg')
+ svgStat = stat(svgFile)
+
+ if glyphStat is None and svgStat is None:
+ raise Exception("glyph %r doesn't exist in UFO or SVG directory" % glyphname)
+
+ c = cmp(
+ 0 if glyphStat is None else glyphStat.st_mtime,
+ 0 if svgStat is None else svgStat.st_mtime
+ )
+
+ g = font.getGlyph(glyphname)
+ ufoPathHash = g.lib['svgsync.hash'] if 'svgsync.hash' in g.lib else None
+ print '[syncGlyph] g.lib["svgsync.hash"] =', ufoPathHash
+
+ c = 1 # XXX DEBUG
+
+ if c < 0:
+ syncGlyphSVGToUFO(glyphname, svgFile)
+    return (glyphFile, svgStat.st_mtime)  # glif file in UFO changed + its new mtime
+ elif c > 0 and (svgStat is not None or createSVG):
+ print glyphname + ': UFO -> SVG'
+ return syncGlyphUFOToSVG(
+ g,
+ glyphFile,
+ svgFile,
+ glyphStat.st_mtime,
+ hasSvgFile=svgStat is not None
+ )
+
+ return (None, 0) # UFO did not change
+
+
+# ————————————————————————————————————————————————————————————————————————
+# main
+
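+# Example invocations (script and UFO paths are illustrative only):
+#   misc/tools/svgsync2.py build/ufo/Inter-UI-Regular.ufo A Aacute
+#   misc/tools/svgsync2.py --svgdir _local/svg build/ufo/Inter-UI-Regular.ufo
+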
+argparser = argparse.ArgumentParser(description='Sync glyph outlines between UFO and SVG files')
+
+argparser.add_argument('--svgdir', dest='svgdir', metavar='<dir>', type=str,
+ default='',
+ help='Write SVG files to <dir>. If not specified, SVG files are' +
+       ' written to: dirname(<ufopath>)/svg/<familyname>/<style>')
+
+argparser.add_argument('ufopath', metavar='<ufopath>', type=str,
+ help='Path to UFO packages')
+
+argparser.add_argument('glyphs', metavar='<glyphname>', type=str, nargs='*',
+ help='Glyphs to convert. Converts all if none specified.')
+
+args = argparser.parse_args()
+
+ufopath = args.ufopath.rstrip('/')
+
+font = OpenFont(ufopath)
+effectiveAscender = max(font.info.ascender, font.info.unitsPerEm)
+
+svgdir = args.svgdir
+if len(svgdir) == 0:
+ svgdir = os.path.join(
+ os.path.dirname(ufopath),
+ 'svg',
+ font.info.familyName,
+ font.info.styleName
+ )
+
+print 'svgsync sync %s (%s)' % (font.info.familyName, font.info.styleName)
+
+createSVGs = len(args.glyphs) > 0
+glyphnames = args.glyphs if len(args.glyphs) else font.keys()
+
+modifiedGlifFiles = []
+for glyphname in glyphnames:
+ glyphFile, mtime = syncGlyph(glyphname, createSVG=createSVGs)
+ if glyphFile is not None:
+ modifiedGlifFiles.append((glyphFile, mtime))
+
+if len(modifiedGlifFiles) > 0:
+ font.save()
+ for glyphFile, mtime in modifiedGlifFiles:
+ os.utime(glyphFile, (mtime, mtime))
+ print 'svgsync write', glyphFile
+
diff --git a/misc/tools/ufo-color-glyphs.py b/misc/tools/ufo-color-glyphs.py
new file mode 100755
index 000000000..9ec050853
--- /dev/null
+++ b/misc/tools/ufo-color-glyphs.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+# encoding: utf8
+#
+# Grab http://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt
+#
+from __future__ import print_function
+import os, sys
+from argparse import ArgumentParser
+from robofab.objects.objectsRF import OpenFont
+from unicode_util import parseUnicodeDataFile, MainCategories as UniMainCategories
+
+lightBlueColor = (0.86, 0.92, 0.97, 1.0)
+lightTealColor = (0.8, 0.94, 0.95, 1.0)
+lightYellowColor = (0.97, 0.95, 0.83, 1.0)
+lightPurpleColor = (0.93, 0.9, 0.98, 1.0)
+lightGreyColor = (0.94, 0.94, 0.94, 1.0)
+mediumGreyColor = (0.87, 0.87, 0.87, 1.0)
+lightGreenColor = (0.89, 0.96, 0.92, 1.0)
+mediumGreenColor = (0.77, 0.95, 0.76, 1.0)
+lightRedColor = (0.98, 0.89, 0.89, 1.0)
+lightOrangeColor = (1.0, 0.89, 0.82, 1.0)
+redColor = (1, 0.3, 0.3, 1)
+
+colorsByGlyphName = [
+ (set('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'), lightBlueColor), # light blue 1
+]
+
+colorsByUCMainCategory = {
+ # UniMainCategories.Letter: (1, 1, 1, 1),
+ UniMainCategories.Mark: lightRedColor,
+ UniMainCategories.Punctuation: lightGreyColor,
+ UniMainCategories.Format: lightGreyColor,
+ UniMainCategories.Number: lightGreenColor,
+ UniMainCategories.Symbol: lightTealColor,
+ UniMainCategories.Separator: lightPurpleColor,
+ UniMainCategories.Control: redColor,
+ UniMainCategories.Surrogate: redColor,
+ UniMainCategories.PrivateUse: lightYellowColor,
+ UniMainCategories.Unassigned: lightYellowColor,
+ UniMainCategories.Other: lightOrangeColor,
+}
+
+
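+# Resolution order for a glyph's mark color:
+#   1. explicit glyph-name sets in colorsByGlyphName (basic A-Z / a-z)
+#   2. the Unicode main category of the glyph's first codepoint found in ucd
+#   3. unencoded glyphs: green for pure components (".cn" in the name),
+#      grey for precomposed glyphs
+# Returns None when no color applies (the caller then removes any mark).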
+def colorForGlyph(name, unicodes, ucd):
+ for nameSet, color in colorsByGlyphName:
+ if name in nameSet:
+ return color
+
+ for uc in unicodes:
+ cp = ucd.get(uc)
+ if cp is None:
+ continue
+ return colorsByUCMainCategory.get(cp.mainCategory)
+
+ if len(unicodes) == 0:
+ if name.find('.cn') != -1:
+ # pure component
+ return mediumGreenColor
+ else:
+ # precomposed
+ return mediumGreyColor
+
+ return None
+
+
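+# Example invocation (paths are illustrative only):
+#   misc/tools/ufo-color-glyphs.py -ucd UnicodeData.txt build/ufo/Inter-UI-Regular.ufo
+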
+def main():
+ argparser = ArgumentParser(
+ description='Set robofont color marks on glyphs based on unicode categories')
+
+ argparser.add_argument(
+ '-dry', dest='dryRun', action='store_const', const=True, default=False,
+ help='Do not modify anything, but instead just print what would happen.')
+
+ argparser.add_argument(
+ '-ucd', dest='ucdFile', metavar='<file>', type=str,
+ help='UnicodeData.txt file from http://www.unicode.org/')
+
+ argparser.add_argument(
+ 'fontPaths', metavar='<ufofile>', type=str, nargs='+', help='UFO fonts to update')
+
+ args = argparser.parse_args()
+ dryRun = args.dryRun
+ markLibKey = 'com.typemytype.robofont.mark'
+
+ ucd = {}
+ if args.ucdFile:
+ ucd = parseUnicodeDataFile(args.ucdFile)
+
+ for fontPath in args.fontPaths:
+ font = OpenFont(fontPath)
+ for g in font:
+ rgba = colorForGlyph(g.name, g.unicodes, ucd)
+ if rgba is None:
+ if markLibKey in g.lib:
+ del g.lib[markLibKey]
+ else:
+ g.lib[markLibKey] = [float(n) for n in rgba]
+
+ print('Write', fontPath)
+ if not dryRun:
+ font.save()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/misc/tools/unicode_util.py b/misc/tools/unicode_util.py
new file mode 100644
index 000000000..18196e87e
--- /dev/null
+++ b/misc/tools/unicode_util.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python
+# encoding: utf8
+
+class MainCategories:
+ Letter = 'Letter'
+ Mark = 'Mark'
+ Number = 'Number'
+ Punctuation = 'Punctuation'
+ Symbol = 'Symbol'
+ Separator = 'Separator'
+ Control = 'Control'
+ Format = 'Format'
+ Surrogate = 'Surrogate'
+ PrivateUse = 'Private_Use'
+ Unassigned = 'Unassigned'
+ Other = 'Other'
+
+GeneralCategories = {
+ 'Lu': ('Uppercase_Letter', MainCategories.Letter),
+ 'Ll': ('Lowercase_Letter', MainCategories.Letter),
+ 'Lt': ('Titlecase_Letter', MainCategories.Letter),
+ 'LC': ('Cased_Letter', MainCategories.Letter),
+ 'Lm': ('Modifier_Letter', MainCategories.Letter),
+ 'Lo': ('Other_Letter', MainCategories.Letter),
+ 'L': ('Letter', MainCategories.Letter),
+ 'Mn': ('Nonspacing_Mark', MainCategories.Mark),
+ 'Mc': ('Spacing_Mark', MainCategories.Mark),
+ 'Me': ('Enclosing_Mark', MainCategories.Mark),
+ 'M': ('Mark', MainCategories.Mark),
+ 'Nd': ('Decimal_Number', MainCategories.Number),
+ 'Nl': ('Letter_Number', MainCategories.Number),
+ 'No': ('Other_Number', MainCategories.Number),
+ 'N': ('Number', MainCategories.Number),
+ 'Pc': ('Connector_Punctuation', MainCategories.Punctuation),
+ 'Pd': ('Dash_Punctuation', MainCategories.Punctuation),
+ 'Ps': ('Open_Punctuation', MainCategories.Punctuation),
+ 'Pe': ('Close_Punctuation', MainCategories.Punctuation),
+ 'Pi': ('Initial_Punctuation', MainCategories.Punctuation),
+ 'Pf': ('Final_Punctuation', MainCategories.Punctuation),
+ 'Po': ('Other_Punctuation', MainCategories.Punctuation),
+ 'P': ('Punctuation', MainCategories.Punctuation),
+ 'Sm': ('Math_Symbol', MainCategories.Symbol),
+ 'Sc': ('Currency_Symbol', MainCategories.Symbol),
+ 'Sk': ('Modifier_Symbol', MainCategories.Symbol),
+ 'So': ('Other_Symbol', MainCategories.Symbol),
+ 'S': ('Symbol', MainCategories.Symbol),
+ 'Zs': ('Space_Separator', MainCategories.Separator),
+ 'Zl': ('Line_Separator', MainCategories.Separator),
+ 'Zp': ('Paragraph_Separator', MainCategories.Separator),
+ 'Z': ('Separator', MainCategories.Separator),
+ 'Cc': ('Control', MainCategories.Control),
+ 'Cf': ('Format', MainCategories.Format),
+ 'Cs': ('Surrogate', MainCategories.Surrogate),
+ 'Co': ('Private_Use', MainCategories.PrivateUse),
+ 'Cn': ('Unassigned', MainCategories.Unassigned),
+ 'C': ('Other', MainCategories.Other),
+}
+
+
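+# A single record from UnicodeData.txt. `v` is the raw semicolon-separated
+# field list: v[0] codepoint (hex), v[1] name, v[2] general category,
+# v[6] decimal digit value, v[8] numeric value.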
+class Codepoint:
+ def __init__(self, v):
+ self.codePoint = int(v[0], 16)
+ self.name = v[1]
+
+ self.category = v[2]
+ c = GeneralCategories.get(self.category, ('', MainCategories.Other))
+ self.categoryName = c[0]
+ self.mainCategory = c[1]
+
+ self.decDigitValue = v[6]
+ self.numValue = v[8]
+
+ def isLetter(self): return self.mainCategory is MainCategories.Letter
+ def isMark(self): return self.mainCategory is MainCategories.Mark
+ def isNumber(self): return self.mainCategory is MainCategories.Number
+ def isPunctuation(self): return self.mainCategory is MainCategories.Punctuation
+ def isSymbol(self): return self.mainCategory is MainCategories.Symbol
+ def isSeparator(self): return self.mainCategory is MainCategories.Separator
+ def isControl(self): return self.mainCategory is MainCategories.Control
+ def isFormat(self): return self.mainCategory is MainCategories.Format
+ def isSurrogate(self): return self.mainCategory is MainCategories.Surrogate
+ def isPrivateUse(self): return self.mainCategory is MainCategories.PrivateUse
+ def isUnassigned(self): return self.mainCategory is MainCategories.Unassigned
+ def isOther(self): return self.mainCategory is MainCategories.Other
+
+
+# http://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt
+def parseUnicodeDataFile(ucdFile): # { codepoint:int => Codepoint() }
+ ucd = {}
+ with open(ucdFile, 'r') as f:
+ for line in f:
+ # See http://unicode.org/reports/tr44/#UnicodeData.txt for fields
+ # e.g. "001D;<control>;Cc;0;B;;;;;N;INFORMATION SEPARATOR THREE;;;;"
+ if len(line) == 0 or line.startswith('#'):
+ continue
+ v = line.split(';')
+ if len(v) < 10:
+ continue
+ try:
+ cp = Codepoint(v)
+ ucd[cp.codePoint] = cp
+ except:
+ pass
+ return ucd
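+
+# Example usage (the UnicodeData.txt path is illustrative):
+#   ucd = parseUnicodeDataFile('UnicodeData.txt')
+#   cp = ucd.get(0x0041)  # LATIN CAPITAL LETTER A
+#   if cp is not None and cp.isLetter():
+#     print(cp.name, cp.categoryName)  # -> LATIN CAPITAL LETTER A Uppercase_Letter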
diff --git a/misc/tools/versionize-css.py b/misc/tools/versionize-css.py
new file mode 100755
index 000000000..0b317462f
--- /dev/null
+++ b/misc/tools/versionize-css.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+# encoding: utf8
+#
+# Updates the "?v=x" in docs/inter-ui.css
+#
+from __future__ import print_function
+import os, sys, re
+from collections import OrderedDict
+from ConfigParser import RawConfigParser
+
+
+def main():
+  rootDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+ config = RawConfigParser(dict_type=OrderedDict)
+ config.read(os.path.join(rootDir, 'src', 'fontbuild.cfg'))
+ version = config.get('main', 'version')
+
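+  # Matches url("...v=<version>") references in the CSS so the version query
+  # value can be swapped for the version read from fontbuild.cfg.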
+ regex = re.compile(r'(url\("[^"]+?v=)([^"]+)("\))')
+
+ cssFileName = os.path.join(rootDir, 'docs', 'inter-ui.css')
+
+ s = ''
+ with open(cssFileName, 'r') as f:
+ s = f.read()
+
+ s = regex.sub(
+ lambda m: '%s%s%s' % (m.group(1), version, m.group(3)),
+ s
+ )
+
+ with open(cssFileName, 'w') as f:
+ f.write(s)
+
+if __name__ == '__main__':
+ main()