bug 1432229, land compare-locales 2.7 and python-fluent 0.6.2, r?gandalf draft
author Axel Hecht <axel@pike.org>
Wed, 07 Feb 2018 20:22:37 +0100
changeset 752627 0ff82704b1456ce910c4a6dd235c828dc8272ef4
parent 752153 65133e49fbfd5306632301f74be7cd15890bdf9f
push id 98324
push user axel@mozilla.com
push date Thu, 08 Feb 2018 17:59:49 +0000
reviewers gandalf
bugs 1432229
milestone 60.0a1
bug 1432229, land compare-locales 2.7 and python-fluent 0.6.2, r?gandalf MozReview-Commit-ID: 2Oep88PhVXF
third_party/python/compare-locales/compare_locales/__init__.py
third_party/python/compare-locales/compare_locales/checks.py
third_party/python/compare-locales/compare_locales/commands.py
third_party/python/compare-locales/compare_locales/compare.py
third_party/python/compare-locales/compare_locales/integration_tests/__init__.py
third_party/python/compare-locales/compare_locales/integration_tests/test_plurals.py
third_party/python/compare-locales/compare_locales/merge.py
third_party/python/compare-locales/compare_locales/parser.py
third_party/python/compare-locales/compare_locales/plurals.py
third_party/python/compare-locales/compare_locales/tests/__init__.py
third_party/python/compare-locales/compare_locales/tests/test_checks.py
third_party/python/compare-locales/compare_locales/tests/test_defines.py
third_party/python/compare-locales/compare_locales/tests/test_dtd.py
third_party/python/compare-locales/compare_locales/tests/test_ftl.py
third_party/python/compare-locales/compare_locales/tests/test_ini.py
third_party/python/compare-locales/compare_locales/tests/test_merge.py
third_party/python/compare-locales/compare_locales/tests/test_merge_ftl.py
third_party/python/compare-locales/compare_locales/tests/test_parser.py
third_party/python/compare-locales/compare_locales/tests/test_properties.py
third_party/python/fluent/fluent/migrate/__init__.py
third_party/python/fluent/fluent/migrate/cldr.py
third_party/python/fluent/fluent/migrate/context.py
third_party/python/fluent/fluent/migrate/errors.py
third_party/python/fluent/fluent/migrate/merge.py
third_party/python/fluent/fluent/migrate/transforms.py
third_party/python/fluent/fluent/migrate/util.py
third_party/python/fluent/fluent/syntax/ast.py
third_party/python/fluent/fluent/syntax/errors.py
third_party/python/fluent/fluent/syntax/ftlstream.py
third_party/python/fluent/fluent/syntax/parser.py
third_party/python/fluent/fluent/syntax/serializer.py
third_party/python/fluent/fluent/syntax/stream.py
third_party/python/fluent/tools/migrate/blame.py
third_party/python/fluent/tools/migrate/examples/about_dialog.py
third_party/python/fluent/tools/migrate/examples/about_downloads.py
third_party/python/fluent/tools/migrate/examples/bug_1291693.py
third_party/python/fluent/tools/migrate/migrate-l10n.py
--- a/third_party/python/compare-locales/compare_locales/__init__.py
+++ b/third_party/python/compare-locales/compare_locales/__init__.py
@@ -1,1 +1,1 @@
-version = "2.5.1"
+version = "2.7.0"
--- a/third_party/python/compare-locales/compare_locales/checks.py
+++ b/third_party/python/compare-locales/compare_locales/checks.py
@@ -6,32 +6,36 @@ import re
 from collections import Counter
 from difflib import SequenceMatcher
 from xml import sax
 try:
     from cStringIO import StringIO
 except ImportError:
     from StringIO import StringIO
 
-from compare_locales.parser import DTDParser, PropertiesEntity
+from fluent.syntax import ast as ftl
+
+from compare_locales.parser import DTDParser, PropertiesEntity, FluentMessage
+from compare_locales import plurals
 
 
 class Checker(object):
     '''Abstract class to implement checks per file type.
     '''
     pattern = None
     # if a check uses all reference entities, set this to True
     needs_reference = False
 
     @classmethod
     def use(cls, file):
         return cls.pattern.match(file.file)
 
-    def __init__(self, extra_tests):
+    def __init__(self, extra_tests, locale=None):
         self.extra_tests = extra_tests
+        self.locale = locale
         self.reference = None
 
     def check(self, refEnt, l10nEnt):
         '''Given the reference and localized Entities, performs checks.
 
         This is a generator yielding tuples of
         - "warning" or "error", depending on what should be reported,
         - tuple of line, column info for the error within the string
@@ -66,33 +70,24 @@ class PropertiesChecker(Checker):
 
     def check(self, refEnt, l10nEnt):
         '''Test for the different variable formats.
         '''
         refValue, l10nValue = refEnt.val, l10nEnt.val
         refSpecs = None
         # check for PluralForm.jsm stuff, should have the docs in the
         # comment
+        # That also includes intl.properties' pluralRule, so exclude
+        # entities with that key and values with just numbers
         if (refEnt.pre_comment
-                and 'Localization_and_Plurals' in refEnt.pre_comment.all):
-            # For plurals, common variable pattern is #1. Try that.
-            pats = set(int(m.group(1)) for m in re.finditer('#([0-9]+)',
-                                                            refValue))
-            if len(pats) == 0:
-                return
-            lpats = set(int(m.group(1)) for m in re.finditer('#([0-9]+)',
-                                                             l10nValue))
-            if pats - lpats:
-                yield ('warning', 0, 'not all variables used in l10n',
-                       'plural')
-                return
-            if lpats - pats:
-                yield ('error', 0, 'unreplaced variables in l10n',
-                       'plural')
-                return
+                and 'Localization_and_Plurals' in refEnt.pre_comment.all
+                and refEnt.key != 'pluralRule'
+                and not re.match(r'\d+$', refValue)):
+            for msg_tuple in self.check_plural(refValue, l10nValue):
+                yield msg_tuple
             return
         # check for lost escapes
         raw_val = l10nEnt.raw_val
         for m in PropertiesEntity.escape.finditer(raw_val):
             if m.group('single') and \
                m.group('single') not in PropertiesEntity.known_escapes:
                 yield ('warning', m.start(),
                        'unknown escape sequence, \\' + m.group('single'),
@@ -101,16 +96,43 @@
             refSpecs = self.getPrintfSpecs(refValue)
         except PrintfException:
             refSpecs = []
         if refSpecs:
             for t in self.checkPrintf(refSpecs, l10nValue):
                 yield t
             return
 
+    def check_plural(self, refValue, l10nValue):
+        '''Check for the stringbundle plurals logic.
+        The common variable pattern is #1.
+        '''
+        if self.locale in plurals.CATEGORIES_BY_LOCALE:
+            expected_forms = len(plurals.CATEGORIES_BY_LOCALE[self.locale])
+            found_forms = l10nValue.count(';') + 1
+            msg = 'expecting {} plurals, found {}'.format(
+                expected_forms,
+                found_forms
+            )
+            if expected_forms != found_forms:
+                yield ('warning', 0, msg, 'plural')
+        pats = set(int(m.group(1)) for m in re.finditer('#([0-9]+)',
+                                                        refValue))
+        if len(pats) == 0:
+            return
+        lpats = set(int(m.group(1)) for m in re.finditer('#([0-9]+)',
+                                                         l10nValue))
+        if pats - lpats:
+            yield ('warning', 0, 'not all variables used in l10n',
+                   'plural')
+            return
+        if lpats - pats:
+            yield ('error', 0, 'unreplaced variables in l10n',
+                   'plural')
+
     def checkPrintf(self, refSpecs, l10nValue):
         try:
             l10nSpecs = self.getPrintfSpecs(l10nValue)
         except PrintfException, e:
             yield ('error', e.pos, e.msg, 'printf')
             return
         if refSpecs != l10nSpecs:
             sm = SequenceMatcher()
@@ -197,18 +221,18 @@ class DTDChecker(Checker):
     needs_reference = True  # to cast a wider net for known entity references
 
     eref = re.compile('&(%s);' % DTDParser.Name)
     tmpl = '''<!DOCTYPE elem [%s]>
 <elem>%s</elem>
 '''
     xmllist = set(('amp', 'lt', 'gt', 'apos', 'quot'))
 
-    def __init__(self, extra_tests):
-        super(DTDChecker, self).__init__(extra_tests)
+    def __init__(self, extra_tests, locale=None):
+        super(DTDChecker, self).__init__(extra_tests, locale=locale)
         self.processContent = False
         if self.extra_tests is not None and 'android-dtd' in self.extra_tests:
             self.processContent = True
         self.__known_entities = None
 
     def known_entities(self, refValue):
         if self.__known_entities is None and self.reference is not None:
             self.__known_entities = set()
@@ -430,27 +454,62 @@ class DTDChecker(Checker):
                 yield ('error', m.end(0)+offset, msg, 'android')
 
 
 class FluentChecker(Checker):
     '''Tests to run on Fluent (FTL) files.
     '''
     pattern = re.compile('.*\.ftl')
 
-    def check(self, refEnt, l10nEnt):
-        ref_entry = refEnt.entry
-        l10n_entry = l10nEnt.entry
-        # verify that values match, either both have a value or none
+    def find_message_references(self, entry):
+        refs = {}
+
+        def collect_message_references(node):
+            if isinstance(node, ftl.MessageReference):
+                # The key is the name of the referenced message and it will
+                # be used in set algebra to find missing and obsolete
+                # references. The value is the node itself and its span
+                # will be used to pinpoint the error.
+                refs[node.id.name] = node
+            # BaseNode.traverse expects this function to return the node.
+            return node
+
+        entry.traverse(collect_message_references)
+        return refs
+
+    def check_values(self, ref_entry, l10n_entry):
+        '''Verify that values match, either both have a value or none.'''
         if ref_entry.value is not None and l10n_entry.value is None:
             yield ('error', 0, 'Missing value', 'fluent')
         if ref_entry.value is None and l10n_entry.value is not None:
             offset = l10n_entry.value.span.start - l10n_entry.span.start
             yield ('error', offset, 'Obsolete value', 'fluent')
 
-        # verify that we're having the same set of attributes
+    def check_message_references(self, ref_entry, l10n_entry):
+        '''Verify that message references are the same.'''
+        ref_msg_refs = self.find_message_references(ref_entry)
+        l10n_msg_refs = self.find_message_references(l10n_entry)
+
+        # create unique sets of message names referenced in both entries
+        ref_msg_refs_names = set(ref_msg_refs.keys())
+        l10n_msg_refs_names = set(l10n_msg_refs.keys())
+
+        missing_msg_ref_names = ref_msg_refs_names - l10n_msg_refs_names
+        for msg_name in missing_msg_ref_names:
+            yield ('warning', 0, 'Missing message reference: ' + msg_name,
+                   'fluent')
+
+        obsolete_msg_ref_names = l10n_msg_refs_names - ref_msg_refs_names
+        for msg_name in obsolete_msg_ref_names:
+            pos = l10n_msg_refs[msg_name].span.start - l10n_entry.span.start
+            yield ('warning', pos, 'Obsolete message reference: ' + msg_name,
+                   'fluent')
+
+    def check_attributes(self, ref_entry, l10n_entry):
+        '''Verify that ref_entry and l10n_entry have the same attributes.'''
         ref_attr_names = set((attr.id.name for attr in ref_entry.attributes))
         ref_pos = dict((attr.id.name, i)
                        for i, attr in enumerate(ref_entry.attributes))
         l10n_attr_counts = \
             Counter(attr.id.name for attr in l10n_entry.attributes)
         l10n_attr_names = set(l10n_attr_counts)
         l10n_pos = dict((attr.id.name, i)
                         for i, attr in enumerate(l10n_entry.attributes))
@@ -479,17 +538,34 @@ class FluentChecker(Checker):
             attr
             for attr in l10n_entry.attributes
             if attr.id.name in obsolete_attr_names
         ]
         for attr in obsolete_attrs:
             yield ('error', attr.span.start - l10n_entry.span.start,
                    'Obsolete attribute: ' + attr.id.name, 'fluent')
 
+    def check(self, refEnt, l10nEnt):
+        ref_entry = refEnt.entry
+        l10n_entry = l10nEnt.entry
+
+        # PY3 Replace with `yield from` in Python 3.3+
+        for check in self.check_values(ref_entry, l10n_entry):
+            yield check
+
+        for check in self.check_message_references(ref_entry, l10n_entry):
+            yield check
+
+        # Only compare attributes of Fluent Messages. Attributes defined on
+        # Fluent Terms are private.
+        if isinstance(refEnt, FluentMessage):
+            for check in self.check_attributes(ref_entry, l10n_entry):
+                yield check
+
 
 def getChecker(file, extra_tests=None):
     if PropertiesChecker.use(file):
-        return PropertiesChecker(extra_tests)
+        return PropertiesChecker(extra_tests, locale=file.locale)
     if DTDChecker.use(file):
-        return DTDChecker(extra_tests)
+        return DTDChecker(extra_tests, locale=file.locale)
     if FluentChecker.use(file):
-        return FluentChecker(extra_tests)
+        return FluentChecker(extra_tests, locale=file.locale)
     return None
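A minimal standalone sketch of the plural checks added above, assuming a toy subset of CATEGORIES_BY_LOCALE (the real table lives in compare_locales.plurals); stringbundle plurals separate forms with ';' and use #1-style variables:

import re

# Toy subset of plurals.CATEGORIES_BY_LOCALE, for illustration only.
CATEGORIES_BY_LOCALE = {'en-GB': ('one', 'other')}

def plural_warnings(locale, ref_value, l10n_value):
    # Expected form count comes from the locale's plural categories.
    if locale in CATEGORIES_BY_LOCALE:
        expected = len(CATEGORIES_BY_LOCALE[locale])
        found = l10n_value.count(';') + 1
        if expected != found:
            yield ('warning', 0,
                   'expecting {} plurals, found {}'.format(expected, found),
                   'plural')
    # The #1-style variables must match between reference and l10n.
    refs = set(re.findall('#([0-9]+)', ref_value))
    l10ns = set(re.findall('#([0-9]+)', l10n_value))
    if refs - l10ns:
        yield ('warning', 0, 'not all variables used in l10n', 'plural')
    elif l10ns - refs:
        yield ('error', 0, 'unreplaced variables in l10n', 'plural')

print(list(plural_warnings('en-GB', '#1 file;#1 files', '#1 fiiilee')))
# [('warning', 0, 'expecting 2 plurals, found 1', 'plural')]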
--- a/third_party/python/compare-locales/compare_locales/commands.py
+++ b/third_party/python/compare-locales/compare_locales/commands.py
@@ -25,20 +25,23 @@ or the all-locales file referenced by th
         self.parser = None
 
     def get_parser(self):
         """Get an ArgumentParser, with class docstring as description.
         """
         parser = ArgumentParser(description=self.__doc__)
         parser.add_argument('--version', action='version',
                             version='%(prog)s ' + version)
-        parser.add_argument('-v', '--verbose', action='count', dest='v',
+        parser.add_argument('-v', '--verbose', action='count',
                             default=0, help='Make more noise')
-        parser.add_argument('-q', '--quiet', action='count', dest='q',
-                            default=0, help='Make less noise')
+        parser.add_argument('-q', '--quiet', action='count',
+                            default=0, help='''Show less data.
+Specified once, doesn't record entities. Specified twice, also drops
+missing and obsolete files. Specify thrice to hide errors and warnings and
+just show stats''')
         parser.add_argument('-m', '--merge',
                             help='''Use this directory to stage merged files,
 use {ab_CD} to specify a different directory for each locale''')
         parser.add_argument('config_paths', metavar='l10n.toml', nargs='+',
                             help='TOML or INI file for the project')
         parser.add_argument('l10n_base_dir', metavar='l10n-base-dir',
                             help='Parent directory of localizations')
         parser.add_argument('locales', nargs='*', metavar='locale-code',
@@ -77,27 +80,26 @@ data in a json useful for Exhibit
         cmd = cls()
         return cmd.handle_()
 
     def handle_(self):
         """The instance part of the classmethod call."""
         self.parser = self.get_parser()
         args = self.parser.parse_args()
         # log as verbose or quiet as we want, warn by default
+        logging_level = logging.WARNING - (args.verbose - args.quiet) * 10
         logging.basicConfig()
-        logging.getLogger().setLevel(logging.WARNING -
-                                     (args.v - args.q) * 10)
+        logging.getLogger().setLevel(logging_level)
         kwargs = vars(args)
         # strip handled arguments
-        kwargs.pop('q')
-        kwargs.pop('v')
+        kwargs.pop('verbose')
         return self.handle(**kwargs)
 
     def handle(self, config_paths, l10n_base_dir, locales,
-               merge=None, defines=None, unified=False, full=False,
+               merge=None, defines=None, unified=False, full=False, quiet=0,
                clobber=False, data='text'):
         # using nargs multiple times in argparser totally screws things
         # up, repair that.
         # First files are configs, then the base dir, everything else is
         # locales
         all_args = config_paths + [l10n_base_dir] + locales
         config_paths = []
         locales = []
@@ -134,19 +136,20 @@ data in a json useful for Exhibit
                 configs.append(config)
             else:
                 app = EnumerateApp(
                     config_path, l10n_base_dir, locales)
                 configs.append(app.asConfig())
         try:
             unified_observer = None
             if unified:
-                unified_observer = Observer()
+                unified_observer = Observer(quiet=quiet)
             observers = compareProjects(
                 configs,
+                quiet=quiet,
                 stat_observer=unified_observer,
                 merge_stage=merge, clobber_merge=clobber)
         except (OSError, IOError), exc:
             print "FAIL: " + str(exc)
             self.parser.exit(2)
         if unified:
             observers = [unified_observer]
 
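The -v/-q handling above reduces to one arithmetic step; a small standalone demo of the counting pattern (the parser here is illustrative, not the tool's own):

import argparse
import logging

parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='count', default=0)
parser.add_argument('-q', '--quiet', action='count', default=0)
args = parser.parse_args(['-q'])

# Warn by default; each -v lowers the threshold by 10, each -q raises it.
level = logging.WARNING - (args.verbose - args.quiet) * 10
print(logging.getLevelName(level))  # ERROR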
--- a/third_party/python/compare-locales/compare_locales/compare.py
+++ b/third_party/python/compare-locales/compare_locales/compare.py
@@ -142,19 +142,25 @@ class AddRemove(object):
             elif item in left_items:
                 yield ('delete', item)
             else:
                 yield ('add', item)
 
 
 class Observer(object):
 
-    def __init__(self, filter=None, file_stats=False):
+    def __init__(self, quiet=0, filter=None, file_stats=False):
+        '''Create Observer
+        For quiet=1, skip per-entity missing and obsolete strings,
+        for quiet=2, skip missing and obsolete files. For quiet=3,
+        skip warnings and errors.
+        '''
         self.summary = defaultdict(lambda: defaultdict(int))
         self.details = Tree(list)
+        self.quiet = quiet
         self.filter = filter
         self.file_stats = None
         if file_stats:
             self.file_stats = defaultdict(lambda: defaultdict(dict))
 
     # support pickling
     def __getstate__(self):
         state = dict(summary=self._dictify(self.summary), details=self.details)
@@ -212,27 +218,28 @@ class Observer(object):
             return  # there are no other stats for missing files
         self.file_stats[file.locale][file.localpath].update(stats)
 
     def notify(self, category, file, data):
         rv = 'error'
         if category in ['missingFile', 'obsoleteFile']:
             if self.filter is not None:
                 rv = self.filter(file)
-            if rv != "ignore":
+            if rv != "ignore" and self.quiet < 2:
                 self.details[file].append({category: rv})
             return rv
         if category in ['missingEntity', 'obsoleteEntity']:
             if self.filter is not None:
                 rv = self.filter(file, data)
             if rv == "ignore":
                 return rv
-            self.details[file].append({category: data})
+            if self.quiet < 1:
+                self.details[file].append({category: data})
             return rv
-        if category in ('error', 'warning'):
+        if category in ('error', 'warning') and self.quiet < 3:
             self.details[file].append({category: data})
             self.summary[file.locale][category + 's'] += 1
         return rv
 
     def toExhibit(self):
         items = []
         for locale in sorted(self.summary.iterkeys()):
             summary = self.summary[locale]
@@ -581,24 +588,33 @@ class ContentComparer:
         # overload this if needed
         pass
 
     def doChanged(self, file, ref_entity, l10n_entity):
         # overload this if needed
         pass
 
 
-def compareProjects(project_configs, stat_observer=None,
-                    file_stats=False,
-                    merge_stage=None, clobber_merge=False):
+def compareProjects(
+            project_configs,
+            stat_observer=None,
+            file_stats=False,
+            merge_stage=None,
+            clobber_merge=False,
+            quiet=0,
+        ):
     locales = set()
     observers = []
     for project in project_configs:
         observers.append(
-            Observer(filter=project.filter, file_stats=file_stats))
+            Observer(
+                quiet=quiet,
+                filter=project.filter,
+                file_stats=file_stats,
+            ))
         locales.update(project.locales)
     if stat_observer is not None:
         stat_observers = [stat_observer]
     else:
         stat_observers = None
     comparer = ContentComparer(observers, stat_observers=stat_observers)
     for locale in sorted(locales):
         files = paths.ProjectFiles(locale, project_configs,
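The quiet levels described in the new Observer docstring amount to a small decision table; this sketch mirrors the notify() branches above without importing the package:

def should_record(category, quiet):
    # Mirrors Observer.notify: each quiet level drops one class of detail.
    if category in ('missingFile', 'obsoleteFile'):
        return quiet < 2
    if category in ('missingEntity', 'obsoleteEntity'):
        return quiet < 1
    if category in ('error', 'warning'):
        return quiet < 3
    return False

for q in range(4):
    kept = [c for c in ('missingEntity', 'missingFile', 'warning')
            if should_record(c, q)]
    print(q, kept)
# 0 ['missingEntity', 'missingFile', 'warning']
# 1 ['missingFile', 'warning']
# 2 ['warning']
# 3 []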
new file mode 100644
--- /dev/null
+++ b/third_party/python/compare-locales/compare_locales/integration_tests/__init__.py
@@ -0,0 +1,5 @@
+'''Tests that are not run by default.
+
+They might just take a long time, or depend on external services, or both.
+They might also fail due to external changes.
+'''
new file mode 100644
--- /dev/null
+++ b/third_party/python/compare-locales/compare_locales/integration_tests/test_plurals.py
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import ast
+import json
+import os
+import unittest
+import urllib2
+
+
+TRANSVISION_URL = (
+    'https://transvision.mozfr.org/'
+    'api/v1/entity/gecko_strings/'
+    '?id=toolkit/chrome/global/intl.properties:pluralRule'
+)
+
+
+class TestPlural(unittest.TestCase):
+    '''Integration test for plural forms and l10n-central.
+
+    Having more plural forms than in l10n-central is OK, missing or
+    mismatching ones isn't.
+    Depends on transvision.
+    '''
+    maxDiff = None
+
+    def test_valid_forms(self):
+        reference_form_map = self._load_transvision()
+        compare_locales_map = self._parse_plurals_py()
+        # Notify locales in compare-locales but not in transvision
+        # Might be incubator locales
+        extra_locales = set()
+        extra_locales.update(compare_locales_map)
+        extra_locales.difference_update(reference_form_map)
+        for locale in sorted(extra_locales):
+            print("{} not in transvision, OK".format(locale))
+            compare_locales_map.pop(locale)
+        # Strip matches from dicts, to make diff for test small
+        locales = set()
+        locales.update(compare_locales_map)
+        locales.intersection_update(reference_form_map)
+        for locale in locales:
+            if compare_locales_map[locale] == reference_form_map[locale]:
+                compare_locales_map.pop(locale)
+                reference_form_map.pop(locale)
+        self.assertDictEqual(reference_form_map, compare_locales_map)
+
+    def _load_transvision(self):
+        '''Use the Transvision API to load all values of pluralRule
+        in intl.properties.
+        Skip test on load failure.
+        '''
+        try:
+            data = urllib2.urlopen(TRANSVISION_URL).read()
+        except urllib2.URLError:
+            raise unittest.SkipTest("Couldn't load transvision API.")
+        return json.loads(data)
+
+    def _parse_plurals_py(self):
+        '''Load compare_locales.plurals, parse the AST, and inspect
+        the dictionary assigned to CATEGORIES_BY_LOCALE to find
+        the actual plural number.
+        Convert both number and locale code to unicode for comparing
+        to json.
+        '''
+        path = os.path.join(os.path.dirname(__file__), '..', 'plurals.py')
+        with open(path) as source_file:
+            plurals_ast = ast.parse(source_file.read())
+        assign_cats_statement = [
+            s for s in plurals_ast.body
+            if isinstance(s, ast.Assign)
+            and any(t.id == 'CATEGORIES_BY_LOCALE' for t in s.targets)
+        ][0]
+        return dict(
+            (unicode(k.s), unicode(v.slice.value.n))
+            for k, v in zip(
+                assign_cats_statement.value.keys,
+                assign_cats_statement.value.values
+            )
+        )
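The test reads plurals.py with ast instead of importing it, so it sees the literal CATEGORIES_BY_INDEX[n] subscripts rather than the resolved tuples. Reduced to a standalone sketch with a hypothetical SOURCE string (the test walks Subscript nodes by hand because the dict values are lookups, not literals):

import ast

SOURCE = "CATEGORIES_BY_LOCALE = {'cy': CATEGORIES_BY_INDEX[18]}"

tree = ast.parse(SOURCE)
assign = [
    s for s in tree.body
    if isinstance(s, ast.Assign)
    and any(t.id == 'CATEGORIES_BY_LOCALE' for t in s.targets)
][0]
# Pair each key with the index used in its CATEGORIES_BY_INDEX subscript.
mapping = dict(
    (k.s, v.slice.value.n)
    for k, v in zip(assign.value.keys, assign.value.values)
)
print(mapping)  # {'cy': 18}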
--- a/third_party/python/compare-locales/compare_locales/merge.py
+++ b/third_party/python/compare-locales/compare_locales/merge.py
@@ -30,20 +30,20 @@ def merge_channels(name, *resources):
         # The counter dict keeps track of number of identical comments.
         counter = defaultdict(int)
         parser.readContents(resource)
         pairs = [get_key_value(entity, counter) for entity in parser.walk()]
         return OrderedDict(pairs)
 
     def get_key_value(entity, counter):
         if isinstance(entity, cl.Comment):
-            counter[entity.all] += 1
+            counter[entity.val] += 1
             # Use the (value, index) tuple as the key. AddRemove will
+            # de-duplicate identical comments at the same index.
-            return ((entity.all, counter[entity.all]), entity)
+            return ((entity.val, counter[entity.val]), entity)
 
         if isinstance(entity, cl.Whitespace):
             # Use the Whitespace instance as the key so that it's always
             # unique. Adjacent whitespace will be folded into the longer one in
             # prune.
             return (entity, entity)
 
         # When comments change, AddRemove gives us one 'add' and one 'delete'
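Keying comments by a (value, index) tuple is what lets AddRemove de-duplicate identical comments at the same position while keeping repeats apart; a toy illustration with plain strings standing in for Comment entities:

from collections import defaultdict

def comment_keys(comments):
    counter = defaultdict(int)
    for c in comments:
        counter[c] += 1
        # Same text, different occurrence -> different key.
        yield ((c, counter[c]), c)

print(list(comment_keys(['# note', '# note', '# other'])))
# [(('# note', 1), '# note'), (('# note', 2), '# note'),
#  (('# other', 1), '# other')]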
--- a/third_party/python/compare-locales/compare_locales/parser.py
+++ b/third_party/python/compare-locales/compare_locales/parser.py
@@ -121,25 +121,48 @@ class Entity(EntityBase):
     pass
 
 
 class Comment(EntityBase):
     def __init__(self, ctx, span):
         self.ctx = ctx
         self.span = span
         self.val_span = None
+        self._val_cache = None
 
     @property
     def key(self):
         return None
 
+    @property
+    def val(self):
+        if self._val_cache is None:
+            self._val_cache = self.all
+        return self._val_cache
+
     def __repr__(self):
         return self.all
 
 
+class OffsetComment(Comment):
+    '''Helper for file formats that have a constant number of leading
+    chars to strip from comments.
+    Offset defaults to 1
+    '''
+    comment_offset = 1
+
+    @property
+    def val(self):
+        if self._val_cache is None:
+            self._val_cache = ''.join((
+                l[self.comment_offset:] for l in self.all.splitlines(True)
+            ))
+        return self._val_cache
+
+
 class Junk(object):
     '''
     An almost-Entity, representing junk data that we didn't parse.
     This way, we can signal bad content as stuff we don't understand.
     And then either fix that, or report real bugs in localizations.
     '''
     junkid = 0
 
@@ -184,16 +207,17 @@ class Whitespace(EntityBase):
 
     def __repr__(self):
         return self.raw_val
 
 
 class Parser(object):
     capabilities = CAN_SKIP | CAN_MERGE
     reWhitespace = re.compile('\s+', re.M)
+    Comment = Comment
 
     class Context(object):
         "Fixture for content and line numbers"
         def __init__(self, contents):
             self.contents = contents
             # Subclasses may use bitmasks to keep state.
             self.state = 0
             self._lines = None
@@ -263,17 +287,17 @@ class Parser(object):
         m = self.reWhitespace.match(ctx.contents, offset)
         if m:
             return Whitespace(ctx, m.span())
         m = self.reKey.match(ctx.contents, offset)
         if m:
             return self.createEntity(ctx, m)
         m = self.reComment.match(ctx.contents, offset)
         if m:
-            self.last_comment = Comment(ctx, m.span())
+            self.last_comment = self.Comment(ctx, m.span())
             return self.last_comment
         return self.getJunk(ctx, offset, self.reKey, self.reComment)
 
     def getJunk(self, ctx, offset, *expressions):
         junkend = None
         for exp in expressions:
             m = exp.search(ctx.contents, offset)
             if m:
@@ -356,16 +380,24 @@ class DTDParser(Parser):
     reHeader = re.compile(u'^\ufeff')
     reComment = re.compile('<!--(?P<val>-?[%s])*?-->' % CharMinusDash,
                            re.S)
     rePE = re.compile(u'<!ENTITY\s+%\s+(?P<key>' + Name + ')\s+'
                       u'SYSTEM\s+(?P<val>\"[^\"]*\"|\'[^\']*\')\s*>\s*'
                       u'%' + Name + ';'
                       u'(?:[ \t]*(?:' + XmlComment + u'\s*)*\n?)?')
 
+    class Comment(Comment):
+        @property
+        def val(self):
+            if self._val_cache is None:
+                # Strip "<!--" and "-->" to comment contents
+                self._val_cache = self.all[4:-3]
+            return self._val_cache
+
     def getNext(self, ctx, offset):
         '''
         Overload Parser.getNext to special-case ParsedEntities.
         Just check for a parsed entity if that method claims junk.
 
         <!ENTITY % foo SYSTEM "url">
         %foo;
         '''
@@ -403,16 +435,19 @@ class PropertiesEntity(Entity):
             if found['nl']:
                 return ''
             return self.known_escapes.get(found['single'], found['single'])
 
         return self.escape.sub(unescape, self.raw_val)
 
 
 class PropertiesParser(Parser):
+
+    Comment = OffsetComment
+
     def __init__(self):
         self.reKey = re.compile(
             '(?P<key>[^#!\s\n][^=:\n]*?)\s*[:=][ \t]*', re.M)
         self.reComment = re.compile('(?:[#!][^\n]*\n)*(?:[#!][^\n]*)', re.M)
         self._escapedEnd = re.compile(r'\\+$')
         self._trailingWS = re.compile(r'\s*(?:\n|\Z)', re.M)
         Parser.__init__(self)
 
@@ -421,17 +456,17 @@ class PropertiesParser(Parser):
         contents = ctx.contents
 
         m = self.reWhitespace.match(contents, offset)
         if m:
             return Whitespace(ctx, m.span())
 
         m = self.reComment.match(contents, offset)
         if m:
-            self.last_comment = Comment(ctx, m.span())
+            self.last_comment = self.Comment(ctx, m.span())
             return self.last_comment
 
         m = self.reKey.match(contents, offset)
         if m:
             startline = offset = m.end()
             while True:
                 endval = nextline = contents.find('\n', offset)
                 if nextline == -1:
@@ -479,16 +514,19 @@ class DefinesInstruction(EntityBase):
 class DefinesParser(Parser):
     # can't merge, #unfilter needs to be the last item, which we don't support
     capabilities = CAN_COPY
     reWhitespace = re.compile('\n+', re.M)
 
     EMPTY_LINES = 1 << 0
     PAST_FIRST_LINE = 1 << 1
 
+    class Comment(OffsetComment):
+        comment_offset = 2
+
     def __init__(self):
         self.reComment = re.compile('(?:^# .*?\n)*(?:^# [^\n]*)', re.M)
         # corresponds to
         # https://hg.mozilla.org/mozilla-central/file/72ee4800d4156931c89b58bd807af4a3083702bb/python/mozbuild/mozbuild/preprocessor.py#l561  # noqa
         self.reKey = re.compile(
             '#define[ \t]+(?P<key>\w+)(?:[ \t](?P<val>[^\n]*))?', re.M)
         self.rePI = re.compile('#(?P<val>\w+[ \t]+[^\n]+)', re.M)
         Parser.__init__(self)
@@ -505,17 +543,17 @@ class DefinesParser(Parser):
             else:
                 return Junk(ctx, m.span())
 
         # We're not in the first line anymore.
         ctx.state |= self.PAST_FIRST_LINE
 
         m = self.reComment.match(contents, offset)
         if m:
-            self.last_comment = Comment(ctx, m.span())
+            self.last_comment = self.Comment(ctx, m.span())
             return self.last_comment
         m = self.reKey.match(contents, offset)
         if m:
             return self.createEntity(ctx, m)
         m = self.rePI.match(contents, offset)
         if m:
             instr = DefinesInstruction(ctx, m.span(), m.span('val'))
             if instr.val == 'filter emptyLines':
@@ -544,30 +582,33 @@ class IniParser(Parser):
     Parse files of the form:
     # initial comment
     [cat]
     whitespace*
     #comment
     string=value
     ...
     '''
+
+    Comment = OffsetComment
+
     def __init__(self):
         self.reComment = re.compile('(?:^[;#][^\n]*\n)*(?:^[;#][^\n]*)', re.M)
         self.reSection = re.compile('\[(?P<val>.*?)\]', re.M)
         self.reKey = re.compile('(?P<key>.+?)=(?P<val>.*)', re.M)
         Parser.__init__(self)
 
     def getNext(self, ctx, offset):
         contents = ctx.contents
         m = self.reWhitespace.match(contents, offset)
         if m:
             return Whitespace(ctx, m.span())
         m = self.reComment.match(contents, offset)
         if m:
-            self.last_comment = Comment(ctx, m.span())
+            self.last_comment = self.Comment(ctx, m.span())
             return self.last_comment
         m = self.reSection.match(contents, offset)
         if m:
             return IniSection(ctx, m.span(), m.span('val'))
         m = self.reKey.match(contents, offset)
         if m:
             return self.createEntity(ctx, m)
         return self.getJunk(
@@ -587,17 +628,17 @@ class FluentAttribute(EntityBase):
         if not isinstance(other, FluentAttribute):
             return False
         return self.attr.equals(
             other.attr, ignored_fields=self.ignored_fields)
 
 
 class FluentEntity(Entity):
     # Fields ignored when comparing two entities.
-    ignored_fields = ['comment', 'span', 'tags']
+    ignored_fields = ['comment', 'span']
 
     def __init__(self, ctx, entry):
         start = entry.span.start
         end = entry.span.end
 
         self.ctx = ctx
         self.span = (start, end)
 
@@ -611,28 +652,36 @@ class FluentEntity(Entity):
         self.entry = entry
 
         # EntityBase instances are expected to have pre_comment. It's used by
         # other formats to associate a Comment with an Entity. FluentEntities
         # don't need it because message comments are part of the entry AST and
         # are not separate Comment instances.
         self.pre_comment = None
 
+    @property
+    def root_node(self):
+        '''AST node at which to start traversal for count_words.
+
+        By default we count words in the value and in all attributes.
+        '''
+        return self.entry
+
     _word_count = None
 
     def count_words(self):
         if self._word_count is None:
             self._word_count = 0
 
             def count_words(node):
                 if isinstance(node, ftl.TextElement):
                     self._word_count += len(node.value.split())
                 return node
 
-            self.entry.traverse(count_words)
+            self.root_node.traverse(count_words)
 
         return self._word_count
 
     def equals(self, other):
         return self.entry.equals(
             other.entry, ignored_fields=self.ignored_fields)
 
     # In Fluent we treat entries as a whole.  FluentChecker reports errors at
@@ -641,75 +690,78 @@ class FluentEntity(Entity):
         return self.position(offset)
 
     @property
     def attributes(self):
         for attr_node in self.entry.attributes:
             yield FluentAttribute(self, attr_node)
 
 
-class FluentSection(EntityBase):
-    def __init__(self, ctx, entry):
-        self.entry = entry
-        self.ctx = ctx
+class FluentMessage(FluentEntity):
+    pass
+
+
+class FluentTerm(FluentEntity):
+    # Fields ignored when comparing two terms.
+    ignored_fields = ['attributes', 'comment', 'span']
 
-        self.span = (entry.span.start, entry.span.end)
-        self.key_span = self.val_span = (
-            entry.name.span.start, entry.name.span.end)
+    @property
+    def root_node(self):
+        '''AST node at which to start traversal for count_words.
+
+        In Fluent Terms we only count words in the value. Attributes are
+        private and do not count towards the word total.
+        '''
+        return self.entry.value
+
+
+class FluentComment(Comment):
+    def __init__(self, ctx, span, entry):
+        super(FluentComment, self).__init__(ctx, span)
+        self._val_cache = entry.content
 
 
 class FluentParser(Parser):
     capabilities = CAN_SKIP
 
     def __init__(self):
         super(FluentParser, self).__init__()
         self.ftl_parser = FTLParser()
 
     def walk(self, only_localizable=False):
         if not self.ctx:
             # loading file failed, or we just didn't load anything
             return
 
         resource = self.ftl_parser.parse(self.ctx.contents)
 
-        if resource.comment:
-            last_span_end = resource.comment.span.end
-
-            if not only_localizable:
-                if 0 < resource.comment.span.start:
-                    yield Whitespace(
-                        self.ctx, (0, resource.comment.span.start))
-                yield Comment(
-                    self.ctx,
-                    (resource.comment.span.start, resource.comment.span.end))
-        else:
-            last_span_end = 0
+        last_span_end = 0
 
         for entry in resource.body:
             if not only_localizable:
                 if entry.span.start > last_span_end:
                     yield Whitespace(
                         self.ctx, (last_span_end, entry.span.start))
 
             if isinstance(entry, ftl.Message):
-                yield FluentEntity(self.ctx, entry)
+                yield FluentMessage(self.ctx, entry)
+            elif isinstance(entry, ftl.Term):
+                yield FluentTerm(self.ctx, entry)
             elif isinstance(entry, ftl.Junk):
                 start = entry.span.start
                 end = entry.span.end
                 # strip leading whitespace
                 start += re.match('\s*', entry.content).end()
                 # strip trailing whitespace
                 ws, we = re.search('\s*$', entry.content).span()
                 end -= we - ws
                 yield Junk(self.ctx, (start, end))
-            elif isinstance(entry, ftl.Comment) and not only_localizable:
+            elif isinstance(entry, ftl.BaseComment) and not only_localizable:
                 span = (entry.span.start, entry.span.end)
-                yield Comment(self.ctx, span)
-            elif isinstance(entry, ftl.Section) and not only_localizable:
-                yield FluentSection(self.ctx, entry)
+                yield FluentComment(self.ctx, span, entry)
 
             last_span_end = entry.span.end
 
         # Yield Whitespace at the EOF.
         if not only_localizable:
             eof_offset = len(self.ctx.contents)
             if eof_offset > last_span_end:
                 yield Whitespace(self.ctx, (last_span_end, eof_offset))
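What OffsetComment.val does, as a standalone function: strip a constant number of leading characters from every line of a comment, keeping newlines intact. PropertiesParser uses the default offset of 1 (for '#' or '!'); DefinesParser uses 2 (for '# '):

def strip_comment_offset(text, offset):
    # splitlines(True) keeps the line endings, so the joined
    # result preserves the comment's line structure.
    return ''.join(line[offset:] for line in text.splitlines(True))

print(strip_comment_offset('# first line\n# second line', 2))
# prints 'first line' and 'second line' without the comment markers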
--- a/third_party/python/compare-locales/compare_locales/plurals.py
+++ b/third_party/python/compare-locales/compare_locales/plurals.py
@@ -33,128 +33,130 @@ CATEGORIES_BY_INDEX = (
     ('one', 'two', 'few', 'other'),
     # 11 (Irish Gaelic)
     ('one', 'two', 'few', 'many', 'other'),
     # 12 (Arabic)
     # CLDR: zero, one, two, few, many, other
     ('one', 'two', 'few', 'many', 'other', 'zero'),
     # 13 (Maltese)
     ('one', 'few', 'many', 'other'),
-    # 14 (Macedonian)
+    # 14 (Unused)
     # CLDR: one, other
     ('one', 'two', 'other'),
-    # 15 (Icelandic)
+    # 15 (Icelandic, Macedonian)
     ('one', 'other'),
     # 16 (Breton)
     ('one', 'two', 'few', 'many', 'other'),
     # 17 (Shuar)
     # CLDR: (missing)
-    ('zero', 'other')
+    ('zero', 'other'),
+    # 18 (Welsh)
+    ('zero', 'one', 'two', 'few', 'many', 'other'),
 )
 
 CATEGORIES_BY_LOCALE = {
     'ach': CATEGORIES_BY_INDEX[1],
     'af': CATEGORIES_BY_INDEX[1],
     'an': CATEGORIES_BY_INDEX[1],
     'ar': CATEGORIES_BY_INDEX[12],
     'as': CATEGORIES_BY_INDEX[1],
     'ast': CATEGORIES_BY_INDEX[1],
-    'az': CATEGORIES_BY_INDEX[0],
+    'az': CATEGORIES_BY_INDEX[1],
     'be': CATEGORIES_BY_INDEX[7],
     'bg': CATEGORIES_BY_INDEX[1],
-    'bn-BD': CATEGORIES_BY_INDEX[1],
-    'bn-IN': CATEGORIES_BY_INDEX[1],
+    'bn-BD': CATEGORIES_BY_INDEX[2],
+    'bn-IN': CATEGORIES_BY_INDEX[2],
     'br': CATEGORIES_BY_INDEX[1],
-    'bs': CATEGORIES_BY_INDEX[1],
+    'bs': CATEGORIES_BY_INDEX[7],
     'ca': CATEGORIES_BY_INDEX[1],
     'cak': CATEGORIES_BY_INDEX[1],
     'cs': CATEGORIES_BY_INDEX[8],
-    'cy': CATEGORIES_BY_INDEX[1],
+    'cy': CATEGORIES_BY_INDEX[18],
     'da': CATEGORIES_BY_INDEX[1],
     'de': CATEGORIES_BY_INDEX[1],
     'dsb': CATEGORIES_BY_INDEX[10],
     'el': CATEGORIES_BY_INDEX[1],
     'en-GB': CATEGORIES_BY_INDEX[1],
     'en-US': CATEGORIES_BY_INDEX[1],
     'en-ZA': CATEGORIES_BY_INDEX[1],
     'eo': CATEGORIES_BY_INDEX[1],
     'es-AR': CATEGORIES_BY_INDEX[1],
     'es-CL': CATEGORIES_BY_INDEX[1],
     'es-ES': CATEGORIES_BY_INDEX[1],
     'es-MX': CATEGORIES_BY_INDEX[1],
     'et': CATEGORIES_BY_INDEX[1],
     'eu': CATEGORIES_BY_INDEX[1],
-    'fa': CATEGORIES_BY_INDEX[0],
+    'fa': CATEGORIES_BY_INDEX[2],
     'ff': CATEGORIES_BY_INDEX[1],
     'fi': CATEGORIES_BY_INDEX[1],
     'fr': CATEGORIES_BY_INDEX[2],
     'fy-NL': CATEGORIES_BY_INDEX[1],
     'ga-IE': CATEGORIES_BY_INDEX[11],
     'gd': CATEGORIES_BY_INDEX[4],
     'gl': CATEGORIES_BY_INDEX[1],
     'gn': CATEGORIES_BY_INDEX[1],
     'gu-IN': CATEGORIES_BY_INDEX[2],
     'he': CATEGORIES_BY_INDEX[1],
-    'hi-IN': CATEGORIES_BY_INDEX[1],
+    'hi-IN': CATEGORIES_BY_INDEX[2],
     'hr': CATEGORIES_BY_INDEX[7],
     'hsb': CATEGORIES_BY_INDEX[10],
     'hu': CATEGORIES_BY_INDEX[1],
     'hy-AM': CATEGORIES_BY_INDEX[1],
     'ia': CATEGORIES_BY_INDEX[1],
     'id': CATEGORIES_BY_INDEX[0],
     'is': CATEGORIES_BY_INDEX[15],
     'it': CATEGORIES_BY_INDEX[1],
     'ja': CATEGORIES_BY_INDEX[0],
     'ja-JP-mac': CATEGORIES_BY_INDEX[0],
     'jiv': CATEGORIES_BY_INDEX[17],
-    'ka': CATEGORIES_BY_INDEX[0],
+    'ka': CATEGORIES_BY_INDEX[1],
     'kab': CATEGORIES_BY_INDEX[1],
     'kk': CATEGORIES_BY_INDEX[1],
-    'km': CATEGORIES_BY_INDEX[1],
+    'km': CATEGORIES_BY_INDEX[0],
     'kn': CATEGORIES_BY_INDEX[1],
     'ko': CATEGORIES_BY_INDEX[0],
     'lij': CATEGORIES_BY_INDEX[1],
     'lo': CATEGORIES_BY_INDEX[0],
     'lt': CATEGORIES_BY_INDEX[6],
     'ltg': CATEGORIES_BY_INDEX[3],
     'lv': CATEGORIES_BY_INDEX[3],
     'mai': CATEGORIES_BY_INDEX[1],
     'mk': CATEGORIES_BY_INDEX[15],
     'ml': CATEGORIES_BY_INDEX[1],
     'mr': CATEGORIES_BY_INDEX[1],
-    'ms': CATEGORIES_BY_INDEX[1],
-    'my': CATEGORIES_BY_INDEX[1],
+    'ms': CATEGORIES_BY_INDEX[0],
+    'my': CATEGORIES_BY_INDEX[0],
     'nb-NO': CATEGORIES_BY_INDEX[1],
     'ne-NP': CATEGORIES_BY_INDEX[1],
     'nl': CATEGORIES_BY_INDEX[1],
     'nn-NO': CATEGORIES_BY_INDEX[1],
-    'oc': CATEGORIES_BY_INDEX[1],
+    'oc': CATEGORIES_BY_INDEX[2],
     'or': CATEGORIES_BY_INDEX[1],
-    'pa-IN': CATEGORIES_BY_INDEX[1],
+    'pa-IN': CATEGORIES_BY_INDEX[2],
     'pl': CATEGORIES_BY_INDEX[9],
     'pt-BR': CATEGORIES_BY_INDEX[1],
     'pt-PT': CATEGORIES_BY_INDEX[1],
     'rm': CATEGORIES_BY_INDEX[1],
-    'ro': CATEGORIES_BY_INDEX[1],
+    'ro': CATEGORIES_BY_INDEX[5],
     'ru': CATEGORIES_BY_INDEX[7],
     'si': CATEGORIES_BY_INDEX[1],
     'sk': CATEGORIES_BY_INDEX[8],
     'sl': CATEGORIES_BY_INDEX[10],
     'son': CATEGORIES_BY_INDEX[1],
     'sq': CATEGORIES_BY_INDEX[1],
     'sr': CATEGORIES_BY_INDEX[7],
     'sv-SE': CATEGORIES_BY_INDEX[1],
     'ta': CATEGORIES_BY_INDEX[1],
     'te': CATEGORIES_BY_INDEX[1],
     'th': CATEGORIES_BY_INDEX[0],
     'tl': CATEGORIES_BY_INDEX[1],
-    'tr': CATEGORIES_BY_INDEX[0],
+    'tr': CATEGORIES_BY_INDEX[1],
     'trs': CATEGORIES_BY_INDEX[1],
     'uk': CATEGORIES_BY_INDEX[7],
     'ur': CATEGORIES_BY_INDEX[1],
-    'uz': CATEGORIES_BY_INDEX[0],
-    'vi': CATEGORIES_BY_INDEX[1],
+    'uz': CATEGORIES_BY_INDEX[1],
+    'vi': CATEGORIES_BY_INDEX[0],
     'wo': CATEGORIES_BY_INDEX[0],
     'xh': CATEGORIES_BY_INDEX[1],
     'zam': CATEGORIES_BY_INDEX[1],
     'zh-CN': CATEGORIES_BY_INDEX[1],
     'zh-TW': CATEGORIES_BY_INDEX[0]
 }
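A quick consistency check of the new Welsh entry, assuming this tree is importable; index 18 is the only row carrying all six CLDR categories:

from compare_locales import plurals

assert plurals.CATEGORIES_BY_LOCALE['cy'] == (
    'zero', 'one', 'two', 'few', 'many', 'other')
print(len(plurals.CATEGORIES_BY_LOCALE['cy']))  # 6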
--- a/third_party/python/compare-locales/compare_locales/tests/__init__.py
+++ b/third_party/python/compare-locales/compare_locales/tests/__init__.py
@@ -41,10 +41,10 @@ class ParserTestMixin():
         entities = list(self.parser.walk())
         for entity, ref in izip_longest(entities, refs):
             self.assertTrue(entity, 'excess reference entity ' + unicode(ref))
             self.assertTrue(ref, 'excess parsed entity ' + unicode(entity))
             if isinstance(entity, parser.Entity):
                 self.assertEqual(entity.key, ref[0])
                 self.assertEqual(entity.val, ref[1])
             else:
-                self.assertEqual(type(entity).__name__, ref[0])
+                self.assertIsInstance(entity, ref[0])
                 self.assertIn(ref[1], entity.all)
--- a/third_party/python/compare-locales/compare_locales/tests/test_checks.py
+++ b/third_party/python/compare-locales/compare_locales/tests/test_checks.py
@@ -85,16 +85,45 @@ downloadsTitleFiles=#1 file - Downloads;
 # See: http://developer.mozilla.org/en/docs/Localization_and_Plurals
 # #1 number of files
 # example: 111 files - Downloads
 downloadsTitleFiles=#1 file - Downloads;#1 files - #2;#1 #3
 ''',
                    (('error', 0, 'unreplaced variables in l10n', 'plural'),))
 
 
+class TestPluralForms(BaseHelper):
+    file = File('foo.properties', 'foo.properties', locale='en-GB')
+    refContent = '''\
+# LOCALIZATION NOTE (downloadsTitleFiles): Semi-colon list of plural forms.
+# See: http://developer.mozilla.org/en/docs/Localization_and_Plurals
+# #1 number of files
+# example: 111 files - Downloads
+downloadsTitleFiles=#1 file;#1 files
+'''
+
+    def test_matching_forms(self):
+        self._test('''\
+downloadsTitleFiles=#1 fiiilee;#1 fiiilees
+''',
+                   tuple())
+
+    def test_lacking_forms(self):
+        self._test('''\
+downloadsTitleFiles=#1 fiiilee
+''',
+                   (('warning', 0, 'expecting 2 plurals, found 1', 'plural'),))
+
+    def test_excess_forms(self):
+        self._test('''\
+downloadsTitleFiles=#1 fiiilee;#1 fiiilees;#1 fiiilees
+''',
+                   (('warning', 0, 'expecting 2 plurals, found 3', 'plural'),))
+
+
 class TestDTDs(BaseHelper):
     file = File('foo.dtd', 'foo.dtd')
     refContent = '''<!ENTITY foo "This is &apos;good&apos;">
 <!ENTITY width "10ch">
 <!ENTITY style "width: 20ch; height: 280px;">
 <!ENTITY minStyle "min-height: 50em;">
 <!ENTITY ftd "0">
 <!ENTITY formatPercent "This is 100&#037; correct">
@@ -223,29 +252,26 @@ class TestAndroid(unittest.TestCase):
     Make sure we're hitting our extra rules only if
     we're passing in a DTD file in the embedding/android module.
     """
     apos_msg = u"Apostrophes in Android DTDs need escaping with \\' or " + \
                u"\\u0027, or use \u2019, or put string in quotes."
     quot_msg = u"Quotes in Android DTDs need escaping with \\\" or " + \
                u"\\u0022, or put string in apostrophes."
 
-    def getEntity(self, v):
+    def getNext(self, v):
         ctx = Parser.Context(v)
         return DTDEntity(
-            ctx, '', (0, len(v)), (), (), (), (0, len(v)), ())
+            ctx, '', (0, len(v)), (), (0, len(v)))
 
     def getDTDEntity(self, v):
         v = v.replace('"', '&quot;')
         ctx = Parser.Context('<!ENTITY foo "%s">' % v)
         return DTDEntity(
-            ctx,
-            '',
-            (0, len(v) + 16), (), (), (9, 12),
-            (14, len(v) + 14), ())
+            ctx, '', (0, len(v) + 16), (9, 12), (14, len(v) + 14))
 
     def test_android_dtd(self):
         """Testing the actual android checks. The logic is involved,
         so this is a lot of nitty gritty detail tests.
         """
         f = File("embedding/android/strings.dtd", "strings.dtd",
                  "embedding/android")
         checker = getChecker(f, extra_tests=['android-dtd'])
@@ -321,33 +347,33 @@ class TestAndroid(unittest.TestCase):
                          (('error', 14, 'truncated \\uXXXX escape',
                            'android'),))
 
     def test_android_prop(self):
         f = File("embedding/android/strings.properties", "strings.properties",
                  "embedding/android")
         checker = getChecker(f, extra_tests=['android-dtd'])
         # good plain string
-        ref = self.getEntity("plain string")
-        l10n = self.getEntity("plain localized string")
+        ref = self.getNext("plain string")
+        l10n = self.getNext("plain localized string")
         self.assertEqual(tuple(checker.check(ref, l10n)),
                          ())
         # no dtd warning
-        ref = self.getEntity("plain string")
-        l10n = self.getEntity("plain localized string &ref;")
+        ref = self.getNext("plain string")
+        l10n = self.getNext("plain localized string &ref;")
         self.assertEqual(tuple(checker.check(ref, l10n)),
                          ())
         # no report on stray ampersand
-        ref = self.getEntity("plain string")
-        l10n = self.getEntity("plain localized string with apos: '")
+        ref = self.getNext("plain string")
+        l10n = self.getNext("plain localized string with apos: '")
         self.assertEqual(tuple(checker.check(ref, l10n)),
                          ())
         # report on bad printf
-        ref = self.getEntity("string with %s")
-        l10n = self.getEntity("string with %S")
+        ref = self.getNext("string with %s")
+        l10n = self.getNext("string with %S")
         self.assertEqual(tuple(checker.check(ref, l10n)),
                          (('error', 0, 'argument 1 `S` should be `s`',
                            'printf'),))
 
     def test_non_android_dtd(self):
         f = File("browser/strings.dtd", "strings.dtd", "browser")
         checker = getChecker(f)
         # good string
--- a/third_party/python/compare-locales/compare_locales/tests/test_defines.py
+++ b/third_party/python/compare-locales/compare_locales/tests/test_defines.py
@@ -1,16 +1,22 @@
 # -*- coding: utf-8 -*-
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 import unittest
 
 from compare_locales.tests import ParserTestMixin
+from compare_locales.parser import (
+    Comment,
+    DefinesInstruction,
+    Junk,
+    Whitespace,
+)
 
 
 mpl2 = '''\
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
 # You can obtain one at http://mozilla.org/MPL/2.0/.'''
 
 
@@ -26,166 +32,166 @@ class TestDefinesParser(ParserTestMixin,
 
 # If non-English locales wish to credit multiple contributors, uncomment this
 # variable definition and use the format specified.
 # #define MOZ_LANGPACK_CONTRIBUTORS <em:contributor>Joe Solon</em:contributor>
 
 #unfilter emptyLines
 
 ''', (
-            ('Comment', mpl2),
-            ('Whitespace', '\n'),
-            ('DefinesInstruction', 'filter emptyLines'),
-            ('Whitespace', '\n\n'),
+            (Comment, mpl2),
+            (Whitespace, '\n'),
+            (DefinesInstruction, 'filter emptyLines'),
+            (Whitespace, '\n\n'),
             ('MOZ_LANGPACK_CREATOR', 'mozilla.org'),
-            ('Whitespace', '\n\n'),
-            ('Comment', '#define'),
-            ('Whitespace', '\n\n'),
-            ('DefinesInstruction', 'unfilter emptyLines'),
-            ('Junk', '\n\n')))
+            (Whitespace, '\n\n'),
+            (Comment, '#define'),
+            (Whitespace, '\n\n'),
+            (DefinesInstruction, 'unfilter emptyLines'),
+            (Junk, '\n\n')))
 
     def testBrowserWithContributors(self):
         self._test(mpl2 + '''
 #filter emptyLines
 
 #define MOZ_LANGPACK_CREATOR mozilla.org
 
 # If non-English locales wish to credit multiple contributors, uncomment this
 # variable definition and use the format specified.
 #define MOZ_LANGPACK_CONTRIBUTORS <em:contributor>Joe Solon</em:contributor>
 
 #unfilter emptyLines
 
 ''', (
-            ('Comment', mpl2),
-            ('Whitespace', '\n'),
-            ('DefinesInstruction', 'filter emptyLines'),
-            ('Whitespace', '\n\n'),
+            (Comment, mpl2),
+            (Whitespace, '\n'),
+            (DefinesInstruction, 'filter emptyLines'),
+            (Whitespace, '\n\n'),
             ('MOZ_LANGPACK_CREATOR', 'mozilla.org'),
-            ('Whitespace', '\n\n'),
-            ('Comment', 'non-English'),
-            ('Whitespace', '\n'),
+            (Whitespace, '\n\n'),
+            (Comment, 'non-English'),
+            (Whitespace, '\n'),
             ('MOZ_LANGPACK_CONTRIBUTORS',
              '<em:contributor>Joe Solon</em:contributor>'),
-            ('Whitespace', '\n\n'),
-            ('DefinesInstruction', 'unfilter emptyLines'),
-            ('Junk', '\n\n')))
+            (Whitespace, '\n\n'),
+            (DefinesInstruction, 'unfilter emptyLines'),
+            (Junk, '\n\n')))
 
     def testCommentWithNonAsciiCharacters(self):
         self._test(mpl2 + '''
 #filter emptyLines
 
 # e.g. #define seamonkey_l10n <DT><A HREF="urn:foo">SeaMonkey v češtině</a>
 #define seamonkey_l10n_long
 
 #unfilter emptyLines
 
 ''', (
-            ('Comment', mpl2),
-            ('Whitespace', '\n'),
-            ('DefinesInstruction', 'filter emptyLines'),
-            ('Whitespace', '\n\n'),
-            ('Comment', u'češtině'),
-            ('Whitespace', '\n'),
+            (Comment, mpl2),
+            (Whitespace, '\n'),
+            (DefinesInstruction, 'filter emptyLines'),
+            (Whitespace, '\n\n'),
+            (Comment, u'češtině'),
+            (Whitespace, '\n'),
             ('seamonkey_l10n_long', ''),
-            ('Whitespace', '\n\n'),
-            ('DefinesInstruction', 'unfilter emptyLines'),
-            ('Junk', '\n\n')))
+            (Whitespace, '\n\n'),
+            (DefinesInstruction, 'unfilter emptyLines'),
+            (Junk, '\n\n')))
 
     def test_no_empty_lines(self):
         self._test('''#define MOZ_LANGPACK_CREATOR mozilla.org
 #define MOZ_LANGPACK_CREATOR mozilla.org
 ''', (
             ('MOZ_LANGPACK_CREATOR', 'mozilla.org'),
-            ('Whitespace', '\n'),
+            (Whitespace, '\n'),
             ('MOZ_LANGPACK_CREATOR', 'mozilla.org'),
-            ('Whitespace', '\n')))
+            (Whitespace, '\n')))
 
     def test_empty_line_between(self):
         self._test('''#define MOZ_LANGPACK_CREATOR mozilla.org
 
 #define MOZ_LANGPACK_CREATOR mozilla.org
 ''', (
             ('MOZ_LANGPACK_CREATOR', 'mozilla.org'),
-            ('Junk', '\n'),
+            (Junk, '\n'),
             ('MOZ_LANGPACK_CREATOR', 'mozilla.org'),
-            ('Whitespace', '\n')))
+            (Whitespace, '\n')))
 
     def test_empty_line_at_the_beginning(self):
         self._test('''
 #define MOZ_LANGPACK_CREATOR mozilla.org
 #define MOZ_LANGPACK_CREATOR mozilla.org
 ''', (
-            ('Junk', '\n'),
+            (Junk, '\n'),
             ('MOZ_LANGPACK_CREATOR', 'mozilla.org'),
-            ('Whitespace', '\n'),
+            (Whitespace, '\n'),
             ('MOZ_LANGPACK_CREATOR', 'mozilla.org'),
-            ('Whitespace', '\n')))
+            (Whitespace, '\n')))
 
     def test_filter_empty_lines(self):
         self._test('''#filter emptyLines
 
 #define MOZ_LANGPACK_CREATOR mozilla.org
 #define MOZ_LANGPACK_CREATOR mozilla.org
 #unfilter emptyLines''', (
-            ('DefinesInstruction', 'filter emptyLines'),
-            ('Whitespace', '\n\n'),
+            (DefinesInstruction, 'filter emptyLines'),
+            (Whitespace, '\n\n'),
             ('MOZ_LANGPACK_CREATOR', 'mozilla.org'),
-            ('Whitespace', '\n'),
+            (Whitespace, '\n'),
             ('MOZ_LANGPACK_CREATOR', 'mozilla.org'),
-            ('Whitespace', '\n'),
-            ('DefinesInstruction', 'unfilter emptyLines')))
+            (Whitespace, '\n'),
+            (DefinesInstruction, 'unfilter emptyLines')))
 
     def test_unfilter_empty_lines_with_trailing(self):
         self._test('''#filter emptyLines
 
 #define MOZ_LANGPACK_CREATOR mozilla.org
 #define MOZ_LANGPACK_CREATOR mozilla.org
 #unfilter emptyLines
 ''', (
-            ('DefinesInstruction', 'filter emptyLines'),
-            ('Whitespace', '\n\n'),
+            (DefinesInstruction, 'filter emptyLines'),
+            (Whitespace, '\n\n'),
             ('MOZ_LANGPACK_CREATOR', 'mozilla.org'),
-            ('Whitespace', '\n'),
+            (Whitespace, '\n'),
             ('MOZ_LANGPACK_CREATOR', 'mozilla.org'),
-            ('Whitespace', '\n'),
-            ('DefinesInstruction', 'unfilter emptyLines'),
-            ('Whitespace', '\n')))
+            (Whitespace, '\n'),
+            (DefinesInstruction, 'unfilter emptyLines'),
+            (Whitespace, '\n')))
 
     def testToolkit(self):
         self._test('''#define MOZ_LANG_TITLE English (US)
 ''', (
             ('MOZ_LANG_TITLE', 'English (US)'),
-            ('Whitespace', '\n')))
+            (Whitespace, '\n')))
 
     def testToolkitEmpty(self):
         self._test('', tuple())
 
     def test_empty_file(self):
         '''Test that empty files generate errors
 
         defines.inc are interesting that way, as their
         content is added to the generated file.
         '''
-        self._test('\n', (('Junk', '\n'),))
-        self._test('\n\n', (('Junk', '\n\n'),))
-        self._test(' \n\n', (('Junk', ' \n\n'),))
+        self._test('\n', ((Junk, '\n'),))
+        self._test('\n\n', ((Junk, '\n\n'),))
+        self._test(' \n\n', ((Junk, ' \n\n'),))
 
     def test_whitespace_value(self):
         '''Test that there's only one whitespace between key and value
         '''
         # funny formatting of trailing whitespace to make it explicit
         # and flake-8 happy
         self._test('''\
 #define one \n\
 #define two  \n\
 #define tre   \n\
 ''', (
             ('one', ''),
-            ('Whitespace', '\n'),
+            (Whitespace, '\n'),
             ('two', ' '),
-            ('Whitespace', '\n'),
+            (Whitespace, '\n'),
             ('tre', '  '),
-            ('Whitespace', '\n'),))
+            (Whitespace, '\n'),))
 
 
 if __name__ == '__main__':
     unittest.main()
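
The expectation tuples above now name parser classes (Whitespace, Junk,
Comment, DefinesInstruction) instead of class-name strings for non-entity
nodes. A minimal sketch of how such references can be checked, assuming the
real helper is ParserTestMixin._test in compare_locales/tests/__init__.py
(updated in this patch but not shown in this hunk):

    from compare_locales.parser import getParser

    def check(path, contents, refs):
        # Hypothetical stand-in for ParserTestMixin._test, for illustration.
        p = getParser(path)
        p.readContents(contents)
        for entity, (key, val) in zip(p.walk(), refs):
            if isinstance(key, type):
                # Non-entity nodes match by class; the reference value is the
                # node text (for Comments, a substring of it).
                assert isinstance(entity, key) and val in entity.all
            else:
                # Entities still match as (key, value) string pairs.
                assert entity.key == key and entity.val == val
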
--- a/third_party/python/compare-locales/compare_locales/tests/test_dtd.py
+++ b/third_party/python/compare-locales/compare_locales/tests/test_dtd.py
@@ -4,16 +4,21 @@
 
 '''Tests for the DTD parser.
 '''
 
 import unittest
 import re
 
 from compare_locales import parser
+from compare_locales.parser import (
+    Comment,
+    Junk,
+    Whitespace,
+)
 from compare_locales.tests import ParserTestMixin
 
 
 class TestDTD(ParserTestMixin, unittest.TestCase):
     '''Tests for the DTD Parser.'''
     filename = 'foo.dtd'
 
     def test_one_entity(self):
@@ -25,27 +30,27 @@ class TestDTD(ParserTestMixin, unittest.
 <!ENTITY good.two "two">
 <!ENTITY bad.two "bad "quoted" word">
 <!ENTITY good.three "three">
 <!ENTITY good.four "good ' quote">
 <!ENTITY good.five "good 'quoted' word">
 '''
     quoteRef = (
         ('good.one', 'one'),
-        ('Whitespace', '\n'),
-        ('Junk', '<!ENTITY bad.one "bad " quote">\n'),
+        (Whitespace, '\n'),
+        (Junk, '<!ENTITY bad.one "bad " quote">\n'),
         ('good.two', 'two'),
-        ('Whitespace', '\n'),
-        ('Junk', '<!ENTITY bad.two "bad "quoted" word">\n'),
+        (Whitespace, '\n'),
+        (Junk, '<!ENTITY bad.two "bad "quoted" word">\n'),
         ('good.three', 'three'),
-        ('Whitespace', '\n'),
+        (Whitespace, '\n'),
         ('good.four', 'good \' quote'),
-        ('Whitespace', '\n'),
+        (Whitespace, '\n'),
         ('good.five', 'good \'quoted\' word'),
-        ('Whitespace', '\n'),)
+        (Whitespace, '\n'),)
 
     def test_quotes(self):
         self._test(self.quoteContent, self.quoteRef)
 
     def test_apos(self):
         qr = re.compile('[\'"]', re.M)
 
         def quot2apos(s):
@@ -64,27 +69,27 @@ class TestDTD(ParserTestMixin, unittest.
         self._test('''<!ENTITY first "string">
 <!ENTITY second "string">
 <!--
 <!ENTITY commented "out">
 -->
 ''',
                    (
                        ('first', 'string'),
-                       ('Whitespace', '\n'),
+                       (Whitespace, '\n'),
                        ('second', 'string'),
-                       ('Whitespace', '\n'),
-                       ('Comment', 'out'),
-                       ('Whitespace', '\n')))
+                       (Whitespace, '\n'),
+                       (Comment, 'out'),
+                       (Whitespace, '\n')))
 
     def test_license_header(self):
         p = parser.getParser('foo.dtd')
         p.readContents(self.resource('triple-license.dtd'))
         entities = list(p.walk())
-        self.assert_(isinstance(entities[0], parser.Comment))
+        self.assertIsInstance(entities[0], parser.Comment)
         self.assertIn('MPL', entities[0].all)
         e = entities[2]
         self.assert_(isinstance(e, parser.Entity))
         self.assertEqual(e.key, 'foo')
         self.assertEqual(e.val, 'value')
         self.assertEqual(len(entities), 4)
         p.readContents('''\
 <!-- This Source Code Form is subject to the terms of the Mozilla Public
@@ -102,27 +107,27 @@ class TestDTD(ParserTestMixin, unittest.
         self.assertEqual(len(entities), 4)
 
     def testBOM(self):
         self._test(u'\ufeff<!ENTITY foo.label "stuff">'.encode('utf-8'),
                    (('foo.label', 'stuff'),))
 
     def test_trailing_whitespace(self):
         self._test('<!ENTITY foo.label "stuff">\n  \n',
-                   (('foo.label', 'stuff'), ('Whitespace', '\n  \n')))
+                   (('foo.label', 'stuff'), (Whitespace, '\n  \n')))
 
     def test_unicode_comment(self):
         self._test('<!-- \xe5\x8f\x96 -->',
-                   (('Comment', u'\u53d6'),))
+                   ((Comment, u'\u53d6'),))
 
     def test_empty_file(self):
         self._test('', tuple())
-        self._test('\n', (('Whitespace', '\n'),))
-        self._test('\n\n', (('Whitespace', '\n\n'),))
-        self._test(' \n\n', (('Whitespace', ' \n\n'),))
+        self._test('\n', ((Whitespace, '\n'),))
+        self._test('\n\n', ((Whitespace, '\n\n'),))
+        self._test(' \n\n', ((Whitespace, ' \n\n'),))
 
     def test_positions(self):
         self.parser.readContents('''\
 <!ENTITY one  "value">
 <!ENTITY  two "other
 escaped value">
 ''')
         one, two = list(self.parser)
@@ -172,11 +177,38 @@ escaped value">
         entity = next(entities)
         self.assertEqual(entity.raw_val, '&#x0026;')
         self.assertEqual(entity.val, '&')
 
         entity = next(entities)
         self.assertEqual(entity.raw_val, '&unknownEntity;')
         self.assertEqual(entity.val, '&unknownEntity;')
 
+    def test_comment_val(self):
+        self.parser.readContents('''\
+<!-- comment
+spanning lines -->  <!--
+-->
+<!-- last line -->
+''')
+        entities = self.parser.walk()
+
+        entity = next(entities)
+        self.assertIsInstance(entity, parser.Comment)
+        self.assertEqual(entity.val, ' comment\nspanning lines ')
+        entity = next(entities)
+        self.assertIsInstance(entity, parser.Whitespace)
+
+        entity = next(entities)
+        self.assertIsInstance(entity, parser.Comment)
+        self.assertEqual(entity.val, '\n')
+        entity = next(entities)
+        self.assertIsInstance(entity, parser.Whitespace)
+
+        entity = next(entities)
+        self.assertIsInstance(entity, parser.Comment)
+        self.assertEqual(entity.val, ' last line ')
+        entity = next(entities)
+        self.assertIsInstance(entity, parser.Whitespace)
+
 
 if __name__ == '__main__':
     unittest.main()
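
The new test_comment_val pins down the Comment value semantics: .val is the
text between the <!-- and --> delimiters, .all the full source span. A quick
sketch against the parser directly:

    from compare_locales.parser import getParser

    p = getParser('foo.dtd')
    p.readContents('<!-- note -->')
    comment = next(p.walk())
    print(repr(comment.val))  # ' note '
    print(repr(comment.all))  # '<!-- note -->'
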
--- a/third_party/python/compare-locales/compare_locales/tests/test_ftl.py
+++ b/third_party/python/compare-locales/compare_locales/tests/test_ftl.py
@@ -41,19 +41,19 @@ class TestFluentParser(ParserTestMixin, 
 a = One
 b = One two three
 c = One { $arg } two
 d =
     One { $arg ->
        *[x] Two three
         [y] Four
     } five.
-e
+e =
     .attr = One
-f
+f =
     .attr1 = One
     .attr2 = Two
 g = One two
     .attr = Three
 h =
     One { $arg ->
        *[x] Two three
         [y] Four
@@ -98,17 +98,17 @@ h =
 abc =
     A
     B
     C
 ''')
 
         [abc] = list(self.parser)
         self.assertEqual(abc.key, 'abc')
-        self.assertEqual(abc.val, '    A\n    B\n    C')
+        self.assertEqual(abc.val, 'A\n    B\n    C')
         self.assertEqual(abc.all, 'abc =\n    A\n    B\n    C')
 
     def test_message_with_attribute(self):
         self.parser.readContents('''\
 abc = ABC
     .attr = Attr
 ''')
 
@@ -130,76 +130,205 @@ abc
         attributes = list(abc.attributes)
         self.assertEqual(len(attributes), 1)
         attr = attributes[0]
         self.assertEqual(attr.key, 'attr')
         self.assertEqual(attr.val, 'Attr')
 
     def test_non_localizable(self):
         self.parser.readContents('''\
+### Resource Comment
+
+foo = Foo
+
+## Group Comment
+
+-bar = Bar
+
+##
+
+# Standalone Comment
+
+# Baz Comment
+baz = Baz
+''')
+        entities = self.parser.walk()
+
+        entity = next(entities)
+        self.assertTrue(isinstance(entity, parser.FluentComment))
+        self.assertEqual(entity.all, '### Resource Comment')
+
+        entity = next(entities)
+        self.assertTrue(isinstance(entity, parser.Whitespace))
+        self.assertEqual(entity.all, '\n\n')
+
+        entity = next(entities)
+        self.assertTrue(isinstance(entity, parser.FluentMessage))
+        self.assertEqual(entity.val, 'Foo')
+
+        entity = next(entities)
+        self.assertTrue(isinstance(entity, parser.Whitespace))
+        self.assertEqual(entity.all, '\n\n')
+
+        entity = next(entities)
+        self.assertTrue(isinstance(entity, parser.FluentComment))
+        self.assertEqual(entity.all, '## Group Comment')
+
+        entity = next(entities)
+        self.assertTrue(isinstance(entity, parser.Whitespace))
+        self.assertEqual(entity.all, '\n\n')
+
+        entity = next(entities)
+        self.assertTrue(isinstance(entity, parser.FluentTerm))
+        self.assertEqual(entity.val, 'Bar')
+
+        entity = next(entities)
+        self.assertTrue(isinstance(entity, parser.Whitespace))
+        self.assertEqual(entity.all, '\n\n')
+
+        entity = next(entities)
+        self.assertTrue(isinstance(entity, parser.FluentComment))
+        self.assertEqual(entity.all, '##')
+
+        entity = next(entities)
+        self.assertTrue(isinstance(entity, parser.Whitespace))
+        self.assertEqual(entity.all, '\n\n')
+
+        entity = next(entities)
+        self.assertTrue(isinstance(entity, parser.FluentComment))
+        self.assertEqual(entity.all, '# Standalone Comment')
+
+        entity = next(entities)
+        self.assertTrue(isinstance(entity, parser.Whitespace))
+        self.assertEqual(entity.all, '\n\n')
+
+        entity = next(entities)
+        self.assertTrue(isinstance(entity, parser.FluentMessage))
+        self.assertEqual(entity.val, 'Baz')
+        self.assertEqual(entity.entry.comment.content, 'Baz Comment')
+
+        entity = next(entities)
+        self.assertTrue(isinstance(entity, parser.Whitespace))
+        self.assertEqual(entity.all, '\n')
+
+    def test_non_localizable_syntax_zero_four(self):
+        self.parser.readContents('''\
 // Resource Comment
 
 foo = Foo
 
 // Section Comment
 [[ Section Header ]]
 
 bar = Bar
 
+[[ Another Section ]]
+
 // Standalone Comment
 
 // Baz Comment
 baz = Baz
 ''')
         entities = self.parser.walk()
 
         entity = next(entities)
-        self.assertTrue(isinstance(entity, parser.Comment))
+        self.assertTrue(isinstance(entity, parser.FluentComment))
         self.assertEqual(entity.all, '// Resource Comment')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.Whitespace))
         self.assertEqual(entity.all, '\n\n')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.FluentEntity))
         self.assertEqual(entity.val, 'Foo')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.Whitespace))
         self.assertEqual(entity.all, '\n\n')
 
         entity = next(entities)
-        self.assertTrue(isinstance(entity, parser.FluentSection))
+        self.assertTrue(isinstance(entity, parser.FluentComment))
         self.assertEqual(
-            entity.all, '// Section Comment\n[[ Section Header ]]')
-        self.assertEqual(entity.val, 'Section Header ')
-        self.assertEqual(
-            entity.entry.comment.content, 'Section Comment')
+            entity.all,
+            '// Section Comment\n[[ Section Header ]]'
+        )
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.Whitespace))
         self.assertEqual(entity.all, '\n\n')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.FluentEntity))
         self.assertEqual(entity.val, 'Bar')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.Whitespace))
         self.assertEqual(entity.all, '\n\n')
 
         entity = next(entities)
-        self.assertTrue(isinstance(entity, parser.Comment))
+        self.assertTrue(isinstance(entity, parser.FluentComment))
+        self.assertEqual(entity.all, '[[ Another Section ]]')
+
+        entity = next(entities)
+        self.assertTrue(isinstance(entity, parser.Whitespace))
+        self.assertEqual(entity.all, '\n\n')
+
+        entity = next(entities)
+        self.assertTrue(isinstance(entity, parser.FluentComment))
         self.assertEqual(entity.all, '// Standalone Comment')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.Whitespace))
         self.assertEqual(entity.all, '\n\n')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.FluentEntity))
         self.assertEqual(entity.val, 'Baz')
         self.assertEqual(entity.entry.comment.content, 'Baz Comment')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.Whitespace))
         self.assertEqual(entity.all, '\n')
+
+    def test_comments_val(self):
+        self.parser.readContents('''\
+// Legacy Comment
+
+### Resource Comment
+
+## Section Comment
+
+# Standalone Comment
+''')
+        entities = self.parser.walk()
+
+        entity = next(entities)
+        # ensure that Fluent comments are both FluentComment and Comment
+        self.assertTrue(isinstance(entity, parser.FluentComment))
+
+        # now test the actual .val values
+        self.assertTrue(isinstance(entity, parser.Comment))
+        self.assertEqual(entity.val, 'Legacy Comment')
+
+        entity = next(entities)
+        self.assertTrue(isinstance(entity, parser.Whitespace))
+
+        entity = next(entities)
+        self.assertTrue(isinstance(entity, parser.Comment))
+        self.assertEqual(entity.val, 'Resource Comment')
+
+        entity = next(entities)
+        self.assertTrue(isinstance(entity, parser.Whitespace))
+
+        entity = next(entities)
+        self.assertTrue(isinstance(entity, parser.Comment))
+        self.assertEqual(entity.val, 'Section Comment')
+
+        entity = next(entities)
+        self.assertTrue(isinstance(entity, parser.Whitespace))
+
+        entity = next(entities)
+        self.assertTrue(isinstance(entity, parser.Comment))
+        self.assertEqual(entity.val, 'Standalone Comment')
+
+        entity = next(entities)
+        self.assertTrue(isinstance(entity, parser.Whitespace))
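
The rewritten FTL tests exercise the comment grammar of Fluent Syntax 0.5 as
implemented by python-fluent 0.6.2: # attaches to the following message,
## opens a group, ### is a resource-level comment, and // survives only as
legacy syntax. A small sketch using the parser import seen in context.py:

    from fluent.syntax.parser import FluentParser

    resource = FluentParser().parse(
        '### Resource Comment\n'
        '\n'
        '## Group Comment\n'
        '\n'
        '# Message Comment\n'
        'baz = Baz\n'
    )
    # Expected entry types, in order:
    # ResourceComment, GroupComment, Message (carrying the # comment).
    print([type(entry).__name__ for entry in resource.body])
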
--- a/third_party/python/compare-locales/compare_locales/tests/test_ini.py
+++ b/third_party/python/compare-locales/compare_locales/tests/test_ini.py
@@ -1,140 +1,185 @@
 # -*- coding: utf-8 -*-
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 import unittest
 
 from compare_locales.tests import ParserTestMixin
+from compare_locales.parser import (
+    Comment,
+    IniSection,
+    Junk,
+    Whitespace,
+)
 
 
 mpl2 = '''\
 ; This Source Code Form is subject to the terms of the Mozilla Public
 ; License, v. 2.0. If a copy of the MPL was not distributed with this file,
-; You can obtain one at http://mozilla.org/MPL/2.0/.
-'''
+; You can obtain one at http://mozilla.org/MPL/2.0/.'''
 
 
 class TestIniParser(ParserTestMixin, unittest.TestCase):
 
     filename = 'foo.ini'
 
     def testSimpleHeader(self):
         self._test('''; This file is in the UTF-8 encoding
 [Strings]
 TitleText=Some Title
 ''', (
-            ('Comment', 'UTF-8 encoding'),
-            ('IniSection', 'Strings'),
-            ('TitleText', 'Some Title'),))
+            (Comment, 'UTF-8 encoding'),
+            (Whitespace, '\n'),
+            (IniSection, 'Strings'),
+            (Whitespace, '\n'),
+            ('TitleText', 'Some Title'),
+            (Whitespace, '\n')))
 
     def testMPL2_Space_UTF(self):
         self._test(mpl2 + '''
+
 ; This file is in the UTF-8 encoding
 [Strings]
 TitleText=Some Title
 ''', (
-            ('Comment', mpl2),
-            ('Comment', 'UTF-8'),
-            ('IniSection', 'Strings'),
-            ('TitleText', 'Some Title'),))
+            (Comment, mpl2),
+            (Whitespace, '\n\n'),
+            (Comment, 'UTF-8'),
+            (Whitespace, '\n'),
+            (IniSection, 'Strings'),
+            (Whitespace, '\n'),
+            ('TitleText', 'Some Title'),
+            (Whitespace, '\n')))
 
     def testMPL2_Space(self):
         self._test(mpl2 + '''
+
 [Strings]
 TitleText=Some Title
 ''', (
-            ('Comment', mpl2),
-            ('IniSection', 'Strings'),
-            ('TitleText', 'Some Title'),))
+            (Comment, mpl2),
+            (Whitespace, '\n\n'),
+            (IniSection, 'Strings'),
+            (Whitespace, '\n'),
+            ('TitleText', 'Some Title'),
+            (Whitespace, '\n')))
 
     def testMPL2_MultiSpace(self):
-        self._test(mpl2 + '''\
+        self._test(mpl2 + '''
 
 ; more comments
 
 [Strings]
 TitleText=Some Title
 ''', (
-            ('Comment', mpl2),
-            ('Comment', 'more comments'),
-            ('IniSection', 'Strings'),
-            ('TitleText', 'Some Title'),))
+            (Comment, mpl2),
+            (Whitespace, '\n\n'),
+            (Comment, 'more comments'),
+            (Whitespace, '\n\n'),
+            (IniSection, 'Strings'),
+            (Whitespace, '\n'),
+            ('TitleText', 'Some Title'),
+            (Whitespace, '\n')))
 
     def testMPL2_JunkBeforeCategory(self):
-        self._test(mpl2 + '''\
+        self._test(mpl2 + '''
 Junk
 [Strings]
 TitleText=Some Title
 ''', (
-            ('Comment', mpl2),
-            ('Junk', 'Junk'),
-            ('IniSection', 'Strings'),
-            ('TitleText', 'Some Title')))
+            (Comment, mpl2),
+            (Whitespace, '\n'),
+            (Junk, 'Junk\n'),
+            (IniSection, 'Strings'),
+            (Whitespace, '\n'),
+            ('TitleText', 'Some Title'),
+            (Whitespace, '\n')))
 
     def test_TrailingComment(self):
         self._test(mpl2 + '''
+
 [Strings]
 TitleText=Some Title
 ;Stray trailing comment
 ''', (
-            ('Comment', mpl2),
-            ('IniSection', 'Strings'),
+            (Comment, mpl2),
+            (Whitespace, '\n\n'),
+            (IniSection, 'Strings'),
+            (Whitespace, '\n'),
             ('TitleText', 'Some Title'),
-            ('Comment', 'Stray trailing')))
+            (Whitespace, '\n'),
+            (Comment, 'Stray trailing'),
+            (Whitespace, '\n')))
 
     def test_SpacedTrailingComments(self):
         self._test(mpl2 + '''
+
 [Strings]
 TitleText=Some Title
 
 ;Stray trailing comment
 ;Second stray comment
 
 ''', (
-            ('Comment', mpl2),
-            ('IniSection', 'Strings'),
+            (Comment, mpl2),
+            (Whitespace, '\n\n'),
+            (IniSection, 'Strings'),
+            (Whitespace, '\n'),
             ('TitleText', 'Some Title'),
-            ('Comment', 'Second stray comment')))
+            (Whitespace, '\n\n'),
+            (Comment, 'Second stray comment'),
+            (Whitespace, '\n\n')))
 
     def test_TrailingCommentsAndJunk(self):
         self._test(mpl2 + '''
+
 [Strings]
 TitleText=Some Title
 
 ;Stray trailing comment
 Junk
 ;Second stray comment
 
 ''', (
-            ('Comment', mpl2),
-            ('IniSection', 'Strings'),
+            (Comment, mpl2),
+            (Whitespace, '\n\n'),
+            (IniSection, 'Strings'),
+            (Whitespace, '\n'),
             ('TitleText', 'Some Title'),
-            ('Comment', 'Stray trailing'),
-            ('Junk', 'Junk'),
-            ('Comment', 'Second stray comment')))
+            (Whitespace, '\n\n'),
+            (Comment, 'Stray trailing'),
+            (Whitespace, '\n'),
+            (Junk, 'Junk\n'),
+            (Comment, 'Second stray comment'),
+            (Whitespace, '\n\n')))
 
     def test_JunkInbetweenEntries(self):
         self._test(mpl2 + '''
+
 [Strings]
 TitleText=Some Title
 
 Junk
 
 Good=other string
 ''', (
-            ('Comment', mpl2),
-            ('IniSection', 'Strings'),
+            (Comment, mpl2),
+            (Whitespace, '\n\n'),
+            (IniSection, 'Strings'),
+            (Whitespace, '\n'),
             ('TitleText', 'Some Title'),
-            ('Junk', 'Junk'),
-            ('Good', 'other string')))
+            (Whitespace, '\n\n'),
+            (Junk, 'Junk\n\n'),
+            ('Good', 'other string'),
+            (Whitespace, '\n')))
 
     def test_empty_file(self):
         self._test('', tuple())
-        self._test('\n', (('Whitespace', '\n'),))
-        self._test('\n\n', (('Whitespace', '\n\n'),))
-        self._test(' \n\n', (('Whitespace', ' \n\n'),))
+        self._test('\n', ((Whitespace, '\n'),))
+        self._test('\n\n', ((Whitespace, '\n\n'),))
+        self._test(' \n\n', ((Whitespace, ' \n\n'),))
 
 
 if __name__ == '__main__':
     unittest.main()
--- a/third_party/python/compare-locales/compare_locales/tests/test_merge.py
+++ b/third_party/python/compare-locales/compare_locales/tests/test_merge.py
@@ -384,22 +384,22 @@ class TestFluent(unittest.TestCase):
         del self.tmp
         del self.ref
         del self.l10n
 
     def testGood(self):
         self.reference("""\
 foo = fooVal
 bar = barVal
-eff = effVal
+-eff = effVal
 """)
         self.localized("""\
 foo = lFoo
 bar = lBar
-eff = lEff
+-eff = lEff
 """)
         cc = ContentComparer([Observer()])
         cc.compare(File(self.ref, "en-reference.ftl", ""),
                    File(self.l10n, "l10n.ftl", ""),
                    mozpath.join(self.tmp, "merge", "l10n.ftl"))
 
         self.assertDictEqual(
             cc.observers[0].toJSON(),
@@ -415,41 +415,43 @@ eff = lEff
         # validate merge results
         mergepath = mozpath.join(self.tmp, "merge", "l10n.ftl")
         self.assert_(not os.path.exists(mergepath))
 
     def testMissing(self):
         self.reference("""\
 foo = fooVal
 bar = barVal
+-baz = bazVal
 eff = effVal
 """)
         self.localized("""\
 foo = lFoo
 eff = lEff
 """)
         cc = ContentComparer([Observer()])
         cc.compare(File(self.ref, "en-reference.ftl", ""),
                    File(self.l10n, "l10n.ftl", ""),
                    mozpath.join(self.tmp, "merge", "l10n.ftl"))
 
         self.assertDictEqual(
             cc.observers[0].toJSON(),
             {
                 'details': {
                     'l10n.ftl': [
-                        {'missingEntity': u'bar'}
+                        {'missingEntity': u'bar'},
+                        {'missingEntity': u'-baz'},
                     ],
                 },
                 'summary': {
                     None: {
                         'changed': 2,
                         'changed_w': 2,
-                        'missing': 1,
-                        'missing_w': 1
+                        'missing': 2,
+                        'missing_w': 2,
                     }
                 }
             }
         )
 
         # validate merge results
         mergepath = mozpath.join(self.tmp, "merge", "l10n.ftl")
         self.assert_(not os.path.exists(mergepath))
@@ -512,16 +514,102 @@ eff = lEff {
         merged_foo = merged_entities[merged_map['foo']]
 
         # foo should be l10n
         p.readFile(self.l10n)
         l10n_entities, l10n_map = p.parse()
         l10n_foo = l10n_entities[l10n_map['foo']]
         self.assertTrue(merged_foo.equals(l10n_foo))
 
+    def testMatchingReferences(self):
+        self.reference("""\
+foo = Reference { bar }
+""")
+        self.localized("""\
+foo = Localized { bar }
+""")
+        cc = ContentComparer([Observer()])
+        cc.compare(File(self.ref, "en-reference.ftl", ""),
+                   File(self.l10n, "l10n.ftl", ""),
+                   mozpath.join(self.tmp, "merge", "l10n.ftl"))
+
+        self.assertDictEqual(
+            cc.observers[0].toJSON(),
+            {
+                'details': {},
+                'summary': {
+                    None: {
+                        'changed': 1,
+                        'changed_w': 1
+                    }
+                }
+            }
+        )
+
+        # validate merge results
+        mergepath = mozpath.join(self.tmp, "merge", "l10n.ftl")
+        self.assert_(not os.path.exists(mergepath))
+
+    def testMismatchingReferences(self):
+        self.reference("""\
+foo = Reference { bar }
+bar = Reference { baz }
+baz = Reference
+""")
+        self.localized("""\
+foo = Localized { qux }
+bar = Localized
+baz = Localized { qux }
+""")
+        cc = ContentComparer([Observer()])
+        cc.compare(File(self.ref, "en-reference.ftl", ""),
+                   File(self.l10n, "l10n.ftl", ""),
+                   mozpath.join(self.tmp, "merge", "l10n.ftl"))
+
+        self.assertDictEqual(
+            cc.observers[0].toJSON(),
+            {
+                'details': {
+                    'l10n.ftl': [
+                            {
+                                'warning':
+                                    u'Missing message reference: bar '
+                                    u'at line 1, column 1 for foo'
+                            },
+                            {
+                                'warning':
+                                    u'Obsolete message reference: qux '
+                                    u'at line 1, column 19 for foo'
+                            },
+                            {
+                                'warning':
+                                    u'Missing message reference: baz '
+                                    u'at line 2, column 1 for bar'
+                            },
+                            {
+                                'warning':
+                                    u'Obsolete message reference: qux '
+                                    u'at line 3, column 19 for baz'
+                            },
+                    ],
+                },
+                'summary': {
+                    None: {
+                        'changed': 3,
+                        'changed_w': 3,
+                        'warnings': 4
+                    }
+                }
+            }
+        )
+
+        # validate merge results
+        mergepath = mozpath.join(self.tmp, "merge", "l10n.ftl")
+        self.assert_(not os.path.exists(mergepath))
+
     def testMismatchingAttributes(self):
         self.reference("""
 foo = Foo
 bar = Bar
   .tender = Attribute value
 eff = Eff
 """)
         self.localized("""\
@@ -569,16 +657,63 @@ eff = lEff
         merged_eff = merged_entities[merged_map['eff']]
 
         # eff should be l10n
         p.readFile(self.l10n)
         l10n_entities, l10n_map = p.parse()
         l10n_eff = l10n_entities[l10n_map['eff']]
         self.assertTrue(merged_eff.equals(l10n_eff))
 
+    def test_term_attributes(self):
+        self.reference("""
+-foo = Foo
+-bar = Bar
+-baz = Baz
+    .attr = Baz Attribute
+-qux = Qux
+    .attr = Qux Attribute
+-missing = Missing
+    .attr = An Attribute
+""")
+        self.localized("""\
+-foo = Localized Foo
+-bar = Localized Bar
+    .attr = Locale-specific Bar Attribute
+-baz = Localized Baz
+-qux = Localized Qux
+    .other = Locale-specific Qux Attribute
+""")
+        cc = ContentComparer([Observer()])
+        cc.compare(File(self.ref, "en-reference.ftl", ""),
+                   File(self.l10n, "l10n.ftl", ""),
+                   mozpath.join(self.tmp, "merge", "l10n.ftl"))
+
+        self.assertDictEqual(
+            cc.observers[0].toJSON(),
+            {
+                'details': {
+                    'l10n.ftl': [
+                        {'missingEntity': u'-missing'},
+                    ],
+                },
+                'summary': {
+                    None: {
+                        'changed': 4,
+                        'changed_w': 4,
+                        'missing': 1,
+                        'missing_w': 1,
+                    }
+                }
+            }
+        )
+
+        # validate merge results
+        mergepath = mozpath.join(self.tmp, "merge", "l10n.ftl")
+        self.assert_(not os.path.exists(mergepath))
+
     def testMismatchingValues(self):
         self.reference("""
 foo = Foo
   .foottr = something
 bar
   .tender = Attribute value
 """)
         self.localized("""\
@@ -617,21 +752,21 @@ bar = lBar
         mergepath = mozpath.join(self.tmp, "merge", "l10n.ftl")
         self.assert_(os.path.exists(mergepath))
 
         p = getParser(mergepath)
         p.readFile(mergepath)
         merged_entities, _ = p.parse()
         self.assertEqual([e.key for e in merged_entities], [])
 
-    def testMissingSection(self):
+    def testMissingGroupComment(self):
         self.reference("""\
 foo = fooVal
 
-[[ Section ]]
+## Group Comment
 bar = barVal
 """)
         self.localized("""\
 foo = lFoo
 bar = lBar
 """)
         cc = ContentComparer([Observer()])
         cc.compare(File(self.ref, "en-reference.ftl", ""),
@@ -654,17 +789,17 @@ bar = lBar
         # validate merge results
         mergepath = mozpath.join(self.tmp, "merge", "l10n.ftl")
         self.assert_(not os.path.exists(mergepath))
 
     def testMissingAttachedComment(self):
         self.reference("""\
 foo = fooVal
 
-// Attached Comment
+# Attached Comment
 bar = barVal
 """)
         self.localized("""\
 foo = lFoo
 bar = barVal
 """)
         cc = ContentComparer([Observer()])
         cc.compare(File(self.ref, "en-reference.ftl", ""),
@@ -693,17 +828,17 @@ bar = barVal
     def testObsoleteStandaloneComment(self):
         self.reference("""\
 foo = fooVal
 bar = barVal
 """)
         self.localized("""\
 foo = lFoo
 
-// Standalone Comment
+# Standalone Comment
 
 bar = lBar
 """)
         cc = ContentComparer([Observer()])
         cc.compare(File(self.ref, "en-reference.ftl", ""),
                    File(self.l10n, "l10n.ftl", ""),
                    mozpath.join(self.tmp, "merge", "l10n.ftl"))
 
@@ -784,59 +919,11 @@ bar = duplicated bar
                       u'Attribute "attr" occurs 3 times '
                       u'at line 4, column 5 for foo'
                       }]
                 }
              })
         mergefile = mozpath.join(self.tmp, "merge", "l10n.ftl")
         self.assertFalse(os.path.isfile(mergefile))
 
-    def test_unmatched_tags(self):
-        self.assertTrue(os.path.isdir(self.tmp))
-        self.reference("""foo = fooVal
-    #yes
-""")
-        self.localized("""foo = fooVal
-    #no
-""")
-        cc = ContentComparer([Observer()])
-        cc.compare(File(self.ref, "en-reference.ftl", ""),
-                   File(self.l10n, "l10n.ftl", ""),
-                   mozpath.join(self.tmp, "merge", "l10n.ftl"))
-        self.assertDictEqual(
-            cc.observers[0].toJSON(),
-            {'summary':
-                {None: {
-                    'unchanged': 1,
-                    'unchanged_w': 1
-                }},
-             'details': {}
-             })
-        mergefile = mozpath.join(self.tmp, "merge", "l10n.ftl")
-        self.assertFalse(os.path.isfile(mergefile))
-
-    def test_matching_tags(self):
-        self.assertTrue(os.path.isdir(self.tmp))
-        self.reference("""foo = fooVal
-    #yes
-""")
-        self.localized("""foo = fooVal
-    #yes
-""")
-        cc = ContentComparer([Observer()])
-        cc.compare(File(self.ref, "en-reference.ftl", ""),
-                   File(self.l10n, "l10n.ftl", ""),
-                   mozpath.join(self.tmp, "merge", "l10n.ftl"))
-        self.assertDictEqual(
-            cc.observers[0].toJSON(),
-            {'summary':
-                {None: {
-                    'unchanged': 1,
-                    'unchanged_w': 1
-                }},
-             'details': {}
-             })
-        mergefile = mozpath.join(self.tmp, "merge", "l10n.ftl")
-        self.assertFalse(os.path.isfile(mergefile))
-
 
 if __name__ == '__main__':
     unittest.main()
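
The new reference checks can be reproduced outside the test harness. A sketch
follows; the import locations of ContentComparer, Observer and File are an
assumption, since this hunk does not show the test module's own imports:

    import codecs
    import os
    import shutil
    import tempfile

    from compare_locales.compare import ContentComparer, Observer
    from compare_locales.paths import File

    tmp = tempfile.mkdtemp()
    ref = os.path.join(tmp, 'en-reference.ftl')
    l10n = os.path.join(tmp, 'l10n.ftl')
    with codecs.open(ref, 'w', 'utf-8') as f:
        f.write('foo = Reference { bar }\nbar = Reference\n')
    with codecs.open(l10n, 'w', 'utf-8') as f:
        f.write('foo = Localized { qux }\nbar = Localized\n')

    cc = ContentComparer([Observer()])
    cc.compare(File(ref, 'en-reference.ftl', ''),
               File(l10n, 'l10n.ftl', ''),
               os.path.join(tmp, 'merge', 'l10n.ftl'))
    # Expect missing/obsolete message reference warnings, as in
    # testMismatchingReferences above.
    print(cc.observers[0].toJSON())
    shutil.rmtree(tmp)
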
--- a/third_party/python/compare-locales/compare_locales/tests/test_merge_ftl.py
+++ b/third_party/python/compare-locales/compare_locales/tests/test_merge_ftl.py
@@ -54,263 +54,240 @@ foo = Foo 2
     .attr = Attr 2
 """)
         self.assertEqual(
             merge_channels(self.name, *channels), b"""
 foo = Foo 1
     .attr = Attr 1
 """)
 
-    def test_tag_in_first(self):
-        channels = (b"""
-foo = Foo 1
-    #tag
-""", b"""
-foo = Foo 2
-""")
-        self.assertEqual(
-            merge_channels(self.name, *channels), b"""
-foo = Foo 1
-    #tag
-""")
-
-    def test_tag_in_last(self):
+    def test_group_comment_in_first(self):
         channels = (b"""
-foo = Foo 1
-""", b"""
-foo = Foo 2
-    #tag
-""")
-        self.assertEqual(
-            merge_channels(self.name, *channels), b"""
-foo = Foo 1
-""")
-
-    def test_tag_changed(self):
-        channels = (b"""
-foo = Foo 1
-    #tag1
-""", b"""
-foo = Foo 2
-    #tag2
-""")
-        self.assertEqual(
-            merge_channels(self.name, *channels), b"""
-foo = Foo 1
-    #tag1
-""")
-
-    def test_section_in_first(self):
-        channels = (b"""
-[[ Section 1 ]]
+## Group Comment 1
 foo = Foo 1
 """, b"""
 foo = Foo 2
 """)
         self.assertEqual(
             merge_channels(self.name, *channels), b"""
-[[ Section 1 ]]
+## Group Comment 1
 foo = Foo 1
 """)
 
-    def test_section_in_last(self):
+    def test_group_comment_in_last(self):
         channels = (b"""
 foo = Foo 1
 """, b"""
-[[ Section 2 ]]
+## Group Comment 2
 foo = Foo 2
 """)
         self.assertEqual(
             merge_channels(self.name, *channels), b"""
-[[ Section 2 ]]
+## Group Comment 2
 foo = Foo 1
 """)
 
-    def test_section_changed(self):
+    def test_group_comment_changed(self):
         channels = (b"""
-[[ Section 1 ]]
+## Group Comment 1
 foo = Foo 1
 """, b"""
-[[ Section 2 ]]
+## Group Comment 2
 foo = Foo 2
 """)
         self.assertEqual(
             merge_channels(self.name, *channels), b"""
-[[ Section 2 ]]
-[[ Section 1 ]]
+## Group Comment 2
+## Group Comment 1
+foo = Foo 1
+""")
+
+    def test_group_comment_and_section(self):
+        channels = (b"""
+## Group Comment
+foo = Foo 1
+""", b"""
+// Section Comment
+[[ Section ]]
+foo = Foo 2
+""")
+        self.assertEqual(
+            merge_channels(self.name, *channels), b"""
+// Section Comment
+[[ Section ]]
+## Group Comment
 foo = Foo 1
 """)
 
     def test_message_comment_in_first(self):
         channels = (b"""
-// Comment 1
+# Comment 1
 foo = Foo 1
 """, b"""
 foo = Foo 2
 """)
         self.assertEqual(
             merge_channels(self.name, *channels), b"""
-// Comment 1
+# Comment 1
 foo = Foo 1
 """)
 
     def test_message_comment_in_last(self):
         channels = (b"""
 foo = Foo 1
 """, b"""
-// Comment 2
+# Comment 2
 foo = Foo 2
 """)
         self.assertEqual(
             merge_channels(self.name, *channels), b"""
 foo = Foo 1
 """)
 
     def test_message_comment_changed(self):
         channels = (b"""
-// Comment 1
+# Comment 1
 foo = Foo 1
 """, b"""
-// Comment 2
+# Comment 2
 foo = Foo 2
 """)
         self.assertEqual(
             merge_channels(self.name, *channels), b"""
-// Comment 1
+# Comment 1
 foo = Foo 1
 """)
 
-    def test_section_comment_in_first(self):
-        channels = (b"""
-// Comment 1
-[[ Section ]]
-""", b"""
-[[ Section ]]
-""")
-        self.assertEqual(
-            merge_channels(self.name, *channels), b"""
-// Comment 1
-[[ Section ]]
-""")
-
-    def test_section_comment_in_last(self):
-        channels = (b"""
-[[ Section ]]
-""", b"""
-// Comment 2
-[[ Section ]]
-""")
-        self.assertEqual(
-            merge_channels(self.name, *channels), b"""
-[[ Section ]]
-""")
-
-    def test_section_comment_changed(self):
-        channels = (b"""
-// Comment 1
-[[ Section ]]
-""", b"""
-// Comment 2
-[[ Section ]]
-""")
-        self.assertEqual(
-            merge_channels(self.name, *channels), b"""
-// Comment 1
-[[ Section ]]
-""")
-
     def test_standalone_comment_in_first(self):
         channels = (b"""
 foo = Foo 1
 
-// Comment 1
+# Comment 1
 """, b"""
 foo = Foo 2
 """)
         self.assertEqual(
             merge_channels(self.name, *channels), b"""
 foo = Foo 1
 
-// Comment 1
+# Comment 1
 """)
 
     def test_standalone_comment_in_last(self):
         channels = (b"""
 foo = Foo 1
 """, b"""
 foo = Foo 2
 
-// Comment 2
+# Comment 2
 """)
         self.assertEqual(
             merge_channels(self.name, *channels), b"""
 foo = Foo 1
 
-// Comment 2
+# Comment 2
 """)
 
     def test_standalone_comment_changed(self):
         channels = (b"""
 foo = Foo 1
 
-// Comment 1
+# Comment 1
 """, b"""
 foo = Foo 2
 
-// Comment 2
+# Comment 2
 """)
         self.assertEqual(
             merge_channels(self.name, *channels), b"""
 foo = Foo 1
 
-// Comment 2
+# Comment 2
 
-// Comment 1
+# Comment 1
 """)
 
     def test_resource_comment_in_first(self):
         channels = (b"""
-// Resource Comment 1
+### Resource Comment 1
 
 foo = Foo 1
 """, b"""
 foo = Foo 2
 """)
         self.assertEqual(
             merge_channels(self.name, *channels), b"""
-// Resource Comment 1
+### Resource Comment 1
 
 foo = Foo 1
 """)
 
     def test_resource_comment_in_last(self):
         channels = (b"""
 foo = Foo 1
 """, b"""
-// Resource Comment 1
+### Resource Comment 1
 
 foo = Foo 2
 """)
         self.assertEqual(
             merge_channels(self.name, *channels), b"""
-// Resource Comment 1
+### Resource Comment 1
 
 foo = Foo 1
 """)
 
     def test_resource_comment_changed(self):
         channels = (b"""
-// Resource Comment 1
+### Resource Comment 1
 
 foo = Foo 1
 """, b"""
-// Resource Comment 2
+### Resource Comment 2
 
 foo = Foo 2
 """)
         self.assertEqual(
             merge_channels(self.name, *channels), b"""
-// Resource Comment 2
+### Resource Comment 2
 
-// Resource Comment 1
+### Resource Comment 1
 
 foo = Foo 1
 """)
+
+    def test_cross_grammar(self):
+        channels = (b"""
+# Comment 1
+foo =
+    .attr = Attribute 1
+""", b"""
+// Comment 2
+foo
+    .attr = Attribute 2
+""")
+        self.assertEqual(
+            merge_channels(self.name, *channels), b"""
+# Comment 1
+foo =
+    .attr = Attribute 1
+""")
+
+    def test_cross_grammar_standalone_comment(self):
+        '''In particular, this is going to be triggered by license headers.'''
+        channels = (b"""
+# Same comment
+
+foo =
+    .attr = Attribute 1
+""", b"""
+// Same comment
+
+foo
+    .attr = Attribute 2
+""")
+        self.assertEqual(
+            merge_channels(self.name, *channels), b"""
+# Same comment
+
+foo =
+    .attr = Attribute 1
+""")
--- a/third_party/python/compare-locales/compare_locales/tests/test_parser.py
+++ b/third_party/python/compare-locales/compare_locales/tests/test_parser.py
@@ -1,12 +1,13 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
+import textwrap
 import unittest
 
 from compare_locales import parser
 
 
 class TestParserContext(unittest.TestCase):
     def test_linecol(self):
         "Should return 1-based line and column numbers."
@@ -41,8 +42,27 @@ third line
         self.assertListEqual(
             entities,
             []
         )
         self.assertDictEqual(
             _map,
             {}
         )
+
+
+class TestOffsetComment(unittest.TestCase):
+    def test_offset(self):
+        ctx = parser.Parser.Context(textwrap.dedent('''\
+            #foo
+            #bar
+            # baz
+            '''
+        ))  # noqa
+        offset_comment = parser.OffsetComment(ctx, (0, len(ctx.contents)))
+        self.assertEqual(
+            offset_comment.val,
+            textwrap.dedent('''\
+                foo
+                bar
+                 baz
+            ''')
+        )
--- a/third_party/python/compare-locales/compare_locales/tests/test_properties.py
+++ b/third_party/python/compare-locales/compare_locales/tests/test_properties.py
@@ -1,41 +1,46 @@
 # -*- coding: utf-8 -*-
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 import unittest
 
 from compare_locales.tests import ParserTestMixin
+from compare_locales.parser import (
+    Comment,
+    Junk,
+    Whitespace,
+)
 
 
 class TestPropertiesParser(ParserTestMixin, unittest.TestCase):
 
     filename = 'foo.properties'
 
     def testBackslashes(self):
         self._test(r'''one_line = This is one line
 two_line = This is the first \
 of two lines
 one_line_trailing = This line ends in \\
 and has junk
 two_lines_triple = This line is one of two and ends in \\\
 and still has another line coming
 ''', (
             ('one_line', 'This is one line'),
-            ('Whitespace', '\n'),
+            (Whitespace, '\n'),
             ('two_line', u'This is the first of two lines'),
-            ('Whitespace', '\n'),
+            (Whitespace, '\n'),
             ('one_line_trailing', u'This line ends in \\'),
-            ('Whitespace', '\n'),
-            ('Junk', 'and has junk\n'),
+            (Whitespace, '\n'),
+            (Junk, 'and has junk\n'),
             ('two_lines_triple', 'This line is one of two and ends in \\'
              'and still has another line coming'),
-            ('Whitespace', '\n')))
+            (Whitespace, '\n')))
 
     def testProperties(self):
         # port of netwerk/test/PropertiesTest.cpp
         self.parser.readContents(self.resource('test.properties'))
         ref = ['1', '2', '3', '4', '5', '6', '7', '8',
                'this is the first part of a continued line '
                'and here is the 2nd part']
         i = iter(self.parser)
@@ -63,20 +68,20 @@ and an end''', (('bar', 'one line with a
     def test_license_header(self):
         self._test('''\
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 foo=value
 ''', (
-            ('Comment', 'MPL'),
-            ('Whitespace', '\n\n'),
+            (Comment, 'MPL'),
+            (Whitespace, '\n\n'),
             ('foo', 'value'),
-            ('Whitespace', '\n')))
+            (Whitespace, '\n')))
 
     def test_escapes(self):
         self.parser.readContents(r'''
 # unicode escapes
 zero = some \unicode
 one = \u0
 two = \u41
 three = \u042
@@ -92,71 +97,71 @@ seven = \n\r\t\\
     def test_trailing_comment(self):
         self._test('''first = string
 second = string
 
 #
 #commented out
 ''', (
             ('first', 'string'),
-            ('Whitespace', '\n'),
+            (Whitespace, '\n'),
             ('second', 'string'),
-            ('Whitespace', '\n\n'),
-            ('Comment', 'commented out'),
-            ('Whitespace', '\n')))
+            (Whitespace, '\n\n'),
+            (Comment, 'commented out'),
+            (Whitespace, '\n')))
 
     def test_trailing_newlines(self):
         self._test('''\
 foo = bar
 
 \x20\x20
-  ''', (('foo', 'bar'), ('Whitespace', '\n\n\x20\x20\n ')))
+  ''', (('foo', 'bar'), (Whitespace, '\n\n\x20\x20\n ')))
 
     def test_just_comments(self):
         self._test('''\
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 # LOCALIZATION NOTE These strings are used inside the Promise debugger
 # which is available as a panel in the Debugger.
 ''', (
-            ('Comment', 'MPL'),
-            ('Whitespace', '\n\n'),
-            ('Comment', 'LOCALIZATION NOTE'),
-            ('Whitespace', '\n')))
+            (Comment, 'MPL'),
+            (Whitespace, '\n\n'),
+            (Comment, 'LOCALIZATION NOTE'),
+            (Whitespace, '\n')))
 
     def test_just_comments_without_trailing_newline(self):
         self._test('''\
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 # LOCALIZATION NOTE These strings are used inside the Promise debugger
 # which is available as a panel in the Debugger.''', (
-            ('Comment', 'MPL'),
-            ('Whitespace', '\n\n'),
-            ('Comment', 'LOCALIZATION NOTE')))
+            (Comment, 'MPL'),
+            (Whitespace, '\n\n'),
+            (Comment, 'LOCALIZATION NOTE')))
 
     def test_trailing_comment_and_newlines(self):
         self._test('''\
 # LOCALIZATION NOTE These strings are used inside the Promise debugger
 # which is available as a panel in the Debugger.
 
 
 
 ''',  (
-            ('Comment', 'LOCALIZATION NOTE'),
-            ('Whitespace', '\n\n\n')))
+            (Comment, 'LOCALIZATION NOTE'),
+            (Whitespace, '\n\n\n')))
 
     def test_empty_file(self):
         self._test('', tuple())
-        self._test('\n', (('Whitespace', '\n'),))
-        self._test('\n\n', (('Whitespace', '\n\n'),))
-        self._test(' \n\n', (('Whitespace', '\n\n'),))
+        self._test('\n', ((Whitespace, '\n'),))
+        self._test('\n\n', ((Whitespace, '\n\n'),))
+        self._test(' \n\n', ((Whitespace, '\n\n'),))
 
     def test_positions(self):
         self.parser.readContents('''\
 one = value
 two = other \\
 escaped value
 ''')
         one, two = list(self.parser)
--- a/third_party/python/fluent/fluent/migrate/__init__.py
+++ b/third_party/python/fluent/fluent/migrate/__init__.py
@@ -1,13 +1,5 @@
 # coding=utf8
 
-from .context import MergeContext                      # noqa: F401
-from .errors import (                                  # noqa: F401
-    MigrationError, NotSupportedError, UnreadableReferenceError
+from .transforms import (                              # noqa: F401
+    CONCAT, COPY, PLURALS, REPLACE, REPLACE_IN_TEXT
 )
-from .transforms import (                              # noqa: F401
-    Source, COPY, REPLACE_IN_TEXT, REPLACE, PLURALS, CONCAT
-)
-from .helpers import (                                 # noqa: F401
-    EXTERNAL_ARGUMENT, MESSAGE_REFERENCE
-)
-from .changesets import convert_blame_to_changesets    # noqa: F401
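
With the trimmed __init__, only the transforms stay re-exported; consumers
import the context and error types from their defining modules instead. A
sketch of the adjusted imports:

    from fluent.migrate import CONCAT, COPY, PLURALS, REPLACE, REPLACE_IN_TEXT
    from fluent.migrate.context import MergeContext
    from fluent.migrate.errors import (
        MigrationError, NotSupportedError, UnreadableReferenceError)
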
--- a/third_party/python/fluent/fluent/migrate/cldr.py
+++ b/third_party/python/fluent/fluent/migrate/cldr.py
@@ -43,13 +43,13 @@ def get_plural_categories(lang):
 
     langs_categories = categories.get(lang, None)
 
     if langs_categories is None:
         # Remove the trailing subtag.
         fallback_lang, _, _ = lang.rpartition('-')
 
         if fallback_lang == '':
-            raise RuntimeError('Unknown language: {}'.format(lang))
+            raise RuntimeError('Missing plural categories for {}'.format(lang))
 
         return get_plural_categories(fallback_lang)
 
     return langs_categories
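
The reworded error clarifies the fallback behaviour: a locale with a subtag
retries with its base language before giving up. A usage sketch (assuming
'sr-Latn' itself is not listed in the CLDR plural data):

    from fluent.migrate.cldr import get_plural_categories

    print(get_plural_categories('en'))  # e.g. ('one', 'other')
    # 'sr-Latn' falls back to the categories defined for 'sr'.
    assert get_plural_categories('sr-Latn') == get_plural_categories('sr')
    # A language with no data at all now raises
    # RuntimeError('Missing plural categories for ...').
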
--- a/third_party/python/fluent/fluent/migrate/context.py
+++ b/third_party/python/fluent/fluent/migrate/context.py
@@ -9,26 +9,24 @@ try:
     from itertools import zip_longest
 except ImportError:
     from itertools import izip_longest as zip_longest
 
 import fluent.syntax.ast as FTL
 from fluent.syntax.parser import FluentParser
 from fluent.syntax.serializer import FluentSerializer
 from fluent.util import fold
-try:
-    from compare_locales.parser import getParser
-except ImportError:
-    def getParser(path):
-        raise RuntimeError('compare-locales required')
+from compare_locales.parser import getParser
 
 from .cldr import get_plural_categories
 from .transforms import Source
 from .merge import merge_resource
-from .errors import NotSupportedError, UnreadableReferenceError
+from .util import get_message
+from .errors import (
+    EmptyLocalizationError, NotSupportedError, UnreadableReferenceError)
 
 
 class MergeContext(object):
     """Stateful context for merging translation resources.
 
     `MergeContext` must be configured with the target language and the
     directory locations of the input data.
 
@@ -54,18 +52,18 @@ class MergeContext(object):
         self.fluent_parser = FluentParser(with_spans=False)
         self.fluent_serializer = FluentSerializer()
 
         # An iterable of plural category names relevant to the context's
         # language.  E.g. ('one', 'other') for English.
         try:
             self.plural_categories = get_plural_categories(lang)
         except RuntimeError as e:
-            print(e.message)
-            self.plural_categories = 'en'
+            logging.getLogger('migrate').warn(e)
+            self.plural_categories = get_plural_categories('en')
 
         # Paths to directories with input data, relative to CWD.
         self.reference_dir = reference_dir
         self.localization_dir = localization_dir
 
         # Parsed input resources stored by resource path.
         self.reference_resources = {}
         self.localization_resources = {}
@@ -109,16 +107,57 @@ class MergeContext(object):
 
     def read_legacy_resource(self, path):
         """Read a legacy resource and parse it into a dict."""
         parser = getParser(path)
         parser.readFile(path)
         # Transform the parsed result which is an iterator into a dict.
         return {entity.key: entity.val for entity in parser}
 
+    def read_reference_ftl(self, path):
+        """Read and parse a reference FTL file.
+
+        A missing resource file is a fatal error and will raise an
+        UnreadableReferenceError.
+        """
+        fullpath = os.path.join(self.reference_dir, path)
+        try:
+            return self.read_ftl_resource(fullpath)
+        except IOError:
+            error_message = 'Missing reference file: {}'.format(fullpath)
+            logging.getLogger('migrate').error(error_message)
+            raise UnreadableReferenceError(error_message)
+        except UnicodeDecodeError as err:
+            error_message = 'Error reading file {}: {}'.format(fullpath, err)
+            logging.getLogger('migrate').error(error_message)
+            raise UnreadableReferenceError(error_message)
+
+    def read_localization_ftl(self, path):
+        """Read and parse an existing localization FTL file.
+
+        Create a new FTL.Resource if the file doesn't exist or can't be
+        decoded.
+        """
+        fullpath = os.path.join(self.localization_dir, path)
+        try:
+            return self.read_ftl_resource(fullpath)
+        except IOError:
+            logger = logging.getLogger('migrate')
+            logger.info(
+                'Localization file {} does not exist and '
+                'it will be created'.format(path))
+            return FTL.Resource()
+        except UnicodeDecodeError:
+            logger = logging.getLogger('migrate')
+            logger.warn(
+                'Localization file {} has broken encoding. '
+                'It will be re-created and some translations '
+                'may be lost'.format(path))
+            return FTL.Resource()
+
     def maybe_add_localization(self, path):
         """Add a localization resource to migrate translations from.
 
         Only legacy resources can be added as migration sources.  The resource
         may be missing on disk.
 
         Uses a compare-locales parser to create a dict of (key, string value)
         tuples.
@@ -154,62 +193,62 @@ class MergeContext(object):
         Each transform is scanned for `Source` nodes which will be used to
         build the list of dependencies for the transformed message.
         """
         def get_sources(acc, cur):
             if isinstance(cur, Source):
                 acc.add((cur.path, cur.key))
             return acc
 
-        refpath = os.path.join(self.reference_dir, reference)
-        try:
-            ast = self.read_ftl_resource(refpath)
-        except IOError as err:
-            error_message = 'Missing reference file: {}'.format(refpath)
-            logging.getLogger('migrate').error(error_message)
-            raise UnreadableReferenceError(error_message)
-        except UnicodeDecodeError as err:
-            error_message = 'Error reading file {}: {}'.format(refpath, err)
-            logging.getLogger('migrate').error(error_message)
-            raise UnreadableReferenceError(error_message)
-        else:
-            # The reference file will be used by the merge function as
-            # a template for serializing the merge results.
-            self.reference_resources[target] = ast
+        reference_ast = self.read_reference_ftl(reference)
+        self.reference_resources[target] = reference_ast
 
         for node in transforms:
+            ident = node.id.name
             # Scan `node` for `Source` nodes and collect the information they
             # store into a set of dependencies.
             dependencies = fold(get_sources, node, set())
             # Set these sources as dependencies for the current transform.
-            self.dependencies[(target, node.id.name)] = dependencies
+            self.dependencies[(target, ident)] = dependencies
+
+            # The target Fluent message should exist in the reference file. If
+            # it doesn't, it's probably a typo.
+            if get_message(reference_ast.body, ident) is None:
+                logger = logging.getLogger('migrate')
+                logger.warn(
+                    '{} "{}" was not found in {}'.format(
+                        type(node).__name__, ident, reference))
 
-            # Read all legacy translation files defined in Source transforms.
+        # Keep track of localization resource paths which were defined as
+        # sources in the transforms.
+        expected_paths = set()
+
+        # Read all legacy translation files defined in Source transforms. This
+        # may fail but a single missing legacy resource doesn't mean that the
+        # migration can't succeed.
+        for dependencies in self.dependencies.values():
             for path in set(path for path, _ in dependencies):
+                expected_paths.add(path)
                 self.maybe_add_localization(path)
 
+        # However, if all legacy resources are missing, bail out early. There
+        # are no translations to migrate. We'd also get errors in hg annotate.
+        if len(expected_paths) > 0 and len(self.localization_resources) == 0:
+            error_message = 'No localization files were found'
+            logging.getLogger('migrate').error(error_message)
+            raise EmptyLocalizationError(error_message)
+
+        # Add the current transforms to any other transforms added earlier for
+        # this path.
         path_transforms = self.transforms.setdefault(target, [])
         path_transforms += transforms
 
         if target not in self.localization_resources:
-            fullpath = os.path.join(self.localization_dir, target)
-            try:
-                ast = self.read_ftl_resource(fullpath)
-            except IOError:
-                logger = logging.getLogger('migrate')
-                logger.info(
-                    'Localization file {} does not exist and '
-                    'it will be created'.format(target))
-            except UnicodeDecodeError:
-                logger = logging.getLogger('migrate')
-                logger.warn(
-                    'Localization file {} will be re-created and some '
-                    'translations might be lost'.format(target))
-            else:
-                self.localization_resources[target] = ast
+            target_ast = self.read_localization_ftl(target)
+            self.localization_resources[target] = target_ast
 
     def get_source(self, path, key):
         """Get an entity value from a localized legacy source.
 
         Used by the `Source` transform.
         """
         resource = self.localization_resources[path]
         return resource.get(key, None)
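
A note on the hunk above: `add_transforms` now does its validation up
front. The reference AST is read once via `read_reference_ftl`, every
transform id is checked against the reference (a missing id is almost
always a typo in the migration spec), and the context bails out with
`EmptyLocalizationError` when none of the expected legacy files could be
read. A minimal sketch of that last guard, with `dependencies` and
`localization_resources` standing in for the context's attributes:

import logging

from fluent.migrate.errors import EmptyLocalizationError

def check_legacy_files(dependencies, localization_resources):
    # Every legacy resource path mentioned by any Source transform.
    expected_paths = {path
                      for deps in dependencies.values()
                      for path, _ in deps}
    # Paths were expected but not a single one could be read: there is
    # nothing to migrate, and hg annotate would error out later anyway.
    if expected_paths and not localization_resources:
        message = 'No localization files were found'
        logging.getLogger('migrate').error(message)
        raise EmptyLocalizationError(message)
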
@@ -237,58 +276,76 @@ class MergeContext(object):
                 return False
         return True
 
     def merge_changeset(self, changeset=None):
         """Return a generator of FTL ASTs for the changeset.
 
         The input data must be configured earlier using the `add_*` methods.
         If given, `changeset` must be a set of (path, key) tuples describing
-        which legacy translations are to be merged.
+        which legacy translations are to be merged. If `changeset` is None,
+        all legacy translations will be allowed to be migrated in a single
+        changeset.
+
+        The inner `in_changeset` function is used to determine if a message
+        should be migrated for the given changeset. It compares the legacy
+        dependencies of the transform defined for the message with legacy
+        translations available in the changeset. If all dependencies are
+        present, the message will be migrated.
 
         Given `changeset`, return a dict whose keys are resource paths and
         values are `FTL.Resource` instances.  The values will also be used to
         update this context's existing localization resources.
         """
 
         if changeset is None:
             # Merge all known legacy translations. Used in tests.
             changeset = {
                 (path, key)
                 for path, strings in self.localization_resources.iteritems()
                 if not path.endswith('.ftl')
                 for key in strings.iterkeys()
             }
 
         for path, reference in self.reference_resources.iteritems():
-            current = self.localization_resources.get(path, FTL.Resource())
+            current = self.localization_resources[path]
             transforms = self.transforms.get(path, [])
 
             def in_changeset(ident):
-                """Check if entity should be merged.
+                """Check if a message should be migrated.
+
+                A message will be migrated only if all of its dependencies
+                are present in the currently processed changeset.
 
-                If at least one dependency of the entity is in the current
-                set of changeset, merge it.
+                If a transform defined for this message points to a missing
+                legacy translation, this message will not be merged. The
+                missing legacy dependency won't be present in the changeset.
+
+                This also means that partially translated messages (e.g.
+                constructed from two legacy strings out of which only one is
+                available) will never be migrated.
                 """
                 message_deps = self.dependencies.get((path, ident), None)
 
                 # Don't merge if we don't have a transform for this message.
                 if message_deps is None:
                     return False
 
                 # As a special case, if a transform exists but has no
                 # dependencies, it's a hardcoded `FTL.Node` which doesn't
                 # migrate any existing translation but rather creates a new
                 # one.  Merge it.
                 if len(message_deps) == 0:
                     return True
 
-                # If the intersection of the dependencies and the current
-                # changeset is non-empty, merge this message.
-                return message_deps & changeset
+                # Make sure all the dependencies are present in the current
+                # changeset. Partial migrations are not currently supported.
+                # See https://bugzilla.mozilla.org/show_bug.cgi?id=1321271
+                available_deps = message_deps & changeset
+                return message_deps == available_deps
 
             # Merge legacy translations with the existing ones using the
             # reference as a template.
             snapshot = merge_resource(
                 self, reference, current, transforms, in_changeset
             )
 
             # Skip this path if the messages in the merged snapshot are
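
The dependency check in `in_changeset` is now all-or-nothing. A sketch of
the rule in isolation (function and argument names are illustrative):

def should_migrate(message_deps, changeset):
    # No transform defined for this message: never merge it.
    if message_deps is None:
        return False
    # A transform with no dependencies is a hardcoded FTL.Node which
    # creates a brand new translation: always merge it.
    if len(message_deps) == 0:
        return True
    # Every (path, key) dependency must be in the changeset; partial
    # migrations are not supported (bug 1321271).
    return message_deps == message_deps & changeset
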
--- a/third_party/python/fluent/fluent/migrate/errors.py
+++ b/third_party/python/fluent/fluent/migrate/errors.py
@@ -1,10 +1,14 @@
 class MigrationError(ValueError):
     pass
 
 
+class EmptyLocalizationError(MigrationError):
+    pass
+
+
 class NotSupportedError(MigrationError):
     pass
 
 
 class UnreadableReferenceError(MigrationError):
     pass
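
`EmptyLocalizationError` slots into the existing `MigrationError`
hierarchy, so callers can handle it specifically or catch the base class.
A sketch of a hypothetical caller; `ctx` is a configured `MergeContext`
and the other names mirror the `add_transforms` arguments above:

from fluent.migrate.errors import EmptyLocalizationError

def add_locale_transforms(ctx, target, reference, transforms):
    try:
        ctx.add_transforms(target, reference, transforms)
    except EmptyLocalizationError:
        # Every legacy file was missing; skip this locale entirely.
        return False
    return True
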
--- a/third_party/python/fluent/fluent/migrate/merge.py
+++ b/third_party/python/fluent/fluent/migrate/merge.py
@@ -21,21 +21,17 @@ def merge_resource(ctx, reference, curre
         return [
             entry
             for entry in map(merge_entry, body)
             if entry is not None
         ]
 
     def merge_entry(entry):
         # All standalone comments will be merged.
-        if isinstance(entry, FTL.Comment):
-            return entry
-
-        # All section headers will be merged.
-        if isinstance(entry, FTL.Section):
+        if isinstance(entry, FTL.BaseComment):
             return entry
 
         # Ignore Junk
         if isinstance(entry, FTL.Junk):
             return None
 
         ident = entry.id.name
 
@@ -51,9 +47,9 @@ def merge_resource(ctx, reference, curre
         # Make sure this message is supposed to be migrated as part of the
         # current changeset.
         if transform is not None and in_changeset(ident):
             if transform.comment is None:
                 transform.comment = entry.comment
             return evaluate(ctx, transform)
 
     body = merge_body(reference.body)
-    return FTL.Resource(body, reference.comment)
+    return FTL.Resource(body)
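
With `FTL.Section` gone and the comment classes reorganized, the single
`FTL.BaseComment` check above covers plain, group and resource comments
alike. A quick illustration:

import fluent.syntax.ast as FTL

for entry in (FTL.Comment('plain'),
              FTL.GroupComment('group'),
              FTL.ResourceComment('resource')):
    # All three subclass BaseComment, so merge_entry keeps them all.
    assert isinstance(entry, FTL.BaseComment)
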
--- a/third_party/python/fluent/fluent/migrate/transforms.py
+++ b/third_party/python/fluent/fluent/migrate/transforms.py
@@ -59,16 +59,17 @@ them.
                     '#1': EXTERNAL_ARGUMENT('num')
                 }
             )
         )
     )
 """
 
 from __future__ import unicode_literals
+import itertools
 
 import fluent.syntax.ast as FTL
 from .errors import NotSupportedError
 
 
 def pattern_from_text(value):
     return FTL.Pattern([
         FTL.TextElement(value)
@@ -84,16 +85,48 @@ def evaluate(ctx, node):
 
     return node.traverse(eval_node)
 
 
 class Transform(FTL.BaseNode):
     def __call__(self, ctx):
         raise NotImplementedError
 
+    @staticmethod
+    def flatten_elements(elements):
+        '''Flatten a list of FTL nodes into a list of valid Pattern elements.'''
+        flattened = []
+        for element in elements:
+            if isinstance(element, FTL.Pattern):
+                flattened.extend(element.elements)
+            elif isinstance(element, FTL.PatternElement):
+                flattened.append(element)
+            elif isinstance(element, FTL.Expression):
+                flattened.append(FTL.Placeable(element))
+            else:
+                raise RuntimeError(
+                    'Expected Pattern, PatternElement or Expression')
+        return flattened
+
+    @staticmethod
+    def prune_text_elements(elements):
+        '''Join adjacent TextElements and remove empty ones'''
+        pruned = []
+        # Group elements in contiguous sequences of the same type.
+        for elem_type, elems in itertools.groupby(elements, key=type):
+            if elem_type is FTL.TextElement:
+                # Join adjacent TextElements.
+                text = FTL.TextElement(''.join(elem.value for elem in elems))
+                # And remove empty ones.
+                if len(text.value) > 0:
+                    pruned.append(text)
+            else:
+                pruned.extend(elems)
+        return pruned
+
 
 class Source(Transform):
     """Declare the source translation to be migrated with other transforms.
 
     When evaluated, `Source` returns a simple string value. Escaped characters
     are unescaped by the compare-locales parser according to the file format:
 
       - in properties files: \\uXXXX,
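
The two new helpers centralize the element clean-up which both
`REPLACE_IN_TEXT` and `CONCAT` need. A sketch of the expected behaviour
of `prune_text_elements`:

import fluent.syntax.ast as FTL
from fluent.migrate.transforms import Transform

elements = [
    FTL.TextElement('Hello'),
    FTL.TextElement(', '),                      # adjacent: joined
    FTL.TextElement('world'),
    FTL.Placeable(FTL.NumberExpression('1')),
    FTL.TextElement(''),                        # empty: dropped
]
pruned = Transform.prune_text_elements(elements)
# pruned == [TextElement('Hello, world'), Placeable(NumberExpression('1'))]
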
@@ -123,77 +156,61 @@ class COPY(Source):
     """Create a Pattern with the translation value from the given source."""
 
     def __call__(self, ctx):
         source = super(self.__class__, self).__call__(ctx)
         return pattern_from_text(source)
 
 
 class REPLACE_IN_TEXT(Transform):
-    """Replace various placeables in the translation with FTL placeables.
+    """Replace various placeables in the translation with FTL.
 
     The original placeables are defined as keys on the `replacements` dict.
-    For each key the value is defined as a list of FTL Expressions to be
-    interpolated.
+    For each key the value is defined as an FTL Pattern, Placeable,
+    TextElement or Expression to be interpolated.
     """
 
     def __init__(self, value, replacements):
         self.value = value
         self.replacements = replacements
 
     def __call__(self, ctx):
 
-        # Only replace placeable which are present in the translation.
+        # Only replace placeables which are present in the translation.
         replacements = {
             key: evaluate(ctx, repl)
             for key, repl in self.replacements.iteritems()
             if key in self.value
         }
 
         # Order the original placeables by their position in the translation.
         keys_in_order = sorted(
             replacements.keys(),
             lambda x, y: self.value.find(x) - self.value.find(y)
         )
 
-        # Used to reduce the `keys_in_order` list.
-        def replace(acc, cur):
-            """Convert original placeables and text into FTL Nodes.
-
-            For each original placeable the translation will be partitioned
-            around it and the text before it will be converted into an
-            `FTL.TextElement` and the placeable will be replaced with its
-            replacement. The text following the placebale will be fed again to
-            the `replace` function.
-            """
-
-            parts, rest = acc
-            before, key, after = rest.value.partition(cur)
-
-            placeable = FTL.Placeable(replacements[key])
+        # A list of PatternElements built from the legacy translation and the
+        # FTL replacements. It may contain empty or adjacent TextElements.
+        elements = []
+        tail = self.value
 
-            # Return the elements found and converted so far, and the remaining
-            # text which hasn't been scanned for placeables yet.
-            return (
-                parts + [FTL.TextElement(before), placeable],
-                FTL.TextElement(after)
-            )
+        # Convert original placeables and text into FTL Nodes. For each
+        # original placeable, the translation is partitioned around it:
+        # the text before it becomes an `FTL.TextElement` and the
+        # placeable itself is replaced with its FTL replacement.
+        for key in keys_in_order:
+            before, key, tail = tail.partition(key)
+            elements.append(FTL.TextElement(before))
+            elements.append(replacements[key])
 
-        def is_non_empty(elem):
-            """Used for filtering empty `FTL.TextElement` nodes out."""
-            return not isinstance(elem, FTL.TextElement) or len(elem.value)
+        # Don't forget the tail after the loop ends.
+        elements.append(FTL.TextElement(tail))
 
-        # Start with an empty list of elements and the original translation.
-        init = ([], FTL.TextElement(self.value))
-        parts, tail = reduce(replace, keys_in_order, init)
-
-        # Explicitly concat the trailing part to get the full list of elements
-        # and filter out the empty ones.
-        elements = filter(is_non_empty, parts + [tail])
-
+        elements = self.flatten_elements(elements)
+        elements = self.prune_text_elements(elements)
         return FTL.Pattern(elements)
 
 
 class REPLACE(Source):
     """Create a Pattern with interpolations from given source.
 
     Interpolations in the translation value from the given source will be
     replaced with FTL placeables using the `REPLACE_IN_TEXT` transform.
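
The rewritten `REPLACE_IN_TEXT` trades the old `reduce`-based scan for a
plain partition loop. Reduced to bare strings, the loop works like this
(a sketch; `<...>` marks the slot where an FTL replacement is spliced in):

def split_around(value, keys_in_order):
    parts, tail = [], value
    for key in keys_in_order:
        before, _, tail = tail.partition(key)
        parts.append(before)              # may be empty; pruned later
        parts.append('<' + key + '>')     # the replacement's slot
    parts.append(tail)                    # the tail after the loop
    return parts

assert split_around('Delete #1 files?', ['#1']) == \
    ['Delete ', '<#1>', ' files?']
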
@@ -240,89 +257,36 @@ class PLURALS(Source):
 
         def createVariant(zipped_enum):
             index, (key, variant) = zipped_enum
             # Run the legacy variant through `foreach` which returns an
             # `FTL.Node` describing the transformation required for each
             # variant.  Then evaluate it to a migrated FTL node.
             value = evaluate(ctx, self.foreach(variant))
             return FTL.Variant(
-                key=FTL.Symbol(key),
+                key=FTL.VariantName(key),
                 value=value,
                 default=index == last_index
             )
 
         select = FTL.SelectExpression(
             expression=selector,
             variants=map(createVariant, enumerate(zip(keys, variants)))
         )
 
         placeable = FTL.Placeable(select)
         return FTL.Pattern([placeable])
 
 
 class CONCAT(Transform):
-    """Concatenate elements of many patterns."""
+    """Create a new Pattern from Patterns, PatternElements and Expressions."""
 
-    def __init__(self, *patterns):
-        self.patterns = list(patterns)
+    def __init__(self, *elements, **kwargs):
+        # We want to support both passing elements as *elements in the
+        # migration specs and as elements=[]. The latter is used by
+        # FTL.BaseNode.traverse when it recreates the traversed node using its
+        # attributes as kwargs.
+        self.elements = list(kwargs.get('elements', elements))
 
     def __call__(self, ctx):
-        # Flatten the list of patterns of which each has a list of elements.
-        def concat_elements(acc, cur):
-            if isinstance(cur, FTL.Pattern):
-                acc.extend(cur.elements)
-                return acc
-            elif (isinstance(cur, FTL.TextElement) or
-                  isinstance(cur, FTL.Placeable)):
-                acc.append(cur)
-                return acc
-
-            raise RuntimeError(
-                'CONCAT accepts FTL Patterns, TextElements and Placeables.'
-            )
-
-        # Merge adjecent `FTL.TextElement` nodes.
-        def merge_adjecent_text(acc, cur):
-            if type(cur) == FTL.TextElement and len(acc):
-                last = acc[-1]
-                if type(last) == FTL.TextElement:
-                    last.value += cur.value
-                else:
-                    acc.append(cur)
-            else:
-                acc.append(cur)
-            return acc
-
-        elements = reduce(concat_elements, self.patterns, [])
-        elements = reduce(merge_adjecent_text, elements, [])
+        elements = self.flatten_elements(self.elements)
+        elements = self.prune_text_elements(elements)
         return FTL.Pattern(elements)
-
-    def traverse(self, fun):
-        def visit(value):
-            if isinstance(value, FTL.BaseNode):
-                return value.traverse(fun)
-            if isinstance(value, list):
-                return fun(map(visit, value))
-            else:
-                return fun(value)
-
-        node = self.__class__(
-            *[
-                visit(value) for value in self.patterns
-            ]
-        )
-
-        return fun(node)
-
-    def to_json(self):
-        def to_json(value):
-            if isinstance(value, FTL.BaseNode):
-                return value.to_json()
-            else:
-                return value
-
-        return {
-            'type': self.__class__.__name__,
-            'patterns': [
-                to_json(value) for value in self.patterns
-            ]
-        }
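
`CONCAT` no longer needs its own `traverse` and `to_json`: by accepting
`elements` as a keyword argument it round-trips through
`FTL.BaseNode.traverse`, which rebuilds nodes from their attributes.
Typical migration-spec usage, as a sketch (the entity names are
illustrative):

import fluent.syntax.ast as FTL
from fluent.migrate.transforms import CONCAT, COPY

title = CONCAT(
    COPY('aboutDownloads.dtd', 'aboutDownloads.title'),
    FTL.TextElement(' - '),
    COPY('aboutDownloads.dtd', 'aboutDownloads.header'),
)
# BaseNode.traverse will later recreate this as CONCAT(elements=[...]).
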
--- a/third_party/python/fluent/fluent/migrate/util.py
+++ b/third_party/python/fluent/fluent/migrate/util.py
@@ -37,20 +37,22 @@ def ftl_message_to_json(code):
 
 def to_json(merged_iter):
     return {
         path: resource.to_json()
         for path, resource in merged_iter
     }
 
 
+LOCALIZABLE_ENTRIES = (FTL.Message, FTL.Term)
+
 def get_message(body, ident):
     """Get message called `ident` from the `body` iterable."""
     for entity in body:
-        if isinstance(entity, FTL.Message) and entity.id.name == ident:
+        if isinstance(entity, LOCALIZABLE_ENTRIES) and entity.id.name == ident:
             return entity
 
 
 def get_transform(body, ident):
     """Get entity called `ident` from the `body` iterable."""
     for transform in body:
         if transform.id.name == ident:
             return transform
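
`get_message` now matches Terms too, since term identifiers (with their
leading dash) share the lookup namespace with messages. A usage sketch,
assuming `FluentParser` is exported from `fluent.syntax` as in this
patch:

from fluent.syntax import FluentParser
from fluent.migrate.util import get_message

resource = FluentParser().parse(
    '-brand-name = Firefox\n'
    'title = About { -brand-name }\n'
)
assert get_message(resource.body, '-brand-name') is not None
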
--- a/third_party/python/fluent/fluent/syntax/ast.py
+++ b/third_party/python/fluent/fluent/syntax/ast.py
@@ -3,16 +3,18 @@ import sys
 import json
 
 
 def to_json(value):
     if isinstance(value, BaseNode):
         return value.to_json()
     if isinstance(value, list):
         return list(map(to_json, value))
+    if isinstance(value, tuple):
+        return list(map(to_json, value))
     else:
         return value
 
 
 def from_json(value):
     if isinstance(value, dict):
         cls = getattr(sys.modules[__name__], value['type'])
         args = {
@@ -59,30 +61,28 @@ class BaseNode(object):
             """Call `fun` on `value` and its descendants."""
             if isinstance(value, BaseNode):
                 return value.traverse(fun)
             if isinstance(value, list):
                 return fun(list(map(visit, value)))
             else:
                 return fun(value)
 
+        # Use all attributes found on the node as kwargs to the constructor.
+        kwargs = vars(self).items()
         node = self.__class__(
-            **{
-                name: visit(value)
-                for name, value in vars(self).items()
-            }
-        )
+            **{name: visit(value) for name, value in kwargs})
 
         return fun(node)
 
     def equals(self, other, ignored_fields=['span']):
         """Compare two nodes.
 
         Nodes are deeply compared on a field by field basis. If possible, False
-        is returned early. When comparing attributes, tags and variants in
+        is returned early. When comparing attributes and variants in
         SelectExpressions, the order doesn't matter. By default, spans are not
         taken into account.
         """
 
         self_keys = set(vars(self).keys())
         other_keys = set(vars(other).keys())
 
         if ignored_fields:
@@ -93,27 +93,26 @@ class BaseNode(object):
         if self_keys != other_keys:
             return False
 
         for key in self_keys:
             field1 = getattr(self, key)
             field2 = getattr(other, key)
 
             # List-typed nodes are compared item-by-item.  When comparing
-            # attributes, tags and variants, the order of items doesn't matter.
+            # attributes and variants, the order of items doesn't matter.
             if isinstance(field1, list) and isinstance(field2, list):
                 if len(field1) != len(field2):
                     return False
 
                 # These functions are used to sort lists of items for when
                 # order doesn't matter.  Annotations are also lists but they
                 # can't be keyed on any of their fields reliably.
                 field_sorting = {
                     'attributes': lambda elem: elem.id.name,
-                    'tags': lambda elem: elem.name.name,
                     'variants': lambda elem: elem.key.name,
                 }
 
                 if key in field_sorting:
                     sorting = field_sorting[key]
                     field1 = sorted(field1, key=sorting)
                     field2 = sorted(field2, key=sorting)
 
@@ -147,52 +146,62 @@ class SyntaxNode(BaseNode):
         super(SyntaxNode, self).__init__(**kwargs)
         self.span = span
 
     def add_span(self, start, end):
         self.span = Span(start, end)
 
 
 class Resource(SyntaxNode):
-    def __init__(self, body=None, comment=None, **kwargs):
+    def __init__(self, body=None, **kwargs):
         super(Resource, self).__init__(**kwargs)
         self.body = body or []
-        self.comment = comment
 
 
 class Entry(SyntaxNode):
     def __init__(self, annotations=None, **kwargs):
         super(Entry, self).__init__(**kwargs)
         self.annotations = annotations or []
 
     def add_annotation(self, annot):
         self.annotations.append(annot)
 
 
 class Message(Entry):
-    def __init__(self, id, value=None, attributes=None, tags=None,
+    def __init__(self, id, value=None, attributes=None,
                  comment=None, **kwargs):
         super(Message, self).__init__(**kwargs)
         self.id = id
         self.value = value
         self.attributes = attributes or []
-        self.tags = tags or []
+        self.comment = comment
+
+class Term(Entry):
+    def __init__(self, id, value, attributes=None,
+                 comment=None, **kwargs):
+        super(Term, self).__init__(**kwargs)
+        self.id = id
+        self.value = value
+        self.attributes = attributes or []
         self.comment = comment
 
 class Pattern(SyntaxNode):
     def __init__(self, elements, **kwargs):
         super(Pattern, self).__init__(**kwargs)
         self.elements = elements
 
-class TextElement(SyntaxNode):
+class PatternElement(SyntaxNode):
+    pass
+
+class TextElement(PatternElement):
     def __init__(self, value, **kwargs):
         super(TextElement, self).__init__(**kwargs)
         self.value = value
 
-class Placeable(SyntaxNode):
+class Placeable(PatternElement):
     def __init__(self, expression, **kwargs):
         super(Placeable, self).__init__(**kwargs)
         self.expression = expression
 
 class Expression(SyntaxNode):
     def __init__(self, **kwargs):
         super(Expression, self).__init__(**kwargs)
 
@@ -230,32 +239,27 @@ class AttributeExpression(Expression):
 
 class VariantExpression(Expression):
     def __init__(self, id, key, **kwargs):
         super(VariantExpression, self).__init__(**kwargs)
         self.id = id
         self.key = key
 
 class CallExpression(Expression):
-    def __init__(self, callee, args, **kwargs):
+    def __init__(self, callee, args=None, **kwargs):
         super(CallExpression, self).__init__(**kwargs)
         self.callee = callee
-        self.args = args
+        self.args = args or []
 
 class Attribute(SyntaxNode):
     def __init__(self, id, value, **kwargs):
         super(Attribute, self).__init__(**kwargs)
         self.id = id
         self.value = value
 
-class Tag(SyntaxNode):
-    def __init__(self, name, **kwargs):
-        super(Tag, self).__init__(**kwargs)
-        self.name = name
-
 class Variant(SyntaxNode):
     def __init__(self, key, value, default=False, **kwargs):
         super(Variant, self).__init__(**kwargs)
         self.key = key
         self.value = value
         self.default = default
 
 class NamedArgument(SyntaxNode):
@@ -264,30 +268,41 @@ class NamedArgument(SyntaxNode):
         self.name = name
         self.val = val
 
 class Identifier(SyntaxNode):
     def __init__(self, name, **kwargs):
         super(Identifier, self).__init__(**kwargs)
         self.name = name
 
-class Symbol(Identifier):
+class VariantName(Identifier):
     def __init__(self, name, **kwargs):
-        super(Symbol, self).__init__(name, **kwargs)
+        super(VariantName, self).__init__(name, **kwargs)
 
-class Comment(Entry):
+
+class BaseComment(Entry):
     def __init__(self, content=None, **kwargs):
-        super(Comment, self).__init__(**kwargs)
+        super(BaseComment, self).__init__(**kwargs)
         self.content = content
 
-class Section(Entry):
-    def __init__(self, name, comment=None, **kwargs):
-        super(Section, self).__init__(**kwargs)
-        self.name = name
-        self.comment = comment
+
+class Comment(BaseComment):
+    def __init__(self, content=None, **kwargs):
+        super(Comment, self).__init__(content, **kwargs)
+
+
+class GroupComment(BaseComment):
+    def __init__(self, content=None, **kwargs):
+        super(GroupComment, self).__init__(content, **kwargs)
+
+
+class ResourceComment(BaseComment):
+    def __init__(self, content=None, **kwargs):
+        super(ResourceComment, self).__init__(content, **kwargs)
+
 
 class Function(Identifier):
     def __init__(self, name, **kwargs):
         super(Function, self).__init__(name, **kwargs)
 
 class Junk(Entry):
     def __init__(self, content=None, **kwargs):
         super(Junk, self).__init__(**kwargs)
--- a/third_party/python/fluent/fluent/syntax/errors.py
+++ b/third_party/python/fluent/fluent/syntax/errors.py
@@ -13,31 +13,42 @@ def get_error_message(code, args):
         return 'Generic error'
     if code == 'E0002':
         return 'Expected an entry start'
     if code == 'E0003':
         return 'Expected token: "{}"'.format(args[0])
     if code == 'E0004':
         return 'Expected a character from range: "{}"'.format(args[0])
     if code == 'E0005':
-        msg = 'Expected entry "{}" to have a value or attributes'
+        msg = 'Expected message "{}" to have a value or attributes'
         return msg.format(args[0])
     if code == 'E0006':
-        return 'Expected field: "{}"'.format(args[0])
+        msg = 'Expected term "{}" to have a value'
+        return msg.format(args[0])
     if code == 'E0007':
         return 'Keyword cannot end with a whitespace'
     if code == 'E0008':
-        return 'Callee has to be a simple identifier'
+        return 'The callee has to be a simple, upper-case identifier'
     if code == 'E0009':
-        return 'Key has to be a simple identifier'
+        return 'The key has to be a simple identifier'
     if code == 'E0010':
         return 'Expected one of the variants to be marked as default (*)'
     if code == 'E0011':
         return 'Expected at least one variant after "->"'
     if code == 'E0012':
-        return 'Tags cannot be added to messages with attributes'
+        return 'Expected value'
     if code == 'E0013':
         return 'Expected variant key'
     if code == 'E0014':
         return 'Expected literal'
     if code == 'E0015':
         return 'Only one variant can be marked as default (*)'
+    if code == 'E0016':
+        return 'Message references cannot be used as selectors'
+    if code == 'E0017':
+        return 'Variants cannot be used as selectors'
+    if code == 'E0018':
+        return 'Attributes of messages cannot be used as selectors'
+    if code == 'E0019':
+        return 'Attributes of terms cannot be used as placeables'
+    if code == 'E0020':
+        return 'Unterminated string expression'
     return code
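
The new codes follow the existing lookup convention, so they can be
exercised directly (the strings match the table above):

from fluent.syntax.errors import get_error_message

assert get_error_message('E0016', []) == \
    'Message references cannot be used as selectors'
assert get_error_message('E0020', []) == \
    'Unterminated string expression'
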
--- a/third_party/python/fluent/fluent/syntax/ftlstream.py
+++ b/third_party/python/fluent/fluent/syntax/ftlstream.py
@@ -1,17 +1,26 @@
 from __future__ import unicode_literals
 from .stream import ParserStream
 from .errors import ParseError
 
 
 INLINE_WS = (' ', '\t')
+SPECIAL_LINE_START_CHARS = ('}', '.', '[', '*')
 
 
 class FTLParserStream(ParserStream):
+    last_comment_zero_four_syntax = False
+
+    def skip_inline_ws(self):
+        while self.ch:
+            if self.ch not in INLINE_WS:
+                break
+            self.next()
+
     def peek_inline_ws(self):
         ch = self.current_peek()
         while ch:
             if ch not in INLINE_WS:
                 break
             ch = self.peek()
 
     def skip_blank_lines(self):
@@ -20,67 +29,152 @@ class FTLParserStream(ParserStream):
 
             if self.current_peek_is('\n'):
                 self.skip_to_peek()
                 self.next()
             else:
                 self.reset_peek()
                 break
 
-    def skip_inline_ws(self):
-        while self.ch:
-            if self.ch not in INLINE_WS:
+    def peek_blank_lines(self):
+        while True:
+            line_start = self.get_peek_index()
+
+            self.peek_inline_ws()
+
+            if self.current_peek_is('\n'):
+                self.peek()
+            else:
+                self.reset_peek(line_start)
                 break
-            self.next()
+
+    def skip_indent(self):
+        self.skip_blank_lines()
+        self.skip_inline_ws()
 
     def expect_char(self, ch):
         if self.ch == ch:
             self.next()
             return True
 
         if ch == '\n':
             # Unicode Character 'SYMBOL FOR NEWLINE' (U+2424)
             raise ParseError('E0003', '\u2424')
 
         raise ParseError('E0003', ch)
 
+    def expect_indent(self):
+        self.expect_char('\n')
+        self.skip_blank_lines()
+        self.expect_char(' ')
+        self.skip_inline_ws()
+
     def take_char_if(self, ch):
         if self.ch == ch:
             self.next()
             return True
         return False
 
     def take_char(self, f):
         ch = self.ch
         if ch is not None and f(ch):
             self.next()
             return ch
         return None
 
-    def is_id_start(self):
-        if self.ch is None:
+    def is_char_id_start(self, ch=None):
+        if ch is None:
             return False
 
-        cc = ord(self.ch)
-
+        cc = ord(ch)
         return (cc >= 97 and cc <= 122) or \
-               (cc >= 65 and cc <= 90) or \
-                cc == 95
+               (cc >= 65 and cc <= 90)
+
+    def is_entry_id_start(self):
+        if self.current_is('-'):
+            self.peek()
+
+        ch = self.current_peek()
+        is_id = self.is_char_id_start(ch)
+        self.reset_peek()
+        return is_id
 
     def is_number_start(self):
-        cc = ord(self.ch)
+        if self.current_is('-'):
+            self.peek()
+
+        cc = ord(self.current_peek())
+        is_digit = cc >= 48 and cc <= 57
+        self.reset_peek()
+        return is_digit
+
+    def is_char_pattern_continuation(self, ch):
+        return ch not in SPECIAL_LINE_START_CHARS
+
+    def is_peek_pattern_start(self):
+        self.peek_inline_ws()
+        ch = self.current_peek()
+
+        # Inline Patterns may start with any char.
+        if ch is not None and ch != '\n':
+            return True
+
+        return self.is_peek_next_line_pattern_start()
+
+    def is_peek_next_line_zero_four_style_comment(self):
+        if not self.current_peek_is('\n'):
+            return False
+
+        self.peek()
 
-        return (cc >= 48 and cc <= 57) or cc == 45
+        if self.current_peek_is('/'):
+            self.peek()
+            if self.current_peek_is('/'):
+                self.reset_peek()
+                return True
+
+        self.reset_peek()
+        return False
+
+    # -1 - any
+    #  0 - comment
+    #  1 - group comment
+    #  2 - resource comment
+    def is_peek_next_line_comment(self, level=-1):
+        if not self.current_peek_is('\n'):
+            return False
+
+        i = 0
+
+        while (i <= level or (level == -1 and i < 3)):
+            self.peek()
+            if not self.current_peek_is('#'):
+                if i != level and level != -1:
+                    self.reset_peek()
+                    return False
+                break
+            i += 1
+
+        self.peek()
+
+        if self.current_peek() in [' ', '\n']:
+            self.reset_peek()
+            return True
+
+        self.reset_peek()
+        return False
 
     def is_peek_next_line_variant_start(self):
         if not self.current_peek_is('\n'):
             return False
 
         self.peek()
 
+        self.peek_blank_lines()
+
         ptr = self.get_peek_index()
 
         self.peek_inline_ws()
 
         if (self.get_peek_index() - ptr == 0):
             self.reset_peek()
             return False
 
@@ -95,114 +189,99 @@ class FTLParserStream(ParserStream):
         return False
 
     def is_peek_next_line_attribute_start(self):
         if not self.current_peek_is('\n'):
             return False
 
         self.peek()
 
+        self.peek_blank_lines()
+
         ptr = self.get_peek_index()
 
         self.peek_inline_ws()
 
         if (self.get_peek_index() - ptr == 0):
             self.reset_peek()
             return False
 
         if self.current_peek_is('.'):
             self.reset_peek()
             return True
 
         self.reset_peek()
         return False
 
-    def is_peek_next_line_pattern(self):
+    def is_peek_next_line_pattern_start(self):
         if not self.current_peek_is('\n'):
             return False
 
         self.peek()
 
+        self.peek_blank_lines()
+
         ptr = self.get_peek_index()
 
         self.peek_inline_ws()
 
         if (self.get_peek_index() - ptr == 0):
             self.reset_peek()
             return False
 
-        if (self.current_peek_is('}') or
-                self.current_peek_is('.') or
-                self.current_peek_is('#') or
-                self.current_peek_is('[') or
-                self.current_peek_is('*')):
+        if not self.is_char_pattern_continuation(self.current_peek()):
             self.reset_peek()
             return False
 
         self.reset_peek()
         return True
 
-    def is_peek_next_line_tag_start(self):
-
-        if not self.current_peek_is('\n'):
-            return False
-
-        self.peek()
-
-        ptr = self.get_peek_index()
-
-        self.peek_inline_ws()
-
-        if (self.get_peek_index() - ptr == 0):
-            self.reset_peek()
-            return False
-
-        if self.current_peek_is('#'):
-            self.reset_peek()
-            return True
-
-        self.reset_peek()
-        return False
-
     def skip_to_next_entry_start(self):
         while self.ch:
             if self.current_is('\n') and not self.peek_char_is('\n'):
                 self.next()
 
-                if self.ch is None or self.is_id_start() or \
+                if self.ch is None or \
+                   self.is_entry_id_start() or \
+                   self.current_is('#') or \
                    (self.current_is('/') and self.peek_char_is('/')) or \
                    (self.current_is('[') and self.peek_char_is('[')):
                     break
             self.next()
 
-    def take_id_start(self):
-        if self.is_id_start():
+    def take_id_start(self, allow_term):
+        if allow_term and self.current_is('-'):
+            self.next()
+            return '-'
+
+        if self.is_char_id_start(self.ch):
             ret = self.ch
             self.next()
             return ret
 
-        raise ParseError('E0004', 'a-zA-Z_')
+        allowed_range = 'a-zA-Z-' if allow_term else 'a-zA-Z'
+        raise ParseError('E0004', allowed_range)
 
     def take_id_char(self):
         def closure(ch):
             cc = ord(ch)
             return ((cc >= 97 and cc <= 122) or
                     (cc >= 65 and cc <= 90) or
                     (cc >= 48 and cc <= 57) or
                     cc == 95 or cc == 45)
         return self.take_char(closure)
 
-    def take_symb_char(self):
+    def take_variant_name_char(self):
         def closure(ch):
             if ch is None:
                 return False
             cc = ord(ch)
             return (cc >= 97 and cc <= 122) or \
                    (cc >= 65 and cc <= 90) or \
                    (cc >= 48 and cc <= 57) or \
-                    cc == 95 or cc == 45 or cc == 32
+                cc == 95 or cc == 45 or cc == 32
         return self.take_char(closure)
 
     def take_digit(self):
         def closure(ch):
             cc = ord(ch)
             return (cc >= 48 and cc <= 57)
         return self.take_char(closure)
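
The identifier rules changed in two ways: an underscore may no longer
start an identifier, and a leading dash is allowed only where terms are
legal. The rule, restated as a standalone sketch:

def is_entry_id_start(text):
    # A term id may begin with '-'; the dash alone is not enough.
    if text.startswith('-'):
        text = text[1:]
    return bool(text) and ('a' <= text[0] <= 'z' or 'A' <= text[0] <= 'Z')

assert is_entry_id_start('-brand-name')
assert not is_entry_id_start('_private')   # '_' no longer starts an id
assert not is_entry_id_start('-')
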
--- a/third_party/python/fluent/fluent/syntax/parser.py
+++ b/third_party/python/fluent/fluent/syntax/parser.py
@@ -13,51 +13,57 @@ def with_span(fn):
         start = ps.get_index()
         node = fn(self, ps, *args)
 
         # Don't re-add the span if the node already has it.  This may happen
         # when one decorated function calls another decorated function.
         if node.span is not None:
             return node
 
-        # Spans of Messages and Sections should include the attached Comment.
-        if isinstance(node, ast.Message) or isinstance(node, ast.Section):
+        # Spans of Messages should include the attached Comment.
+        if isinstance(node, ast.Message):
             if node.comment is not None:
                 start = node.comment.span.start
 
         end = ps.get_index()
         node.add_span(start, end)
         return node
 
     return decorated
 
 
 class FluentParser(object):
     def __init__(self, with_spans=True):
         self.with_spans = with_spans
 
     def parse(self, source):
-        comment = None
-
         ps = FTLParserStream(source)
         ps.skip_blank_lines()
 
         entries = []
 
         while ps.current():
             entry = self.get_entry_or_junk(ps)
 
-            if isinstance(entry, ast.Comment) and len(entries) == 0:
-                comment = entry
+            if entry is None:
+                continue
+
+            if isinstance(entry, ast.Comment) \
+               and ps.last_comment_zero_four_syntax \
+               and len(entries) == 0:
+                comment = ast.ResourceComment(entry.content)
+                comment.span = entry.span
+                entries.append(comment)
             else:
                 entries.append(entry)
 
+            ps.last_comment_zero_four_syntax = False
             ps.skip_blank_lines()
 
-        res = ast.Resource(entries, comment)
+        res = ast.Resource(entries)
 
         if self.with_spans:
             res.add_span(0, ps.get_index())
 
         return res
 
     def parse_entry(self, source):
         ps = FTLParserStream(source)
@@ -82,182 +88,214 @@ class FluentParser(object):
             annot = ast.Annotation(err.code, err.args, err.message)
             annot.add_span(error_index, error_index)
             junk.add_annotation(annot)
             return junk
 
     def get_entry(self, ps):
         comment = None
 
-        if ps.current_is('/'):
+        if ps.current_is('/') or ps.current_is('#'):
             comment = self.get_comment(ps)
 
+            # The Comment content doesn't include the trailing newline. Consume
+            # this newline to be ready for the next entry. None stands for EOF.
+            ps.expect_char('\n' if ps.current() else None)
+
         if ps.current_is('['):
-            return self.get_section(ps, comment)
+            group_comment = self.get_group_comment_from_section(ps, comment)
+            if comment and self.with_spans:
+                # The Group Comment should start where the section comment
+                # starts.
+                group_comment.span.start = comment.span.start
+            return group_comment
 
-        if ps.is_id_start():
+        if ps.is_entry_id_start() \
+           and (comment is None or isinstance(comment, ast.Comment)):
             return self.get_message(ps, comment)
 
         if comment:
             return comment
 
         raise ParseError('E0002')
 
     @with_span
-    def get_comment(self, ps):
+    def get_zero_four_style_comment(self, ps):
         ps.expect_char('/')
         ps.expect_char('/')
         ps.take_char_if(' ')
 
         content = ''
 
-        def until_eol(x):
-            return x != '\n'
-
         while True:
-            ch = ps.take_char(until_eol)
+            ch = ps.take_char(lambda x: x != '\n')
             while ch:
                 content += ch
-                ch = ps.take_char(until_eol)
+                ch = ps.take_char(lambda x: x != '\n')
 
-            ps.next()
-
-            if ps.current_is('/'):
+            if ps.is_peek_next_line_zero_four_style_comment():
                 content += '\n'
                 ps.next()
                 ps.expect_char('/')
+                ps.expect_char('/')
                 ps.take_char_if(' ')
             else:
                 break
 
-        return ast.Comment(content)
+        comment = ast.Comment(content)
+        ps.last_comment_zero_four_syntax = True
+        return comment
 
     @with_span
-    def get_section(self, ps, comment):
+    def get_comment(self, ps):
+        if ps.current_is('/'):
+            return self.get_zero_four_style_comment(ps)
+
+        # 0 - comment
+        # 1 - group comment
+        # 2 - resource comment
+        level = -1
+        content = ''
+
+        while True:
+            i = -1
+            while ps.current_is('#') and (i < (2 if level == -1 else level)):
+                ps.next()
+                i += 1
+
+            if level == -1:
+                level = i
+
+            if not ps.current_is('\n'):
+                ps.expect_char(' ')
+                ch = ps.take_char(lambda x: x != '\n')
+                while ch:
+                    content += ch
+                    ch = ps.take_char(lambda x: x != '\n')
+
+            if ps.is_peek_next_line_comment(level):
+                content += '\n'
+                ps.next()
+            else:
+                break
+
+        if level == 0:
+            return ast.Comment(content)
+        elif level == 1:
+            return ast.GroupComment(content)
+        elif level == 2:
+            return ast.ResourceComment(content)
+
+    @with_span
+    def get_group_comment_from_section(self, ps, comment):
         ps.expect_char('[')
         ps.expect_char('[')
 
         ps.skip_inline_ws()
 
-        symb = self.get_symbol(ps)
+        self.get_variant_name(ps)
 
         ps.skip_inline_ws()
 
         ps.expect_char(']')
         ps.expect_char(']')
 
-        ps.skip_inline_ws()
+        if comment:
+            return ast.GroupComment(comment.content)
 
-        ps.expect_char('\n')
-
-        return ast.Section(symb, comment)
+        # A Section without a comment is like an empty Group Comment.
+        # Semantically it ends the previous group and starts a new one.
+        return ast.GroupComment('')
 
     @with_span
     def get_message(self, ps, comment):
-        id = self.get_identifier(ps)
+        id = self.get_entry_identifier(ps)
 
         ps.skip_inline_ws()
 
         pattern = None
         attrs = None
-        tags = None
 
+        # XXX Syntax 0.4 compat
         if ps.current_is('='):
             ps.next()
-            ps.skip_inline_ws()
 
-            pattern = self.get_pattern(ps)
+            if ps.is_peek_pattern_start():
+                ps.skip_indent()
+                pattern = self.get_pattern(ps)
+
+        if id.name.startswith('-') and pattern is None:
+            raise ParseError('E0006', id.name)
 
         if ps.is_peek_next_line_attribute_start():
             attrs = self.get_attributes(ps)
 
-        if ps.is_peek_next_line_tag_start():
-            if attrs is not None:
-                raise ParseError('E0012')
-            tags = self.get_tags(ps)
+        if id.name.startswith('-'):
+            return ast.Term(id, pattern, attrs, comment)
 
         if pattern is None and attrs is None:
             raise ParseError('E0005', id.name)
 
-        return ast.Message(id, pattern, attrs, tags, comment)
+        return ast.Message(id, pattern, attrs, comment)
 
     @with_span
     def get_attribute(self, ps):
         ps.expect_char('.')
 
         key = self.get_identifier(ps)
 
         ps.skip_inline_ws()
         ps.expect_char('=')
-        ps.skip_inline_ws()
-
-        value = self.get_pattern(ps)
 
-        if value is None:
-            raise ParseError('E0006', 'value')
+        if ps.is_peek_pattern_start():
+            ps.skip_indent()
+            value = self.get_pattern(ps)
+            return ast.Attribute(key, value)
 
-        return ast.Attribute(key, value)
+        raise ParseError('E0012')
 
     def get_attributes(self, ps):
         attrs = []
 
         while True:
-            ps.expect_char('\n')
-            ps.skip_inline_ws()
-
+            ps.expect_indent()
             attr = self.get_attribute(ps)
             attrs.append(attr)
 
             if not ps.is_peek_next_line_attribute_start():
                 break
         return attrs
 
-    @with_span
-    def get_tag(self, ps):
-        ps.expect_char('#')
-        symb = self.get_symbol(ps)
-        return ast.Tag(symb)
-
-    def get_tags(self, ps):
-        tags = []
-
-        while True:
-            ps.expect_char('\n')
-            ps.skip_inline_ws()
-
-            tag = self.get_tag(ps)
-            tags.append(tag)
-
-            if not ps.is_peek_next_line_tag_start():
-                break
-        return tags
+    def get_entry_identifier(self, ps):
+        return self.get_identifier(ps, True)
 
     @with_span
-    def get_identifier(self, ps):
+    def get_identifier(self, ps, allow_term=False):
         name = ''
 
-        name += ps.take_id_start()
+        name += ps.take_id_start(allow_term)
 
         ch = ps.take_id_char()
         while ch:
             name += ch
             ch = ps.take_id_char()
 
         return ast.Identifier(name)
 
     def get_variant_key(self, ps):
         ch = ps.current()
 
         if ch is None:
             raise ParseError('E0013')
 
-        if ps.is_number_start():
+        cc = ord(ch)
+        if ((cc >= 48 and cc <= 57) or cc == 45):  # 0-9, -
             return self.get_number(ps)
 
-        return self.get_symbol(ps)
+        return self.get_variant_name(ps)
 
     @with_span
     def get_variant(self, ps, has_default):
         default_index = False
 
         if ps.current_is('*'):
             if has_default:
                 raise ParseError('E0015')
@@ -265,62 +303,58 @@ class FluentParser(object):
             default_index = True
 
         ps.expect_char('[')
 
         key = self.get_variant_key(ps)
 
         ps.expect_char(']')
 
-        ps.skip_inline_ws()
-
-        value = self.get_pattern(ps)
+        if ps.is_peek_pattern_start():
+            ps.skip_indent()
+            value = self.get_pattern(ps)
+            return ast.Variant(key, value, default_index)
 
-        if value is None:
-            raise ParseError('E0006', 'value')
-
-        return ast.Variant(key, value, default_index)
+        raise ParseError('E0012')
 
     def get_variants(self, ps):
         variants = []
         has_default = False
 
         while True:
-            ps.expect_char('\n')
-            ps.skip_inline_ws()
-
+            ps.expect_indent()
             variant = self.get_variant(ps, has_default)
 
             if variant.default:
                 has_default = True
 
             variants.append(variant)
 
             if not ps.is_peek_next_line_variant_start():
                 break
 
         if not has_default:
             raise ParseError('E0010')
 
         return variants
 
     @with_span
-    def get_symbol(self, ps):
+    def get_variant_name(self, ps):
         name = ''
 
-        name += ps.take_id_start()
+        name += ps.take_id_start(False)
 
         while True:
-            ch = ps.take_symb_char()
+            ch = ps.take_variant_name_char()
             if ch:
                 name += ch
             else:
                 break
 
-        return ast.Symbol(name.rstrip())
+        return ast.VariantName(name.rstrip())
 
     def get_digits(self, ps):
         num = ''
 
         ch = ps.take_digit()
         while ch:
             num += ch
             ch = ps.take_digit()
@@ -347,27 +381,22 @@ class FluentParser(object):
 
         return ast.NumberExpression(num)
 
     @with_span
     def get_pattern(self, ps):
         elements = []
         ps.skip_inline_ws()
 
-        # Special-case: trim leading whitespace and newlines.
-        if ps.is_peek_next_line_pattern():
-            ps.skip_blank_lines()
-            ps.skip_inline_ws()
-
         while ps.current():
             ch = ps.current()
 
             # The end condition for get_pattern's while loop is a newline
             # which is not followed by a valid pattern continuation.
-            if ch == '\n' and not ps.is_peek_next_line_pattern():
+            if ch == '\n' and not ps.is_peek_next_line_pattern_start():
                 break
 
             if ch == '{':
                 element = self.get_placeable(ps)
             else:
                 element = self.get_text_element(ps)
 
             elements.append(element)
@@ -380,17 +409,17 @@ class FluentParser(object):
 
         while ps.current():
             ch = ps.current()
 
             if ch == '{':
                 return ast.TextElement(buf)
 
             if ch == '\n':
-                if not ps.is_peek_next_line_pattern():
+                if not ps.is_peek_next_line_pattern_start():
                     return ast.TextElement(buf)
 
                 ps.next()
                 ps.skip_inline_ws()
 
                 # Add the new line to the buffer
                 buf += ch
                 continue
@@ -418,48 +447,59 @@ class FluentParser(object):
         return ast.Placeable(expression)
 
     @with_span
     def get_expression(self, ps):
 
         if ps.is_peek_next_line_variant_start():
             variants = self.get_variants(ps)
 
-            ps.expect_char('\n')
-            ps.expect_char(' ')
-            ps.skip_inline_ws()
+            ps.expect_indent()
 
             return ast.SelectExpression(None, variants)
 
         ps.skip_inline_ws()
 
         selector = self.get_selector_expression(ps)
 
         ps.skip_inline_ws()
 
         if ps.current_is('-'):
             ps.peek()
+
             if not ps.current_peek_is('>'):
                 ps.reset_peek()
-            else:
-                ps.next()
-                ps.next()
+                return selector
+
+            if isinstance(selector, ast.MessageReference):
+                raise ParseError('E0016')
 
-                ps.skip_inline_ws()
+            if isinstance(selector, ast.AttributeExpression) and \
+               not selector.id.name.startswith('-'):
+                raise ParseError('E0018')
 
-                variants = self.get_variants(ps)
+            if isinstance(selector, ast.VariantExpression):
+                raise ParseError('E0017')
 
-                if len(variants) == 0:
-                    raise ParseError('E0011')
+            ps.next()
+            ps.next()
+
+            ps.skip_inline_ws()
+
+            variants = self.get_variants(ps)
 
-                ps.expect_char('\n')
-                ps.expect_char(' ')
-                ps.skip_inline_ws()
+            if len(variants) == 0:
+                raise ParseError('E0011')
+
+            ps.expect_indent()
 
-                return ast.SelectExpression(selector, variants)
+            return ast.SelectExpression(selector, variants)
+        elif isinstance(selector, ast.AttributeExpression) and \
+                selector.id.name.startswith('-'):
+            raise ParseError('E0019')
 
         return selector
 
     @with_span
     def get_selector_expression(self, ps):
         literal = self.get_literal(ps)
 
         if not isinstance(literal, ast.MessageReference):
@@ -480,17 +520,17 @@ class FluentParser(object):
 
         if (ch == '('):
             ps.next()
 
             args = self.get_call_args(ps)
 
             ps.expect_char(')')
 
-            if not re.match('^[A-Z_-]+$', literal.id.name):
+            if not re.match('^[A-Z][A-Z_?-]*$', literal.id.name):
                 raise ParseError('E0008')
 
             return ast.CallExpression(
                 ast.Function(literal.id.name),
                 args
             )
 
         return literal
@@ -537,43 +577,48 @@ class FluentParser(object):
 
         return args
 
     def get_arg_val(self, ps):
         if ps.is_number_start():
             return self.get_number(ps)
         elif ps.current_is('"'):
             return self.get_string(ps)
-        raise ParseError('E0006', 'value')
+        raise ParseError('E0012')
 
     @with_span
     def get_string(self, ps):
         val = ''
 
         ps.expect_char('"')
 
-        ch = ps.take_char(lambda x: x != '"')
+        ch = ps.take_char(lambda x: x != '"' and x != '\n')
         while ch:
             val += ch
-            ch = ps.take_char(lambda x: x != '"')
+            ch = ps.take_char(lambda x: x != '"' and x != '\n')
+
+        if ps.current_is('\n'):
+            raise ParseError('E0020')
 
         ps.next()
 
         return ast.StringExpression(val)
 
     @with_span
     def get_literal(self, ps):
         ch = ps.current()
 
         if ch is None:
             raise ParseError('E0014')
 
-        if ps.is_number_start():
+        if ch == '$':
+            ps.next()
+            name = self.get_identifier(ps)
+            return ast.ExternalArgument(name)
+        elif ps.is_entry_id_start():
+            name = self.get_entry_identifier(ps)
+            return ast.MessageReference(name)
+        elif ps.is_number_start():
             return self.get_number(ps)
         elif ch == '"':
             return self.get_string(ps)
-        elif ch == '$':
-            ps.next()
-            name = self.get_identifier(ps)
-            return ast.ExternalArgument(name)
 
-        name = self.get_identifier(ps)
-        return ast.MessageReference(name)
+        raise ParseError('E0014')
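
The parser now distinguishes the three comment sigils and still upgrades
a leading `//` block to a ResourceComment for 0.4 compatibility. A sketch
of the new behaviour through the public API:

from fluent.syntax import FluentParser

resource = FluentParser().parse(
    '### Notes that apply to the whole file\n'
    '## A group of messages\n'
    '# A plain comment\n'
)
print([type(entry).__name__ for entry in resource.body])
# ['ResourceComment', 'GroupComment', 'Comment']
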
--- a/third_party/python/fluent/fluent/syntax/serializer.py
+++ b/third_party/python/fluent/fluent/syntax/serializer.py
@@ -11,100 +11,108 @@ def indent(content):
 def contain_new_line(elems):
     return bool([
         elem for elem in elems
         if isinstance(elem, ast.TextElement) and "\n" in elem.value
     ])
 
 
 class FluentSerializer(object):
+    HAS_ENTRIES = 1
+
     def __init__(self, with_junk=False):
         self.with_junk = with_junk
 
     def serialize(self, resource):
+        if not isinstance(resource, ast.Resource):
+            raise Exception('Unknown resource type: {}'.format(type(resource)))
+
+        state = 0
+
         parts = []
-        if resource.comment:
-            parts.append(
-                "{}\n\n".format(
-                    serialize_comment(resource.comment)
-                )
-            )
         for entry in resource.body:
             if not isinstance(entry, ast.Junk) or self.with_junk:
-                parts.append(self.serialize_entry(entry))
+                parts.append(self.serialize_entry(entry, state))
+                if not state & self.HAS_ENTRIES:
+                    state |= self.HAS_ENTRIES
 
         return "".join(parts)
 
-    def serialize_entry(self, entry):
+    def serialize_entry(self, entry, state=0):
         if isinstance(entry, ast.Message):
             return serialize_message(entry)
-        if isinstance(entry, ast.Section):
-            return serialize_section(entry)
+        if isinstance(entry, ast.Term):
+            return serialize_message(entry)
         if isinstance(entry, ast.Comment):
-            return "\n{}\n\n".format(serialize_comment(entry))
+            if state & self.HAS_ENTRIES:
+                return "\n{}\n\n".format(serialize_comment(entry))
+            return "{}\n\n".format(serialize_comment(entry))
+        if isinstance(entry, ast.GroupComment):
+            if state & self.HAS_ENTRIES:
+                return "\n{}\n\n".format(serialize_group_comment(entry))
+            return "{}\n\n".format(serialize_group_comment(entry))
+        if isinstance(entry, ast.ResourceComment):
+            if state & self.HAS_ENTRIES:
+                return "\n{}\n\n".format(serialize_resource_comment(entry))
+            return "{}\n\n".format(serialize_resource_comment(entry))
         if isinstance(entry, ast.Junk):
             return serialize_junk(entry)
-        raise Exception('Unknown entry type: {}'.format(entry.type))
+        raise Exception('Unknown entry type: {}'.format(type(entry)))
+
+    def serialize_expression(self, expr):
+        return serialize_expression(expr)
 
 
 def serialize_comment(comment):
-    return "".join([
-        "{}{}".format("// ", line)
-        for line in comment.content.splitlines(True)
+    return "\n".join([
+        "#" if len(line) == 0 else "# {}".format(line)
+        for line in comment.content.splitlines(False)
     ])
 
 
-def serialize_section(section):
-    if section.comment:
-        return "\n\n{}\n[[ {} ]]\n\n".format(
-            serialize_comment(section.comment),
-            serialize_symbol(section.name)
-        )
-    else:
-        return "\n\n[[ {} ]]\n\n".format(
-            serialize_symbol(section.name)
-        )
+def serialize_group_comment(comment):
+    return "\n".join([
+        "##" if len(line) == 0 else "## {}".format(line)
+        for line in comment.content.splitlines(False)
+    ])
+
+
+def serialize_resource_comment(comment):
+    return "\n".join([
+        "###" if len(line) == 0 else "### {}".format(line)
+        for line in comment.content.splitlines(False)
+    ])
 
 
 def serialize_junk(junk):
     return junk.content
 
 
 def serialize_message(message):
     parts = []
 
     if message.comment:
         parts.append(serialize_comment(message.comment))
         parts.append("\n")
 
     parts.append(serialize_identifier(message.id))
+    parts.append(" =")
 
     if message.value:
-        parts.append(" =")
         parts.append(serialize_value(message.value))
 
-    if message.tags:
-        for tag in message.tags:
-            parts.append(serialize_tag(tag))
-
     if message.attributes:
         for attribute in message.attributes:
             parts.append(serialize_attribute(attribute))
 
     parts.append("\n")
 
     return ''.join(parts)
 
 
-def serialize_tag(tag):
-    return "\n    #{}".format(
-        serialize_symbol(tag.name),
-    )
-
-
 def serialize_attribute(attribute):
     return "\n    .{} ={}".format(
         serialize_identifier(attribute.id),
         indent(serialize_value(attribute.value))
     )
 
 
 def serialize_value(pattern):
@@ -122,35 +130,39 @@ def serialize_pattern(pattern):
     ])
 
 
 def serialize_element(element):
     if isinstance(element, ast.TextElement):
         return serialize_text_element(element)
     if isinstance(element, ast.Placeable):
         return serialize_placeable(element)
-    raise Exception('Unknown element type: {}'.format(element.type))
+    raise Exception('Unknown element type: {}'.format(type(element)))
 
 
 def serialize_text_element(text):
     return text.value
 
 
 def serialize_placeable(placeable):
     expr = placeable.expression
 
     if isinstance(expr, ast.Placeable):
-        return "{{{}}}".format(
-            serialize_placeable(expr))
+        return "{{{}}}".format(serialize_placeable(expr))
     if isinstance(expr, ast.SelectExpression):
-        return "{{{}}}".format(
-            serialize_select_expression(expr))
+        # Special-case select expressions to control the whitespace around
+        # the opening and the closing brace.
+        if expr.expression is not None:
+            # A select expression with a selector.
+            return "{{ {}}}".format(serialize_select_expression(expr))
+        else:
+            # A variant list without a selector.
+            return "{{{}}}".format(serialize_select_expression(expr))
     if isinstance(expr, ast.Expression):
-        return "{{ {} }}".format(
-            serialize_expression(expr))
+        return "{{ {} }}".format(serialize_expression(expr))
 
 
 def serialize_expression(expression):
     if isinstance(expression, ast.StringExpression):
         return serialize_string_expression(expression)
     if isinstance(expression, ast.NumberExpression):
         return serialize_number_expression(expression)
     if isinstance(expression, ast.MessageReference):
@@ -158,17 +170,19 @@ def serialize_expression(expression):
     if isinstance(expression, ast.ExternalArgument):
         return serialize_external_argument(expression)
     if isinstance(expression, ast.AttributeExpression):
         return serialize_attribute_expression(expression)
     if isinstance(expression, ast.VariantExpression):
         return serialize_variant_expression(expression)
     if isinstance(expression, ast.CallExpression):
         return serialize_call_expression(expression)
-    raise Exception('Unknown expression type: {}'.format(expression.type))
+    if isinstance(expression, ast.SelectExpression):
+        return serialize_select_expression(expression)
+    raise Exception('Unknown expression type: {}'.format(type(expression)))
 
 
 def serialize_string_expression(expr):
     return "\"{}\"".format(expr.value)
 
 
 def serialize_number_expression(expr):
     return expr.value
@@ -181,17 +195,17 @@ def serialize_message_reference(expr):
 def serialize_external_argument(expr):
     return "${}".format(serialize_identifier(expr.id))
 
 
 def serialize_select_expression(expr):
     parts = []
 
     if expr.expression:
-        selector = " {} ->".format(
+        selector = "{} ->".format(
             serialize_expression(expr.expression)
         )
         parts.append(selector)
 
     for variant in expr.variants:
         parts.append(serialize_variant(variant))
 
     parts.append("\n")
@@ -245,29 +259,29 @@ def serialize_named_argument(arg):
     )
 
 
 def serialize_argument_value(argval):
     if isinstance(argval, ast.StringExpression):
         return serialize_string_expression(argval)
     if isinstance(argval, ast.NumberExpression):
         return serialize_number_expression(argval)
-    raise Exception('Unknown argument type: {}'.format(argval.type))
+    raise Exception('Unknown argument type: {}'.format(type(argval)))
 
 
 def serialize_identifier(identifier):
     return identifier.name
 
 
-def serialize_symbol(symbol):
+def serialize_variant_name(symbol):
     return symbol.name
 
 
 def serialize_variant_key(key):
-    if isinstance(key, ast.Symbol):
-        return serialize_symbol(key)
+    if isinstance(key, ast.VariantName):
+        return serialize_variant_name(key)
     if isinstance(key, ast.NumberExpression):
         return serialize_number_expression(key)
-    raise Exception('Unknown variant key type: {}'.format(key.type))
+    raise Exception('Unknown variant key type: {}'.format(type(key)))
 
 
 def serialize_function(function):
     return function.name
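
Serializer note: the rewrite above drops the old "[[ section ]]" syntax in favour
of the Fluent 0.6 comment levels (# for standalone comments, ## for group
comments, ### for resource comments) and threads a HAS_ENTRIES bit through
serialize() so that only standalone comments appearing after the first entry get
a separating blank line. A minimal round-trip sketch, assuming fluent.syntax
0.6.x is importable; the resource text itself is illustrative:

    from fluent.syntax.parser import FluentParser
    from fluent.syntax.serializer import FluentSerializer

    source = (
        "### Resource-wide comment\n\n"
        "## Group comment\n\n"
        "# Message comment\n"
        "hello = Hello, { $user }!\n"
    )

    resource = FluentParser().parse(source)
    # The leading ResourceComment serializes without a preceding newline;
    # once HAS_ENTRIES is set, later standalone comments get one.
    print(FluentSerializer().serialize(resource))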
--- a/third_party/python/fluent/fluent/syntax/stream.py
+++ b/third_party/python/fluent/fluent/syntax/stream.py
@@ -38,17 +38,17 @@ class ParserStream():
 
         if len(self.buf) == 0:
             self.ch = self.iter.next()
         else:
             self.ch = self.buf.pop(0)
 
         self.index += 1
 
-        if self.ch == None:
+        if self.ch is None:
             self.iter_end = True
             self.peek_end = True
 
         self.peek_index = self.index
 
         return self.ch
 
     def current(self):
@@ -99,19 +99,24 @@ class ParserStream():
             return False
 
         ret = self.peek()
 
         self.peek_index -= 1
 
         return ret == ch
 
-    def reset_peek(self):
-        self.peek_index = self.index
-        self.peek_end = self.iter_end
+    def reset_peek(self, pos=False):
+        if pos:
+            if pos < self.peek_index:
+                self.peek_end = False
+            self.peek_index = pos
+        else:
+            self.peek_index = self.index
+            self.peek_end = self.iter_end
 
     def skip_to_peek(self):
         diff = self.peek_index - self.index
 
         for i in range(0, diff):
             self.ch = self.buf.pop(0)
 
         self.index = self.peek_index
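
Stream note: reset_peek() can now rewind the peek cursor to an explicit position
instead of always snapping back to the read index. Beware that the sentinel
default is False, so passing position 0 falls through to the full reset branch.
A hypothetical usage sketch; the ParserStream constructor argument and
get_peek_index() are assumed from the rest of the module:

    stream = ParserStream('abc')
    stream.peek()                # move the peek cursor past 'a'
    saved = stream.get_peek_index()
    stream.peek()                # look further ahead
    stream.reset_peek(saved)     # rewind to the saved peek position
    stream.reset_peek()          # or snap back to the read index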
--- a/third_party/python/fluent/tools/migrate/blame.py
+++ b/third_party/python/fluent/tools/migrate/blame.py
@@ -1,60 +1,64 @@
 import argparse
 import json
+import os
+
+from compare_locales.parser import getParser, Junk
 import hglib
 from hglib.util import b, cmdbuilder
-from compare_locales.parser import getParser, Junk
 
 
 class Blame(object):
-    def __init__(self, repopath):
-        self.client = hglib.open(repopath)
+    def __init__(self, client):
+        self.client = client
         self.users = []
         self.blame = {}
 
-    def main(self):
+    def attribution(self, file_paths):
         args = cmdbuilder(
-            b('annotate'), self.client.root(), d=True, u=True, T='json')
+            b('annotate'), *map(b, file_paths), template='json',
+            date=True, user=True, cwd=self.client.root())
         blame_json = ''.join(self.client.rawcommand(args))
         file_blames = json.loads(blame_json)
 
         for file_blame in file_blames:
             self.handleFile(file_blame)
 
         return {'authors': self.users,
                 'blame': self.blame}
 
     def handleFile(self, file_blame):
-        abspath = file_blame['abspath']
+        path = file_blame['path']
 
         try:
-            parser = getParser(abspath)
+            parser = getParser(path)
         except UserWarning:
             return
 
-        self.blame[abspath] = {}
+        self.blame[path] = {}
 
-        parser.readFile(file_blame['path'])
+        parser.readFile(os.path.join(self.client.root(), path))
         entities, emap = parser.parse()
         for e in entities:
             if isinstance(e, Junk):
                 continue
             entity_lines = file_blame['lines'][
                 (e.value_position()[0] - 1):e.value_position(-1)[0]
             ]
             # ignore timezone
             entity_lines.sort(key=lambda blame: -blame['date'][0])
             line_blame = entity_lines[0]
             user = line_blame['user']
             timestamp = line_blame['date'][0]  # ignore timezone
             if user not in self.users:
                 self.users.append(user)
             userid = self.users.index(user)
-            self.blame[abspath][e.key] = [userid, timestamp]
+            self.blame[path][e.key] = [userid, timestamp]
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument("repopath")
+    parser.add_argument('repo_path')
+    parser.add_argument('file_path', nargs='+')
     args = parser.parse_args()
-    blame = Blame(args.repopath)
-    blimey = blame.main()
-    print(json.dumps(blimey, indent=4, separators=(',', ': ')))
+    blame = Blame(hglib.open(args.repo_path))
+    attrib = blame.attribution(args.file_path)
+    print(json.dumps(attrib, indent=4, separators=(',', ': ')))
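
Blame note: Blame now borrows an open hglib client instead of opening the
repository itself, annotates only the files passed to attribution(), and keys
results by repository-relative path. The returned structure pairs an author
table with per-file, per-entity [author-index, timestamp] records; roughly,
with illustrative values:

    {
        'authors': ['lisa@example.com'],
        'blame': {
            'browser/chrome/browser/browser.dtd': {
                'mainWindow.title': [0, 1518048000],
            },
        },
    }

Run as a script, it now takes the repository path followed by one or more file
paths (paths illustrative): python blame.py /path/to/l10n-central/de
browser/chrome/browser/browser.dtd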
--- a/third_party/python/fluent/tools/migrate/examples/about_dialog.py
+++ b/third_party/python/fluent/tools/migrate/examples/about_dialog.py
@@ -1,14 +1,13 @@
 # coding=utf8
 
 import fluent.syntax.ast as FTL
-from fluent.migrate import (
-    CONCAT, EXTERNAL_ARGUMENT, MESSAGE_REFERENCE, COPY, REPLACE
-)
+from fluent.migrate import CONCAT, COPY, REPLACE
+from fluent.migrate.helpers import EXTERNAL_ARGUMENT, MESSAGE_REFERENCE
 
 
 def migrate(ctx):
     """Migrate about:dialog, part {index}"""
 
     ctx.add_transforms('browser/about_dialog.ftl', 'about_dialog.ftl', [
         FTL.Message(
             id=FTL.Identifier('update-failed'),
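
Example note: the migration helpers EXTERNAL_ARGUMENT and MESSAGE_REFERENCE
moved from the fluent.migrate package root into fluent.migrate.helpers, so the
bundled recipes are updated to the split imports. For orientation, a complete
COPY transform under the new layout looks roughly like this; the DTD path and
key are illustrative, only the FTL file and message id are taken from the hunk
above:

    import fluent.syntax.ast as FTL
    from fluent.migrate import COPY

    def migrate(ctx):
        """Migrate about:dialog, part {index}"""
        ctx.add_transforms('browser/about_dialog.ftl', 'about_dialog.ftl', [
            FTL.Message(
                id=FTL.Identifier('update-failed'),
                # Copy the legacy DTD value over unchanged.
                value=COPY(
                    'browser/chrome/browser/aboutDialog.dtd',
                    'update.failed.start'
                )
            ),
        ])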
--- a/third_party/python/fluent/tools/migrate/examples/about_downloads.py
+++ b/third_party/python/fluent/tools/migrate/examples/about_downloads.py
@@ -1,12 +1,13 @@
 # coding=utf8
 
 import fluent.syntax.ast as FTL
-from fluent.migrate import EXTERNAL_ARGUMENT, COPY, PLURALS, REPLACE_IN_TEXT
+from fluent.migrate import COPY, PLURALS, REPLACE_IN_TEXT
+from fluent.migrate.helpers import EXTERNAL_ARGUMENT
 
 
 def migrate(ctx):
     """Migrate about:download in Firefox for Android, part {index}"""
 
     ctx.add_transforms('mobile/about_downloads.ftl', 'about_downloads.ftl', [
         FTL.Message(
             id=FTL.Identifier('title'),
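
Example note: the same import split applies here; PLURALS and REPLACE_IN_TEXT
remain importable from the package root while EXTERNAL_ARGUMENT now comes from
fluent.migrate.helpers. A rough sketch of how these compose into a plural-aware
value; the .properties path, key, argument name, and the exact PLURALS/foreach
signature are all assumed from recipes of this era:

    from fluent.migrate import PLURALS, REPLACE_IN_TEXT
    from fluent.migrate.helpers import EXTERNAL_ARGUMENT

    # Expand a semicolon-separated plural string into CLDR variants,
    # rewriting the legacy #1 placeholder in each variant.
    value = PLURALS(
        'mobile/android/chrome/aboutDownloads.properties',
        'downloadMessage',
        EXTERNAL_ARGUMENT('num'),
        lambda text: REPLACE_IN_TEXT(
            text,
            {'#1': EXTERNAL_ARGUMENT('num')}
        )
    )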
--- a/third_party/python/fluent/tools/migrate/examples/bug_1291693.py
+++ b/third_party/python/fluent/tools/migrate/examples/bug_1291693.py
@@ -1,12 +1,13 @@
 # coding=utf8
 
 import fluent.syntax.ast as FTL
-from fluent.migrate import MESSAGE_REFERENCE, COPY, REPLACE
+from fluent.migrate import COPY, REPLACE
+from fluent.migrate.helpers import MESSAGE_REFERENCE
 
 
 def migrate(ctx):
     """Bug 1291693 - Migrate the menubar to FTL, part {index}"""
 
     ctx.add_transforms('browser/menubar.ftl', 'menubar.ftl', [
         FTL.Message(
             id=FTL.Identifier('file-menu'),
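
Example note: the menubar recipe gets the same treatment. A REPLACE transform
substitutes fixed substrings in a legacy value with Fluent expressions; sketched
under the new imports, with the DTD path, key, and entity name illustrative:

    from fluent.migrate import REPLACE
    from fluent.migrate.helpers import MESSAGE_REFERENCE

    # Swap the hard-coded brand entity for a reference to the brand term.
    value = REPLACE(
        'browser/chrome/browser/browser.dtd',
        'fileMenu.label',
        {'&brandShortName;': MESSAGE_REFERENCE('-brand-short-name')}
    )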
--- a/third_party/python/fluent/tools/migrate/migrate-l10n.py
+++ b/third_party/python/fluent/tools/migrate/migrate-l10n.py
@@ -6,43 +6,53 @@ import sys
 import json
 import logging
 import argparse
 import importlib
 
 import hglib
 from hglib.util import b
 
-from fluent.migrate import (
-    MergeContext, MigrationError, convert_blame_to_changesets
-)
+from fluent.migrate.context import MergeContext
+from fluent.migrate.errors import MigrationError
+from fluent.migrate.changesets import convert_blame_to_changesets
 from blame import Blame
 
 
-def main(lang, reference_dir, localization_dir, blame, migrations, dry_run):
+def main(lang, reference_dir, localization_dir, migrations, dry_run):
     """Run migrations and commit files with the result."""
-    changesets = convert_blame_to_changesets(blame)
     client = hglib.open(localization_dir)
 
     for migration in migrations:
 
-        print('Running migration {}'.format(migration.__name__))
+        print('\nRunning migration {} for {}'.format(
+            migration.__name__, lang))
 
         # For each migration create a new context.
         ctx = MergeContext(lang, reference_dir, localization_dir)
 
         try:
             # Add the migration spec.
             migration.migrate(ctx)
-        except MigrationError as err:
-            sys.exit(err.message)
+        except MigrationError:
+            print('  Skipping migration {} for {}'.format(
+                migration.__name__, lang))
+            continue
 
         # Keep track of how many changesets we're committing.
         index = 0
 
+        # Annotate legacy localization files used as sources by this migration
+        # to preserve attribution of translations.
+        files = (
+            path for path in ctx.localization_resources.keys()
+            if not path.endswith('.ftl'))
+        blame = Blame(client).attribution(files)
+        changesets = convert_blame_to_changesets(blame)
+
         for changeset in changesets:
             # Run the migration for the changeset.
             snapshot = ctx.serialize_changeset(changeset['changes'])
 
             # Did it change any files?
             if not snapshot:
                 continue
 
@@ -87,38 +97,25 @@ if __name__ == '__main__':
         '--reference-dir', type=str,
         help='directory with reference FTL files'
     )
     parser.add_argument(
         '--localization-dir', type=str,
         help='directory for localization files'
     )
     parser.add_argument(
-        '--blame', type=argparse.FileType(), default=None,
-        help='path to a JSON with blame information'
-    )
-    parser.add_argument(
         '--dry-run', action='store_true',
         help='do not write to disk nor commit any changes'
     )
     parser.set_defaults(dry_run=False)
 
     logger = logging.getLogger('migrate')
     logger.setLevel(logging.INFO)
 
     args = parser.parse_args()
 
-    if args.blame:
-        # Load pre-computed blame from a JSON file.
-        blame = json.load(args.blame)
-    else:
-        # Compute blame right now.
-        print('Annotating {}'.format(args.localization_dir))
-        blame = Blame(args.localization_dir).main()
-
     main(
         lang=args.lang,
         reference_dir=args.reference_dir,
         localization_dir=args.localization_dir,
-        blame=blame,
         migrations=map(importlib.import_module, args.migrations),
         dry_run=args.dry_run
     )
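
Driver note: migrate-l10n.py no longer accepts pre-computed blame via --blame.
Each migration now annotates only the legacy (non-FTL) files it actually reads,
and a MigrationError skips that migration for the locale instead of aborting the
whole run. Driving main() directly, using the new signature shown above
(directory paths and the recipe module name are illustrative):

    from importlib import import_module

    main(
        lang='de',
        reference_dir='browser/locales/en-US',
        localization_dir='/path/to/l10n-central/de',
        migrations=[import_module('bug_1291693')],
        dry_run=True
    )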