Bug 1376704 - Remove szip support in the linker. r?snorp
author      Mike Hommey <mh+mozilla@glandium.org>
date        Wed, 28 Jun 2017 15:47:31 +0900
changeset   601590 b4bc18fa4da36a265f977c4813afb47a4d92a0f5
parent      600694 8f80d594c08d5c7a112e5d4b9eb44ffca717eb7b
child       601592 05fabf1310e69581c88a850c23fbf4702aa6dd86
push id     66127
push user   bmo:mh+mozilla@glandium.org
push date   Wed, 28 Jun 2017 22:41:11 +0000
reviewers   snorp
bugs        1376704, 1307886
milestone   56.0a1

Since bug 1307886, we don't actually use szip anymore, and we no longer even have the option to package Fennec with it. We can thus remove support for loading szip-compressed libraries, as well as the on-demand linkage that went with it. The latter might mean we can also remove the segfault handler, but it's unclear whether that handler is currently working around other issues with registering signal handlers, so we'll leave that to a followup.
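
For context, the on-demand linkage removed here relied on the usual PROT_NONE + SIGSEGV lazy-fill pattern: segments backed by a seekable zstream were mapped PROT_NONE, and the linker's segfault handler decompressed the faulting chunk and flipped its protection before resuming. The sketch below is not part of this patch; the names are illustrative only, and a marker-byte memset stands in for the chunk decompression MappableSeekableZStream::ensure() used to do. It just shows that pattern in isolation:

#include <csignal>
#include <cstdio>
#include <cstring>
#include <sys/mman.h>
#include <unistd.h>

static char *lazy_base;
static size_t lazy_size;
static long page_size;

/* If the fault lands in our PROT_NONE region, "decompress" the page (here:
 * fill it with a marker byte), make it readable, and return so the faulting
 * load is retried. Anything else would be redispatched by a real handler. */
static void segv_handler(int, siginfo_t *info, void *)
{
  char *addr = static_cast<char *>(info->si_addr);
  if (addr < lazy_base || addr >= lazy_base + lazy_size)
    _exit(1);
  char *page = lazy_base + ((addr - lazy_base) / page_size) * page_size;
  mprotect(page, page_size, PROT_READ | PROT_WRITE);
  memset(page, 0x2a, page_size);        /* stand-in for chunk decompression */
  mprotect(page, page_size, PROT_READ); /* flip to the recorded protection */
}

int main()
{
  page_size = sysconf(_SC_PAGESIZE);
  lazy_size = 4 * page_size;
  lazy_base = static_cast<char *>(mmap(nullptr, lazy_size, PROT_NONE,
                                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  if (lazy_base == MAP_FAILED)
    return 1;

  struct sigaction sa = {};
  sa.sa_sigaction = segv_handler;
  sa.sa_flags = SA_SIGINFO;
  sigaction(SIGSEGV, &sa, nullptr);

  /* The first access faults, the handler fills the page, then the load succeeds. */
  printf("byte = 0x%x\n", lazy_base[page_size + 1]);
  return 0;
}

With szip gone, no Mappable needs that lazy path anymore, which is why ensure(), the stats hooks, and the Zip::Stream::STORE branches disappear below, while the SEGVHandler itself only loses its ensure() call and keeps redispatching to the registered handler.
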
mobile/android/geckoview/src/main/java/org/mozilla/gecko/mozglue/GeckoLoader.java
mozglue/linker/CustomElf.cpp
mozglue/linker/CustomElf.h
mozglue/linker/ElfLoader.cpp
mozglue/linker/ElfLoader.h
mozglue/linker/Mappable.cpp
mozglue/linker/Mappable.h
mozglue/linker/SeekableZStream.cpp
mozglue/linker/SeekableZStream.h
mozglue/linker/XZStream.cpp
mozglue/linker/moz.build
mozglue/linker/szip.cpp
toolkit/components/startup/StartupTimeline.h
--- a/mobile/android/geckoview/src/main/java/org/mozilla/gecko/mozglue/GeckoLoader.java
+++ b/mobile/android/geckoview/src/main/java/org/mozilla/gecko/mozglue/GeckoLoader.java
@@ -200,26 +200,16 @@ public final class GeckoLoader {
 
         // setup the libs cache
         String linkerCache = System.getenv("MOZ_LINKER_CACHE");
         if (linkerCache == null) {
             linkerCache = cacheFile.getPath();
             putenv("MOZ_LINKER_CACHE=" + linkerCache);
         }
 
-        // Disable on-demand decompression of the linker on devices where it
-        // is known to cause crashes.
-        String forced_ondemand = System.getenv("MOZ_LINKER_ONDEMAND");
-        if (forced_ondemand == null) {
-            if ("HTC".equals(android.os.Build.MANUFACTURER) &&
-                "HTC Vision".equals(android.os.Build.MODEL)) {
-                putenv("MOZ_LINKER_ONDEMAND=0");
-            }
-        }
-
         putenv("MOZ_LINKER_EXTRACT=1");
     }
 
     @RobocopTarget
     public synchronized static void loadSQLiteLibs(final Context context, final String apkName) {
         if (sSQLiteLibsLoaded) {
             return;
         }
--- a/mozglue/linker/CustomElf.cpp
+++ b/mozglue/linker/CustomElf.cpp
@@ -86,18 +86,16 @@ static int p_flags_to_mprot(Word flags)
  */
 class Mappable1stPagePtr: public GenericMappedPtr<Mappable1stPagePtr> {
 public:
   Mappable1stPagePtr(Mappable *mappable)
   : GenericMappedPtr<Mappable1stPagePtr>(
       mappable->mmap(nullptr, PageSize(), PROT_READ, MAP_PRIVATE, 0))
   , mappable(mappable)
   {
-    /* Ensure the content of this page */
-    mappable->ensure(*this);
   }
 
 private:
   friend class GenericMappedPtr<Mappable1stPagePtr>;
   void munmap(void *buf, size_t length) {
     mappable->munmap(buf, length);
   }
 
@@ -257,19 +255,16 @@ CustomElf::Load(Mappable *mappable, cons
     return nullptr;
 
 #ifdef __ARM_EABI__
   if (arm_exidx_phdr)
     elf->arm_exidx.InitSize(elf->GetPtr(arm_exidx_phdr->p_vaddr),
                             arm_exidx_phdr->p_memsz);
 #endif
 
-  if (MOZ_UNLIKELY(Logging::isVerbose())) {
-    elf->stats("oneLibLoaded");
-  }
   DEBUG_LOG("CustomElf::Load(\"%s\", 0x%x) = %p", path, flags,
             static_cast<void *>(elf));
   return elf.forget();
 }
 
 CustomElf::~CustomElf()
 {
   DEBUG_LOG("CustomElf::~CustomElf(%p [\"%s\"])",
@@ -308,18 +303,16 @@ CustomElf::GetSymbolPtrInDeps(const char
 #else
     if (strcmp(symbol + 2, "cxa_atexit") == 0)
       return FunctionPtr(&ElfLoader::__wrap_cxa_atexit);
 #endif
     if (strcmp(symbol + 2, "cxa_finalize") == 0)
       return FunctionPtr(&ElfLoader::__wrap_cxa_finalize);
     if (strcmp(symbol + 2, "dso_handle") == 0)
       return const_cast<CustomElf *>(this);
-    if (strcmp(symbol + 2, "moz_linker_stats") == 0)
-      return FunctionPtr(&ElfLoader::stats);
 #ifdef __ARM_EABI__
     if (strcmp(symbol + 2, "gnu_Unwind_Find_exidx") == 0)
       return FunctionPtr(__wrap___gnu_Unwind_Find_exidx);
 #endif
   } else if (symbol[0] == 's' && symbol[1] == 'i') {
     if (strcmp(symbol + 2, "gnal") == 0)
       return FunctionPtr(signal);
     if (strcmp(symbol + 2, "gaction") == 0)
@@ -358,22 +351,16 @@ CustomElf::GetSymbolPtrInDeps(const char
       sym = (*it)->GetSymbolPtr(symbol);
     }
     if (sym)
       return sym;
   }
   return nullptr;
 }
 
-void
-CustomElf::stats(const char *when) const
-{
-  mappable->stats(when, GetPath());
-}
-
 bool
 CustomElf::LoadSegment(const Phdr *pt_load) const
 {
   if (pt_load->p_type != PT_LOAD) {
     DEBUG_LOG("%s: Elf::LoadSegment only takes PT_LOAD program headers", GetPath());
     return false;;
   }
 
@@ -410,42 +397,28 @@ CustomElf::LoadSegment(const Phdr *pt_lo
       ERROR("%s: Failed to mmap", GetPath());
     } else {
       ERROR("%s: Didn't map at the expected location (wanted: %p, got: %p)",
           GetPath(), where, mapped);
     }
     return false;
   }
 
-  /* Ensure the availability of all pages within the mapping if on-demand
-   * decompression is disabled (MOZ_LINKER_ONDEMAND=0 or signal handler not
-   * registered). */
-  const char *ondemand = getenv("MOZ_LINKER_ONDEMAND");
-  if (!ElfLoader::Singleton.hasRegisteredHandler() ||
-      (ondemand && !strncmp(ondemand, "0", 2 /* Including '\0' */))) {
-    for (Addr off = 0; off < pt_load->p_filesz + align_offset;
-         off += PageSize()) {
-      mappable->ensure(reinterpret_cast<char *>(mapped) + off);
-    }
-  }
   /* When p_memsz is greater than p_filesz, we need to have nulled out memory
    * after p_filesz and before p_memsz.
    * Above the end of the last page, and up to p_memsz, we already have nulled
    * out memory because we mapped anonymous memory on the whole library virtual
    * address space. We just need to adjust this anonymous memory protection
    * flags. */
   if (pt_load->p_memsz > pt_load->p_filesz) {
     Addr file_end = pt_load->p_vaddr + pt_load->p_filesz;
     Addr mem_end = pt_load->p_vaddr + pt_load->p_memsz;
     Addr next_page = PageAlignedEndPtr(file_end);
     if (next_page > file_end) {
-      /* The library is not registered at this point, so we can't rely on
-       * on-demand decompression to handle missing pages here. */
       void *ptr = GetPtr(file_end);
-      mappable->ensure(ptr);
       memset(ptr, 0, next_page - file_end);
     }
     if (mem_end > next_page) {
       if (mprotect(GetPtr(next_page), mem_end - next_page, prot) < 0) {
         ERROR("%s: Failed to mprotect", GetPath());
         return false;
       }
     }
--- a/mozglue/linker/CustomElf.h
+++ b/mozglue/linker/CustomElf.h
@@ -36,23 +36,16 @@ public:
    */
   virtual ~CustomElf();
 
 protected:
   virtual Mappable *GetMappable() const;
 
 public:
   /**
-   * Shows some stats about the Mappable instance. The when argument is to be
-   * used by the caller to give an identifier of the when the stats call is
-   * made.
-   */
-  virtual void stats(const char *when) const;
-
-  /**
    * Returns the instance, casted as BaseElf. (short of a better way to do
    * this without RTTI)
    */
   virtual BaseElf *AsBaseElf() { return this; }
 
 private:
   /**
    * Scan dependent libraries to find the address corresponding to the
--- a/mozglue/linker/ElfLoader.cpp
+++ b/mozglue/linker/ElfLoader.cpp
@@ -277,22 +277,16 @@ LibHandle::GetMappableLength() const
 void *
 LibHandle::MappableMMap(void *addr, size_t length, off_t offset) const
 {
   if (!mappable)
     mappable = GetMappable();
   if (!mappable)
     return MAP_FAILED;
   void* mapped = mappable->mmap(addr, length, PROT_READ, MAP_PRIVATE, offset);
-  if (mapped != MAP_FAILED) {
-    /* Ensure the availability of all pages within the mapping */
-    for (size_t off = 0; off < length; off += PageSize()) {
-      mappable->ensure(reinterpret_cast<char *>(mapped) + off);
-    }
-  }
   return mapped;
 }
 
 void
 LibHandle::MappableMUnmap(void *addr, size_t length) const
 {
   if (mappable)
     mappable->munmap(addr, length);
@@ -490,18 +484,16 @@ ElfLoader::GetMappableFromPath(const cha
        * files, in the directory pointed by the MOZ_LINKER_CACHE
        * environment variable. */
       const char *extract = getenv("MOZ_LINKER_EXTRACT");
       if (extract && !strncmp(extract, "1", 2 /* Including '\0' */))
         mappable = MappableExtractFile::Create(name, zip, &s);
       if (!mappable) {
         if (s.GetType() == Zip::Stream::DEFLATE) {
           mappable = MappableDeflate::Create(name, zip, &s);
-        } else if (s.GetType() == Zip::Stream::STORE) {
-          mappable = MappableSeekableZStream::Create(name, zip, &s);
         }
       }
     }
   }
   /* If we couldn't load above, try with a MappableFile */
   if (!mappable && !zip)
     mappable = MappableFile::Create(path);
 
@@ -619,28 +611,16 @@ ElfLoader::~ElfLoader()
          * destroying them as a side effect, and possibly leaving dangling
          * pointers in the handle list we're scanning */
       }
     }
   }
   pthread_mutex_destroy(&handlesMutex);
 }
 
-void
-ElfLoader::stats(const char *when)
-{
-  if (MOZ_LIKELY(!Logging::isVerbose()))
-    return;
-
-  AutoLock lock(&Singleton.handlesMutex);
-  for (LibHandleList::iterator it = Singleton.handles.begin();
-       it < Singleton.handles.end(); ++it)
-    (*it)->stats(when);
-}
-
 #ifdef __ARM_EABI__
 int
 ElfLoader::__wrap_aeabi_atexit(void *that, ElfLoader::Destructor destructor,
                                void *dso_handle)
 {
   Singleton.destructors.push_back(
     DestructorCaller(destructor, that, dso_handle));
   return 0;
@@ -1271,30 +1251,16 @@ void SEGVHandler::test_handler(int signu
 }
 
 /* TODO: "properly" handle signal masks and flags */
 void SEGVHandler::handler(int signum, siginfo_t *info, void *context)
 {
   //ASSERT(signum == SIGSEGV);
   DEBUG_LOG("Caught segmentation fault @%p", info->si_addr);
 
-  /* Check whether we segfaulted in the address space of a CustomElf. We're
-   * only expecting that to happen as an access error. */
-  if (info->si_code == SEGV_ACCERR) {
-    RefPtr<LibHandle> handle =
-      ElfLoader::Singleton.GetHandleByPtr(info->si_addr);
-    BaseElf *elf;
-    if (handle && (elf = handle->AsBaseElf())) {
-      DEBUG_LOG("Within the address space of %s", handle->GetPath());
-      if (elf->mappable && elf->mappable->ensure(info->si_addr)) {
-        return;
-      }
-    }
-  }
-
   /* Redispatch to the registered handler */
   SEGVHandler &that = ElfLoader::Singleton;
   if (that.action.sa_flags & SA_SIGINFO) {
     DEBUG_LOG("Redispatching to registered handler @%p",
               FunctionPtr(that.action.sa_sigaction));
     that.action.sa_sigaction(signum, info, context);
   } else if (that.action.sa_handler == SIG_DFL) {
     DEBUG_LOG("Redispatching to default handler");
--- a/mozglue/linker/ElfLoader.h
+++ b/mozglue/linker/ElfLoader.h
@@ -200,23 +200,16 @@ public:
 #ifdef __ARM_EABI__
   /**
    * Find the address and entry count of the ARM.exidx section
    * associated with the library
    */
   virtual const void *FindExidx(int *pcount) const = 0;
 #endif
 
-  /**
-   * Shows some stats about the Mappable instance. The when argument is to be
-   * used by the caller to give an identifier of the when the stats call is
-   * made.
-   */
-  virtual void stats(const char *when) const { };
-
 protected:
   /**
    * Returns a mappable object for use by MappableMMap and related functions.
    */
   virtual Mappable *GetMappable() const = 0;
 
   /**
    * Returns the instance, casted as the wanted type. Returns nullptr if
@@ -486,22 +479,16 @@ private:
   typedef std::vector<LibHandle *> LibHandleList;
   LibHandleList handles;
 
   pthread_mutex_t handlesMutex;
 
 protected:
   friend class CustomElf;
   friend class LoadedElf;
-  /**
-   * Show some stats about Mappables in CustomElfs. The when argument is to
-   * be used by the caller to give an identifier of the when the stats call
-   * is made.
-   */
-  static void stats(const char *when);
 
   /* Definition of static destructors as to be used for C++ ABI compatibility */
   typedef void (*Destructor)(void *object);
 
   /**
    * C++ ABI makes static initializers register destructors through a specific
    * atexit interface. On glibc/linux systems, the dso_handle is a pointer
    * within a given library. On bionic/android systems, it is an undefined
--- a/mozglue/linker/Mappable.cpp
+++ b/mozglue/linker/Mappable.cpp
@@ -18,17 +18,16 @@
 #include "mozilla/UniquePtr.h"
 
 #ifdef ANDROID
 #include <linux/ashmem.h>
 #endif
 #include <sys/stat.h>
 #include <errno.h>
 #include "ElfLoader.h"
-#include "SeekableZStream.h"
 #include "XZStream.h"
 #include "Logging.h"
 
 using mozilla::MakeUnique;
 using mozilla::UniquePtr;
 
 class CacheValidator
 {
@@ -228,37 +227,16 @@ MappableExtractFile::Create(const char *
       return nullptr;
     }
     const size_t written = xzStream.Decode(buffer, buffer.GetLength());
     DEBUG_LOG("XZStream decoded %" PRIuPTR, written);
     if (written != buffer.GetLength()) {
       ERROR("Error decoding XZ file %s", file.get());
       return nullptr;
     }
-  } else if (stream->GetType() == Zip::Stream::STORE) {
-    SeekableZStream zStream;
-    if (!zStream.Init(stream->GetBuffer(), stream->GetSize())) {
-      ERROR("Couldn't initialize SeekableZStream for %s", name);
-      return nullptr;
-    }
-    if (ftruncate(fd, zStream.GetUncompressedSize()) == -1) {
-      ERROR("Couldn't ftruncate %s to decompress library", file.get());
-      return nullptr;
-    }
-    MappedPtr buffer(MemoryRange::mmap(nullptr, zStream.GetUncompressedSize(),
-                                       PROT_WRITE, MAP_SHARED, fd, 0));
-    if (buffer == MAP_FAILED) {
-      ERROR("Couldn't map %s to decompress library", file.get());
-      return nullptr;
-    }
-
-    if (!zStream.Decompress(buffer, 0, zStream.GetUncompressedSize())) {
-      ERROR("%s: failed to decompress", name);
-      return nullptr;
-    }
   } else {
     return nullptr;
   }
 
   validator.CacheChecksum();
   return new MappableExtractFile(fd.forget(), file.release());
 }
 
@@ -470,218 +448,8 @@ MappableDeflate::finalize()
   zip = nullptr;
 }
 
 size_t
 MappableDeflate::GetLength() const
 {
   return buffer->GetLength();
 }
-
-Mappable *
-MappableSeekableZStream::Create(const char *name, Zip *zip,
-                                Zip::Stream *stream)
-{
-  MOZ_ASSERT(stream->GetType() == Zip::Stream::STORE);
-  UniquePtr<MappableSeekableZStream> mappable(new MappableSeekableZStream(zip));
-
-  pthread_mutexattr_t recursiveAttr;
-  pthread_mutexattr_init(&recursiveAttr);
-  pthread_mutexattr_settype(&recursiveAttr, PTHREAD_MUTEX_RECURSIVE);
-
-  if (pthread_mutex_init(&mappable->mutex, &recursiveAttr))
-    return nullptr;
-
-  if (!mappable->zStream.Init(stream->GetBuffer(), stream->GetSize()))
-    return nullptr;
-
-  mappable->buffer.reset(_MappableBuffer::Create(name,
-                              mappable->zStream.GetUncompressedSize()));
-  if (!mappable->buffer)
-    return nullptr;
-
-  mappable->chunkAvail = MakeUnique<unsigned char[]>(mappable->zStream.GetChunksNum());
-
-  return mappable.release();
-}
-
-MappableSeekableZStream::MappableSeekableZStream(Zip *zip)
-: zip(zip), chunkAvailNum(0) { }
-
-MappableSeekableZStream::~MappableSeekableZStream()
-{
-  pthread_mutex_destroy(&mutex);
-}
-
-MemoryRange
-MappableSeekableZStream::mmap(const void *addr, size_t length, int prot,
-                              int flags, off_t offset)
-{
-  /* Map with PROT_NONE so that accessing the mapping would segfault, and
-   * bring us to ensure() */
-  void *res = buffer->mmap(addr, length, PROT_NONE, flags, offset);
-  if (res == MAP_FAILED)
-    return MemoryRange(MAP_FAILED, 0);
-
-  /* Store the mapping, ordered by offset and length */
-  std::vector<LazyMap>::reverse_iterator it;
-  for (it = lazyMaps.rbegin(); it < lazyMaps.rend(); ++it) {
-    if ((it->offset < offset) ||
-        ((it->offset == offset) && (it->length < length)))
-      break;
-  }
-  LazyMap map = { res, length, prot, offset };
-  lazyMaps.insert(it.base(), map);
-  return MemoryRange(res, length);
-}
-
-void
-MappableSeekableZStream::munmap(void *addr, size_t length)
-{
-  std::vector<LazyMap>::iterator it;
-  for (it = lazyMaps.begin(); it < lazyMaps.end(); ++it)
-    if ((it->addr = addr) && (it->length == length)) {
-      lazyMaps.erase(it);
-      ::munmap(addr, length);
-      return;
-    }
-  MOZ_CRASH("munmap called with unknown mapping");
-}
-
-void
-MappableSeekableZStream::finalize() { }
-
-bool
-MappableSeekableZStream::ensure(const void *addr)
-{
-  DEBUG_LOG("ensure @%p", addr);
-  const void *addrPage = PageAlignedPtr(addr);
-  /* Find the mapping corresponding to the given page */
-  std::vector<LazyMap>::iterator map;
-  for (map = lazyMaps.begin(); map < lazyMaps.end(); ++map) {
-    if (map->Contains(addrPage))
-      break;
-  }
-  if (map == lazyMaps.end())
-    return false;
-
-  /* Find corresponding chunk */
-  off_t mapOffset = map->offsetOf(addrPage);
-  off_t chunk = mapOffset / zStream.GetChunkSize();
-
-  /* In the typical case, we just need to decompress the chunk entirely. But
-   * when the current mapping ends in the middle of the chunk, we want to
-   * stop at the end of the corresponding page.
-   * However, if another mapping needs the last part of the chunk, we still
-   * need to continue. As mappings are ordered by offset and length, we don't
-   * need to scan the entire list of mappings.
-   * It is safe to run through lazyMaps here because the linker is never
-   * going to call mmap (which adds lazyMaps) while this function is
-   * called. */
-  size_t length = zStream.GetChunkSize(chunk);
-  off_t chunkStart = chunk * zStream.GetChunkSize();
-  off_t chunkEnd = chunkStart + length;
-  std::vector<LazyMap>::iterator it;
-  for (it = map; it < lazyMaps.end(); ++it) {
-    if (chunkEnd <= it->endOffset())
-      break;
-  }
-  if ((it == lazyMaps.end()) || (chunkEnd > it->endOffset())) {
-    /* The mapping "it" points at now is past the interesting one */
-    --it;
-    length = it->endOffset() - chunkStart;
-  }
-
-  length = PageAlignedSize(length);
-
-  /* The following lock can be re-acquired by the thread holding it.
-   * If this happens, it means the following code is interrupted somehow by
-   * some signal, and ends up retriggering a chunk decompression for the
-   * same MappableSeekableZStream.
-   * If the chunk to decompress is different the second time, then everything
-   * is safe as the only common data touched below is chunkAvailNum, and it is
-   * atomically updated (leaving out any chance of an interruption while it is
-   * updated affecting the result). If the chunk to decompress is the same, the
-   * worst thing that can happen is chunkAvailNum being incremented one too
-   * many times, which doesn't affect functionality. The chances of it
-   * happening being pretty slim, and the effect being harmless, we can just
-   * ignore the issue. Other than that, we'd just be wasting time decompressing
-   * the same chunk twice. */
-  AutoLock lock(&mutex);
-
-  /* The very first page is mapped and accessed separately of the rest, and
-   * as such, only the first page of the first chunk is decompressed this way.
-   * When we fault in the remaining pages of that chunk, we want to decompress
-   * the complete chunk again. Short of doing that, we would end up with
-   * no data between PageSize() and chunkSize, which would effectively corrupt
-   * symbol resolution in the underlying library. */
-  if (chunkAvail[chunk] < PageNumber(length)) {
-    if (!zStream.DecompressChunk(*buffer + chunkStart, chunk, length))
-      return false;
-
-#if defined(ANDROID) && defined(__arm__)
-    if (map->prot & PROT_EXEC) {
-      /* We just extracted data that may be executed in the future.
-       * We thus need to ensure Instruction and Data cache coherency. */
-      DEBUG_LOG("cacheflush(%p, %p)", *buffer + chunkStart, *buffer + (chunkStart + length));
-      cacheflush(reinterpret_cast<uintptr_t>(*buffer + chunkStart),
-                 reinterpret_cast<uintptr_t>(*buffer + (chunkStart + length)), 0);
-    }
-#endif
-    /* Only count if we haven't already decompressed parts of the chunk */
-    if (chunkAvail[chunk] == 0)
-      chunkAvailNum++;
-
-    chunkAvail[chunk] = PageNumber(length);
-  }
-
-  /* Flip the chunk mapping protection to the recorded flags. We could
-   * also flip the protection for other mappings of the same chunk,
-   * but it's easier to skip that and let further segfaults call
-   * ensure again. */
-  const void *chunkAddr = reinterpret_cast<const void *>
-                          (reinterpret_cast<uintptr_t>(addrPage)
-                           - mapOffset % zStream.GetChunkSize());
-  const void *chunkEndAddr = reinterpret_cast<const void *>
-                             (reinterpret_cast<uintptr_t>(chunkAddr) + length);
-  
-  const void *start = std::max(map->addr, chunkAddr);
-  const void *end = std::min(map->end(), chunkEndAddr);
-  length = reinterpret_cast<uintptr_t>(end)
-           - reinterpret_cast<uintptr_t>(start);
-
-  if (mprotect(const_cast<void *>(start), length, map->prot) == 0) {
-    DEBUG_LOG("mprotect @%p, 0x%" PRIxSIZE ", 0x%x", start, length, map->prot);
-    return true;
-  }
-
-  ERROR("mprotect @%p, 0x%" PRIxSIZE ", 0x%x failed with errno %d",
-      start, length, map->prot, errno);
-  return false;
-}
-
-void
-MappableSeekableZStream::stats(const char *when, const char *name) const
-{
-  size_t nEntries = zStream.GetChunksNum();
-  DEBUG_LOG("%s: %s; %" PRIuSIZE "/%" PRIuSIZE " chunks decompressed",
-            name, when, static_cast<size_t>(chunkAvailNum), nEntries);
-
-  size_t len = 64;
-  UniquePtr<char[]> map = MakeUnique<char[]>(len + 3);
-  map[0] = '[';
-
-  for (size_t i = 0, j = 1; i < nEntries; i++, j++) {
-    map[j] = chunkAvail[i] ? '*' : '_';
-    if ((j == len) || (i == nEntries - 1)) {
-      map[j + 1] = ']';
-      map[j + 2] = '\0';
-      DEBUG_LOG("%s", static_cast<char *>(map.get()));
-      j = 0;
-    }
-  }
-}
-
-size_t
-MappableSeekableZStream::GetLength() const
-{
-  return buffer->GetLength();
-}
--- a/mozglue/linker/Mappable.h
+++ b/mozglue/linker/Mappable.h
@@ -1,17 +1,16 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef Mappable_h
 #define Mappable_h
 
 #include "Zip.h"
-#include "SeekableZStream.h"
 #include "mozilla/RefPtr.h"
 #include "mozilla/UniquePtr.h"
 #include "zlib.h"
 
 /**
  * Abstract class to handle mmap()ing from various kind of entities, such as
  * plain files or Zip entries. The virtual members are meant to act as the
  * equivalent system functions, except mapped memory is always MAP_PRIVATE,
@@ -41,38 +40,21 @@ private:
   }
   /* Limit use of Mappable::munmap to classes that keep track of the address
    * and size of the mapping. This allows to ignore ::munmap return value. */
   friend class Mappable1stPagePtr;
   friend class LibHandle;
 
 public:
   /**
-   * Ensures the availability of the memory pages for the page(s) containing
-   * the given address. Returns whether the pages were successfully made
-   * available.
-   */
-  virtual bool ensure(const void *addr) { return true; }
-
-  /**
    * Indicate to a Mappable instance that no further mmap is going to happen.
    */
   virtual void finalize() = 0;
 
   /**
-   * Shows some stats about the Mappable instance.
-   * Meant for MappableSeekableZStream only.
-   * As Mappables don't keep track of what they are instanciated for, the name
-   * argument is used to make the stats logging useful to the reader. The when
-   * argument is to be used by the caller to give an identifier of the when
-   * the stats call is made.
-   */
-  virtual void stats(const char *when, const char *name) const { }
-
-  /**
    * Returns the maximum length that can be mapped from this Mappable for
    * offset = 0.
    */
   virtual size_t GetLength() const = 0;
 };
 
 /**
  * Mappable implementation for plain files
@@ -173,95 +155,9 @@ private:
 
   /* Decompression buffer */
   mozilla::UniquePtr<_MappableBuffer> buffer;
 
   /* Zlib data */
   zxx_stream zStream;
 };
 
-/**
- * Mappable implementation for seekable zStreams.
- * Inflates the mapped bits in a temporary buffer, on demand.
- */
-class MappableSeekableZStream: public Mappable
-{
-public:
-  ~MappableSeekableZStream();
-
-  /**
-   * Create a MappableSeekableZStream instance for the given Zip stream. The
-   * name argument is used for an appropriately named temporary file, and the
-   * Zip instance is given for the MappableSeekableZStream to keep a reference
-   * of it.
-   */
-  static Mappable *Create(const char *name, Zip *zip,
-                                         Zip::Stream *stream);
-
-  /* Inherited from Mappable */
-  virtual MemoryRange mmap(const void *addr, size_t length, int prot, int flags, off_t offset);
-  virtual void munmap(void *addr, size_t length);
-  virtual void finalize();
-  virtual bool ensure(const void *addr);
-  virtual void stats(const char *when, const char *name) const;
-  virtual size_t GetLength() const;
-
-  virtual Kind GetKind() const { return MAPPABLE_SEEKABLE_ZSTREAM; };
-private:
-  MappableSeekableZStream(Zip *zip);
-
-  /* Zip reference */
-  RefPtr<Zip> zip;
-
-  /* Decompression buffer */
-  mozilla::UniquePtr<_MappableBuffer> buffer;
-
-  /* Seekable ZStream */
-  SeekableZStream zStream;
-
-  /* Keep track of mappings performed with MappableSeekableZStream::mmap so
-   * that they can be realized by MappableSeekableZStream::ensure.
-   * Values stored in the struct are those passed to mmap */
-  struct LazyMap
-  {
-    const void *addr;
-    size_t length;
-    int prot;
-    off_t offset;
-
-    /* Returns addr + length, as a pointer */
-    const void *end() const {
-      return reinterpret_cast<const void *>
-             (reinterpret_cast<const unsigned char *>(addr) + length);
-    }
-
-    /* Returns offset + length */
-    off_t endOffset() const {
-      return offset + length;
-    }
-
-    /* Returns the offset corresponding to the given address */
-    off_t offsetOf(const void *ptr) const {
-      return reinterpret_cast<uintptr_t>(ptr)
-             - reinterpret_cast<uintptr_t>(addr) + offset;
-    }
-
-    /* Returns whether the given address is in the LazyMap range */
-    bool Contains(const void *ptr) const {
-      return (ptr >= addr) && (ptr < end());
-    }
-  };
-
-  /* List of all mappings */
-  std::vector<LazyMap> lazyMaps;
-
-  /* Array keeping track of which chunks have already been decompressed.
-   * Each value is the number of pages decompressed for the given chunk. */
-  mozilla::UniquePtr<unsigned char[]> chunkAvail;
-
-  /* Number of chunks that have already been decompressed. */
-  mozilla::Atomic<size_t> chunkAvailNum;
-
-  /* Mutex protecting decompression */
-  pthread_mutex_t mutex;
-};
-
 #endif /* Mappable_h */
deleted file mode 100644
--- a/mozglue/linker/SeekableZStream.cpp
+++ /dev/null
@@ -1,262 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include <algorithm>
-#include "SeekableZStream.h"
-#include "Logging.h"
-#include "mozilla/SizePrintfMacros.h"
-
-bool
-SeekableZStream::Init(const void *buf, size_t length)
-{
-  const SeekableZStreamHeader *header = SeekableZStreamHeader::validate(buf);
-  if (!header) {
-    ERROR("Not a seekable zstream");
-    return false;
-  }
-
-  buffer = reinterpret_cast<const unsigned char *>(buf);
-  totalSize = header->totalSize;
-  chunkSize = header->chunkSize;
-  lastChunkSize = header->lastChunkSize;
-  windowBits = header->windowBits;
-  dictionary.Init(buffer + sizeof(SeekableZStreamHeader), header->dictSize);
-  offsetTable.Init(buffer + sizeof(SeekableZStreamHeader) + header->dictSize,
-                   header->nChunks);
-  filter = GetFilter(header->filter);
-
-  /* Sanity check */
-  if ((chunkSize == 0) ||
-      (!IsPageAlignedSize(chunkSize)) ||
-      (chunkSize > 8 * PageSize()) ||
-      (offsetTable.numElements() < 1) ||
-      (lastChunkSize == 0) ||
-      (lastChunkSize > chunkSize) ||
-      (length < totalSize)) {
-    ERROR("Malformed or broken seekable zstream");
-    return false;
-  }
-
-  return true;
-}
-
-bool
-SeekableZStream::Decompress(void *where, size_t chunk, size_t length)
-{
-  while (length) {
-    size_t len = std::min(length, static_cast<size_t>(chunkSize));
-    if (!DecompressChunk(where, chunk, len))
-      return false;
-    where = reinterpret_cast<unsigned char *>(where) + len;
-    length -= len;
-    chunk++;
-  }
-  return true;
-}
-
-bool
-SeekableZStream::DecompressChunk(void *where, size_t chunk, size_t length)
-{
-  if (chunk >= offsetTable.numElements()) {
-    ERROR("DecompressChunk: chunk #%" PRIuSIZE " out of range [0-%" PRIuSIZE ")",
-        chunk, offsetTable.numElements());
-    return false;
-  }
-
-  bool isLastChunk = (chunk == offsetTable.numElements() - 1);
-
-  size_t chunkLen = isLastChunk ? lastChunkSize : chunkSize;
-
-  if (length == 0 || length > chunkLen)
-    length = chunkLen;
-
-  DEBUG_LOG("DecompressChunk #%" PRIuSIZE " @%p (%" PRIuSIZE "/%" PRIuSIZE ")",
-        chunk, where, length, chunkLen);
-  zxx_stream zStream(&allocator);
-  zStream.avail_in = (isLastChunk ? totalSize : uint32_t(offsetTable[chunk + 1]))
-                     - uint32_t(offsetTable[chunk]);
-  zStream.next_in = const_cast<Bytef *>(buffer + uint32_t(offsetTable[chunk]));
-  zStream.avail_out = length;
-  zStream.next_out = reinterpret_cast<Bytef *>(where);
-
-  /* Decompress chunk */
-  if (inflateInit2(&zStream, windowBits) != Z_OK) {
-    ERROR("inflateInit failed: %s", zStream.msg);
-    return false;
-  }
-  if (dictionary && inflateSetDictionary(&zStream, dictionary,
-                                         dictionary.numElements()) != Z_OK) {
-    ERROR("inflateSetDictionary failed: %s", zStream.msg);
-    return false;
-  }
-  if (inflate(&zStream, (length == chunkLen) ? Z_FINISH : Z_SYNC_FLUSH)
-      != (length == chunkLen) ? Z_STREAM_END : Z_OK) {
-    ERROR("inflate failed: %s", zStream.msg);
-    return false;
-  }
-  if (inflateEnd(&zStream) != Z_OK) {
-    ERROR("inflateEnd failed: %s", zStream.msg);
-    return false;
-  }
-  if (filter)
-    filter(chunk * chunkSize, UNFILTER, (unsigned char *)where, chunkLen);
-
-  return true;
-}
-
-/* Branch/Call/Jump conversion filter for Thumb, derived from xz-utils
- * by Igor Pavlov and Lasse Collin, published in the public domain */
-static void
-BCJ_Thumb_filter(off_t offset, SeekableZStream::FilterDirection dir,
-                 unsigned char *buf, size_t size)
-{
-  size_t i;
-  for (i = 0; i + 4 <= size; i += 2) {
-    if ((buf[i + 1] & 0xf8) == 0xf0 && (buf[i + 3] & 0xf8) == 0xf8) {
-      uint32_t src = (buf[i] << 11)
-                     | ((buf[i + 1] & 0x07) << 19)
-                     | buf[i + 2]
-                     | ((buf[i + 3] & 0x07) << 8);
-      src <<= 1;
-      uint32_t dest;
-      if (dir == SeekableZStream::FILTER)
-        dest = offset + (uint32_t)(i) + 4 + src;
-      else
-        dest = src - (offset + (uint32_t)(i) + 4);
-
-      dest >>= 1;
-      buf[i] = dest >> 11;
-      buf[i + 1] = 0xf0 | ((dest >> 19) & 0x07);
-      buf[i + 2] = dest;
-      buf[i + 3] = 0xf8 | ((dest >> 8) & 0x07);
-      i += 2;
-    }
-  }
-}
-
-/* Branch/Call/Jump conversion filter for ARM, derived from xz-utils
- * by Igor Pavlov and Lasse Collin, published in the public domain */
-static void
-BCJ_ARM_filter(off_t offset, SeekableZStream::FilterDirection dir,
-               unsigned char *buf, size_t size)
-{
-  size_t i;
-  for (i = 0; i + 4 <= size; i += 4) {
-    if (buf[i + 3] == 0xeb) {
-      uint32_t src = buf[i]
-                     | (buf[i + 1] << 8)
-                     | (buf[i + 2] << 16);
-      src <<= 2;
-      uint32_t dest;
-      if (dir == SeekableZStream::FILTER)
-        dest = offset + (uint32_t)(i) + 8 + src;
-      else
-        dest = src - (offset + (uint32_t)(i) + 8);
-
-      dest >>= 2;
-      buf[i] = dest;
-      buf[i + 1] = dest >> 8;
-      buf[i + 2] = dest >> 16;
-    }
-  }
-}
-
-/* Branch/Call/Jump conversion filter for x86, derived from xz-utils
- * by Igor Pavlov and Lasse Collin, published in the public domain */
-
-#define Test86MSByte(b) ((b) == 0 || (b) == 0xff)
-
-static void
-BCJ_X86_filter(off_t offset, SeekableZStream::FilterDirection dir,
-               unsigned char *buf, size_t size)
-{
-  static const bool MASK_TO_ALLOWED_STATUS[8] =
-    { true, true, true, false, true, false, false, false };
-
-  static const uint32_t MASK_TO_BIT_NUMBER[8] =
-    { 0, 1, 2, 2, 3, 3, 3, 3 };
-
-  uint32_t prev_mask = 0;
-  uint32_t prev_pos = 0;
-
-  for (size_t i = 0; i + 5 <= size;) {
-    uint8_t b = buf[i];
-    if (b != 0xe8 && b != 0xe9) {
-      ++i;
-      continue;
-    }
-
-    const uint32_t off = offset + (uint32_t)(i) - prev_pos;
-    prev_pos = offset + (uint32_t)(i);
-
-    if (off > 5) {
-      prev_mask = 0;
-    } else {
-      for (uint32_t i = 0; i < off; ++i) {
-        prev_mask &= 0x77;
-        prev_mask <<= 1;
-      }
-    }
-
-    b = buf[i + 4];
-
-    if (Test86MSByte(b) && MASK_TO_ALLOWED_STATUS[(prev_mask >> 1) & 0x7]
-        && (prev_mask >> 1) < 0x10) {
-
-      uint32_t src = ((uint32_t)(b) << 24)
-                     | ((uint32_t)(buf[i + 3]) << 16)
-                     | ((uint32_t)(buf[i + 2]) << 8)
-                     | (buf[i + 1]);
-
-      uint32_t dest;
-      while (true) {
-        if (dir == SeekableZStream::FILTER)
-          dest = src + (offset + (uint32_t)(i) + 5);
-        else
-          dest = src - (offset + (uint32_t)(i) + 5);
-
-        if (prev_mask == 0)
-          break;
-
-        const uint32_t i = MASK_TO_BIT_NUMBER[prev_mask >> 1];
-
-        b = (uint8_t)(dest >> (24 - i * 8));
-
-        if (!Test86MSByte(b))
-          break;
-
-        src = dest ^ ((1 << (32 - i * 8)) - 1);
-      }
-
-      buf[i + 4] = (uint8_t)(~(((dest >> 24) & 1) - 1));
-      buf[i + 3] = (uint8_t)(dest >> 16);
-      buf[i + 2] = (uint8_t)(dest >> 8);
-      buf[i + 1] = (uint8_t)(dest);
-      i += 5;
-      prev_mask = 0;
-
-    } else {
-      ++i;
-      prev_mask |= 1;
-      if (Test86MSByte(b))
-        prev_mask |= 0x10;
-    }
-  }
-}
-
-SeekableZStream::ZStreamFilter
-SeekableZStream::GetFilter(SeekableZStream::FilterId id)
-{
-  switch (id) {
-  case BCJ_THUMB:
-    return BCJ_Thumb_filter;
-  case BCJ_ARM:
-    return BCJ_ARM_filter;
-  case BCJ_X86:
-    return BCJ_X86_filter;
-  default:
-    return nullptr;
-  }
-  return nullptr;
-}
deleted file mode 100644
--- a/mozglue/linker/SeekableZStream.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef SeekableZStream_h
-#define SeekableZStream_h
-
-#include "Zip.h"
-
-/**
- * Seekable compressed stream are created by splitting the original
- * decompressed data in small chunks and compress these chunks
- * individually.
- *
- * The seekable compressed file format consists in a header defined below,
- * followed by a table of 32-bits words containing the offsets for each
- * individual compressed chunk, then followed by the compressed chunks.
- */
-
-#pragma pack(1)
-struct SeekableZStreamHeader: public Zip::SignedEntity<SeekableZStreamHeader>
-{
-  SeekableZStreamHeader()
-  : Zip::SignedEntity<SeekableZStreamHeader>(magic)
-  , totalSize(0), chunkSize(0), dictSize(0), nChunks(0), lastChunkSize(0)
-  , windowBits(0), filter(0) { }
-
-  /* Reuse Zip::SignedEntity to handle the magic number used in the Seekable
-   * ZStream file format. The magic number is "SeZz". */
-  static const uint32_t magic = 0x7a5a6553;
-
-  /* Total size of the stream, including the 4 magic bytes. */
-  le_uint32 totalSize;
-
-  /* Chunk size */
-  le_uint16 chunkSize;
-
-  /* Size of the dictionary */
-  le_uint16 dictSize;
-
-  /* Number of chunks */
-  le_uint32 nChunks;
-
-  /* Size of last chunk (> 0, <= Chunk size) */
-  le_uint16 lastChunkSize;
-
-  /* windowBits value used when deflating */
-  signed char windowBits;
-
-  /* Filter Id */
-  unsigned char filter;
-};
-#pragma pack()
-
-static_assert(sizeof(SeekableZStreamHeader) == 5 * 4,
-              "SeekableZStreamHeader should be 5 32-bits words");
-
-/**
- * Helper class used to decompress Seekable ZStreams.
- */
-class SeekableZStream {
-public:
-  /* Initialize from the given buffer. Returns whether initialization
-   * succeeded (true) or failed (false). */
-  bool Init(const void *buf, size_t length);
-
-  /* Decompresses starting from the given chunk. The decompressed data is
-   * stored at the given location. The given length, in bytes, indicates
-   * how much data to decompress. If length is 0, then exactly one chunk
-   * is decompressed.
-   * Returns whether decompression succeeded (true) or failed (false). */
-  bool Decompress(void *where, size_t chunk, size_t length = 0);
-
-  /* Decompresses the given chunk at the given address. If a length is given,
-   * only decompresses that amount of data instead of the entire chunk.
-   * Returns whether decompression succeeded (true) or failed (false). */
-  bool DecompressChunk(void *where, size_t chunk, size_t length = 0);
- 
-  /* Returns the uncompressed size of the complete zstream */
-  size_t GetUncompressedSize() const
-  {
-    return (offsetTable.numElements() - 1) * chunkSize + lastChunkSize;
-  }
-
-  /* Returns the chunk size of the given chunk */
-  size_t GetChunkSize(size_t chunk = 0) const {
-    return (chunk == offsetTable.numElements() - 1) ? lastChunkSize : chunkSize;
-  }
-
-  /* Returns the number of chunks */
-  size_t GetChunksNum() const {
-    return offsetTable.numElements();
-  }
-
-  /**
-   * Filters used to improve compression rate.
-   */
-  enum FilterDirection {
-    FILTER,
-    UNFILTER
-  };
-  typedef void (*ZStreamFilter)(off_t, FilterDirection,
-                                  unsigned char *, size_t);
-
-  enum FilterId {
-    NONE,
-    BCJ_THUMB,
-    BCJ_ARM,
-    BCJ_X86,
-    FILTER_MAX
-  };
-  static ZStreamFilter GetFilter(FilterId id);
-
-  static ZStreamFilter GetFilter(uint16_t id) {
-    return GetFilter(static_cast<FilterId>(id));
-  }
-
-private:
-  /* RAW Seekable SZtream buffer */
-  const unsigned char *buffer;
-
-  /* Total size of the stream, including the 4 magic bytes. */
-  uint32_t totalSize;
-
-  /* Chunk size */
-  uint32_t chunkSize;
-
-  /* Size of last chunk (> 0, <= Chunk size) */
-  uint32_t lastChunkSize;
-
-  /* windowBits value used when deflating */
-  int windowBits;
-
-  /* Offsets table */
-  Array<le_uint32> offsetTable;
-
-  /* Filter */
-  ZStreamFilter filter;
-
-  /* Deflate dictionary */
-  Array<unsigned char> dictionary;
-
-  /* Special allocator for inflate to use the same buffers for every chunk */
-  zxx_stream::StaticAllocator allocator;
-};
-
-inline void
-operator++(SeekableZStream::FilterId &other)
-{
-  const int orig = static_cast<int>(other);
-  other = static_cast<SeekableZStream::FilterId>(orig + 1);
-}
-
-#endif /* SeekableZStream_h */
--- a/mozglue/linker/XZStream.cpp
+++ b/mozglue/linker/XZStream.cpp
@@ -1,11 +1,12 @@
 #include "XZStream.h"
 
 #include <algorithm>
+#include <cstring>
 #include "mozilla/Assertions.h"
 #include "Logging.h"
 
 // LZMA dictionary size, should have a minimum size for the given compression
 // rate, see XZ Utils docs for details.
 static const uint32_t kDictSize = 1 << 24;
 
 static const size_t kFooterSize = 12;
--- a/mozglue/linker/moz.build
+++ b/mozglue/linker/moz.build
@@ -4,51 +4,30 @@
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 SOURCES += [
     'BaseElf.cpp',
     'CustomElf.cpp',
     'ElfLoader.cpp',
     'Mappable.cpp',
-    'SeekableZStream.cpp',
     'XZStream.cpp',
     'Zip.cpp',
 ]
 
 Library('linker')
 
-HOST_SOURCES += [
-    'SeekableZStream.cpp',
-    'szip.cpp',
-]
-
-HostProgram('szip')
-
 FINAL_LIBRARY = 'mozglue'
 
 DEFINES['IMPL_MFBT'] = True
 
 DISABLE_STL_WRAPPING = True
 
 TEST_DIRS += ['tests']
 
-HOST_OS_LIBS += [
-    'z',
-]
-
-if CONFIG['TARGET_CPU'] == 'arm':
-    if CONFIG['MOZ_THUMB2']:
-        HOST_DEFINES['TARGET_THUMB'] = True
-    else:
-        HOST_DEFINES['TARGET_ARM'] = True
-
-if CONFIG['CPU_ARCH'] == 'x86':
-    HOST_DEFINES['TARGET_X86'] = True
-
 if CONFIG['GNU_CXX']:
     CXXFLAGS += ['-Wno-error=shadow']
 
 DEFINES['XZ_USE_CRC64'] = 1
 
 USE_LIBS += [
     'xz-embedded',
 ]
deleted file mode 100644
--- a/mozglue/linker/szip.cpp
+++ /dev/null
@@ -1,594 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include <algorithm>
-#include <map>
-#include <sys/stat.h>
-#include <string>
-#include <sstream>
-#include <cstring>
-#include <cstdlib>
-#include <zlib.h>
-#include <fcntl.h>
-#include <errno.h>
-#include "mozilla/Assertions.h"
-#include "mozilla/Scoped.h"
-#include "mozilla/SizePrintfMacros.h"
-#include "mozilla/UniquePtr.h"
-#include "SeekableZStream.h"
-#include "Utils.h"
-#include "Logging.h"
-
-Logging Logging::Singleton;
-
-const char *filterName[] = {
-  "none",
-  "thumb",
-  "arm",
-  "x86",
-  "auto"
-};
-
-/* Maximum supported size for chunkSize */
-static const size_t maxChunkSize =
-  1 << (8 * std::min(sizeof(((SeekableZStreamHeader *)nullptr)->chunkSize),
-                     sizeof(((SeekableZStreamHeader *)nullptr)->lastChunkSize)) - 1);
-
-class Buffer: public MappedPtr
-{
-public:
-  virtual ~Buffer() { }
-
-  virtual bool Resize(size_t size)
-  {
-    MemoryRange buf = mmap(nullptr, size, PROT_READ | PROT_WRITE,
-                           MAP_PRIVATE | MAP_ANON, -1, 0);
-    if (buf == MAP_FAILED)
-      return false;
-    if (*this != MAP_FAILED)
-      memcpy(buf, *this, std::min(size, GetLength()));
-    Assign(buf);
-    return true;
-  }
-
-  bool Fill(Buffer &other)
-  {
-    size_t size = other.GetLength();
-    if (!size || !Resize(size))
-      return false;
-    memcpy(static_cast<void *>(*this), static_cast<void *>(other), size);
-    return true;
-  }
-};
-
-class FileBuffer: public Buffer
-{
-public:
-  bool Init(const char *name, bool writable_ = false)
-  {
-    fd = open(name, writable_ ? O_RDWR | O_CREAT | O_TRUNC : O_RDONLY, 0666);
-    if (fd == -1)
-      return false;
-    writable = writable_;
-    return true;
-  }
-
-  virtual bool Resize(size_t size)
-  {
-    if (writable) {
-      if (ftruncate(fd, size) == -1)
-        return false;
-    }
-    Assign(MemoryRange::mmap(nullptr, size,
-                             PROT_READ | (writable ? PROT_WRITE : 0),
-                             writable ? MAP_SHARED : MAP_PRIVATE, fd, 0));
-    return this != MAP_FAILED;
-  }
-
-  int getFd()
-  {
-    return fd;
-  }
-
-private:
-  AutoCloseFD fd;
-  bool writable;
-};
-
-class FilteredBuffer: public Buffer
-{
-public:
-  void Filter(Buffer &other, SeekableZStream::FilterId filter, size_t chunkSize)
-  {
-    SeekableZStream::ZStreamFilter filterCB =
-      SeekableZStream::GetFilter(filter);
-    MOZ_ASSERT(filterCB);
-    Fill(other);
-    size_t size = other.GetLength();
-    Bytef *data = reinterpret_cast<Bytef *>(static_cast<void *>(*this));
-    size_t avail = 0;
-    /* Filter needs to be applied in chunks. */
-    while (size) {
-      avail = std::min(size, chunkSize);
-      filterCB(data - static_cast<unsigned char *>(static_cast<void *>(*this)),
-               SeekableZStream::FILTER, data, avail);
-      size -= avail;
-      data += avail;
-    }
-  }
-};
-
-template <typename T>
-class Dictionary: public Buffer
-{
-  typedef T piece;
-  typedef std::pair<piece, int> stat_pair;
-
-  static bool stat_cmp(stat_pair a, stat_pair b)
-  {
-    return a.second < b.second;
-  }
-
-public:
-  Dictionary(Buffer &inBuf, size_t size)
-  {
-    if (!size || !Resize(size))
-      return;
-    DEBUG_LOG("Creating dictionary");
-    piece *origBufPieces = reinterpret_cast<piece *>(
-                           static_cast<void *>(inBuf));
-    std::map<piece, int> stats;
-    for (unsigned int i = 0; i < inBuf.GetLength() / sizeof(piece); i++) {
-      stats[origBufPieces[i]]++;
-    }
-    std::vector<stat_pair> statsVec(stats.begin(), stats.end());
-    std::sort(statsVec.begin(), statsVec.end(), stat_cmp);
-
-    piece *dictPieces = reinterpret_cast<piece *>(
-                        static_cast<void *>(*this));
-    typename std::vector<stat_pair>::reverse_iterator it = statsVec.rbegin();
-    for (int i = size / sizeof(piece); i > 0 && it < statsVec.rend();
-         i--, ++it) {
-      dictPieces[i - 1] = it->first;
-    }
-  }
-};
-
-class SzipAction
-{
-public:
-  virtual int run(const char *name, Buffer &origBuf,
-                  const char *outName, Buffer &outBuf) = 0;
-
-  virtual ~SzipAction() {}
-};
-
-class SzipDecompress: public SzipAction
-{
-public:
-  int run(const char *name, Buffer &origBuf,
-          const char *outName, Buffer &outBuf);
-};
-
-
-class SzipCompress: public SzipAction
-{
-public:
-  int run(const char *name, Buffer &origBuf,
-          const char *outName, Buffer &outBuf);
-
-  SzipCompress(size_t aChunkSize, SeekableZStream::FilterId aFilter,
-               size_t aDictSize)
-  : chunkSize(aChunkSize ? aChunkSize : 16384)
-  , filter(aFilter)
-  , dictSize(aDictSize)
-  {}
-
-  const static signed char winSizeLog = 15;
-  const static size_t winSize = 1 << winSizeLog;
-
-  const static SeekableZStream::FilterId DEFAULT_FILTER =
-#if defined(TARGET_THUMB)
-    SeekableZStream::BCJ_THUMB;
-#elif defined(TARGET_ARM)
-    SeekableZStream::BCJ_ARM;
-#elif defined(TARGET_X86)
-    SeekableZStream::BCJ_X86;
-#else
-    SeekableZStream::NONE;
-#endif
-
-private:
-
-  int do_compress(Buffer &origBuf, Buffer &outBuf, const unsigned char *aDict,
-                  size_t aDictSize, SeekableZStream::FilterId aFilter);
-
-  size_t chunkSize;
-  SeekableZStream::FilterId filter;
-  size_t dictSize;
-};
-
-/* Decompress a seekable compressed stream */
-int SzipDecompress::run(const char *name, Buffer &origBuf,
-                        const char *outName, Buffer &outBuf)
-{
-  size_t origSize = origBuf.GetLength();
-  if (origSize < sizeof(SeekableZStreamHeader)) {
-    ERROR("%s is not compressed", name);
-    return 0;
-  }
-
-  SeekableZStream zstream;
-  if (!zstream.Init(origBuf, origSize))
-    return 0;
-
-  size_t size = zstream.GetUncompressedSize();
-
-  /* Give enough room for the uncompressed data */
-  if (!outBuf.Resize(size)) {
-    ERROR("Error resizing %s: %s", outName, strerror(errno));
-    return 1;
-  }
-
-  if (!zstream.Decompress(outBuf, 0, size))
-    return 1;
-
-  return 0;
-}
-
-/* Generate a seekable compressed stream. */
-int SzipCompress::run(const char *name, Buffer &origBuf,
-                      const char *outName, Buffer &outBuf)
-{
-  size_t origSize = origBuf.GetLength();
-  if (origSize == 0) {
-    ERROR("Won't compress %s: it's empty", name);
-    return 1;
-  }
-  if (SeekableZStreamHeader::validate(origBuf)) {
-    WARN("Skipping %s: it's already a szip", name);
-    return 0;
-  }
-  bool compressed = false;
-  LOG("Size = %" PRIuSIZE, origSize);
-
-  /* Allocate a buffer the size of the uncompressed data: we don't want
-   * a compressed file larger than that anyways. */
-  if (!outBuf.Resize(origSize)) {
-    ERROR("Couldn't allocate output buffer: %s", strerror(errno));
-    return 1;
-  }
-
-  /* Find the most appropriate filter */
-  SeekableZStream::FilterId firstFilter, lastFilter;
-  bool scanFilters;
-  if (filter == SeekableZStream::FILTER_MAX) {
-    firstFilter = SeekableZStream::NONE;
-    lastFilter = SeekableZStream::FILTER_MAX;
-    scanFilters = true;
-  } else {
-    firstFilter = lastFilter = filter;
-    ++lastFilter;
-    scanFilters = false;
-  }
-
-  mozilla::UniquePtr<Buffer> filteredBuf;
-  Buffer *origData;
-  for (SeekableZStream::FilterId f = firstFilter; f < lastFilter; ++f) {
-    mozilla::UniquePtr<FilteredBuffer> filteredTmp;
-    Buffer tmpBuf;
-    if (f != SeekableZStream::NONE) {
-      DEBUG_LOG("Applying filter \"%s\"", filterName[f]);
-      filteredTmp = mozilla::MakeUnique<FilteredBuffer>();
-      filteredTmp->Filter(origBuf, f, chunkSize);
-      origData = filteredTmp.get();
-    } else {
-      origData = &origBuf;
-    }
-    if (dictSize  && !scanFilters) {
-      filteredBuf = mozilla::Move(filteredTmp);
-      break;
-    }
-    DEBUG_LOG("Compressing with no dictionary");
-    if (do_compress(*origData, tmpBuf, nullptr, 0, f) == 0) {
-      if (tmpBuf.GetLength() < outBuf.GetLength()) {
-        outBuf.Fill(tmpBuf);
-        compressed = true;
-        filter = f;
-        filteredBuf = mozilla::Move(filteredTmp);
-        continue;
-      }
-    }
-  }
-
-  origData = filteredBuf ? filteredBuf.get() : &origBuf;
-
-  if (dictSize) {
-    Dictionary<uint64_t> dict(*origData, dictSize ? SzipCompress::winSize : 0);
-
-    /* Find the most appropriate dictionary size */
-    size_t firstDictSize, lastDictSize;
-    if (dictSize == (size_t) -1) {
-      /* If we scanned for filters, we effectively already tried dictSize=0 */
-      firstDictSize = scanFilters ? 4096 : 0;
-      lastDictSize = SzipCompress::winSize;
-    } else {
-      firstDictSize = lastDictSize = dictSize;
-    }
-
-    Buffer tmpBuf;
-    for (size_t d = firstDictSize; d <= lastDictSize; d += 4096) {
-      DEBUG_LOG("Compressing with dictionary of size %" PRIuSIZE, d);
-      if (do_compress(*origData, tmpBuf, static_cast<unsigned char *>(dict)
-                      + SzipCompress::winSize - d, d, filter))
-        continue;
-      if (!compressed || tmpBuf.GetLength() < outBuf.GetLength()) {
-        outBuf.Fill(tmpBuf);
-        compressed = true;
-        dictSize = d;
-      }
-    }
-  }
-
-  if (!compressed) {
-    outBuf.Fill(origBuf);
-    LOG("Not compressed");
-    return 0;
-  }
-
-  if (dictSize == (size_t) -1)
-    dictSize = 0;
-
-  DEBUG_LOG("Used filter \"%s\" and dictionary size of %" PRIuSIZE,
-            filterName[filter], dictSize);
-  LOG("Compressed size is %" PRIuSIZE, outBuf.GetLength());
-
-  /* Sanity check */
-  Buffer tmpBuf;
-  SzipDecompress decompress;
-  if (decompress.run("buffer", outBuf, "buffer", tmpBuf))
-    return 1;
-
-  size_t size = tmpBuf.GetLength();
-  if (size != origSize) {
-    ERROR("Compression error: %" PRIuSIZE " != %" PRIuSIZE, size, origSize);
-    return 1;
-  }
-  if (memcmp(static_cast<void *>(origBuf), static_cast<void *>(tmpBuf), size)) {
-    ERROR("Compression error: content mismatch");
-    return 1;
-  }
-  return 0;
-}
-
-int SzipCompress::do_compress(Buffer &origBuf, Buffer &outBuf,
-                              const unsigned char *aDict, size_t aDictSize,
-                              SeekableZStream::FilterId aFilter)
-{
-  size_t origSize = origBuf.GetLength();
-  MOZ_ASSERT(origSize != 0);
-
-  /* Expected total number of chunks */
-  size_t nChunks = ((origSize + chunkSize - 1) / chunkSize);
-
-  /* The first chunk is going to be stored after the header, the dictionary
-   * and the offset table */
-  size_t offset = sizeof(SeekableZStreamHeader) + aDictSize
-                  + nChunks * sizeof(uint32_t);
-
-  if (offset >= origSize)
-    return 1;
-
-    /* Allocate a buffer the size of the uncompressed data: we don't want
-   * a compressed file larger than that anyways. */
-  if (!outBuf.Resize(origSize)) {
-    ERROR("Couldn't allocate output buffer: %s", strerror(errno));
-    return 1;
-  }
-
-  SeekableZStreamHeader *header = new (outBuf) SeekableZStreamHeader;
-  unsigned char *dictionary = static_cast<unsigned char *>(
-                              outBuf + sizeof(SeekableZStreamHeader));
-  le_uint32 *entry =
-    reinterpret_cast<le_uint32 *>(dictionary + aDictSize);
-
-  /* Initialize header */
-  header->chunkSize = chunkSize;
-  header->dictSize = aDictSize;
-  header->totalSize = offset;
-  header->windowBits = -SzipCompress::winSizeLog; // Raw stream,
-                                                  // window size of 32k.
-  header->filter = aFilter;
-  if (aDictSize)
-    memcpy(dictionary, aDict, aDictSize);
-
-  /* Initialize zlib structure */
-  z_stream zStream;
-  memset(&zStream, 0, sizeof(zStream));
-  zStream.avail_out = origSize - offset;
-  zStream.next_out = static_cast<Bytef*>(outBuf) + offset;
-
-  size_t avail = 0;
-  size_t size = origSize;
-  unsigned char *data = reinterpret_cast<unsigned char *>(
-                        static_cast<void *>(origBuf));
-  while (size) {
-    avail = std::min(size, chunkSize);
-
-    /* Compress chunk */
-    int ret = deflateInit2(&zStream, 9, Z_DEFLATED, header->windowBits,
-                           MAX_MEM_LEVEL, Z_DEFAULT_STRATEGY);
-    if (aDictSize)
-      deflateSetDictionary(&zStream, dictionary, aDictSize);
-    MOZ_ASSERT(ret == Z_OK);
-    zStream.avail_in = avail;
-    zStream.next_in = data;
-    ret = deflate(&zStream, Z_FINISH);
-    /* Under normal conditions, deflate returns Z_STREAM_END. If there is not
-     * enough room to compress, deflate returns Z_OK and avail_out is 0. We
-     * still want to deflateEnd in that case, so fall through. It will bail
-     * on the avail_out test that follows. */
-    MOZ_ASSERT(ret == Z_STREAM_END || ret == Z_OK);
-    ret = deflateEnd(&zStream);
-    MOZ_ASSERT(ret == Z_OK);
-    if (zStream.avail_out <= 0)
-      return 1;
-
-    size_t len = origSize - offset - zStream.avail_out;
-
-    /* Adjust headers */
-    header->totalSize += len;
-    *entry++ = offset;
-    header->nChunks++;
-
-    /* Prepare for next iteration */
-    size -= avail;
-    data += avail;
-    offset += len;
-  }
-  header->lastChunkSize = avail;
-  MOZ_ASSERT(header->totalSize == offset);
-  MOZ_ASSERT(header->nChunks == nChunks);
-
-  if (!outBuf.Resize(offset)) {
-    ERROR("Error truncating output: %s", strerror(errno));
-    return 1;
-  }
-
-  return 0;
-
-}
-
-bool GetSize(const char *str, size_t *out)
-{
-  char *end;
-  MOZ_ASSERT(out);
-  errno = 0;
-  *out = strtol(str, &end, 10);
-  return (!errno && !*end);
-}
-
-int main(int argc, char* argv[])
-{
-  mozilla::UniquePtr<SzipAction> action;
-  char **firstArg;
-  bool compress = true;
-  size_t chunkSize = 0;
-  SeekableZStream::FilterId filter = SzipCompress::DEFAULT_FILTER;
-  size_t dictSize = (size_t) 0;
-
-  Logging::Init();
-
-  for (firstArg = &argv[1]; argc > 2; argc--, firstArg++) {
-    if (!firstArg[0] || firstArg[0][0] != '-')
-      break;
-    if (strcmp(firstArg[0], "-d") == 0) {
-      compress = false;
-    } else if (strcmp(firstArg[0], "-c") == 0) {
-      firstArg++;
-      argc--;
-      if (!firstArg[0])
-        break;
-      if (!GetSize(firstArg[0], &chunkSize) || !chunkSize ||
-          (chunkSize % 4096) || (chunkSize > maxChunkSize)) {
-        ERROR("Invalid chunk size");
-        return 1;
-      }
-    } else if (strcmp(firstArg[0], "-f") == 0) {
-      firstArg++;
-      argc--;
-      if (!firstArg[0])
-        break;
-      bool matched = false;
-      for (unsigned int i = 0; i < sizeof(filterName) / sizeof(char *); ++i) {
-        if (strcmp(firstArg[0], filterName[i]) == 0) {
-          filter = static_cast<SeekableZStream::FilterId>(i);
-          matched = true;
-          break;
-        }
-      }
-      if (!matched) {
-        ERROR("Invalid filter");
-        return 1;
-      }
-    } else if (strcmp(firstArg[0], "-D") == 0) {
-      firstArg++;
-      argc--;
-      if (!firstArg[0])
-        break;
-      if (strcmp(firstArg[0], "auto") == 0) {
-        dictSize = -1;
-      } else if (!GetSize(firstArg[0], &dictSize) || (dictSize >= 1 << 16)) {
-        ERROR("Invalid dictionary size");
-        return 1;
-      }
-    }
-  }
-
-  if (argc != 2 || !firstArg[0]) {
-    LOG("usage: %s [-d] [-c CHUNKSIZE] [-f FILTER] [-D DICTSIZE] file",
-        argv[0]);
-    return 1;
-  }
-
-  if (compress) {
-    action.reset(new SzipCompress(chunkSize, filter, dictSize));
-  } else {
-    if (chunkSize) {
-      ERROR("-c is incompatible with -d");
-      return 1;
-    }
-    if (dictSize) {
-      ERROR("-D is incompatible with -d");
-      return 1;
-    }
-    action.reset(new SzipDecompress());
-  }
-
-  std::stringstream tmpOutStream;
-  tmpOutStream << firstArg[0] << ".sz." << getpid();
-  std::string tmpOut(tmpOutStream.str());
-  int ret;
-  struct stat st;
-  {
-    FileBuffer origBuf;
-    if (!origBuf.Init(firstArg[0])) {
-      ERROR("Couldn't open %s: %s", firstArg[0], strerror(errno));
-      return 1;
-    }
-
-    ret = fstat(origBuf.getFd(), &st);
-    if (ret == -1) {
-      ERROR("Couldn't stat %s: %s", firstArg[0], strerror(errno));
-      return 1;
-    }
-
-    size_t origSize = st.st_size;
-
-    /* Mmap the original file */
-    if (!origBuf.Resize(origSize)) {
-      ERROR("Couldn't mmap %s: %s", firstArg[0], strerror(errno));
-      return 1;
-    }
-
-    /* Create the compressed file */
-    FileBuffer outBuf;
-    if (!outBuf.Init(tmpOut.c_str(), true)) {
-      ERROR("Couldn't open %s: %s", tmpOut.c_str(), strerror(errno));
-      return 1;
-    }
-
-    ret = action->run(firstArg[0], origBuf, tmpOut.c_str(), outBuf);
-    if ((ret == 0) && (fstat(outBuf.getFd(), &st) == -1)) {
-      st.st_size = 0;
-    }
-  }
-
-  if ((ret == 0) && st.st_size) {
-    rename(tmpOut.c_str(), firstArg[0]);
-  } else {
-    unlink(tmpOut.c_str());
-  }
-  return ret;
-}
--- a/toolkit/components/startup/StartupTimeline.h
+++ b/toolkit/components/startup/StartupTimeline.h
@@ -30,28 +30,16 @@ mozilla_StartupTimeline_Event(PROFILE_BE
 
 #include "mozilla/TimeStamp.h"
 #include "nscore.h"
 
 #ifdef MOZILLA_INTERNAL_API
 #include "GeckoProfiler.h"
 #endif
 
-#ifdef MOZ_LINKER
-extern "C" {
-/* This symbol is resolved by the custom linker. The function it resolves
- * to dumps some statistics about the linker at the key events recorded
- * by the startup timeline. */
-extern void __moz_linker_stats(const char *str)
-NS_VISIBILITY_DEFAULT __attribute__((weak));
-} /* extern "C" */
-#else
-
-#endif
-
 namespace mozilla {
 
 void RecordShutdownEndTimeStamp();
 void RecordShutdownStartTimeStamp();
 
 class StartupTimeline {
 public:
   enum Event {
@@ -72,20 +60,16 @@ public:
 #ifdef MOZILLA_INTERNAL_API
   static void Record(Event ev) {
     profiler_add_marker(Describe(ev));
     Record(ev, TimeStamp::Now());
   }
 
   static void Record(Event ev, TimeStamp when) {
     sStartupTimeline[ev] = when;
-#ifdef MOZ_LINKER
-    if (__moz_linker_stats)
-      __moz_linker_stats(Describe(ev));
-#endif
   }
 
   static void RecordOnce(Event ev) {
     if (!HasRecord(ev))
       Record(ev);
   }
 #endif