view mercurial/similar.py @ 44118:f81c17ec303c

hgdemandimport: apply lazy module loading to sys.meta_path finders

Python's `sys.meta_path` finders are the primary objects whose job it is to find a module at import time. When `import` is called, Python iterates objects in this list and calls `o.find_spec(...)` to find a `ModuleSpec` (or None if the module couldn't be found by that finder). If no meta path finder can find a module, import fails.

One of the default meta path finders is `PathFinder`. Its job is to import modules from the filesystem, and it is probably the most important importer. This finder looks at `sys.path` and `sys.path_hooks` to do its job.

The `ModuleSpec` returned by `MetaPathFinder.find_spec()` has a `loader` attribute, which defines the concrete module loader to use. `sys.path_hooks` is a hook point for teaching `PathFinder` to instantiate custom loader types.

Previously, we injected a custom `sys.path_hook` that told `PathFinder` to wrap the default loaders with a loader that creates a module object that is lazy.

This approach worked. But its main limitation was that it only applied to the `PathFinder` meta path importer. There are other meta path importers that are registered. And in the case of PyOxidizer loading modules from memory, `PathFinder` doesn't come into play, since PyOxidizer's own meta path importer was handling all imports.

This commit changes our approach to lazy module loading by proxying all meta path importers. Specifically, we overload the `find_spec()` method to swap in a wrapped loader on the `ModuleSpec` before it is returned. The end result is that all meta path importers should be lazy.

As much as I would have loved to utilize `.__class__` manipulation to achieve this, some meta path importers are implemented in C/Rust in such a way that they cannot be monkeypatched. This is why we use `__getattribute__` to define a proxy.

Also, this change could theoretically open us up to regressions in meta path importers whose loaders create module objects that can't be monkeypatched. But I'm not aware of any of these in the wild, so I think we'll be safe.

According to hyperfine, this change yields a decent startup time win of 5-6ms:

```
Benchmark #1: ~/.pyenv/versions/3.6.10/bin/python ./hg version

  Time (mean ± σ):      86.8 ms ±  0.5 ms    [User: 78.0 ms, System: 8.7 ms]
  Range (min … max):    86.0 ms … 89.1 ms    50 runs

  Time (mean ± σ):      81.1 ms ±  2.7 ms    [User: 74.5 ms, System: 6.5 ms]
  Range (min … max):    77.8 ms … 90.5 ms    50 runs

Benchmark #2: ~/.pyenv/versions/3.7.6/bin/python ./hg version

  Time (mean ± σ):      78.9 ms ±  0.6 ms    [User: 70.2 ms, System: 8.7 ms]
  Range (min … max):    78.1 ms … 81.2 ms    50 runs

  Time (mean ± σ):      73.4 ms ±  0.6 ms    [User: 65.3 ms, System: 8.0 ms]
  Range (min … max):    72.4 ms … 75.7 ms    50 runs

Benchmark #3: ~/.pyenv/versions/3.8.1/bin/python ./hg version

  Time (mean ± σ):      78.1 ms ±  0.6 ms    [User: 70.2 ms, System: 7.9 ms]
  Range (min … max):    77.4 ms … 80.9 ms    50 runs

  Time (mean ± σ):      72.1 ms ±  0.4 ms    [User: 64.4 ms, System: 7.6 ms]
  Range (min … max):    71.4 ms … 74.1 ms    50 runs
```

Differential Revision: https://phab.mercurial-scm.org/D7954
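The sketch below is not the commit's actual code, only a minimal illustration of the technique it describes: every finder on `sys.meta_path` is wrapped in a `__getattribute__`-based proxy whose `find_spec()` swaps a wrapping loader onto the returned spec. The `_finderproxy` name is made up here, and the stdlib's `importlib.util.LazyLoader` stands in for Mercurial's own lazy module machinery:

```python
# Minimal sketch of the proxy technique, not hgdemandimport's real
# code. ``_finderproxy`` is an illustrative name, and
# importlib.util.LazyLoader stands in for Mercurial's lazy module
# type. A production version must be choosier about which finders and
# loaders are safe to wrap.
import importlib.util
import sys


class _finderproxy(object):
    # Proxy via __getattribute__ instead of subclassing or patching,
    # because some finders are implemented in C/Rust in such a way
    # that they cannot be monkeypatched.
    def __init__(self, finder):
        self._finder = finder

    def __getattribute__(self, name):
        finder = object.__getattribute__(self, '_finder')

        if name != 'find_spec':
            # Everything except find_spec passes through untouched.
            return getattr(finder, name)

        def find_spec(fullname, path, target=None):
            spec = finder.find_spec(fullname, path, target=target)

            # Swap in a wrapping loader before the spec is returned,
            # so the module body only executes on first attribute
            # access.
            if spec is not None and spec.loader is not None:
                spec.loader = importlib.util.LazyLoader(spec.loader)

            return spec

        return find_spec


# Wrap every registered meta path finder, whatever its type.
sys.meta_path = [_finderproxy(f) for f in sys.meta_path]
```

Proxying at `find_spec()` time is what makes this finder-agnostic: whether `PathFinder` or PyOxidizer's in-memory importer locates the module, the spec it hands back carries a lazy loader.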
author Gregory Szorc <gregory.szorc@gmail.com>
date Mon, 20 Jan 2020 23:51:25 -0800
parents d783f945a701
children 89a2afe31e82
line source

# similar.py - mechanisms for finding similar files
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from .i18n import _
from . import (
    mdiff,
    pycompat,
)


def _findexactmatches(repo, added, removed):
    '''find renamed files that have no changes

    Takes a list of new filectxs and a list of removed filectxs, and yields
    (before, after) tuples of exact matches.
    '''
    # Build table of removed files: {hash(fctx.data()): [fctx, ...]}.
    # We use hash() to discard fctx.data() from memory.
    hashes = {}
    progress = repo.ui.makeprogress(
        _(b'searching for exact renames'),
        total=(len(added) + len(removed)),
        unit=_(b'files'),
    )
    for fctx in removed:
        progress.increment()
        h = hash(fctx.data())
        if h not in hashes:
            hashes[h] = [fctx]
        else:
            hashes[h].append(fctx)

    # For each added file, see if it corresponds to a removed file.
    for fctx in added:
        progress.increment()
        adata = fctx.data()
        h = hash(adata)
        for rfctx in hashes.get(h, []):
            # compare between actual file contents for exact identity
            if adata == rfctx.data():
                yield (rfctx, fctx)
                break

    # Done
    progress.complete()


def _ctxdata(fctx):
    # load the text and split it into lines; callers defer invoking
    # this until a comparison is actually needed
    orig = fctx.data()
    return orig, mdiff.splitnewlines(orig)


def _score(fctx, otherdata):
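    '''return a similarity score for ``fctx`` against ``otherdata``,
    the (text, lines) pair produced by ``_ctxdata()``

    The score is 2 * matchedbytes / (len(text) + len(orig)): 1.0 means
    the contents are identical, 0.0 means no lines match at all.
    '''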
    orig, lines = otherdata
    text = fctx.data()
    # mdiff.blocks() returns blocks of matching lines
    # count the number of bytes in each
    equal = 0
    matches = mdiff.blocks(text, orig)
    for x1, x2, y1, y2 in matches:
        for line in lines[y1:y2]:
            equal += len(line)

    lengths = len(text) + len(orig)
    return equal * 2.0 / lengths


def score(fctx1, fctx2):
    return _score(fctx1, _ctxdata(fctx2))


def _findsimilarmatches(repo, added, removed, threshold):
    '''find potentially renamed files based on similar file content

    Takes a list of new filectxs and a list of removed filectxs, and yields
    (before, after, score) tuples of partial matches.
    '''
    copies = {}
    progress = repo.ui.makeprogress(
        _(b'searching for similar files'), unit=_(b'files'), total=len(removed)
    )
    for r in removed:
        progress.increment()
        data = None
        for a in added:
            bestscore = copies.get(a, (None, threshold))[1]
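            # split the removed file into lines at most once, and only
            # when there is at least one added file to compare against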
            if data is None:
                data = _ctxdata(r)
            myscore = _score(a, data)
            if myscore > bestscore:
                copies[a] = (r, myscore)
    progress.complete()

    for dest, v in pycompat.iteritems(copies):
        source, bscore = v
        yield source, dest, bscore


def _dropempty(fctxs):
    return [x for x in fctxs if x.size() > 0]


def findrenames(repo, added, removed, threshold):
    '''find renamed files -- yields (before, after, score) tuples'''
    wctx = repo[None]
    pctx = wctx.p1()

    # Zero length files will be frequently unrelated to each other, and
    # tracking the deletion/addition of such a file will probably cause more
    # harm than good. We strip them out here to avoid matching them later on.
    addedfiles = _dropempty(wctx[fp] for fp in sorted(added))
    removedfiles = _dropempty(pctx[fp] for fp in sorted(removed) if fp in pctx)

    # Find exact matches.
    matchedfiles = set()
    for (a, b) in _findexactmatches(repo, addedfiles, removedfiles):
        matchedfiles.add(b)
        yield (a.path(), b.path(), 1.0)

    # If the user requested similar files to be matched, search for them also.
    if threshold < 1.0:
        addedfiles = [x for x in addedfiles if x not in matchedfiles]
        for (a, b, score) in _findsimilarmatches(
            repo, addedfiles, removedfiles, threshold
        ):
            yield (a.path(), b.path(), score)