comparison mercurial/utils/procutil.py @ 46889:8759e22f1649

procutil: avoid using os.fork() to implement runbgcommand

We ran into the following deadlock:
- some command creates an ssh peer, then raises without explicitly closing
  the peer (hg id + extension in our case)
- dispatch catches the exception, calls ui.log('commandfinish', ..) (the
  sshpeer is still not closed), which calls logtoprocess, which calls
  procutil.runbgcommand
- in the child of runbgcommand's fork(), between the fork and the exec, the
  opening of file descriptors triggers a gc which runs the destructor for
  sshpeer, which waits on ssh's stderr being closed, which never happens
  since ssh's stderr is held open by the parent of the fork, where said
  destructor hasn't run

Remotefilelog appears to have a hack around this deadlock as well. I don't
know if there's more subtlety to it, because even though the problem is
deterministic, it is very fragile, so I didn't manage to reduce it.

I can imagine three ways of tackling this problem:
1. don't run any python between fork and exec in runbgcommand
2. make the finalizer harmless after the fork
3. close the peer without relying on gc behavior

This commit goes with 1, as forking without exec'ing is tricky in general in
a language with gc finalizers. And maybe it's better in the presence of rust
threads. A future commit will try 2 or 3.

Performance-wise: at low memory usage, it's an improvement. At higher memory
usage, it's about 2x faster than before when ensurestart=True, but 2x slower
when ensurestart=False. Not sure if that matters. The reason for that last
bit is that subprocess.Popen always waits for the execve to finish, and at
high memory usage, execve is slow because it deallocates the large page
table.

Numbers and script:

                                   before    after
    mem=1.0GB, ensurestart=True    52.1ms   26.0ms
    mem=1.0GB, ensurestart=False   14.7ms   26.0ms
    mem=0.5GB, ensurestart=True    23.2ms   11.2ms
    mem=0.5GB, ensurestart=False    6.2ms   11.3ms
    mem=0.2GB, ensurestart=True    15.7ms    7.4ms
    mem=0.2GB, ensurestart=False    4.3ms    8.1ms
    mem=0.0GB, ensurestart=True     2.3ms    0.7ms
    mem=0.0GB, ensurestart=False    0.8ms    0.8ms

    import time
    for memsize in [1_000_000_000, 500_000_000, 250_000_000, 0]:
        mem = 'a' * memsize
        for ensurestart in [True, False]:
            now = time.time()
            n = 100
            for i in range(n):
                procutil.runbgcommand([b'true'], {}, ensurestart=ensurestart)
            after = time.time()
            ms = (after - now) / float(n) * 1000
            print(f'mem={memsize / 1e9:.1f}GB, ensurestart={ensurestart} -> {ms:.1f}ms')

Differential Revision: https://phab.mercurial-scm.org/D9019
author Valentin Gatien-Baron <valentin.gatienbaron@gmail.com>
date Sun, 13 Sep 2020 22:14:25 -0400
parents d4ba4d51f85f
children 333a2656e981
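The fix this changeset settles on (option 1 in the commit message above) is to let subprocess.Popen and the shell do all of the detaching, so that no Python code, and therefore no gc finalizer such as sshpeer.__del__, can run between fork and exec. The standalone sketch below illustrates that technique outside of Mercurial; the helper name spawn_detached and the use of shlex are choices made for this example, not part of the patch.

    # Minimal standalone sketch (not the procutil code) of "option 1": have
    # subprocess.Popen and /bin/sh do the forking, so no Python code (and no
    # gc finalizer like sshpeer.__del__) runs between fork and exec.
    import shlex
    import subprocess


    def spawn_detached(argv):
        # Quote each argument, then wrap the command in "( ... ) &" so the
        # shell backgrounds it; once the shell exits, the command is
        # re-parented to init and fully detached from the current process.
        script = '( %s ) &' % ' '.join(shlex.quote(a) for a in argv)
        subprocess.Popen(
            script,
            shell=True,
            close_fds=True,
            stdin=subprocess.DEVNULL,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
            # like setsid(): new session, no controlling terminal
            start_new_session=True,
        ).wait()  # reap the short-lived outer shell so it is not left a zombie


    if __name__ == '__main__':
        spawn_detached(['sleep', '5'])

Because Popen only returns once the shell's execve has happened, the caller still pays for that execve even when it does not need the command to have started, which is the 2x slowdown at high memory usage that the commit message reports for ensurestart=False.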
comparison
46888:218a26df7813 vs 46889:8759e22f1649
 699  699                  stdin.close()
 700  700
 701  701
 702  702  else:
 703  703
 704            def runbgcommand(
      704      def runbgcommandpy3(
      705          cmd,
      706          env,
      707          shell=False,
      708          stdout=None,
      709          stderr=None,
      710          ensurestart=True,
      711          record_wait=None,
      712          stdin_bytes=None,
      713      ):
714 """Spawn a command without waiting for it to finish.
715
716
717 When `record_wait` is not None, the spawned process will not be fully
718 detached and the `record_wait` argument will be called with a the
719 `Subprocess.wait` function for the spawned process. This is mostly
720 useful for developers that need to make sure the spawned process
721 finished before a certain point. (eg: writing test)"""
      722          if pycompat.isdarwin:
      723              # avoid crash in CoreFoundation in case another thread
      724              # calls gui() while we're calling fork().
      725              gui()
      726
      727          if shell:
      728              script = cmd
      729          else:
      730              if isinstance(cmd, bytes):
      731                  cmd = [cmd]
      732              script = b' '.join(shellquote(x) for x in cmd)
      733          if record_wait is None:
      734              # double-fork to completely detach from the parent process
      735              script = b'( %s ) &' % script
      736              start_new_session = True
      737          else:
      738              start_new_session = False
      739              ensurestart = True
      740
      741          try:
      742              if stdin_bytes is None:
      743                  stdin = subprocess.DEVNULL
      744              else:
      745                  stdin = pycompat.unnamedtempfile()
      746                  stdin.write(stdin_bytes)
      747                  stdin.flush()
      748                  stdin.seek(0)
      749              if stdout is None:
      750                  stdout = subprocess.DEVNULL
      751              if stderr is None:
      752                  stderr = subprocess.DEVNULL
      753
      754              p = subprocess.Popen(
      755                  script,
      756                  shell=True,
      757                  env=env,
      758                  close_fds=True,
      759                  stdin=stdin,
      760                  stdout=stdout,
      761                  stderr=stderr,
      762                  start_new_session=start_new_session,
      763              )
      764          except Exception:
      765              if record_wait is not None:
      766                  record_wait(255)
      767              raise
      768          finally:
      769              if stdin_bytes is not None:
      770                  stdin.close()
      771          if not ensurestart:
      772              # Even though we're not waiting on the child process,
      773              # we still must call waitpid() on it at some point so
      774              # it's not a zombie/defunct. This is especially relevant for
      775              # chg since the parent process won't die anytime soon.
      776              # We use a thread to make the overhead tiny.
      777              t = threading.Thread(target=lambda: p.wait())
      778              t.daemon = True
      779              t.start()
      780          else:
      781              returncode = p.wait()
      782              if record_wait is not None:
      783                  record_wait(returncode)
      784
      785      def runbgcommandpy2(
 705  786          cmd,
 706  787          env,
 707  788          shell=False,
 708  789          stdout=None,
 709  790          stderr=None,
 ...  ...
 809  890              # mission accomplished, this child needs to exit and not
 810  891              # continue the hg process here.
 811  892              stdin.close()
 812  893              if record_wait is None:
 813  894                  os._exit(returncode)
      895
      896      if pycompat.ispy3:
      897          # This branch is more robust, because it avoids running python
      898          # code (hence gc finalizers, like sshpeer.__del__, which
      899          # blocks). But we can't easily do the equivalent in py2,
      900          # because of the lack of start_new_session=True flag. Given
      901          # that the py2 branch should die soon, the short-lived
      902          # duplication seems acceptable.
      903          runbgcommand = runbgcommandpy3
      904      else:
      905          runbgcommand = runbgcommandpy2
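To make the intended usage of the new Python 3 implementation concrete, here is a hedged sketch of how a caller might exercise the two modes described in the docstring and comments above: the default fire-and-forget path and the test-oriented record_wait path. This is illustrative usage, not code from the patch; it assumes a POSIX Python 3 install of Mercurial where mercurial.utils.procutil is importable, and the variable names are made up for the example.

    from mercurial.utils import procutil

    # Fire-and-forget, as in the benchmark from the commit message: with
    # record_wait=None the command is wrapped in "( ... ) &" and detached,
    # and with ensurestart=False a daemon thread reaps the short-lived shell
    # so it never lingers as a zombie.
    procutil.runbgcommand([b'sleep', b'5'], {}, ensurestart=False)

    # Test-style call: passing record_wait skips the "( ... ) &" detachment
    # so the callback can observe how the spawn went.
    statuses = []
    procutil.runbgcommand(
        [b'true'],
        {},
        # receives the shell's exit status, or 255 if the spawn itself failed
        record_wait=statuses.append,
    )
    print(statuses)  # e.g. [0]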