changeset 44234:50e7ce1f96d1
merge with stable
author      Martin von Zweigbergk <martinvonz@google.com>
date        Thu, 23 Jan 2020 13:10:48 -0800
parents     00aaf11ec399 (diff) ae596fac8ba0 (current diff)
children    ff22c76825b9
diffstat    33 files changed, 1163 insertions(+), 478 deletions(-)
--- a/black.toml	Tue Jan 21 17:15:34 2020 -0800
+++ b/black.toml	Thu Jan 23 13:10:48 2020 -0800
@@ -9,7 +9,6 @@
 | \.mypy_cache/
 | \.venv/
 | mercurial/thirdparty/
-| contrib/python-zstandard/
 '''
 skip-string-normalization = true
 quiet = true
--- a/contrib/examples/fix.hgrc	Tue Jan 21 17:15:34 2020 -0800
+++ b/contrib/examples/fix.hgrc	Thu Jan 23 13:10:48 2020 -0800
@@ -6,7 +6,7 @@
 rustfmt:pattern = set:**.rs

 black:command = black --config=black.toml -
-black:pattern = set:**.py - mercurial/thirdparty/** - "contrib/python-zstandard/**"
+black:pattern = set:**.py - mercurial/thirdparty/**

 # Mercurial doesn't have any Go code, but if we did this is how we
 # would configure `hg fix` for Go:
--- a/contrib/packaging/inno/requirements.txt	Tue Jan 21 17:15:34 2020 -0800
+++ b/contrib/packaging/inno/requirements.txt	Thu Jan 23 13:10:48 2020 -0800
@@ -8,6 +8,10 @@
     --hash=sha256:e4f3620cfea4f83eedc95b24abd9cd56f3c4b146dd0177e83a21b4eb49e21e50 \
     --hash=sha256:fd7c7c74727ddcf00e9acd26bba8da604ffec95bf1c2144e67aff7a8b50e6cef \
     # via dulwich
+configparser==4.0.2 \
+    --hash=sha256:254c1d9c79f60c45dfde850850883d5aaa7f19a23f13561243a050d5a7c3fe4c \
+    --hash=sha256:c7d282687a5308319bf3d2e7706e575c635b0a470342641c93bea0ea3b5331df \
+    # via entrypoints
 docutils==0.15.2 \
     --hash=sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0 \
     --hash=sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827 \
@@ -34,8 +38,7 @@
     --hash=sha256:881c4c157e45f30af185c1ffe8d549d48ac9127433f2c380c24b84572ad66297
 pywin32-ctypes==0.2.0 \
     --hash=sha256:24ffc3b341d457d48e8922352130cf2644024a4ff09762a2261fd34c36ee5942 \
-    --hash=sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98 \
-    # via keyring
+    --hash=sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98
 urllib3==1.25.6 \
     --hash=sha256:3de946ffbed6e6746608990594d08faac602528ac7015ac28d33cee6a45b7398 \
     --hash=sha256:9a107b99a5393caf59c7aa3c1249c16e6879447533d0887f4336dde834c7be86 \
--- a/contrib/python-zstandard/make_cffi.py	Tue Jan 21 17:15:34 2020 -0800
+++ b/contrib/python-zstandard/make_cffi.py	Thu Jan 23 13:10:48 2020 -0800
@@ -52,7 +52,8 @@

 # Headers whose preprocessed output will be fed into cdef().
 HEADERS = [
-    os.path.join(HERE, "zstd", *p) for p in (("zstd.h",), ("dictBuilder", "zdict.h"),)
+    os.path.join(HERE, "zstd", *p)
+    for p in (("zstd.h",), ("dictBuilder", "zdict.h"),)
 ]

 INCLUDE_DIRS = [
@@ -139,7 +140,9 @@
     env = dict(os.environ)
     if getattr(compiler, "_paths", None):
         env["PATH"] = compiler._paths
-    process = subprocess.Popen(args + [input_file], stdout=subprocess.PIPE, env=env)
+    process = subprocess.Popen(
+        args + [input_file], stdout=subprocess.PIPE, env=env
+    )
     output = process.communicate()[0]
     ret = process.poll()
     if ret:
--- a/contrib/python-zstandard/setup.py	Tue Jan 21 17:15:34 2020 -0800
+++ b/contrib/python-zstandard/setup.py	Thu Jan 23 13:10:48 2020 -0800
@@ -87,7 +87,9 @@
         break

 if not version:
-    raise Exception("could not resolve package version; " "this should never happen")
+    raise Exception(
+        "could not resolve package version; " "this should never happen"
+    )

 setup(
     name="zstandard",
--- a/contrib/python-zstandard/setup_zstd.py	Tue Jan 21 17:15:34 2020 -0800
+++ b/contrib/python-zstandard/setup_zstd.py	Thu Jan 23 13:10:48 2020 -0800
@@ -138,12 +138,16 @@
     if not system_zstd:
         sources.update([os.path.join(actual_root, p) for p in zstd_sources])

         if support_legacy:
-            sources.update([os.path.join(actual_root, p) for p in zstd_sources_legacy])
+            sources.update(
+                [os.path.join(actual_root, p) for p in zstd_sources_legacy]
+            )

     sources = list(sources)

     include_dirs = set([os.path.join(actual_root, d) for d in ext_includes])
     if not system_zstd:
-        include_dirs.update([os.path.join(actual_root, d) for d in zstd_includes])
+        include_dirs.update(
+            [os.path.join(actual_root, d) for d in zstd_includes]
+        )

         if support_legacy:
             include_dirs.update(
                 [os.path.join(actual_root, d) for d in zstd_includes_legacy]
--- a/contrib/python-zstandard/tests/common.py	Tue Jan 21 17:15:34 2020 -0800
+++ b/contrib/python-zstandard/tests/common.py	Thu Jan 23 13:10:48 2020 -0800
@@ -50,7 +50,9 @@
         os.environ.update(old_env)

     if mod.backend != "cffi":
-        raise Exception("got the zstandard %s backend instead of cffi" % mod.backend)
+        raise Exception(
+            "got the zstandard %s backend instead of cffi" % mod.backend
+        )

 # If CFFI version is available, dynamically construct test methods
 # that use it.
@@ -84,7 +86,9 @@
                     fn.__func__.func_defaults,
                     fn.__func__.func_closure,
                 )
-                new_method = types.UnboundMethodType(new_fn, fn.im_self, fn.im_class)
+                new_method = types.UnboundMethodType(
+                    new_fn, fn.im_self, fn.im_class
+                )

                 setattr(cls, name, new_method)

@@ -194,4 +198,6 @@

     expensive_settings = hypothesis.settings(deadline=None, max_examples=10000)
     hypothesis.settings.register_profile("expensive", expensive_settings)
-    hypothesis.settings.load_profile(os.environ.get("HYPOTHESIS_PROFILE", "default"))
+    hypothesis.settings.load_profile(
+        os.environ.get("HYPOTHESIS_PROFILE", "default")
+    )
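The common.py hunk above registers hypothesis settings profiles and lets $HYPOTHESIS_PROFILE pick one at run time. For orientation, a minimal standalone sketch of the same idiom — the "dev" profile name here is made up; "default" is hypothesis's built-in profile:

import os

import hypothesis

# Register profiles once at import time...
hypothesis.settings.register_profile(
    "dev", hypothesis.settings(max_examples=10)
)
hypothesis.settings.register_profile(
    "expensive", hypothesis.settings(deadline=None, max_examples=10000)
)

# ...then let the environment choose which one governs the test run.
hypothesis.settings.load_profile(
    os.environ.get("HYPOTHESIS_PROFILE", "default")
)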
--- a/contrib/python-zstandard/tests/test_buffer_util.py	Tue Jan 21 17:15:34 2020 -0800
+++ b/contrib/python-zstandard/tests/test_buffer_util.py	Thu Jan 23 13:10:48 2020 -0800
@@ -67,7 +67,8 @@
             self.skipTest("BufferWithSegments not available")

         b = zstd.BufferWithSegments(
-            b"foofooxfooxy", b"".join([ss.pack(0, 3), ss.pack(3, 4), ss.pack(7, 5)])
+            b"foofooxfooxy",
+            b"".join([ss.pack(0, 3), ss.pack(3, 4), ss.pack(7, 5)]),
         )
         self.assertEqual(len(b), 3)
         self.assertEqual(b.size, 12)
@@ -83,17 +84,23 @@
         if not hasattr(zstd, "BufferWithSegmentsCollection"):
             self.skipTest("BufferWithSegmentsCollection not available")

-        with self.assertRaisesRegex(ValueError, "must pass at least 1 argument"):
+        with self.assertRaisesRegex(
+            ValueError, "must pass at least 1 argument"
+        ):
             zstd.BufferWithSegmentsCollection()

     def test_argument_validation(self):
         if not hasattr(zstd, "BufferWithSegmentsCollection"):
             self.skipTest("BufferWithSegmentsCollection not available")

-        with self.assertRaisesRegex(TypeError, "arguments must be BufferWithSegments"):
+        with self.assertRaisesRegex(
+            TypeError, "arguments must be BufferWithSegments"
+        ):
             zstd.BufferWithSegmentsCollection(None)

-        with self.assertRaisesRegex(TypeError, "arguments must be BufferWithSegments"):
+        with self.assertRaisesRegex(
+            TypeError, "arguments must be BufferWithSegments"
+        ):
             zstd.BufferWithSegmentsCollection(
                 zstd.BufferWithSegments(b"foo", ss.pack(0, 3)), None
             )
--- a/contrib/python-zstandard/tests/test_compressor.py	Tue Jan 21 17:15:34 2020 -0800
+++ b/contrib/python-zstandard/tests/test_compressor.py	Thu Jan 23 13:10:48 2020 -0800
@@ -24,7 +24,9 @@

 def multithreaded_chunk_size(level, source_size=0):
-    params = zstd.ZstdCompressionParameters.from_level(level, source_size=source_size)
+    params = zstd.ZstdCompressionParameters.from_level(
+        level, source_size=source_size
+    )

     return 1 << (params.window_log + 2)

@@ -86,7 +88,9 @@
         # This matches the test for read_to_iter() below.
         cctx = zstd.ZstdCompressor(level=1, write_content_size=False)
-        result = cctx.compress(b"f" * zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE + b"o")
+        result = cctx.compress(
+            b"f" * zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE + b"o"
+        )
         self.assertEqual(
             result,
             b"\x28\xb5\x2f\xfd\x00\x40\x54\x00\x00"
@@ -99,7 +103,9 @@
             result = cctx.compress(b"foo" * 256)

     def test_no_magic(self):
-        params = zstd.ZstdCompressionParameters.from_level(1, format=zstd.FORMAT_ZSTD1)
+        params = zstd.ZstdCompressionParameters.from_level(
+            1, format=zstd.FORMAT_ZSTD1
+        )
         cctx = zstd.ZstdCompressor(compression_params=params)

         magic = cctx.compress(b"foobar")
@@ -223,7 +229,8 @@

         self.assertEqual(
             result,
-            b"\x28\xb5\x2f\xfd\x23\x8f\x55\x0f\x70\x03\x19\x00\x00" b"\x66\x6f\x6f",
+            b"\x28\xb5\x2f\xfd\x23\x8f\x55\x0f\x70\x03\x19\x00\x00"
+            b"\x66\x6f\x6f",
         )

     def test_multithreaded_compression_params(self):
@@ -234,7 +241,9 @@
         params = zstd.get_frame_parameters(result)
         self.assertEqual(params.content_size, 3)

-        self.assertEqual(result, b"\x28\xb5\x2f\xfd\x20\x03\x19\x00\x00\x66\x6f\x6f")
+        self.assertEqual(
+            result, b"\x28\xb5\x2f\xfd\x20\x03\x19\x00\x00\x66\x6f\x6f"
+        )


 @make_cffi
@@ -347,7 +356,9 @@
         )
         self.assertEqual(cobj.compress(b"bar"), b"")
         # 3 byte header plus content.
-        self.assertEqual(cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK), b"\x18\x00\x00bar")
+        self.assertEqual(
+            cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK), b"\x18\x00\x00bar"
+        )
         self.assertEqual(cobj.flush(), b"\x01\x00\x00")

     def test_flush_empty_block(self):
@@ -445,7 +456,9 @@
         self.assertEqual(int(r), 0)
         self.assertEqual(w, 9)

-        self.assertEqual(dest.getvalue(), b"\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00")
+        self.assertEqual(
+            dest.getvalue(), b"\x28\xb5\x2f\xfd\x00\x48\x01\x00\x00"
+        )

     def test_large_data(self):
         source = io.BytesIO()
@@ -478,7 +491,9 @@
         cctx = zstd.ZstdCompressor(level=1, write_checksum=True)
         cctx.copy_stream(source, with_checksum)

-        self.assertEqual(len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4)
+        self.assertEqual(
+            len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4
+        )

         no_params = zstd.get_frame_parameters(no_checksum.getvalue())
         with_params = zstd.get_frame_parameters(with_checksum.getvalue())
@@ -585,7 +600,9 @@
         cctx = zstd.ZstdCompressor()

         with cctx.stream_reader(b"foo") as reader:
-            with self.assertRaisesRegex(ValueError, "cannot __enter__ multiple times"):
+            with self.assertRaisesRegex(
+                ValueError, "cannot __enter__ multiple times"
+            ):
                 with reader as reader2:
                     pass

@@ -744,7 +761,9 @@
         source = io.BytesIO(b"foobar")

         with cctx.stream_reader(source, size=2) as reader:
-            with self.assertRaisesRegex(zstd.ZstdError, "Src size is incorrect"):
+            with self.assertRaisesRegex(
+                zstd.ZstdError, "Src size is incorrect"
+            ):
                 reader.read(10)

         # Try another compression operation.
@@ -1126,7 +1145,9 @@
         self.assertFalse(no_params.has_checksum)
         self.assertTrue(with_params.has_checksum)

-        self.assertEqual(len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4)
+        self.assertEqual(
+            len(with_checksum.getvalue()), len(no_checksum.getvalue()) + 4
+        )

     def test_write_content_size(self):
         no_size = NonClosingBytesIO()
@@ -1145,7 +1166,9 @@

         # Declaring size will write the header.
         with_size = NonClosingBytesIO()
-        with cctx.stream_writer(with_size, size=len(b"foobar" * 256)) as compressor:
+        with cctx.stream_writer(
+            with_size, size=len(b"foobar" * 256)
+        ) as compressor:
             self.assertEqual(compressor.write(b"foobar" * 256), 0)

         no_params = zstd.get_frame_parameters(no_size.getvalue())
@@ -1191,7 +1214,9 @@
         self.assertFalse(no_params.has_checksum)
         self.assertFalse(with_params.has_checksum)

-        self.assertEqual(len(with_dict_id.getvalue()), len(no_dict_id.getvalue()) + 4)
+        self.assertEqual(
+            len(with_dict_id.getvalue()), len(no_dict_id.getvalue()) + 4
+        )

     def test_memory_size(self):
         cctx = zstd.ZstdCompressor(level=3)
@@ -1337,7 +1362,9 @@
         for chunk in cctx.read_to_iter(b"foobar"):
             pass

-        with self.assertRaisesRegex(ValueError, "must pass an object with a read"):
+        with self.assertRaisesRegex(
+            ValueError, "must pass an object with a read"
+        ):
             for chunk in cctx.read_to_iter(True):
                 pass

@@ -1513,7 +1540,9 @@

         dctx = zstd.ZstdDecompressor()

-        self.assertEqual(dctx.decompress(b"".join(chunks)), (b"x" * 1000) + (b"y" * 24))
+        self.assertEqual(
+            dctx.decompress(b"".join(chunks)), (b"x" * 1000) + (b"y" * 24)
+        )

     def test_small_chunk_size(self):
         cctx = zstd.ZstdCompressor()
@@ -1533,7 +1562,8 @@
         dctx = zstd.ZstdDecompressor()
         self.assertEqual(
-            dctx.decompress(b"".join(chunks), max_output_size=10000), b"foo" * 1024
+            dctx.decompress(b"".join(chunks), max_output_size=10000),
+            b"foo" * 1024,
         )

     def test_input_types(self):
@@ -1602,7 +1632,8 @@
         list(chunker.finish())

         with self.assertRaisesRegex(
-            zstd.ZstdError, r"cannot call compress\(\) after compression finished"
+            zstd.ZstdError,
+            r"cannot call compress\(\) after compression finished",
         ):
             list(chunker.compress(b"foo"))

@@ -1644,7 +1675,9 @@
         with self.assertRaises(TypeError):
             cctx.multi_compress_to_buffer((1, 2))

-        with self.assertRaisesRegex(TypeError, "item 0 not a bytes like object"):
+        with self.assertRaisesRegex(
+            TypeError, "item 0 not a bytes like object"
+        ):
             cctx.multi_compress_to_buffer([u"foo"])

     def test_empty_input(self):
--- a/contrib/python-zstandard/tests/test_compressor_fuzzing.py	Tue Jan 21 17:15:34 2020 -0800
+++ b/contrib/python-zstandard/tests/test_compressor_fuzzing.py	Thu Jan 23 13:10:48 2020 -0800
@@ -28,9 +28,13 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            -1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
-    def test_stream_source_read(self, original, level, source_read_size, read_size):
+    def test_stream_source_read(
+        self, original, level, source_read_size, read_size
+    ):
         if read_size == 0:
             read_size = -1

@@ -58,9 +62,13 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            -1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
-    def test_buffer_source_read(self, original, level, source_read_size, read_size):
+    def test_buffer_source_read(
+        self, original, level, source_read_size, read_size
+    ):
         if read_size == 0:
             read_size = -1

@@ -155,9 +163,13 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
-    def test_stream_source_readinto(self, original, level, source_read_size, read_size):
+    def test_stream_source_readinto(
+        self, original, level, source_read_size, read_size
+    ):
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)

@@ -184,9 +196,13 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
-    def test_buffer_source_readinto(self, original, level, source_read_size, read_size):
+    def test_buffer_source_readinto(
+        self, original, level, source_read_size, read_size
+    ):
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)

@@ -285,9 +301,13 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            -1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
-    def test_stream_source_read1(self, original, level, source_read_size, read_size):
+    def test_stream_source_read1(
+        self, original, level, source_read_size, read_size
+    ):
         if read_size == 0:
             read_size = -1

@@ -315,9 +335,13 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(-1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            -1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
-    def test_buffer_source_read1(self, original, level, source_read_size, read_size):
+    def test_buffer_source_read1(
+        self, original, level, source_read_size, read_size
+    ):
         if read_size == 0:
             read_size = -1

@@ -412,7 +436,9 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
     def test_stream_source_readinto1(
         self, original, level, source_read_size, read_size
@@ -446,7 +472,9 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         source_read_size=strategies.integers(1, 16384),
-        read_size=strategies.integers(1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE),
+        read_size=strategies.integers(
+            1, zstd.COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        ),
     )
     def test_buffer_source_readinto1(
         self, original, level, source_read_size, read_size
@@ -576,7 +604,9 @@
         read_size=strategies.integers(min_value=1, max_value=1048576),
         write_size=strategies.integers(min_value=1, max_value=1048576),
     )
-    def test_read_write_size_variance(self, original, level, read_size, write_size):
+    def test_read_write_size_variance(
+        self, original, level, read_size, write_size
+    ):
         refctx = zstd.ZstdCompressor(level=level)
         ref_frame = refctx.compress(original)

@@ -585,7 +615,11 @@
         dest = io.BytesIO()

         cctx.copy_stream(
-            source, dest, size=len(original), read_size=read_size, write_size=write_size
+            source,
+            dest,
+            size=len(original),
+            read_size=read_size,
+            write_size=write_size,
         )

         self.assertEqual(dest.getvalue(), ref_frame)
@@ -675,7 +709,9 @@
             decompressed_chunks.append(dobj.decompress(chunk))

         self.assertEqual(
-            dctx.decompress(b"".join(compressed_chunks), max_output_size=len(original)),
+            dctx.decompress(
+                b"".join(compressed_chunks), max_output_size=len(original)
+            ),
             original,
         )
         self.assertEqual(b"".join(decompressed_chunks), original)
@@ -690,7 +726,9 @@
         read_size=strategies.integers(min_value=1, max_value=4096),
         write_size=strategies.integers(min_value=1, max_value=4096),
     )
-    def test_read_write_size_variance(self, original, level, read_size, write_size):
+    def test_read_write_size_variance(
+        self, original, level, read_size, write_size
+    ):
         refcctx = zstd.ZstdCompressor(level=level)
         ref_frame = refcctx.compress(original)

@@ -699,7 +737,10 @@
         cctx = zstd.ZstdCompressor(level=level)
         chunks = list(
             cctx.read_to_iter(
-                source, size=len(original), read_size=read_size, write_size=write_size
+                source,
+                size=len(original),
+                read_size=read_size,
+                write_size=write_size,
             )
         )

@@ -710,7 +751,9 @@
 class TestCompressor_multi_compress_to_buffer_fuzzing(TestCase):
     @hypothesis.given(
         original=strategies.lists(
-            strategies.sampled_from(random_input_data()), min_size=1, max_size=1024
+            strategies.sampled_from(random_input_data()),
+            min_size=1,
+            max_size=1024,
         ),
         threads=strategies.integers(min_value=1, max_value=8),
         use_dict=strategies.booleans(),
@@ -776,7 +819,8 @@
         dctx = zstd.ZstdDecompressor()

         self.assertEqual(
-            dctx.decompress(b"".join(chunks), max_output_size=len(original)), original
+            dctx.decompress(b"".join(chunks), max_output_size=len(original)),
+            original,
         )

         self.assertTrue(all(len(chunk) == chunk_size for chunk in chunks[:-1]))
@@ -794,7 +838,9 @@
         input_sizes=strategies.data(),
         flushes=strategies.data(),
     )
-    def test_flush_block(self, original, level, chunk_size, input_sizes, flushes):
+    def test_flush_block(
+        self, original, level, chunk_size, input_sizes, flushes
+    ):
         cctx = zstd.ZstdCompressor(level=level)
         chunker = cctx.chunker(chunk_size=chunk_size)

@@ -830,7 +876,9 @@
         decompressed_chunks.append(dobj.decompress(b"".join(chunks)))

         self.assertEqual(
-            dctx.decompress(b"".join(compressed_chunks), max_output_size=len(original)),
+            dctx.decompress(
+                b"".join(compressed_chunks), max_output_size=len(original)
+            ),
             original,
         )
         self.assertEqual(b"".join(decompressed_chunks), original)
--- a/contrib/python-zstandard/tests/test_data_structures.py	Tue Jan 21 17:15:34 2020 -0800
+++ b/contrib/python-zstandard/tests/test_data_structures.py	Thu Jan 23 13:10:48 2020 -0800
@@ -65,7 +65,9 @@
         p = zstd.ZstdCompressionParameters(threads=4)
         self.assertEqual(p.threads, 4)

-        p = zstd.ZstdCompressionParameters(threads=2, job_size=1048576, overlap_log=6)
+        p = zstd.ZstdCompressionParameters(
+            threads=2, job_size=1048576, overlap_log=6
+        )
         self.assertEqual(p.threads, 2)
         self.assertEqual(p.job_size, 1048576)
         self.assertEqual(p.overlap_log, 6)
@@ -128,7 +130,9 @@
         with self.assertRaisesRegex(
             ValueError, "cannot specify both ldm_hash_rate_log"
         ):
-            zstd.ZstdCompressionParameters(ldm_hash_rate_log=8, ldm_hash_every_log=4)
+            zstd.ZstdCompressionParameters(
+                ldm_hash_rate_log=8, ldm_hash_every_log=4
+            )

         p = zstd.ZstdCompressionParameters(ldm_hash_rate_log=8)
         self.assertEqual(p.ldm_hash_every_log, 8)
@@ -137,7 +141,9 @@
         self.assertEqual(p.ldm_hash_every_log, 16)

     def test_overlap_log(self):
-        with self.assertRaisesRegex(ValueError, "cannot specify both overlap_log"):
+        with self.assertRaisesRegex(
+            ValueError, "cannot specify both overlap_log"
+        ):
             zstd.ZstdCompressionParameters(overlap_log=1, overlap_size_log=9)

         p = zstd.ZstdCompressionParameters(overlap_log=2)
@@ -169,10 +175,14 @@
             zstd.get_frame_parameters(u"foobarbaz")

     def test_invalid_input_sizes(self):
-        with self.assertRaisesRegex(zstd.ZstdError, "not enough data for frame"):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "not enough data for frame"
+        ):
             zstd.get_frame_parameters(b"")

-        with self.assertRaisesRegex(zstd.ZstdError, "not enough data for frame"):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "not enough data for frame"
+        ):
             zstd.get_frame_parameters(zstd.FRAME_HEADER)

     def test_invalid_frame(self):
@@ -201,7 +211,9 @@
         self.assertTrue(params.has_checksum)

         # Upper 2 bits indicate content size.
-        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b"\x40\x00\xff\x00")
+        params = zstd.get_frame_parameters(
+            zstd.FRAME_HEADER + b"\x40\x00\xff\x00"
+        )
         self.assertEqual(params.content_size, 511)
         self.assertEqual(params.window_size, 1024)
         self.assertEqual(params.dict_id, 0)
@@ -215,7 +227,9 @@
         self.assertFalse(params.has_checksum)

         # Set multiple things.
-        params = zstd.get_frame_parameters(zstd.FRAME_HEADER + b"\x45\x40\x0f\x10\x00")
+        params = zstd.get_frame_parameters(
+            zstd.FRAME_HEADER + b"\x45\x40\x0f\x10\x00"
+        )
         self.assertEqual(params.content_size, 272)
         self.assertEqual(params.window_size, 262144)
         self.assertEqual(params.dict_id, 15)
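The test_data_structures.py hunks above assert on individual zstd frame-header bytes. For orientation, a small usage sketch of the public API they exercise — it assumes the zstandard package is importable; printed values depend on the input:

import zstandard as zstd

# Compress a payload, then parse its frame header back out.
frame = zstd.ZstdCompressor(level=3).compress(b"foo" * 64)
params = zstd.get_frame_parameters(frame)

# FrameParameters exposes the fields the tests above poke at byte-by-byte.
print(params.content_size, params.window_size, params.dict_id)
print(params.has_checksum)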
--- a/contrib/python-zstandard/tests/test_data_structures_fuzzing.py	Tue Jan 21 17:15:34 2020 -0800
+++ b/contrib/python-zstandard/tests/test_data_structures_fuzzing.py	Thu Jan 23 13:10:48 2020 -0800
@@ -23,7 +23,9 @@
 s_chainlog = strategies.integers(
     min_value=zstd.CHAINLOG_MIN, max_value=zstd.CHAINLOG_MAX
 )
-s_hashlog = strategies.integers(min_value=zstd.HASHLOG_MIN, max_value=zstd.HASHLOG_MAX)
+s_hashlog = strategies.integers(
+    min_value=zstd.HASHLOG_MIN, max_value=zstd.HASHLOG_MAX
+)
 s_searchlog = strategies.integers(
     min_value=zstd.SEARCHLOG_MIN, max_value=zstd.SEARCHLOG_MAX
 )
@@ -61,7 +63,14 @@
         s_strategy,
     )
     def test_valid_init(
-        self, windowlog, chainlog, hashlog, searchlog, minmatch, targetlength, strategy
+        self,
+        windowlog,
+        chainlog,
+        hashlog,
+        searchlog,
+        minmatch,
+        targetlength,
+        strategy,
     ):
         zstd.ZstdCompressionParameters(
             window_log=windowlog,
@@ -83,7 +92,14 @@
         s_strategy,
     )
     def test_estimated_compression_context_size(
-        self, windowlog, chainlog, hashlog, searchlog, minmatch, targetlength, strategy
+        self,
+        windowlog,
+        chainlog,
+        hashlog,
+        searchlog,
+        minmatch,
+        targetlength,
+        strategy,
     ):
         if minmatch == zstd.MINMATCH_MIN and strategy in (
             zstd.STRATEGY_FAST,
--- a/contrib/python-zstandard/tests/test_decompressor.py	Tue Jan 21 17:15:34 2020 -0800
+++ b/contrib/python-zstandard/tests/test_decompressor.py	Thu Jan 23 13:10:48 2020 -0800
@@ -170,11 +170,15 @@
             dctx.decompress(compressed, max_output_size=len(source) - 1)

         # Input size + 1 works
-        decompressed = dctx.decompress(compressed, max_output_size=len(source) + 1)
+        decompressed = dctx.decompress(
+            compressed, max_output_size=len(source) + 1
+        )
         self.assertEqual(decompressed, source)

         # A much larger buffer works.
-        decompressed = dctx.decompress(compressed, max_output_size=len(source) * 64)
+        decompressed = dctx.decompress(
+            compressed, max_output_size=len(source) * 64
+        )
         self.assertEqual(decompressed, source)

     def test_stupidly_large_output_buffer(self):
@@ -237,7 +241,8 @@
         dctx = zstd.ZstdDecompressor(max_window_size=2 ** zstd.WINDOWLOG_MIN)

         with self.assertRaisesRegex(
-            zstd.ZstdError, "decompression error: Frame requires too much memory"
+            zstd.ZstdError,
+            "decompression error: Frame requires too much memory",
         ):
             dctx.decompress(frame, max_output_size=len(source))

@@ -291,7 +296,9 @@
         self.assertEqual(w, len(source.getvalue()))

     def test_read_write_size(self):
-        source = OpCountingBytesIO(zstd.ZstdCompressor().compress(b"foobarfoobar"))
+        source = OpCountingBytesIO(
+            zstd.ZstdCompressor().compress(b"foobarfoobar")
+        )
         dest = OpCountingBytesIO()
         dctx = zstd.ZstdDecompressor()
@@ -309,7 +316,9 @@
         dctx = zstd.ZstdDecompressor()

         with dctx.stream_reader(b"foo") as reader:
-            with self.assertRaisesRegex(ValueError, "cannot __enter__ multiple times"):
+            with self.assertRaisesRegex(
+                ValueError, "cannot __enter__ multiple times"
+            ):
                 with reader as reader2:
                     pass

@@ -474,7 +483,9 @@
         dctx = zstd.ZstdDecompressor()

         with dctx.stream_reader(frame) as reader:
-            with self.assertRaisesRegex(ValueError, "cannot seek to negative position"):
+            with self.assertRaisesRegex(
+                ValueError, "cannot seek to negative position"
+            ):
                 reader.seek(-1, os.SEEK_SET)

             reader.read(1)
@@ -490,7 +501,8 @@
                 reader.seek(-1, os.SEEK_CUR)

             with self.assertRaisesRegex(
-                ValueError, "zstd decompression streams cannot be seeked with SEEK_END"
+                ValueError,
+                "zstd decompression streams cannot be seeked with SEEK_END",
             ):
                 reader.seek(0, os.SEEK_END)

@@ -743,7 +755,9 @@
     def test_read_lines(self):
         cctx = zstd.ZstdCompressor()
-        source = b"\n".join(("line %d" % i).encode("ascii") for i in range(1024))
+        source = b"\n".join(
+            ("line %d" % i).encode("ascii") for i in range(1024)
+        )

         frame = cctx.compress(source)

@@ -821,7 +835,9 @@
         dobj = dctx.decompressobj()
         dobj.decompress(data)

-        with self.assertRaisesRegex(zstd.ZstdError, "cannot use a decompressobj"):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "cannot use a decompressobj"
+        ):
             dobj.decompress(data)
             self.assertIsNone(dobj.flush())

@@ -1124,7 +1140,9 @@
         # Buffer protocol works.
         dctx.read_to_iter(b"foobar")

-        with self.assertRaisesRegex(ValueError, "must pass an object with a read"):
+        with self.assertRaisesRegex(
+            ValueError, "must pass an object with a read"
+        ):
             b"".join(dctx.read_to_iter(True))

     def test_empty_input(self):
@@ -1226,7 +1244,9 @@
         decompressed = b"".join(chunks)
         self.assertEqual(decompressed, source.getvalue())

-    @unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
+    @unittest.skipUnless(
+        "ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set"
+    )
     def test_large_input(self):
         bytes = list(struct.Struct(">B").pack(i) for i in range(256))
         compressed = NonClosingBytesIO()
@@ -1241,13 +1261,16 @@
                 len(compressed.getvalue())
                 > zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
             )
-            have_raw = input_size > zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE * 2
+            have_raw = (
+                input_size > zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE * 2
+            )
             if have_compressed and have_raw:
                 break

         compressed = io.BytesIO(compressed.getvalue())
         self.assertGreater(
-            len(compressed.getvalue()), zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
+            len(compressed.getvalue()),
+            zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
         )

         dctx = zstd.ZstdDecompressor()
@@ -1303,7 +1326,9 @@
         self.assertEqual(streamed, source.getvalue())

     def test_read_write_size(self):
-        source = OpCountingBytesIO(zstd.ZstdCompressor().compress(b"foobarfoobar"))
+        source = OpCountingBytesIO(
+            zstd.ZstdCompressor().compress(b"foobarfoobar")
+        )
         dctx = zstd.ZstdDecompressor()
         for chunk in dctx.read_to_iter(source, read_size=1, write_size=1):
             self.assertEqual(len(chunk), 1)
@@ -1355,10 +1380,14 @@
         ):
             dctx.decompress_content_dict_chain([zstd.FRAME_HEADER])

-        with self.assertRaisesRegex(ValueError, "chunk 0 is not a valid zstd frame"):
+        with self.assertRaisesRegex(
+            ValueError, "chunk 0 is not a valid zstd frame"
+        ):
             dctx.decompress_content_dict_chain([b"foo" * 8])

-        no_size = zstd.ZstdCompressor(write_content_size=False).compress(b"foo" * 64)
+        no_size = zstd.ZstdCompressor(write_content_size=False).compress(
+            b"foo" * 64
+        )

         with self.assertRaisesRegex(
             ValueError, "chunk 0 missing content size in frame"
@@ -1389,10 +1418,14 @@
         ):
             dctx.decompress_content_dict_chain([initial, zstd.FRAME_HEADER])

-        with self.assertRaisesRegex(ValueError, "chunk 1 is not a valid zstd frame"):
+        with self.assertRaisesRegex(
+            ValueError, "chunk 1 is not a valid zstd frame"
+        ):
             dctx.decompress_content_dict_chain([initial, b"foo" * 8])

-        no_size = zstd.ZstdCompressor(write_content_size=False).compress(b"foo" * 64)
+        no_size = zstd.ZstdCompressor(write_content_size=False).compress(
+            b"foo" * 64
+        )

         with self.assertRaisesRegex(
             ValueError, "chunk 1 missing content size in frame"
@@ -1400,7 +1433,9 @@
             dctx.decompress_content_dict_chain([initial, no_size])

         # Corrupt second frame.
-        cctx = zstd.ZstdCompressor(dict_data=zstd.ZstdCompressionDict(b"foo" * 64))
+        cctx = zstd.ZstdCompressor(
+            dict_data=zstd.ZstdCompressionDict(b"foo" * 64)
+        )
         frame = cctx.compress(b"bar" * 64)
         frame = frame[0:12] + frame[15:]

@@ -1447,7 +1482,9 @@
         with self.assertRaises(TypeError):
             dctx.multi_decompress_to_buffer((1, 2))

-        with self.assertRaisesRegex(TypeError, "item 0 not a bytes like object"):
+        with self.assertRaisesRegex(
+            TypeError, "item 0 not a bytes like object"
+        ):
             dctx.multi_decompress_to_buffer([u"foo"])

         with self.assertRaisesRegex(
@@ -1491,7 +1528,9 @@
         if not hasattr(dctx, "multi_decompress_to_buffer"):
             self.skipTest("multi_decompress_to_buffer not available")

-        result = dctx.multi_decompress_to_buffer(frames, decompressed_sizes=sizes)
+        result = dctx.multi_decompress_to_buffer(
+            frames, decompressed_sizes=sizes
+        )

         self.assertEqual(len(result), len(frames))
         self.assertEqual(result.size(), sum(map(len, original)))
@@ -1582,10 +1621,15 @@
         # And a manual mode.
         b = b"".join([frames[0].tobytes(), frames[1].tobytes()])
         b1 = zstd.BufferWithSegments(
-            b, struct.pack("=QQQQ", 0, len(frames[0]), len(frames[0]), len(frames[1]))
+            b,
+            struct.pack(
+                "=QQQQ", 0, len(frames[0]), len(frames[0]), len(frames[1])
+            ),
         )

-        b = b"".join([frames[2].tobytes(), frames[3].tobytes(), frames[4].tobytes()])
+        b = b"".join(
+            [frames[2].tobytes(), frames[3].tobytes(), frames[4].tobytes()]
+        )
         b2 = zstd.BufferWithSegments(
             b,
             struct.pack(
--- a/contrib/python-zstandard/tests/test_decompressor_fuzzing.py	Tue Jan 21 17:15:34 2020 -0800
+++ b/contrib/python-zstandard/tests/test_decompressor_fuzzing.py	Thu Jan 23 13:10:48 2020 -0800
@@ -196,7 +196,9 @@
         streaming=strategies.booleans(),
         source_read_size=strategies.integers(1, 1048576),
     )
-    def test_stream_source_readall(self, original, level, streaming, source_read_size):
+    def test_stream_source_readall(
+        self, original, level, streaming, source_read_size
+    ):
         cctx = zstd.ZstdCompressor(level=level)

         if streaming:
@@ -398,7 +400,9 @@
         write_size=strategies.integers(min_value=1, max_value=8192),
         input_sizes=strategies.data(),
     )
-    def test_write_size_variance(self, original, level, write_size, input_sizes):
+    def test_write_size_variance(
+        self, original, level, write_size, input_sizes
+    ):
         cctx = zstd.ZstdCompressor(level=level)
         frame = cctx.compress(original)

@@ -433,7 +437,9 @@
         read_size=strategies.integers(min_value=1, max_value=8192),
         write_size=strategies.integers(min_value=1, max_value=8192),
     )
-    def test_read_write_size_variance(self, original, level, read_size, write_size):
+    def test_read_write_size_variance(
+        self, original, level, read_size, write_size
+    ):
         cctx = zstd.ZstdCompressor(level=level)
         frame = cctx.compress(original)

@@ -441,7 +447,9 @@
         dest = io.BytesIO()

         dctx = zstd.ZstdDecompressor()
-        dctx.copy_stream(source, dest, read_size=read_size, write_size=write_size)
+        dctx.copy_stream(
+            source, dest, read_size=read_size, write_size=write_size
+        )

         self.assertEqual(dest.getvalue(), original)

@@ -490,11 +498,14 @@
         original=strategies.sampled_from(random_input_data()),
         level=strategies.integers(min_value=1, max_value=5),
         write_size=strategies.integers(
-            min_value=1, max_value=4 * zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE
+            min_value=1,
+            max_value=4 * zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE,
         ),
         chunk_sizes=strategies.data(),
     )
-    def test_random_output_sizes(self, original, level, write_size, chunk_sizes):
+    def test_random_output_sizes(
+        self, original, level, write_size, chunk_sizes
+    ):
         cctx = zstd.ZstdCompressor(level=level)
         frame = cctx.compress(original)

@@ -524,7 +535,9 @@
         read_size=strategies.integers(min_value=1, max_value=4096),
         write_size=strategies.integers(min_value=1, max_value=4096),
     )
-    def test_read_write_size_variance(self, original, level, read_size, write_size):
+    def test_read_write_size_variance(
+        self, original, level, read_size, write_size
+    ):
         cctx = zstd.ZstdCompressor(level=level)
         frame = cctx.compress(original)

@@ -532,7 +545,9 @@
         dctx = zstd.ZstdDecompressor()
         chunks = list(
-            dctx.read_to_iter(source, read_size=read_size, write_size=write_size)
+            dctx.read_to_iter(
+                source, read_size=read_size, write_size=write_size
+            )
         )

         self.assertEqual(b"".join(chunks), original)
@@ -542,7 +557,9 @@
 class TestDecompressor_multi_decompress_to_buffer_fuzzing(TestCase):
     @hypothesis.given(
         original=strategies.lists(
-            strategies.sampled_from(random_input_data()), min_size=1, max_size=1024
+            strategies.sampled_from(random_input_data()),
+            min_size=1,
+            max_size=1024,
         ),
         threads=strategies.integers(min_value=1, max_value=8),
         use_dict=strategies.booleans(),
--- a/contrib/python-zstandard/tests/test_train_dictionary.py	Tue Jan 21 17:15:34 2020 -0800
+++ b/contrib/python-zstandard/tests/test_train_dictionary.py	Thu Jan 23 13:10:48 2020 -0800
@@ -51,11 +51,15 @@
         self.assertEqual(d.d, 16)

     def test_set_dict_id(self):
-        d = zstd.train_dictionary(8192, generate_samples(), k=64, d=16, dict_id=42)
+        d = zstd.train_dictionary(
+            8192, generate_samples(), k=64, d=16, dict_id=42
+        )
         self.assertEqual(d.dict_id(), 42)

     def test_optimize(self):
-        d = zstd.train_dictionary(8192, generate_samples(), threads=-1, steps=1, d=16)
+        d = zstd.train_dictionary(
+            8192, generate_samples(), threads=-1, steps=1, d=16
+        )

         # This varies by platform.
         self.assertIn(d.k, (50, 2000))
@@ -71,10 +75,14 @@
     def test_bad_precompute_compress(self):
         d = zstd.train_dictionary(8192, generate_samples(), k=64, d=16)

-        with self.assertRaisesRegex(ValueError, "must specify one of level or "):
+        with self.assertRaisesRegex(
+            ValueError, "must specify one of level or "
+        ):
             d.precompute_compress()

-        with self.assertRaisesRegex(ValueError, "must only specify one of level or "):
+        with self.assertRaisesRegex(
+            ValueError, "must only specify one of level or "
+        ):
             d.precompute_compress(
                 level=3, compression_params=zstd.CompressionParameters()
             )
@@ -88,5 +96,7 @@
         d = zstd.ZstdCompressionDict(
             b"dictcontent" * 64, dict_type=zstd.DICT_TYPE_FULLDICT
         )

-        with self.assertRaisesRegex(zstd.ZstdError, "unable to precompute dictionary"):
+        with self.assertRaisesRegex(
+            zstd.ZstdError, "unable to precompute dictionary"
+        ):
             d.precompute_compress(level=1)
--- a/contrib/python-zstandard/zstandard/cffi.py	Tue Jan 21 17:15:34 2020 -0800
+++ b/contrib/python-zstandard/zstandard/cffi.py	Thu Jan 23 13:10:48 2020 -0800
@@ -299,10 +299,14 @@
         _set_compression_parameter(params, lib.ZSTD_c_chainLog, chain_log)
         _set_compression_parameter(params, lib.ZSTD_c_searchLog, search_log)
         _set_compression_parameter(params, lib.ZSTD_c_minMatch, min_match)
-        _set_compression_parameter(params, lib.ZSTD_c_targetLength, target_length)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_targetLength, target_length
+        )

         if strategy != -1 and compression_strategy != -1:
-            raise ValueError("cannot specify both compression_strategy and strategy")
+            raise ValueError(
+                "cannot specify both compression_strategy and strategy"
+            )

         if compression_strategy != -1:
             strategy = compression_strategy
@@ -313,12 +317,16 @@
         _set_compression_parameter(
             params, lib.ZSTD_c_contentSizeFlag, write_content_size
         )
-        _set_compression_parameter(params, lib.ZSTD_c_checksumFlag, write_checksum)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_checksumFlag, write_checksum
+        )
         _set_compression_parameter(params, lib.ZSTD_c_dictIDFlag, write_dict_id)
         _set_compression_parameter(params, lib.ZSTD_c_jobSize, job_size)

         if overlap_log != -1 and overlap_size_log != -1:
-            raise ValueError("cannot specify both overlap_log and overlap_size_log")
+            raise ValueError(
+                "cannot specify both overlap_log and overlap_size_log"
+            )

         if overlap_size_log != -1:
             overlap_log = overlap_size_log
@@ -326,12 +334,16 @@
             overlap_log = 0

         _set_compression_parameter(params, lib.ZSTD_c_overlapLog, overlap_log)
-        _set_compression_parameter(params, lib.ZSTD_c_forceMaxWindow, force_max_window)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_forceMaxWindow, force_max_window
+        )
         _set_compression_parameter(
             params, lib.ZSTD_c_enableLongDistanceMatching, enable_ldm
         )
         _set_compression_parameter(params, lib.ZSTD_c_ldmHashLog, ldm_hash_log)
-        _set_compression_parameter(params, lib.ZSTD_c_ldmMinMatch, ldm_min_match)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_ldmMinMatch, ldm_min_match
+        )
         _set_compression_parameter(
             params, lib.ZSTD_c_ldmBucketSizeLog, ldm_bucket_size_log
         )
@@ -346,7 +358,9 @@
         elif ldm_hash_rate_log == -1:
             ldm_hash_rate_log = 0

-        _set_compression_parameter(params, lib.ZSTD_c_ldmHashRateLog, ldm_hash_rate_log)
+        _set_compression_parameter(
+            params, lib.ZSTD_c_ldmHashRateLog, ldm_hash_rate_log
+        )

     @property
     def format(self):
@@ -354,7 +368,9 @@

     @property
     def compression_level(self):
-        return _get_compression_parameter(self._params, lib.ZSTD_c_compressionLevel)
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_compressionLevel
+        )

     @property
     def window_log(self):
@@ -386,7 +402,9 @@

     @property
     def write_content_size(self):
-        return _get_compression_parameter(self._params, lib.ZSTD_c_contentSizeFlag)
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_contentSizeFlag
+        )

     @property
     def write_checksum(self):
@@ -410,7 +428,9 @@

     @property
     def force_max_window(self):
-        return _get_compression_parameter(self._params, lib.ZSTD_c_forceMaxWindow)
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_forceMaxWindow
+        )

     @property
     def enable_ldm(self):
@@ -428,11 +448,15 @@

     @property
     def ldm_bucket_size_log(self):
-        return _get_compression_parameter(self._params, lib.ZSTD_c_ldmBucketSizeLog)
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_ldmBucketSizeLog
+        )

     @property
     def ldm_hash_rate_log(self):
-        return _get_compression_parameter(self._params, lib.ZSTD_c_ldmHashRateLog)
+        return _get_compression_parameter(
+            self._params, lib.ZSTD_c_ldmHashRateLog
+        )

     @property
     def ldm_hash_every_log(self):
@@ -457,7 +481,8 @@
     zresult = lib.ZSTD_CCtxParams_setParameter(params, param, value)
     if lib.ZSTD_isError(zresult):
         raise ZstdError(
-            "unable to set compression context parameter: %s" % _zstd_error(zresult)
+            "unable to set compression context parameter: %s"
+            % _zstd_error(zresult)
         )

@@ -467,14 +492,17 @@
     zresult = lib.ZSTD_CCtxParams_getParameter(params, param, result)
     if lib.ZSTD_isError(zresult):
         raise ZstdError(
-            "unable to get compression context parameter: %s" % _zstd_error(zresult)
+            "unable to get compression context parameter: %s"
+            % _zstd_error(zresult)
        )

     return result[0]


 class ZstdCompressionWriter(object):
-    def __init__(self, compressor, writer, source_size, write_size, write_return_read):
+    def __init__(
+        self, compressor, writer, source_size, write_size, write_return_read
+    ):
         self._compressor = compressor
         self._writer = writer
         self._write_size = write_size
@@ -491,7 +519,9 @@

         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(compressor._cctx, source_size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )

     def __enter__(self):
         if self._closed:
@@ -595,13 +625,20 @@
         while in_buffer.pos < in_buffer.size:
             zresult = lib.ZSTD_compressStream2(
-                self._compressor._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue
+                self._compressor._cctx,
+                out_buffer,
+                in_buffer,
+                lib.ZSTD_e_continue,
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )

             if out_buffer.pos:
-                self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+                self._writer.write(
+                    ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+                )
                 total_write += out_buffer.pos
                 self._bytes_compressed += out_buffer.pos
                 out_buffer.pos = 0
@@ -637,10 +674,14 @@
                 self._compressor._cctx, out_buffer, in_buffer, flush
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )

             if out_buffer.pos:
-                self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+                self._writer.write(
+                    ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+                )
                 total_write += out_buffer.pos
                 self._bytes_compressed += out_buffer.pos
                 out_buffer.pos = 0
@@ -672,7 +713,9 @@
                 self._compressor._cctx, self._out, source, lib.ZSTD_e_continue
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )

             if self._out.pos:
                 chunks.append(ffi.buffer(self._out.dst, self._out.pos)[:])
@@ -681,7 +724,10 @@
         return b"".join(chunks)

     def flush(self, flush_mode=COMPRESSOBJ_FLUSH_FINISH):
-        if flush_mode not in (COMPRESSOBJ_FLUSH_FINISH, COMPRESSOBJ_FLUSH_BLOCK):
+        if flush_mode not in (
+            COMPRESSOBJ_FLUSH_FINISH,
+            COMPRESSOBJ_FLUSH_BLOCK,
+        ):
             raise ValueError("flush mode not recognized")

         if self._finished:
@@ -768,7 +814,9 @@
             self._in.pos = 0

         if lib.ZSTD_isError(zresult):
-            raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "zstd compress error: %s" % _zstd_error(zresult)
+            )

         if self._out.pos == self._out.size:
             yield ffi.buffer(self._out.dst, self._out.pos)[:]
@@ -780,7 +828,8 @@

         if self._in.src != ffi.NULL:
             raise ZstdError(
-                "cannot call flush() before consuming output from " "previous operation"
+                "cannot call flush() before consuming output from "
+                "previous operation"
             )

         while True:
@@ -788,7 +837,9 @@
                 self._compressor._cctx, self._out, self._in, lib.ZSTD_e_flush
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )

             if self._out.pos:
                 yield ffi.buffer(self._out.dst, self._out.pos)[:]
@@ -812,7 +863,9 @@
                 self._compressor._cctx, self._out, self._in, lib.ZSTD_e_end
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )

             if self._out.pos:
                 yield ffi.buffer(self._out.dst, self._out.pos)[:]
@@ -939,7 +992,10 @@
             old_pos = out_buffer.pos

             zresult = lib.ZSTD_compressStream2(
-                self._compressor._cctx, out_buffer, self._in_buffer, lib.ZSTD_e_continue
+                self._compressor._cctx,
+                out_buffer,
+                self._in_buffer,
+                lib.ZSTD_e_continue,
             )

             self._bytes_compressed += out_buffer.pos - old_pos
@@ -997,7 +1053,9 @@
             self._bytes_compressed += out_buffer.pos - old_pos

             if lib.ZSTD_isError(zresult):
-                raise ZstdError("error ending compression stream: %s", _zstd_error(zresult))
+                raise ZstdError(
+                    "error ending compression stream: %s", _zstd_error(zresult)
+                )

             if zresult == 0:
                 self._finished_output = True
@@ -1102,7 +1160,9 @@
             self._bytes_compressed += out_buffer.pos - old_pos

             if lib.ZSTD_isError(zresult):
-                raise ZstdError("error ending compression stream: %s", _zstd_error(zresult))
+                raise ZstdError(
+                    "error ending compression stream: %s", _zstd_error(zresult)
+                )

             if zresult == 0:
                 self._finished_output = True
@@ -1170,13 +1230,17 @@
         threads=0,
     ):
         if level > lib.ZSTD_maxCLevel():
-            raise ValueError("level must be less than %d" % lib.ZSTD_maxCLevel())
+            raise ValueError(
+                "level must be less than %d" % lib.ZSTD_maxCLevel()
+            )

         if threads < 0:
             threads = _cpu_count()

         if compression_params and write_checksum is not None:
-            raise ValueError("cannot define compression_params and " "write_checksum")
+            raise ValueError(
+                "cannot define compression_params and " "write_checksum"
+            )

         if compression_params and write_content_size is not None:
             raise ValueError(
@@ -1184,7 +1248,9 @@
             )

         if compression_params and write_dict_id is not None:
-            raise ValueError("cannot define compression_params and " "write_dict_id")
+            raise ValueError(
+                "cannot define compression_params and " "write_dict_id"
+            )

         if compression_params and threads:
             raise ValueError("cannot define compression_params and threads")
@@ -1201,7 +1267,9 @@

             self._params = ffi.gc(params, lib.ZSTD_freeCCtxParams)

-            _set_compression_parameter(self._params, lib.ZSTD_c_compressionLevel, level)
+            _set_compression_parameter(
+                self._params, lib.ZSTD_c_compressionLevel, level
+            )

             _set_compression_parameter(
                 self._params,
@@ -1210,7 +1278,9 @@
             )

             _set_compression_parameter(
-                self._params, lib.ZSTD_c_checksumFlag, 1 if write_checksum else 0
+                self._params,
+                lib.ZSTD_c_checksumFlag,
+                1 if write_checksum else 0,
             )

             _set_compression_parameter(
@@ -1218,7 +1288,9 @@
             )

             if threads:
-                _set_compression_parameter(self._params, lib.ZSTD_c_nbWorkers, threads)
+                _set_compression_parameter(
+                    self._params, lib.ZSTD_c_nbWorkers, threads
+                )

         cctx = lib.ZSTD_createCCtx()
         if cctx == ffi.NULL:
@@ -1237,10 +1309,13 @@
         )

     def _setup_cctx(self):
-        zresult = lib.ZSTD_CCtx_setParametersUsingCCtxParams(self._cctx, self._params)
+        zresult = lib.ZSTD_CCtx_setParametersUsingCCtxParams(
+            self._cctx, self._params
+        )
         if lib.ZSTD_isError(zresult):
             raise ZstdError(
-                "could not set compression parameters: %s" % _zstd_error(zresult)
+                "could not set compression parameters: %s"
+                % _zstd_error(zresult)
             )

         dict_data = self._dict_data
@@ -1259,7 +1334,8 @@

             if lib.ZSTD_isError(zresult):
                 raise ZstdError(
-                    "could not load compression dictionary: %s" % _zstd_error(zresult)
+                    "could not load compression dictionary: %s"
+                    % _zstd_error(zresult)
                 )

     def memory_size(self):
@@ -1275,7 +1351,9 @@

         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, len(data_buffer))
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )

         out_buffer = ffi.new("ZSTD_outBuffer *")
         in_buffer = ffi.new("ZSTD_inBuffer *")
@@ -1307,11 +1385,15 @@

         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )

         cobj = ZstdCompressionObj()
         cobj._out = ffi.new("ZSTD_outBuffer *")
-        cobj._dst_buffer = ffi.new("char[]", COMPRESSION_RECOMMENDED_OUTPUT_SIZE)
+        cobj._dst_buffer = ffi.new(
+            "char[]", COMPRESSION_RECOMMENDED_OUTPUT_SIZE
+        )
         cobj._out.dst = cobj._dst_buffer
         cobj._out.size = COMPRESSION_RECOMMENDED_OUTPUT_SIZE
         cobj._out.pos = 0
@@ -1328,7 +1410,9 @@

         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )

         return ZstdCompressionChunker(self, chunk_size=chunk_size)

@@ -1353,7 +1437,9 @@

         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )

         in_buffer = ffi.new("ZSTD_inBuffer *")
         out_buffer = ffi.new("ZSTD_outBuffer *")
@@ -1381,7 +1467,9 @@
                 self._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )

             if out_buffer.pos:
                 ofh.write(ffi.buffer(out_buffer.dst, out_buffer.pos))
@@ -1423,7 +1511,9 @@

         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )

         return ZstdCompressionReader(self, source, read_size)

@@ -1443,7 +1533,9 @@
         if size < 0:
             size = lib.ZSTD_CONTENTSIZE_UNKNOWN

-        return ZstdCompressionWriter(self, writer, size, write_size, write_return_read)
+        return ZstdCompressionWriter(
+            self, writer, size, write_size, write_return_read
+        )

     write_to = stream_writer

@@ -1473,7 +1565,9 @@

         zresult = lib.ZSTD_CCtx_setPledgedSrcSize(self._cctx, size)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("error setting source size: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "error setting source size: %s" % _zstd_error(zresult)
+            )

         in_buffer = ffi.new("ZSTD_inBuffer *")
         out_buffer = ffi.new("ZSTD_outBuffer *")
@@ -1517,7 +1611,9 @@
                 self._cctx, out_buffer, in_buffer, lib.ZSTD_e_continue
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd compress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd compress error: %s" % _zstd_error(zresult)
+                )

             if out_buffer.pos:
                 data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
@@ -1596,10 +1692,14 @@
     data_buffer = ffi.from_buffer(data)
     zresult = lib.ZSTD_getFrameHeader(params, data_buffer, len(data_buffer))
     if lib.ZSTD_isError(zresult):
-        raise ZstdError("cannot get frame parameters: %s" % _zstd_error(zresult))
+        raise ZstdError(
+            "cannot get frame parameters: %s" % _zstd_error(zresult)
+        )

     if zresult:
-        raise ZstdError("not enough data for frame parameters; need %d bytes" % zresult)
+        raise ZstdError(
+            "not enough data for frame parameters; need %d bytes" % zresult
+        )

     return FrameParameters(params[0])

@@ -1611,9 +1711,14 @@
         self.k = k
         self.d = d

-        if dict_type not in (DICT_TYPE_AUTO, DICT_TYPE_RAWCONTENT, DICT_TYPE_FULLDICT):
+        if dict_type not in (
+            DICT_TYPE_AUTO,
+            DICT_TYPE_RAWCONTENT,
+            DICT_TYPE_FULLDICT,
+        ):
             raise ValueError(
-                "invalid dictionary load mode: %d; must use " "DICT_TYPE_* constants"
+                "invalid dictionary load mode: %d; must use "
+                "DICT_TYPE_* constants"
             )

         self._dict_type = dict_type
@@ -1630,7 +1735,9 @@

     def precompute_compress(self, level=0, compression_params=None):
         if level and compression_params:
-            raise ValueError("must only specify one of level or " "compression_params")
+            raise ValueError(
+                "must only specify one of level or " "compression_params"
+            )

         if not level and not compression_params:
             raise ValueError("must specify one of level or compression_params")
@@ -1675,7 +1782,9 @@
         if ddict == ffi.NULL:
             raise ZstdError("could not create decompression dict")

-        ddict = ffi.gc(ddict, lib.ZSTD_freeDDict, size=lib.ZSTD_sizeof_DDict(ddict))
+        ddict = ffi.gc(
+            ddict, lib.ZSTD_freeDDict, size=lib.ZSTD_sizeof_DDict(ddict)
+        )
         self.__dict__["_ddict"] = ddict

         return ddict
@@ -1805,7 +1914,9 @@
                 self._decompressor._dctx, out_buffer, in_buffer
             )
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd decompressor error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd decompressor error: %s" % _zstd_error(zresult)
+                )

             if zresult == 0:
                 self._finished = True
@@ -2105,16 +2216,22 @@
         if whence == os.SEEK_SET:
             if pos < 0:
-                raise ValueError("cannot seek to negative position with SEEK_SET")
+                raise ValueError(
+                    "cannot seek to negative position with SEEK_SET"
+                )

             if pos < self._bytes_decompressed:
-                raise ValueError("cannot seek zstd decompression stream " "backwards")
+                raise ValueError(
+                    "cannot seek zstd decompression stream " "backwards"
+                )

             read_amount = pos - self._bytes_decompressed

         elif whence == os.SEEK_CUR:
             if pos < 0:
-                raise ValueError("cannot seek zstd decompression stream " "backwards")
+                raise ValueError(
+                    "cannot seek zstd decompression stream " "backwards"
+                )

             read_amount = pos

         elif whence == os.SEEK_END:
@@ -2123,7 +2240,9 @@
             )

         while read_amount:
-            result = self.read(min(read_amount, DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE))
+            result = self.read(
+                min(read_amount, DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE)
+            )

             if not result:
                 break
@@ -2257,10 +2376,14 @@
         while in_buffer.pos < in_buffer.size:
             zresult = lib.ZSTD_decompressStream(dctx, out_buffer, in_buffer)
             if lib.ZSTD_isError(zresult):
-                raise ZstdError("zstd decompress error: %s" % _zstd_error(zresult))
+                raise ZstdError(
+                    "zstd decompress error: %s" % _zstd_error(zresult)
+                )

             if out_buffer.pos:
-                self._writer.write(ffi.buffer(out_buffer.dst, out_buffer.pos)[:])
+                self._writer.write(
+                    ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
+                )
                 total_write += out_buffer.pos
                 out_buffer.pos = 0

@@ -2299,7 +2422,9 @@

         data_buffer = ffi.from_buffer(data)

-        output_size = lib.ZSTD_getFrameContentSize(data_buffer, len(data_buffer))
+        output_size = lib.ZSTD_getFrameContentSize(
+            data_buffer, len(data_buffer)
+        )

         if output_size == lib.ZSTD_CONTENTSIZE_ERROR:
             raise ZstdError("error determining content size from frame header")
@@ -2307,7 +2432,9 @@
             return b""
         elif output_size == lib.ZSTD_CONTENTSIZE_UNKNOWN:
             if not max_output_size:
-                raise ZstdError("could not determine content size in frame header")
+                raise ZstdError(
+                    "could not determine content size in frame header"
+                )

             result_buffer = ffi.new("char[]", max_output_size)
             result_size = max_output_size
@@ -2330,7 +2457,9 @@
         if lib.ZSTD_isError(zresult):
             raise ZstdError("decompression error: %s" % _zstd_error(zresult))
         elif zresult:
-            raise ZstdError("decompression error: did not decompress full frame")
+            raise ZstdError(
+                "decompression error: did not decompress full frame"
+            )
         elif output_size and out_buffer.pos != output_size:
             raise ZstdError(
                 "decompression error: decompressed %d bytes; expected %d"
@@ -2346,7 +2475,9 @@
         read_across_frames=False,
     ):
         self._ensure_dctx()
-        return ZstdDecompressionReader(self, source, read_size, read_across_frames)
+        return ZstdDecompressionReader(
+            self, source, read_size, read_across_frames
+        )

     def decompressobj(self, write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE):
         if write_size < 1:
@@ -2421,9 +2552,13 @@
             while in_buffer.pos < in_buffer.size:
                 assert out_buffer.pos == 0

-                zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
+                zresult = lib.ZSTD_decompressStream(
+                    self._dctx, out_buffer, in_buffer
+                )
                 if lib.ZSTD_isError(zresult):
-                    raise ZstdError("zstd decompress error: %s" % _zstd_error(zresult))
+                    raise ZstdError(
+                        "zstd decompress error: %s" % _zstd_error(zresult)
+                    )

                 if out_buffer.pos:
                     data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
@@ -2449,7 +2584,9 @@
         if not hasattr(writer, "write"):
             raise ValueError("must pass an object with a write() method")

-        return ZstdDecompressionWriter(self, writer, write_size, write_return_read)
+        return ZstdDecompressionWriter(
+            self, writer, write_size, write_return_read
+        )

     write_to = stream_writer

@@ -2491,7 +2628,9 @@

             # Flush all read data to output.
             while in_buffer.pos < in_buffer.size:
-                zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
+                zresult = lib.ZSTD_decompressStream(
+                    self._dctx, out_buffer, in_buffer
+                )
                 if lib.ZSTD_isError(zresult):
                     raise ZstdError(
                         "zstd decompressor error: %s" % _zstd_error(zresult)
@@ -2521,7 +2660,9 @@
         # All chunks should be zstd frames and should have content size set.
         chunk_buffer = ffi.from_buffer(chunk)
         params = ffi.new("ZSTD_frameHeader *")
-        zresult = lib.ZSTD_getFrameHeader(params, chunk_buffer, len(chunk_buffer))
+        zresult = lib.ZSTD_getFrameHeader(
+            params, chunk_buffer, len(chunk_buffer)
+        )
         if lib.ZSTD_isError(zresult):
             raise ValueError("chunk 0 is not a valid zstd frame")
         elif zresult:
@@ -2546,7 +2687,9 @@

         zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("could not decompress chunk 0: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "could not decompress chunk 0: %s" % _zstd_error(zresult)
+            )
         elif zresult:
             raise ZstdError("chunk 0 did not decompress full frame")

@@ -2561,11 +2704,15 @@
                 raise ValueError("chunk %d must be bytes" % i)

             chunk_buffer = ffi.from_buffer(chunk)
-            zresult = lib.ZSTD_getFrameHeader(params, chunk_buffer, len(chunk_buffer))
+            zresult = lib.ZSTD_getFrameHeader(
+                params, chunk_buffer, len(chunk_buffer)
+            )
             if lib.ZSTD_isError(zresult):
                 raise ValueError("chunk %d is not a valid zstd frame" % i)
             elif zresult:
-                raise ValueError("chunk %d is too small to contain a zstd frame" % i)
+                raise ValueError(
+                    "chunk %d is too small to contain a zstd frame" % i
+                )

             if params.frameContentSize == lib.ZSTD_CONTENTSIZE_UNKNOWN:
                 raise ValueError("chunk %d missing content size in frame" % i)
@@ -2580,7 +2727,9 @@
             in_buffer.size = len(chunk_buffer)
             in_buffer.pos = 0

-            zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
+            zresult = lib.ZSTD_decompressStream(
+                self._dctx, out_buffer, in_buffer
+            )
             if lib.ZSTD_isError(zresult):
                 raise ZstdError(
                     "could not decompress chunk %d: %s" % _zstd_error(zresult)
@@ -2597,7 +2746,9 @@
         lib.ZSTD_DCtx_reset(self._dctx, lib.ZSTD_reset_session_only)

         if self._max_window_size:
-            zresult = lib.ZSTD_DCtx_setMaxWindowSize(self._dctx, self._max_window_size)
+            zresult = lib.ZSTD_DCtx_setMaxWindowSize(
+                self._dctx, self._max_window_size
+            )
             if lib.ZSTD_isError(zresult):
                 raise ZstdError(
                     "unable to set max window size: %s" % _zstd_error(zresult)
@@ -2605,11 +2756,14 @@

         zresult = lib.ZSTD_DCtx_setFormat(self._dctx, self._format)
         if lib.ZSTD_isError(zresult):
-            raise ZstdError("unable to set decoding format: %s" % _zstd_error(zresult))
+            raise ZstdError(
+                "unable to set decoding format: %s" % _zstd_error(zresult)
+            )

         if self._dict_data and load_dict:
             zresult = lib.ZSTD_DCtx_refDDict(self._dctx, self._dict_data._ddict)
             if lib.ZSTD_isError(zresult):
                 raise ZstdError(
-                    "unable to reference prepared dictionary: %s" % _zstd_error(zresult)
+                    "unable to reference prepared dictionary: %s"
+                    % _zstd_error(zresult)
                 )
--- a/hgext/lfs/blobstore.py Tue Jan 21 17:15:34 2020 -0800 +++ b/hgext/lfs/blobstore.py Thu Jan 23 13:10:48 2020 -0800 @@ -94,15 +94,12 @@ pass -class filewithprogress(object): +class lfsuploadfile(object): """a file-like object that supports __len__ and read. - - Useful to provide progress information for how many bytes are read. """ - def __init__(self, fp, callback): + def __init__(self, fp): self._fp = fp - self._callback = callback # func(readsize) fp.seek(0, os.SEEK_END) self._len = fp.tell() fp.seek(0) @@ -113,14 +110,12 @@ def read(self, size): if self._fp is None: return b'' - data = self._fp.read(size) - if data: - if self._callback: - self._callback(len(data)) - else: + return self._fp.read(size) + + def close(self): + if self._fp is not None: self._fp.close() self._fp = None - return data class local(object): @@ -495,15 +490,17 @@ _(b'detected corrupt lfs object: %s') % oid, hint=_(b'run hg verify'), ) - request.data = filewithprogress(localstore.open(oid), None) - request.get_method = lambda: r'PUT' - request.add_header('Content-Type', 'application/octet-stream') - request.add_header('Content-Length', len(request.data)) for k, v in headers: request.add_header(pycompat.strurl(k), pycompat.strurl(v)) try: + if action == b'upload': + request.data = lfsuploadfile(localstore.open(oid)) + request.get_method = lambda: 'PUT' + request.add_header('Content-Type', 'application/octet-stream') + request.add_header('Content-Length', len(request.data)) + with contextlib.closing(self.urlopener.open(request)) as res: contentlength = res.info().get(b"content-length") ui = self.ui # Shorten debug lines @@ -545,6 +542,9 @@ raise LfsRemoteError( _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint ) + finally: + if request.data: + request.data.close() def _batch(self, pointers, localstore, action): if action not in [b'upload', b'download']:
--- a/mercurial/debugcommands.py Tue Jan 21 17:15:34 2020 -0800 +++ b/mercurial/debugcommands.py Thu Jan 23 13:10:48 2020 -0800 @@ -13,6 +13,7 @@ import errno import operator import os +import platform import random import re import socket @@ -1487,6 +1488,11 @@ pycompat.sysexecutable or _(b"unknown"), ) fm.write( + b'pythonimplementation', + _(b"checking Python implementation (%s)\n"), + pycompat.sysbytes(platform.python_implementation()), + ) + fm.write( b'pythonver', _(b"checking Python version (%s)\n"), (b"%d.%d.%d" % sys.version_info[:3]),
--- a/mercurial/match.py Tue Jan 21 17:15:34 2020 -0800 +++ b/mercurial/match.py Thu Jan 23 13:10:48 2020 -0800 @@ -24,7 +24,7 @@ ) from .utils import stringutil -rustmod = policy.importrust('filepatterns') +rustmod = policy.importrust('dirstate') allpatternkinds = ( b're', @@ -1273,15 +1273,6 @@ '''Convert a (normalized) pattern of any kind into a regular expression. globsuffix is appended to the regexp of globs.''' - - if rustmod is not None: - try: - return rustmod.build_single_regex(kind, pat, globsuffix) - except rustmod.PatternError: - raise error.ProgrammingError( - b'not a regex pattern: %s:%s' % (kind, pat) - ) - if not pat and kind in (b'glob', b'relpath'): return b'' if kind == b're': @@ -1554,18 +1545,6 @@ This is useful to debug ignore patterns. ''' - if rustmod is not None: - result, warnings = rustmod.read_pattern_file( - filepath, bool(warn), sourceinfo, - ) - - for warning_params in warnings: - # Can't be easily emitted from Rust, because it would require - # a mechanism for both gettext and calling the `warn` function. - warn(_(b"%s: ignoring invalid syntax '%s'\n") % warning_params) - - return result - syntaxes = { b're': b'relre:', b'regexp': b'relre:',
--- a/rust/Cargo.lock Tue Jan 21 17:15:34 2020 -0800 +++ b/rust/Cargo.lock Thu Jan 23 13:10:48 2020 -0800 @@ -124,10 +124,16 @@ ] [[package]] +name = "hex" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] name = "hg-core" version = "0.1.0" dependencies = [ "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -483,6 +489,7 @@ "checksum either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3" "checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" "checksum getrandom 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "473a1265acc8ff1e808cd0a1af8cee3c2ee5200916058a2ca113c29f2d903571" +"checksum hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "023b39be39e3a2da62a94feb433e91e8bcd37676fbc8bea371daf52b7a769a3e" "checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" "checksum libc 0.2.64 (registry+https://github.com/rust-lang/crates.io-index)" = "74dfca3d9957906e8d1e6a0b641dc9a59848e793f1da2165889fd4f62d10d79c" "checksum memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "88579771288728879b57485cc7d6b07d648c9f0141eb955f8ab7f9d45394468e"
--- a/rust/hg-core/Cargo.toml Tue Jan 21 17:15:34 2020 -0800 +++ b/rust/hg-core/Cargo.toml Thu Jan 23 13:10:48 2020 -0800 @@ -10,6 +10,7 @@ [dependencies] byteorder = "1.3.1" +hex = "0.4.0" lazy_static = "1.3.0" memchr = "2.2.0" rand = "0.6.5"
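The new `hex` requirement backs the hexadecimal conversions in the `node.rs` file added below. As a minimal sketch (not part of this changeset), this is the hex 0.4 API that the new code relies on, assuming the crate's `FromHex` implementation for fixed-size byte arrays:

    // Sketch only: hex 0.4 usage as assumed by the new node.rs.
    use hex::{FromHex, FromHexError};

    fn main() {
        // Decoding into a fixed-size array enforces exact input length.
        let bytes = <[u8; 4]>::from_hex("deadbeef").unwrap();
        assert_eq!(hex::encode(bytes), "deadbeef");
        // Too-short input yields InvalidStringLength, which node.rs
        // below maps to NodeError::ExactLengthRequired.
        assert_eq!(
            <[u8; 4]>::from_hex("dead"),
            Err(FromHexError::InvalidStringLength)
        );
    }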
--- a/rust/hg-core/src/revlog.rs Tue Jan 21 17:15:34 2020 -0800 +++ b/rust/hg-core/src/revlog.rs Thu Jan 23 13:10:48 2020 -0800 @@ -5,6 +5,10 @@ // GNU General Public License version 2 or any later version. //! Mercurial concepts for handling revision history +pub mod node; +pub mod nodemap; +pub use node::{Node, NodeError}; + /// Mercurial revision numbers /// /// As noted in revlog.c, revision numbers are actually encoded in
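These re-exports let callers name the new types from the `revlog` module directly. A hypothetical consumer might look like this (sketch only; `hg` is the library name of the `hg-core` crate, and `Node::from_hex` is defined in the new `revlog/node.rs` below):

    // Hypothetical downstream use of the re-exports; not part of the diff.
    use hg::revlog::{Node, NodeError};

    fn parse_node(hex: &str) -> Result<Node, NodeError> {
        Node::from_hex(hex)
    }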
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rust/hg-core/src/revlog/node.rs Thu Jan 23 13:10:48 2020 -0800 @@ -0,0 +1,191 @@ +// Copyright 2019-2020 Georges Racinet <georges.racinet@octobus.net> + // + // This software may be used and distributed according to the terms of the + // GNU General Public License version 2 or any later version. + + //! Definitions and utilities for Revision nodes + //! + //! In the Mercurial code base, it is customary to call "a node" the binary SHA + //! of a revision. + + use hex::{self, FromHex, FromHexError}; + + /// The length in bytes of a `Node` + /// + /// This constant is meant to ease refactors of this module, and + /// is private so that calling code does not expect all nodes to have + /// the same size, should we support several formats concurrently in + /// the future. + const NODE_BYTES_LENGTH: usize = 20; + + /// The length in nybbles (hexadecimal digits) of a `Node` + /// + /// see also `NODE_BYTES_LENGTH` about it being private. + const NODE_NYBBLES_LENGTH: usize = 2 * NODE_BYTES_LENGTH; + + /// Private alias for readability and to ease future change + type NodeData = [u8; NODE_BYTES_LENGTH]; + + /// Binary revision SHA + /// + /// ## Future changes of hash size + /// + /// To accommodate future changes of hash size, Rust callers + /// should use the conversion methods at the boundaries (FFI, actual + /// computation of hashes and I/O) only, and only if required. + /// + /// All other callers outside of unit tests should just handle `Node` values + /// and never make any assumption on the actual length, using [`nybbles_len`] + /// if they need a loop boundary. + /// + /// All methods that create a `Node` either take a type that enforces + /// the size or fail immediately at runtime with [`ExactLengthRequired`]. + /// + /// [`nybbles_len`]: #method.nybbles_len + /// [`ExactLengthRequired`]: struct.NodeError#variant.ExactLengthRequired + #[derive(Clone, Debug, PartialEq)] + pub struct Node { + data: NodeData, + } + + /// The node value for NULL_REVISION + pub const NULL_NODE: Node = Node { + data: [0; NODE_BYTES_LENGTH], + }; + + impl From<NodeData> for Node { + fn from(data: NodeData) -> Node { + Node { data } + } + } + + #[derive(Debug, PartialEq)] + pub enum NodeError { + ExactLengthRequired(usize, String), + HexError(FromHexError, String), + } + + /// Low level utility function, also for prefixes + fn get_nybble(s: &[u8], i: usize) -> u8 { + if i % 2 == 0 { + s[i / 2] >> 4 + } else { + s[i / 2] & 0x0f + } + } + + impl Node { + /// Retrieve the `i`th half-byte of the binary data. + /// + /// This is also the `i`th hexadecimal digit in numeric form, + /// also called a [nybble](https://en.wikipedia.org/wiki/Nibble). + pub fn get_nybble(&self, i: usize) -> u8 { + get_nybble(&self.data, i) + } + + /// Length of the data, in nybbles + pub fn nybbles_len(&self) -> usize { + // public exposure as an instance method only, so that we can + // easily support several sizes of hashes if needed in the future. + NODE_NYBBLES_LENGTH + } + + /// Convert from hexadecimal string representation + /// + /// Exact length is required. + /// + /// To be used in FFI and I/O only, in order to facilitate future + /// changes of hash format. + pub fn from_hex(hex: &str) -> Result<Node, NodeError> { + Ok(NodeData::from_hex(hex) + .map_err(|e| NodeError::from((e, hex)))? + .into()) + } + + /// Convert to hexadecimal string representation + /// + /// To be used in FFI and I/O only, in order to facilitate future + /// changes of hash format.
+ pub fn encode_hex(&self) -> String { + hex::encode(self.data) + } + + /// Provide access to binary data + /// + /// This is needed by FFI layers, for instance to return expected + /// binary values to Python. + pub fn as_bytes(&self) -> &[u8] { + &self.data + } +} + +impl From<(FromHexError, &str)> for NodeError { + fn from(err_offender: (FromHexError, &str)) -> Self { + let (err, offender) = err_offender; + match err { + FromHexError::InvalidStringLength => { + NodeError::ExactLengthRequired( + NODE_NYBBLES_LENGTH, + offender.to_string(), + ) + } + _ => NodeError::HexError(err, offender.to_string()), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn sample_node() -> Node { + let mut data = [0; NODE_BYTES_LENGTH]; + data.copy_from_slice(&[ + 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, + 0x98, 0x76, 0x54, 0x32, 0x10, 0xde, 0xad, 0xbe, 0xef, + ]); + data.into() + } + + /// Pad a hexadecimal string to reach `NODE_NYBBLES_LENGTH` + /// + /// The padding is done with zeros + fn hex_pad_right(hex: &str) -> String { + let mut res = hex.to_string(); + while res.len() < NODE_NYBBLES_LENGTH { + res.push('0'); + } + res + } + + fn sample_node_hex() -> String { + hex_pad_right("0123456789abcdeffedcba9876543210deadbeef") + } + + #[test] + fn test_node_from_hex() { + assert_eq!(Node::from_hex(&sample_node_hex()), Ok(sample_node())); + + let mut short = hex_pad_right("0123"); + short.pop(); + short.pop(); + assert_eq!( + Node::from_hex(&short), + Err(NodeError::ExactLengthRequired(NODE_NYBBLES_LENGTH, short)), + ); + + let not_hex = hex_pad_right("012... oops"); + assert_eq!( + Node::from_hex(&not_hex), + Err(NodeError::HexError( + FromHexError::InvalidHexCharacter { c: '.', index: 3 }, + not_hex, + )), + ); + } + + #[test] + fn test_node_encode_hex() { + assert_eq!(sample_node().encode_hex(), sample_node_hex()); + } +}
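To make the nybble accessors concrete: nybble `i` is simply the `i`th hexadecimal digit of the node, high half-byte first. A unit-test-style sketch against the API added above (assuming the `hg::revlog::Node` re-export from earlier in this changeset):

    // Sketch: how get_nybble() lines up with the hex form of a node.
    use hg::revlog::Node;

    fn main() {
        let node = Node::from_hex("0123456789abcdeffedcba9876543210deadbeef")
            .unwrap();
        assert_eq!(node.nybbles_len(), 40); // 2 * NODE_BYTES_LENGTH
        assert_eq!(node.get_nybble(0), 0x0); // high half of byte 0x01
        assert_eq!(node.get_nybble(1), 0x1); // low half of byte 0x01
        assert_eq!(node.get_nybble(39), 0xf); // low half of trailing 0xef
        assert_eq!(
            node.encode_hex(),
            "0123456789abcdeffedcba9876543210deadbeef"
        );
    }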
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/rust/hg-core/src/revlog/nodemap.rs Thu Jan 23 13:10:48 2020 -0800 @@ -0,0 +1,160 @@ +// Copyright 2018-2020 Georges Racinet <georges.racinet@octobus.net> + // and Mercurial contributors + // + // This software may be used and distributed according to the terms of the + // GNU General Public License version 2 or any later version. + //! Indexing facilities for fast retrieval of `Revision` from `Node` + //! + //! This provides a variation on the 16-ary radix tree that is + //! provided as "nodetree" in revlog.c, ready for append-only persistence + //! on disk. + //! + //! Following existing implicit conventions, the "nodemap" terminology + //! is used in a more abstract context. + + use super::Revision; + use std::fmt; + + /// Low level NodeTree [`Block`] elements + /// + /// These are exactly as they are stored, for instance on persistent storage. + type RawElement = i32; + + /// High level representation of values in NodeTree + /// [`Blocks`](struct.Block.html) + /// + /// This is the high level representation that most algorithms should + /// use. + #[derive(Clone, Debug, Eq, PartialEq)] + enum Element { + Rev(Revision), + Block(usize), + None, + } + + impl From<RawElement> for Element { + /// Conversion from low level representation, after endianness conversion. + /// + /// See [`Block`](struct.Block.html) for an explanation of the encoding. + fn from(raw: RawElement) -> Element { + if raw >= 0 { + Element::Block(raw as usize) + } else if raw == -1 { + Element::None + } else { + Element::Rev(-raw - 2) + } + } + } + + impl From<Element> for RawElement { + fn from(element: Element) -> RawElement { + match element { + Element::None => -1, // matches the `raw == -1` case above + Element::Block(i) => i as RawElement, + Element::Rev(rev) => -rev - 2, + } + } + } + + /// A logical block of the `NodeTree`, packed with a fixed size. + /// + /// These are always used in container types implementing `Index<Block>`, + /// such as `&Block` + /// + /// As an array of integers, its `i`th element encodes the `i`th potential + /// edge from the block, i.e. the edge followed for the `i`th hexadecimal + /// digit (nybble). It is either: + /// + /// - absent (value -1) + /// - another `Block` in the same indexable container (value ≥ 0) + /// - a `Revision` leaf (value ≤ -2) + /// + /// Endianness has to be fixed for consistency on shared storage across + /// different architectures. + /// + /// A key difference from the C `nodetree` is that we need to be + /// able to represent the [`Block`] at index 0, hence -1 is the empty marker + /// rather than 0 and the `Revision` range upper limit is -2 instead of -1. + /// + /// Another related difference is that `NULL_REVISION` (-1) is not + /// represented at all, because we want an immutable empty nodetree + /// to be valid. + #[derive(Clone, PartialEq)] + pub struct Block([RawElement; 16]); + + impl Block { + fn new() -> Self { + Block([-1; 16]) + } + + fn get(&self, nybble: u8) -> Element { + Element::from(RawElement::from_be(self.0[nybble as usize])) + } + + fn set(&mut self, nybble: u8, element: Element) { + self.0[nybble as usize] = RawElement::to_be(element.into()) + } + } + + impl fmt::Debug for Block { + /// sparse representation for testing and debugging purposes + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_map() + .entries((0..16).filter_map(|i| match self.get(i) { + Element::None => None, + element => Some((i, element)), + })) + .finish() + } + } + + #[cfg(test)] + mod tests { + use super::*; + + /// Creates a `Block` using a syntax close to the `Debug` output + macro_rules!
block { + {$($nybble:tt : $variant:ident($val:tt)),*} => ( + { + let mut block = Block::new(); + $(block.set($nybble, Element::$variant($val)));*; + block + } + ) + } + + #[test] + fn test_block_debug() { + let mut block = Block::new(); + block.set(1, Element::Rev(3)); + block.set(10, Element::Block(0)); + assert_eq!(format!("{:?}", block), "{1: Rev(3), 10: Block(0)}"); + } + + #[test] + fn test_block_macro() { + let block = block! {5: Block(2)}; + assert_eq!(format!("{:?}", block), "{5: Block(2)}"); + + let block = block! {13: Rev(15), 5: Block(2)}; + assert_eq!(format!("{:?}", block), "{5: Block(2), 13: Rev(15)}"); + } + + #[test] + fn test_raw_block() { + let mut raw = [-1; 16]; + raw[0] = 0; + raw[1] = RawElement::to_be(15); + raw[2] = RawElement::to_be(-2); + raw[3] = RawElement::to_be(-1); + raw[4] = RawElement::to_be(-3); + let block = Block(raw); + assert_eq!(block.get(0), Element::Block(0)); + assert_eq!(block.get(1), Element::Block(15)); + assert_eq!(block.get(3), Element::None); + assert_eq!(block.get(2), Element::Rev(0)); + assert_eq!(block.get(4), Element::Rev(1)); + } +}
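The signed-integer encoding documented on `Block` reads most easily as a mapping table. A sketch of the round trip, as it would appear in a unit test inside `nodemap.rs` (the `Element` and `RawElement` types are module-private):

    // Sketch of the RawElement <-> Element encoding:
    //   raw >= 0  -> Block(raw)      index into the block container
    //   raw == -1 -> None            absent edge, the Block::new() fill
    //   raw <= -2 -> Rev(-raw - 2)   leaf revision
    assert_eq!(Element::from(0), Element::Block(0));
    assert_eq!(Element::from(-1), Element::None);
    assert_eq!(Element::from(-2), Element::Rev(0));
    assert_eq!(Element::from(-7), Element::Rev(5));
    // ... and back through the low level form:
    assert_eq!(RawElement::from(Element::Rev(5)), -7);
    assert_eq!(RawElement::from(Element::None), -1);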
--- a/rust/hg-core/src/utils/hg_path.rs Tue Jan 21 17:15:34 2020 -0800 +++ b/rust/hg-core/src/utils/hg_path.rs Thu Jan 23 13:10:48 2020 -0800 @@ -138,6 +138,79 @@ None } } + + #[cfg(windows)] + /// Adapted from the Python stdlib's `os.path.splitdrive` implementation. + /// + /// Split a pathname into drive/UNC sharepoint and relative path specifiers. + /// Returns a 2-tuple (drive_or_unc, path); either part may be empty. + /// + /// Given: + /// let (drive_or_unc, path) = p.split_drive(); + /// it is always true that the bytes of `drive_or_unc` followed by + /// those of `path` are exactly the bytes of `p`. + /// + /// If the path contained a drive letter, drive_or_unc will contain everything + /// up to and including the colon. + /// e.g. split_drive("c:/dir") returns ("c:", "/dir") + /// + /// If the path contained a UNC path, the drive_or_unc will contain the host + /// name and share up to but not including the fourth directory separator + /// character. + /// e.g. split_drive("//host/computer/dir") returns ("//host/computer", "/dir") + /// + /// Paths cannot contain both a drive letter and a UNC path. + pub fn split_drive(&self) -> (&HgPath, &HgPath) { + let bytes = self.as_bytes(); + let is_sep = |b| std::path::is_separator(b as char); + + if self.len() < 2 { + (HgPath::new(b""), &self) + } else if is_sep(bytes[0]) + && is_sep(bytes[1]) + && (self.len() == 2 || !is_sep(bytes[2])) + { + // Is a UNC path: + // vvvvvvvvvvvvvvvvvvvv drive letter or UNC path + // \\machine\mountpoint\directory\etc\... + // directory ^^^^^^^^^^^^^^^ + + let machine_end_index = bytes[2..].iter().position(|b| is_sep(*b)); + let mountpoint_start_index = if let Some(i) = machine_end_index { + i + 2 + } else { + return (HgPath::new(b""), &self); + }; + + match bytes[mountpoint_start_index + 1..] + .iter() + .position(|b| is_sep(*b)) + { + // A UNC path can't have two slashes in a row + // (after the initial two) + Some(0) => (HgPath::new(b""), &self), + Some(i) => { + let (a, b) = + bytes.split_at(mountpoint_start_index + 1 + i); + (HgPath::new(a), HgPath::new(b)) + } + None => (&self, HgPath::new(b"")), + } + } else if bytes[1] == b':' { + // Drive path c:\directory + let (a, b) = bytes.split_at(2); + (HgPath::new(a), HgPath::new(b)) + } else { + (HgPath::new(b""), &self) + } + } + + #[cfg(unix)] + /// Split a pathname into drive and path. On Posix, drive is always empty. + pub fn split_drive(&self) -> (&HgPath, &HgPath) { + (HgPath::new(b""), &self) + } + /// Checks for errors in the path, short-circuiting at the first one. /// This generates fine-grained errors useful for debugging. /// To simply check if the path is valid during tests, use `is_valid`.
@@ -473,4 +546,101 @@ let base = HgPath::new(b"ends/"); assert_eq!(Some(HgPath::new(b"with/dir/")), path.relative_to(base)); } + + #[test] + #[cfg(unix)] + fn test_split_drive() { + // Taken from the Python stdlib's tests + assert_eq!( + HgPath::new(br"/foo/bar").split_drive(), + (HgPath::new(b""), HgPath::new(br"/foo/bar")) + ); + assert_eq!( + HgPath::new(br"foo:bar").split_drive(), + (HgPath::new(b""), HgPath::new(br"foo:bar")) + ); + assert_eq!( + HgPath::new(br":foo:bar").split_drive(), + (HgPath::new(b""), HgPath::new(br":foo:bar")) + ); + // Also try NT paths; should not split them + assert_eq!( + HgPath::new(br"c:\foo\bar").split_drive(), + (HgPath::new(b""), HgPath::new(br"c:\foo\bar")) + ); + assert_eq!( + HgPath::new(b"c:/foo/bar").split_drive(), + (HgPath::new(b""), HgPath::new(br"c:/foo/bar")) + ); + assert_eq!( + HgPath::new(br"\\conky\mountpoint\foo\bar").split_drive(), + ( + HgPath::new(b""), + HgPath::new(br"\\conky\mountpoint\foo\bar") + ) + ); + } + + #[test] + #[cfg(windows)] + fn test_split_drive() { + assert_eq!( + HgPath::new(br"c:\foo\bar").split_drive(), + (HgPath::new(br"c:"), HgPath::new(br"\foo\bar")) + ); + assert_eq!( + HgPath::new(b"c:/foo/bar").split_drive(), + (HgPath::new(br"c:"), HgPath::new(br"/foo/bar")) + ); + assert_eq!( + HgPath::new(br"\\conky\mountpoint\foo\bar").split_drive(), + ( + HgPath::new(br"\\conky\mountpoint"), + HgPath::new(br"\foo\bar") + ) + ); + assert_eq!( + HgPath::new(br"//conky/mountpoint/foo/bar").split_drive(), + ( + HgPath::new(br"//conky/mountpoint"), + HgPath::new(br"/foo/bar") + ) + ); + assert_eq!( + HgPath::new(br"\\\conky\mountpoint\foo\bar").split_drive(), + ( + HgPath::new(br""), + HgPath::new(br"\\\conky\mountpoint\foo\bar") + ) + ); + assert_eq!( + HgPath::new(br"///conky/mountpoint/foo/bar").split_drive(), + ( + HgPath::new(br""), + HgPath::new(br"///conky/mountpoint/foo/bar") + ) + ); + assert_eq!( + HgPath::new(br"\\conky\\mountpoint\foo\bar").split_drive(), + ( + HgPath::new(br""), + HgPath::new(br"\\conky\\mountpoint\foo\bar") + ) + ); + assert_eq!( + HgPath::new(br"//conky//mountpoint/foo/bar").split_drive(), + ( + HgPath::new(br""), + HgPath::new(br"//conky//mountpoint/foo/bar") + ) + ); + // UNC part containing U+0130 + assert_eq!( + HgPath::new(b"//conky/MOUNTPO\xc4\xb0NT/foo/bar").split_drive(), + ( + HgPath::new(b"//conky/MOUNTPO\xc4\xb0NT"), + HgPath::new(br"/foo/bar") + ) + ); + } }
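A property worth keeping in mind when reading these tests: on both platforms `split_drive` never drops bytes, so the two halves always concatenate back to the input. A sketch of that invariant (assuming the `HgPath::new` and `as_bytes` API shown above):

    // Sketch: split_drive() round trip, drive + rest == original path.
    use hg::utils::hg_path::HgPath;

    fn check_roundtrip(path: &HgPath) {
        let (drive, rest) = path.split_drive();
        let mut joined = drive.as_bytes().to_vec();
        joined.extend_from_slice(rest.as_bytes());
        assert_eq!(joined, path.as_bytes());
    }

    fn main() {
        check_roundtrip(HgPath::new(br"c:\foo\bar"));
        check_roundtrip(HgPath::new(b"/foo/bar"));
        check_roundtrip(HgPath::new(br"\\conky\mountpoint\foo\bar"));
    }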
--- a/rust/hg-cpython/src/exceptions.rs Tue Jan 21 17:15:34 2020 -0800 +++ b/rust/hg-cpython/src/exceptions.rs Thu Jan 23 13:10:48 2020 -0800 @@ -13,7 +13,7 @@ //! //! [`GraphError`]: struct.GraphError.html use cpython::{ - exc::{IOError, RuntimeError, ValueError}, + exc::{RuntimeError, ValueError}, py_exception, PyErr, Python, }; use hg; @@ -39,34 +39,6 @@ } } -py_exception!(rustext, PatternError, RuntimeError); -py_exception!(rustext, PatternFileError, RuntimeError); py_exception!(rustext, HgPathPyError, RuntimeError); -impl PatternError { - pub fn pynew(py: Python, inner: hg::PatternError) -> PyErr { - match inner { - hg::PatternError::UnsupportedSyntax(m) => { - PatternError::new(py, ("PatternError", m)) - } - } - } -} - -impl PatternFileError { - pub fn pynew(py: Python, inner: hg::PatternFileError) -> PyErr { - match inner { - hg::PatternFileError::IO(e) => { - let value = (e.raw_os_error().unwrap_or(2), e.to_string()); - PyErr::new::<IOError, _>(py, value) - } - hg::PatternFileError::Pattern(e, l) => match e { - hg::PatternError::UnsupportedSyntax(m) => { - PatternFileError::new(py, ("PatternFileError", m, l)) - } - }, - } - } -} - py_exception!(shared_ref, AlreadyBorrowed, RuntimeError);
--- a/rust/hg-cpython/src/filepatterns.rs Tue Jan 21 17:15:34 2020 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,133 +0,0 @@ -// filepatterns.rs - // - // Copyright 2019, Georges Racinet <gracinet@anybox.fr>, - // Raphaël Gomès <rgomes@octobus.net> - // - // This software may be used and distributed according to the terms of the - // GNU General Public License version 2 or any later version. - - //! Bindings for the `hg::filepatterns` module provided by the - //! `hg-core` crate. From Python, this will be seen as `rustext.filepatterns` - //! and can be used as a replacement for the pure `filepatterns` Python - //! module. - use crate::exceptions::{PatternError, PatternFileError}; - use cpython::{ - PyBytes, PyDict, PyModule, PyObject, PyResult, PyTuple, Python, ToPyObject, - }; - use hg::utils::files; - use hg::{build_single_regex, read_pattern_file, LineNumber, PatternTuple}; - use std::path::PathBuf; - - /// Rust does not like functions with different return signatures. - /// The 3-tuple version is always returned by the hg-core function, - /// the (potential) conversion is handled at this level since it is not likely - /// to have any measurable impact on performance. - /// - /// The Python implementation passes a function reference for `warn` instead - /// of a boolean that is used to emit warnings while parsing. The Rust - /// implementation chooses to accumulate the warnings and propagate them to - /// Python upon completion. See the `readpatternfile` function in `match.py` - /// for more details. - fn read_pattern_file_wrapper( - py: Python, - file_path: PyObject, - warn: bool, - source_info: bool, - ) -> PyResult<PyTuple> { - let bytes = file_path.extract::<PyBytes>(py)?; - let path = files::get_path_from_bytes(bytes.data(py)); - match read_pattern_file(path, warn) { - Ok((patterns, warnings)) => { - if source_info { - let itemgetter = |x: &PatternTuple| { - (PyBytes::new(py, &x.0), x.1, PyBytes::new(py, &x.2)) - }; - let results: Vec<(PyBytes, LineNumber, PyBytes)> = - patterns.iter().map(itemgetter).collect(); - return Ok((results, warnings_to_py_bytes(py, &warnings)) - .to_py_object(py)); - } - let itemgetter = |x: &PatternTuple| PyBytes::new(py, &x.0); - let results: Vec<PyBytes> = - patterns.iter().map(itemgetter).collect(); - Ok( - (results, warnings_to_py_bytes(py, &warnings)) - .to_py_object(py), - ) - } - Err(e) => Err(PatternFileError::pynew(py, e)), - } - } - - fn warnings_to_py_bytes( - py: Python, - warnings: &[(PathBuf, Vec<u8>)], - ) -> Vec<(PyBytes, PyBytes)> { - warnings - .iter() - .map(|(path, syn)| { - ( - PyBytes::new(py, &files::get_bytes_from_path(path)), - PyBytes::new(py, syn), - ) - }) - .collect() - } - - fn build_single_regex_wrapper( - py: Python, - kind: PyObject, - pat: PyObject, - globsuffix: PyObject, - ) -> PyResult<PyBytes> { - match build_single_regex( - kind.extract::<PyBytes>(py)?.data(py), - pat.extract::<PyBytes>(py)?.data(py), - globsuffix.extract::<PyBytes>(py)?.data(py), - ) { - Ok(regex) => Ok(PyBytes::new(py, &regex)), - Err(e) => Err(PatternError::pynew(py, e)), - } - } - - pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> { - let dotted_name = &format!("{}.filepatterns", package); - let m = PyModule::new(py, dotted_name)?; - - m.add(py, "__package__", package)?; - m.add( - py, - "__doc__", - "Patterns files parsing - Rust implementation", - )?; - m.add( - py, - "build_single_regex", - py_fn!( - py, - build_single_regex_wrapper( - kind: PyObject, - pat: PyObject, - globsuffix: PyObject - ) - ), - )?; - m.add( - py, - "read_pattern_file", -
py_fn!( - py, - read_pattern_file_wrapper( - file_path: PyObject, - warn: bool, - source_info: bool - ) - ), - )?; - m.add(py, "PatternError", py.get_type::<PatternError>())?; - let sys = PyModule::import(py, "sys")?; - let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?; - sys_modules.set_item(py, dotted_name, &m)?; - - Ok(m) -}
--- a/rust/hg-cpython/src/lib.rs Tue Jan 21 17:15:34 2020 -0800 +++ b/rust/hg-cpython/src/lib.rs Thu Jan 23 13:10:48 2020 -0800 @@ -33,7 +33,6 @@ pub mod dirstate; pub mod discovery; pub mod exceptions; -pub mod filepatterns; pub mod parsers; pub mod revlog; pub mod utils; @@ -53,25 +52,10 @@ m.add(py, "revlog", revlog::init_module(py, &dotted_name)?)?; m.add( py, - "filepatterns", - filepatterns::init_module(py, &dotted_name)?, - )?; - m.add( - py, "parsers", parsers::init_parsers_module(py, &dotted_name)?, )?; m.add(py, "GraphError", py.get_type::<exceptions::GraphError>())?; - m.add( - py, - "PatternFileError", - py.get_type::<exceptions::PatternFileError>(), - )?; - m.add( - py, - "PatternError", - py.get_type::<exceptions::PatternError>(), - )?; Ok(()) });
--- a/tests/run-tests.py Tue Jan 21 17:15:34 2020 -0800 +++ b/tests/run-tests.py Thu Jan 23 13:10:48 2020 -0800 @@ -555,12 +555,6 @@ help="use pure Python code instead of C extensions", ) hgconf.add_argument( - "-3", - "--py3-warnings", - action="store_true", - help="enable Py3k warnings on Python 2.7+", - ) - hgconf.add_argument( "--with-chg", metavar="CHG", help="use specified chg wrapper in place of hg", @@ -748,9 +742,6 @@ ) options.timeout = 0 options.slowtimeout = 0 - if options.py3_warnings: - if PYTHON3: - parser.error('--py3-warnings can only be used on Python 2.7') if options.blacklist: options.blacklist = parselistfiles(options.blacklist, 'blacklist') @@ -909,7 +900,6 @@ timeout=None, startport=None, extraconfigopts=None, - py3warnings=False, shell=None, hgcommand=None, slowtimeout=None, @@ -942,8 +932,6 @@ must have the form "key=value" (something understood by hgrc). Values of the form "foo.key=value" will result in "[foo] key=value". - py3warnings enables Py3k warnings. - shell is the shell to execute tests in. """ if timeout is None: @@ -968,7 +956,6 @@ self._slowtimeout = slowtimeout self._startport = startport self._extraconfigopts = extraconfigopts or [] - self._py3warnings = py3warnings self._shell = _bytespath(shell) self._hgcommand = hgcommand or b'hg' self._usechg = usechg @@ -1515,9 +1502,8 @@ return os.path.join(self._testdir, b'%s.out' % self.bname) def _run(self, env): - py3switch = self._py3warnings and b' -3' or b'' # Quote the python(3) executable for Windows - cmd = b'"%s"%s "%s"' % (PYTHON, py3switch, self.path) + cmd = b'"%s" "%s"' % (PYTHON, self.path) vlog("# Running", cmd.decode("utf-8")) normalizenewlines = os.name == 'nt' result = self._runcommand(cmd, env, normalizenewlines=normalizenewlines) @@ -3366,7 +3352,6 @@ timeout=self.options.timeout, startport=self._getport(count), extraconfigopts=self.options.extra_config_opt, - py3warnings=self.options.py3_warnings, shell=self.options.shell, hgcommand=self._hgcommand, usechg=bool(self.options.with_chg or self.options.chg), @@ -3512,15 +3497,6 @@ self._usecorrectpython() - if self.options.py3_warnings and not self.options.anycoverage: - vlog("# Updating hg command to enable Py3k Warnings switch") - with open(os.path.join(self._bindir, 'hg'), 'rb') as f: - lines = [line.rstrip() for line in f] - lines[0] += ' -3' - with open(os.path.join(self._bindir, 'hg'), 'wb') as f: - for line in lines: - f.write(line + '\n') - hgbat = os.path.join(self._bindir, b'hg.bat') if os.path.isfile(hgbat): # hg.bat expects to be put in bin/scripts while run-tests.py
--- a/tests/test-check-format.t Tue Jan 21 17:15:34 2020 -0800 +++ b/tests/test-check-format.t Thu Jan 23 13:10:48 2020 -0800 @@ -1,5 +1,5 @@ #require black $ cd $RUNTESTDIR/.. - $ black --config=black.toml --check --diff `hg files 'set:(**.py + grep("^#!.*python")) - mercurial/thirdparty/** - "contrib/python-zstandard/**"'` + $ black --config=black.toml --check --diff `hg files 'set:(**.py + grep("^#!.*python")) - mercurial/thirdparty/**'`
--- a/tests/test-install.t Tue Jan 21 17:15:34 2020 -0800 +++ b/tests/test-install.t Thu Jan 23 13:10:48 2020 -0800 @@ -2,6 +2,7 @@ $ hg debuginstall checking encoding (ascii)... checking Python executable (*) (glob) + checking Python implementation (*) (glob) checking Python version (2.*) (glob) (no-py3 !) checking Python version (3.*) (glob) (py3 !) checking Python lib (.*[Ll]ib.*)... (re) @@ -43,6 +44,7 @@ "hgverextra": "*", (glob) "problems": 0, "pythonexe": "*", (glob) + "pythonimplementation": "*", (glob) "pythonlib": "*", (glob) "pythonsecurity": [*], (glob) "pythonver": "*.*.*", (glob) @@ -58,6 +60,7 @@ $ HGUSER= hg debuginstall checking encoding (ascii)... checking Python executable (*) (glob) + checking Python implementation (*) (glob) checking Python version (2.*) (glob) (no-py3 !) checking Python version (3.*) (glob) (py3 !) checking Python lib (.*[Ll]ib.*)... (re) @@ -103,6 +106,7 @@ $ HGEDITOR="~/tools/testeditor.exe" hg debuginstall checking encoding (ascii)... checking Python executable (*) (glob) + checking Python implementation (*) (glob) checking Python version (2.*) (glob) (no-py3 !) checking Python version (3.*) (glob) (py3 !) checking Python lib (.*[Ll]ib.*)... (re) @@ -128,6 +132,7 @@ $ HGEDITOR="c:\foo\bar\baz.exe -y -z" hg debuginstall checking encoding (ascii)... checking Python executable (*) (glob) + checking Python implementation (*) (glob) checking Python version (2.*) (glob) (no-py3 !) checking Python version (3.*) (glob) (py3 !) checking Python lib (.*[Ll]ib.*)... (re) @@ -185,6 +190,7 @@ $ ./installenv/*/hg debuginstall || cat pip.log checking encoding (ascii)... checking Python executable (*) (glob) + checking Python implementation (*) (glob) checking Python version (3.*) (glob) checking Python lib (*)... (glob) checking Python security support (*) (glob) @@ -221,6 +227,7 @@ $ ./installenv/*/hg debuginstall || cat pip.log checking encoding (ascii)... checking Python executable (*) (glob) + checking Python implementation (*) (glob) checking Python version (2.*) (glob) checking Python lib (*)... (glob) checking Python security support (*) (glob)
--- a/tests/test-rebase-collapse.t Tue Jan 21 17:15:34 2020 -0800 +++ b/tests/test-rebase-collapse.t Thu Jan 23 13:10:48 2020 -0800 @@ -486,61 +486,6 @@ abort: cannot collapse multiple named branches [255] - $ repeatchange() { - > hg checkout $1 - > hg cp d z - > echo blah >> z - > hg commit -Am "$2" --user "$3" - > } - $ repeatchange 3 "E" "user1" - 0 files updated, 0 files merged, 0 files removed, 0 files unresolved - $ repeatchange 3 "E" "user2" - 0 files updated, 0 files merged, 1 files removed, 0 files unresolved - created new head - $ hg tglog - @ 5: fbfb97b1089a 'E' - | - | o 4: f338eb3c2c7c 'E' - |/ - o 3: 41acb9dca9eb 'D' - | - | o 2: 8ac4a08debf1 'C' two - | | - | o 1: 1ba175478953 'B' one - |/ - o 0: 1994f17a630e 'A' - - $ hg rebase -s 5 -d 4 - rebasing 5:fbfb97b1089a "E" (tip) - note: not rebasing 5:fbfb97b1089a "E" (tip), its destination already has all its changes - saved backup bundle to $TESTTMP/e/.hg/strip-backup/fbfb97b1089a-553e1d85-rebase.hg - $ hg tglog - @ 4: f338eb3c2c7c 'E' - | - o 3: 41acb9dca9eb 'D' - | - | o 2: 8ac4a08debf1 'C' two - | | - | o 1: 1ba175478953 'B' one - |/ - o 0: 1994f17a630e 'A' - - $ hg export tip - # HG changeset patch - # User user1 - # Date 0 0 - # Thu Jan 01 00:00:00 1970 +0000 - # Node ID f338eb3c2c7cc5b5915676a2376ba7ac558c5213 - # Parent 41acb9dca9eb976e84cd21fcb756b4afa5a35c09 - E - - diff -r 41acb9dca9eb -r f338eb3c2c7c z - --- /dev/null Thu Jan 01 00:00:00 1970 +0000 - +++ b/z Thu Jan 01 00:00:00 1970 +0000 - @@ -0,0 +1,2 @@ - +d - +blah - $ cd .. Rebase, collapse and copies
--- a/tests/test-rebase-rename.t Tue Jan 21 17:15:34 2020 -0800 +++ b/tests/test-rebase-rename.t Thu Jan 23 13:10:48 2020 -0800 @@ -108,6 +108,62 @@ + $ repeatchange() { + > hg checkout $1 + > hg cp a z + > echo blah >> z + > hg commit -Am "$2" --user "$3" + > } + $ repeatchange 1 "E" "user1" + 2 files updated, 0 files merged, 3 files removed, 0 files unresolved + created new head + $ repeatchange 1 "E" "user2" + 0 files updated, 0 files merged, 1 files removed, 0 files unresolved + created new head + $ hg tglog + @ 5: af8ad1f97097 'E' + | + | o 4: 60f545c27784 'E' + |/ + | o 3: 032a9b75e83b 'rename A' + | | + | o 2: 220d0626d185 'rename B' + |/ + o 1: 3ab5da9a5c01 'B' + | + o 0: 1994f17a630e 'A' + + $ hg rebase -s 5 -d 4 + rebasing 5:af8ad1f97097 "E" (tip) + note: not rebasing 5:af8ad1f97097 "E" (tip), its destination already has all its changes + saved backup bundle to $TESTTMP/a/.hg/strip-backup/af8ad1f97097-c3e90708-rebase.hg + $ hg tglog + @ 4: 60f545c27784 'E' + | + | o 3: 032a9b75e83b 'rename A' + | | + | o 2: 220d0626d185 'rename B' + |/ + o 1: 3ab5da9a5c01 'B' + | + o 0: 1994f17a630e 'A' + + $ hg export tip + # HG changeset patch + # User user1 + # Date 0 0 + # Thu Jan 01 00:00:00 1970 +0000 + # Node ID 60f545c277846e6bad309919bae3ae106f59cb39 + # Parent 3ab5da9a5c01faa02c20f2ec4870a4f689c92da6 + E + + diff -r 3ab5da9a5c01 -r 60f545c27784 z + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 + +++ b/z Thu Jan 01 00:00:00 1970 +0000 + @@ -0,0 +1,2 @@ + +a + +blah + $ cd ..