contrib/python-zstandard/tests/test_train_dictionary.py @ 40121:73fef626dae3
zstandard: vendor python-zstandard 0.10.1
This was just released.
The upstream source distribution from PyPI was extracted. Unwanted
files were removed.
The clang-format ignore list was updated to reflect the new source
of files.
setup.py was updated to pass a new argument to python-zstandard's
function for returning an Extension instance. Upstream had to change
to use relative paths because Python 3.7's packaging doesn't
seem to like absolute paths when defining sources, includes, etc.
The default relative path calculation is relative to setup_zstd.py,
which lives in a different directory from Mercurial's setup.py.
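For illustration, here is a minimal sketch of what the adjusted setup.py
call could look like. The helper name (get_c_extension) and the new
keyword argument (root) are assumptions for illustration, not confirmed
by this commit message; the real names live in
contrib/python-zstandard/setup_zstd.py.

```python
# Hypothetical sketch only: get_c_extension and the root keyword are
# assumed names, not taken from this commit.
import os
import sys

sys.path.insert(0, 'contrib/python-zstandard')
import setup_zstd

extensions = [
    setup_zstd.get_c_extension(
        name='zstd',
        # Anchor the relative source/include paths at Mercurial's own
        # setup.py directory rather than at setup_zstd.py's directory.
        root=os.path.abspath(os.path.dirname(__file__)),
    ),
]
```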
The project contains a vendored copy of zstandard 1.3.6. The old
version was 1.3.4.
The API should be backwards compatible and nothing in core should
need to be adjusted. However, there is a new "chunker" API that we
may find useful in places where we want to emit compressed chunks
of a fixed size.
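As a rough sketch of how that chunker API might be used to produce
fixed-size compressed chunks (based on my reading of the 0.10 API; the
chunker()/compress()/finish() names and the chunk_size keyword should be
checked against the vendored documentation):

```python
import io

import zstandard as zstd


def compress_fixed_chunks(fh, chunk_size=32768):
    """Yield compressed output in pieces of at most chunk_size bytes."""
    cctx = zstd.ZstdCompressor(level=3)
    chunker = cctx.chunker(chunk_size=chunk_size)

    while True:
        data = fh.read(131072)
        if not data:
            break
        # compress() buffers internally and only yields full-size chunks.
        for chunk in chunker.compress(data):
            yield chunk

    # finish() ends the zstd frame and yields the final, possibly
    # shorter-than-chunk_size, chunk.
    for chunk in chunker.finish():
        yield chunk


chunks = list(compress_fixed_chunks(io.BytesIO(b'data' * 100000)))
```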
There is a pair of bug fixes in 0.10.0 regarding compressobj()
and decompressobj() when block flushing is used. I
actually found these bugs when introducing these APIs in Mercurial!
But existing Mercurial code is not affected because we don't
perform block flushing.
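For context, block flushing with these objects looks roughly like the
following sketch. I believe the relevant flush modes are
COMPRESSOBJ_FLUSH_BLOCK and COMPRESSOBJ_FLUSH_FINISH, but verify against
the vendored documentation.

```python
import zstandard as zstd

cctx = zstd.ZstdCompressor()
cobj = cctx.compressobj()

compressed = cobj.compress(b'first piece of data')
# Flush a complete block without ending the frame so that everything
# written so far can be decompressed immediately by the receiver.
compressed += cobj.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK)
compressed += cobj.compress(b', second piece')
compressed += cobj.flush(zstd.COMPRESSOBJ_FLUSH_FINISH)

dctx = zstd.ZstdDecompressor()
dobj = dctx.decompressobj()
assert dobj.decompress(compressed) == b'first piece of data, second piece'
```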
# no-check-commit because 3rd party code has different style guidelines
Differential Revision: https://phab.mercurial-scm.org/D4911
| author | Gregory Szorc <gregory.szorc@gmail.com> |
|---|---|
| date | Mon, 08 Oct 2018 16:27:40 -0700 |
| parents | b1fb341d8a61 |
| children | 69de49c4e39c |
import struct
import sys
import unittest

import zstandard as zstd

from . common import (
    generate_samples,
    make_cffi,
)


if sys.version_info[0] >= 3:
    int_type = int
else:
    int_type = long


@make_cffi
class TestTrainDictionary(unittest.TestCase):
    def test_no_args(self):
        with self.assertRaises(TypeError):
            zstd.train_dictionary()

    def test_bad_args(self):
        with self.assertRaises(TypeError):
            zstd.train_dictionary(8192, u'foo')

        with self.assertRaises(ValueError):
            zstd.train_dictionary(8192, [u'foo'])

    def test_no_params(self):
        d = zstd.train_dictionary(8192, generate_samples())
        self.assertIsInstance(d.dict_id(), int_type)

        # The dictionary ID may be different across platforms.
        expected = b'\x37\xa4\x30\xec' + struct.pack('<I', d.dict_id())

        data = d.as_bytes()
        self.assertEqual(data[0:8], expected)

    def test_basic(self):
        d = zstd.train_dictionary(8192, generate_samples(), k=64, d=16)
        self.assertIsInstance(d.dict_id(), int_type)

        data = d.as_bytes()
        self.assertEqual(data[0:4], b'\x37\xa4\x30\xec')

        self.assertEqual(d.k, 64)
        self.assertEqual(d.d, 16)

    def test_set_dict_id(self):
        d = zstd.train_dictionary(8192, generate_samples(), k=64, d=16,
                                  dict_id=42)
        self.assertEqual(d.dict_id(), 42)

    def test_optimize(self):
        d = zstd.train_dictionary(8192, generate_samples(), threads=-1,
                                  steps=1, d=16)

        # This varies by platform.
        self.assertIn(d.k, (50, 2000))
        self.assertEqual(d.d, 16)


@make_cffi
class TestCompressionDict(unittest.TestCase):
    def test_bad_mode(self):
        with self.assertRaisesRegexp(ValueError,
                                     'invalid dictionary load mode'):
            zstd.ZstdCompressionDict(b'foo', dict_type=42)

    def test_bad_precompute_compress(self):
        d = zstd.train_dictionary(8192, generate_samples(), k=64, d=16)

        with self.assertRaisesRegexp(ValueError,
                                     'must specify one of level or '):
            d.precompute_compress()

        with self.assertRaisesRegexp(ValueError,
                                     'must only specify one of level or '):
            d.precompute_compress(
                level=3, compression_params=zstd.CompressionParameters())

    def test_precompute_compress_rawcontent(self):
        d = zstd.ZstdCompressionDict(b'dictcontent' * 64,
                                     dict_type=zstd.DICT_TYPE_RAWCONTENT)
        d.precompute_compress(level=1)

        d = zstd.ZstdCompressionDict(b'dictcontent' * 64,
                                     dict_type=zstd.DICT_TYPE_FULLDICT)
        with self.assertRaisesRegexp(zstd.ZstdError,
                                     'unable to precompute dictionary'):
            d.precompute_compress(level=1)